osbm committed on
Commit d7b0121
1 parent: 4ef5de1

Delete nnUNet_results

Files changed (19)
  1. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset.json +0 -18
  2. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset_fingerprint.json +0 -1426
  3. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/.ipynb_checkpoints/progress-checkpoint.png +0 -3
  4. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_best.pth +0 -3
  5. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_latest.pth +0 -3
  6. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/debug.json +0 -52
  7. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/progress.png +0 -3
  8. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_7_24_00_01_52.txt +0 -1194
  9. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/.ipynb_checkpoints/Untitled-checkpoint.ipynb +0 -6
  10. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/.ipynb_checkpoints/progress-checkpoint.png +0 -3
  11. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/Untitled.ipynb +0 -101
  12. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/checkpoint_best.pth +0 -3
  13. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/debug.json +0 -52
  14. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/progress.png +0 -3
  15. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/training_log_2023_7_24_09_31_46.txt +0 -342
  16. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_2/debug.json +0 -52
  17. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_2/training_log_2023_7_24_11_56_27.txt +0 -26
  18. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_3/training_log_2023_7_24_11_56_49.txt +0 -21
  19. nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/plans.json +0 -342
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset.json DELETED
@@ -1,18 +0,0 @@
- {
-     "name": "Prostate158",
-     "description": "Prostate cancer segmentation dataset",
-     "channel_names": {
-         "0": "T2",
-         "1": "ADC",
-         "2": "DFI"
-     },
-     "labels": {
-         "background": 0,
-         "prostate_inner": 1,
-         "prostate_outer": 2,
-         "tumor": 3
-     },
-     "numTraining": 139,
-     "numTest": 19,
-     "file_ending": ".nii.gz"
- }
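For reference, a dataset.json like the one deleted above can be inspected with nothing more than Python's json module. The following is a minimal sketch, assuming a local copy of the file at the path shown (the path and variable names are illustrative, not part of this repository):

import json

# Illustrative path; point this at wherever a copy of the deleted file lives.
dataset_json_path = "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset.json"

with open(dataset_json_path) as f:
    dataset = json.load(f)

# Print the three input channels (T2, ADC, DFI) and the four labels.
print("channels:", dataset["channel_names"])
print("labels:", dataset["labels"])
print("training/test cases:", dataset["numTraining"], "/", dataset["numTest"])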
 
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset_fingerprint.json DELETED
@@ -1,1426 +0,0 @@
1
- {
2
- "foreground_intensity_properties_per_channel": {
3
- "0": {
4
- "max": 1694.0,
5
- "mean": 267.35308837890625,
6
- "median": 242.0,
7
- "min": 0.0,
8
- "percentile_00_5": 36.0,
9
- "percentile_99_5": 768.0,
10
- "std": 136.11251831054688
11
- },
12
- "1": {
13
- "max": 3557.286865234375,
14
- "mean": 1215.81591796875,
15
- "median": 1203.8331298828125,
16
- "min": 0.0,
17
- "percentile_00_5": 0.0,
18
- "percentile_99_5": 2259.82861328125,
19
- "std": 338.6748352050781
20
- },
21
- "2": {
22
- "max": 198.95455932617188,
23
- "mean": 72.26309204101562,
24
- "median": 70.3214340209961,
25
- "min": 0.0,
26
- "percentile_00_5": 34.534385681152344,
27
- "percentile_99_5": 132.71939086914062,
28
- "std": 18.909290313720703
29
- }
30
- },
31
- "median_relative_size_after_cropping": 1.0,
32
- "shapes_after_crop": [
33
- [
34
- 26,
35
- 270,
36
- 270
37
- ],
38
- [
39
- 26,
40
- 270,
41
- 270
42
- ],
43
- [
44
- 24,
45
- 270,
46
- 270
47
- ],
48
- [
49
- 25,
50
- 442,
51
- 442
52
- ],
53
- [
54
- 24,
55
- 270,
56
- 270
57
- ],
58
- [
59
- 24,
60
- 270,
61
- 270
62
- ],
63
- [
64
- 24,
65
- 270,
66
- 270
67
- ],
68
- [
69
- 24,
70
- 270,
71
- 270
72
- ],
73
- [
74
- 25,
75
- 442,
76
- 442
77
- ],
78
- [
79
- 24,
80
- 270,
81
- 270
82
- ],
83
- [
84
- 24,
85
- 270,
86
- 270
87
- ],
88
- [
89
- 26,
90
- 270,
91
- 270
92
- ],
93
- [
94
- 26,
95
- 270,
96
- 270
97
- ],
98
- [
99
- 24,
100
- 270,
101
- 270
102
- ],
103
- [
104
- 24,
105
- 270,
106
- 270
107
- ],
108
- [
109
- 24,
110
- 270,
111
- 270
112
- ],
113
- [
114
- 26,
115
- 270,
116
- 270
117
- ],
118
- [
119
- 24,
120
- 270,
121
- 270
122
- ],
123
- [
124
- 24,
125
- 270,
126
- 270
127
- ],
128
- [
129
- 28,
130
- 270,
131
- 270
132
- ],
133
- [
134
- 24,
135
- 270,
136
- 270
137
- ],
138
- [
139
- 26,
140
- 270,
141
- 270
142
- ],
143
- [
144
- 25,
145
- 442,
146
- 442
147
- ],
148
- [
149
- 25,
150
- 442,
151
- 442
152
- ],
153
- [
154
- 25,
155
- 442,
156
- 442
157
- ],
158
- [
159
- 24,
160
- 270,
161
- 270
162
- ],
163
- [
164
- 24,
165
- 270,
166
- 270
167
- ],
168
- [
169
- 27,
170
- 232,
171
- 232
172
- ],
173
- [
174
- 24,
175
- 270,
176
- 270
177
- ],
178
- [
179
- 30,
180
- 232,
181
- 232
182
- ],
183
- [
184
- 27,
185
- 232,
186
- 232
187
- ],
188
- [
189
- 24,
190
- 270,
191
- 270
192
- ],
193
- [
194
- 25,
195
- 442,
196
- 442
197
- ],
198
- [
199
- 24,
200
- 270,
201
- 270
202
- ],
203
- [
204
- 24,
205
- 270,
206
- 270
207
- ],
208
- [
209
- 26,
210
- 270,
211
- 270
212
- ],
213
- [
214
- 24,
215
- 270,
216
- 270
217
- ],
218
- [
219
- 24,
220
- 270,
221
- 270
222
- ],
223
- [
224
- 27,
225
- 442,
226
- 442
227
- ],
228
- [
229
- 25,
230
- 442,
231
- 442
232
- ],
233
- [
234
- 25,
235
- 442,
236
- 442
237
- ],
238
- [
239
- 35,
240
- 270,
241
- 270
242
- ],
243
- [
244
- 25,
245
- 442,
246
- 442
247
- ],
248
- [
249
- 24,
250
- 270,
251
- 270
252
- ],
253
- [
254
- 24,
255
- 270,
256
- 270
257
- ],
258
- [
259
- 24,
260
- 270,
261
- 270
262
- ],
263
- [
264
- 26,
265
- 270,
266
- 270
267
- ],
268
- [
269
- 24,
270
- 270,
271
- 270
272
- ],
273
- [
274
- 24,
275
- 270,
276
- 270
277
- ],
278
- [
279
- 35,
280
- 442,
281
- 442
282
- ],
283
- [
284
- 26,
285
- 270,
286
- 270
287
- ],
288
- [
289
- 24,
290
- 270,
291
- 270
292
- ],
293
- [
294
- 24,
295
- 270,
296
- 270
297
- ],
298
- [
299
- 31,
300
- 442,
301
- 442
302
- ],
303
- [
304
- 26,
305
- 270,
306
- 270
307
- ],
308
- [
309
- 30,
310
- 442,
311
- 442
312
- ],
313
- [
314
- 27,
315
- 232,
316
- 232
317
- ],
318
- [
319
- 27,
320
- 232,
321
- 232
322
- ],
323
- [
324
- 25,
325
- 442,
326
- 442
327
- ],
328
- [
329
- 24,
330
- 270,
331
- 270
332
- ],
333
- [
334
- 24,
335
- 270,
336
- 270
337
- ],
338
- [
339
- 24,
340
- 270,
341
- 270
342
- ],
343
- [
344
- 24,
345
- 270,
346
- 270
347
- ],
348
- [
349
- 28,
350
- 270,
351
- 270
352
- ],
353
- [
354
- 24,
355
- 270,
356
- 270
357
- ],
358
- [
359
- 31,
360
- 442,
361
- 442
362
- ],
363
- [
364
- 24,
365
- 270,
366
- 270
367
- ],
368
- [
369
- 26,
370
- 270,
371
- 270
372
- ],
373
- [
374
- 26,
375
- 270,
376
- 270
377
- ],
378
- [
379
- 24,
380
- 270,
381
- 270
382
- ],
383
- [
384
- 24,
385
- 270,
386
- 270
387
- ],
388
- [
389
- 24,
390
- 270,
391
- 270
392
- ],
393
- [
394
- 25,
395
- 442,
396
- 442
397
- ],
398
- [
399
- 26,
400
- 270,
401
- 270
402
- ],
403
- [
404
- 27,
405
- 232,
406
- 232
407
- ],
408
- [
409
- 24,
410
- 270,
411
- 270
412
- ],
413
- [
414
- 29,
415
- 270,
416
- 270
417
- ],
418
- [
419
- 24,
420
- 270,
421
- 270
422
- ],
423
- [
424
- 26,
425
- 270,
426
- 270
427
- ],
428
- [
429
- 26,
430
- 232,
431
- 232
432
- ],
433
- [
434
- 24,
435
- 270,
436
- 270
437
- ],
438
- [
439
- 27,
440
- 232,
441
- 232
442
- ],
443
- [
444
- 26,
445
- 270,
446
- 270
447
- ],
448
- [
449
- 25,
450
- 442,
451
- 442
452
- ],
453
- [
454
- 26,
455
- 270,
456
- 270
457
- ],
458
- [
459
- 24,
460
- 270,
461
- 270
462
- ],
463
- [
464
- 24,
465
- 270,
466
- 270
467
- ],
468
- [
469
- 30,
470
- 442,
471
- 442
472
- ],
473
- [
474
- 28,
475
- 270,
476
- 270
477
- ],
478
- [
479
- 24,
480
- 270,
481
- 270
482
- ],
483
- [
484
- 24,
485
- 270,
486
- 270
487
- ],
488
- [
489
- 24,
490
- 270,
491
- 270
492
- ],
493
- [
494
- 25,
495
- 442,
496
- 442
497
- ],
498
- [
499
- 31,
500
- 442,
501
- 442
502
- ],
503
- [
504
- 25,
505
- 442,
506
- 442
507
- ],
508
- [
509
- 25,
510
- 442,
511
- 442
512
- ],
513
- [
514
- 30,
515
- 232,
516
- 232
517
- ],
518
- [
519
- 24,
520
- 270,
521
- 270
522
- ],
523
- [
524
- 26,
525
- 270,
526
- 270
527
- ],
528
- [
529
- 31,
530
- 442,
531
- 442
532
- ],
533
- [
534
- 24,
535
- 270,
536
- 270
537
- ],
538
- [
539
- 26,
540
- 270,
541
- 270
542
- ],
543
- [
544
- 26,
545
- 270,
546
- 270
547
- ],
548
- [
549
- 24,
550
- 270,
551
- 270
552
- ],
553
- [
554
- 25,
555
- 442,
556
- 442
557
- ],
558
- [
559
- 24,
560
- 270,
561
- 270
562
- ],
563
- [
564
- 26,
565
- 270,
566
- 270
567
- ],
568
- [
569
- 27,
570
- 442,
571
- 442
572
- ],
573
- [
574
- 24,
575
- 270,
576
- 270
577
- ],
578
- [
579
- 24,
580
- 270,
581
- 270
582
- ],
583
- [
584
- 24,
585
- 270,
586
- 270
587
- ],
588
- [
589
- 28,
590
- 270,
591
- 270
592
- ],
593
- [
594
- 24,
595
- 270,
596
- 270
597
- ],
598
- [
599
- 30,
600
- 270,
601
- 270
602
- ],
603
- [
604
- 24,
605
- 270,
606
- 270
607
- ],
608
- [
609
- 24,
610
- 270,
611
- 270
612
- ],
613
- [
614
- 25,
615
- 442,
616
- 442
617
- ],
618
- [
619
- 24,
620
- 270,
621
- 270
622
- ],
623
- [
624
- 24,
625
- 270,
626
- 270
627
- ],
628
- [
629
- 24,
630
- 270,
631
- 270
632
- ],
633
- [
634
- 28,
635
- 270,
636
- 270
637
- ],
638
- [
639
- 24,
640
- 270,
641
- 270
642
- ],
643
- [
644
- 40,
645
- 270,
646
- 270
647
- ],
648
- [
649
- 24,
650
- 270,
651
- 270
652
- ],
653
- [
654
- 24,
655
- 270,
656
- 270
657
- ],
658
- [
659
- 27,
660
- 232,
661
- 232
662
- ],
663
- [
664
- 24,
665
- 270,
666
- 270
667
- ],
668
- [
669
- 26,
670
- 270,
671
- 270
672
- ],
673
- [
674
- 25,
675
- 442,
676
- 442
677
- ],
678
- [
679
- 27,
680
- 232,
681
- 232
682
- ],
683
- [
684
- 26,
685
- 270,
686
- 270
687
- ],
688
- [
689
- 24,
690
- 270,
691
- 270
692
- ],
693
- [
694
- 25,
695
- 442,
696
- 442
697
- ],
698
- [
699
- 24,
700
- 270,
701
- 270
702
- ],
703
- [
704
- 24,
705
- 270,
706
- 270
707
- ],
708
- [
709
- 24,
710
- 270,
711
- 270
712
- ],
713
- [
714
- 24,
715
- 270,
716
- 270
717
- ],
718
- [
719
- 25,
720
- 442,
721
- 442
722
- ],
723
- [
724
- 24,
725
- 270,
726
- 270
727
- ]
728
- ],
729
- "spacings": [
730
- [
731
- 3.0,
732
- 0.4017857015132904,
733
- 0.4017857015132904
734
- ],
735
- [
736
- 3.0,
737
- 0.4017857015132904,
738
- 0.4017857015132904
739
- ],
740
- [
741
- 3.0,
742
- 0.4017857015132904,
743
- 0.4017857015132904
744
- ],
745
- [
746
- 3.000002384185791,
747
- 0.27173900604248047,
748
- 0.27173900604248047
749
- ],
750
- [
751
- 3.000000238418579,
752
- 0.4017857015132904,
753
- 0.4017857015132904
754
- ],
755
- [
756
- 3.000000238418579,
757
- 0.4017857015132904,
758
- 0.4017857015132904
759
- ],
760
- [
761
- 2.999999761581421,
762
- 0.4017857015132904,
763
- 0.4017857015132904
764
- ],
765
- [
766
- 3.000000238418579,
767
- 0.4017857015132904,
768
- 0.4017857015132904
769
- ],
770
- [
771
- 3.0000030994415283,
772
- 0.27173900604248047,
773
- 0.27173900604248047
774
- ],
775
- [
776
- 3.0,
777
- 0.4017857015132904,
778
- 0.4017857015132904
779
- ],
780
- [
781
- 2.999999761581421,
782
- 0.4017857015132904,
783
- 0.4017857015132904
784
- ],
785
- [
786
- 3.0,
787
- 0.4464285671710968,
788
- 0.4464285671710968
789
- ],
790
- [
791
- 3.0,
792
- 0.4017857015132904,
793
- 0.4017857015132904
794
- ],
795
- [
796
- 3.0,
797
- 0.4017857015132904,
798
- 0.4017857015132904
799
- ],
800
- [
801
- 3.000000238418579,
802
- 0.4017857015132904,
803
- 0.4017857015132904
804
- ],
805
- [
806
- 3.0,
807
- 0.4017857015132904,
808
- 0.4017857015132904
809
- ],
810
- [
811
- 3.0,
812
- 0.4017857015132904,
813
- 0.4017857015132904
814
- ],
815
- [
816
- 3.0,
817
- 0.4017857015132904,
818
- 0.4017857015132904
819
- ],
820
- [
821
- 3.0,
822
- 0.4017857015132904,
823
- 0.4017857015132904
824
- ],
825
- [
826
- 3.0,
827
- 0.4017857015132904,
828
- 0.4017857015132904
829
- ],
830
- [
831
- 3.0,
832
- 0.4017857015132904,
833
- 0.4017857015132904
834
- ],
835
- [
836
- 3.000000238418579,
837
- 0.4017857015132904,
838
- 0.4017857015132904
839
- ],
840
- [
841
- 2.999998092651367,
842
- 0.27173900604248047,
843
- 0.27173900604248047
844
- ],
845
- [
846
- 2.9999985694885254,
847
- 0.27173900604248047,
848
- 0.27173900604248047
849
- ],
850
- [
851
- 3.000016689300537,
852
- 0.27173900604248047,
853
- 0.27173900604248047
854
- ],
855
- [
856
- 3.0,
857
- 0.4017857015132904,
858
- 0.4017857015132904
859
- ],
860
- [
861
- 3.0,
862
- 0.4017857015132904,
863
- 0.4017857015132904
864
- ],
865
- [
866
- 3.0000035762786865,
867
- 0.46875,
868
- 0.46875
869
- ],
870
- [
871
- 3.0,
872
- 0.4017857015132904,
873
- 0.4017857015132904
874
- ],
875
- [
876
- 2.9999992847442627,
877
- 0.46875,
878
- 0.46875
879
- ],
880
- [
881
- 2.9999945163726807,
882
- 0.46875,
883
- 0.46875
884
- ],
885
- [
886
- 3.000000238418579,
887
- 0.4017857015132904,
888
- 0.4017857015132904
889
- ],
890
- [
891
- 3.0000016689300537,
892
- 0.27173900604248047,
893
- 0.27173900604248047
894
- ],
895
- [
896
- 3.0,
897
- 0.4017857015132904,
898
- 0.4017857015132904
899
- ],
900
- [
901
- 3.0,
902
- 0.4017857015132904,
903
- 0.4017857015132904
904
- ],
905
- [
906
- 3.0,
907
- 0.4017857015132904,
908
- 0.4017857015132904
909
- ],
910
- [
911
- 3.0,
912
- 0.4017857015132904,
913
- 0.4017857015132904
914
- ],
915
- [
916
- 3.0,
917
- 0.4017857015132904,
918
- 0.4017857015132904
919
- ],
920
- [
921
- 2.9999990463256836,
922
- 0.27173900604248047,
923
- 0.27173900604248047
924
- ],
925
- [
926
- 2.9999988079071045,
927
- 0.27173900604248047,
928
- 0.27173900604248047
929
- ],
930
- [
931
- 2.999993085861206,
932
- 0.27173900604248047,
933
- 0.27173900604248047
934
- ],
935
- [
936
- 3.0,
937
- 0.4017857015132904,
938
- 0.4017857015132904
939
- ],
940
- [
941
- 2.999997854232788,
942
- 0.27173900604248047,
943
- 0.27173900604248047
944
- ],
945
- [
946
- 3.0,
947
- 0.4017857015132904,
948
- 0.4017857015132904
949
- ],
950
- [
951
- 3.0,
952
- 0.4017857015132904,
953
- 0.4017857015132904
954
- ],
955
- [
956
- 3.0,
957
- 0.4017857015132904,
958
- 0.4017857015132904
959
- ],
960
- [
961
- 3.000000238418579,
962
- 0.4017857015132904,
963
- 0.4017857015132904
964
- ],
965
- [
966
- 3.0,
967
- 0.4017857015132904,
968
- 0.4017857015132904
969
- ],
970
- [
971
- 3.0,
972
- 0.4017857015132904,
973
- 0.4017857015132904
974
- ],
975
- [
976
- 3.0000011920928955,
977
- 0.27173900604248047,
978
- 0.27173900604248047
979
- ],
980
- [
981
- 3.0,
982
- 0.4017857015132904,
983
- 0.4017857015132904
984
- ],
985
- [
986
- 3.0,
987
- 0.4017857015132904,
988
- 0.4017857015132904
989
- ],
990
- [
991
- 3.0,
992
- 0.4017857015132904,
993
- 0.4017857015132904
994
- ],
995
- [
996
- 3.000004768371582,
997
- 0.27173900604248047,
998
- 0.27173900604248047
999
- ],
1000
- [
1001
- 3.0000159740448,
1002
- 0.4017859995365143,
1003
- 0.4017859995365143
1004
- ],
1005
- [
1006
- 2.9999992847442627,
1007
- 0.27173900604248047,
1008
- 0.27173900604248047
1009
- ],
1010
- [
1011
- 2.9999992847442627,
1012
- 0.46875,
1013
- 0.46875
1014
- ],
1015
- [
1016
- 2.9999849796295166,
1017
- 0.46875,
1018
- 0.46875
1019
- ],
1020
- [
1021
- 3.0000016689300537,
1022
- 0.27173900604248047,
1023
- 0.27173900604248047
1024
- ],
1025
- [
1026
- 3.0,
1027
- 0.4017857015132904,
1028
- 0.4017857015132904
1029
- ],
1030
- [
1031
- 3.000000238418579,
1032
- 0.4017857015132904,
1033
- 0.4017857015132904
1034
- ],
1035
- [
1036
- 3.0,
1037
- 0.4017857015132904,
1038
- 0.4017857015132904
1039
- ],
1040
- [
1041
- 3.0,
1042
- 0.4017857015132904,
1043
- 0.4017857015132904
1044
- ],
1045
- [
1046
- 3.0,
1047
- 0.4017857015132904,
1048
- 0.4017857015132904
1049
- ],
1050
- [
1051
- 3.0,
1052
- 0.4017857015132904,
1053
- 0.4017857015132904
1054
- ],
1055
- [
1056
- 2.999997854232788,
1057
- 0.27173900604248047,
1058
- 0.27173900604248047
1059
- ],
1060
- [
1061
- 3.0,
1062
- 0.4017857015132904,
1063
- 0.4017857015132904
1064
- ],
1065
- [
1066
- 3.0,
1067
- 0.4017857015132904,
1068
- 0.4017857015132904
1069
- ],
1070
- [
1071
- 3.0,
1072
- 0.4017857015132904,
1073
- 0.4017857015132904
1074
- ],
1075
- [
1076
- 3.0,
1077
- 0.4017857015132904,
1078
- 0.4017857015132904
1079
- ],
1080
- [
1081
- 3.0,
1082
- 0.4017857015132904,
1083
- 0.4017857015132904
1084
- ],
1085
- [
1086
- 3.0,
1087
- 0.4017857015132904,
1088
- 0.4017857015132904
1089
- ],
1090
- [
1091
- 3.0,
1092
- 0.27173900604248047,
1093
- 0.27173900604248047
1094
- ],
1095
- [
1096
- 3.0,
1097
- 0.4017857015132904,
1098
- 0.4017857015132904
1099
- ],
1100
- [
1101
- 3.0000014305114746,
1102
- 0.46875,
1103
- 0.46875
1104
- ],
1105
- [
1106
- 2.999999761581421,
1107
- 0.4017857015132904,
1108
- 0.4017857015132904
1109
- ],
1110
- [
1111
- 3.0,
1112
- 0.4017857015132904,
1113
- 0.4017857015132904
1114
- ],
1115
- [
1116
- 3.0,
1117
- 0.4017857015132904,
1118
- 0.4017857015132904
1119
- ],
1120
- [
1121
- 3.0,
1122
- 0.4017857015132904,
1123
- 0.4017857015132904
1124
- ],
1125
- [
1126
- 3.0,
1127
- 0.46875,
1128
- 0.46875
1129
- ],
1130
- [
1131
- 3.0,
1132
- 0.4017857015132904,
1133
- 0.4017857015132904
1134
- ],
1135
- [
1136
- 3.00002121925354,
1137
- 0.46875,
1138
- 0.46875
1139
- ],
1140
- [
1141
- 3.0,
1142
- 0.4017857015132904,
1143
- 0.4017857015132904
1144
- ],
1145
- [
1146
- 3.0,
1147
- 0.27173900604248047,
1148
- 0.27173900604248047
1149
- ],
1150
- [
1151
- 3.0,
1152
- 0.4017857015132904,
1153
- 0.4017857015132904
1154
- ],
1155
- [
1156
- 3.0,
1157
- 0.4017857015132904,
1158
- 0.4017857015132904
1159
- ],
1160
- [
1161
- 3.0,
1162
- 0.4017857015132904,
1163
- 0.4017857015132904
1164
- ],
1165
- [
1166
- 3.000000476837158,
1167
- 0.27173900604248047,
1168
- 0.27173900604248047
1169
- ],
1170
- [
1171
- 3.0,
1172
- 0.4017857015132904,
1173
- 0.4017857015132904
1174
- ],
1175
- [
1176
- 3.0,
1177
- 0.4017857015132904,
1178
- 0.4017857015132904
1179
- ],
1180
- [
1181
- 3.000000238418579,
1182
- 0.4017857015132904,
1183
- 0.4017857015132904
1184
- ],
1185
- [
1186
- 3.0,
1187
- 0.4017857015132904,
1188
- 0.4017857015132904
1189
- ],
1190
- [
1191
- 3.0,
1192
- 0.27173900604248047,
1193
- 0.27173900604248047
1194
- ],
1195
- [
1196
- 3.0000033378601074,
1197
- 0.27173900604248047,
1198
- 0.27173900604248047
1199
- ],
1200
- [
1201
- 2.9999983310699463,
1202
- 0.27173900604248047,
1203
- 0.27173900604248047
1204
- ],
1205
- [
1206
- 3.0,
1207
- 0.27173900604248047,
1208
- 0.27173900604248047
1209
- ],
1210
- [
1211
- 2.9999992847442627,
1212
- 0.46875,
1213
- 0.46875
1214
- ],
1215
- [
1216
- 3.0,
1217
- 0.4017857015132904,
1218
- 0.4017857015132904
1219
- ],
1220
- [
1221
- 3.0,
1222
- 0.4017857015132904,
1223
- 0.4017857015132904
1224
- ],
1225
- [
1226
- 2.9999990463256836,
1227
- 0.27173900604248047,
1228
- 0.27173900604248047
1229
- ],
1230
- [
1231
- 3.0,
1232
- 0.4017857015132904,
1233
- 0.4017857015132904
1234
- ],
1235
- [
1236
- 3.0,
1237
- 0.4017857015132904,
1238
- 0.4017857015132904
1239
- ],
1240
- [
1241
- 3.0,
1242
- 0.4017857015132904,
1243
- 0.4017857015132904
1244
- ],
1245
- [
1246
- 3.0,
1247
- 0.4017857015132904,
1248
- 0.4017857015132904
1249
- ],
1250
- [
1251
- 3.000020742416382,
1252
- 0.27173900604248047,
1253
- 0.27173900604248047
1254
- ],
1255
- [
1256
- 2.999999761581421,
1257
- 0.4017857015132904,
1258
- 0.4017857015132904
1259
- ],
1260
- [
1261
- 3.0,
1262
- 0.4017857015132904,
1263
- 0.4017857015132904
1264
- ],
1265
- [
1266
- 2.9999923706054688,
1267
- 0.27173900604248047,
1268
- 0.27173900604248047
1269
- ],
1270
- [
1271
- 3.0,
1272
- 0.4017857015132904,
1273
- 0.4017857015132904
1274
- ],
1275
- [
1276
- 3.0,
1277
- 0.4017857015132904,
1278
- 0.4017857015132904
1279
- ],
1280
- [
1281
- 3.0,
1282
- 0.4017857015132904,
1283
- 0.4017857015132904
1284
- ],
1285
- [
1286
- 3.0,
1287
- 0.4017857015132904,
1288
- 0.4017857015132904
1289
- ],
1290
- [
1291
- 3.0,
1292
- 0.4017857015132904,
1293
- 0.4017857015132904
1294
- ],
1295
- [
1296
- 3.0,
1297
- 0.4017857015132904,
1298
- 0.4017857015132904
1299
- ],
1300
- [
1301
- 3.0,
1302
- 0.4017857015132904,
1303
- 0.4017857015132904
1304
- ],
1305
- [
1306
- 3.0,
1307
- 0.4017857015132904,
1308
- 0.4017857015132904
1309
- ],
1310
- [
1311
- 2.999995470046997,
1312
- 0.27173900604248047,
1313
- 0.27173900604248047
1314
- ],
1315
- [
1316
- 3.0,
1317
- 0.4017857015132904,
1318
- 0.4017857015132904
1319
- ],
1320
- [
1321
- 3.0,
1322
- 0.4017857015132904,
1323
- 0.4017857015132904
1324
- ],
1325
- [
1326
- 3.0,
1327
- 0.4017857015132904,
1328
- 0.4017857015132904
1329
- ],
1330
- [
1331
- 3.0,
1332
- 0.4017857015132904,
1333
- 0.4017857015132904
1334
- ],
1335
- [
1336
- 2.999999761581421,
1337
- 0.4017857015132904,
1338
- 0.4017857015132904
1339
- ],
1340
- [
1341
- 3.0,
1342
- 0.4017857015132904,
1343
- 0.4017857015132904
1344
- ],
1345
- [
1346
- 3.0,
1347
- 0.4017857015132904,
1348
- 0.4017857015132904
1349
- ],
1350
- [
1351
- 3.0,
1352
- 0.4017857015132904,
1353
- 0.4017857015132904
1354
- ],
1355
- [
1356
- 2.999994993209839,
1357
- 0.46875,
1358
- 0.46875
1359
- ],
1360
- [
1361
- 3.0,
1362
- 0.4017857015132904,
1363
- 0.4017857015132904
1364
- ],
1365
- [
1366
- 3.0,
1367
- 0.4017857015132904,
1368
- 0.4017857015132904
1369
- ],
1370
- [
1371
- 3.0,
1372
- 0.27173900604248047,
1373
- 0.27173900604248047
1374
- ],
1375
- [
1376
- 2.9999983310699463,
1377
- 0.46875,
1378
- 0.46875
1379
- ],
1380
- [
1381
- 3.0,
1382
- 0.4017857015132904,
1383
- 0.4017857015132904
1384
- ],
1385
- [
1386
- 3.0,
1387
- 0.4017857015132904,
1388
- 0.4017857015132904
1389
- ],
1390
- [
1391
- 2.999985694885254,
1392
- 0.27173900604248047,
1393
- 0.27173900604248047
1394
- ],
1395
- [
1396
- 3.000000238418579,
1397
- 0.4017857015132904,
1398
- 0.4017857015132904
1399
- ],
1400
- [
1401
- 3.0,
1402
- 0.4017857015132904,
1403
- 0.4017857015132904
1404
- ],
1405
- [
1406
- 3.0,
1407
- 0.4017857015132904,
1408
- 0.4017857015132904
1409
- ],
1410
- [
1411
- 3.0,
1412
- 0.4464285671710968,
1413
- 0.4464285671710968
1414
- ],
1415
- [
1416
- 3.0,
1417
- 0.27173900604248047,
1418
- 0.27173900604248047
1419
- ],
1420
- [
1421
- 3.000000238418579,
1422
- 0.4017857015132904,
1423
- 0.4017857015132904
1424
- ]
1425
- ]
1426
- }
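The fingerprint deleted above records per-case shapes after cropping and per-case voxel spacings; their per-axis medians are what the nnU-Net plans later quote as the median shape and spacing. A minimal sketch of recomputing those medians from such a file, assuming a local copy at the illustrative path below:

import json
import numpy as np

# Illustrative path to a local copy of the deleted fingerprint file.
fp_path = "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/dataset_fingerprint.json"

with open(fp_path) as f:
    fp = json.load(f)

shapes = np.array(fp["shapes_after_crop"])   # (n_cases, 3): z, y, x in voxels
spacings = np.array(fp["spacings"])          # (n_cases, 3): z, y, x in mm

# These medians should roughly match the values quoted later in debug.json,
# i.e. about [25, 270, 270] voxels and [3.0, 0.40, 0.40] mm for this dataset.
print("median shape after crop:", np.median(shapes, axis=0))
print("median spacing:", np.median(spacings, axis=0))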
 
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/.ipynb_checkpoints/progress-checkpoint.png DELETED

Git LFS Details

  • SHA256: acad0880086620effdb8b1b34d02327b5ed2419fd875e25ddf0521c7d45f6fd7
  • Pointer size: 131 Bytes
  • Size of remote file: 699 kB
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_best.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:00ea40d5a8879983fd37b31da67b30e4af0746331e43f9557f00db4d0d9c1720
- size 356831121
 
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_latest.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:32c4e68fcceeded035c49c3e0530a70cd6c3d96f942d0d1e5a997ea23e6fffdc
- size 356854997
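The two .pth entries above are Git LFS pointer files: the repository itself stored only the version line, the sha256 object id, and the byte size, while the actual checkpoints lived in LFS storage. A quick way to confirm that a locally downloaded checkpoint matches its pointer is to hash it; a minimal sketch (the local file path is illustrative):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so large checkpoints (~357 MB here) are handled easily.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Illustrative local path; compare the digest against the oid recorded in the pointer.
digest = sha256_of("checkpoint_latest.pth")
print(digest == "32c4e68fcceeded035c49c3e0530a70cd6c3d96f942d0d1e5a997ea23e6fffdc")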
 
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/debug.json DELETED
@@ -1,52 +0,0 @@
1
- {
2
- "_best_ema": "None",
3
- "batch_size": "2",
4
- "configuration_manager": "{'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}",
5
- "configuration_name": "3d_fullres",
6
- "cudnn_version": 8500,
7
- "current_epoch": "0",
8
- "dataloader_train": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7fe17c7ca020>",
9
- "dataloader_train.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7fe17c7c8c40>",
10
- "dataloader_train.num_processes": "4",
11
- "dataloader_train.transform": "Compose ( [Convert3DTo2DTransform( apply_to_keys = ('data', 'seg') ), SpatialTransform( independent_scale_for_each_axis = False, p_rot_per_sample = 0.2, p_scale_per_sample = 0.2, p_el_per_sample = 0, data_key = 'data', label_key = 'seg', patch_size = [256, 256], patch_center_dist_from_border = None, do_elastic_deform = False, alpha = (0, 0), sigma = (0, 0), do_rotation = True, angle_x = (-3.141592653589793, 3.141592653589793), angle_y = (0, 0), angle_z = (0, 0), do_scale = True, scale = (0.7, 1.4), border_mode_data = 'constant', border_cval_data = 0, order_data = 3, border_mode_seg = 'constant', border_cval_seg = -1, order_seg = 1, random_crop = False, p_rot_per_axis = 1, p_independent_scale_per_axis = 1 ), Convert2DTo3DTransform( apply_to_keys = ('data', 'seg') ), GaussianNoiseTransform( p_per_sample = 0.1, data_key = 'data', noise_variance = (0, 0.1), p_per_channel = 1, per_channel = False ), GaussianBlurTransform( p_per_sample = 0.2, different_sigma_per_channel = True, p_per_channel = 0.5, data_key = 'data', blur_sigma = (0.5, 1.0), different_sigma_per_axis = False, p_isotropic = 0 ), BrightnessMultiplicativeTransform( p_per_sample = 0.15, data_key = 'data', multiplier_range = (0.75, 1.25), per_channel = True ), ContrastAugmentationTransform( p_per_sample = 0.15, data_key = 'data', contrast_range = (0.75, 1.25), preserve_range = True, per_channel = True, p_per_channel = 1 ), SimulateLowResolutionTransform( order_upsample = 3, order_downsample = 0, channels = None, per_channel = True, p_per_channel = 0.5, p_per_sample = 0.25, data_key = 'data', zoom_range = (0.5, 1), ignore_axes = (0,) ), GammaTransform( p_per_sample = 0.1, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = True ), GammaTransform( p_per_sample = 0.3, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = False ), MirrorTransform( p_per_sample = 1, data_key = 'data', label_key = 'seg', axes = (0, 1, 2) ), RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.25, 0.25], [0.5, 0.125, 0.125], [0.25, 0.0625, 0.0625], [0.25, 0.03125, 0.03125]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
12
- "dataloader_val": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7fe17c7c90f0>",
13
- "dataloader_val.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7fe17c7c90c0>",
14
- "dataloader_val.num_processes": "2",
15
- "dataloader_val.transform": "Compose ( [RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.25, 0.25], [0.5, 0.125, 0.125], [0.25, 0.0625, 0.0625], [0.25, 0.03125, 0.03125]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
16
- "dataset_json": "{'name': 'Prostate158', 'description': 'Prostate cancer segmentation dataset', 'channel_names': {'0': 'T2', '1': 'ADC', '2': 'DFI'}, 'labels': {'background': 0, 'prostate_inner': 1, 'prostate_outer': 2, 'tumor': 3}, 'numTraining': 139, 'numTest': 19, 'file_ending': '.nii.gz'}",
17
- "device": "cuda:0",
18
- "disable_checkpointing": "False",
19
- "fold": "0",
20
- "folder_with_segs_from_previous_stage": "None",
21
- "gpu_name": "NVIDIA A10G",
22
- "grad_scaler": "<torch.cuda.amp.grad_scaler.GradScaler object at 0x7fe17d646560>",
23
- "hostname": "s-osbm-jupyter-f0b83-8689bbb555-5t6kn",
24
- "inference_allowed_mirroring_axes": "(0, 1, 2)",
25
- "initial_lr": "0.01",
26
- "is_cascaded": "False",
27
- "is_ddp": "False",
28
- "label_manager": "<nnunetv2.utilities.label_handling.label_handling.LabelManager object at 0x7fe17d6464a0>",
29
- "local_rank": "0",
30
- "log_file": "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_7_24_00_01_52.txt",
31
- "logger": "<nnunetv2.training.logging.nnunet_logger.nnUNetLogger object at 0x7fe17d646410>",
32
- "loss": "DeepSupervisionWrapper(\n (loss): DC_and_CE_loss(\n (ce): RobustCrossEntropyLoss()\n (dc): MemoryEfficientSoftDiceLoss()\n )\n)",
33
- "lr_scheduler": "<nnunetv2.training.lr_scheduler.polylr.PolyLRScheduler object at 0x7fe17d646470>",
34
- "my_init_kwargs": "{'plans': {'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 31, 'patch_size': [320, 320], 'median_image_size_in_voxels': [270.0, 270.0], 'spacing': [0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [6, 6], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 
'percentile_00_5': 34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}, 'configuration': '3d_fullres', 'fold': 0, 'dataset_json': {'name': 'Prostate158', 'description': 'Prostate cancer segmentation dataset', 'channel_names': {'0': 'T2', '1': 'ADC', '2': 'DFI'}, 'labels': {'background': 0, 'prostate_inner': 1, 'prostate_outer': 2, 'tumor': 3}, 'numTraining': 139, 'numTest': 19, 'file_ending': '.nii.gz'}, 'unpack_dataset': True, 'device': device(type='cuda')}",
35
- "network": "PlainConvUNet",
36
- "num_epochs": "1000",
37
- "num_input_channels": "3",
38
- "num_iterations_per_epoch": "250",
39
- "num_val_iterations_per_epoch": "50",
40
- "optimizer": "SGD (\nParameter Group 0\n dampening: 0\n differentiable: False\n foreach: None\n initial_lr: 0.01\n lr: 0.01\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
41
- "output_folder": "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0",
42
- "output_folder_base": "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres",
43
- "oversample_foreground_percent": "0.33",
44
- "plans_manager": "{'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 31, 'patch_size': [320, 320], 'median_image_size_in_voxels': [270.0, 270.0], 'spacing': [0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [6, 6], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 'percentile_00_5': 
34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}",
45
- "preprocessed_dataset_folder": "nnUNet_preprocessed/Dataset001_Prostate158/nnUNetPlans_3d_fullres",
46
- "preprocessed_dataset_folder_base": "nnUNet_preprocessed/Dataset001_Prostate158",
47
- "save_every": "50",
48
- "torch_version": "2.0.1+cu117",
49
- "unpack_dataset": "True",
50
- "was_initialized": "True",
51
- "weight_decay": "3e-05"
52
- }
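The debug.json deleted above serializes every trainer attribute as a string (even numeric fields such as batch_size appear quoted). A minimal sketch for pulling out a few of the scalar hyperparameters, assuming a local copy at the illustrative path below:

import json

# Illustrative path to a local copy of the deleted debug.json.
with open("fold_0/debug.json") as f:
    debug = json.load(f)

# All values are stored as strings, so cast the numeric ones explicitly.
print("batch size:   ", int(debug["batch_size"]))
print("initial lr:   ", float(debug["initial_lr"]))
print("num epochs:   ", int(debug["num_epochs"]))
print("weight decay: ", float(debug["weight_decay"]))
print("torch version:", debug["torch_version"])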
 
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/progress.png DELETED

Git LFS Details

  • SHA256: 1853459deab216f0dccaee8913ac5be19856556711ddfb2d5734cd720aaf559c
  • Pointer size: 131 Bytes
  • Size of remote file: 701 kB
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/training_log_2023_7_24_00_01_52.txt DELETED
@@ -1,1194 +0,0 @@
1
-
2
- #######################################################################
3
- Please cite the following paper when using nnU-Net:
4
- Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
- #######################################################################
6
-
7
-
8
- This is the configuration used by this training:
9
- Configuration name: 3d_fullres
10
- {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}
11
-
12
- These are the global plan.json settings:
13
- {'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 'percentile_00_5': 34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}
14
-
15
- 2023-07-24 00:01:53.902022: unpacking dataset...
16
- 2023-07-24 00:02:07.139120: unpacking done...
17
- 2023-07-24 00:02:07.196625: do_dummy_2d_data_aug: True
18
- 2023-07-24 00:02:07.197410: Creating new 5-fold cross-validation split...
19
- 2023-07-24 00:02:07.198499: Desired fold for training: 0
20
- 2023-07-24 00:02:07.198552: This split has 111 training and 28 validation cases.
21
- 2023-07-24 00:02:14.074938: Unable to plot network architecture:
22
- 2023-07-24 00:02:14.075182: module 'torch.onnx' has no attribute '_optimize_trace'
23
- 2023-07-24 00:02:14.123964:
24
- 2023-07-24 00:02:14.124048: Epoch 0
25
- 2023-07-24 00:02:14.124163: Current learning rate: 0.01
26
- 2023-07-24 00:06:16.568555: train_loss -0.0413
27
- 2023-07-24 00:06:16.568763: val_loss -0.1561
28
- 2023-07-24 00:06:16.568854: Pseudo dice [0.6633, 0.3715, 0.0]
29
- 2023-07-24 00:06:16.568946: Epoch time: 242.45 s
30
- 2023-07-24 00:06:16.569015: Yayy! New best EMA pseudo Dice: 0.3449
31
- 2023-07-24 00:06:19.487634:
32
- 2023-07-24 00:06:19.487758: Epoch 1
33
- 2023-07-24 00:06:19.487858: Current learning rate: 0.00999
34
- 2023-07-24 00:09:51.638274: train_loss -0.2546
35
- 2023-07-24 00:09:51.638539: val_loss -0.3228
36
- 2023-07-24 00:09:51.638626: Pseudo dice [0.7605, 0.5865, 0.0]
37
- 2023-07-24 00:09:51.638798: Epoch time: 212.15 s
38
- 2023-07-24 00:09:51.638938: Yayy! New best EMA pseudo Dice: 0.3553
39
- 2023-07-24 00:09:54.319529:
40
- 2023-07-24 00:09:54.319663: Epoch 2
41
- 2023-07-24 00:09:54.319769: Current learning rate: 0.00998
42
- 2023-07-24 00:13:25.194643: train_loss -0.324
43
- 2023-07-24 00:13:25.194925: val_loss -0.3233
44
- 2023-07-24 00:13:25.195090: Pseudo dice [0.7874, 0.5763, 0.0]
45
- 2023-07-24 00:13:25.195179: Epoch time: 210.88 s
46
- 2023-07-24 00:13:25.195318: Yayy! New best EMA pseudo Dice: 0.3652
47
- 2023-07-24 00:13:27.399833:
48
- 2023-07-24 00:13:27.399949: Epoch 3
49
- 2023-07-24 00:13:27.400062: Current learning rate: 0.00997
50
- 2023-07-24 00:17:09.770445: train_loss -0.3629
51
- 2023-07-24 00:17:09.770754: val_loss -0.3386
52
- 2023-07-24 00:17:09.771257: Pseudo dice [0.7865, 0.6213, 0.0]
53
- 2023-07-24 00:17:09.771582: Epoch time: 222.37 s
54
- 2023-07-24 00:17:09.771953: Yayy! New best EMA pseudo Dice: 0.3756
55
- 2023-07-24 00:17:13.443594:
56
- 2023-07-24 00:17:13.443887: Epoch 4
57
- 2023-07-24 00:17:13.444007: Current learning rate: 0.00996
58
- 2023-07-24 00:20:25.372916: train_loss -0.4038
59
- 2023-07-24 00:20:25.373092: val_loss -0.4029
60
- 2023-07-24 00:20:25.373185: Pseudo dice [0.821, 0.6488, 0.2694]
61
- 2023-07-24 00:20:25.373266: Epoch time: 191.93 s
62
- 2023-07-24 00:20:25.373332: Yayy! New best EMA pseudo Dice: 0.3961
63
- 2023-07-24 00:20:27.527769:
64
- 2023-07-24 00:20:27.527886: Epoch 5
65
- 2023-07-24 00:20:27.527997: Current learning rate: 0.00995
66
- 2023-07-24 00:23:59.957678: train_loss -0.4395
67
- 2023-07-24 00:23:59.957844: val_loss -0.4287
68
- 2023-07-24 00:23:59.957929: Pseudo dice [0.8103, 0.634, 0.4383]
69
- 2023-07-24 00:23:59.958011: Epoch time: 212.43 s
70
- 2023-07-24 00:23:59.958077: Yayy! New best EMA pseudo Dice: 0.4192
71
- 2023-07-24 00:24:01.987802:
72
- 2023-07-24 00:24:01.987912: Epoch 6
73
- 2023-07-24 00:24:01.988024: Current learning rate: 0.00995
74
- 2023-07-24 00:27:44.002411: train_loss -0.4486
75
- 2023-07-24 00:27:44.002615: val_loss -0.4226
76
- 2023-07-24 00:27:44.002702: Pseudo dice [0.8368, 0.6463, 0.3067]
77
- 2023-07-24 00:27:44.002805: Epoch time: 222.02 s
78
- 2023-07-24 00:27:44.002879: Yayy! New best EMA pseudo Dice: 0.4369
79
- 2023-07-24 00:27:46.502084:
80
- 2023-07-24 00:27:46.502206: Epoch 7
81
- 2023-07-24 00:27:46.502326: Current learning rate: 0.00994
82
- 2023-07-24 00:31:13.044165: train_loss -0.4879
83
- 2023-07-24 00:31:13.044357: val_loss -0.4522
84
- 2023-07-24 00:31:13.044453: Pseudo dice [0.8328, 0.6665, 0.389]
85
- 2023-07-24 00:31:13.044543: Epoch time: 206.54 s
86
- 2023-07-24 00:31:13.050315: Yayy! New best EMA pseudo Dice: 0.4562
87
- 2023-07-24 00:31:15.979445:
88
- 2023-07-24 00:31:15.979643: Epoch 8
89
- 2023-07-24 00:31:15.979780: Current learning rate: 0.00993
90
- 2023-07-24 00:34:55.354722: train_loss -0.5049
91
- 2023-07-24 00:34:55.355004: val_loss -0.4647
92
- 2023-07-24 00:34:55.355092: Pseudo dice [0.8501, 0.6851, 0.4456]
93
- 2023-07-24 00:34:55.355233: Epoch time: 219.38 s
94
- 2023-07-24 00:34:55.355299: Yayy! New best EMA pseudo Dice: 0.4766
95
- 2023-07-24 00:34:57.734633:
96
- 2023-07-24 00:34:57.734877: Epoch 9
97
- 2023-07-24 00:34:57.734988: Current learning rate: 0.00992
98
- 2023-07-24 00:38:36.038373: train_loss -0.5101
99
- 2023-07-24 00:38:36.038563: val_loss -0.4518
100
- 2023-07-24 00:38:36.038657: Pseudo dice [0.8332, 0.6664, 0.4652]
101
- 2023-07-24 00:38:36.038745: Epoch time: 218.3 s
102
- 2023-07-24 00:38:36.038840: Yayy! New best EMA pseudo Dice: 0.4944
103
- 2023-07-24 00:38:39.940725:
104
- 2023-07-24 00:38:39.941038: Epoch 10
105
- 2023-07-24 00:38:39.941158: Current learning rate: 0.00991
106
- 2023-07-24 00:42:04.394748: train_loss -0.5153
107
- 2023-07-24 00:42:04.398857: val_loss -0.4822
108
- 2023-07-24 00:42:04.399046: Pseudo dice [0.8432, 0.6794, 0.4922]
109
- 2023-07-24 00:42:04.399148: Epoch time: 204.46 s
110
- 2023-07-24 00:42:04.399216: Yayy! New best EMA pseudo Dice: 0.5121
111
- 2023-07-24 00:42:08.116886:
112
- 2023-07-24 00:42:08.117016: Epoch 11
113
- 2023-07-24 00:42:08.117131: Current learning rate: 0.0099
114
- 2023-07-24 00:45:37.933030: train_loss -0.5128
115
- 2023-07-24 00:45:37.933209: val_loss -0.4819
116
- 2023-07-24 00:45:37.933295: Pseudo dice [0.8511, 0.6729, 0.4875]
117
- 2023-07-24 00:45:37.933378: Epoch time: 209.82 s
118
- 2023-07-24 00:45:37.933442: Yayy! New best EMA pseudo Dice: 0.528
119
- 2023-07-24 00:45:41.223381:
120
- 2023-07-24 00:45:41.223522: Epoch 12
121
- 2023-07-24 00:45:41.223654: Current learning rate: 0.00989
122
- 2023-07-24 00:49:18.457412: train_loss -0.5139
123
- 2023-07-24 00:49:18.457591: val_loss -0.4663
124
- 2023-07-24 00:49:18.457679: Pseudo dice [0.8304, 0.6664, 0.4751]
125
- 2023-07-24 00:49:18.457762: Epoch time: 217.24 s
126
- 2023-07-24 00:49:18.457826: Yayy! New best EMA pseudo Dice: 0.5409
127
- 2023-07-24 00:49:21.452374:
128
- 2023-07-24 00:49:21.452548: Epoch 13
129
- 2023-07-24 00:49:21.452664: Current learning rate: 0.00988
130
- 2023-07-24 00:52:53.617019: train_loss -0.5306
131
- 2023-07-24 00:52:53.617260: val_loss -0.4947
132
- 2023-07-24 00:52:53.617345: Pseudo dice [0.8434, 0.6944, 0.4883]
133
- 2023-07-24 00:52:53.617487: Epoch time: 212.17 s
134
- 2023-07-24 00:52:53.617552: Yayy! New best EMA pseudo Dice: 0.5544
135
- 2023-07-24 00:52:55.726403:
136
- 2023-07-24 00:52:55.726518: Epoch 14
137
- 2023-07-24 00:52:55.726615: Current learning rate: 0.00987
138
- 2023-07-24 00:56:22.506680: train_loss -0.5446
139
- 2023-07-24 00:56:22.506907: val_loss -0.4714
140
- 2023-07-24 00:56:22.507013: Pseudo dice [0.8411, 0.671, 0.4914]
141
- 2023-07-24 00:56:22.507121: Epoch time: 206.78 s
142
- 2023-07-24 00:56:22.507219: Yayy! New best EMA pseudo Dice: 0.5657
143
- 2023-07-24 00:56:24.667025:
144
- 2023-07-24 00:56:24.667140: Epoch 15
145
- 2023-07-24 00:56:24.667252: Current learning rate: 0.00986
146
- 2023-07-24 00:59:50.747144: train_loss -0.5394
147
- 2023-07-24 00:59:50.747421: val_loss -0.4625
148
- 2023-07-24 00:59:50.747515: Pseudo dice [0.8447, 0.654, 0.5712]
149
- 2023-07-24 00:59:50.747690: Epoch time: 206.08 s
150
- 2023-07-24 00:59:50.747818: Yayy! New best EMA pseudo Dice: 0.5781
151
- 2023-07-24 00:59:53.732963:
152
- 2023-07-24 00:59:53.733104: Epoch 16
153
- 2023-07-24 00:59:53.733224: Current learning rate: 0.00986
154
- 2023-07-24 01:03:24.895722: train_loss -0.5551
155
- 2023-07-24 01:03:24.895922: val_loss -0.496
156
- 2023-07-24 01:03:24.896017: Pseudo dice [0.8705, 0.6946, 0.4975]
157
- 2023-07-24 01:03:24.896102: Epoch time: 211.16 s
158
- 2023-07-24 01:03:24.896200: Yayy! New best EMA pseudo Dice: 0.5891
159
- 2023-07-24 01:03:27.655639:
160
- 2023-07-24 01:03:27.655761: Epoch 17
161
- 2023-07-24 01:03:27.655877: Current learning rate: 0.00985
162
- 2023-07-24 01:07:09.366258: train_loss -0.5496
163
- 2023-07-24 01:07:09.371549: val_loss -0.4745
164
- 2023-07-24 01:07:09.371795: Pseudo dice [0.8679, 0.6897, 0.514]
165
- 2023-07-24 01:07:09.371953: Epoch time: 221.71 s
166
- 2023-07-24 01:07:09.372037: Yayy! New best EMA pseudo Dice: 0.5992
167
- 2023-07-24 01:07:12.715889:
168
- 2023-07-24 01:07:12.716015: Epoch 18
169
- 2023-07-24 01:07:12.716132: Current learning rate: 0.00984
170
- 2023-07-24 01:10:59.095254: train_loss -0.5406
171
- 2023-07-24 01:10:59.095483: val_loss -0.483
172
- 2023-07-24 01:10:59.095583: Pseudo dice [0.8634, 0.6795, 0.5385]
173
- 2023-07-24 01:10:59.095723: Epoch time: 226.38 s
174
- 2023-07-24 01:10:59.095788: Yayy! New best EMA pseudo Dice: 0.6087
175
- 2023-07-24 01:11:01.356395:
176
- 2023-07-24 01:11:01.356525: Epoch 19
177
- 2023-07-24 01:11:01.356640: Current learning rate: 0.00983
178
- 2023-07-24 01:14:19.901330: train_loss -0.5567
179
- 2023-07-24 01:14:19.901525: val_loss -0.4783
180
- 2023-07-24 01:14:19.901613: Pseudo dice [0.8492, 0.6553, 0.535]
181
- 2023-07-24 01:14:19.901693: Epoch time: 198.55 s
182
- 2023-07-24 01:14:19.901747: Yayy! New best EMA pseudo Dice: 0.6158
183
- 2023-07-24 01:14:22.511663:
184
- 2023-07-24 01:14:22.511957: Epoch 20
185
- 2023-07-24 01:14:22.512064: Current learning rate: 0.00982
186
- 2023-07-24 01:17:55.925512: train_loss -0.5745
187
- 2023-07-24 01:17:55.925694: val_loss -0.4715
188
- 2023-07-24 01:17:55.925791: Pseudo dice [0.8548, 0.653, 0.5559]
189
- 2023-07-24 01:17:55.925879: Epoch time: 213.41 s
190
- 2023-07-24 01:17:55.925951: Yayy! New best EMA pseudo Dice: 0.623
191
- 2023-07-24 01:17:58.448946:
192
- 2023-07-24 01:17:58.449071: Epoch 21
193
- 2023-07-24 01:17:58.449180: Current learning rate: 0.00981
194
- 2023-07-24 01:21:29.617508: train_loss -0.5761
195
- 2023-07-24 01:21:29.617684: val_loss -0.4919
196
- 2023-07-24 01:21:29.617774: Pseudo dice [0.8628, 0.7072, 0.4682]
197
- 2023-07-24 01:21:29.617859: Epoch time: 211.17 s
198
- 2023-07-24 01:21:29.617927: Yayy! New best EMA pseudo Dice: 0.6286
199
- 2023-07-24 01:21:32.232873:
200
- 2023-07-24 01:21:32.232998: Epoch 22
201
- 2023-07-24 01:21:32.233115: Current learning rate: 0.0098
202
- 2023-07-24 01:24:54.282193: train_loss -0.5831
203
- 2023-07-24 01:24:54.282391: val_loss -0.5023
204
- 2023-07-24 01:24:54.282501: Pseudo dice [0.8587, 0.6942, 0.4825]
205
- 2023-07-24 01:24:54.282606: Epoch time: 202.05 s
206
- 2023-07-24 01:24:54.282692: Yayy! New best EMA pseudo Dice: 0.6336
207
- 2023-07-24 01:24:57.618637:
208
- 2023-07-24 01:24:57.618829: Epoch 23
209
- 2023-07-24 01:24:57.618946: Current learning rate: 0.00979
210
- 2023-07-24 01:28:25.124920: train_loss -0.5864
211
- 2023-07-24 01:28:25.125104: val_loss -0.5029
212
- 2023-07-24 01:28:25.125192: Pseudo dice [0.8623, 0.7014, 0.5049]
213
- 2023-07-24 01:28:25.125279: Epoch time: 207.51 s
214
- 2023-07-24 01:28:25.125346: Yayy! New best EMA pseudo Dice: 0.6392
215
- 2023-07-24 01:28:29.015366:
216
- 2023-07-24 01:28:29.015487: Epoch 24
217
- 2023-07-24 01:28:29.015594: Current learning rate: 0.00978
218
- 2023-07-24 01:32:03.197192: train_loss -0.5843
219
- 2023-07-24 01:32:03.197452: val_loss -0.478
220
- 2023-07-24 01:32:03.197542: Pseudo dice [0.8586, 0.6702, 0.5603]
221
- 2023-07-24 01:32:03.197691: Epoch time: 214.18 s
222
- 2023-07-24 01:32:03.197757: Yayy! New best EMA pseudo Dice: 0.6449
223
- 2023-07-24 01:32:05.726707:
224
- 2023-07-24 01:32:05.726868: Epoch 25
225
- 2023-07-24 01:32:05.726998: Current learning rate: 0.00977
226
- 2023-07-24 01:35:41.851906: train_loss -0.5882
227
- 2023-07-24 01:35:41.852082: val_loss -0.4719
228
- 2023-07-24 01:35:41.852175: Pseudo dice [0.862, 0.6743, 0.3966]
229
- 2023-07-24 01:35:41.852259: Epoch time: 216.13 s
230
- 2023-07-24 01:35:43.534244:
231
- 2023-07-24 01:35:43.534373: Epoch 26
232
- 2023-07-24 01:35:43.534485: Current learning rate: 0.00977
233
- 2023-07-24 01:39:02.738217: train_loss -0.6018
234
- 2023-07-24 01:39:02.743619: val_loss -0.4994
235
- 2023-07-24 01:39:02.743780: Pseudo dice [0.8766, 0.733, 0.4566]
236
- 2023-07-24 01:39:02.743932: Epoch time: 199.21 s
237
- 2023-07-24 01:39:02.744023: Yayy! New best EMA pseudo Dice: 0.6493
238
- 2023-07-24 01:39:05.142646:
239
- 2023-07-24 01:39:05.142778: Epoch 27
240
- 2023-07-24 01:39:05.142897: Current learning rate: 0.00976
241
- 2023-07-24 01:42:29.737434: train_loss -0.5995
242
- 2023-07-24 01:42:29.737619: val_loss -0.4867
243
- 2023-07-24 01:42:29.737704: Pseudo dice [0.8689, 0.689, 0.4957]
244
- 2023-07-24 01:42:29.737784: Epoch time: 204.6 s
245
- 2023-07-24 01:42:29.737848: Yayy! New best EMA pseudo Dice: 0.6528
246
- 2023-07-24 01:42:32.621387:
247
- 2023-07-24 01:42:32.621503: Epoch 28
248
- 2023-07-24 01:42:32.621617: Current learning rate: 0.00975
249
- 2023-07-24 01:46:07.936492: train_loss -0.5998
250
- 2023-07-24 01:46:07.936731: val_loss -0.4998
251
- 2023-07-24 01:46:07.943375: Pseudo dice [0.8649, 0.6936, 0.5489]
252
- 2023-07-24 01:46:07.943589: Epoch time: 215.32 s
253
- 2023-07-24 01:46:07.943666: Yayy! New best EMA pseudo Dice: 0.6578
254
- 2023-07-24 01:46:12.240499:
255
- 2023-07-24 01:46:12.240789: Epoch 29
256
- 2023-07-24 01:46:12.240907: Current learning rate: 0.00974
257
- 2023-07-24 01:49:49.765490: train_loss -0.6192
258
- 2023-07-24 01:49:49.765681: val_loss -0.4884
259
- 2023-07-24 01:49:49.765781: Pseudo dice [0.8677, 0.6745, 0.5335]
260
- 2023-07-24 01:49:49.765875: Epoch time: 217.53 s
261
- 2023-07-24 01:49:49.765948: Yayy! New best EMA pseudo Dice: 0.6612
262
- 2023-07-24 01:49:54.074142:
263
- 2023-07-24 01:49:54.074317: Epoch 30
264
- 2023-07-24 01:49:54.074434: Current learning rate: 0.00973
265
- 2023-07-24 01:53:33.757728: train_loss -0.6115
266
- 2023-07-24 01:53:33.758031: val_loss -0.4838
267
- 2023-07-24 01:53:33.758127: Pseudo dice [0.8786, 0.6666, 0.5728]
268
- 2023-07-24 01:53:33.758209: Epoch time: 219.69 s
269
- 2023-07-24 01:53:33.758281: Yayy! New best EMA pseudo Dice: 0.6656
270
- 2023-07-24 01:53:37.008034:
271
- 2023-07-24 01:53:37.008158: Epoch 31
272
- 2023-07-24 01:53:37.008272: Current learning rate: 0.00972
273
- 2023-07-24 01:57:03.417002: train_loss -0.625
274
- 2023-07-24 01:57:03.417286: val_loss -0.5044
275
- 2023-07-24 01:57:03.417374: Pseudo dice [0.8686, 0.7071, 0.5431]
276
- 2023-07-24 01:57:03.417525: Epoch time: 206.41 s
277
- 2023-07-24 01:57:03.417588: Yayy! New best EMA pseudo Dice: 0.6697
278
- 2023-07-24 01:57:05.575807:
279
- 2023-07-24 01:57:05.575927: Epoch 32
280
- 2023-07-24 01:57:05.576024: Current learning rate: 0.00971
281
- 2023-07-24 02:00:42.226546: train_loss -0.6184
282
- 2023-07-24 02:00:42.232471: val_loss -0.4894
283
- 2023-07-24 02:00:42.232700: Pseudo dice [0.8702, 0.7033, 0.4937]
284
- 2023-07-24 02:00:42.232790: Epoch time: 216.65 s
285
- 2023-07-24 02:00:42.232924: Yayy! New best EMA pseudo Dice: 0.6716
286
- 2023-07-24 02:00:45.014342:
287
- 2023-07-24 02:00:45.014468: Epoch 33
288
- 2023-07-24 02:00:45.014586: Current learning rate: 0.0097
289
- 2023-07-24 02:04:19.757532: train_loss -0.6217
290
- 2023-07-24 02:04:19.757771: val_loss -0.4804
291
- 2023-07-24 02:04:19.757856: Pseudo dice [0.8748, 0.693, 0.5362]
292
- 2023-07-24 02:04:19.758005: Epoch time: 214.74 s
293
- 2023-07-24 02:04:19.758069: Yayy! New best EMA pseudo Dice: 0.6746
294
- 2023-07-24 02:04:24.430918:
295
- 2023-07-24 02:04:24.431120: Epoch 34
296
- 2023-07-24 02:04:24.431232: Current learning rate: 0.00969
297
- 2023-07-24 02:08:00.604949: train_loss -0.6087
298
- 2023-07-24 02:08:00.605173: val_loss -0.4999
299
- 2023-07-24 02:08:00.605262: Pseudo dice [0.8614, 0.7024, 0.5684]
300
- 2023-07-24 02:08:00.605341: Epoch time: 216.18 s
301
- 2023-07-24 02:08:00.605400: Yayy! New best EMA pseudo Dice: 0.6782
302
- 2023-07-24 02:08:05.228923:
303
- 2023-07-24 02:08:05.229156: Epoch 35
304
- 2023-07-24 02:08:05.229259: Current learning rate: 0.00968
305
- 2023-07-24 02:11:46.128471: train_loss -0.6278
306
- 2023-07-24 02:11:46.128723: val_loss -0.4975
307
- 2023-07-24 02:11:46.128807: Pseudo dice [0.8697, 0.6883, 0.5635]
308
- 2023-07-24 02:11:46.128886: Epoch time: 220.9 s
309
- 2023-07-24 02:11:46.128947: Yayy! New best EMA pseudo Dice: 0.6811
310
- 2023-07-24 02:11:48.352022:
311
- 2023-07-24 02:11:48.352140: Epoch 36
312
- 2023-07-24 02:11:48.352254: Current learning rate: 0.00968
313
- 2023-07-24 02:15:18.130978: train_loss -0.6341
314
- 2023-07-24 02:15:18.131156: val_loss -0.514
315
- 2023-07-24 02:15:18.131239: Pseudo dice [0.8717, 0.7153, 0.5774]
316
- 2023-07-24 02:15:18.131315: Epoch time: 209.78 s
317
- 2023-07-24 02:15:18.131555: Yayy! New best EMA pseudo Dice: 0.6852
318
- 2023-07-24 02:15:20.282215:
319
- 2023-07-24 02:15:20.282343: Epoch 37
320
- 2023-07-24 02:15:20.282458: Current learning rate: 0.00967
321
- 2023-07-24 02:18:53.374968: train_loss -0.6275
322
- 2023-07-24 02:18:53.375212: val_loss -0.5119
323
- 2023-07-24 02:18:53.375304: Pseudo dice [0.867, 0.6981, 0.5894]
324
- 2023-07-24 02:18:53.375387: Epoch time: 213.09 s
325
- 2023-07-24 02:18:53.375454: Yayy! New best EMA pseudo Dice: 0.6885
326
- 2023-07-24 02:18:56.726088:
327
- 2023-07-24 02:18:56.726211: Epoch 38
328
- 2023-07-24 02:18:56.726327: Current learning rate: 0.00966
329
- 2023-07-24 02:22:22.025652: train_loss -0.6328
330
- 2023-07-24 02:22:22.031740: val_loss -0.503
331
- 2023-07-24 02:22:22.032003: Pseudo dice [0.8649, 0.7161, 0.5203]
332
- 2023-07-24 02:22:22.032098: Epoch time: 205.3 s
333
- 2023-07-24 02:22:22.032236: Yayy! New best EMA pseudo Dice: 0.6897
334
- 2023-07-24 02:22:25.923321:
335
- 2023-07-24 02:22:25.923668: Epoch 39
336
- 2023-07-24 02:22:25.923775: Current learning rate: 0.00965
337
- 2023-07-24 02:25:55.050353: train_loss -0.6391
338
- 2023-07-24 02:25:55.050546: val_loss -0.5015
339
- 2023-07-24 02:25:55.050636: Pseudo dice [0.8618, 0.6972, 0.566]
340
- 2023-07-24 02:25:55.050725: Epoch time: 209.13 s
341
- 2023-07-24 02:25:55.050815: Yayy! New best EMA pseudo Dice: 0.6915
342
- 2023-07-24 02:25:57.131437:
343
- 2023-07-24 02:25:57.131554: Epoch 40
344
- 2023-07-24 02:25:57.131677: Current learning rate: 0.00964
345
- 2023-07-24 02:29:24.101611: train_loss -0.6453
346
- 2023-07-24 02:29:24.101807: val_loss -0.4857
347
- 2023-07-24 02:29:24.101905: Pseudo dice [0.8591, 0.6952, 0.5601]
348
- 2023-07-24 02:29:24.102006: Epoch time: 206.97 s
349
- 2023-07-24 02:29:24.102089: Yayy! New best EMA pseudo Dice: 0.6929
350
- 2023-07-24 02:29:26.722635:
351
- 2023-07-24 02:29:26.722777: Epoch 41
352
- 2023-07-24 02:29:26.722900: Current learning rate: 0.00963
353
- 2023-07-24 02:33:01.503387: train_loss -0.6384
354
- 2023-07-24 02:33:01.503580: val_loss -0.495
355
- 2023-07-24 02:33:01.503671: Pseudo dice [0.8646, 0.7111, 0.4851]
356
- 2023-07-24 02:33:01.503757: Epoch time: 214.78 s
357
- 2023-07-24 02:33:03.298025:
358
- 2023-07-24 02:33:03.298147: Epoch 42
359
- 2023-07-24 02:33:03.298252: Current learning rate: 0.00962
360
- 2023-07-24 02:36:28.531214: train_loss -0.6454
361
- 2023-07-24 02:36:28.531463: val_loss -0.4999
362
- 2023-07-24 02:36:28.531549: Pseudo dice [0.8693, 0.691, 0.6198]
363
- 2023-07-24 02:36:28.531719: Epoch time: 205.23 s
364
- 2023-07-24 02:36:28.531906: Yayy! New best EMA pseudo Dice: 0.6957
365
- 2023-07-24 02:36:30.675420:
366
- 2023-07-24 02:36:30.675563: Epoch 43
367
- 2023-07-24 02:36:30.675693: Current learning rate: 0.00961
368
- 2023-07-24 02:39:57.254115: train_loss -0.6449
369
- 2023-07-24 02:39:57.254383: val_loss -0.5029
370
- 2023-07-24 02:39:57.254475: Pseudo dice [0.8713, 0.6894, 0.6393]
371
- 2023-07-24 02:39:57.254557: Epoch time: 206.58 s
372
- 2023-07-24 02:39:57.254622: Yayy! New best EMA pseudo Dice: 0.6995
373
- 2023-07-24 02:39:59.978240:
374
- 2023-07-24 02:39:59.978516: Epoch 44
375
- 2023-07-24 02:39:59.978635: Current learning rate: 0.0096
376
- 2023-07-24 02:43:33.330351: train_loss -0.6418
377
- 2023-07-24 02:43:33.330585: val_loss -0.4937
378
- 2023-07-24 02:43:33.330672: Pseudo dice [0.8727, 0.6956, 0.5291]
379
- 2023-07-24 02:43:33.330842: Epoch time: 213.35 s
380
- 2023-07-24 02:43:35.411677:
381
- 2023-07-24 02:43:35.411899: Epoch 45
382
- 2023-07-24 02:43:35.412007: Current learning rate: 0.00959
383
- 2023-07-24 02:47:16.638136: train_loss -0.6484
384
- 2023-07-24 02:47:16.638322: val_loss -0.5065
385
- 2023-07-24 02:47:16.638409: Pseudo dice [0.8781, 0.7181, 0.5566]
386
- 2023-07-24 02:47:16.638503: Epoch time: 221.23 s
387
- 2023-07-24 02:47:16.638575: Yayy! New best EMA pseudo Dice: 0.7013
388
- 2023-07-24 02:47:18.875849:
389
- 2023-07-24 02:47:18.876126: Epoch 46
390
- 2023-07-24 02:47:18.876246: Current learning rate: 0.00959
391
- 2023-07-24 02:50:43.664102: train_loss -0.6594
392
- 2023-07-24 02:50:43.664368: val_loss -0.5021
393
- 2023-07-24 02:50:43.664471: Pseudo dice [0.8832, 0.7081, 0.5237]
394
- 2023-07-24 02:50:43.664641: Epoch time: 204.79 s
395
- 2023-07-24 02:50:43.664771: Yayy! New best EMA pseudo Dice: 0.7016
396
- 2023-07-24 02:50:45.913680:
397
- 2023-07-24 02:50:45.913988: Epoch 47
398
- 2023-07-24 02:50:45.914099: Current learning rate: 0.00958
399
- 2023-07-24 02:54:09.407397: train_loss -0.6559
400
- 2023-07-24 02:54:09.407646: val_loss -0.4876
401
- 2023-07-24 02:54:09.407736: Pseudo dice [0.8737, 0.6605, 0.6181]
402
- 2023-07-24 02:54:09.407889: Epoch time: 203.49 s
403
- 2023-07-24 02:54:09.407957: Yayy! New best EMA pseudo Dice: 0.7032
404
- 2023-07-24 02:54:12.936934:
405
- 2023-07-24 02:54:12.937068: Epoch 48
406
- 2023-07-24 02:54:12.937182: Current learning rate: 0.00957
407
- 2023-07-24 02:57:47.539666: train_loss -0.6583
408
- 2023-07-24 02:57:47.539865: val_loss -0.5048
409
- 2023-07-24 02:57:47.540035: Pseudo dice [0.8792, 0.7214, 0.4783]
410
- 2023-07-24 02:57:47.540195: Epoch time: 214.6 s
411
- 2023-07-24 02:57:49.232162:
412
- 2023-07-24 02:57:49.232288: Epoch 49
413
- 2023-07-24 02:57:49.232400: Current learning rate: 0.00956
414
- 2023-07-24 03:01:31.963185: train_loss -0.6562
415
- 2023-07-24 03:01:31.963373: val_loss -0.5071
416
- 2023-07-24 03:01:31.963463: Pseudo dice [0.8811, 0.7189, 0.5632]
417
- 2023-07-24 03:01:31.963546: Epoch time: 222.73 s
418
- 2023-07-24 03:01:32.441789: Yayy! New best EMA pseudo Dice: 0.7041
419
- 2023-07-24 03:01:34.494628:
420
- 2023-07-24 03:01:34.494856: Epoch 50
421
- 2023-07-24 03:01:34.494962: Current learning rate: 0.00955
422
- 2023-07-24 03:05:00.456790: train_loss -0.66
423
- 2023-07-24 03:05:00.457008: val_loss -0.4885
424
- 2023-07-24 03:05:00.457094: Pseudo dice [0.8722, 0.7199, 0.3882]
425
- 2023-07-24 03:05:00.457232: Epoch time: 205.96 s
426
- 2023-07-24 03:05:01.920852:
427
- 2023-07-24 03:05:01.921130: Epoch 51
428
- 2023-07-24 03:05:01.921252: Current learning rate: 0.00954
429
- 2023-07-24 03:08:21.863105: train_loss -0.6615
430
- 2023-07-24 03:08:21.863394: val_loss -0.5063
431
- 2023-07-24 03:08:21.863482: Pseudo dice [0.8652, 0.6923, 0.5877]
432
- 2023-07-24 03:08:21.863767: Epoch time: 199.94 s
433
- 2023-07-24 03:08:23.565506:
434
- 2023-07-24 03:08:23.565624: Epoch 52
435
- 2023-07-24 03:08:23.565736: Current learning rate: 0.00953
436
- 2023-07-24 03:11:58.618505: train_loss -0.6541
437
- 2023-07-24 03:11:58.618745: val_loss -0.5208
438
- 2023-07-24 03:11:58.618862: Pseudo dice [0.8769, 0.7111, 0.5391]
439
- 2023-07-24 03:11:58.619015: Epoch time: 215.05 s
440
- 2023-07-24 03:12:00.422971:
441
- 2023-07-24 03:12:00.423093: Epoch 53
442
- 2023-07-24 03:12:00.423211: Current learning rate: 0.00952
443
- 2023-07-24 03:15:34.016732: train_loss -0.6717
444
- 2023-07-24 03:15:34.016971: val_loss -0.4904
445
- 2023-07-24 03:15:34.017070: Pseudo dice [0.8808, 0.7097, 0.4256]
446
- 2023-07-24 03:15:34.017220: Epoch time: 213.59 s
447
- 2023-07-24 03:15:36.404579:
448
- 2023-07-24 03:15:36.404704: Epoch 54
449
- 2023-07-24 03:15:36.404819: Current learning rate: 0.00951
450
- 2023-07-24 03:19:13.253185: train_loss -0.677
451
- 2023-07-24 03:19:13.253387: val_loss -0.4902
452
- 2023-07-24 03:19:13.253475: Pseudo dice [0.8606, 0.7035, 0.5965]
453
- 2023-07-24 03:19:13.253560: Epoch time: 216.85 s
454
- 2023-07-24 03:19:14.880905:
455
- 2023-07-24 03:19:14.881205: Epoch 55
456
- 2023-07-24 03:19:14.881327: Current learning rate: 0.0095
457
- 2023-07-24 03:22:41.606614: train_loss -0.6675
458
- 2023-07-24 03:22:41.606848: val_loss -0.4941
459
- 2023-07-24 03:22:41.606937: Pseudo dice [0.873, 0.7194, 0.3798]
460
- 2023-07-24 03:22:41.607017: Epoch time: 206.73 s
461
- 2023-07-24 03:22:45.052177:
462
- 2023-07-24 03:22:45.052385: Epoch 56
463
- 2023-07-24 03:22:45.052505: Current learning rate: 0.00949
464
- 2023-07-24 03:26:17.637499: train_loss -0.6827
465
- 2023-07-24 03:26:17.637738: val_loss -0.5062
466
- 2023-07-24 03:26:17.637826: Pseudo dice [0.8737, 0.71, 0.6042]
467
- 2023-07-24 03:26:17.637978: Epoch time: 212.59 s
468
- 2023-07-24 03:26:21.340287:
469
- 2023-07-24 03:26:21.340517: Epoch 57
470
- 2023-07-24 03:26:21.340631: Current learning rate: 0.00949
471
- 2023-07-24 03:29:47.100365: train_loss -0.6838
472
- 2023-07-24 03:29:47.100567: val_loss -0.4964
473
- 2023-07-24 03:29:47.100666: Pseudo dice [0.8891, 0.7083, 0.4048]
474
- 2023-07-24 03:29:47.100763: Epoch time: 205.76 s
475
- 2023-07-24 03:29:48.555212:
476
- 2023-07-24 03:29:48.555346: Epoch 58
477
- 2023-07-24 03:29:48.555466: Current learning rate: 0.00948
478
- 2023-07-24 03:33:20.722145: train_loss -0.6697
479
- 2023-07-24 03:33:20.722324: val_loss -0.5062
480
- 2023-07-24 03:33:20.722407: Pseudo dice [0.8666, 0.6973, 0.5627]
481
- 2023-07-24 03:33:20.722488: Epoch time: 212.17 s
482
- 2023-07-24 03:33:23.647374:
483
- 2023-07-24 03:33:23.647504: Epoch 59
484
- 2023-07-24 03:33:23.647635: Current learning rate: 0.00947
485
- 2023-07-24 03:36:54.070232: train_loss -0.6887
486
- 2023-07-24 03:36:54.070504: val_loss -0.5134
487
- 2023-07-24 03:36:54.070593: Pseudo dice [0.8732, 0.7361, 0.4845]
488
- 2023-07-24 03:36:54.070743: Epoch time: 210.42 s
489
- 2023-07-24 03:36:56.444733:
490
- 2023-07-24 03:36:56.444867: Epoch 60
491
- 2023-07-24 03:36:56.444985: Current learning rate: 0.00946
492
- 2023-07-24 03:40:22.488861: train_loss -0.6985
493
- 2023-07-24 03:40:22.498857: val_loss -0.4992
494
- 2023-07-24 03:40:22.499064: Pseudo dice [0.8846, 0.7076, 0.5453]
495
- 2023-07-24 03:40:22.499147: Epoch time: 206.05 s
496
- 2023-07-24 03:40:25.415841:
497
- 2023-07-24 03:40:25.415988: Epoch 61
498
- 2023-07-24 03:40:25.416104: Current learning rate: 0.00945
499
- 2023-07-24 03:43:58.801075: train_loss -0.6876
500
- 2023-07-24 03:43:58.801260: val_loss -0.5122
501
- 2023-07-24 03:43:58.801349: Pseudo dice [0.8586, 0.716, 0.5498]
502
- 2023-07-24 03:43:58.801434: Epoch time: 213.39 s
503
- 2023-07-24 03:44:01.958030:
504
- 2023-07-24 03:44:01.970955: Epoch 62
505
- 2023-07-24 03:44:01.971086: Current learning rate: 0.00944
506
- 2023-07-24 03:47:44.738477: train_loss -0.6943
507
- 2023-07-24 03:47:44.738670: val_loss -0.5106
508
- 2023-07-24 03:47:44.738753: Pseudo dice [0.882, 0.7326, 0.544]
509
- 2023-07-24 03:47:44.738875: Epoch time: 222.78 s
510
- 2023-07-24 03:47:48.039309:
511
- 2023-07-24 03:47:48.039480: Epoch 63
512
- 2023-07-24 03:47:48.039597: Current learning rate: 0.00943
513
- 2023-07-24 03:51:16.951300: train_loss -0.6731
514
- 2023-07-24 03:51:16.951505: val_loss -0.4852
515
- 2023-07-24 03:51:16.951604: Pseudo dice [0.8708, 0.7122, 0.4841]
516
- 2023-07-24 03:51:16.951797: Epoch time: 208.91 s
517
- 2023-07-24 03:51:18.621170:
518
- 2023-07-24 03:51:18.621321: Epoch 64
519
- 2023-07-24 03:51:18.621441: Current learning rate: 0.00942
520
- 2023-07-24 03:54:44.957338: train_loss -0.6761
521
- 2023-07-24 03:54:44.957540: val_loss -0.4724
522
- 2023-07-24 03:54:44.957629: Pseudo dice [0.8598, 0.6921, 0.5167]
523
- 2023-07-24 03:54:44.957714: Epoch time: 206.34 s
524
- 2023-07-24 03:54:46.812274:
525
- 2023-07-24 03:54:46.812412: Epoch 65
526
- 2023-07-24 03:54:46.812517: Current learning rate: 0.00941
527
- 2023-07-24 03:58:23.950075: train_loss -0.6927
528
- 2023-07-24 03:58:23.950717: val_loss -0.5027
529
- 2023-07-24 03:58:23.951065: Pseudo dice [0.8701, 0.7152, 0.4785]
530
- 2023-07-24 03:58:23.951216: Epoch time: 217.14 s
531
- 2023-07-24 03:58:25.802012:
532
- 2023-07-24 03:58:25.802195: Epoch 66
533
- 2023-07-24 03:58:25.802299: Current learning rate: 0.0094
534
- 2023-07-24 04:01:56.722209: train_loss -0.6963
535
- 2023-07-24 04:01:56.722415: val_loss -0.5062
536
- 2023-07-24 04:01:56.722507: Pseudo dice [0.8726, 0.7042, 0.5969]
537
- 2023-07-24 04:01:56.722586: Epoch time: 210.92 s
538
- 2023-07-24 04:01:59.771096:
539
- 2023-07-24 04:01:59.771243: Epoch 67
540
- 2023-07-24 04:01:59.771358: Current learning rate: 0.00939
541
- 2023-07-24 04:05:40.925208: train_loss -0.6952
542
- 2023-07-24 04:05:40.925490: val_loss -0.5088
543
- 2023-07-24 04:05:40.925585: Pseudo dice [0.8802, 0.729, 0.5521]
544
- 2023-07-24 04:05:40.925745: Epoch time: 221.16 s
545
- 2023-07-24 04:05:43.897002:
546
- 2023-07-24 04:05:43.898616: Epoch 68
547
- 2023-07-24 04:05:43.898738: Current learning rate: 0.00939
548
- 2023-07-24 04:09:15.657769: train_loss -0.6889
549
- 2023-07-24 04:09:15.658007: val_loss -0.516
550
- 2023-07-24 04:09:15.658100: Pseudo dice [0.8782, 0.7278, 0.5643]
551
- 2023-07-24 04:09:15.658307: Epoch time: 211.76 s
552
- 2023-07-24 04:09:15.658453: Yayy! New best EMA pseudo Dice: 0.7051
553
- 2023-07-24 04:09:18.116199:
554
- 2023-07-24 04:09:18.116321: Epoch 69
555
- 2023-07-24 04:09:18.116441: Current learning rate: 0.00938
556
- 2023-07-24 04:12:42.753379: train_loss -0.6904
557
- 2023-07-24 04:12:42.753681: val_loss -0.4671
558
- 2023-07-24 04:12:42.753862: Pseudo dice [0.8749, 0.7138, 0.3681]
559
- 2023-07-24 04:12:42.753966: Epoch time: 204.64 s
560
- 2023-07-24 04:12:44.327600:
561
- 2023-07-24 04:12:44.327721: Epoch 70
562
- 2023-07-24 04:12:44.327840: Current learning rate: 0.00937
563
- 2023-07-24 04:16:22.761208: train_loss -0.6958
564
- 2023-07-24 04:16:22.761458: val_loss -0.5012
565
- 2023-07-24 04:16:22.761544: Pseudo dice [0.8879, 0.7126, 0.5304]
566
- 2023-07-24 04:16:22.761694: Epoch time: 218.43 s
567
- 2023-07-24 04:16:24.628846:
568
- 2023-07-24 04:16:24.629004: Epoch 71
569
- 2023-07-24 04:16:24.629131: Current learning rate: 0.00936
570
- 2023-07-24 04:19:50.557256: train_loss -0.6919
571
- 2023-07-24 04:19:50.557556: val_loss -0.4675
572
- 2023-07-24 04:19:50.557651: Pseudo dice [0.8698, 0.6855, 0.5056]
573
- 2023-07-24 04:19:50.557837: Epoch time: 205.93 s
574
- 2023-07-24 04:19:53.142438:
575
- 2023-07-24 04:19:53.142620: Epoch 72
576
- 2023-07-24 04:19:53.142735: Current learning rate: 0.00935
577
- 2023-07-24 04:23:22.464141: train_loss -0.6977
578
- 2023-07-24 04:23:22.464365: val_loss -0.5149
579
- 2023-07-24 04:23:22.464458: Pseudo dice [0.8859, 0.7116, 0.5175]
580
- 2023-07-24 04:23:22.464544: Epoch time: 209.32 s
581
- 2023-07-24 04:23:24.605744:
582
- 2023-07-24 04:23:24.605880: Epoch 73
583
- 2023-07-24 04:23:24.605996: Current learning rate: 0.00934
584
- 2023-07-24 04:26:50.364227: train_loss -0.6983
585
- 2023-07-24 04:26:50.364492: val_loss -0.4938
586
- 2023-07-24 04:26:50.364594: Pseudo dice [0.8637, 0.7213, 0.5451]
587
- 2023-07-24 04:26:50.364779: Epoch time: 205.76 s
588
- 2023-07-24 04:26:52.800122:
589
- 2023-07-24 04:26:52.800305: Epoch 74
590
- 2023-07-24 04:26:52.800421: Current learning rate: 0.00933
591
- 2023-07-24 04:30:26.828684: train_loss -0.6956
592
- 2023-07-24 04:30:26.828918: val_loss -0.4781
593
- 2023-07-24 04:30:26.829004: Pseudo dice [0.8634, 0.6982, 0.4993]
594
- 2023-07-24 04:30:26.829283: Epoch time: 214.03 s
595
- 2023-07-24 04:30:28.715835:
596
- 2023-07-24 04:30:28.716055: Epoch 75
597
- 2023-07-24 04:30:28.716175: Current learning rate: 0.00932
598
- 2023-07-24 04:33:56.996001: train_loss -0.6978
599
- 2023-07-24 04:33:56.996192: val_loss -0.5014
600
- 2023-07-24 04:33:56.996281: Pseudo dice [0.8721, 0.6953, 0.5428]
601
- 2023-07-24 04:33:56.996363: Epoch time: 208.28 s
602
- 2023-07-24 04:34:01.112788:
603
- 2023-07-24 04:34:01.112929: Epoch 76
604
- 2023-07-24 04:34:01.113038: Current learning rate: 0.00931
605
- 2023-07-24 04:37:36.118190: train_loss -0.6992
606
- 2023-07-24 04:37:36.118396: val_loss -0.5014
607
- 2023-07-24 04:37:36.118491: Pseudo dice [0.8768, 0.7055, 0.5652]
608
- 2023-07-24 04:37:36.118583: Epoch time: 215.01 s
609
- 2023-07-24 04:37:39.211814:
610
- 2023-07-24 04:37:39.211969: Epoch 77
611
- 2023-07-24 04:37:39.212115: Current learning rate: 0.0093
612
- 2023-07-24 04:41:10.694273: train_loss -0.7058
613
- 2023-07-24 04:41:10.694464: val_loss -0.4895
614
- 2023-07-24 04:41:10.694552: Pseudo dice [0.8602, 0.7049, 0.5613]
615
- 2023-07-24 04:41:10.694637: Epoch time: 211.48 s
616
- 2023-07-24 04:41:13.013998:
617
- 2023-07-24 04:41:13.014146: Epoch 78
618
- 2023-07-24 04:41:13.014262: Current learning rate: 0.0093
619
- 2023-07-24 04:44:39.537027: train_loss -0.7016
620
- 2023-07-24 04:44:39.537224: val_loss -0.4884
621
- 2023-07-24 04:44:39.537313: Pseudo dice [0.8797, 0.7206, 0.4807]
622
- 2023-07-24 04:44:39.537394: Epoch time: 206.52 s
623
- 2023-07-24 04:44:41.752397:
624
- 2023-07-24 04:44:41.752547: Epoch 79
625
- 2023-07-24 04:44:41.752668: Current learning rate: 0.00929
626
- 2023-07-24 04:48:09.494605: train_loss -0.6964
627
- 2023-07-24 04:48:09.494830: val_loss -0.5074
628
- 2023-07-24 04:48:09.494932: Pseudo dice [0.8874, 0.7203, 0.6069]
629
- 2023-07-24 04:48:09.495023: Epoch time: 207.74 s
630
- 2023-07-24 04:48:11.746876:
631
- 2023-07-24 04:48:11.747036: Epoch 80
632
- 2023-07-24 04:48:11.747147: Current learning rate: 0.00928
633
- 2023-07-24 04:51:42.348668: train_loss -0.7065
634
- 2023-07-24 04:51:42.348896: val_loss -0.4837
635
- 2023-07-24 04:51:42.348983: Pseudo dice [0.8784, 0.7032, 0.5621]
636
- 2023-07-24 04:51:42.349133: Epoch time: 210.6 s
637
- 2023-07-24 04:51:42.349199: Yayy! New best EMA pseudo Dice: 0.7061
638
- 2023-07-24 04:51:44.484021:
639
- 2023-07-24 04:51:44.484140: Epoch 81
640
- 2023-07-24 04:51:44.484257: Current learning rate: 0.00927
641
- 2023-07-24 04:55:09.696577: train_loss -0.714
642
- 2023-07-24 04:55:09.696886: val_loss -0.491
643
- 2023-07-24 04:55:09.696984: Pseudo dice [0.8814, 0.7251, 0.4554]
644
- 2023-07-24 04:55:09.697072: Epoch time: 205.21 s
645
- 2023-07-24 04:55:11.259889:
646
- 2023-07-24 04:55:11.260105: Epoch 82
647
- 2023-07-24 04:55:11.260218: Current learning rate: 0.00926
648
- 2023-07-24 04:58:42.167684: train_loss -0.7151
649
- 2023-07-24 04:58:42.167853: val_loss -0.5149
650
- 2023-07-24 04:58:42.167938: Pseudo dice [0.8747, 0.6999, 0.632]
651
- 2023-07-24 04:58:42.168024: Epoch time: 210.91 s
652
- 2023-07-24 04:58:42.168089: Yayy! New best EMA pseudo Dice: 0.7073
653
- 2023-07-24 04:58:46.756355:
654
- 2023-07-24 04:58:46.756490: Epoch 83
655
- 2023-07-24 04:58:46.756597: Current learning rate: 0.00925
656
- 2023-07-24 05:02:22.008022: train_loss -0.7232
657
- 2023-07-24 05:02:22.008212: val_loss -0.5229
658
- 2023-07-24 05:02:22.008301: Pseudo dice [0.8914, 0.7236, 0.6229]
659
- 2023-07-24 05:02:22.008386: Epoch time: 215.25 s
660
- 2023-07-24 05:02:22.008452: Yayy! New best EMA pseudo Dice: 0.7112
661
- 2023-07-24 05:02:24.066390:
662
- 2023-07-24 05:02:24.066524: Epoch 84
663
- 2023-07-24 05:02:24.066619: Current learning rate: 0.00924
664
- 2023-07-24 05:05:50.327326: train_loss -0.725
665
- 2023-07-24 05:05:50.327611: val_loss -0.5159
666
- 2023-07-24 05:05:50.327698: Pseudo dice [0.8756, 0.7106, 0.5817]
667
- 2023-07-24 05:05:50.327851: Epoch time: 206.26 s
668
- 2023-07-24 05:05:50.327918: Yayy! New best EMA pseudo Dice: 0.7123
669
- 2023-07-24 05:05:54.485906:
670
- 2023-07-24 05:05:54.486033: Epoch 85
671
- 2023-07-24 05:05:54.486146: Current learning rate: 0.00923
672
- 2023-07-24 05:09:37.322994: train_loss -0.723
673
- 2023-07-24 05:09:37.323187: val_loss -0.5029
674
- 2023-07-24 05:09:37.323277: Pseudo dice [0.8783, 0.7171, 0.4587]
675
- 2023-07-24 05:09:37.323362: Epoch time: 222.84 s
676
- 2023-07-24 05:09:38.723594:
677
- 2023-07-24 05:09:38.723815: Epoch 86
678
- 2023-07-24 05:09:38.723936: Current learning rate: 0.00922
679
- 2023-07-24 05:13:09.316705: train_loss -0.7182
680
- 2023-07-24 05:13:09.316886: val_loss -0.4834
681
- 2023-07-24 05:13:09.316982: Pseudo dice [0.8828, 0.7206, 0.4715]
682
- 2023-07-24 05:13:09.317072: Epoch time: 210.59 s
683
- 2023-07-24 05:13:12.422567:
684
- 2023-07-24 05:13:12.422688: Epoch 87
685
- 2023-07-24 05:13:12.422828: Current learning rate: 0.00921
686
- 2023-07-24 05:16:46.310607: train_loss -0.7222
687
- 2023-07-24 05:16:46.310821: val_loss -0.502
688
- 2023-07-24 05:16:46.310914: Pseudo dice [0.8901, 0.6998, 0.6513]
689
- 2023-07-24 05:16:46.310997: Epoch time: 213.89 s
690
- 2023-07-24 05:16:48.065801:
691
- 2023-07-24 05:16:48.065928: Epoch 88
692
- 2023-07-24 05:16:48.066038: Current learning rate: 0.0092
693
- 2023-07-24 05:20:26.412961: train_loss -0.7227
694
- 2023-07-24 05:20:26.413243: val_loss -0.5113
695
- 2023-07-24 05:20:26.413336: Pseudo dice [0.885, 0.7263, 0.6205]
696
- 2023-07-24 05:20:26.413419: Epoch time: 218.35 s
697
- 2023-07-24 05:20:26.413479: Yayy! New best EMA pseudo Dice: 0.7149
698
- 2023-07-24 05:20:28.466174:
699
- 2023-07-24 05:20:28.466462: Epoch 89
700
- 2023-07-24 05:20:28.466586: Current learning rate: 0.0092
701
- 2023-07-24 05:23:53.870799: train_loss -0.7281
702
- 2023-07-24 05:23:53.871042: val_loss -0.5177
703
- 2023-07-24 05:23:53.871127: Pseudo dice [0.8774, 0.7281, 0.5178]
704
- 2023-07-24 05:23:53.871280: Epoch time: 205.41 s
705
- 2023-07-24 05:23:56.666574:
706
- 2023-07-24 05:23:56.666709: Epoch 90
707
- 2023-07-24 05:23:56.666846: Current learning rate: 0.00919
708
- 2023-07-24 05:27:24.511108: train_loss -0.7268
709
- 2023-07-24 05:27:24.511282: val_loss -0.4883
710
- 2023-07-24 05:27:24.511378: Pseudo dice [0.8808, 0.7023, 0.5429]
711
- 2023-07-24 05:27:24.511463: Epoch time: 207.85 s
712
- 2023-07-24 05:27:26.253584:
713
- 2023-07-24 05:27:26.253713: Epoch 91
714
- 2023-07-24 05:27:26.253826: Current learning rate: 0.00918
715
- 2023-07-24 05:31:00.680078: train_loss -0.7187
716
- 2023-07-24 05:31:00.680262: val_loss -0.5126
717
- 2023-07-24 05:31:00.680357: Pseudo dice [0.8854, 0.734, 0.5313]
718
- 2023-07-24 05:31:00.680444: Epoch time: 214.43 s
719
- 2023-07-24 05:31:03.875111:
720
- 2023-07-24 05:31:03.875342: Epoch 92
721
- 2023-07-24 05:31:03.875453: Current learning rate: 0.00917
722
- 2023-07-24 05:34:36.700777: train_loss -0.7261
723
- 2023-07-24 05:34:36.701067: val_loss -0.4824
724
- 2023-07-24 05:34:36.701155: Pseudo dice [0.8677, 0.7068, 0.5625]
725
- 2023-07-24 05:34:36.701310: Epoch time: 212.83 s
726
- 2023-07-24 05:34:38.725615:
727
- 2023-07-24 05:34:38.725760: Epoch 93
728
- 2023-07-24 05:34:38.725878: Current learning rate: 0.00916
729
- 2023-07-24 05:38:14.723124: train_loss -0.7303
730
- 2023-07-24 05:38:14.723329: val_loss -0.4918
731
- 2023-07-24 05:38:14.723435: Pseudo dice [0.8725, 0.7201, 0.5379]
732
- 2023-07-24 05:38:14.723536: Epoch time: 216.0 s
733
- 2023-07-24 05:38:18.938028:
734
- 2023-07-24 05:38:18.938344: Epoch 94
735
- 2023-07-24 05:38:18.938459: Current learning rate: 0.00915
736
- 2023-07-24 05:41:49.722474: train_loss -0.7321
737
- 2023-07-24 05:41:49.722725: val_loss -0.5192
738
- 2023-07-24 05:41:49.722838: Pseudo dice [0.8869, 0.73, 0.5719]
739
- 2023-07-24 05:41:49.722987: Epoch time: 210.79 s
740
- 2023-07-24 05:41:49.723047: Yayy! New best EMA pseudo Dice: 0.7151
741
- 2023-07-24 05:41:52.238441:
742
- 2023-07-24 05:41:52.238770: Epoch 95
743
- 2023-07-24 05:41:52.238896: Current learning rate: 0.00914
744
- 2023-07-24 05:45:28.216917: train_loss -0.7294
745
- 2023-07-24 05:45:28.217144: val_loss -0.4859
746
- 2023-07-24 05:45:28.217231: Pseudo dice [0.8868, 0.7071, 0.502]
747
- 2023-07-24 05:45:28.217372: Epoch time: 215.98 s
748
- 2023-07-24 05:45:30.203134:
749
- 2023-07-24 05:45:30.203313: Epoch 96
750
- 2023-07-24 05:45:30.203428: Current learning rate: 0.00913
751
- 2023-07-24 05:49:07.118325: train_loss -0.7275
752
- 2023-07-24 05:49:07.130988: val_loss -0.4979
753
- 2023-07-24 05:49:07.131268: Pseudo dice [0.8742, 0.7169, 0.5796]
754
- 2023-07-24 05:49:07.131369: Epoch time: 216.92 s
755
- 2023-07-24 05:49:10.592634:
756
- 2023-07-24 05:49:10.592791: Epoch 97
757
- 2023-07-24 05:49:10.592889: Current learning rate: 0.00912
758
- 2023-07-24 05:52:37.449055: train_loss -0.7377
759
- 2023-07-24 05:52:37.449250: val_loss -0.4932
760
- 2023-07-24 05:52:37.449338: Pseudo dice [0.878, 0.7154, 0.5691]
761
- 2023-07-24 05:52:37.449421: Epoch time: 206.86 s
762
- 2023-07-24 05:52:37.449489: Yayy! New best EMA pseudo Dice: 0.7151
763
- 2023-07-24 05:52:39.613982:
764
- 2023-07-24 05:52:39.614139: Epoch 98
765
- 2023-07-24 05:52:39.614302: Current learning rate: 0.00911
766
- 2023-07-24 05:56:12.741833: train_loss -0.725
767
- 2023-07-24 05:56:12.742075: val_loss -0.5039
768
- 2023-07-24 05:56:12.742163: Pseudo dice [0.8862, 0.711, 0.6208]
769
- 2023-07-24 05:56:12.742243: Epoch time: 213.13 s
770
- 2023-07-24 05:56:12.742309: Yayy! New best EMA pseudo Dice: 0.7175
771
- 2023-07-24 05:56:16.737276:
772
- 2023-07-24 05:56:16.737576: Epoch 99
773
- 2023-07-24 05:56:16.737698: Current learning rate: 0.0091
774
- 2023-07-24 05:59:45.462096: train_loss -0.7306
775
- 2023-07-24 05:59:45.462285: val_loss -0.5266
776
- 2023-07-24 05:59:45.462388: Pseudo dice [0.8861, 0.7386, 0.513]
777
- 2023-07-24 05:59:45.462479: Epoch time: 208.73 s
778
- 2023-07-24 05:59:50.668950:
779
- 2023-07-24 05:59:50.669163: Epoch 100
780
- 2023-07-24 05:59:50.669280: Current learning rate: 0.0091
781
- 2023-07-24 06:03:33.285699: train_loss -0.7382
782
- 2023-07-24 06:03:33.285881: val_loss -0.5096
783
- 2023-07-24 06:03:33.285975: Pseudo dice [0.8703, 0.712, 0.5639]
784
- 2023-07-24 06:03:33.286058: Epoch time: 222.62 s
785
- 2023-07-24 06:03:35.851426:
786
- 2023-07-24 06:03:35.851692: Epoch 101
787
- 2023-07-24 06:03:35.851813: Current learning rate: 0.00909
788
- 2023-07-24 06:07:07.771703: train_loss -0.7398
789
- 2023-07-24 06:07:07.771932: val_loss -0.5006
790
- 2023-07-24 06:07:07.772034: Pseudo dice [0.8863, 0.723, 0.5644]
791
- 2023-07-24 06:07:07.772126: Epoch time: 211.92 s
792
- 2023-07-24 06:07:07.772207: Yayy! New best EMA pseudo Dice: 0.7176
793
- 2023-07-24 06:07:09.785565:
794
- 2023-07-24 06:07:09.785925: Epoch 102
795
- 2023-07-24 06:07:09.786036: Current learning rate: 0.00908
796
- 2023-07-24 06:10:43.597014: train_loss -0.7306
797
- 2023-07-24 06:10:43.597273: val_loss -0.5015
798
- 2023-07-24 06:10:43.599555: Pseudo dice [0.8817, 0.7251, 0.5061]
799
- 2023-07-24 06:10:43.599782: Epoch time: 213.81 s
800
- 2023-07-24 06:10:45.487350:
801
- 2023-07-24 06:10:45.487549: Epoch 103
802
- 2023-07-24 06:10:45.487668: Current learning rate: 0.00907
803
- 2023-07-24 06:14:14.858136: train_loss -0.7477
804
- 2023-07-24 06:14:14.858318: val_loss -0.4896
805
- 2023-07-24 06:14:14.858405: Pseudo dice [0.8898, 0.7226, 0.5152]
806
- 2023-07-24 06:14:14.858487: Epoch time: 209.37 s
807
- 2023-07-24 06:14:18.636439:
808
- 2023-07-24 06:14:18.636607: Epoch 104
809
- 2023-07-24 06:14:18.636723: Current learning rate: 0.00906
810
- 2023-07-24 06:17:56.407210: train_loss -0.7288
811
- 2023-07-24 06:17:56.407393: val_loss -0.4908
812
- 2023-07-24 06:17:56.407491: Pseudo dice [0.8777, 0.7242, 0.4401]
813
- 2023-07-24 06:17:56.407592: Epoch time: 217.77 s
814
- 2023-07-24 06:17:59.395841:
815
- 2023-07-24 06:17:59.406929: Epoch 105
816
- 2023-07-24 06:17:59.407068: Current learning rate: 0.00905
817
- 2023-07-24 06:21:34.379379: train_loss -0.7297
818
- 2023-07-24 06:21:34.379545: val_loss -0.5001
819
- 2023-07-24 06:21:34.379629: Pseudo dice [0.8812, 0.7174, 0.5466]
820
- 2023-07-24 06:21:34.379707: Epoch time: 215.0 s
821
- 2023-07-24 06:21:36.480978:
822
- 2023-07-24 06:21:36.481113: Epoch 106
823
- 2023-07-24 06:21:36.481229: Current learning rate: 0.00904
824
- 2023-07-24 06:24:59.495173: train_loss -0.7319
825
- 2023-07-24 06:24:59.495373: val_loss -0.4684
826
- 2023-07-24 06:24:59.495463: Pseudo dice [0.8719, 0.699, 0.4954]
827
- 2023-07-24 06:24:59.495545: Epoch time: 203.02 s
828
- 2023-07-24 06:25:01.416497:
829
- 2023-07-24 06:25:01.416672: Epoch 107
830
- 2023-07-24 06:25:01.416785: Current learning rate: 0.00903
831
- 2023-07-24 06:28:38.100931: train_loss -0.7425
832
- 2023-07-24 06:28:38.101100: val_loss -0.5066
833
- 2023-07-24 06:28:38.101199: Pseudo dice [0.8828, 0.7349, 0.551]
834
- 2023-07-24 06:28:38.101286: Epoch time: 216.69 s
835
- 2023-07-24 06:28:39.838402:
836
- 2023-07-24 06:28:39.838579: Epoch 108
837
- 2023-07-24 06:28:39.838694: Current learning rate: 0.00902
838
- 2023-07-24 06:32:19.977618: train_loss -0.7335
839
- 2023-07-24 06:32:19.990841: val_loss -0.4957
840
- 2023-07-24 06:32:19.990945: Pseudo dice [0.8713, 0.7344, 0.4371]
841
- 2023-07-24 06:32:19.991140: Epoch time: 220.14 s
842
- 2023-07-24 06:32:22.127137:
843
- 2023-07-24 06:32:22.127293: Epoch 109
844
- 2023-07-24 06:32:22.127414: Current learning rate: 0.00901
845
- 2023-07-24 06:35:56.862900: train_loss -0.7247
846
- 2023-07-24 06:35:56.863097: val_loss -0.4827
847
- 2023-07-24 06:35:56.863201: Pseudo dice [0.8801, 0.6793, 0.5831]
848
- 2023-07-24 06:35:56.863297: Epoch time: 214.74 s
849
- 2023-07-24 06:36:00.448839:
850
- 2023-07-24 06:36:00.449008: Epoch 110
851
- 2023-07-24 06:36:00.449116: Current learning rate: 0.009
852
- 2023-07-24 06:39:25.954977: train_loss -0.7307
853
- 2023-07-24 06:39:25.955158: val_loss -0.4855
854
- 2023-07-24 06:39:25.955249: Pseudo dice [0.8795, 0.6975, 0.5762]
855
- 2023-07-24 06:39:25.955334: Epoch time: 205.51 s
856
- 2023-07-24 06:39:28.934479:
857
- 2023-07-24 06:39:28.934601: Epoch 111
858
- 2023-07-24 06:39:28.934713: Current learning rate: 0.009
859
- 2023-07-24 06:42:53.226294: train_loss -0.7364
860
- 2023-07-24 06:42:53.226513: val_loss -0.4781
861
- 2023-07-24 06:42:53.226613: Pseudo dice [0.8844, 0.7035, 0.4998]
862
- 2023-07-24 06:42:53.226707: Epoch time: 204.29 s
863
- 2023-07-24 06:42:57.119614:
864
- 2023-07-24 06:42:57.119749: Epoch 112
865
- 2023-07-24 06:42:57.119859: Current learning rate: 0.00899
866
- 2023-07-24 06:46:27.868778: train_loss -0.7428
867
- 2023-07-24 06:46:27.868973: val_loss -0.5043
868
- 2023-07-24 06:46:27.869063: Pseudo dice [0.8869, 0.6979, 0.6164]
869
- 2023-07-24 06:46:27.869147: Epoch time: 210.75 s
870
- 2023-07-24 06:46:29.738551:
871
- 2023-07-24 06:46:29.738678: Epoch 113
872
- 2023-07-24 06:46:29.738820: Current learning rate: 0.00898
873
- 2023-07-24 06:49:47.016800: train_loss -0.7404
874
- 2023-07-24 06:49:47.017059: val_loss -0.484
875
- 2023-07-24 06:49:47.017146: Pseudo dice [0.8878, 0.7123, 0.5259]
876
- 2023-07-24 06:49:47.017297: Epoch time: 197.28 s
877
- 2023-07-24 06:49:49.028085:
878
- 2023-07-24 06:49:49.028212: Epoch 114
879
- 2023-07-24 06:49:49.028322: Current learning rate: 0.00897
880
- 2023-07-24 06:53:22.595026: train_loss -0.7403
881
- 2023-07-24 06:53:22.595240: val_loss -0.4923
882
- 2023-07-24 06:53:22.595333: Pseudo dice [0.8744, 0.7065, 0.5055]
883
- 2023-07-24 06:53:22.595417: Epoch time: 213.57 s
884
- 2023-07-24 06:53:25.630225:
885
- 2023-07-24 06:53:25.630350: Epoch 115
886
- 2023-07-24 06:53:25.630466: Current learning rate: 0.00896
887
- 2023-07-24 06:56:54.846274: train_loss -0.7407
888
- 2023-07-24 06:56:54.846479: val_loss -0.5206
889
- 2023-07-24 06:56:54.846571: Pseudo dice [0.8874, 0.7354, 0.4706]
890
- 2023-07-24 06:56:54.846656: Epoch time: 209.22 s
891
- 2023-07-24 06:56:58.530087:
892
- 2023-07-24 06:56:58.530329: Epoch 116
893
- 2023-07-24 06:56:58.530463: Current learning rate: 0.00895
894
- 2023-07-24 07:00:33.757893: train_loss -0.7397
895
- 2023-07-24 07:00:33.758083: val_loss -0.4963
896
- 2023-07-24 07:00:33.758173: Pseudo dice [0.8784, 0.7196, 0.5139]
897
- 2023-07-24 07:00:33.758259: Epoch time: 215.23 s
898
- 2023-07-24 07:00:36.559549:
899
- 2023-07-24 07:00:36.559805: Epoch 117
900
- 2023-07-24 07:00:36.559918: Current learning rate: 0.00894
901
- 2023-07-24 07:04:09.798198: train_loss -0.737
902
- 2023-07-24 07:04:09.798404: val_loss -0.4986
903
- 2023-07-24 07:04:09.809834: Pseudo dice [0.879, 0.706, 0.5266]
904
- 2023-07-24 07:04:09.810041: Epoch time: 213.24 s
905
- 2023-07-24 07:04:13.458022:
906
- 2023-07-24 07:04:13.458441: Epoch 118
907
- 2023-07-24 07:04:13.458563: Current learning rate: 0.00893
908
- 2023-07-24 07:07:55.203243: train_loss -0.7376
909
- 2023-07-24 07:07:55.203503: val_loss -0.5011
910
- 2023-07-24 07:07:55.203593: Pseudo dice [0.8852, 0.7273, 0.5817]
911
- 2023-07-24 07:07:55.203745: Epoch time: 221.75 s
912
- 2023-07-24 07:07:57.539746:
913
- 2023-07-24 07:07:57.539875: Epoch 119
914
- 2023-07-24 07:07:57.539999: Current learning rate: 0.00892
915
- 2023-07-24 07:11:23.694811: train_loss -0.7511
916
- 2023-07-24 07:11:23.706887: val_loss -0.5173
917
- 2023-07-24 07:11:23.707143: Pseudo dice [0.8818, 0.7369, 0.559]
918
- 2023-07-24 07:11:23.707327: Epoch time: 206.16 s
919
- 2023-07-24 07:11:26.298614:
920
- 2023-07-24 07:11:26.298933: Epoch 120
921
- 2023-07-24 07:11:26.299044: Current learning rate: 0.00891
922
- 2023-07-24 07:14:58.226911: train_loss -0.7376
923
- 2023-07-24 07:14:58.227126: val_loss -0.4984
924
- 2023-07-24 07:14:58.227228: Pseudo dice [0.8841, 0.7261, 0.4873]
925
- 2023-07-24 07:14:58.227322: Epoch time: 211.93 s
926
- 2023-07-24 07:15:00.409765:
927
- 2023-07-24 07:15:00.409939: Epoch 121
928
- 2023-07-24 07:15:00.410051: Current learning rate: 0.0089
929
- 2023-07-24 07:18:26.223230: train_loss -0.7331
930
- 2023-07-24 07:18:26.223434: val_loss -0.4727
931
- 2023-07-24 07:18:26.223520: Pseudo dice [0.8632, 0.6888, 0.5576]
932
- 2023-07-24 07:18:26.223612: Epoch time: 205.81 s
933
- 2023-07-24 07:18:28.232954:
934
- 2023-07-24 07:18:28.233098: Epoch 122
935
- 2023-07-24 07:18:28.233214: Current learning rate: 0.00889
936
- 2023-07-24 07:22:00.937126: train_loss -0.739
937
- 2023-07-24 07:22:00.937339: val_loss -0.4894
938
- 2023-07-24 07:22:00.937483: Pseudo dice [0.8769, 0.7199, 0.4559]
939
- 2023-07-24 07:22:00.937608: Epoch time: 212.71 s
940
- 2023-07-24 07:22:04.448639:
941
- 2023-07-24 07:22:04.448863: Epoch 123
942
- 2023-07-24 07:22:04.448967: Current learning rate: 0.00889
943
- 2023-07-24 07:25:39.226343: train_loss -0.7513
944
- 2023-07-24 07:25:39.226538: val_loss -0.4662
945
- 2023-07-24 07:25:39.226624: Pseudo dice [0.8728, 0.7032, 0.4345]
946
- 2023-07-24 07:25:39.226705: Epoch time: 214.78 s
947
- 2023-07-24 07:25:41.940106:
948
- 2023-07-24 07:25:41.940430: Epoch 124
949
- 2023-07-24 07:25:41.940552: Current learning rate: 0.00888
950
- 2023-07-24 07:29:31.867979: train_loss -0.7385
951
- 2023-07-24 07:29:31.868199: val_loss -0.4822
952
- 2023-07-24 07:29:31.868285: Pseudo dice [0.8879, 0.7072, 0.4662]
953
- 2023-07-24 07:29:31.868367: Epoch time: 229.93 s
954
- 2023-07-24 07:29:33.666378:
955
- 2023-07-24 07:29:33.666722: Epoch 125
956
- 2023-07-24 07:29:33.666862: Current learning rate: 0.00887
957
- 2023-07-24 07:33:13.897824: train_loss -0.735
958
- 2023-07-24 07:33:13.898085: val_loss -0.5056
959
- 2023-07-24 07:33:13.898172: Pseudo dice [0.8899, 0.7388, 0.4273]
960
- 2023-07-24 07:33:13.898322: Epoch time: 220.23 s
961
- 2023-07-24 07:33:15.701629:
962
- 2023-07-24 07:33:15.701919: Epoch 126
963
- 2023-07-24 07:33:15.702039: Current learning rate: 0.00886
964
- 2023-07-24 07:36:46.580624: train_loss -0.7412
965
- 2023-07-24 07:36:46.590938: val_loss -0.4682
966
- 2023-07-24 07:36:46.591583: Pseudo dice [0.8875, 0.707, 0.464]
967
- 2023-07-24 07:36:46.591727: Epoch time: 210.88 s
968
- 2023-07-24 07:36:49.059450:
969
- 2023-07-24 07:36:49.059957: Epoch 127
970
- 2023-07-24 07:36:49.060068: Current learning rate: 0.00885
971
- 2023-07-24 07:40:16.085344: train_loss -0.7467
972
- 2023-07-24 07:40:16.085670: val_loss -0.5007
973
- 2023-07-24 07:40:16.085762: Pseudo dice [0.8923, 0.7309, 0.4699]
974
- 2023-07-24 07:40:16.085928: Epoch time: 207.03 s
975
- 2023-07-24 07:40:18.069315:
976
- 2023-07-24 07:40:18.069484: Epoch 128
977
- 2023-07-24 07:40:18.069586: Current learning rate: 0.00884
978
- 2023-07-24 07:43:48.285688: train_loss -0.7613
979
- 2023-07-24 07:43:48.285896: val_loss -0.5026
980
- 2023-07-24 07:43:48.285983: Pseudo dice [0.8685, 0.7138, 0.5262]
981
- 2023-07-24 07:43:48.286072: Epoch time: 210.22 s
982
- 2023-07-24 07:43:50.040725:
983
- 2023-07-24 07:43:50.040847: Epoch 129
984
- 2023-07-24 07:43:50.040961: Current learning rate: 0.00883
985
- 2023-07-24 07:47:20.600014: train_loss -0.749
986
- 2023-07-24 07:47:20.600197: val_loss -0.4715
987
- 2023-07-24 07:47:20.600303: Pseudo dice [0.8669, 0.7066, 0.5142]
988
- 2023-07-24 07:47:20.600403: Epoch time: 210.56 s
989
- 2023-07-24 07:47:22.825845:
990
- 2023-07-24 07:47:22.825982: Epoch 130
991
- 2023-07-24 07:47:22.826100: Current learning rate: 0.00882
992
- 2023-07-24 07:51:01.366344: train_loss -0.7542
993
- 2023-07-24 07:51:01.372711: val_loss -0.4867
994
- 2023-07-24 07:51:01.373079: Pseudo dice [0.8709, 0.7212, 0.5275]
995
- 2023-07-24 07:51:01.373174: Epoch time: 218.54 s
996
- 2023-07-24 07:51:04.639824:
997
- 2023-07-24 07:51:04.640065: Epoch 131
998
- 2023-07-24 07:51:04.640186: Current learning rate: 0.00881
999
- 2023-07-24 07:54:25.996458: train_loss -0.7539
1000
- 2023-07-24 07:54:25.996660: val_loss -0.493
1001
- 2023-07-24 07:54:25.996764: Pseudo dice [0.879, 0.7073, 0.5086]
1002
- 2023-07-24 07:54:25.996862: Epoch time: 201.36 s
1003
- 2023-07-24 07:54:27.833304:
1004
- 2023-07-24 07:54:27.833434: Epoch 132
1005
- 2023-07-24 07:54:27.833548: Current learning rate: 0.0088
1006
- 2023-07-24 07:58:06.308678: train_loss -0.7424
1007
- 2023-07-24 07:58:06.308868: val_loss -0.4781
1008
- 2023-07-24 07:58:06.308955: Pseudo dice [0.8749, 0.7132, 0.4636]
1009
- 2023-07-24 07:58:06.309040: Epoch time: 218.48 s
1010
- 2023-07-24 07:58:07.995495:
1011
- 2023-07-24 07:58:07.995820: Epoch 133
1012
- 2023-07-24 07:58:07.995934: Current learning rate: 0.00879
1013
- 2023-07-24 08:01:39.319222: train_loss -0.7583
1014
- 2023-07-24 08:01:39.319408: val_loss -0.4887
1015
- 2023-07-24 08:01:39.319496: Pseudo dice [0.8776, 0.6941, 0.6211]
1016
- 2023-07-24 08:01:39.319587: Epoch time: 211.32 s
1017
- 2023-07-24 08:01:42.469862:
1018
- 2023-07-24 08:01:42.470145: Epoch 134
1019
- 2023-07-24 08:01:42.470260: Current learning rate: 0.00879
1020
- 2023-07-24 08:05:12.546540: train_loss -0.7622
1021
- 2023-07-24 08:05:12.546793: val_loss -0.487
1022
- 2023-07-24 08:05:12.546890: Pseudo dice [0.877, 0.6954, 0.6036]
1023
- 2023-07-24 08:05:12.547057: Epoch time: 210.08 s
1024
- 2023-07-24 08:05:15.318394:
1025
- 2023-07-24 08:05:15.318724: Epoch 135
1026
- 2023-07-24 08:05:15.318935: Current learning rate: 0.00878
1027
- 2023-07-24 08:08:54.008573: train_loss -0.757
1028
- 2023-07-24 08:08:54.008773: val_loss -0.4737
1029
- 2023-07-24 08:08:54.008857: Pseudo dice [0.8687, 0.6997, 0.5345]
1030
- 2023-07-24 08:08:54.008935: Epoch time: 218.69 s
1031
- 2023-07-24 08:08:57.009470:
1032
- 2023-07-24 08:08:57.009609: Epoch 136
1033
- 2023-07-24 08:08:57.009723: Current learning rate: 0.00877
1034
- 2023-07-24 08:12:31.627689: train_loss -0.7468
1035
- 2023-07-24 08:12:31.628009: val_loss -0.4989
1036
- 2023-07-24 08:12:31.628209: Pseudo dice [0.8809, 0.7079, 0.5713]
1037
- 2023-07-24 08:12:31.628382: Epoch time: 214.62 s
1038
- 2023-07-24 08:12:34.747013:
1039
- 2023-07-24 08:12:34.747181: Epoch 137
1040
- 2023-07-24 08:12:34.747300: Current learning rate: 0.00876
1041
- 2023-07-24 08:16:01.029350: train_loss -0.7588
1042
- 2023-07-24 08:16:01.029534: val_loss -0.4855
1043
- 2023-07-24 08:16:01.029621: Pseudo dice [0.8791, 0.6898, 0.6106]
1044
- 2023-07-24 08:16:01.029705: Epoch time: 206.28 s
1045
- 2023-07-24 08:16:02.771766:
1046
- 2023-07-24 08:16:02.771899: Epoch 138
1047
- 2023-07-24 08:16:02.772012: Current learning rate: 0.00875
1048
- 2023-07-24 08:19:44.406471: train_loss -0.747
1049
- 2023-07-24 08:19:44.406666: val_loss -0.5084
1050
- 2023-07-24 08:19:44.406777: Pseudo dice [0.8869, 0.7204, 0.5542]
1051
- 2023-07-24 08:19:44.406874: Epoch time: 221.64 s
1052
- 2023-07-24 08:19:47.105821:
1053
- 2023-07-24 08:19:47.105972: Epoch 139
1054
- 2023-07-24 08:19:47.106068: Current learning rate: 0.00874
1055
- 2023-07-24 08:23:18.237113: train_loss -0.7528
1056
- 2023-07-24 08:23:18.237421: val_loss -0.4871
1057
- 2023-07-24 08:23:18.237525: Pseudo dice [0.8776, 0.706, 0.5431]
1058
- 2023-07-24 08:23:18.237715: Epoch time: 211.13 s
1059
- 2023-07-24 08:23:20.150906:
1060
- 2023-07-24 08:23:20.151026: Epoch 140
1061
- 2023-07-24 08:23:20.151122: Current learning rate: 0.00873
1062
- 2023-07-24 08:26:46.623374: train_loss -0.7637
1063
- 2023-07-24 08:26:46.623651: val_loss -0.4917
1064
- 2023-07-24 08:26:46.623740: Pseudo dice [0.8785, 0.6989, 0.6665]
1065
- 2023-07-24 08:26:46.623897: Epoch time: 206.47 s
1066
- 2023-07-24 08:26:48.784806:
1067
- 2023-07-24 08:26:48.785008: Epoch 141
1068
- 2023-07-24 08:26:48.785114: Current learning rate: 0.00872
1069
- 2023-07-24 08:30:30.098001: train_loss -0.7605
1070
- 2023-07-24 08:30:30.098207: val_loss -0.499
1071
- 2023-07-24 08:30:30.098298: Pseudo dice [0.8795, 0.7093, 0.5429]
1072
- 2023-07-24 08:30:30.098378: Epoch time: 221.31 s
1073
- 2023-07-24 08:30:32.113620:
1074
- 2023-07-24 08:30:32.113780: Epoch 142
1075
- 2023-07-24 08:30:32.113915: Current learning rate: 0.00871
1076
- 2023-07-24 08:33:58.876559: train_loss -0.7434
1077
- 2023-07-24 08:33:58.876756: val_loss -0.4936
1078
- 2023-07-24 08:33:58.876848: Pseudo dice [0.8844, 0.7121, 0.5416]
1079
- 2023-07-24 08:33:58.876932: Epoch time: 206.76 s
1080
- 2023-07-24 08:34:01.494818:
1081
- 2023-07-24 08:34:01.495366: Epoch 143
1082
- 2023-07-24 08:34:01.495486: Current learning rate: 0.0087
1083
- 2023-07-24 08:37:26.398371: train_loss -0.7416
1084
- 2023-07-24 08:37:26.398579: val_loss -0.5108
1085
- 2023-07-24 08:37:26.398680: Pseudo dice [0.8855, 0.7182, 0.5946]
1086
- 2023-07-24 08:37:26.398790: Epoch time: 204.91 s
1087
- 2023-07-24 08:37:30.655529:
1088
- 2023-07-24 08:37:30.655930: Epoch 144
1089
- 2023-07-24 08:37:30.656043: Current learning rate: 0.00869
1090
- 2023-07-24 08:40:55.134679: train_loss -0.7574
1091
- 2023-07-24 08:40:55.134889: val_loss -0.4785
1092
- 2023-07-24 08:40:55.134991: Pseudo dice [0.8803, 0.7162, 0.3907]
1093
- 2023-07-24 08:40:55.135085: Epoch time: 204.48 s
1094
- 2023-07-24 08:40:57.261309:
1095
- 2023-07-24 08:40:57.261564: Epoch 145
1096
- 2023-07-24 08:40:57.261678: Current learning rate: 0.00868
1097
- 2023-07-24 08:44:23.101116: train_loss -0.7516
1098
- 2023-07-24 08:44:23.106944: val_loss -0.4902
1099
- 2023-07-24 08:44:23.107199: Pseudo dice [0.8784, 0.7066, 0.5606]
1100
- 2023-07-24 08:44:23.107291: Epoch time: 205.84 s
1101
- 2023-07-24 08:44:25.099223:
1102
- 2023-07-24 08:44:25.099430: Epoch 146
1103
- 2023-07-24 08:44:25.099553: Current learning rate: 0.00868
1104
- 2023-07-24 08:47:56.039377: train_loss -0.759
1105
- 2023-07-24 08:47:56.039689: val_loss -0.5251
1106
- 2023-07-24 08:47:56.039789: Pseudo dice [0.8786, 0.7236, 0.5975]
1107
- 2023-07-24 08:47:56.039928: Epoch time: 210.94 s
1108
- 2023-07-24 08:47:58.670197:
1109
- 2023-07-24 08:47:58.670312: Epoch 147
1110
- 2023-07-24 08:47:58.670412: Current learning rate: 0.00867
1111
- 2023-07-24 08:51:23.394095: train_loss -0.7657
1112
- 2023-07-24 08:51:23.394388: val_loss -0.4863
1113
- 2023-07-24 08:51:23.394477: Pseudo dice [0.8613, 0.7275, 0.4779]
1114
- 2023-07-24 08:51:23.394634: Epoch time: 204.72 s
1115
- 2023-07-24 08:51:25.151827:
1116
- 2023-07-24 08:51:25.152007: Epoch 148
1117
- 2023-07-24 08:51:25.152129: Current learning rate: 0.00866
1118
- 2023-07-24 08:55:00.305970: train_loss -0.759
1119
- 2023-07-24 08:55:00.306156: val_loss -0.4877
1120
- 2023-07-24 08:55:00.306260: Pseudo dice [0.8708, 0.7051, 0.5229]
1121
- 2023-07-24 08:55:00.306348: Epoch time: 215.16 s
1122
- 2023-07-24 08:55:03.166358:
1123
- 2023-07-24 08:55:03.166571: Epoch 149
1124
- 2023-07-24 08:55:03.166703: Current learning rate: 0.00865
1125
- 2023-07-24 08:58:36.158555: train_loss -0.7584
1126
- 2023-07-24 08:58:36.163698: val_loss -0.5075
1127
- 2023-07-24 08:58:36.163839: Pseudo dice [0.8796, 0.7186, 0.6129]
1128
- 2023-07-24 08:58:36.163924: Epoch time: 212.99 s
1129
- 2023-07-24 08:58:39.431387:
1130
- 2023-07-24 08:58:39.431509: Epoch 150
1131
- 2023-07-24 08:58:39.431633: Current learning rate: 0.00864
1132
- 2023-07-24 09:02:08.121246: train_loss -0.7586
1133
- 2023-07-24 09:02:08.121416: val_loss -0.5081
1134
- 2023-07-24 09:02:08.121502: Pseudo dice [0.8847, 0.727, 0.6269]
1135
- 2023-07-24 09:02:08.121582: Epoch time: 208.69 s
1136
- 2023-07-24 09:02:10.686369:
1137
- 2023-07-24 09:02:10.686602: Epoch 151
1138
- 2023-07-24 09:02:10.686719: Current learning rate: 0.00863
1139
- 2023-07-24 09:05:44.706747: train_loss -0.7629
1140
- 2023-07-24 09:05:44.706952: val_loss -0.4625
1141
- 2023-07-24 09:05:44.707042: Pseudo dice [0.8698, 0.6773, 0.509]
1142
- 2023-07-24 09:05:44.707127: Epoch time: 214.02 s
1143
- 2023-07-24 09:05:48.504527:
1144
- 2023-07-24 09:05:48.504706: Epoch 152
1145
- 2023-07-24 09:05:48.504832: Current learning rate: 0.00862
1146
- 2023-07-24 09:09:19.069351: train_loss -0.7578
1147
- 2023-07-24 09:09:19.069588: val_loss -0.5
1148
- 2023-07-24 09:09:19.069682: Pseudo dice [0.8778, 0.7217, 0.5765]
1149
- 2023-07-24 09:09:19.069833: Epoch time: 210.57 s
1150
- 2023-07-24 09:09:21.075281:
1151
- 2023-07-24 09:09:21.075402: Epoch 153
1152
- 2023-07-24 09:09:21.075518: Current learning rate: 0.00861
1153
- 2023-07-24 09:12:58.820027: train_loss -0.7531
1154
- 2023-07-24 09:12:58.826884: val_loss -0.4773
1155
- 2023-07-24 09:12:58.827152: Pseudo dice [0.8738, 0.7111, 0.5466]
1156
- 2023-07-24 09:12:58.827244: Epoch time: 217.75 s
1157
- 2023-07-24 09:13:02.862645:
1158
- 2023-07-24 09:13:02.862877: Epoch 154
1159
- 2023-07-24 09:13:02.862992: Current learning rate: 0.0086
1160
- 2023-07-24 09:16:28.130270: train_loss -0.7474
1161
- 2023-07-24 09:16:28.130463: val_loss -0.4756
1162
- 2023-07-24 09:16:28.130564: Pseudo dice [0.8795, 0.7165, 0.489]
1163
- 2023-07-24 09:16:28.130655: Epoch time: 205.27 s
1164
- 2023-07-24 09:16:30.896615:
1165
- 2023-07-24 09:16:30.896794: Epoch 155
1166
- 2023-07-24 09:16:30.896921: Current learning rate: 0.00859
1167
- 2023-07-24 09:20:10.606297: train_loss -0.7662
1168
- 2023-07-24 09:20:10.609706: val_loss -0.4819
1169
- 2023-07-24 09:20:10.609968: Pseudo dice [0.8773, 0.711, 0.5174]
1170
- 2023-07-24 09:20:10.610058: Epoch time: 219.71 s
1171
- 2023-07-24 09:20:13.662477:
1172
- 2023-07-24 09:20:13.662828: Epoch 156
1173
- 2023-07-24 09:20:13.662953: Current learning rate: 0.00858
1174
- 2023-07-24 09:23:55.274102: train_loss -0.7626
1175
- 2023-07-24 09:23:55.274285: val_loss -0.4917
1176
- 2023-07-24 09:23:55.274372: Pseudo dice [0.8786, 0.7163, 0.5242]
1177
- 2023-07-24 09:23:55.274455: Epoch time: 221.61 s
1178
- 2023-07-24 09:23:56.839917:
1179
- 2023-07-24 09:23:56.840061: Epoch 157
1180
- 2023-07-24 09:23:56.840175: Current learning rate: 0.00858
1181
- 2023-07-24 09:27:20.148015: train_loss -0.7542
1182
- 2023-07-24 09:27:20.148191: val_loss -0.5016
1183
- 2023-07-24 09:27:20.148277: Pseudo dice [0.8869, 0.7229, 0.5561]
1184
- 2023-07-24 09:27:20.148360: Epoch time: 203.31 s
1185
- 2023-07-24 09:27:21.869529:
1186
- 2023-07-24 09:27:21.869649: Epoch 158
1187
- 2023-07-24 09:27:21.869765: Current learning rate: 0.00857
1188
- 2023-07-24 09:30:58.994606: train_loss -0.7622
1189
- 2023-07-24 09:30:58.994871: val_loss -0.4855
1190
- 2023-07-24 09:30:58.994982: Pseudo dice [0.8842, 0.7165, 0.4867]
1191
- 2023-07-24 09:30:58.995074: Epoch time: 217.13 s
1192
- 2023-07-24 09:31:03.135907:
1193
- 2023-07-24 09:31:03.136067: Epoch 159
1194
- 2023-07-24 09:31:03.136214: Current learning rate: 0.00856
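Note on the fold_0 log above: every epoch records the train and validation losses, a per-class "Pseudo dice" triple for the three foreground labels, and the epoch time, and the "Yayy! New best EMA pseudo Dice" lines refer to the exponential moving average of the mean of those per-class values. For convenience, a minimal sketch for pulling those numbers back out of a raw log file is given below. It assumes only the line format visible above; the log path is a placeholder and should point at the log file actually kept on disk.

import re
from pathlib import Path

# Placeholder path: adjust to the training log you actually have locally.
LOG_PATH = Path("fold_0/training_log_2023_7_24_00_01_52.txt")

dice_pattern = re.compile(r"Pseudo dice \[([^\]]+)\]")

per_epoch = []  # one list of per-class pseudo Dice values per logged epoch
for line in LOG_PATH.read_text().splitlines():
    match = dice_pattern.search(line)
    if match:
        per_epoch.append([float(v) for v in match.group(1).split(",")])

# Mean foreground pseudo Dice per logged epoch, i.e. roughly the quantity the EMA tracks.
for i, values in enumerate(per_epoch):
    print(f"entry {i}: mean pseudo dice = {sum(values) / len(values):.4f}")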
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/.ipynb_checkpoints/Untitled-checkpoint.ipynb DELETED
@@ -1,6 +0,0 @@
1
- {
2
- "cells": [],
3
- "metadata": {},
4
- "nbformat": 4,
5
- "nbformat_minor": 5
6
- }
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/.ipynb_checkpoints/progress-checkpoint.png DELETED

Git LFS Details

  • SHA256: 8fc57b4caaa26cf16f39f128306a43910994edbe76b8c3ae7ccf055fd818ebf7
  • Pointer size: 131 Bytes
  • Size of remote file: 463 kB
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/Untitled.ipynb DELETED
@@ -1,101 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": 7,
6
- "id": "f69d6289-cca0-4546-baf8-c3e0bd052370",
7
- "metadata": {},
8
- "outputs": [
9
- {
10
- "name": "stdout",
11
- "output_type": "stream",
12
- "text": [
13
- " \u001b[0m\u001b[01;34mProstate158\u001b[0m/ create_nnunet_dataset.py \u001b[01;34mnnUNet_results\u001b[0m/\n",
14
- "'Untitled (2) (1) (2).ipynb' \u001b[01;34mnnUNet_preprocessed\u001b[0m/\n",
15
- " Untitled.ipynb \u001b[01;34mnnUNet_raw\u001b[0m/\n"
16
- ]
17
- }
18
- ],
19
- "source": [
20
- "ls"
21
- ]
22
- },
23
- {
24
- "cell_type": "code",
25
- "execution_count": 9,
26
- "id": "d358c520-7bfb-4846-a9a7-e01a151b6912",
27
- "metadata": {
28
- "scrolled": true
29
- },
30
- "outputs": [
31
- {
32
- "ename": "SyntaxError",
33
- "evalue": "keyword argument repeated: path_in_repo (2447229422.py, line 16)",
34
- "output_type": "error",
35
- "traceback": [
36
- "\u001b[0;36m Cell \u001b[0;32mIn[9], line 16\u001b[0;36m\u001b[0m\n\u001b[0;31m path_in_repo=\"nnUNet_results\",\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m keyword argument repeated: path_in_repo\n"
37
- ]
38
- }
39
- ],
40
- "source": [
41
- "from huggingface_hub import HfApi\n",
42
- "api = HfApi()\n",
43
- "\n",
44
- "api.create_repo(\n",
45
- " repo_id=\"osbm/prostate158_nnUNet_results_3d_fullres2\",\n",
46
- " repo_type=\"dataset\",\n",
47
- " exist_ok=True,\n",
48
- " private=False,\n",
49
- ")\n",
50
- "# Upload all the content from the local folder to your remote Space.\n",
51
- "# By default, files are uploaded at the root of the repo\n",
52
- "api.upload_folder(\n",
53
- " folder_path=\"nnUNet_results\",\n",
54
- " path_in_repo=\"nnUNet_results\",\n",
55
- " repo_id=\"osbm/prostate158_nnUNet_results_3d_fullres2\",\n",
56
- " # path_in_repo=\"nnUNet_results\",\n",
57
- " repo_type=\"dataset\",\n",
58
- ")\n",
59
- "# api.upload_folder(\n",
60
- "# folder_path=\"nnUNet_preprocessed\",\n",
61
- "# repo_id=\"osbm/prostate158_nnUNet_results_3d_fullres2\",\n",
62
- "# repo_type=\"dataset\",\n",
63
- "# )\n",
64
- "# api.upload_folder(\n",
65
- "# folder_path=\"nnUNet_raw\",\n",
66
- "# repo_id=\"osbm/prostate158_nnUNet_results_3d_fullres2\",\n",
67
- "# repo_type=\"dataset\",\n",
68
- "# )"
69
- ]
70
- },
71
- {
72
- "cell_type": "code",
73
- "execution_count": null,
74
- "id": "9dc0670f-31b2-4b0d-b756-043b798d865e",
75
- "metadata": {},
76
- "outputs": [],
77
- "source": []
78
- }
79
- ],
80
- "metadata": {
81
- "kernelspec": {
82
- "display_name": "Python 3 (ipykernel)",
83
- "language": "python",
84
- "name": "python3"
85
- },
86
- "language_info": {
87
- "codemirror_mode": {
88
- "name": "ipython",
89
- "version": 3
90
- },
91
- "file_extension": ".py",
92
- "mimetype": "text/x-python",
93
- "name": "python",
94
- "nbconvert_exporter": "python",
95
- "pygments_lexer": "ipython3",
96
- "version": "3.10.6"
97
- }
98
- },
99
- "nbformat": 4,
100
- "nbformat_minor": 5
101
- }
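
Note on the upload cell in the deleted notebook above: it fails with "SyntaxError: keyword argument repeated: path_in_repo" because path_in_repo is passed twice to api.upload_folder. A minimal corrected sketch, assuming the same repo id and local folder as in the notebook, would be:

from huggingface_hub import HfApi

api = HfApi()

# Create the dataset repo (no-op if it already exists).
api.create_repo(
    repo_id="osbm/prostate158_nnUNet_results_3d_fullres2",
    repo_type="dataset",
    exist_ok=True,
    private=False,
)

# Upload the local nnUNet_results folder under nnUNet_results in the repo.
# path_in_repo appears exactly once here, which resolves the SyntaxError.
api.upload_folder(
    folder_path="nnUNet_results",
    path_in_repo="nnUNet_results",
    repo_id="osbm/prostate158_nnUNet_results_3d_fullres2",
    repo_type="dataset",
)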
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/checkpoint_best.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:71ee9b51374f2933c99366df10825efcc8b2cfb394ae7aabc1b9064db285e0c9
3
- size 356800593
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/debug.json DELETED
@@ -1,52 +0,0 @@
1
- {
2
- "_best_ema": "None",
3
- "batch_size": "2",
4
- "configuration_manager": "{'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}",
5
- "configuration_name": "3d_fullres",
6
- "cudnn_version": 8500,
7
- "current_epoch": "0",
8
- "dataloader_train": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7f974c2f5780>",
9
- "dataloader_train.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7f974c2f5090>",
10
- "dataloader_train.num_processes": "4",
11
- "dataloader_train.transform": "Compose ( [Convert3DTo2DTransform( apply_to_keys = ('data', 'seg') ), SpatialTransform( independent_scale_for_each_axis = False, p_rot_per_sample = 0.2, p_scale_per_sample = 0.2, p_el_per_sample = 0, data_key = 'data', label_key = 'seg', patch_size = [256, 256], patch_center_dist_from_border = None, do_elastic_deform = False, alpha = (0, 0), sigma = (0, 0), do_rotation = True, angle_x = (-3.141592653589793, 3.141592653589793), angle_y = (0, 0), angle_z = (0, 0), do_scale = True, scale = (0.7, 1.4), border_mode_data = 'constant', border_cval_data = 0, order_data = 3, border_mode_seg = 'constant', border_cval_seg = -1, order_seg = 1, random_crop = False, p_rot_per_axis = 1, p_independent_scale_per_axis = 1 ), Convert2DTo3DTransform( apply_to_keys = ('data', 'seg') ), GaussianNoiseTransform( p_per_sample = 0.1, data_key = 'data', noise_variance = (0, 0.1), p_per_channel = 1, per_channel = False ), GaussianBlurTransform( p_per_sample = 0.2, different_sigma_per_channel = True, p_per_channel = 0.5, data_key = 'data', blur_sigma = (0.5, 1.0), different_sigma_per_axis = False, p_isotropic = 0 ), BrightnessMultiplicativeTransform( p_per_sample = 0.15, data_key = 'data', multiplier_range = (0.75, 1.25), per_channel = True ), ContrastAugmentationTransform( p_per_sample = 0.15, data_key = 'data', contrast_range = (0.75, 1.25), preserve_range = True, per_channel = True, p_per_channel = 1 ), SimulateLowResolutionTransform( order_upsample = 3, order_downsample = 0, channels = None, per_channel = True, p_per_channel = 0.5, p_per_sample = 0.25, data_key = 'data', zoom_range = (0.5, 1), ignore_axes = (0,) ), GammaTransform( p_per_sample = 0.1, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = True ), GammaTransform( p_per_sample = 0.3, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = False ), MirrorTransform( p_per_sample = 1, data_key = 'data', label_key = 'seg', axes = (0, 1, 2) ), RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.25, 0.25], [0.5, 0.125, 0.125], [0.25, 0.0625, 0.0625], [0.25, 0.03125, 0.03125]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
12
- "dataloader_val": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7f974c2f4880>",
13
- "dataloader_val.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7f974c2f4be0>",
14
- "dataloader_val.num_processes": "2",
15
- "dataloader_val.transform": "Compose ( [RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.25, 0.25], [0.5, 0.125, 0.125], [0.25, 0.0625, 0.0625], [0.25, 0.03125, 0.03125]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
16
- "dataset_json": "{'name': 'Prostate158', 'description': 'Prostate cancer segmentation dataset', 'channel_names': {'0': 'T2', '1': 'ADC', '2': 'DFI'}, 'labels': {'background': 0, 'prostate_inner': 1, 'prostate_outer': 2, 'tumor': 3}, 'numTraining': 139, 'numTest': 19, 'file_ending': '.nii.gz'}",
17
- "device": "cuda:0",
18
- "disable_checkpointing": "False",
19
- "fold": "1",
20
- "folder_with_segs_from_previous_stage": "None",
21
- "gpu_name": "NVIDIA A10G",
22
- "grad_scaler": "<torch.cuda.amp.grad_scaler.GradScaler object at 0x7f974d1d1a50>",
23
- "hostname": "s-osbm-jupyter-f0b83-8689bbb555-5t6kn",
24
- "inference_allowed_mirroring_axes": "(0, 1, 2)",
25
- "initial_lr": "0.01",
26
- "is_cascaded": "False",
27
- "is_ddp": "False",
28
- "label_manager": "<nnunetv2.utilities.label_handling.label_handling.LabelManager object at 0x7f974d1d1c00>",
29
- "local_rank": "0",
30
- "log_file": "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/training_log_2023_7_24_09_31_46.txt",
31
- "logger": "<nnunetv2.training.logging.nnunet_logger.nnUNetLogger object at 0x7f974d1d1ae0>",
32
- "loss": "DeepSupervisionWrapper(\n (loss): DC_and_CE_loss(\n (ce): RobustCrossEntropyLoss()\n (dc): MemoryEfficientSoftDiceLoss()\n )\n)",
33
- "lr_scheduler": "<nnunetv2.training.lr_scheduler.polylr.PolyLRScheduler object at 0x7f974d1d1b40>",
34
- "my_init_kwargs": "{'plans': {'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 31, 'patch_size': [320, 320], 'median_image_size_in_voxels': [270.0, 270.0], 'spacing': [0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [6, 6], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 
'percentile_00_5': 34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}, 'configuration': '3d_fullres', 'fold': 1, 'dataset_json': {'name': 'Prostate158', 'description': 'Prostate cancer segmentation dataset', 'channel_names': {'0': 'T2', '1': 'ADC', '2': 'DFI'}, 'labels': {'background': 0, 'prostate_inner': 1, 'prostate_outer': 2, 'tumor': 3}, 'numTraining': 139, 'numTest': 19, 'file_ending': '.nii.gz'}, 'unpack_dataset': True, 'device': device(type='cuda')}",
35
- "network": "PlainConvUNet",
36
- "num_epochs": "1000",
37
- "num_input_channels": "3",
38
- "num_iterations_per_epoch": "250",
39
- "num_val_iterations_per_epoch": "50",
40
- "optimizer": "SGD (\nParameter Group 0\n dampening: 0\n differentiable: False\n foreach: None\n initial_lr: 0.01\n lr: 0.01\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
41
- "output_folder": "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1",
42
- "output_folder_base": "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres",
43
- "oversample_foreground_percent": "0.33",
44
- "plans_manager": "{'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 31, 'patch_size': [320, 320], 'median_image_size_in_voxels': [270.0, 270.0], 'spacing': [0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [6, 6], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 'percentile_00_5': 
34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}",
45
- "preprocessed_dataset_folder": "nnUNet_preprocessed/Dataset001_Prostate158/nnUNetPlans_3d_fullres",
46
- "preprocessed_dataset_folder_base": "nnUNet_preprocessed/Dataset001_Prostate158",
47
- "save_every": "50",
48
- "torch_version": "2.0.1+cu117",
49
- "unpack_dataset": "True",
50
- "was_initialized": "True",
51
- "weight_decay": "3e-05"
52
- }
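
For reference, the optimizer and learning-rate schedule recorded in this debug.json (SGD with initial_lr 0.01, momentum 0.99, Nesterov, weight_decay 3e-05, PolyLRScheduler over 1000 epochs) correspond roughly to the sketch below. This is not the trainer's exact code: the Conv3d module is a stand-in for the PlainConvUNet instance, and the poly exponent 0.9 is inferred rather than stated in this file (it reproduces the logged rates, e.g. 0.00999 at epoch 1 and 0.00964 at epoch 40).

import torch

# Stand-in module; in the real trainer this would be the PlainConvUNet instance.
net = torch.nn.Conv3d(3, 32, kernel_size=3)

# Values taken from the "optimizer", "initial_lr", "weight_decay" and "num_epochs" fields above.
optimizer = torch.optim.SGD(
    net.parameters(),
    lr=0.01,
    momentum=0.99,
    nesterov=True,
    weight_decay=3e-05,
)

# Poly schedule: lr = initial_lr * (1 - epoch / num_epochs) ** exponent
def poly_lr(initial_lr: float, epoch: int, num_epochs: int = 1000, exponent: float = 0.9) -> float:
    return initial_lr * (1 - epoch / num_epochs) ** exponent

# Set the learning rate at the start of each epoch.
for epoch in range(3):
    for group in optimizer.param_groups:
        group["lr"] = poly_lr(0.01, epoch)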
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/progress.png DELETED

Git LFS Details

  • SHA256: aa0915685fb5de829ef278b8226b09319fcc28347f0a7b9b2ad90d7b87d9a6cd
  • Pointer size: 131 Bytes
  • Size of remote file: 565 kB
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_1/training_log_2023_7_24_09_31_46.txt DELETED
@@ -1,342 +0,0 @@
1
-
2
- #######################################################################
3
- Please cite the following paper when using nnU-Net:
4
- Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
- #######################################################################
6
-
7
-
8
- This is the configuration used by this training:
9
- Configuration name: 3d_fullres
10
- {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}
11
-
12
- These are the global plan.json settings:
13
- {'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 'percentile_00_5': 34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}
14
-
15
- 2023-07-24 09:31:48.662707: unpacking dataset...
16
- 2023-07-24 09:31:51.254056: unpacking done...
17
- 2023-07-24 09:31:51.254646: do_dummy_2d_data_aug: True
18
- 2023-07-24 09:31:51.255391: Using splits from existing split file: nnUNet_preprocessed/Dataset001_Prostate158/splits_final.json
19
- 2023-07-24 09:31:51.255736: The split file contains 5 splits.
20
- 2023-07-24 09:31:51.255795: Desired fold for training: 1
21
- 2023-07-24 09:31:51.255847: This split has 111 training and 28 validation cases.
22
- 2023-07-24 09:31:55.429484: Unable to plot network architecture:
23
- 2023-07-24 09:31:55.429709: module 'torch.onnx' has no attribute '_optimize_trace'
24
- 2023-07-24 09:31:55.479973:
25
- 2023-07-24 09:31:55.480082: Epoch 0
26
- 2023-07-24 09:31:55.480217: Current learning rate: 0.01
27
- 2023-07-24 09:35:51.149318: train_loss -0.0686
28
- 2023-07-24 09:35:51.149595: val_loss -0.1977
29
- 2023-07-24 09:35:51.149694: Pseudo dice [0.6772, 0.3986, 0.0]
30
- 2023-07-24 09:35:51.149858: Epoch time: 235.67 s
31
- 2023-07-24 09:35:51.149975: Yayy! New best EMA pseudo Dice: 0.3586
32
- 2023-07-24 09:35:52.944725:
33
- 2023-07-24 09:35:52.944855: Epoch 1
34
- 2023-07-24 09:35:52.945019: Current learning rate: 0.00999
35
- 2023-07-24 09:39:26.457915: train_loss -0.2509
36
- 2023-07-24 09:39:26.458209: val_loss -0.2695
37
- 2023-07-24 09:39:26.458309: Pseudo dice [0.7352, 0.5579, 0.0]
38
- 2023-07-24 09:39:26.458386: Epoch time: 213.51 s
39
- 2023-07-24 09:39:26.458445: Yayy! New best EMA pseudo Dice: 0.3658
40
- 2023-07-24 09:39:30.325983:
41
- 2023-07-24 09:39:30.326120: Epoch 2
42
- 2023-07-24 09:39:30.326235: Current learning rate: 0.00998
43
- 2023-07-24 09:42:59.369360: train_loss -0.3283
44
- 2023-07-24 09:42:59.369581: val_loss -0.2917
45
- 2023-07-24 09:42:59.369678: Pseudo dice [0.7702, 0.5453, 0.0]
46
- 2023-07-24 09:42:59.369768: Epoch time: 209.04 s
47
- 2023-07-24 09:42:59.369841: Yayy! New best EMA pseudo Dice: 0.3731
48
- 2023-07-24 09:43:02.898393:
49
- 2023-07-24 09:43:02.898701: Epoch 3
50
- 2023-07-24 09:43:02.898844: Current learning rate: 0.00997
51
- 2023-07-24 09:46:30.423032: train_loss -0.3623
52
- 2023-07-24 09:46:30.423313: val_loss -0.362
53
- 2023-07-24 09:46:30.423406: Pseudo dice [0.8084, 0.6354, 0.0]
54
- 2023-07-24 09:46:30.423570: Epoch time: 207.53 s
55
- 2023-07-24 09:46:30.423706: Yayy! New best EMA pseudo Dice: 0.3839
56
- 2023-07-24 09:46:32.679859:
57
- 2023-07-24 09:46:32.679986: Epoch 4
58
- 2023-07-24 09:46:32.680083: Current learning rate: 0.00996
59
- 2023-07-24 09:50:16.112560: train_loss -0.4055
60
- 2023-07-24 09:50:16.112745: val_loss -0.3814
61
- 2023-07-24 09:50:16.112836: Pseudo dice [0.8423, 0.6152, 0.3433]
62
- 2023-07-24 09:50:16.112920: Epoch time: 223.43 s
63
- 2023-07-24 09:50:16.112987: Yayy! New best EMA pseudo Dice: 0.4056
64
- 2023-07-24 09:50:18.759251:
65
- 2023-07-24 09:50:18.759383: Epoch 5
66
- 2023-07-24 09:50:18.759503: Current learning rate: 0.00995
67
- 2023-07-24 09:53:41.007815: train_loss -0.4426
68
- 2023-07-24 09:53:41.007999: val_loss -0.4302
69
- 2023-07-24 09:53:41.008113: Pseudo dice [0.8359, 0.6439, 0.4462]
70
- 2023-07-24 09:53:41.008215: Epoch time: 202.25 s
71
- 2023-07-24 09:53:41.008303: Yayy! New best EMA pseudo Dice: 0.4292
72
- 2023-07-24 09:53:43.767199:
73
- 2023-07-24 09:53:43.767324: Epoch 6
74
- 2023-07-24 09:53:43.767419: Current learning rate: 0.00995
75
- 2023-07-24 09:57:16.562120: train_loss -0.4301
76
- 2023-07-24 09:57:16.562339: val_loss -0.4084
77
- 2023-07-24 09:57:16.562424: Pseudo dice [0.8011, 0.6376, 0.48]
78
- 2023-07-24 09:57:16.562501: Epoch time: 212.8 s
79
- 2023-07-24 09:57:16.562591: Yayy! New best EMA pseudo Dice: 0.4502
80
- 2023-07-24 09:57:19.469062:
81
- 2023-07-24 09:57:19.469288: Epoch 7
82
- 2023-07-24 09:57:19.469407: Current learning rate: 0.00994
83
- 2023-07-24 10:00:48.194506: train_loss -0.4578
84
- 2023-07-24 10:00:48.194689: val_loss -0.4189
85
- 2023-07-24 10:00:48.194808: Pseudo dice [0.8137, 0.6457, 0.4479]
86
- 2023-07-24 10:00:48.194900: Epoch time: 208.73 s
87
- 2023-07-24 10:00:48.194967: Yayy! New best EMA pseudo Dice: 0.4688
88
- 2023-07-24 10:00:52.373474:
89
- 2023-07-24 10:00:52.373721: Epoch 8
90
- 2023-07-24 10:00:52.373865: Current learning rate: 0.00993
91
- 2023-07-24 10:04:19.517230: train_loss -0.498
92
- 2023-07-24 10:04:19.517421: val_loss -0.4187
93
- 2023-07-24 10:04:19.517504: Pseudo dice [0.8555, 0.6358, 0.3124]
94
- 2023-07-24 10:04:19.517583: Epoch time: 207.15 s
95
- 2023-07-24 10:04:19.517648: Yayy! New best EMA pseudo Dice: 0.482
96
- 2023-07-24 10:04:21.806987:
97
- 2023-07-24 10:04:21.807117: Epoch 9
98
- 2023-07-24 10:04:21.807218: Current learning rate: 0.00992
99
- 2023-07-24 10:07:51.359195: train_loss -0.5027
100
- 2023-07-24 10:07:51.370858: val_loss -0.4306
101
- 2023-07-24 10:07:51.371073: Pseudo dice [0.8521, 0.6531, 0.3801]
102
- 2023-07-24 10:07:51.371159: Epoch time: 209.55 s
103
- 2023-07-24 10:07:51.371223: Yayy! New best EMA pseudo Dice: 0.4967
104
- 2023-07-24 10:07:54.287527:
105
- 2023-07-24 10:07:54.287659: Epoch 10
106
- 2023-07-24 10:07:54.287776: Current learning rate: 0.00991
107
- 2023-07-24 10:11:21.020047: train_loss -0.5007
108
- 2023-07-24 10:11:21.020299: val_loss -0.4555
109
- 2023-07-24 10:11:21.020390: Pseudo dice [0.8525, 0.6574, 0.5038]
110
- 2023-07-24 10:11:21.020547: Epoch time: 206.73 s
111
- 2023-07-24 10:11:21.020615: Yayy! New best EMA pseudo Dice: 0.5141
112
- 2023-07-24 10:11:23.595996:
113
- 2023-07-24 10:11:23.596125: Epoch 11
114
- 2023-07-24 10:11:23.596240: Current learning rate: 0.0099
115
- 2023-07-24 10:14:50.060935: train_loss -0.5114
116
- 2023-07-24 10:14:50.061202: val_loss -0.4435
117
- 2023-07-24 10:14:50.061292: Pseudo dice [0.8367, 0.6786, 0.3972]
118
- 2023-07-24 10:14:50.061454: Epoch time: 206.47 s
119
- 2023-07-24 10:14:50.061521: Yayy! New best EMA pseudo Dice: 0.5265
120
- 2023-07-24 10:14:54.044929:
121
- 2023-07-24 10:14:54.045076: Epoch 12
122
- 2023-07-24 10:14:54.045194: Current learning rate: 0.00989
123
- 2023-07-24 10:18:35.033546: train_loss -0.5262
124
- 2023-07-24 10:18:35.033730: val_loss -0.4482
125
- 2023-07-24 10:18:35.033818: Pseudo dice [0.8485, 0.6596, 0.5738]
126
- 2023-07-24 10:18:35.033900: Epoch time: 220.99 s
127
- 2023-07-24 10:18:35.033965: Yayy! New best EMA pseudo Dice: 0.5432
128
- 2023-07-24 10:18:37.229037:
129
- 2023-07-24 10:18:37.229155: Epoch 13
130
- 2023-07-24 10:18:37.229270: Current learning rate: 0.00988
131
- 2023-07-24 10:22:07.718349: train_loss -0.5297
132
- 2023-07-24 10:22:07.718728: val_loss -0.4473
133
- 2023-07-24 10:22:07.718979: Pseudo dice [0.8557, 0.6552, 0.5333]
134
- 2023-07-24 10:22:07.719130: Epoch time: 210.49 s
135
- 2023-07-24 10:22:07.719215: Yayy! New best EMA pseudo Dice: 0.557
136
- 2023-07-24 10:22:12.377048:
137
- 2023-07-24 10:22:12.377682: Epoch 14
138
- 2023-07-24 10:22:12.377826: Current learning rate: 0.00987
139
- 2023-07-24 10:25:35.042624: train_loss -0.544
140
- 2023-07-24 10:25:35.042880: val_loss -0.4672
141
- 2023-07-24 10:25:35.042980: Pseudo dice [0.8415, 0.6909, 0.5861]
142
- 2023-07-24 10:25:35.043072: Epoch time: 202.67 s
143
- 2023-07-24 10:25:35.043142: Yayy! New best EMA pseudo Dice: 0.5719
144
- 2023-07-24 10:25:39.141425:
145
- 2023-07-24 10:25:39.141566: Epoch 15
146
- 2023-07-24 10:25:39.141699: Current learning rate: 0.00986
147
- 2023-07-24 10:29:17.006086: train_loss -0.536
148
- 2023-07-24 10:29:17.006276: val_loss -0.458
149
- 2023-07-24 10:29:17.006367: Pseudo dice [0.8684, 0.6802, 0.4725]
150
- 2023-07-24 10:29:17.006453: Epoch time: 217.87 s
151
- 2023-07-24 10:29:17.006521: Yayy! New best EMA pseudo Dice: 0.5821
152
- 2023-07-24 10:29:19.086107:
153
- 2023-07-24 10:29:19.086326: Epoch 16
154
- 2023-07-24 10:29:19.086447: Current learning rate: 0.00986
155
- 2023-07-24 10:32:52.937337: train_loss -0.5573
156
- 2023-07-24 10:32:52.937526: val_loss -0.4805
157
- 2023-07-24 10:32:52.937625: Pseudo dice [0.8545, 0.6796, 0.6108]
158
- 2023-07-24 10:32:52.937717: Epoch time: 213.85 s
159
- 2023-07-24 10:32:52.937808: Yayy! New best EMA pseudo Dice: 0.5954
160
- 2023-07-24 10:32:55.486784:
161
- 2023-07-24 10:32:55.486909: Epoch 17
162
- 2023-07-24 10:32:55.487025: Current learning rate: 0.00985
163
- 2023-07-24 10:36:29.203138: train_loss -0.5585
164
- 2023-07-24 10:36:29.203351: val_loss -0.4583
165
- 2023-07-24 10:36:29.203443: Pseudo dice [0.8662, 0.6791, 0.4972]
166
- 2023-07-24 10:36:29.203535: Epoch time: 213.72 s
167
- 2023-07-24 10:36:29.203619: Yayy! New best EMA pseudo Dice: 0.604
168
- 2023-07-24 10:36:34.055434:
169
- 2023-07-24 10:36:34.055742: Epoch 18
170
- 2023-07-24 10:36:34.055855: Current learning rate: 0.00984
171
- 2023-07-24 10:40:19.048198: train_loss -0.5594
172
- 2023-07-24 10:40:19.048498: val_loss -0.4511
173
- 2023-07-24 10:40:19.048593: Pseudo dice [0.8624, 0.6682, 0.5106]
174
- 2023-07-24 10:40:19.048678: Epoch time: 224.99 s
175
- 2023-07-24 10:40:19.048744: Yayy! New best EMA pseudo Dice: 0.6116
176
- 2023-07-24 10:40:21.521703:
177
- 2023-07-24 10:40:21.521823: Epoch 19
178
- 2023-07-24 10:40:21.521924: Current learning rate: 0.00983
179
- 2023-07-24 10:43:37.794662: train_loss -0.5578
180
- 2023-07-24 10:43:37.794921: val_loss -0.4709
181
- 2023-07-24 10:43:37.795015: Pseudo dice [0.8779, 0.6729, 0.5978]
182
- 2023-07-24 10:43:37.795315: Epoch time: 196.27 s
183
- 2023-07-24 10:43:37.796330: Yayy! New best EMA pseudo Dice: 0.6221
184
- 2023-07-24 10:43:43.010075:
185
- 2023-07-24 10:43:43.010270: Epoch 20
186
- 2023-07-24 10:43:43.010390: Current learning rate: 0.00982
187
- 2023-07-24 10:47:20.733163: train_loss -0.5605
188
- 2023-07-24 10:47:20.733360: val_loss -0.4645
189
- 2023-07-24 10:47:20.733458: Pseudo dice [0.8742, 0.6775, 0.5699]
190
- 2023-07-24 10:47:20.733543: Epoch time: 217.72 s
191
- 2023-07-24 10:47:20.733614: Yayy! New best EMA pseudo Dice: 0.6306
192
- 2023-07-24 10:47:25.612038:
193
- 2023-07-24 10:47:25.612291: Epoch 21
194
- 2023-07-24 10:47:25.612412: Current learning rate: 0.00981
195
- 2023-07-24 10:50:56.626678: train_loss -0.5768
196
- 2023-07-24 10:50:56.626989: val_loss -0.4678
197
- 2023-07-24 10:50:56.627095: Pseudo dice [0.8667, 0.7095, 0.4915]
198
- 2023-07-24 10:50:56.627192: Epoch time: 211.02 s
199
- 2023-07-24 10:50:56.627282: Yayy! New best EMA pseudo Dice: 0.6364
200
- 2023-07-24 10:50:59.071501:
201
- 2023-07-24 10:50:59.071637: Epoch 22
202
- 2023-07-24 10:50:59.071756: Current learning rate: 0.0098
203
- 2023-07-24 10:54:23.458703: train_loss -0.5779
204
- 2023-07-24 10:54:23.458929: val_loss -0.4757
205
- 2023-07-24 10:54:23.459019: Pseudo dice [0.8696, 0.6799, 0.5438]
206
- 2023-07-24 10:54:23.459159: Epoch time: 204.39 s
207
- 2023-07-24 10:54:23.459226: Yayy! New best EMA pseudo Dice: 0.6426
208
- 2023-07-24 10:54:25.869165:
209
- 2023-07-24 10:54:25.869285: Epoch 23
210
- 2023-07-24 10:54:25.869403: Current learning rate: 0.00979
211
- 2023-07-24 10:57:56.216782: train_loss -0.5858
212
- 2023-07-24 10:57:56.217053: val_loss -0.436
213
- 2023-07-24 10:57:56.217215: Pseudo dice [0.8524, 0.6452, 0.5389]
214
- 2023-07-24 10:57:56.217304: Epoch time: 210.35 s
215
- 2023-07-24 10:57:56.217423: Yayy! New best EMA pseudo Dice: 0.6462
216
- 2023-07-24 10:58:00.283592:
217
- 2023-07-24 10:58:00.283757: Epoch 24
218
- 2023-07-24 10:58:00.283865: Current learning rate: 0.00978
219
- 2023-07-24 11:01:31.659229: train_loss -0.5859
220
- 2023-07-24 11:01:31.659401: val_loss -0.4867
221
- 2023-07-24 11:01:31.659505: Pseudo dice [0.8807, 0.7036, 0.4442]
222
- 2023-07-24 11:01:31.659606: Epoch time: 211.38 s
223
- 2023-07-24 11:01:31.659684: Yayy! New best EMA pseudo Dice: 0.6492
224
- 2023-07-24 11:01:34.877736:
225
- 2023-07-24 11:01:34.877873: Epoch 25
226
- 2023-07-24 11:01:34.877987: Current learning rate: 0.00977
227
- 2023-07-24 11:05:10.845961: train_loss -0.5884
228
- 2023-07-24 11:05:10.846144: val_loss -0.4324
229
- 2023-07-24 11:05:10.846237: Pseudo dice [0.8473, 0.6541, 0.4288]
230
- 2023-07-24 11:05:10.846323: Epoch time: 215.97 s
231
- 2023-07-24 11:05:13.643211:
232
- 2023-07-24 11:05:13.643358: Epoch 26
233
- 2023-07-24 11:05:13.643473: Current learning rate: 0.00977
234
- 2023-07-24 11:08:44.563033: train_loss -0.5912
235
- 2023-07-24 11:08:44.563225: val_loss -0.4619
236
- 2023-07-24 11:08:44.563319: Pseudo dice [0.8776, 0.6791, 0.5158]
237
- 2023-07-24 11:08:44.563408: Epoch time: 210.92 s
238
- 2023-07-24 11:08:44.563480: Yayy! New best EMA pseudo Dice: 0.6528
239
- 2023-07-24 11:08:47.180097:
240
- 2023-07-24 11:08:47.180220: Epoch 27
241
- 2023-07-24 11:08:47.180338: Current learning rate: 0.00976
242
- 2023-07-24 11:12:26.577604: train_loss -0.5976
243
- 2023-07-24 11:12:26.577808: val_loss -0.4728
244
- 2023-07-24 11:12:26.577904: Pseudo dice [0.861, 0.7074, 0.4756]
245
- 2023-07-24 11:12:26.577990: Epoch time: 219.4 s
246
- 2023-07-24 11:12:26.578061: Yayy! New best EMA pseudo Dice: 0.6557
247
- 2023-07-24 11:12:29.643962:
248
- 2023-07-24 11:12:29.644087: Epoch 28
249
- 2023-07-24 11:12:29.644205: Current learning rate: 0.00975
250
- 2023-07-24 11:15:57.396276: train_loss -0.6014
251
- 2023-07-24 11:15:57.396457: val_loss -0.4564
252
- 2023-07-24 11:15:57.396578: Pseudo dice [0.8687, 0.6885, 0.4734]
253
- 2023-07-24 11:15:57.396668: Epoch time: 207.75 s
254
- 2023-07-24 11:15:57.396744: Yayy! New best EMA pseudo Dice: 0.6578
255
- 2023-07-24 11:15:59.785451:
256
- 2023-07-24 11:15:59.785571: Epoch 29
257
- 2023-07-24 11:15:59.785674: Current learning rate: 0.00974
258
- 2023-07-24 11:19:32.018114: train_loss -0.6025
259
- 2023-07-24 11:19:32.018373: val_loss -0.483
260
- 2023-07-24 11:19:32.018463: Pseudo dice [0.873, 0.7188, 0.5497]
261
- 2023-07-24 11:19:32.018615: Epoch time: 212.23 s
262
- 2023-07-24 11:19:32.018681: Yayy! New best EMA pseudo Dice: 0.6634
263
- 2023-07-24 11:19:37.166697:
264
- 2023-07-24 11:19:37.166853: Epoch 30
265
- 2023-07-24 11:19:37.166968: Current learning rate: 0.00973
266
- 2023-07-24 11:23:08.806290: train_loss -0.613
267
- 2023-07-24 11:23:08.806573: val_loss -0.4461
268
- 2023-07-24 11:23:08.806666: Pseudo dice [0.8666, 0.6895, 0.3991]
269
- 2023-07-24 11:23:08.806845: Epoch time: 211.64 s
270
- 2023-07-24 11:23:10.244822:
271
- 2023-07-24 11:23:10.244984: Epoch 31
272
- 2023-07-24 11:23:10.245092: Current learning rate: 0.00972
273
- 2023-07-24 11:26:37.710564: train_loss -0.6145
274
- 2023-07-24 11:26:37.710835: val_loss -0.4758
275
- 2023-07-24 11:26:37.710931: Pseudo dice [0.8711, 0.705, 0.5377]
276
- 2023-07-24 11:26:37.711078: Epoch time: 207.47 s
277
- 2023-07-24 11:26:37.711143: Yayy! New best EMA pseudo Dice: 0.6665
278
- 2023-07-24 11:26:41.956009:
279
- 2023-07-24 11:26:41.956271: Epoch 32
280
- 2023-07-24 11:26:41.956385: Current learning rate: 0.00971
281
- 2023-07-24 11:30:12.278454: train_loss -0.6137
282
- 2023-07-24 11:30:12.278686: val_loss -0.4835
283
- 2023-07-24 11:30:12.278804: Pseudo dice [0.867, 0.6864, 0.5483]
284
- 2023-07-24 11:30:12.278906: Epoch time: 210.32 s
285
- 2023-07-24 11:30:12.278967: Yayy! New best EMA pseudo Dice: 0.6699
286
- 2023-07-24 11:30:14.981015:
287
- 2023-07-24 11:30:14.981150: Epoch 33
288
- 2023-07-24 11:30:14.981266: Current learning rate: 0.0097
289
- 2023-07-24 11:33:53.618293: train_loss -0.6173
290
- 2023-07-24 11:33:53.630888: val_loss -0.4788
291
- 2023-07-24 11:33:53.631126: Pseudo dice [0.8711, 0.7172, 0.5993]
292
- 2023-07-24 11:33:53.631213: Epoch time: 218.64 s
293
- 2023-07-24 11:33:53.631362: Yayy! New best EMA pseudo Dice: 0.6758
294
- 2023-07-24 11:33:56.847697:
295
- 2023-07-24 11:33:56.847836: Epoch 34
296
- 2023-07-24 11:33:56.847952: Current learning rate: 0.00969
297
- 2023-07-24 11:37:31.570033: train_loss -0.6188
298
- 2023-07-24 11:37:31.570266: val_loss -0.4531
299
- 2023-07-24 11:37:31.570361: Pseudo dice [0.8598, 0.6848, 0.5296]
300
- 2023-07-24 11:37:31.570451: Epoch time: 214.72 s
301
- 2023-07-24 11:37:31.570522: Yayy! New best EMA pseudo Dice: 0.6774
302
- 2023-07-24 11:37:33.714445:
303
- 2023-07-24 11:37:33.714569: Epoch 35
304
- 2023-07-24 11:37:33.714672: Current learning rate: 0.00968
305
- 2023-07-24 11:41:14.940244: train_loss -0.6204
306
- 2023-07-24 11:41:14.940511: val_loss -0.4904
307
- 2023-07-24 11:41:14.940600: Pseudo dice [0.8691, 0.7124, 0.6012]
308
- 2023-07-24 11:41:14.940678: Epoch time: 221.23 s
309
- 2023-07-24 11:41:14.940737: Yayy! New best EMA pseudo Dice: 0.6824
310
- 2023-07-24 11:41:17.089798:
311
- 2023-07-24 11:41:17.090038: Epoch 36
312
- 2023-07-24 11:41:17.090159: Current learning rate: 0.00968
313
- 2023-07-24 11:44:46.328210: train_loss -0.6147
314
- 2023-07-24 11:44:46.328429: val_loss -0.4689
315
- 2023-07-24 11:44:46.328522: Pseudo dice [0.8711, 0.6956, 0.4682]
316
- 2023-07-24 11:44:46.328610: Epoch time: 209.24 s
317
- 2023-07-24 11:44:48.497017:
318
- 2023-07-24 11:44:48.497168: Epoch 37
319
- 2023-07-24 11:44:48.497303: Current learning rate: 0.00967
320
- 2023-07-24 11:48:28.425315: train_loss -0.6234
321
- 2023-07-24 11:48:28.425558: val_loss -0.4836
322
- 2023-07-24 11:48:28.425650: Pseudo dice [0.8754, 0.7042, 0.5211]
323
- 2023-07-24 11:48:28.425732: Epoch time: 219.93 s
324
- 2023-07-24 11:48:28.425794: Yayy! New best EMA pseudo Dice: 0.6838
325
- 2023-07-24 11:48:31.154660:
326
- 2023-07-24 11:48:31.154810: Epoch 38
327
- 2023-07-24 11:48:31.154930: Current learning rate: 0.00966
328
- 2023-07-24 11:51:54.038406: train_loss -0.6255
329
- 2023-07-24 11:51:54.050902: val_loss -0.48
330
- 2023-07-24 11:51:54.051198: Pseudo dice [0.8712, 0.6851, 0.5032]
331
- 2023-07-24 11:51:54.051365: Epoch time: 202.88 s
332
- 2023-07-24 11:51:54.051452: Yayy! New best EMA pseudo Dice: 0.6841
333
- 2023-07-24 11:51:56.363528:
334
- 2023-07-24 11:51:56.363662: Epoch 39
335
- 2023-07-24 11:51:56.363833: Current learning rate: 0.00965
336
- 2023-07-24 11:55:21.706032: train_loss -0.6288
337
- 2023-07-24 11:55:21.706228: val_loss -0.4378
338
- 2023-07-24 11:55:21.706325: Pseudo dice [0.8811, 0.6625, 0.4634]
339
- 2023-07-24 11:55:21.706409: Epoch time: 205.34 s
340
- 2023-07-24 11:55:24.489447:
341
- 2023-07-24 11:55:24.489575: Epoch 40
342
- 2023-07-24 11:55:24.489692: Current learning rate: 0.00964
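
A note on the "EMA pseudo Dice" values in the log above: the best-checkpoint messages track an exponential moving average of the mean foreground Dice rather than the raw per-epoch values. The 0.9/0.1 weighting below is inferred from the logged numbers, e.g. epoch 0 gives mean(0.6772, 0.3986, 0.0) = 0.3586 and epoch 1 gives 0.9 * 0.3586 + 0.1 * mean(0.7352, 0.5579, 0.0) ≈ 0.3658, matching the log. A minimal sketch:

# Sketch of how the "EMA pseudo Dice" in this log evolves (weights inferred from the logged values).
def ema_pseudo_dice(per_class_dice_per_epoch, decay=0.9):
    ema = None
    for dice in per_class_dice_per_epoch:
        mean_dice = sum(dice) / len(dice)
        ema = mean_dice if ema is None else decay * ema + (1 - decay) * mean_dice
        yield round(ema, 4)

# First three epochs of this fold_1 log:
print(list(ema_pseudo_dice([
    [0.6772, 0.3986, 0.0],
    [0.7352, 0.5579, 0.0],
    [0.7702, 0.5453, 0.0],
])))  # -> [0.3586, 0.3658, 0.3731]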
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_2/debug.json DELETED
@@ -1,52 +0,0 @@
1
- {
2
- "_best_ema": "None",
3
- "batch_size": "2",
4
- "configuration_manager": "{'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}",
5
- "configuration_name": "3d_fullres",
6
- "cudnn_version": 8500,
7
- "current_epoch": "0",
8
- "dataloader_train": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7fdcde4d94e0>",
9
- "dataloader_train.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7fdcde4d9180>",
10
- "dataloader_train.num_processes": "4",
11
- "dataloader_train.transform": "Compose ( [Convert3DTo2DTransform( apply_to_keys = ('data', 'seg') ), SpatialTransform( independent_scale_for_each_axis = False, p_rot_per_sample = 0.2, p_scale_per_sample = 0.2, p_el_per_sample = 0, data_key = 'data', label_key = 'seg', patch_size = [256, 256], patch_center_dist_from_border = None, do_elastic_deform = False, alpha = (0, 0), sigma = (0, 0), do_rotation = True, angle_x = (-3.141592653589793, 3.141592653589793), angle_y = (0, 0), angle_z = (0, 0), do_scale = True, scale = (0.7, 1.4), border_mode_data = 'constant', border_cval_data = 0, order_data = 3, border_mode_seg = 'constant', border_cval_seg = -1, order_seg = 1, random_crop = False, p_rot_per_axis = 1, p_independent_scale_per_axis = 1 ), Convert2DTo3DTransform( apply_to_keys = ('data', 'seg') ), GaussianNoiseTransform( p_per_sample = 0.1, data_key = 'data', noise_variance = (0, 0.1), p_per_channel = 1, per_channel = False ), GaussianBlurTransform( p_per_sample = 0.2, different_sigma_per_channel = True, p_per_channel = 0.5, data_key = 'data', blur_sigma = (0.5, 1.0), different_sigma_per_axis = False, p_isotropic = 0 ), BrightnessMultiplicativeTransform( p_per_sample = 0.15, data_key = 'data', multiplier_range = (0.75, 1.25), per_channel = True ), ContrastAugmentationTransform( p_per_sample = 0.15, data_key = 'data', contrast_range = (0.75, 1.25), preserve_range = True, per_channel = True, p_per_channel = 1 ), SimulateLowResolutionTransform( order_upsample = 3, order_downsample = 0, channels = None, per_channel = True, p_per_channel = 0.5, p_per_sample = 0.25, data_key = 'data', zoom_range = (0.5, 1), ignore_axes = (0,) ), GammaTransform( p_per_sample = 0.1, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = True ), GammaTransform( p_per_sample = 0.3, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = False ), MirrorTransform( p_per_sample = 1, data_key = 'data', label_key = 'seg', axes = (0, 1, 2) ), RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.25, 0.25], [0.5, 0.125, 0.125], [0.25, 0.0625, 0.0625], [0.25, 0.03125, 0.03125]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
12
- "dataloader_val": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7fdcde4d90c0>",
13
- "dataloader_val.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7fdcde4d90f0>",
14
- "dataloader_val.num_processes": "2",
15
- "dataloader_val.transform": "Compose ( [RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.25, 0.25], [0.5, 0.125, 0.125], [0.25, 0.0625, 0.0625], [0.25, 0.03125, 0.03125]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
16
- "dataset_json": "{'name': 'Prostate158', 'description': 'Prostate cancer segmentation dataset', 'channel_names': {'0': 'T2', '1': 'ADC', '2': 'DFI'}, 'labels': {'background': 0, 'prostate_inner': 1, 'prostate_outer': 2, 'tumor': 3}, 'numTraining': 139, 'numTest': 19, 'file_ending': '.nii.gz'}",
17
- "device": "cuda:0",
18
- "disable_checkpointing": "False",
19
- "fold": "2",
20
- "folder_with_segs_from_previous_stage": "None",
21
- "gpu_name": "NVIDIA A10G",
22
- "grad_scaler": "<torch.cuda.amp.grad_scaler.GradScaler object at 0x7fdce67599f0>",
23
- "hostname": "s-osbm-jupyter-f0b83-8689bbb555-5t6kn",
24
- "inference_allowed_mirroring_axes": "(0, 1, 2)",
25
- "initial_lr": "0.01",
26
- "is_cascaded": "False",
27
- "is_ddp": "False",
28
- "label_manager": "<nnunetv2.utilities.label_handling.label_handling.LabelManager object at 0x7fdce6759ba0>",
29
- "local_rank": "0",
30
- "log_file": "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_2/training_log_2023_7_24_11_56_27.txt",
31
- "logger": "<nnunetv2.training.logging.nnunet_logger.nnUNetLogger object at 0x7fdce6759a80>",
32
- "loss": "DeepSupervisionWrapper(\n (loss): DC_and_CE_loss(\n (ce): RobustCrossEntropyLoss()\n (dc): MemoryEfficientSoftDiceLoss()\n )\n)",
33
- "lr_scheduler": "<nnunetv2.training.lr_scheduler.polylr.PolyLRScheduler object at 0x7fdce6759ae0>",
34
- "my_init_kwargs": "{'plans': {'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 31, 'patch_size': [320, 320], 'median_image_size_in_voxels': [270.0, 270.0], 'spacing': [0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [6, 6], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 
'percentile_00_5': 34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}, 'configuration': '3d_fullres', 'fold': 2, 'dataset_json': {'name': 'Prostate158', 'description': 'Prostate cancer segmentation dataset', 'channel_names': {'0': 'T2', '1': 'ADC', '2': 'DFI'}, 'labels': {'background': 0, 'prostate_inner': 1, 'prostate_outer': 2, 'tumor': 3}, 'numTraining': 139, 'numTest': 19, 'file_ending': '.nii.gz'}, 'unpack_dataset': True, 'device': device(type='cuda')}",
35
- "network": "PlainConvUNet",
36
- "num_epochs": "1000",
37
- "num_input_channels": "3",
38
- "num_iterations_per_epoch": "250",
39
- "num_val_iterations_per_epoch": "50",
40
- "optimizer": "SGD (\nParameter Group 0\n dampening: 0\n differentiable: False\n foreach: None\n initial_lr: 0.01\n lr: 0.01\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
41
- "output_folder": "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_2",
42
- "output_folder_base": "nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres",
43
- "oversample_foreground_percent": "0.33",
44
- "plans_manager": "{'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 31, 'patch_size': [320, 320], 'median_image_size_in_voxels': [270.0, 270.0], 'spacing': [0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [6, 6], 'pool_op_kernel_sizes': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'conv_kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'unet_max_num_features': 512, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': True}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}}, 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 'percentile_00_5': 
34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}",
45
- "preprocessed_dataset_folder": "nnUNet_preprocessed/Dataset001_Prostate158/nnUNetPlans_3d_fullres",
46
- "preprocessed_dataset_folder_base": "nnUNet_preprocessed/Dataset001_Prostate158",
47
- "save_every": "50",
48
- "torch_version": "2.0.1+cu117",
49
- "unpack_dataset": "True",
50
- "was_initialized": "True",
51
- "weight_decay": "3e-05"
52
- }
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_2/training_log_2023_7_24_11_56_27.txt DELETED
@@ -1,26 +0,0 @@
1
-
2
- #######################################################################
3
- Please cite the following paper when using nnU-Net:
4
- Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
5
- #######################################################################
6
-
7
-
8
- This is the configuration used by this training:
9
- Configuration name: 3d_fullres
10
- {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}
-
- These are the global plan.json settings:
- {'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 'percentile_00_5': 34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}
-
- 2023-07-24 11:56:29.546341: unpacking dataset...
- 2023-07-24 11:56:32.330317: unpacking done...
- 2023-07-24 11:56:32.331048: do_dummy_2d_data_aug: True
- 2023-07-24 11:56:32.332050: Using splits from existing split file: nnUNet_preprocessed/Dataset001_Prostate158/splits_final.json
- 2023-07-24 11:56:32.332453: The split file contains 5 splits.
- 2023-07-24 11:56:32.332518: Desired fold for training: 2
- 2023-07-24 11:56:32.332569: This split has 111 training and 28 validation cases.
- 2023-07-24 11:56:37.249189: Unable to plot network architecture:
- 2023-07-24 11:56:37.249434: module 'torch.onnx' has no attribute '_optimize_trace'
- 2023-07-24 11:56:37.297982:
- 2023-07-24 11:56:37.298078: Epoch 0
- 2023-07-24 11:56:37.298198: Current learning rate: 0.01
 
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_3/training_log_2023_7_24_11_56_49.txt DELETED
@@ -1,21 +0,0 @@
-
- #######################################################################
- Please cite the following paper when using nnU-Net:
- Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
- #######################################################################
-
-
- This is the configuration used by this training:
- Configuration name: 3d_fullres
- {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [28, 256, 256], 'median_image_size_in_voxels': [25.0, 270.0, 270.0], 'spacing': [2.999998998641968, 0.4017857015132904, 0.4017857015132904], 'normalization_schemes': ['ZScoreNormalization', 'ZScoreNormalization', 'ZScoreNormalization'], 'use_mask_for_norm': [False, False, False], 'UNet_class_name': 'PlainConvUNet', 'UNet_base_num_features': 32, 'n_conv_per_stage_encoder': [2, 2, 2, 2, 2, 2, 2], 'n_conv_per_stage_decoder': [2, 2, 2, 2, 2, 2], 'num_pool_per_axis': [2, 6, 6], 'pool_op_kernel_sizes': [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]], 'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'unet_max_num_features': 320, 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'batch_dice': False}
-
- These are the global plan.json settings:
- {'dataset_name': 'Dataset001_Prostate158', 'plans_name': 'nnUNetPlans', 'original_median_spacing_after_transp': [3.0, 0.4017857015132904, 0.4017857015132904], 'original_median_shape_after_transp': [25, 270, 270], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'ExperimentPlanner', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 1694.0, 'mean': 267.35308837890625, 'median': 242.0, 'min': 0.0, 'percentile_00_5': 36.0, 'percentile_99_5': 768.0, 'std': 136.11251831054688}, '1': {'max': 3557.286865234375, 'mean': 1215.81591796875, 'median': 1203.8331298828125, 'min': 0.0, 'percentile_00_5': 0.0, 'percentile_99_5': 2259.82861328125, 'std': 338.6748352050781}, '2': {'max': 198.95455932617188, 'mean': 72.26309204101562, 'median': 70.3214340209961, 'min': 0.0, 'percentile_00_5': 34.534385681152344, 'percentile_99_5': 132.71939086914062, 'std': 18.909290313720703}}}
-
- 2023-07-24 11:56:51.516630: unpacking dataset...
- 2023-07-24 11:56:54.153463: unpacking done...
- 2023-07-24 11:56:54.154020: do_dummy_2d_data_aug: True
- 2023-07-24 11:56:54.154704: Using splits from existing split file: nnUNet_preprocessed/Dataset001_Prostate158/splits_final.json
- 2023-07-24 11:56:54.154892: The split file contains 5 splits.
- 2023-07-24 11:56:54.154945: Desired fold for training: 3
- 2023-07-24 11:56:54.154991: This split has 111 training and 28 validation cases.
 
nnUNet_results/Dataset001_Prostate158/nnUNetTrainer__nnUNetPlans__3d_fullres/plans.json DELETED
@@ -1,342 +0,0 @@
- {
- "dataset_name": "Dataset001_Prostate158",
- "plans_name": "nnUNetPlans",
- "original_median_spacing_after_transp": [
- 3.0,
- 0.4017857015132904,
- 0.4017857015132904
- ],
- "original_median_shape_after_transp": [
- 25,
- 270,
- 270
- ],
- "image_reader_writer": "SimpleITKIO",
- "transpose_forward": [
- 0,
- 1,
- 2
- ],
- "transpose_backward": [
- 0,
- 1,
- 2
- ],
- "configurations": {
- "2d": {
- "data_identifier": "nnUNetPlans_2d",
- "preprocessor_name": "DefaultPreprocessor",
- "batch_size": 31,
- "patch_size": [
- 320,
- 320
- ],
- "median_image_size_in_voxels": [
- 270.0,
- 270.0
- ],
- "spacing": [
- 0.4017857015132904,
- 0.4017857015132904
- ],
- "normalization_schemes": [
- "ZScoreNormalization",
- "ZScoreNormalization",
- "ZScoreNormalization"
- ],
- "use_mask_for_norm": [
- false,
- false,
- false
- ],
- "UNet_class_name": "PlainConvUNet",
- "UNet_base_num_features": 32,
- "n_conv_per_stage_encoder": [
- 2,
- 2,
- 2,
- 2,
- 2,
- 2,
- 2
- ],
- "n_conv_per_stage_decoder": [
- 2,
- 2,
- 2,
- 2,
- 2,
- 2
- ],
- "num_pool_per_axis": [
- 6,
- 6
- ],
- "pool_op_kernel_sizes": [
- [
- 1,
- 1
- ],
- [
- 2,
- 2
- ],
- [
- 2,
- 2
- ],
- [
- 2,
- 2
- ],
- [
- 2,
- 2
- ],
- [
- 2,
- 2
- ],
- [
- 2,
- 2
- ]
- ],
- "conv_kernel_sizes": [
- [
- 3,
- 3
- ],
- [
- 3,
- 3
- ],
- [
- 3,
- 3
- ],
- [
- 3,
- 3
- ],
- [
- 3,
- 3
- ],
- [
- 3,
- 3
- ],
- [
- 3,
- 3
- ]
- ],
- "unet_max_num_features": 512,
- "resampling_fn_data": "resample_data_or_seg_to_shape",
- "resampling_fn_seg": "resample_data_or_seg_to_shape",
- "resampling_fn_data_kwargs": {
- "is_seg": false,
- "order": 3,
- "order_z": 0,
- "force_separate_z": null
- },
- "resampling_fn_seg_kwargs": {
- "is_seg": true,
- "order": 1,
- "order_z": 0,
- "force_separate_z": null
- },
- "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
- "resampling_fn_probabilities_kwargs": {
- "is_seg": false,
- "order": 1,
- "order_z": 0,
- "force_separate_z": null
- },
- "batch_dice": true
- },
- "3d_fullres": {
- "data_identifier": "nnUNetPlans_3d_fullres",
- "preprocessor_name": "DefaultPreprocessor",
- "batch_size": 2,
- "patch_size": [
- 28,
- 256,
- 256
- ],
- "median_image_size_in_voxels": [
- 25.0,
- 270.0,
- 270.0
- ],
- "spacing": [
- 2.999998998641968,
- 0.4017857015132904,
- 0.4017857015132904
- ],
- "normalization_schemes": [
- "ZScoreNormalization",
- "ZScoreNormalization",
- "ZScoreNormalization"
- ],
- "use_mask_for_norm": [
- false,
- false,
- false
- ],
- "UNet_class_name": "PlainConvUNet",
- "UNet_base_num_features": 32,
- "n_conv_per_stage_encoder": [
- 2,
- 2,
- 2,
- 2,
- 2,
- 2,
- 2
- ],
- "n_conv_per_stage_decoder": [
- 2,
- 2,
- 2,
- 2,
- 2,
- 2
- ],
- "num_pool_per_axis": [
- 2,
- 6,
- 6
- ],
- "pool_op_kernel_sizes": [
- [
- 1,
- 1,
- 1
- ],
- [
- 1,
- 2,
- 2
- ],
- [
- 1,
- 2,
- 2
- ],
- [
- 2,
- 2,
- 2
- ],
- [
- 2,
- 2,
- 2
- ],
- [
- 1,
- 2,
- 2
- ],
- [
- 1,
- 2,
- 2
- ]
- ],
- "conv_kernel_sizes": [
- [
- 1,
- 3,
- 3
- ],
- [
- 1,
- 3,
- 3
- ],
- [
- 3,
- 3,
- 3
- ],
- [
- 3,
- 3,
- 3
- ],
- [
- 3,
- 3,
- 3
- ],
- [
- 3,
- 3,
- 3
- ],
- [
- 3,
- 3,
- 3
- ]
- ],
- "unet_max_num_features": 320,
- "resampling_fn_data": "resample_data_or_seg_to_shape",
- "resampling_fn_seg": "resample_data_or_seg_to_shape",
- "resampling_fn_data_kwargs": {
- "is_seg": false,
- "order": 3,
- "order_z": 0,
- "force_separate_z": null
- },
- "resampling_fn_seg_kwargs": {
- "is_seg": true,
- "order": 1,
- "order_z": 0,
- "force_separate_z": null
- },
- "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
- "resampling_fn_probabilities_kwargs": {
- "is_seg": false,
- "order": 1,
- "order_z": 0,
- "force_separate_z": null
- },
- "batch_dice": false
- }
- },
- "experiment_planner_used": "ExperimentPlanner",
- "label_manager": "LabelManager",
- "foreground_intensity_properties_per_channel": {
- "0": {
- "max": 1694.0,
- "mean": 267.35308837890625,
- "median": 242.0,
- "min": 0.0,
- "percentile_00_5": 36.0,
- "percentile_99_5": 768.0,
- "std": 136.11251831054688
- },
- "1": {
- "max": 3557.286865234375,
- "mean": 1215.81591796875,
- "median": 1203.8331298828125,
- "min": 0.0,
- "percentile_00_5": 0.0,
- "percentile_99_5": 2259.82861328125,
- "std": 338.6748352050781
- },
- "2": {
- "max": 198.95455932617188,
- "mean": 72.26309204101562,
- "median": 70.3214340209961,
- "min": 0.0,
- "percentile_00_5": 34.534385681152344,
- "percentile_99_5": 132.71939086914062,
- "std": 18.909290313720703
- }
- }
- }