Sahil committed
Commit aaf9c6c · 1 Parent(s): a3ae2f6

Add application file
test.py ADDED
@@ -0,0 +1,33 @@
+ import gradio as gr
+ import os
+
+ from utils import classfication
+
+ # The uploaded clip is moved here before prediction; utils.py predicts on the "videos" folder.
+ source_video = "videos/main.mp4"
+
+
+ def video_identity(video):
+     print(type(video))
+     try:
+         # Move the uploaded file into the folder the predict datamodule reads from.
+         os.makedirs(os.path.dirname(source_video), exist_ok=True)
+         os.replace(video, source_video)
+     except Exception as e:
+         print(f"Error: {e}")
+     prediction = classfication()
+
+     # Clean up the videos folder once the prediction has been made.
+     video_dir = os.path.dirname(source_video)
+     for file in os.listdir(video_dir):
+         file_path = os.path.join(video_dir, file)
+         if os.path.isfile(file_path):
+             os.remove(file_path)
+
+     return prediction
+
+
+ demo = gr.Interface(video_identity, gr.Video(), outputs="text")
+
+ if __name__ == "__main__":
+     demo.launch(share=True)
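
The handler can be smoke-tested without launching the Gradio UI. A minimal sketch (not part of this commit), assuming a sample clip sits at ./sample.mp4:

# Hypothetical smoke test for video_identity; sample.mp4 and sample_copy.mp4
# are assumed names, not files shipped with the commit.
import shutil

from test import video_identity

if __name__ == "__main__":
    # video_identity() moves its input into videos/main.mp4, so pass a throwaway copy.
    shutil.copy("sample.mp4", "sample_copy.mp4")
    print(video_identity("sample_copy.mp4"))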
utils.py ADDED
@@ -0,0 +1,94 @@
+ import flash
+ import torch
+ from torch import Tensor
+ from typing import Callable
+
+ from flash.video import VideoClassificationData, VideoClassifier
+ from flash.core.data.io.input import DataKeys
+ from flash.core.data.io.input_transform import InputTransform
+ from flash.core.data.transforms import ApplyToKeys
+
+ from pytorchvideo.transforms import UniformTemporalSubsample
+ from torchvision.transforms import CenterCrop, Compose, RandomCrop
+ import kornia.augmentation as K
+
+ torch.set_float32_matmul_precision("high")
+
+
+ def normalize(x: Tensor) -> Tensor:
+     # Scale uint8 pixel values into [0, 1] before per-channel normalisation.
+     return x / 255.0
+
+
+ class TransformDataModule(InputTransform):
+     image_size: int = 256
+     temporal_sub_sample: int = 16  # Frames sampled per clip (the only change from the default transform).
+     mean: Tensor = torch.tensor([0.45, 0.45, 0.45])
+     std: Tensor = torch.tensor([0.225, 0.225, 0.225])
+     data_format: str = "BCTHW"
+     same_on_frame: bool = False
+
+     def per_sample_transform(self) -> Callable:
+         # Validation / prediction: deterministic centre crop.
+         per_sample_transform = [CenterCrop(self.image_size)]
+
+         return Compose(
+             [
+                 ApplyToKeys(
+                     DataKeys.INPUT,
+                     Compose(
+                         [UniformTemporalSubsample(self.temporal_sub_sample), normalize]
+                         + per_sample_transform
+                     ),
+                 ),
+                 ApplyToKeys(DataKeys.TARGET, torch.as_tensor),
+             ]
+         )
+
+     def train_per_sample_transform(self) -> Callable:
+         # Training: random crop for light augmentation.
+         per_sample_transform = [RandomCrop(self.image_size, pad_if_needed=True)]
+
+         return Compose(
+             [
+                 ApplyToKeys(
+                     DataKeys.INPUT,
+                     Compose(
+                         [UniformTemporalSubsample(self.temporal_sub_sample), normalize]
+                         + per_sample_transform
+                     ),
+                 ),
+                 ApplyToKeys(DataKeys.TARGET, torch.as_tensor),
+             ]
+         )
+
+     def per_batch_transform_on_device(self) -> Callable:
+         # Normalise whole batches on the accelerator with Kornia.
+         return ApplyToKeys(
+             DataKeys.INPUT,
+             K.VideoSequential(
+                 K.Normalize(self.mean, self.std),
+                 data_format=self.data_format,
+                 same_on_frame=self.same_on_frame,
+             ),
+         )
+
+
+ # Load the fine-tuned x3d_m checkpoint shipped with this commit.
+ model = VideoClassifier.load_from_checkpoint(
+     "video_classfication/checkpoints/epoch=99-step=1000.ckpt"
+ )
+
+ trainer = flash.Trainer(max_epochs=5)
+
+
+ def classfication():
+     # Build the predict datamodule at call time so it picks up the clip that
+     # test.py has just copied into the "videos" folder.
+     datamodule_p = VideoClassificationData.from_folders(
+         predict_folder="videos",
+         batch_size=1,
+         transform=TransformDataModule(),
+     )
+     predictions = trainer.predict(model, datamodule=datamodule_p, output="labels")
+     return predictions[0][0]
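
The hparams.yaml below records backbone x3d_m, the Adam optimizer, and twelve sign labels, so the committed checkpoint was presumably produced by a Flash finetuning run along these lines. This is a sketch under assumed folder names (data/train/<label>/*.mp4), not the author's training script:

# Hypothetical reconstruction of the training run (not part of this commit);
# data/train/<label>/ folders for the 12 signs are assumed.
import flash
from flash.video import VideoClassificationData, VideoClassifier

# Note: importing utils also loads the committed checkpoint at module import.
from utils import TransformDataModule

datamodule = VideoClassificationData.from_folders(
    train_folder="data/train",
    batch_size=8,
    transform=TransformDataModule(),
)

model = VideoClassifier(backbone="x3d_m", labels=datamodule.labels, pretrained=True)

trainer = flash.Trainer(max_epochs=100)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")
trainer.save_checkpoint("video_classfication/checkpoints/epoch=99-step=1000.ckpt")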
video_classfication/checkpoints/epoch=99-step=1000.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:964aa411fb1cc4826d79977b5420c8cb69858fe59fe4084585a47ea219c15319
+ size 15982674
video_classfication/events.out.tfevents.1712128751.DESKTOP-SPBG53I.34764.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:883e34ebeb24be50e42c8786c01702f449baddb15504417ae3fd7dc62c3dd77d
+ size 35983
video_classfication/hparams.yaml ADDED
@@ -0,0 +1,205 @@
+ backbone: x3d_m
+ backbone_kwargs: null
+ head: null
+ labels:
+ - Cloud
+ - able
+ - accept
+ - active
+ - add
+ - admit
+ - classroom
+ - college
+ - computer
+ - engineer
+ - information
+ - internet
+ learning_rate: null
+ loss_fn: !!python/name:torch.nn.functional.cross_entropy ''
+ lr_scheduler: null
+ metrics: !!python/object/new:torchmetrics.classification.accuracy.Accuracy
+   args:
+   - Accuracy()
+   state:
+     _backward_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _backward_pre_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _buffers: !!python/object/apply:collections.OrderedDict
+     - []
+     _cache: null
+     _computed: null
+     _defaults:
+       fn: !!python/object/apply:torch._utils._rebuild_tensor_v2
+       - !!python/object/apply:torch.storage._load_from_bytes
+         - !!binary |
+           gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+           AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+           aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+           ZQpxAVgNAAAAMjE1OTMyNDY5NzY2NHECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+           MjQ2OTc2NjRxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+       - 0
+       - !!python/tuple []
+       - !!python/tuple []
+       - false
+       - !!python/object/apply:collections.OrderedDict
+         - []
+       fp: !!python/object/apply:torch._utils._rebuild_tensor_v2
+       - !!python/object/apply:torch.storage._load_from_bytes
+         - !!binary |
+           gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+           AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+           aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+           ZQpxAVgNAAAAMjE1OTMyNDY5NDc1MnECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+           MjQ2OTQ3NTJxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+       - 0
+       - !!python/tuple []
+       - !!python/tuple []
+       - false
+       - !!python/object/apply:collections.OrderedDict
+         - []
+       tn: !!python/object/apply:torch._utils._rebuild_tensor_v2
+       - !!python/object/apply:torch.storage._load_from_bytes
+         - !!binary |
+           gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+           AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+           aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+           ZQpxAVgNAAAAMjE1OTMyNDY5Nzc3NnECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+           MjQ2OTc3NzZxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+       - 0
+       - !!python/tuple []
+       - !!python/tuple []
+       - false
+       - !!python/object/apply:collections.OrderedDict
+         - []
+       tp: !!python/object/apply:torch._utils._rebuild_tensor_v2
+       - !!python/object/apply:torch.storage._load_from_bytes
+         - !!binary |
+           gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+           AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+           aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+           ZQpxAVgNAAAAMjE1OTMyNDY5ODg5NnECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+           MjQ2OTg4OTZxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+       - 0
+       - !!python/tuple []
+       - !!python/tuple []
+       - false
+       - !!python/object/apply:collections.OrderedDict
+         - []
+     _device: !!python/object/apply:torch.device
+     - cpu
+     _enable_grad: false
+     _forward_cache: null
+     _forward_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _forward_hooks_always_called: !!python/object/apply:collections.OrderedDict
+     - []
+     _forward_hooks_with_kwargs: !!python/object/apply:collections.OrderedDict
+     - []
+     _forward_pre_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _forward_pre_hooks_with_kwargs: !!python/object/apply:collections.OrderedDict
+     - []
+     _is_full_backward_hook: null
+     _is_synced: false
+     _load_state_dict_post_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _load_state_dict_pre_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _modules: !!python/object/apply:collections.OrderedDict
+     - []
+     _non_persistent_buffers_set: !!set {}
+     _parameters: !!python/object/apply:collections.OrderedDict
+     - []
+     _persistent:
+       fn: false
+       fp: false
+       tn: false
+       tp: false
+     _reductions:
+       fn: &id001 !!python/name:torchmetrics.utilities.data.dim_zero_sum ''
+       fp: *id001
+       tn: *id001
+       tp: *id001
+     _should_unsync: true
+     _state_dict_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _state_dict_pre_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _to_sync: true
+     _update_count: 0
+     average: micro
+     compute_on_cpu: false
+     dist_sync_fn: null
+     dist_sync_on_step: false
+     fn: !!python/object/apply:torch._utils._rebuild_tensor_v2
+     - !!python/object/apply:torch.storage._load_from_bytes
+       - !!binary |
+         gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+         AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+         aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+         ZQpxAVgNAAAAMjE1OTMyNDY5MzUyMHECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+         MjQ2OTM1MjBxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+     - 0
+     - !!python/tuple []
+     - !!python/tuple []
+     - false
+     - !!python/object/apply:collections.OrderedDict
+       - []
+     fp: !!python/object/apply:torch._utils._rebuild_tensor_v2
+     - !!python/object/apply:torch.storage._load_from_bytes
+       - !!binary |
+         gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+         AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+         aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+         ZQpxAVgNAAAAMjE1OTMyNDY5NTQyNHECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+         MjQ2OTU0MjRxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+     - 0
+     - !!python/tuple []
+     - !!python/tuple []
+     - false
+     - !!python/object/apply:collections.OrderedDict
+       - []
+     ignore_index: null
+     mdmc_reduce: null
+     mode: null
+     multiclass: null
+     num_classes: null
+     process_group: null
+     reduce: micro
+     subset_accuracy: false
+     sync_on_compute: true
+     threshold: 0.5
+     tn: !!python/object/apply:torch._utils._rebuild_tensor_v2
+     - !!python/object/apply:torch.storage._load_from_bytes
+       - !!binary |
+         gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+         AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+         aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+         ZQpxAVgNAAAAMjE1OTMyNDY5Nzg4OHECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+         MjQ2OTc4ODhxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+     - 0
+     - !!python/tuple []
+     - !!python/tuple []
+     - false
+     - !!python/object/apply:collections.OrderedDict
+       - []
+     top_k: null
+     tp: !!python/object/apply:torch._utils._rebuild_tensor_v2
+     - !!python/object/apply:torch.storage._load_from_bytes
+       - !!binary |
+         gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+         AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+         aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+         ZQpxAVgNAAAAMjE1OTMyNDY5Mzg1NnECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+         MjQ2OTM4NTZxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+     - 0
+     - !!python/tuple []
+     - !!python/tuple []
+     - false
+     - !!python/object/apply:collections.OrderedDict
+       - []
+     training: true
+ num_classes: null
+ optimizer: Adam
+ pretrained: true
video_classfication/version_11/checkpoints/epoch=99-step=1000.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:964aa411fb1cc4826d79977b5420c8cb69858fe59fe4084585a47ea219c15319
+ size 15982674
video_classfication/version_11/events.out.tfevents.1712128751.DESKTOP-SPBG53I.34764.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:883e34ebeb24be50e42c8786c01702f449baddb15504417ae3fd7dc62c3dd77d
+ size 35983
video_classfication/version_11/hparams.yaml ADDED
@@ -0,0 +1,205 @@
+ backbone: x3d_m
+ backbone_kwargs: null
+ head: null
+ labels:
+ - Cloud
+ - able
+ - accept
+ - active
+ - add
+ - admit
+ - classroom
+ - college
+ - computer
+ - engineer
+ - information
+ - internet
+ learning_rate: null
+ loss_fn: !!python/name:torch.nn.functional.cross_entropy ''
+ lr_scheduler: null
+ metrics: !!python/object/new:torchmetrics.classification.accuracy.Accuracy
+   args:
+   - Accuracy()
+   state:
+     _backward_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _backward_pre_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _buffers: !!python/object/apply:collections.OrderedDict
+     - []
+     _cache: null
+     _computed: null
+     _defaults:
+       fn: !!python/object/apply:torch._utils._rebuild_tensor_v2
+       - !!python/object/apply:torch.storage._load_from_bytes
+         - !!binary |
+           gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+           AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+           aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+           ZQpxAVgNAAAAMjE1OTMyNDY5NzY2NHECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+           MjQ2OTc2NjRxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+       - 0
+       - !!python/tuple []
+       - !!python/tuple []
+       - false
+       - !!python/object/apply:collections.OrderedDict
+         - []
+       fp: !!python/object/apply:torch._utils._rebuild_tensor_v2
+       - !!python/object/apply:torch.storage._load_from_bytes
+         - !!binary |
+           gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+           AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+           aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+           ZQpxAVgNAAAAMjE1OTMyNDY5NDc1MnECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+           MjQ2OTQ3NTJxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+       - 0
+       - !!python/tuple []
+       - !!python/tuple []
+       - false
+       - !!python/object/apply:collections.OrderedDict
+         - []
+       tn: !!python/object/apply:torch._utils._rebuild_tensor_v2
+       - !!python/object/apply:torch.storage._load_from_bytes
+         - !!binary |
+           gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+           AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+           aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+           ZQpxAVgNAAAAMjE1OTMyNDY5Nzc3NnECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+           MjQ2OTc3NzZxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+       - 0
+       - !!python/tuple []
+       - !!python/tuple []
+       - false
+       - !!python/object/apply:collections.OrderedDict
+         - []
+       tp: !!python/object/apply:torch._utils._rebuild_tensor_v2
+       - !!python/object/apply:torch.storage._load_from_bytes
+         - !!binary |
+           gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+           AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+           aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+           ZQpxAVgNAAAAMjE1OTMyNDY5ODg5NnECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+           MjQ2OTg4OTZxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+       - 0
+       - !!python/tuple []
+       - !!python/tuple []
+       - false
+       - !!python/object/apply:collections.OrderedDict
+         - []
+     _device: !!python/object/apply:torch.device
+     - cpu
+     _enable_grad: false
+     _forward_cache: null
+     _forward_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _forward_hooks_always_called: !!python/object/apply:collections.OrderedDict
+     - []
+     _forward_hooks_with_kwargs: !!python/object/apply:collections.OrderedDict
+     - []
+     _forward_pre_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _forward_pre_hooks_with_kwargs: !!python/object/apply:collections.OrderedDict
+     - []
+     _is_full_backward_hook: null
+     _is_synced: false
+     _load_state_dict_post_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _load_state_dict_pre_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _modules: !!python/object/apply:collections.OrderedDict
+     - []
+     _non_persistent_buffers_set: !!set {}
+     _parameters: !!python/object/apply:collections.OrderedDict
+     - []
+     _persistent:
+       fn: false
+       fp: false
+       tn: false
+       tp: false
+     _reductions:
+       fn: &id001 !!python/name:torchmetrics.utilities.data.dim_zero_sum ''
+       fp: *id001
+       tn: *id001
+       tp: *id001
+     _should_unsync: true
+     _state_dict_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _state_dict_pre_hooks: !!python/object/apply:collections.OrderedDict
+     - []
+     _to_sync: true
+     _update_count: 0
+     average: micro
+     compute_on_cpu: false
+     dist_sync_fn: null
+     dist_sync_on_step: false
+     fn: !!python/object/apply:torch._utils._rebuild_tensor_v2
+     - !!python/object/apply:torch.storage._load_from_bytes
+       - !!binary |
+         gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+         AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+         aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+         ZQpxAVgNAAAAMjE1OTMyNDY5MzUyMHECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+         MjQ2OTM1MjBxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+     - 0
+     - !!python/tuple []
+     - !!python/tuple []
+     - false
+     - !!python/object/apply:collections.OrderedDict
+       - []
+     fp: !!python/object/apply:torch._utils._rebuild_tensor_v2
+     - !!python/object/apply:torch.storage._load_from_bytes
+       - !!binary |
+         gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+         AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+         aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+         ZQpxAVgNAAAAMjE1OTMyNDY5NTQyNHECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+         MjQ2OTU0MjRxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+     - 0
+     - !!python/tuple []
+     - !!python/tuple []
+     - false
+     - !!python/object/apply:collections.OrderedDict
+       - []
+     ignore_index: null
+     mdmc_reduce: null
+     mode: null
+     multiclass: null
+     num_classes: null
+     process_group: null
+     reduce: micro
+     subset_accuracy: false
+     sync_on_compute: true
+     threshold: 0.5
+     tn: !!python/object/apply:torch._utils._rebuild_tensor_v2
+     - !!python/object/apply:torch.storage._load_from_bytes
+       - !!binary |
+         gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+         AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+         aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+         ZQpxAVgNAAAAMjE1OTMyNDY5Nzg4OHECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+         MjQ2OTc4ODhxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+     - 0
+     - !!python/tuple []
+     - !!python/tuple []
+     - false
+     - !!python/object/apply:collections.OrderedDict
+       - []
+     top_k: null
+     tp: !!python/object/apply:torch._utils._rebuild_tensor_v2
+     - !!python/object/apply:torch.storage._load_from_bytes
+       - !!binary |
+         gAKKCmz8nEb5IGqoUBkugAJN6QMugAJ9cQAoWBAAAABwcm90b2NvbF92ZXJzaW9ucQFN6QNYDQAA
+         AGxpdHRsZV9lbmRpYW5xAohYCgAAAHR5cGVfc2l6ZXNxA31xBChYBQAAAHNob3J0cQVLAlgDAAAA
+         aW50cQZLBFgEAAAAbG9uZ3EHSwR1dS6AAihYBwAAAHN0b3JhZ2VxAGN0b3JjaApMb25nU3RvcmFn
+         ZQpxAVgNAAAAMjE1OTMyNDY5Mzg1NnECWAMAAABjcHVxA0sBTnRxBFEugAJdcQBYDQAAADIxNTkz
+         MjQ2OTM4NTZxAWEuAQAAAAAAAAAAAAAAAAAAAA==
+     - 0
+     - !!python/tuple []
+     - !!python/tuple []
+     - false
+     - !!python/object/apply:collections.OrderedDict
+       - []
+     training: true
+ num_classes: null
+ optimizer: Adam
+ pretrained: true