peter881122 committed on
Commit 1112082
1 Parent(s): a944a48

End of training

README.md ADDED
@@ -0,0 +1,81 @@
+ ---
+ license: mit
+ base_model: microsoft/xclip-base-patch32
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: xclip-base-patch32-finetuned-custom-subset
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/huangyangyu/huggingface/runs/v8zohmjq)
+ # xclip-base-patch32-finetuned-custom-subset
+
+ This model is a fine-tuned version of [microsoft/xclip-base-patch32](https://huggingface.co/microsoft/xclip-base-patch32) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5862
+ - Accuracy: 0.7308
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - training_steps: 1420
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-------:|:----:|:---------------:|:--------:|
+ | 0.8431 | 0.0507 | 72 | 0.5928 | 0.7308 |
+ | 0.6657 | 1.0507 | 144 | 0.7383 | 0.7308 |
+ | 0.8019 | 2.0507 | 216 | 0.6047 | 0.7308 |
+ | 0.6275 | 3.0507 | 288 | 0.5946 | 0.7308 |
+ | 0.561 | 4.0507 | 360 | 0.6646 | 0.7308 |
+ | 0.594 | 5.0507 | 432 | 0.6098 | 0.7308 |
+ | 0.6472 | 6.0507 | 504 | 0.5915 | 0.7308 |
+ | 0.623 | 7.0507 | 576 | 0.5948 | 0.7308 |
+ | 0.5711 | 8.0507 | 648 | 0.6056 | 0.7308 |
+ | 0.5967 | 9.0507 | 720 | 0.5887 | 0.7308 |
+ | 0.5831 | 10.0507 | 792 | 0.5860 | 0.7308 |
+ | 0.6101 | 11.0507 | 864 | 0.6044 | 0.7308 |
+ | 0.6265 | 12.0507 | 936 | 0.5856 | 0.7308 |
+ | 0.6373 | 13.0507 | 1008 | 0.5882 | 0.7308 |
+ | 0.665 | 14.0507 | 1080 | 0.5852 | 0.7308 |
+ | 0.6183 | 15.0507 | 1152 | 0.5837 | 0.7308 |
+ | 0.7786 | 16.0507 | 1224 | 0.5834 | 0.7308 |
+ | 0.5489 | 17.0507 | 1296 | 0.5849 | 0.7308 |
+ | 0.6512 | 18.0507 | 1368 | 0.5843 | 0.7308 |
+ | 0.5266 | 19.0366 | 1420 | 0.5862 | 0.7308 |
+
+
+ ### Framework versions
+
+ - Transformers 4.42.0.dev0
+ - PyTorch 2.1.1
+ - Datasets 2.13.2
+ - Tokenizers 0.19.1
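Note: the generated card above stops at framework versions and ships no usage snippet. Below is a minimal inference sketch, not an official example: the repo id is inferred from the commit author and the model name (an assumption), and the Auto classes resolve to VideoMAE rather than X-CLIP because the config.json added below sets `model_type` to `"videomae"`.

```python
# Minimal inference sketch. Assumptions: the repo id is inferred from the
# commit author and model name; the checkpoint dispatches to VideoMAE classes
# because config.json sets model_type to "videomae".
import numpy as np
import torch
from transformers import AutoImageProcessor, AutoModelForVideoClassification

repo_id = "peter881122/xclip-base-patch32-finetuned-custom-subset"  # assumed
processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForVideoClassification.from_pretrained(repo_id)

# config.json sets num_frames=16; dummy frames stand in for a real clip here.
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(16)]
inputs = processor(video, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # "0" or "1"
```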
config.json ADDED
@@ -0,0 +1,205 @@
+ {
+   "_name_or_path": "microsoft/xclip-base-patch32",
+   "architectures": [
+     "VideoMAEForVideoClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "decoder_hidden_size": 384,
+   "decoder_intermediate_size": 1536,
+   "decoder_num_attention_heads": 6,
+   "decoder_num_hidden_layers": 4,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "0",
+     "1": "1"
+   },
+   "image_size": 224,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "0": 0,
+     "1": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "videomae",
+   "norm_pix_loss": true,
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_frames": 16,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "projection_dim": 512,
+   "prompt_alpha": 0.1,
+   "prompt_attention_dropout": 0.0,
+   "prompt_hidden_act": "quick_gelu",
+   "prompt_layers": 2,
+   "prompt_num_attention_heads": 8,
+   "prompt_projection_dropout": 0.0,
+   "qkv_bias": true,
+   "text_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 512,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 2048,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 77,
+     "min_length": 0,
+     "model_type": "xclip_text_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 8,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.22.0.dev0",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "vocab_size": 49408
+   },
+   "text_config_dict": null,
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.0.dev0",
+   "tubelet_size": 2,
+   "use_mean_pooling": true,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "drop_path_rate": 0.0,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "mit_hidden_size": 512,
+     "mit_intermediate_size": 2048,
+     "mit_num_attention_heads": 8,
+     "mit_num_hidden_layers": 1,
+     "model_type": "xclip_vision_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_frames": 8,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 32,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.22.0.dev0",
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "vision_config_dict": null
+ }
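Worth flagging: this config is a hybrid. The top level is a VideoMAE classification config (`model_type: "videomae"`, `num_frames: 16`, `patch_size: 16`, binary labels), while the `text_config`/`vision_config` blocks are X-CLIP sub-configs carried over from the base checkpoint and are effectively inert here. Since the Auto machinery dispatches on `model_type`, a quick sanity check (repo id assumed, as above):

```python
# Confirm what this config actually loads as; AutoConfig keys off "model_type",
# so the X-CLIP sub-configs above are ignored at dispatch time.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("peter881122/xclip-base-patch32-finetuned-custom-subset")
print(type(cfg).__name__)                              # expected: VideoMAEConfig
print(cfg.num_frames, cfg.image_size, cfg.patch_size)  # 16 224 16
print(cfg.id2label)                                    # {0: '0', 1: '1'}
```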
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71904266c5ebe7437fb414d8f957301a2a68c3de64f09af550aac66a42644ed0
+ size 344937328
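This is a Git LFS pointer, not the weights themselves: the real safetensors blob is ~345 MB and the `oid` above is its SHA-256. A sketch for verifying a downloaded copy (assumes the file sits in the working directory):

```python
# Verify a downloaded model.safetensors against the SHA-256 in the LFS pointer.
import hashlib

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:  # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == "71904266c5ebe7437fb414d8f957301a2a68c3de64f09af550aac66a42644ed0"
```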
preprocessor_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "_valid_processor_keys": [
+     "videos",
+     "do_resize",
+     "size",
+     "resample",
+     "do_center_crop",
+     "crop_size",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "VideoMAEImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "processor_class": "XCLIPProcessor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
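In effect, this preprocessor resizes each frame's shortest edge to 224 (`resample: 2` is PIL bilinear), center-crops to 224x224, rescales by 1/255 (the `0.00392...` factor), and normalizes with the ImageNet mean/std. A per-frame sketch of the last two steps, assuming frames arrive already resized and cropped:

```python
# What do_rescale + do_normalize amount to for one already-cropped frame.
import numpy as np

IMAGE_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGE_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def rescale_and_normalize(frame: np.ndarray) -> np.ndarray:
    """frame: uint8 array of shape (224, 224, 3), already resized and cropped."""
    x = frame.astype(np.float32) / 255.0  # rescale_factor = 1/255
    return (x - IMAGE_MEAN) / IMAGE_STD   # ImageNet normalization
```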
runs/Jun05_19-15-46_USER-20231127DF/events.out.tfevents.1717586155.USER-20231127DF.4964.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:804c8ac3e1a74043fea4c159079d6752ab3bc3f12810b843a9b502fccf80435d
+ size 46431
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b5399254405706bb59df526cd9390ef86e6ea25cb14d790d5800e64e3f9c82c
+ size 5176
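`training_args.bin` is the `TrainingArguments` object the Trainer serialized with `torch.save`. A hedged way to inspect it (it is a pickle, so only load files you trust, and use a transformers version close to the one that wrote it):

```python
# Inspect the serialized TrainingArguments (pickled via torch.save by the Trainer).
# Unpickling executes code, so only do this for files you trust.
import torch

args = torch.load("training_args.bin")   # assumed local path
print(args.learning_rate)                 # 5e-05, matching the card above
print(args.per_device_train_batch_size)   # 4
print(args.max_steps)                     # 1420
```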