MohamedAhmedAE committed
Commit 91debdc (verified)
1 Parent(s): f5d224b

Training in progress, step 200, checkpoint

last-checkpoint/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "/root/.cache/huggingface/hub/OpenAI_Clip_Roco_ALL_V1",
+   "architectures": [
+     "CLIPModel"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clip",
+   "projection_dim": 512,
+   "text_config": {
+     "_name_or_path": "/kaggle/working/TextModel",
+     "architectures": [
+       "RobertaModel"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "bos_token_id": 0,
+     "classifier_dropout": null,
+     "eos_token_id": 2,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "intermediate_size": 3072,
+     "max_position_embeddings": 1026,
+     "model_type": "clip_text_model",
+     "num_attention_heads": 12,
+     "position_embedding_type": "absolute",
+     "torch_dtype": "float32",
+     "type_vocab_size": 1,
+     "use_cache": true,
+     "vocab_size": 50265
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.0",
+   "vision_config": {
+     "_name_or_path": "openai/clip-vit-large-patch14-336",
+     "dropout": 0.0,
+     "hidden_size": 1024,
+     "image_size": 336,
+     "intermediate_size": 4096,
+     "model_type": "clip_vision_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "patch_size": 14,
+     "projection_dim": 768
+   }
+ }
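
The config describes a CLIP-style dual encoder: a RoBERTa-shaped text tower (50265-token vocabulary, 768-d hidden size) paired with the vision tower of openai/clip-vit-large-patch14-336 and a 512-d shared projection. Below is a minimal sketch for inspecting this configuration with transformers (>= 4.44.0, the version recorded above). The local directory name "last-checkpoint" is an assumption; loading the full weights may additionally require the custom text-encoder code used during training, so only the config is read here.

```python
# Minimal sketch: read the checkpoint's config.json and inspect key fields.
# "last-checkpoint" is an assumed local path to this checkpoint directory.
from transformers import CLIPConfig

config = CLIPConfig.from_pretrained("last-checkpoint")
print(config.projection_dim)            # 512: shared image/text embedding size
print(config.text_config.hidden_size)   # 768: RoBERTa-style text encoder
print(config.vision_config.image_size)  # 336: ViT-L/14 vision encoder input
```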
last-checkpoint/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1007a98980bb3e04416ab985ad994b989b97310603e68bc1819a4e0865e6ef1e
+ size 1715561468
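
model.safetensors, optimizer.pt, rng_state.pth, scheduler.pt, and training_args.bin are committed as Git LFS pointers: each pointer stores only the SHA-256 (`oid`) and byte size of the real object. A downloaded file can be checked against its pointer with a short script; the local path below is an assumption, and the same check applies to the other LFS-tracked files.

```python
# Sketch: verify a downloaded LFS object against its pointer's oid and size.
# The path is an assumption; point it at the actual downloaded file.
import hashlib
import os

path = "last-checkpoint/model.safetensors"
expected_oid = "1007a98980bb3e04416ab985ad994b989b97310603e68bc1819a4e0865e6ef1e"
expected_size = 1715561468

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("model.safetensors matches its LFS pointer")
```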
last-checkpoint/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0aee87a30d298cacbe90f31193769b4633cd0e6738e53b0443f6b2243e66406
+ size 3431474364
last-checkpoint/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c033ff12cf13058072b46d832a06e98b057ada5d719bc000bc42a97c8844d524
+ size 14244
last-checkpoint/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33e07fbf9b9ee51c89d2dec333ef4ad791dc48ed08d344947fdd9f86f1e5ab91
+ size 1064
last-checkpoint/trainer_state.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.008985533291400845,
+   "eval_steps": 1000,
+   "global_step": 200,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.008985533291400845,
+       "grad_norm": 0.0016701683634892106,
+       "learning_rate": 4.9999996212343494e-05,
+       "loss": 1.1088,
+       "step": 200
+     }
+   ],
+   "logging_steps": 200,
+   "max_steps": 1112900,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 50,
+   "save_steps": 200,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 2867209025126400.0,
+   "train_batch_size": 6,
+   "trial_name": null,
+   "trial_params": null
+ }
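
The counters in trainer_state.json are mutually consistent: max_steps / num_train_epochs = 1,112,900 / 50 = 22,258 optimizer steps per epoch, and 200 / 22,258 ≈ 0.0089855, which matches the recorded `epoch`. A quick check using only values from the state file:

```python
# Sanity check relating global_step to the fractional epoch recorded above.
max_steps = 1_112_900
num_train_epochs = 50
global_step = 200

steps_per_epoch = max_steps / num_train_epochs  # 22258.0
epoch = global_step / steps_per_epoch
print(epoch)  # ≈ 0.0089855, matching the "epoch" field in trainer_state.json
```

Resuming from this state would typically go through `trainer.train(resume_from_checkpoint="last-checkpoint")`, assuming the original training script, model, and data are reconstructed.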
last-checkpoint/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7a43a3e379edfd99713cfeb2648ce047b1613d63a9d86726f23912a8689cf12
+ size 5240
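
training_args.bin holds a pickled `TrainingArguments` object rather than tensors. A sketch for inspecting it is below; `weights_only=False` is needed on recent torch releases because the file contains a full Python object, and unpickling cleanly may require the same transformers version used for training. The printed attributes are standard `TrainingArguments` fields whose exact values here are unknown until the file is loaded.

```python
# Sketch: inspect the pickled TrainingArguments saved alongside the checkpoint.
import torch

args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate)
print(args.per_device_train_batch_size)
print(args.num_train_epochs)
```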