Commit cbd05ba (verified)
MohamedAtta-AI committed · Parent: 5b4f61c

Training in progress, step 100

adapter_config.json CHANGED
@@ -16,12 +16,12 @@
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 2,
+  "r": 4,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "v_proj"
+    "v_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b455760216b0089c76f223174053d8427baf82a6a024f322fe739aafb7c2df51
-size 3424688
+oid sha256:7425dc27e44d063f9dc71b7ee53c376347837a85607a0fdcd10e516ba3f01ee0
+size 6832600
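
The two hunks above raise the LoRA rank from 2 to 4 and reorder the target modules; the adapter weights roughly double in size accordingly (3,424,688 → 6,832,600 bytes), since trainable LoRA parameters scale linearly with r. The training script itself is not part of this commit, but a minimal sketch of a `peft` configuration matching the new adapter_config.json might look like this (any value not visible in the diff is an assumption left at its default):

```python
from peft import LoraConfig

# Sketch only: mirrors the fields visible in the updated adapter_config.json.
# Fields not shown in the diff (lora_alpha, lora_dropout, ...) keep their defaults.
lora_config = LoraConfig(
    r=4,                                  # rank raised from 2 to 4 in this commit
    target_modules=["v_proj", "q_proj"],  # attention projections the adapter is applied to
    task_type="CAUSAL_LM",
    use_dora=False,
)
```
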
runs/May02_22-27-57_ip-10-192-12-6/events.out.tfevents.1714688882.ip-10-192-12-6.4963.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b7fa0fdf02a598adfcfb5f6c537b0452c6c95f9ec6dc54214d113bf067a36cb
+size 5291
runs/May02_22-29-26_ip-10-192-12-6/events.out.tfevents.1714688971.ip-10-192-12-6.6363.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:193a2a89aefaaf16262e814bd2abef72c77b9341a2bf8327a841e79e02c21222
+size 5291
runs/May02_22-30-35_ip-10-192-12-6/events.out.tfevents.1714689046.ip-10-192-12-6.6363.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2965f37c0d63f6fef4430280a8f13817c29495232bc1a36b66e8ffd88f6ed101
+size 5291
runs/May02_22-32-02_ip-10-192-12-6/events.out.tfevents.1714689133.ip-10-192-12-6.8741.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dba655e01275b91f7db5c5bcc422e6c89e97ac336ad63051992c2fc44abc4502
+size 5498
tokenizer.json CHANGED
@@ -2,7 +2,7 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length": 280,
+    "max_length": 285,
     "strategy": "LongestFirst",
     "stride": 0
   },
tokenizer_config.json CHANGED
@@ -2052,6 +2052,7 @@
   "bos_token": "<|begin_of_text|>",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|end_of_text|>",
+  "max_length": 280,
   "model_input_names": [
     "input_ids",
     "attention_mask"
@@ -2059,5 +2060,8 @@
   "model_max_length": 8192,
   "pad_token": "<|end_of_text|>",
   "padding_side": "right",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "stride": 0,
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first"
 }
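
The new max_length, stride, truncation_side, and truncation_strategy keys land in tokenizer_config.json because a tokenizer's extra init kwargs are typically written back out when it is saved. A hedged sketch of how that can happen with transformers follows; the base checkpoint name and output directory are placeholders, not taken from this repo:

```python
from transformers import AutoTokenizer

# "base-model-id" is a placeholder; the commit does not name the base checkpoint.
# Keyword arguments passed at load time are kept in the tokenizer's init kwargs,
# and save_pretrained() persists them into tokenizer_config.json, which is one
# plausible origin of the new keys in this diff.
tokenizer = AutoTokenizer.from_pretrained(
    "base-model-id",
    max_length=280,
    stride=0,
    truncation_side="right",
    truncation_strategy="longest_first",
)
tokenizer.save_pretrained("./checkpoint-100")  # output path is illustrative
```
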
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:173f7b953d068616de371e5278125998c60b01a753ac546253522d04793ac376
+oid sha256:2b6e1f707a8c05a8eca0bc9e165f6ca86a0f960f70760ac382c3978a71609e91
 size 5048