AswanthCManoj committed (verified) · Commit 213b643 · Parent(s): 40d2fa9

Training in progress, step 25

adapter_config.json CHANGED
@@ -1,31 +1,17 @@
 {
-  "alpha_pattern": {},
   "auto_mapping": null,
   "base_model_name_or_path": "deepseek-ai/deepseek-coder-1.3b-instruct",
-  "bias": "none",
-  "fan_in_fan_out": false,
+  "encoder_dropout": 0.0,
+  "encoder_hidden_size": 250,
+  "encoder_num_layers": 2,
+  "encoder_reparameterization_type": "MLP",
   "inference_mode": true,
-  "init_lora_weights": true,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "loftq_config": {},
-  "lora_alpha": 128,
-  "lora_dropout": 0.1,
-  "megatron_config": null,
-  "megatron_core": "megatron.core",
-  "modules_to_save": null,
-  "peft_type": "LORA",
-  "r": 64,
-  "rank_pattern": {},
+  "num_attention_heads": 16,
+  "num_layers": 24,
+  "num_transformer_submodules": 1,
+  "num_virtual_tokens": 50,
+  "peft_type": "P_TUNING",
   "revision": null,
-  "target_modules": [
-    "up_proj",
-    "k_proj",
-    "gate_proj",
-    "q_proj",
-    "down_proj",
-    "v_proj"
-  ],
   "task_type": "CAUSAL_LM",
-  "use_rslora": false
+  "token_dim": 2048
 }
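This change replaces a LoRA adapter (r=64 over six attention/MLP projections) with P-tuning: a small MLP prompt encoder that learns 50 virtual tokens. The num_layers (24), num_attention_heads (16), and token_dim (2048) values match the deepseek-coder-1.3b-instruct architecture and are filled in by PEFT from the base model's config. A minimal sketch of how such a config could be created with the peft library (the actual training script is not part of this commit, so this is an assumption about its shape):

from peft import PromptEncoderConfig, TaskType, get_peft_model
from transformers import AutoModelForCausalLM

# Mirrors the new adapter_config.json; token_dim, num_layers and
# num_attention_heads are derived from the base model's config by PEFT.
peft_config = PromptEncoderConfig(
    task_type=TaskType.CAUSAL_LM,
    num_virtual_tokens=50,
    encoder_reparameterization_type="MLP",
    encoder_hidden_size=250,
    encoder_num_layers=2,
    encoder_dropout=0.0,
)

base = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-1.3b-instruct")
model = get_peft_model(base, peft_config)
model.print_trainable_parameters()  # only the prompt encoder trains; the 1.3B base stays frozen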
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:55ea4013ae1aa6d2396b15bfbb8779f41752fb078446ebfd2094474cc00adf3a
-size 214734488
+oid sha256:6c945edd10f35879916a0745576f8dbb873eeb3a0c78fd256818c286b4baae36
+size 409720
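The adapter payload drops from ~215 MB to ~400 KB, which is consistent with the config change above: LoRA stored r=64 update matrices for six projections in all 24 layers, while P-tuning saves only the learned prompt embeddings. A quick arithmetic check of the new size (assuming fp32 storage, which the small remainder against the safetensors header suggests):

# 50 virtual tokens x 2048-dim embeddings x 4 bytes (fp32)
print(50 * 2048 * 4)  # 409600 -- the 409720-byte file adds a small safetensors header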
special_tokens_map.json CHANGED
@@ -13,5 +13,5 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "stic"
+  "pad_token": "Ġsq"
 }
tokenizer_config.json CHANGED
@@ -185,7 +185,7 @@
   "eos_token": "<|EOT|>",
   "legacy": true,
   "model_max_length": 16384,
-  "pad_token": "stic",
+  "pad_token": "Ġsq",
   "sp_model_kwargs": {},
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2afff67173769f97f112b041e85b114e179e83ce3ce340bf45e9ade06cbfdfca
+oid sha256:759434762d7bfb55a21f68b18a6e6125b5035fbd73274ea67425ff0b6cc062ad
 size 4664
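training_args.bin is the Trainer's pickled TrainingArguments; its hash changes between runs even though the size (4664 bytes) does not. Assuming the file was written by transformers.Trainer, the hyperparameters can be inspected by unpickling it (only do this for a trusted source, since it is a pickle):

import torch

args = torch.load("training_args.bin", weights_only=False)  # unpickles TrainingArguments
print(args.learning_rate, args.per_device_train_batch_size)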