danielhanchen committed
Commit 36b625a
1 Parent(s): 0f99fb9

Upload config

Files changed (1)
  1. config.json +3 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "meta-llama/Meta-Llama-3.1-405B-Instruct",
+  "_name_or_path": "unsloth/Meta-Llama-3.1-405B-Instruct",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -21,6 +21,7 @@
   "num_attention_heads": 128,
   "num_hidden_layers": 126,
   "num_key_value_heads": 8,
+  "pad_token_id": 128004,
   "pretraining_tp": 1,
   "quantization_config": {
     "_load_in_4bit": true,
@@ -49,6 +50,7 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.44.2",
+  "unsloth_version": "2024.9",
   "use_cache": true,
   "vocab_size": 128256
 }
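
The net effect of the commit is small: the config now points at the unsloth mirror instead of meta-llama, declares pad_token_id 128004 (in the Llama 3.1 tokenizer this id corresponds to the <|finetune_right_pad_id|> token), and records the Unsloth release that produced it. Below is a minimal sketch, not part of the commit, of reading these fields back with transformers; the repo id is an assumption taken from the updated _name_or_path.

# Minimal sketch: inspect the fields touched by this commit.
from transformers import AutoConfig

repo = "unsloth/Meta-Llama-3.1-405B-Instruct"  # assumed from the new _name_or_path

config = AutoConfig.from_pretrained(repo)
print(config.pad_token_id)         # 128004, the newly added pad token
print(config.quantization_config)  # dict carrying the 4-bit settings, incl. "_load_in_4bit": true

# Because quantization_config is embedded in config.json,
# AutoModelForCausalLM.from_pretrained(repo, device_map="auto")
# would apply the 4-bit quantization automatically when loading weights.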