Text Generation · Transformers · Safetensors · English · llama · biology · medical · Inference Endpoints · text-generation-inference
instruction-pretrain committed on
Commit 479b0ec
1 Parent(s): 7a3251c

Upload LlamaForCausalLM

Files changed (3)
  1. README.md +6 -6
  2. config.json +3 -2
  3. generation_config.json +1 -1
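For reproducibility, this upload can be pinned by its commit hash. A minimal sketch using huggingface_hub, assuming the repo id is instruction-pretrain/medicine-Llama3-8B (inferred from the page owner and the README; not stated in the diff itself):

```python
from huggingface_hub import snapshot_download

# Pin the download to this exact commit; the repo id is an assumption
# inferred from the page (owner "instruction-pretrain", model "medicine-Llama3-8B").
local_dir = snapshot_download(
    repo_id="instruction-pretrain/medicine-Llama3-8B",
    revision="479b0ec",
)
print(local_dir)  # local path of the pinned snapshot
```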
README.md CHANGED
@@ -1,15 +1,15 @@
 ---
-license: llama3
-language:
-- en
-tags:
-- biology
-- medical
 datasets:
 - EleutherAI/pile
 - Open-Orca/OpenOrca
 - GAIR/lima
 - WizardLM/WizardLM_evol_instruct_V2_196k
+language:
+- en
+license: llama3
+tags:
+- biology
+- medical
 ---
 # Instruction Pre-Training: Language Models are Supervised Multitask Learners
 This repo contains the **biomedicine model developed from Llama3-8B** in our paper [Instruction Pre-Training: Language Models are Supervised Multitask Learners](https://huggingface.co/papers/2406.14491).
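Since the README describes a causal LM adapted from Llama3-8B, a short loading sketch may be useful; the repo id is again an assumption, the prompt is only illustrative, and bfloat16 is used solely to keep memory below the float32 checkpoint:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "instruction-pretrain/medicine-Llama3-8B"  # assumed repo id, not stated in the diff
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # assumption: lower memory than the float32 weights
    device_map="auto",           # requires accelerate
)

prompt = "Hemoglobin is a protein that"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```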
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "hf_mirror/medicine-Llama3-8B",
+  "_name_or_path": "hf_mirror/medicine-Llama3-8B/shards",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -12,6 +12,7 @@
   "initializer_range": 0.02,
   "intermediate_size": 14336,
   "max_position_embeddings": 8192,
+  "mlp_bias": false,
   "model_type": "llama",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
@@ -22,7 +23,7 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.34.0",
+  "transformers_version": "4.41.2",
   "use_cache": true,
   "vocab_size": 128256
 }
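The new mlp_bias field exists in LlamaConfig only on recent transformers releases, which is consistent with the version bump to 4.41.2 in this commit. A small sketch for inspecting the updated config without downloading the weights (repo id assumed as above):

```python
from transformers import AutoConfig

# Assumed repo id; fetches only config.json, not the model weights.
config = AutoConfig.from_pretrained("instruction-pretrain/medicine-Llama3-8B")
print(config.model_type)               # "llama"
print(config.num_hidden_layers)        # 32
print(config.max_position_embeddings)  # 8192
# mlp_bias is only present on transformers >= 4.41, matching the version bump above:
print(getattr(config, "mlp_bias", None))  # expected: False
```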
generation_config.json CHANGED
@@ -5,5 +5,5 @@
   "max_length": 4096,
   "temperature": 0.6,
   "top_p": 0.9,
-  "transformers_version": "4.34.0"
+  "transformers_version": "4.41.2"
 }
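The generation defaults above (max_length 4096, temperature 0.6, top_p 0.9) are picked up automatically by model.generate(); the sketch below shows how they could be loaded and overridden explicitly, again assuming the repo id:

```python
from transformers import GenerationConfig

# Assumed repo id; loads only generation_config.json.
gen_cfg = GenerationConfig.from_pretrained("instruction-pretrain/medicine-Llama3-8B")
print(gen_cfg.temperature, gen_cfg.top_p, gen_cfg.max_length)  # 0.6 0.9 4096

# Defaults can be overridden per call, e.g. capping new tokens instead of total length:
# output = model.generate(**inputs, generation_config=gen_cfg, max_new_tokens=256)
```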