yujiepan committed
Commit f2bdad6
1 Parent(s): d1d30f8

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,47 @@
+ ---
+ library_name: transformers
+ pipeline_tag: text-generation
+ inference: true
+ widget:
+ - text: Hello!
+   example_title: Hello world
+   group: Python
+ ---
+
+ This model is for debugging. It is randomly initialized using the config from [mistralai/Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3), but at a much smaller size.
+
+ Code:
+ ```python
+ import os
+
+ import torch
+ import transformers
+ from huggingface_hub import create_repo, upload_folder
+ from transformers import pipeline
+
+ model_id = 'mistralai/Mistral-7B-Instruct-v0.3'
+ save_path = '/tmp/yujiepan/mistral-v0.3-tiny-random'
+ repo_id = 'yujiepan/mistral-v0.3-tiny-random'
+
+ # Shrink the original config down to a tiny model.
+ config = transformers.AutoConfig.from_pretrained(model_id)
+ config.hidden_size = 8
+ config.intermediate_size = 32
+ config.num_attention_heads = 4
+ config.num_hidden_layers = 2
+ config.num_key_value_heads = 2
+ print(config)
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
+ tokenizer.save_pretrained(save_path)
+
+ # Randomly initialize weights from the shrunken config.
+ model = transformers.AutoModelForCausalLM.from_config(config, torch_dtype=torch.bfloat16)
+ model.generation_config = transformers.GenerationConfig.from_pretrained(model_id)
+
+ # Smoke-test generation before saving.
+ pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, do_sample=False, device='cuda')
+ print(pipe('Hello World!'))
+
+ model.save_pretrained(save_path)
+
+ os.system(f'ls -alh {save_path}')
+ # create_repo(repo_id, exist_ok=True)
+ # upload_folder(repo_id=repo_id, folder_path=save_path)
+ ```
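For reference, a minimal sketch of loading this tiny checkpoint back from the Hub once the commented-out upload steps above have been run. The output text is gibberish by design, since the weights are random; only the shapes and plumbing matter here:

```python
import transformers

# Load the tiny random checkpoint straight from the Hub.
pipe = transformers.pipeline('text-generation', model='yujiepan/mistral-v0.3-tiny-random')
# Meaningless output is expected: this model exists only for debugging pipelines.
print(pipe('Hello World!', max_new_tokens=8))
```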
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "mistralai/Mistral-7B-Instruct-v0.3",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 8,
+   "initializer_range": 0.02,
+   "intermediate_size": 32,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 4,
+   "num_hidden_layers": 2,
+   "num_key_value_heads": 2,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.38.2",
+   "use_cache": true,
+   "vocab_size": 32768
+ }
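The attention shapes implied by this config follow the standard Mistral conventions (an assumption worth stating: head_dim is derived as hidden_size / num_attention_heads, and grouped-query attention shares each KV head across num_attention_heads / num_key_value_heads query heads):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained('yujiepan/mistral-v0.3-tiny-random')
head_dim = config.hidden_size // config.num_attention_heads                 # 8 // 4 = 2
queries_per_kv = config.num_attention_heads // config.num_key_value_heads   # 4 // 2 = 2
print(head_dim, queries_per_kv)
```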
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.38.2"
+ }
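A quick way to confirm the saved generation defaults, which are just the BOS/EOS ids carried over from the original model:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained('yujiepan/mistral-v0.3-tiny-random')
print(gen_config.bos_token_id, gen_config.eos_token_id)  # 1 2
```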
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a10e90d3e485f3c625449788089071193a1938c777fd11603d08d53289a6b3e
+ size 1054672
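As a sanity check, the file size is consistent with the config above. A back-of-the-envelope parameter count, assuming standard Mistral layer shapes (untied embeddings, GQA attention projections, SwiGLU MLP, two RMSNorms per layer):

```python
V, H, I, L = 32768, 8, 32, 2             # vocab, hidden, intermediate, layers
n_heads, n_kv = 4, 2
head_dim = H // n_heads                   # 2

embeddings = 2 * V * H                    # input embeddings + untied lm_head
attn = H * n_heads * head_dim * 2 \
     + H * n_kv * head_dim * 2            # q/o and k/v projections
mlp = 3 * H * I                           # gate, up, down projections
norms = 2 * H                             # input + post-attention RMSNorm
total = embeddings + L * (attn + mlp + norms) + H   # + final norm
print(total, total * 2)                   # 526248 params -> 1052496 bytes in bf16
```

The ~2 KB gap between the 1,052,496 bytes of bf16 weights and the 1,054,672-byte file is the safetensors header.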
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
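A small sketch of how these special tokens surface at tokenization time, assuming the tokenizer keeps the upstream Mistral default of prepending BOS (and not appending EOS):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('yujiepan/mistral-v0.3-tiny-random')
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok('Hello')['input_ids'][0])                 # 1, i.e. the BOS id
```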
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37f00374dea48658ee8f5d0f21895b9bc55cb0103939607c8185bfd1c6ca1f89
+ size 587404
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff