Ostixe360 committed on
Commit 7921f0f
1 Parent(s): b6466f5

Upload AnemoneForCausalLM

Files changed (4)
  1. README.md +3 -3
  2. config.json +49 -0
  3. generation_config.json +7 -0
  4. model.safetensors +3 -0
README.md CHANGED
@@ -1,14 +1,14 @@
  ---
+ language:
+ - en
+ license: apache-2.0
  library_name: transformers
  tags:
  - moe
  - moah
  - mod
- license: apache-2.0
  datasets:
  - Locutusque/UltraTextbooks
- language:
- - en
  ---

  # Model Card for Model ID
config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "architectures": [
+     "AnemoneForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "attn_layer_offset": 5,
+   "attn_layer_period": 6,
+   "attn_num_experts": 16,
+   "attn_router_aux_loss_coef": 0.05,
+   "attn_top_k": 4,
+   "bos_token_id": 1,
+   "calc_logits_for_entire_prompt": true,
+   "capacity": 128,
+   "eos_token_id": 2,
+   "expert_layer_offset": 1,
+   "expert_layer_period": 2,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4048,
+   "mamba_conv_bias": true,
+   "mamba_d_conv": 4,
+   "mamba_d_state": 16,
+   "mamba_dt_rank": 256,
+   "mamba_expand": 2,
+   "mamba_inner_layernorms": true,
+   "mamba_proj_bias": false,
+   "mod_aux_loss_coef": 0.01,
+   "mod_aux_routing": false,
+   "mod_routing": true,
+   "n_ctx": 262144,
+   "num_attention_heads": 32,
+   "num_experts": 8,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 14,
+   "num_key_value_heads": 8,
+   "output_router_logits": true,
+   "pad_token_id": 0,
+   "rms_norm_eps": 1e-06,
+   "router_aux_loss_coef": 0.001,
+   "skip_blocks": 2,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.39.3",
+   "use_cache": true,
+   "use_mamba_kernels": true,
+   "vocab_size": 65536
+ }
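
A config like this is what transformers' auto classes read when loading the checkpoint. A minimal sketch, assuming a hypothetical repo id (the commit only shows the username Ostixe360) and assuming the custom Anemone architecture ships its modeling code in the repo, hence `trust_remote_code=True`:

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "Ostixe360/Anemone"  # hypothetical repo id, for illustration only

# Read config.json from the Hub; custom architectures need trust_remote_code
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
print(config.num_hidden_layers, config.num_experts)  # 14, 8 per the config above

# Instantiate in bfloat16, matching "torch_dtype" in config.json
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
```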
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.39.3"
+ }
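
These are the defaults that `model.generate()` picks up automatically. A short sketch of inspecting them, using the same hypothetical repo id as above:

```python
from transformers import GenerationConfig

# Loads generation_config.json from the Hub repo
gen_config = GenerationConfig.from_pretrained("Ostixe360/Anemone")  # hypothetical repo id
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # 1 2 0
```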
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3cad68977fa890816d7811051561d7f74069bb61b1fcb8900d6ffa35eedfb17
+ size 2050124136
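
The repo itself stores only this Git LFS pointer; the ~2.05 GB weight file lives in LFS storage, and the `oid` is the SHA-256 of the real file. A sketch of verifying a download against it, again assuming the hypothetical repo id:

```python
import hashlib
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the actual safetensors file
path = hf_hub_download(repo_id="Ostixe360/Anemone", filename="model.safetensors")  # hypothetical repo id

# Hash in 1 MiB chunks to avoid loading 2 GB into memory at once
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == "c3cad68977fa890816d7811051561d7f74069bb61b1fcb8900d6ffa35eedfb17"
```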