Upload config #3
by ArthurZ (HF staff) - opened

Files changed (1)
  config.json (+6, -109)

config.json CHANGED
@@ -1,121 +1,18 @@
 {
-  "additional_vocab_size": 0,
-  "alpha_initializer": "zeros",
-  "alpha_type": "float",
-  "alphas_initializer_range": 0.0,
-  "architectures": [
-    "LlavaForVisionText2Text"
-  ],
-  "bos_token_id": 1,
-  "cross_layer_interval": 1,
-  "dropout": 0.0,
-  "eos_token_id": 2,
-  "freeze_lm_head": false,
-  "freeze_text_layers": true,
-  "freeze_text_module_exceptions": [],
-  "freeze_vision_layers": true,
-  "freeze_vision_module_exceptions": [],
-  "hidden_act": "silu",
-  "hidden_size": 4096,
   "ignore_index": -100,
   "image_token_index": -200,
-  "initializer_range": 0.02,
-  "intermediate_size": 11008,
   "model_type": "llava",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 32,
-  "pad_token_id": 0,
   "projector_hidden_act": "gelu",
-  "qk_layer_norms": false,
-  "rms_norm_eps": 1e-06,
   "text_config": {
-    "_name_or_path": "meta-llama/Llama-2-7b-hf",
-    "add_cross_attention": false,
-    "architectures": [
-      "LlamaForCausalLM"
-    ],
-    "attention_bias": false,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "begin_suppress_tokens": null,
-    "bos_token_id": 1,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": 2,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "silu",
-    "hidden_size": 4096,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "initializer_range": 0.02,
-    "intermediate_size": 11008,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "max_position_embeddings": 4096,
-    "min_length": 0,
-    "model_type": "llama",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 32,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_hidden_layers": 32,
-    "num_key_value_heads": 32,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
-    "prefix": null,
-    "pretraining_tp": 1,
-    "problem_type": null,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "rms_norm_eps": 1e-05,
-    "rope_scaling": null,
-    "rope_theta": 10000.0,
-    "sep_token_id": null,
-    "suppress_tokens": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": false,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": "float16",
-    "torchscript": false,
-    "typical_p": 1.0,
-    "use_bfloat16": false,
-    "use_cache": true,
-    "vocab_size": 32000
+    "model_type": "llama"
   },
-  "tie_word_embeddings": false,
-  "torch_dtype": "float16",
   "transformers_version": "4.36.0.dev0",
-  "use_cache": true,
-  "use_resampler": false,
   "vision_config": {
-    "model_type": "llava"
+    "image_size": 336,
+    "intermediate_size": 4096,
+    "model_type": "clip_vision_model",
+    "num_hidden_layers": 24,
+    "patch_size": 14
   },
   "vision_feature_layer": -2,
   "vision_feature_select_strategy": "default",