vcadillo committed

Commit 95c29a5
1 Parent(s): 16ba882

Upload 10 files
added_tokens.json ADDED
@@ -0,0 +1,116 @@
+ {
+   "</img>": 32003,
+   "</p>": 32007,
+   "</roi>": 32005,
+   "<ground_box>": 32011,
+   "<image>": 32008,
+   "<img>": 32002,
+   "<p>": 32006,
+   "<r0>": 32014,
+   "<r10>": 32024,
+   "<r11>": 32025,
+   "<r12>": 32026,
+   "<r13>": 32027,
+   "<r14>": 32028,
+   "<r15>": 32029,
+   "<r16>": 32030,
+   "<r17>": 32031,
+   "<r18>": 32032,
+   "<r19>": 32033,
+   "<r1>": 32015,
+   "<r20>": 32034,
+   "<r21>": 32035,
+   "<r22>": 32036,
+   "<r23>": 32037,
+   "<r24>": 32038,
+   "<r25>": 32039,
+   "<r26>": 32040,
+   "<r27>": 32041,
+   "<r28>": 32042,
+   "<r29>": 32043,
+   "<r2>": 32016,
+   "<r30>": 32044,
+   "<r31>": 32045,
+   "<r32>": 32046,
+   "<r33>": 32047,
+   "<r34>": 32048,
+   "<r35>": 32049,
+   "<r36>": 32050,
+   "<r37>": 32051,
+   "<r38>": 32052,
+   "<r39>": 32053,
+   "<r3>": 32017,
+   "<r40>": 32054,
+   "<r41>": 32055,
+   "<r42>": 32056,
+   "<r43>": 32057,
+   "<r44>": 32058,
+   "<r45>": 32059,
+   "<r46>": 32060,
+   "<r47>": 32061,
+   "<r48>": 32062,
+   "<r49>": 32063,
+   "<r4>": 32018,
+   "<r50>": 32064,
+   "<r51>": 32065,
+   "<r52>": 32066,
+   "<r53>": 32067,
+   "<r54>": 32068,
+   "<r55>": 32069,
+   "<r56>": 32070,
+   "<r57>": 32071,
+   "<r58>": 32072,
+   "<r59>": 32073,
+   "<r5>": 32019,
+   "<r60>": 32074,
+   "<r61>": 32075,
+   "<r62>": 32076,
+   "<r63>": 32077,
+   "<r64>": 32078,
+   "<r65>": 32079,
+   "<r66>": 32080,
+   "<r67>": 32081,
+   "<r68>": 32082,
+   "<r69>": 32083,
+   "<r6>": 32020,
+   "<r70>": 32084,
+   "<r71>": 32085,
+   "<r72>": 32086,
+   "<r73>": 32087,
+   "<r74>": 32088,
+   "<r75>": 32089,
+   "<r76>": 32090,
+   "<r77>": 32091,
+   "<r78>": 32092,
+   "<r79>": 32093,
+   "<r7>": 32021,
+   "<r80>": 32094,
+   "<r81>": 32095,
+   "<r82>": 32096,
+   "<r83>": 32097,
+   "<r84>": 32098,
+   "<r85>": 32099,
+   "<r86>": 32100,
+   "<r87>": 32101,
+   "<r88>": 32102,
+   "<r89>": 32103,
+   "<r8>": 32022,
+   "<r90>": 32104,
+   "<r91>": 32105,
+   "<r92>": 32106,
+   "<r93>": 32107,
+   "<r94>": 32108,
+   "<r95>": 32109,
+   "<r96>": 32110,
+   "<r97>": 32111,
+   "<r98>": 32112,
+   "<r99>": 32113,
+   "<r9>": 32023,
+   "<refer_box>": 32010,
+   "<refer_feat>": 32012,
+   "<region>": 32009,
+   "<roi>": 32004,
+   "<sep>": 32001,
+   "[PAD]": 32000,
+   "[grounding]": 32013
+ }
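The 114 entries above extend the base Llama vocabulary (ids 0-31999) with Groma's grounding tokens: region placeholders <r0> through <r99>, span markers such as <p>/</p> and <roi>/</roi>, and control tokens like [grounding]. A minimal sketch for checking the mapping after download, assuming the tokenizer loads via transformers; the repo id below is hypothetical, substitute the real one:

from transformers import AutoTokenizer

# "vcadillo/groma-7b-4bits" is a hypothetical repo id.
tokenizer = AutoTokenizer.from_pretrained("vcadillo/groma-7b-4bits")

print(tokenizer.convert_tokens_to_ids("[PAD]"))         # 32000 per added_tokens.json
print(tokenizer.convert_tokens_to_ids("<ground_box>"))  # 32011
print(len(tokenizer))                                   # 32114 = 32000 base + 114 added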
config.json ADDED
@@ -0,0 +1,129 @@
+
+ {
+   "_name_or_path": "../../groma-7b-finetune",
+   "architectures": [
+     "GromaModel"
+   ],
+   "box_score_thres": 0.15,
+   "llm_cfg": {
+     "_name_or_path": "vicuna-7b-v1.5",
+     "architectures": [
+       "LlamaForCausalLM"
+     ],
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "bos_token_id": 1,
+     "eos_token_id": 2,
+     "hidden_act": "silu",
+     "hidden_size": 4096,
+     "initializer_range": 0.02,
+     "intermediate_size": 11008,
+     "max_position_embeddings": 4096,
+     "model_type": "llama",
+     "num_attention_heads": 32,
+     "num_hidden_layers": 32,
+     "num_key_value_heads": 32,
+     "pad_token_id": 0,
+     "pretraining_tp": 1,
+     "rms_norm_eps": 1e-05,
+     "rope_scaling": null,
+     "rope_theta": 10000.0,
+     "tie_word_embeddings": false,
+     "torch_dtype": "float16",
+     "transformers_version": "4.39.3",
+     "use_cache": true,
+     "vocab_size": 32000
+   },
+   "max_region_num": 100,
+   "model_type": "groma",
+   "nms_thres": 0.6,
+   "num_new_token": 114,
+   "perceiver_cfg": {
+     "architectures": [
+       "CustomDDETRModel"
+     ],
+     "ddetr_cfg": {
+       "auxiliary_loss": true,
+       "class_cost": 2,
+       "cls_loss_coefficient": 2,
+       "id2label": {
+         "0": "LABEL_0"
+       },
+       "label2id": {
+         "LABEL_0": 0
+       },
+       "model_type": "deformable_detr",
+       "num_feature_levels": 1,
+       "two_stage": true,
+       "with_box_refine": true
+     },
+     "model_type": "ddetr",
+     "torch_dtype": "float32",
+     "transformers_version": "4.39.3",
+     "vis_encoder_cfg": {
+       "architectures": [
+         "Dinov2Model"
+       ],
+       "hidden_size": 1024,
+       "image_size": 518,
+       "model_type": "dinov2",
+       "num_attention_heads": 16,
+       "num_hidden_layers": 24,
+       "out_features": [
+         "stage24"
+       ],
+       "out_indices": [
+         24
+       ],
+       "patch_size": 14,
+       "stage_names": [
+         "stem",
+         "stage1",
+         "stage2",
+         "stage3",
+         "stage4",
+         "stage5",
+         "stage6",
+         "stage7",
+         "stage8",
+         "stage9",
+         "stage10",
+         "stage11",
+         "stage12",
+         "stage13",
+         "stage14",
+         "stage15",
+         "stage16",
+         "stage17",
+         "stage18",
+         "stage19",
+         "stage20",
+         "stage21",
+         "stage22",
+         "stage23",
+         "stage24"
+       ],
+       "torch_dtype": "float32"
+     },
+     "vis_output_layer": -2,
+     "zs_weight_path": null
+   },
+   "quantization_config": {
+     "_load_in_4bit": true,
+     "_load_in_8bit": false,
+     "bnb_4bit_compute_dtype": "float32",
+     "bnb_4bit_quant_storage": "uint8",
+     "bnb_4bit_quant_type": "fp4",
+     "bnb_4bit_use_double_quant": false,
+     "llm_int8_enable_fp32_cpu_offload": false,
+     "llm_int8_has_fp16_weight": false,
+     "llm_int8_skip_modules": null,
+     "llm_int8_threshold": 6.0,
+     "load_in_4bit": true,
+     "load_in_8bit": false,
+     "quant_method": "bitsandbytes"
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.3",
+   "vocab_size": 32114
+ }
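The config nests three components: llm_cfg (Vicuna-7B language model), perceiver_cfg (a Deformable-DETR region proposer over a DINOv2 visual encoder), and a bitsandbytes 4-bit (fp4) quantization_config. A minimal sketch of recreating the same quantization settings at load time; GromaModel is a custom architecture, so trust_remote_code=True (or installing the Groma codebase) is an assumption about how this repo is packaged:

import torch
from transformers import AutoModel, BitsAndBytesConfig

# Mirrors the quantization_config block above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_compute_dtype=torch.float32,
    bnb_4bit_use_double_quant=False,
)

model = AutoModel.from_pretrained(
    "vcadillo/groma-7b-4bits",  # hypothetical repo id
    quantization_config=bnb_config,
    trust_remote_code=True,     # assumption: custom GromaModel code ships with the repo
)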
generation_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "bos_token_id": 1,
+   "do_sample": true,
+   "eos_token_id": 2,
+   "max_length": 4096,
+   "pad_token_id": 32000,
+   "temperature": 0.9,
+   "top_p": 0.6,
+   "transformers_version": "4.39.3"
+ }
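These defaults enable nucleus sampling (temperature 0.9, top_p 0.6) and use the added [PAD] token (id 32000) for padding. A minimal sketch of expressing the same defaults explicitly per call, assuming the model and inputs from the sketches above:

from transformers import GenerationConfig

gen_cfg = GenerationConfig(
    do_sample=True,
    temperature=0.9,
    top_p=0.6,
    max_length=4096,
    bos_token_id=1,
    eos_token_id=2,
    pad_token_id=32000,
)
# outputs = model.generate(**inputs, generation_config=gen_cfg)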
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e52d7d7d1b073e6e869f25ef229dfc88d911ca5c0187a9bb714b781545a3b36b
+ size 4458039053
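This is a Git LFS pointer, not the roughly 4.5 GB weights file itself; `git lfs pull` (or a Hub download) fetches the real payload. A minimal sketch for verifying a downloaded copy against the oid recorded above:

import hashlib

def sha256sum(path, chunk_size=1 << 20):
    # Stream the file so the 4.5 GB checkpoint never sits fully in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "e52d7d7d1b073e6e869f25ef229dfc88d911ca5c0187a9bb714b781545a3b36b"
assert sha256sum("model.safetensors") == expected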
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": false,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": false,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "BitImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 256
+   }
+ }
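With do_resize and do_center_crop disabled, images pass through at their input resolution and are only converted to RGB, rescaled by 1/255, and normalized with ImageNet statistics; the size and crop_size fields are inert. A minimal sketch of instantiating the processor from these values ("example.jpg" is a placeholder path):

from PIL import Image
from transformers import BitImageProcessor

processor = BitImageProcessor(
    do_convert_rgb=True,
    do_resize=False,
    do_center_crop=False,
    do_rescale=True,
    rescale_factor=1 / 255,
    do_normalize=True,
    image_mean=[0.485, 0.456, 0.406],
    image_std=[0.229, 0.224, 0.225],
)

pixel_values = processor(Image.open("example.jpg"), return_tensors="pt").pixel_values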
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "[PAD]",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "legacy": false,
+   "model_max_length": 2048,
+   "pad_token": null,
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "use_default_system_prompt": true
+ }
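Two details worth noting when wiring this up: model_max_length is 2048 even though the LLM's max_position_embeddings in config.json is 4096, and pad_token is null here but set to "[PAD]" in special_tokens_map.json, so both files matter at load time. A minimal sketch, again with a hypothetical repo id:

from transformers import LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("vcadillo/groma-7b-4bits")  # hypothetical repo id

# add_bos_token=True, add_eos_token=False: sequences start with <s> (id 1)
# and get no trailing </s>.
ids = tokenizer("a photo of <r0>").input_ids
print(ids[0])   # 1
print(ids[-1])  # 32014, since <r0> is matched as a single added token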
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1399b30d83dbf1ca65d1657e64aa3b34fec477b04283bc03f98193eb60322fe5
+ size 4475