machuofan committed
Commit
5229231
1 Parent(s): 16bf96f
added_tokens.json ADDED
@@ -0,0 +1,116 @@
+ {
+ "</img>": 32003,
+ "</p>": 32007,
+ "</roi>": 32005,
+ "<ground_box>": 32011,
+ "<image>": 32008,
+ "<img>": 32002,
+ "<p>": 32006,
+ "<r0>": 32014,
+ "<r10>": 32024,
+ "<r11>": 32025,
+ "<r12>": 32026,
+ "<r13>": 32027,
+ "<r14>": 32028,
+ "<r15>": 32029,
+ "<r16>": 32030,
+ "<r17>": 32031,
+ "<r18>": 32032,
+ "<r19>": 32033,
+ "<r1>": 32015,
+ "<r20>": 32034,
+ "<r21>": 32035,
+ "<r22>": 32036,
+ "<r23>": 32037,
+ "<r24>": 32038,
+ "<r25>": 32039,
+ "<r26>": 32040,
+ "<r27>": 32041,
+ "<r28>": 32042,
+ "<r29>": 32043,
+ "<r2>": 32016,
+ "<r30>": 32044,
+ "<r31>": 32045,
+ "<r32>": 32046,
+ "<r33>": 32047,
+ "<r34>": 32048,
+ "<r35>": 32049,
+ "<r36>": 32050,
+ "<r37>": 32051,
+ "<r38>": 32052,
+ "<r39>": 32053,
+ "<r3>": 32017,
+ "<r40>": 32054,
+ "<r41>": 32055,
+ "<r42>": 32056,
+ "<r43>": 32057,
+ "<r44>": 32058,
+ "<r45>": 32059,
+ "<r46>": 32060,
+ "<r47>": 32061,
+ "<r48>": 32062,
+ "<r49>": 32063,
+ "<r4>": 32018,
+ "<r50>": 32064,
+ "<r51>": 32065,
+ "<r52>": 32066,
+ "<r53>": 32067,
+ "<r54>": 32068,
+ "<r55>": 32069,
+ "<r56>": 32070,
+ "<r57>": 32071,
+ "<r58>": 32072,
+ "<r59>": 32073,
+ "<r5>": 32019,
+ "<r60>": 32074,
+ "<r61>": 32075,
+ "<r62>": 32076,
+ "<r63>": 32077,
+ "<r64>": 32078,
+ "<r65>": 32079,
+ "<r66>": 32080,
+ "<r67>": 32081,
+ "<r68>": 32082,
+ "<r69>": 32083,
+ "<r6>": 32020,
+ "<r70>": 32084,
+ "<r71>": 32085,
+ "<r72>": 32086,
+ "<r73>": 32087,
+ "<r74>": 32088,
+ "<r75>": 32089,
+ "<r76>": 32090,
+ "<r77>": 32091,
+ "<r78>": 32092,
+ "<r79>": 32093,
+ "<r7>": 32021,
+ "<r80>": 32094,
+ "<r81>": 32095,
+ "<r82>": 32096,
+ "<r83>": 32097,
+ "<r84>": 32098,
+ "<r85>": 32099,
+ "<r86>": 32100,
+ "<r87>": 32101,
+ "<r88>": 32102,
+ "<r89>": 32103,
+ "<r8>": 32022,
+ "<r90>": 32104,
+ "<r91>": 32105,
+ "<r92>": 32106,
+ "<r93>": 32107,
+ "<r94>": 32108,
+ "<r95>": 32109,
+ "<r96>": 32110,
+ "<r97>": 32111,
+ "<r98>": 32112,
+ "<r99>": 32113,
+ "<r9>": 32023,
+ "<refer_box>": 32010,
+ "<refer_feat>": 32012,
+ "<region>": 32009,
+ "<roi>": 32004,
+ "<sep>": 32001,
+ "[PAD]": 32000,
+ "[grounding]": 32013
+ }
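For orientation (a note outside the commit itself): added_tokens.json registers 114 new tokens at ids 32000 through 32113 on top of the 32000-entry Llama vocabulary: the 100 region bins <r0> through <r99>, grounding and ROI markers, and [PAD]. A minimal sketch of verifying the mapping after downloading the repo; the local path is a hypothetical placeholder:

```python
# Minimal sketch (not part of the commit): load the tokenizer from a local
# copy of this repo and confirm the ids declared in added_tokens.json.
# "path/to/checkpoint" is a hypothetical placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")
print(len(tokenizer))                                  # 32114 = 32000 base + 114 added
print(tokenizer.convert_tokens_to_ids("<image>"))      # 32008
print(tokenizer.convert_tokens_to_ids("[grounding]"))  # 32013
```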
config.json ADDED
@@ -0,0 +1,76 @@
+ {
+ "_name_or_path": "checkpoints/pretrain_128bs",
+ "architectures": [
+ "GvllmModel"
+ ],
+ "box_score_thres": 0.15,
+ "llm_cfg": {
+ "_name_or_path": "vicuna-7b-v1.5",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 11008,
+ "max_position_embeddings": 4096,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 32,
+ "pad_token_id": 0,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float16",
+ "transformers_version": "4.32.0",
+ "use_cache": true,
+ "vocab_size": 32000
+ },
+ "model_type": "gvllm",
+ "num_new_token": 114,
+ "perceiver_cfg": {
+ "architectures": [
+ "CustomDDETRModel"
+ ],
+ "ddetr_cfg": {
+ "auxiliary_loss": true,
+ "class_cost": 2,
+ "cls_loss_coefficient": 2,
+ "id2label": {
+ "0": "LABEL_0"
+ },
+ "label2id": {
+ "LABEL_0": 0
+ },
+ "model_type": "deformable_detr",
+ "num_feature_levels": 1,
+ "two_stage": true,
+ "with_box_refine": true
+ },
+ "model_type": "ddetr",
+ "torch_dtype": "float32",
+ "transformers_version": "4.32.0",
+ "vis_encoder_cfg": {
+ "architectures": [
+ "Dinov2Model"
+ ],
+ "hidden_size": 1024,
+ "image_size": 518,
+ "model_type": "dinov2",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "patch_size": 14,
+ "torch_dtype": "float32"
+ },
+ "vis_encoder_path": "checkpoints/dinov2-large",
+ "vis_output_layer": -2,
+ "zs_weight_path": null
+ },
+ "torch_dtype": "float32",
+ "transformers_version": "4.32.0",
+ "vocab_size": 32114
+ }
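The config nests three components: a Vicuna-7B-v1.5 language model (llm_cfg), a Deformable-DETR-style perceiver (perceiver_cfg), and a DINOv2-large visual encoder (vis_encoder_cfg). Because model_type "gvllm" is custom, stock AutoConfig will not resolve it without the authors' modeling code; a minimal sketch that inspects the file with plain json instead (placeholder path):

```python
# Minimal sketch: read the nested config directly, since the custom
# GvllmModel class is not part of transformers. Path is a placeholder.
import json

with open("path/to/checkpoint/config.json") as f:
    cfg = json.load(f)

# 32114 = the Vicuna LLM's 32000-token vocab plus the 114 tokens added here.
assert cfg["vocab_size"] == cfg["llm_cfg"]["vocab_size"] + cfg["num_new_token"]
print(cfg["llm_cfg"]["_name_or_path"])                        # vicuna-7b-v1.5
print(cfg["perceiver_cfg"]["ddetr_cfg"]["model_type"])        # deformable_detr
print(cfg["perceiver_cfg"]["vis_encoder_cfg"]["model_type"])  # dinov2
```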
generation_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "bos_token_id": 1,
+ "do_sample": true,
+ "eos_token_id": 2,
+ "max_length": 4096,
+ "pad_token_id": 32000,
+ "temperature": 0.9,
+ "top_p": 0.6,
+ "transformers_version": "4.32.0"
+ }
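These are the default decoding settings; note that pad_token_id 32000 is the [PAD] token registered in added_tokens.json. A minimal sketch of loading them through transformers, with the path again a hypothetical placeholder:

```python
# Minimal sketch: load the decoding defaults declared above.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/to/checkpoint")
# do_sample=True with temperature 0.9 and nucleus sampling at top_p 0.6.
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p)  # True 0.9 0.6
print(gen_cfg.max_length)                                     # 4096
```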
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "crop_size": {
+ "height": 224,
+ "width": 224
+ },
+ "do_center_crop": false,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": false,
+ "image_mean": [
+ 0.485,
+ 0.456,
+ 0.406
+ ],
+ "image_processor_type": "BitImageProcessor",
+ "image_std": [
+ 0.229,
+ 0.224,
+ 0.225
+ ],
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 256
+ }
+ }
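Since do_resize and do_center_crop are both false, preprocessing reduces to RGB conversion, rescaling by 1/255 (the 0.00392... factor), and normalization with ImageNet mean/std; crop_size and size are present but inactive. A minimal sketch, with placeholder paths:

```python
# Minimal sketch: run an image through the processor declared above.
# With resizing and cropping disabled, the input keeps its native size.
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("path/to/checkpoint")
pixel_values = processor(Image.open("example.jpg"), return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, H, W]) at the image's own H x W
```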
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16a254f622242706714281c0d88ec08505beb16b5e3f8812b5f16aee984f4acf
+ size 33113972419
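This file is a Git LFS pointer: the repository stores only the three lines above, while the actual weights (33,113,972,419 bytes, about 33 GB) live in LFS storage. tokenizer.model and training_args.bin below use the same pointer format. A minimal sketch of resolving the pointer with huggingface_hub; the repo id is a hypothetical placeholder:

```python
# Minimal sketch: fetch the real weight file behind the LFS pointer.
# The repo_id is a hypothetical placeholder for this repository's id.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(repo_id="machuofan/example-repo",
                               filename="pytorch_model.bin")
print(weights_path)  # local cache path to the ~33 GB file
```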
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "[PAD]",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
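Note that pad_token is bound to the newly added "[PAD]" string (id 32000), while bos/eos/unk come from the base vocabulary. A quick hedged check, placeholder path as before:

```python
# Minimal sketch: confirm the role bindings declared above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkpoint")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok.pad_token, tok.pad_token_id)              # [PAD] 32000
```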
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "use_default_system_prompt": true
+ }
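The tokenizer prepends BOS but not EOS, and caps model_max_length at 2048 even though the LLM config allows 4096 positions. A minimal sketch, placeholder path as before:

```python
# Minimal sketch: the tokenizer class and length limit declared above.
from transformers import LlamaTokenizer

tok = LlamaTokenizer.from_pretrained("path/to/checkpoint")
print(type(tok).__name__, tok.model_max_length)  # LlamaTokenizer 2048
ids = tok("hello").input_ids
print(ids[0] == tok.bos_token_id)  # True: add_bos_token=True, add_eos_token=False
```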
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1399b30d83dbf1ca65d1657e64aa3b34fec477b04283bc03f98193eb60322fe5
+ size 4475