Hennara committed · Commit 2060569 (verified) · 1 Parent(s): 2dad71d

Upload processor

added_tokens.json ADDED
@@ -0,0 +1,6 @@
+{
+  "<end_of_utterance>": 58003,
+  "<fake_token_around_image>": 58001,
+  "<image>": 58002,
+  "[PAD]": 58000
+}
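
These four entries extend the base vocabulary (IDs 0–57999 in the underlying SentencePiece model) with the image and chat markers the processor relies on. A quick check that the mapping round-trips, assuming a hypothetical repo id `Hennara/kawn-idefics3` in place of the actual one:

```python
from transformers import AutoTokenizer

# Hypothetical repo id; substitute the actual model repository.
tok = AutoTokenizer.from_pretrained("Hennara/kawn-idefics3")

# Each added token should resolve to the ID declared in added_tokens.json.
for token in ["[PAD]", "<fake_token_around_image>", "<image>", "<end_of_utterance>"]:
    print(token, tok.convert_tokens_to_ids(token))
# Expected IDs: 58000, 58001, 58002, 58003
```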
preprocessor_config.json ADDED
@@ -0,0 +1,80 @@
+{
+  "do_convert_rgb": true,
+  "do_image_splitting": true,
+  "do_normalize": true,
+  "do_pad": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_grid_pinpoints": [
+    [
+      364,
+      364
+    ],
+    [
+      364,
+      728
+    ],
+    [
+      364,
+      1092
+    ],
+    [
+      1092,
+      364
+    ],
+    [
+      728,
+      364
+    ],
+    [
+      728,
+      728
+    ],
+    [
+      728,
+      1092
+    ],
+    [
+      1092,
+      728
+    ],
+    [
+      1092,
+      1092
+    ],
+    [
+      364,
+      1456
+    ],
+    [
+      1456,
+      364
+    ],
+    [
+      728,
+      1456
+    ],
+    [
+      1456,
+      728
+    ]
+  ],
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "KawnIdefics3ImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "processor_class": "KawnIdefics3Processor",
+  "resample": 1,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "longest_edge": 1456,
+    "shortest_edge": 364
+  }
+}
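
Two details worth noting: `rescale_factor` is 1/255, so together with an `image_mean` and `image_std` of 0.5 the pixel pipeline maps 8-bit values from [0, 255] to roughly [-1, 1], and `image_grid_pinpoints` enumerates the multiple-of-364 tile layouts tried when `do_image_splitting` is on, capped by `size.longest_edge` = 1456. A minimal numpy sketch of the rescale-and-normalize step only (not the actual `KawnIdefics3ImageProcessor` implementation):

```python
import numpy as np

rescale, mean, std = 1.0 / 255.0, 0.5, 0.5  # values from the config above

pixels = np.array([0, 128, 255], dtype=np.float32)
normalized = (pixels * rescale - mean) / std
print(normalized)  # [-1.0, ~0.0039, 1.0]
```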
processor_config.json ADDED
@@ -0,0 +1,5 @@
+{
+  "image_seq_len": 169,
+  "patch_size": 14,
+  "processor_class": "KawnIdefics3Processor"
+}
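
`image_seq_len` lines up with the 364-pixel tiles from the preprocessor config: 364 / 14 = 26 patches per side, and assuming the pixel-shuffle connector with scale factor 2 that standard Idefics3 models use, each tile collapses to (26 / 2)² = 169 visual tokens:

```python
tile_edge, patch_size = 364, 14  # from the configs above
shuffle_factor = 2               # assumption: standard Idefics3 pixel-shuffle factor

patches_per_side = tile_edge // patch_size                 # 26
image_seq_len = (patches_per_side // shuffle_factor) ** 2  # 13 ** 2
print(image_seq_len)                                       # 169, matching the config
```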
special_tokens_map.json ADDED
@@ -0,0 +1,53 @@
+{
+  "additional_special_tokens": [
+    {
+      "content": "<fake_token_around_image>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<image>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<end_of_utterance>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    }
+  ],
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
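
These markers follow the Idefics-style prompt convention: each image slot is fenced by `<fake_token_around_image>`, filled with `image_seq_len` copies of `<image>`, and each chat turn is closed with `<end_of_utterance>`. One plausible expanded form for a single unsplit tile, written out by hand (the processor performs this expansion itself):

```python
image_seq_len = 169  # from processor_config.json

# Illustrative only: one image placeholder block inside a single user turn.
image_block = (
    "<fake_token_around_image>"
    + "<image>" * image_seq_len
    + "<fake_token_around_image>"
)
prompt = f"User:{image_block}Describe the image.<end_of_utterance>\nAssistant:"
```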
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aada00638c599dd90c11481439c87cf951ef9d8be055ae3bb34951f0a4379c2
+size 1044543
tokenizer_config.json ADDED
@@ -0,0 +1,82 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "58000": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "58001": {
+      "content": "<fake_token_around_image>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "58002": {
+      "content": "<image>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "58003": {
+      "content": "<end_of_utterance>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<fake_token_around_image>",
+    "<image>",
+    "<end_of_utterance>"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "extra_special_tokens": {},
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
+  "padding_side": "right",
+  "processor_class": "KawnIdefics3Processor",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
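
Since `KawnIdefics3Processor` and `KawnIdefics3ImageProcessor` are custom classes rather than stock transformers ones, loading them presumably needs `trust_remote_code=True`. An end-to-end sketch, again with a hypothetical repo id and a placeholder image URL:

```python
import requests
from PIL import Image
from transformers import AutoProcessor

# Hypothetical repo id; trust_remote_code pulls in the custom Kawn classes.
processor = AutoProcessor.from_pretrained("Hennara/kawn-idefics3", trust_remote_code=True)

image = Image.open(requests.get("https://example.com/cat.jpg", stream=True).raw)
inputs = processor(
    text="User:<image>Describe the image.<end_of_utterance>\nAssistant:",
    images=[image],
    return_tensors="pt",
)
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)
```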