jinymusim committed
Commit e9e7092
1 parent: 9d3aa15

Upload 8 files

Dialog Model fixed state

config.json CHANGED
@@ -33,7 +33,7 @@
    }
  },
  "torch_dtype": "float32",
- "transformers_version": "4.27.4",
+ "transformers_version": "4.28.1",
  "use_cache": true,
  "vocab_size": 50261
  }
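The only change here is the transformers_version stamp, bumped from 4.27.4 to 4.28.1 by re-saving the checkpoint. A minimal sketch of reading that field back from the exported file (the local path is an assumption, not part of this commit):

import json

# Path is assumed; config.json is the file shown in the diff above.
with open("config.json") as f:
    config = json.load(f)

print(config["transformers_version"])  # "4.28.1" after this commit
print(config["vocab_size"])            # 50261, unchanged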
generation_config.json CHANGED
@@ -2,5 +2,5 @@
  "_from_model_config": true,
  "bos_token_id": 50256,
  "eos_token_id": 50256,
- "transformers_version": "4.27.4"
+ "transformers_version": "4.28.1"
  }
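generation_config.json carries the same version bump and nothing else; the token ids stay put. A hedged sketch of loading it through the transformers API, assuming the repo files sit in a local directory named ./checkpoint:

from transformers import GenerationConfig

# Directory name is an assumption; from_pretrained also accepts a Hub repo id.
gen = GenerationConfig.from_pretrained("./checkpoint")
assert gen.bos_token_id == 50256
assert gen.eos_token_id == 50256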
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54742395fef99a7de90f429f408e9d75441db1e60eeb008caa0a5260b9ff3bdd
+ oid sha256:2c4c87b4f001a282ac8f2437000d1f2d05a753d889868162c7c276c2f78f7e5d
  size 510410301
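The weights live in Git LFS, so only the pointer file changes: a new sha256 oid with the byte size unchanged at 510410301. A minimal sketch for checking a downloaded pytorch_model.bin against the new oid (the filename and hash come from the diff; everything else is an assumption):

import hashlib

EXPECTED = "2c4c87b4f001a282ac8f2437000d1f2d05a753d889868162c7c276c2f78f7e5d"

sha = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    # Stream in 1 MiB chunks so the ~510 MB file is never fully in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(sha.hexdigest() == EXPECTED)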
special_tokens_map.json CHANGED
@@ -1,10 +1,4 @@
  {
- "additional_special_tokens": [
-   "<|system|>",
-   "<|user|>",
-   "<|endoftext|>",
-   "<|belive|>"
- ],
  "bos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
  {
  "version": "1.0",
- "truncation": {
-   "direction": "Right",
-   "max_length": 1024,
-   "strategy": "LongestFirst",
-   "stride": 0
- },
+ "truncation": null,
  "padding": null,
  "added_tokens": [
    {
@@ -14,34 +9,7 @@
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
-     "normalized": false,
-     "special": true
-   },
-   {
-     "id": 50257,
-     "content": "<|system|>",
-     "single_word": false,
-     "lstrip": false,
-     "rstrip": false,
-     "normalized": false,
-     "special": true
-   },
-   {
-     "id": 50258,
-     "content": "<|user|>",
-     "single_word": false,
-     "lstrip": false,
-     "rstrip": false,
-     "normalized": false,
-     "special": true
-   },
-   {
-     "id": 50259,
-     "content": "<|belive|>",
-     "single_word": false,
-     "lstrip": false,
-     "rstrip": false,
-     "normalized": false,
+     "normalized": true,
      "special": true
    }
  ],
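Two behavioural changes here: the baked-in truncation rule (right-side, max_length 1024, LongestFirst) is cleared to null, and the one remaining added-token entry flips to "normalized": true. Callers who relied on automatic truncation now have to request it explicitly; a sketch, with the checkpoint path again assumed:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

# Truncation is no longer on by default; opt back in per call.
enc = tokenizer("a very long dialog ...", truncation=True, max_length=1024)
assert len(enc["input_ids"]) <= 1024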
tokenizer_config.json CHANGED
@@ -9,6 +9,7 @@
    "rstrip": false,
    "single_word": false
  },
+ "clean_up_tokenization_spaces": true,
  "eos_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
@@ -20,7 +21,6 @@
  "errors": "replace",
  "model_max_length": 1024,
  "pad_token": null,
- "special_tokens_map_file": null,
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": {
    "__type": "AddedToken",