arxyzan committed
Commit ef087db
1 Parent(s): 53709b9

Hezar: Upload tokenizer_config.yaml

Files changed (1)
  1. preprocessor/tokenizer_config.yaml +9 -17
preprocessor/tokenizer_config.yaml CHANGED

@@ -1,6 +1,5 @@
  name: wordpiece_tokenizer
  config_type: preprocessor
- pretrained_path: hezar-ai/bert-base-fa
  max_length: 512
  truncation_strategy: longest_first
  truncation_direction: right
@@ -8,22 +7,15 @@ stride: 0
  padding_strategy: longest
  padding_direction: right
  pad_to_multiple_of: 0
- pad_token_id: 0
- pad_token: '[PAD]'
  pad_token_type_id: 0
  unk_token: '[UNK]'
- special_tokens:
- - '[UNK]'
- - '[SEP]'
- - '[CLS]'
- - '[PAD]'
- - '[MASK]'
+ sep_token: '[SEP]'
+ pad_token: '[PAD]'
+ cls_token: '[CLS]'
+ mask_token: '[MASK]'
  wordpieces_prefix: '##'
- train_config:
-   name: wordpiece_tokenizer
-   config_type: preprocessor
-   vocab_size: 30000
-   min_frequency: 2
-   limit_alphabet: 1000
-   initial_alphabet: []
-   show_progress: true
+ vocab_size: 42000
+ min_frequency: 2
+ limit_alphabet: 1000
+ initial_alphabet: []
+ show_progress: true
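For reference, the snippet below is a minimal sketch that reads the post-commit config with plain PyYAML and checks the fields this commit touches. The inlined YAML is a reconstruction from the new-file side of the diff, and the use of PyYAML (rather than Hezar's own preprocessor loader) is an assumption made purely for illustration.

# Sketch: parse the updated tokenizer_config.yaml and check the keys this
# commit changes. The YAML string is reconstructed from the new-file side
# of the diff above (an assumption for illustration, not part of the commit).
import yaml  # pip install pyyaml

NEW_CONFIG = """
name: wordpiece_tokenizer
config_type: preprocessor
max_length: 512
truncation_strategy: longest_first
truncation_direction: right
stride: 0
padding_strategy: longest
padding_direction: right
pad_to_multiple_of: 0
pad_token_type_id: 0
unk_token: '[UNK]'
sep_token: '[SEP]'
pad_token: '[PAD]'
cls_token: '[CLS]'
mask_token: '[MASK]'
wordpieces_prefix: '##'
vocab_size: 42000
min_frequency: 2
limit_alphabet: 1000
initial_alphabet: []
show_progress: true
"""

config = yaml.safe_load(NEW_CONFIG)

# The old nested `special_tokens` list is replaced by explicit token fields.
for key in ("unk_token", "sep_token", "pad_token", "cls_token", "mask_token"):
    assert key in config, f"missing {key}"

# Training-related keys now live at the top level, and vocab_size is 42000.
assert config["vocab_size"] == 42000
assert "train_config" not in config
assert "pretrained_path" not in config

print({k: config[k] for k in ("name", "vocab_size", "pad_token", "mask_token")})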