Kornraphop Kawintiranon committed
Commit ae46ce5
1 Parent(s): 91bdec2

first version

added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"#voteblue2020": 30522, "#bidenharris2020": 30523, "#maga2020": 30524, "#demdebate": 30525, "#maga": 30526, "#votered": 30527, "#votebluetoendthisnightmare": 30528, "#demconvention": 30529, "#makeamericagreatagain": 30530, "#votebluenomatterwho": 30531, "#dnc": 30532, "#votebluetosaveamerica": 30533, "#gop": 30534, "#trump2020": 30535, "#qanon": 30536, "#biden2020": 30537, "retweet": 30538, "#kag": 30539, "#dumptrump": 30540, "#democraticdebate": 30541, "#voteblue": 30542}
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 30543
+ }
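
Taken together, this config describes a standard BERT-base encoder (12 layers, 12 heads, hidden size 768) with a three-way sequence-classification head and a vocabulary enlarged to 30543 (30522 base WordPiece entries plus the 21 added tokens above). A minimal loading sketch; the local directory name is hypothetical:

import torch
from transformers import BertForSequenceClassification

# Hypothetical local directory holding the files from this commit.
model = BertForSequenceClassification.from_pretrained("./bert-election2020")

# num_labels comes from the three-entry id2label map; the embedding matrix
# has 30543 rows to cover the added hashtag tokens.
assert model.config.num_labels == 3
assert model.config.vocab_size == 30543

# Logits for a dummy batch have shape (batch_size, 3).
dummy_ids = torch.randint(0, model.config.vocab_size, (1, 16))
print(model(input_ids=dummy_ids).logits.shape)  # torch.Size([1, 3])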
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb041d37be38bb52f8b2cd86ac8bbfb7b8a80929d8e31790014a2bc831bced0e
+ size 438057265
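
This is a Git LFS pointer file rather than the weights themselves: the roughly 438 MB binary is stored out of band and identified by its SHA-256 digest. A downloaded copy can be checked against the pointer with a short sketch like the following (local filename assumed):

import hashlib
import os

# Hypothetical local copy of the real (LFS-resolved) weights file.
path = "pytorch_model.bin"

# The pointer records the expected digest and byte size; checking both
# confirms the LFS object was fetched intact.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == 438057265
assert sha.hexdigest() == "cb041d37be38bb52f8b2cd86ac8bbfb7b8a80929d8e31790014a2bc831bced0e"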
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "model_max_length": 512, "special_tokens_map_file": "/home/ken/projects/Stance-Sentiment/language_models/English/election2020/bert-election2020-tweet-5M-censored-only-url-1ep-lr-1e-4/special_tokens_map.json", "full_tokenizer_file": null}
vocab.txt ADDED
The diff for this file is too large to render.