Farjfar committed on
Commit
e0d367b
1 Parent(s): 014e38f

Training in progress, step 500

Browse files
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "bert-base-cased",
3
  "architectures": [
4
  "BertForTokenClassification"
5
  ],
@@ -32,5 +32,5 @@
32
  "transformers_version": "4.40.2",
33
  "type_vocab_size": 2,
34
  "use_cache": true,
35
- "vocab_size": 28996
36
  }
 
1
  {
2
+ "_name_or_path": "princeton-nlp/sup-simcse-bert-base-uncased",
3
  "architectures": [
4
  "BertForTokenClassification"
5
  ],
 
32
  "transformers_version": "4.40.2",
33
  "type_vocab_size": 2,
34
  "use_cache": true,
35
+ "vocab_size": 30522
36
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2ac5aac1d73e90de6205193918be7ce9acf8bc881b4fa023d181e9b98330c3ab
3
- size 430911284
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8a9775db6eeb5e8b81afd9f28e5bc56ae26ee994b0729e0364f235f54f5bd0b
3
+ size 435599164
runs/May19_07-56-13_868affc172b8/events.out.tfevents.1716105379.868affc172b8.460.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:083eb0e7152db982434a89700a82a6bbe5dbf32a351135a32fb29aa17a09541b
3
+ size 437
runs/May19_07-58-28_868affc172b8/events.out.tfevents.1716105509.868affc172b8.460.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79704c51723081f03e8483dfe8ba594b8bd27ed91ad70edca3cf56b03c094fb9
3
+ size 4805
runs/May19_08-00-45_868affc172b8/events.out.tfevents.1716105645.868affc172b8.460.3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e37b326c5a7d9ddb59dc829ab99653307ef5de6afa7108d43e65e0f2f75215d8
3
+ size 5431
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -43,9 +43,11 @@
43
  },
44
  "clean_up_tokenization_spaces": true,
45
  "cls_token": "[CLS]",
46
- "do_lower_case": false,
 
47
  "mask_token": "[MASK]",
48
  "model_max_length": 512,
 
49
  "pad_token": "[PAD]",
50
  "sep_token": "[SEP]",
51
  "strip_accents": null,
 
43
  },
44
  "clean_up_tokenization_spaces": true,
45
  "cls_token": "[CLS]",
46
+ "do_basic_tokenize": true,
47
+ "do_lower_case": true,
48
  "mask_token": "[MASK]",
49
  "model_max_length": 512,
50
+ "never_split": null,
51
  "pad_token": "[PAD]",
52
  "sep_token": "[SEP]",
53
  "strip_accents": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cd85e9b26d719b6908668c43858e54ac087c3b8b4639de76d055746aeb8a548c
3
  size 4984
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:482888c6a476ef36827e8fd833ebf470c8e8c1836f34ee93b7d9aebace8d5dd4
3
  size 4984
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff