nlztrk committed
Commit 5a84dad
1 parent: f4d9878

Upload ./ with huggingface_hub (#1)

- Upload ./ with huggingface_hub (bd985d1e855ad0d9c313e3c424c8b4479245aacf)

config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "_name_or_path": "dbmdz/bert-base-turkish-cased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4",
+     "5": "LABEL_5",
+     "6": "LABEL_6",
+     "7": "LABEL_7",
+     "8": "LABEL_8",
+     "9": "LABEL_9"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Alakasiz": 0,
+     "Barinma": 1,
+     "Elektronik": 2,
+     "Giysi": 3,
+     "Kurtarma": 4,
+     "Lojistik": 5,
+     "Saglik": 6,
+     "Su": 7,
+     "Yagma": 8,
+     "Yemek": 9
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "multi_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 32000
+ }
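As a reading aid, a minimal inference sketch for this configuration follows; it is an illustration, not the repository's own code. It assumes the files in this commit sit in a local directory, and the model_dir path, the sample sentence, and the 0.5 decision threshold are all placeholders. Note that id2label only carries the generic LABEL_i names, so the Turkish category names are recovered by inverting label2id.

# Minimal sketch (assumptions: local checkpoint directory, illustrative
# input text, illustrative 0.5 threshold). Not the author's own code.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_dir = "./"  # hypothetical path to the files uploaded in this commit
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
model.eval()

text = "Enkaz altindayiz, su ve kurtarma ekibi lazim"  # illustrative example
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 10)

# problem_type is "multi_label_classification": each label gets an
# independent sigmoid score instead of one softmax over all ten labels.
probs = torch.sigmoid(logits)[0]

# id2label holds LABEL_0..LABEL_9, so invert label2id for readable names.
id2name = {v: k for k, v in model.config.label2id.items()}  # 4 -> "Kurtarma"
predicted = [id2name[i] for i, p in enumerate(probs.tolist()) if p > 0.5]
print(predicted)  # some subset of the ten categories above

Because the problem is multi-label, several categories can fire at once; thresholding each sigmoid independently is the usual decode, and 0.5 is only a default choice.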
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c71e4faa31278de27855a0444e6b9c31f72b4428a4a32b4707d05b065f10ebab
+ size 885118085
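These three `+` lines are a Git LFS pointer, not the roughly 885 MB optimizer state itself: the actual blob is stored out of band and identified by its SHA-256 digest and byte size. The same pointer format recurs below for pytorch_model.bin, rng_state.pth, scheduler.pt, and training_args.bin. A sketch of verifying a downloaded blob against such a pointer, with illustrative file names:

# Sketch: check a downloaded file against a Git LFS pointer like the one
# above. The pointer is three "key value" lines: version, oid, size.
import hashlib

def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify(blob_path, pointer_path):
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])
    h = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size

# e.g. verify("optimizer.pt", "optimizer.pt.pointer")  # paths illustrative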
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac401c37b92e4a898d30e588532ecd94dfc675813affd3d009528c029b749b9e
+ size 442572917
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44f4945f6e3efe7883e411d8aab40b94f06b6a1eb74b29e134c776a5ea08ae28
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa0ed8d002b2243afe806ae75b93cda8eded403655be119f18957bb332a19459
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "max_len": 512,
+   "model_max_length": 512,
+   "name_or_path": "dbmdz/bert-base-turkish-cased",
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": null,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
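These keys map one-to-one onto BertTokenizer constructor arguments; AutoTokenizer.from_pretrained applies them automatically when pointed at this directory, but spelled out by hand (a sketch, using the vocab.txt added at the end of this commit) the equivalent construction is roughly:

# Sketch: explicit BertTokenizer construction matching the config above.
# In practice AutoTokenizer.from_pretrained("./") does this for you.
from transformers import BertTokenizer

tokenizer = BertTokenizer(
    vocab_file="vocab.txt",   # added at the end of this commit
    do_lower_case=False,      # cased Turkish model: keep case
    do_basic_tokenize=True,
    never_split=None,
    unk_token="[UNK]",
    sep_token="[SEP]",
    pad_token="[PAD]",
    cls_token="[CLS]",
    mask_token="[MASK]",
    tokenize_chinese_chars=True,
    strip_accents=None,       # None = defer to do_lower_case (accents kept here)
    model_max_length=512,
)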
trainer_state.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "best_metric": 0.12779545783996582,
+   "best_model_checkpoint": "turkish_multilabel_intent_bert-base-turkish-cased/checkpoint-1392",
+   "epoch": 4.0,
+   "global_step": 1392,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.15027593076229095,
+       "eval_runtime": 0.4945,
+       "eval_samples_per_second": 1249.718,
+       "eval_steps_per_second": 78.866,
+       "step": 348
+     },
+     {
+       "epoch": 1.44,
+       "learning_rate": 3.2040229885057474e-05,
+       "loss": 0.1951,
+       "step": 500
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.1329057365655899,
+       "eval_runtime": 0.4918,
+       "eval_samples_per_second": 1256.527,
+       "eval_steps_per_second": 79.295,
+       "step": 696
+     },
+     {
+       "epoch": 2.87,
+       "learning_rate": 1.4080459770114942e-05,
+       "loss": 0.1077,
+       "step": 1000
+     },
+     {
+       "epoch": 3.0,
+       "eval_loss": 0.12863321602344513,
+       "eval_runtime": 0.4914,
+       "eval_samples_per_second": 1257.609,
+       "eval_steps_per_second": 79.364,
+       "step": 1044
+     },
+     {
+       "epoch": 4.0,
+       "eval_loss": 0.12779545783996582,
+       "eval_runtime": 0.4919,
+       "eval_samples_per_second": 1256.296,
+       "eval_steps_per_second": 79.281,
+       "step": 1392
+     }
+   ],
+   "max_steps": 1392,
+   "num_train_epochs": 4,
+   "total_flos": 1461686896158720.0,
+   "trial_name": null,
+   "trial_params": null
+ }
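The log shows eval_loss falling monotonically, from 0.1503 after epoch 1 to 0.1278 after epoch 4, which is why checkpoint-1392 is recorded as the best checkpoint. A small sketch for pulling that curve back out of the file (the path is illustrative):

# Sketch: extract the per-epoch eval_loss curve from trainer_state.json.
# log_history mixes training and eval entries, so filter on "eval_loss".
import json

with open("trainer_state.json") as f:  # illustrative path
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f"epoch {e['epoch']:.0f}  step {e['step']:4d}  eval_loss {e['eval_loss']:.4f}")

best = min(evals, key=lambda e: e["eval_loss"])
assert best["eval_loss"] == state["best_metric"]  # 0.1278 at step 1392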
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dcb14a65982643bd4223a5be1bd3c0b1268ede3f0b43504790b6cf584f25ff84
+ size 3515
vocab.txt ADDED
The diff for this file is too large to render.