cuongdz01 committed on
Commit a8f0fb2 · 1 Parent(s): fddaacf

End of training

README.md ADDED
@@ -0,0 +1,84 @@
+ ---
+ license: mit
+ base_model: microsoft/layoutlm-base-uncased
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: layoutlm-cord
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # layoutlm-cord
+
+ This model is a fine-tuned version of [microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - eval_loss: 0.1624
+ - eval_enu.cnt: {'precision': 0.9861111111111112, 'recall': 0.9681818181818181, 'f1': 0.9770642201834862, 'number': 220}
+ - eval_enu.discountprice: {'precision': 0.6666666666666666, 'recall': 0.6, 'f1': 0.631578947368421, 'number': 10}
+ - eval_enu.etc: {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 3}
+ - eval_enu.itemsubtotal: {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 6}
+ - eval_enu.nm: {'precision': 0.9525691699604744, 'recall': 0.9601593625498008, 'f1': 0.9563492063492064, 'number': 251}
+ - eval_enu.num: {'precision': 0.9090909090909091, 'recall': 0.9090909090909091, 'f1': 0.9090909090909091, 'number': 11}
+ - eval_enu.price: {'precision': 0.9568627450980393, 'recall': 0.991869918699187, 'f1': 0.9740518962075848, 'number': 246}
+ - eval_enu.sub.cnt: {'precision': 0.85, 'recall': 1.0, 'f1': 0.9189189189189189, 'number': 17}
+ - eval_enu.sub.nm: {'precision': 0.8285714285714286, 'recall': 0.9354838709677419, 'f1': 0.8787878787878788, 'number': 31}
+ - eval_enu.sub.price: {'precision': 1.0, 'recall': 0.95, 'f1': 0.9743589743589743, 'number': 20}
+ - eval_enu.unitprice: {'precision': 0.984375, 'recall': 0.9402985074626866, 'f1': 0.9618320610687023, 'number': 67}
+ - eval_otal.cashprice: {'precision': 0.9558823529411765, 'recall': 0.9558823529411765, 'f1': 0.9558823529411765, 'number': 68}
+ - eval_otal.changeprice: {'precision': 0.9655172413793104, 'recall': 1.0, 'f1': 0.9824561403508771, 'number': 56}
+ - eval_otal.creditcardprice: {'precision': 0.7647058823529411, 'recall': 0.8125, 'f1': 0.787878787878788, 'number': 16}
+ - eval_otal.emoneyprice: {'precision': 0.3333333333333333, 'recall': 0.5, 'f1': 0.4, 'number': 2}
+ - eval_otal.menuqty_cnt: {'precision': 0.9333333333333333, 'recall': 0.9655172413793104, 'f1': 0.9491525423728815, 'number': 29}
+ - eval_otal.menutype_cnt: {'precision': 1.0, 'recall': 0.7142857142857143, 'f1': 0.8333333333333333, 'number': 7}
+ - eval_otal.total_etc: {'precision': 0.5, 'recall': 0.3333333333333333, 'f1': 0.4, 'number': 3}
+ - eval_otal.total_price: {'precision': 0.9583333333333334, 'recall': 0.968421052631579, 'f1': 0.9633507853403142, 'number': 95}
+ - eval_ub_total.discount_price: {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 7}
+ - eval_ub_total.etc: {'precision': 0.875, 'recall': 0.7777777777777778, 'f1': 0.823529411764706, 'number': 9}
+ - eval_ub_total.service_price: {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 12}
+ - eval_ub_total.subtotal_price: {'precision': 0.9545454545454546, 'recall': 0.9692307692307692, 'f1': 0.9618320610687022, 'number': 65}
+ - eval_ub_total.tax_price: {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 43}
+ - eval_overall_precision: 0.9522
+ - eval_overall_recall: 0.9544
+ - eval_overall_f1: 0.9533
+ - eval_overall_accuracy: 0.9707
+ - eval_runtime: 3.0438
+ - eval_samples_per_second: 32.853
+ - eval_steps_per_second: 4.271
+ - epoch: 1.0
+ - step: 50
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-06
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 15
+ - mixed_precision_training: Native AMP
+
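For reference, here is a minimal sketch of how the hyperparameters listed above map onto `transformers.TrainingArguments`. It is not taken from the commit; the `output_dir` and anything not in the list are assumptions.

```python
from transformers import TrainingArguments

# Hedged sketch only: mirrors the hyperparameters listed above.
training_args = TrainingArguments(
    output_dir="layoutlm-cord",      # assumed; matches the model name
    learning_rate=1e-6,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=15,
    fp16=True,                       # "Native AMP" mixed precision
)
```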
+ ### Framework versions
+
+ - Transformers 4.36.0
+ - Pytorch 2.0.0
+ - Datasets 2.16.1
+ - Tokenizers 0.15.0
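The card ships without a usage snippet, so here is a minimal inference sketch. The repo id `cuongdz01/layoutlm-cord` is assumed from the commit author and model name, `receipt.png` is a placeholder input, and because the shipped processor config sets `apply_ocr: true`, pytesseract needs to be installed.

```python
from PIL import Image
import torch
from transformers import AutoProcessor, LayoutLMForTokenClassification

repo_id = "cuongdz01/layoutlm-cord"  # assumed from the commit author and model name

processor = AutoProcessor.from_pretrained(repo_id)           # LayoutLMv2Processor, apply_ocr=True
model = LayoutLMForTokenClassification.from_pretrained(repo_id)

image = Image.open("receipt.png").convert("RGB")             # placeholder receipt scan
encoding = processor(image, return_tensors="pt")             # runs Tesseract OCR internally
encoding.pop("image", None)                                  # LayoutLM (v1) takes no pixel input

with torch.no_grad():
    logits = model(**encoding).logits

predicted_ids = logits.argmax(-1).squeeze().tolist()
labels = [model.config.id2label[i] for i in predicted_ids]   # label names from the model config
print(labels)
```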
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a832169796266be04387239adcb0ff5709731bc7bcd7c91cbcfefacb16e8ab49
+ oid sha256:b0c4ccfac9703de2700f9f8a8deef5682d64b84728ea3d9eca01808062d16e74
  size 450625884
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "apply_ocr": true,
+   "do_resize": true,
+   "feature_extractor_type": "LayoutLMv2FeatureExtractor",
+   "image_processor_type": "LayoutLMv2ImageProcessor",
+   "ocr_lang": null,
+   "processor_class": "LayoutLMv2Processor",
+   "resample": 2,
+   "size": {
+     "height": 224,
+     "width": 224
+   },
+   "tesseract_config": ""
+ }
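With `apply_ocr: true`, the processor runs Tesseract on the raw image itself. If you already have OCR output, a processor can instead be assembled with `apply_ocr=False` and the words and boxes passed in explicitly; the sketch below uses made-up words and boxes and the same assumed repo id.

```python
from PIL import Image
from transformers import LayoutLMv2ImageProcessor, LayoutLMv2Processor, LayoutLMv2Tokenizer

repo_id = "cuongdz01/layoutlm-cord"  # assumed repo id

# apply_ocr=False lets you supply your own OCR results instead of Tesseract.
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False, size={"height": 224, "width": 224})
tokenizer = LayoutLMv2Tokenizer.from_pretrained(repo_id)
processor = LayoutLMv2Processor(image_processor, tokenizer)

image = Image.new("RGB", (762, 1000), color="white")                     # stand-in receipt image
words = ["Americano", "1", "4,500"]                                      # words from your own OCR
boxes = [[60, 80, 260, 110], [300, 80, 330, 110], [400, 80, 520, 110]]   # boxes on a 0-1000 grid

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(encoding.keys())  # input_ids, token_type_ids, attention_mask, bbox, image
```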
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,79 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "cls_token_box": [
+     0,
+     0,
+     0,
+     0
+   ],
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "only_label_first_subword": true,
+   "pad_token": "[PAD]",
+   "pad_token_box": [
+     0,
+     0,
+     0,
+     0
+   ],
+   "pad_token_label": -100,
+   "processor_class": "LayoutLMv2Processor",
+   "sep_token": "[SEP]",
+   "sep_token_box": [
+     1000,
+     1000,
+     1000,
+     1000
+   ],
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "LayoutLMv2Tokenizer",
+   "unk_token": "[UNK]"
+ }
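Two of the settings above matter when preparing training data: `only_label_first_subword: true` and `pad_token_label: -100`. A small sketch of how word-level labels are encoded, using the same assumed repo id and made-up words, boxes, and label ids:

```python
from transformers import LayoutLMv2Tokenizer

tokenizer = LayoutLMv2Tokenizer.from_pretrained("cuongdz01/layoutlm-cord")  # assumed repo id

words = ["Americano", "4,500"]                       # made-up receipt words
boxes = [[60, 80, 260, 110], [400, 80, 520, 110]]    # boxes normalized to a 0-1000 grid
word_labels = [5, 9]                                 # hypothetical label ids

encoding = tokenizer(words, boxes=boxes, word_labels=word_labels)

# Only the first sub-token of each word keeps its label; remaining sub-tokens
# and the special tokens receive -100 and are ignored by the loss.
print(encoding["labels"])  # e.g. [-100, 5, -100, 9, -100, -100, -100]
```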
vocab.txt ADDED
The diff for this file is too large to render. See raw diff