HueyNemud committed
Commit 359d820
1 Parent(s): 20b179f

model data

Browse files
- README.md +49 -0
- config.json +51 -0
- optimizer.pt +3 -0
- pytorch_model.bin +3 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- trainer_state.json +214 -0
- training_args.bin +3 -0
README.md
ADDED
@@ -0,0 +1,49 @@
+---
+tags:
+- generated_from_trainer
+model-index:
+- name: CamemBERT pretrained on french trade directories from the XIXth century
+  results: []
+---
+
+# CamemBERT trained and fine-tuned for NER on french trade directories from the XIXth century [PERO-OCR training set]
+
+This model is part of the material of the paper
+> Abadie, N., Carlinet, E., Chazalon, J., Duménieu, B. (2022). A
+> Benchmark of Named Entity Recognition Approaches in Historical
+> Documents: Application to 19th Century French Directories. In: Uchida,
+> S., Barney, E., Eglin, V. (eds) Document Analysis Systems. DAS 2022.
+> Lecture Notes in Computer Science, vol 13237. Springer, Cham.
+> https://doi.org/10.1007/978-3-031-06555-2_30
+
+The source code to train this model is available on the [GitHub repository](https://github.com/soduco/paper-ner-bench-das22) of the paper as a Jupyter notebook in `src/ner/40_experiment_2.ipynb`.
+
+
+## Model description
+This model adapts the model [Jean-Baptiste/camembert-ner](https://huggingface.co/Jean-Baptiste/camembert-ner) for NER on 6004 manually annotated directory entries, referred to as the "reference dataset" in the paper.
+
+Trade directory entries are short and strongly structured texts giving the name, activity and location of a person or business, e.g.:
+```
+Peynaud, R. de la Vieille Bouclerie, 18. Richard, Joullain et comp., (commission- —Phéâtre Français. naire, (entrepôt), au port de la Rapée-
+```
+
+## Intended uses & limitations
+This model is intended for reproducibility of the NER evaluation published in the DAS2022 paper.
+Several derived models trained for NER on trade directories are available on HuggingFace, each trained on a different dataset:
+- [das22-10-camembert_pretrained_finetuned_ref](): trained for NER on ~6000 directory entries manually corrected.
+- [das22-10-camembert_pretrained_finetuned_pero](): trained for NER on ~6000 directory entries extracted with PERO-OCR.
+- [das22-10-camembert_pretrained_finetuned_tess](): trained for NER on ~6000 directory entries extracted with Tesseract.
+
+
+
+### Training hyperparameters
+
+### Training results
+
+### Framework versions
+
+- Transformers 4.16.0.dev0
+- Pytorch 1.10.1+cu102
+- Datasets 1.17.0
+- Tokenizers 0.10.3
+
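For orientation, a minimal inference sketch (not part of the committed files) showing how a fine-tuned checkpoint like this one is typically used with the `transformers` token-classification pipeline. The repository id below is an assumption inferred from the checkpoint path in `trainer_state.json`; substitute the actual model id published on the Hub.

```python
from transformers import pipeline

# Assumed repository id (taken from the checkpoint path in trainer_state.json);
# replace with the actual model id if it differs.
MODEL_ID = "HueyNemud/das22-10-camembert_pretrained_finetuned_pero"

# Token-classification pipeline; "simple" aggregation merges sub-word pieces
# into entity spans labelled with the classes defined in config.json.
ner = pipeline("token-classification", model=MODEL_ID, aggregation_strategy="simple")

entry = "Peynaud, R. de la Vieille Bouclerie, 18."
for entity in ner(entry):
    print(entity["entity_group"], entity["word"], round(float(entity["score"]), 3))
```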
config.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "_name_or_path": "HueyNemud/das22-10-camembert_pretrained",
+  "architectures": [
+    "CamembertForTokenClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 5,
+  "classifier_dropout": null,
+  "eos_token_id": 6,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "O",
+    "1": "I-LOC",
+    "2": "I-PER",
+    "3": "I-MISC",
+    "4": "I-ORG",
+    "5": "I-CARDINAL",
+    "6": "I-ACT",
+    "7": "I-TITRE",
+    "8": "I-FT"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "I-ACT": 6,
+    "I-CARDINAL": 5,
+    "I-FT": 8,
+    "I-LOC": 1,
+    "I-MISC": 3,
+    "I-ORG": 4,
+    "I-PER": 2,
+    "I-TITRE": 7,
+    "O": 0
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "camembert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.15.0",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 32005
+}
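The `id2label` map in this config defines the nine tags predicted by the token-classification head: the outside tag `O` plus eight entity types such as `I-PER`, `I-ACT` and `I-TITRE`. A small illustrative sketch, assuming the `config.json` above has been checked out locally, that lists the entity types:

```python
import json

# Read the config added in this commit (local checkout assumed).
with open("config.json", encoding="utf-8") as f:
    config = json.load(f)

# id2label keys are string indices; sort them numerically and drop the "O" tag.
labels = [config["id2label"][k] for k in sorted(config["id2label"], key=int)]
entity_types = [label for label in labels if label != "O"]
print(entity_types)
# ['I-LOC', 'I-PER', 'I-MISC', 'I-ORG', 'I-CARDINAL', 'I-ACT', 'I-TITRE', 'I-FT']
```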
optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcf98e1b00eeaf32429e0d3ce91bb56f8d315b53cc601a6c980093af31312048
+size 880421605
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10b4c449f459a1dd150bde11f8cf172d41be99a9f3b58800012ae433cc402cc2
+size 440237809
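The large binaries in this commit (optimizer state, model weights, RNG state, scheduler, SentencePiece model, training args) are stored as Git LFS pointers: three-line stubs giving the LFS spec version, the SHA-256 of the real payload, and its size in bytes. As an illustrative check only, a downloaded weight file could be verified against the `oid` recorded above:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hexadecimal SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid copied from the pytorch_model.bin LFS pointer in this commit.
EXPECTED = "10b4c449f459a1dd150bde11f8cf172d41be99a9f3b58800012ae433cc402cc2"
assert sha256_of("pytorch_model.bin") == EXPECTED, "checksum mismatch"
```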
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de31c8caf9890f76aa2de98c914e1aa6dfdd878c7859cced157280250484890c
+size 14503
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e026fb7cb5555507d8791aa654af1e3922b1146f7bb21228d191cc76d86c51e1
+size 623
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:988bc5a00281c6d210a5d34bd143d0363741a432fefe741bf71e61b1869d4314
+size 810912
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>", "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"]}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"], "model_max_length": 512, "name_or_path": "HueyNemud/das22-10-camembert_pretrained", "special_tokens_map_file": "/lrde/home2/jchazalo/.cache/huggingface/transformers/fe0e213c44079a9ee091098f81fff2941484006e9ba3001a9bf1ee9f87537599.cb3ec3a6c1200d181228d8825ae9767572abca54efa1bbb37fd83d721b2ef323", "sp_model_kwargs": {}, "tokenizer_class": "CamembertTokenizer"}
trainer_state.json
ADDED
@@ -0,0 +1,214 @@
+{
+  "best_metric": 0.9595484477892756,
+  "best_model_checkpoint": "/content/drive/MyDrive/SODUCO/article_das_2022/43-camembert_pretrained_finetuned_pero/checkpoint-1500",
+  "epoch": 3.9893617021276597,
+  "global_step": 1500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.27,
+      "eval_accuracy": 0.9711394302848576,
+      "eval_f1": 0.9418582912693962,
+      "eval_loss": 0.24740596115589142,
+      "eval_precision": 0.9343471810089021,
+      "eval_recall": 0.9494911421032793,
+      "eval_runtime": 2.7876,
+      "eval_samples_per_second": 239.631,
+      "eval_steps_per_second": 15.067,
+      "step": 100
+    },
+    {
+      "epoch": 0.53,
+      "eval_accuracy": 0.9741379310344828,
+      "eval_f1": 0.950751879699248,
+      "eval_loss": 0.15050481259822845,
+      "eval_precision": 0.9482564679415073,
+      "eval_recall": 0.9532604598567659,
+      "eval_runtime": 2.5926,
+      "eval_samples_per_second": 257.656,
+      "eval_steps_per_second": 16.2,
+      "step": 200
+    },
+    {
+      "epoch": 0.8,
+      "eval_accuracy": 0.9730134932533733,
+      "eval_f1": 0.9475262368815591,
+      "eval_loss": 0.12827223539352417,
+      "eval_precision": 0.9422288483041371,
+      "eval_recall": 0.9528835280814173,
+      "eval_runtime": 2.73,
+      "eval_samples_per_second": 244.686,
+      "eval_steps_per_second": 15.384,
+      "step": 300
+    },
+    {
+      "epoch": 1.06,
+      "eval_accuracy": 0.9710144927536232,
+      "eval_f1": 0.9431689875799775,
+      "eval_loss": 0.1230660155415535,
+      "eval_precision": 0.9417512213453589,
+      "eval_recall": 0.9445910290237467,
+      "eval_runtime": 2.7678,
+      "eval_samples_per_second": 241.348,
+      "eval_steps_per_second": 15.175,
+      "step": 400
+    },
+    {
+      "epoch": 1.33,
+      "learning_rate": 9e-05,
+      "loss": 0.2505,
+      "step": 500
+    },
+    {
+      "epoch": 1.33,
+      "eval_accuracy": 0.9786356821589205,
+      "eval_f1": 0.9521306551529942,
+      "eval_loss": 0.08949972689151764,
+      "eval_precision": 0.9483919222139118,
+      "eval_recall": 0.9558989822842066,
+      "eval_runtime": 2.9407,
+      "eval_samples_per_second": 227.154,
+      "eval_steps_per_second": 14.282,
+      "step": 500
+    },
+    {
+      "epoch": 1.6,
+      "eval_accuracy": 0.9768865567216392,
+      "eval_f1": 0.9537420082737871,
+      "eval_loss": 0.09133146703243256,
+      "eval_precision": 0.951594746716698,
+      "eval_recall": 0.9558989822842066,
+      "eval_runtime": 2.8849,
+      "eval_samples_per_second": 231.55,
+      "eval_steps_per_second": 14.559,
+      "step": 600
+    },
+    {
+      "epoch": 1.86,
+      "eval_accuracy": 0.9765117441279361,
+      "eval_f1": 0.9579800263802524,
+      "eval_loss": 0.09044157713651657,
+      "eval_precision": 0.9577995478522984,
+      "eval_recall": 0.9581605729362985,
+      "eval_runtime": 2.8752,
+      "eval_samples_per_second": 232.332,
+      "eval_steps_per_second": 14.608,
+      "step": 700
+    },
+    {
+      "epoch": 2.13,
+      "eval_accuracy": 0.9801349325337332,
+      "eval_f1": 0.9586621570838031,
+      "eval_loss": 0.07938132435083389,
+      "eval_precision": 0.9557886849007119,
+      "eval_recall": 0.9615529589144365,
+      "eval_runtime": 2.8692,
+      "eval_samples_per_second": 232.816,
+      "eval_steps_per_second": 14.638,
+      "step": 800
+    },
+    {
+      "epoch": 2.39,
+      "eval_accuracy": 0.9771364317841079,
+      "eval_f1": 0.9548872180451128,
+      "eval_loss": 0.09603337943553925,
+      "eval_precision": 0.9523809523809523,
+      "eval_recall": 0.9574067093856012,
+      "eval_runtime": 2.8538,
+      "eval_samples_per_second": 234.072,
+      "eval_steps_per_second": 14.717,
+      "step": 900
+    },
+    {
+      "epoch": 2.66,
+      "learning_rate": 8e-05,
+      "loss": 0.0748,
+      "step": 1000
+    },
+    {
+      "epoch": 2.66,
+      "eval_accuracy": 0.9761369315342329,
+      "eval_f1": 0.9575871819038644,
+      "eval_loss": 0.09003584831953049,
+      "eval_precision": 0.9577677224736049,
+      "eval_recall": 0.9574067093856012,
+      "eval_runtime": 2.9061,
+      "eval_samples_per_second": 229.858,
+      "eval_steps_per_second": 14.452,
+      "step": 1000
+    },
+    {
+      "epoch": 2.93,
+      "eval_accuracy": 0.9730134932533733,
+      "eval_f1": 0.954383330204618,
+      "eval_loss": 0.10545289516448975,
+      "eval_precision": 0.9506357516828721,
+      "eval_recall": 0.9581605729362985,
+      "eval_runtime": 2.9384,
+      "eval_samples_per_second": 227.334,
+      "eval_steps_per_second": 14.293,
+      "step": 1100
+    },
+    {
+      "epoch": 3.19,
+      "eval_accuracy": 0.9761369315342329,
+      "eval_f1": 0.9593220338983051,
+      "eval_loss": 0.099028080701828,
+      "eval_precision": 0.9585999247271358,
+      "eval_recall": 0.9600452318130418,
+      "eval_runtime": 3.0033,
+      "eval_samples_per_second": 222.419,
+      "eval_steps_per_second": 13.984,
+      "step": 1200
+    },
+    {
+      "epoch": 3.46,
+      "eval_accuracy": 0.9752623688155923,
+      "eval_f1": 0.9561782960315968,
+      "eval_loss": 0.09300831705331802,
+      "eval_precision": 0.9542042042042042,
+      "eval_recall": 0.9581605729362985,
+      "eval_runtime": 2.8431,
+      "eval_samples_per_second": 234.956,
+      "eval_steps_per_second": 14.773,
+      "step": 1300
+    },
+    {
+      "epoch": 3.72,
+      "eval_accuracy": 0.9775112443778111,
+      "eval_f1": 0.9578471960858111,
+      "eval_loss": 0.0967017412185669,
+      "eval_precision": 0.9564073656520106,
+      "eval_recall": 0.9592913682623445,
+      "eval_runtime": 2.8669,
+      "eval_samples_per_second": 233.003,
+      "eval_steps_per_second": 14.65,
+      "step": 1400
+    },
+    {
+      "epoch": 3.99,
+      "learning_rate": 7e-05,
+      "loss": 0.0497,
+      "step": 1500
+    },
+    {
+      "epoch": 3.99,
+      "eval_accuracy": 0.9776361819090454,
+      "eval_f1": 0.9595484477892756,
+      "eval_loss": 0.0839652568101883,
+      "eval_precision": 0.9579263711495116,
+      "eval_recall": 0.9611760271390878,
+      "eval_runtime": 2.8628,
+      "eval_samples_per_second": 233.335,
+      "eval_steps_per_second": 14.671,
+      "step": 1500
+    }
+  ],
+  "max_steps": 5000,
+  "num_train_epochs": 14,
+  "total_flos": 592464605771736.0,
+  "trial_name": null,
+  "trial_params": null
+}
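`trainer_state.json` logs an evaluation record every 100 steps and a training-loss record every 500 steps; the `best_metric` above (eval F1 ≈ 0.9595) corresponds to the checkpoint at step 1500. A small sketch, assuming the file is available locally, that recovers the best evaluation entry from `log_history`:

```python
import json

with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# Keep only evaluation records; training-loss records have no eval_f1 field.
evals = [entry for entry in state["log_history"] if "eval_f1" in entry]
best = max(evals, key=lambda e: e["eval_f1"])
print(f"best eval_f1={best['eval_f1']:.4f} at step {best['step']} (epoch {best['epoch']:.2f})")
```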
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31aeb7cf016b0e47597e331fa262b44873f81073521049d8a85dc46bd2004681
+size 3055