calpt committed
Commit 628285a
1 Parent(s): 4e08ec8

Add adapter bert-base-uncased-ner-pfeiffer version NER

README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ tags:
+ - bert
+ - adapter-transformers
+ - adapterhub:ner/conll2003
+ datasets:
+ - conll2003
+ license: "apache-2.0"
+ ---
+
+ # Adapter `bert-base-uncased-ner-pfeiffer` for bert-base-uncased
+
+ An adapter trained on the CoNLL-2003 dataset for named entity recognition.
+
+
+ **This adapter was created for usage with the [Adapters](https://github.com/Adapter-Hub/adapters) library.**
+
+ ## Usage
+
+ First, install `adapters`:
+
+ ```bash
+ pip install -U adapters
+ ```
+
+ Now, the adapter can be loaded and activated like this:
+
+ ```python
+ from adapters import AutoAdapterModel
+
+ model = AutoAdapterModel.from_pretrained("bert-base-uncased")
+ adapter_name = model.load_adapter("AdapterHub/bert-base-uncased-ner-pfeiffer")
+ model.set_active_adapters(adapter_name)
+ ```
+
+ ## Architecture & Training
+
+ - Adapter architecture: pfeiffer
+ - Prediction head: tagging (see `head_config.json`)
+ - Dataset: [conll2003](https://huggingface.co/datasets/conll2003)
+
+ ## Author Information
+
+ - Author name(s): Hannah Sterz
+ - Author email: sterz@ukp.informatik.tu-darmstadt.de
+ - Author links: [Twitter](https://twitter.com/@Hannah70676760)
+
+
+
+ ## Citation
+
+ ```bibtex
+
+ ```
+
+ *This adapter has been auto-imported from https://github.com/Adapter-Hub/Hub/blob/master/adapters/ukp/bert-base-uncased-ner-pfeiffer.yml*.
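To run the adapter end-to-end, the usage snippet above extends naturally into a small inference script. A minimal sketch, assuming `load_adapter` restores the tagging head shipped in this repo (its default behavior) and transcribing the `id2label` mapping from `head_config.json` below; the input sentence is illustrative:

```python
import torch
from transformers import AutoTokenizer
from adapters import AutoAdapterModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoAdapterModel.from_pretrained("bert-base-uncased")

# load_adapter also restores the tagging head stored with this adapter
adapter_name = model.load_adapter("AdapterHub/bert-base-uncased-ner-pfeiffer")
model.set_active_adapters(adapter_name)
model.eval()

# inverse of the label2id mapping in head_config.json
id2label = {0: "O", 1: "B-LOC", 2: "I-LOC", 3: "B-PER", 4: "I-PER",
            5: "B-ORG", 6: "I-ORG", 7: "B-MISC", 8: "I-MISC"}

inputs = tokenizer("george washington lived in virginia", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, seq_len, num_labels)

tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, label_id in zip(tokens, logits.argmax(dim=-1)[0].tolist()):
    print(f"{token}\t{id2label[label_id]}")
```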
adapter_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "config": {
+     "adapter_residual_before_ln": false,
+     "cross_adapter": false,
+     "dropout": 0.0,
+     "factorized_phm_W": true,
+     "factorized_phm_rule": false,
+     "hypercomplex_nonlinearity": "glorot-uniform",
+     "init_weights": "bert",
+     "inv_adapter": null,
+     "inv_adapter_reduction_factor": null,
+     "is_parallel": false,
+     "learn_phm": true,
+     "leave_out": [],
+     "ln_after": false,
+     "ln_before": false,
+     "mh_adapter": false,
+     "non_linearity": "relu",
+     "original_ln_after": true,
+     "original_ln_before": true,
+     "output_adapter": true,
+     "phm_bias": true,
+     "phm_c_init": "normal",
+     "phm_dim": 4,
+     "phm_init_range": 0.0001,
+     "phm_layer": false,
+     "phm_rank": 1,
+     "reduction_factor": 16,
+     "residual_before_ln": true,
+     "scaling": 1.0,
+     "shared_W_phm": false,
+     "shared_phm_rule": true,
+     "use_gating": false
+   },
+   "hidden_size": 768,
+   "model_class": "BertAdapterModel",
+   "model_name": "bert-base-uncased",
+   "model_type": "bert",
+   "name": "ner",
+   "version": "0.2.0"
+ }
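This configuration describes the pfeiffer-style sequential bottleneck: a single adapter after each layer's output block (`output_adapter: true`, `mh_adapter: false`), ReLU non-linearity, and `reduction_factor: 16`, i.e. a bottleneck of 768 / 16 = 48 dimensions. A fresh adapter with the same shape could be created roughly as follows, assuming the current `adapters` API in which `SeqBnConfig` supersedes the earlier `PfeifferConfig` name:

```python
from adapters import AutoAdapterModel, SeqBnConfig

model = AutoAdapterModel.from_pretrained("bert-base-uncased")

# Sequential bottleneck adapter mirroring adapter_config.json:
# inserted after the FFN output only, ReLU, bottleneck = 768 / 16 = 48
config = SeqBnConfig(reduction_factor=16, non_linearity="relu")
model.add_adapter("ner", config=config)

# Freeze the base model so only the adapter weights are updated
model.train_adapter("ner")
```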
head_config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "config": {
+     "activation_function": "tanh",
+     "head_type": "tagging",
+     "label2id": {
+       "B-LOC": 1,
+       "B-MISC": 7,
+       "B-ORG": 5,
+       "B-PER": 3,
+       "I-LOC": 2,
+       "I-MISC": 8,
+       "I-ORG": 6,
+       "I-PER": 4,
+       "O": 0
+     },
+     "layers": 1,
+     "num_labels": 9
+   },
+   "hidden_size": 768,
+   "model_class": "BertModelWithHeads",
+   "model_name": "bert-base-uncased",
+   "model_type": "bert",
+   "name": "ner_head"
+ }
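The head is a single-layer tagging head over the nine CoNLL-2003 BIO labels. A sketch of attaching an equivalent head manually, assuming the `add_tagging_head` method of the `adapters` model classes and mirroring `label2id` above:

```python
from adapters import AutoAdapterModel

model = AutoAdapterModel.from_pretrained("bert-base-uncased")

# label2id copied from head_config.json; the head also stores the inverse
label2id = {"O": 0, "B-LOC": 1, "I-LOC": 2, "B-PER": 3, "I-PER": 4,
            "B-ORG": 5, "I-ORG": 6, "B-MISC": 7, "I-MISC": 8}
id2label = {i: label for label, i in label2id.items()}

# single linear layer ("layers": 1) over 9 labels, matching head_config.json
model.add_tagging_head("ner_head", num_labels=9, id2label=id2label)
```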
pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f1014c74e14cbb33ab8f7f1344b6b29ad0a539e02fe1467cfbd7bde1e992ba9
+ size 3594662
pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa00033bcc5f5a1c75e9db2f0212d998623d80f4e5d17766cf5c469a642219d9
+ size 28727
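Both `.bin` entries are Git LFS pointer files rather than the weights themselves: each records the SHA-256 object id and byte size of the binary that LFS fetches on checkout. A downloaded file can be checked against the recorded oid; a minimal sketch, assuming the file sits in the current directory:

```python
import hashlib

def sha256_of(path: str) -> str:
    """Stream the file and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# oid recorded in the pytorch_adapter.bin pointer file above
expected = "4f1014c74e14cbb33ab8f7f1344b6b29ad0a539e02fe1467cfbd7bde1e992ba9"
assert sha256_of("pytorch_adapter.bin") == expected
```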