ikuyamada committed
Commit 29167de
1 Parent(s): 8895503

Initial commit

.gitattributes CHANGED
@@ -31,3 +31,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+entity_vocab.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,52 @@
 ---
+language: ja
+thumbnail: https://github.com/studio-ousia/luke/raw/master/resources/luke_logo.png
+tags:
+- luke
+- named entity recognition
+- entity typing
+- relation classification
+- question answering
 license: apache-2.0
 ---
+
+## luke-japanese
+
+**luke-japanese** is the Japanese version of **LUKE** (**L**anguage
+**U**nderstanding with **K**nowledge-based **E**mbeddings), a pre-trained
+_knowledge-enhanced_ contextualized representation of words and entities based
+on the transformer architecture. LUKE treats words and entities in a given text
+as independent tokens and outputs contextualized representations of them.
+Please refer to our [GitHub repository](https://github.com/studio-ousia/luke)
+for more details and updates.
+
+**luke-japanese** is the Japanese version of **LUKE**, a knowledge-enhanced
+pre-trained model of words and entities. LUKE treats words and entities as
+independent tokens and outputs representations of them that take their context
+into account. For details, please refer to the
+[GitHub repository](https://github.com/studio-ousia/luke).
+
+### Experimental results on JGLUE
+
+The performance of luke-japanese, evaluated on the dev set of
+[JGLUE](https://github.com/yahoojapan/JGLUE), is as follows:
+
+| Model                  | MARC-ja   | JSTS                | JNLI      | JCommonsenseQA |
+| ---------------------- | --------- | ------------------- | --------- | -------------- |
+|                        | acc       | Pearson/Spearman    | acc       | acc            |
+| **luke-japanese-base** | **0.963** | **0.912**/**0.875** | **0.912** | **0.842**      |
+| _Baselines:_           |           |                     |           |                |
+| Tohoku BERT base       | 0.958     | 0.899/0.859         | 0.899     | 0.808          |
+| NICT BERT base         | 0.958     | 0.903/0.867         | 0.902     | 0.823          |
+| Waseda RoBERTa base    | 0.962     | 0.901/0.865         | 0.895     | 0.840          |
+| XLM RoBERTa base       | 0.961     | 0.870/0.825         | 0.893     | 0.687          |
+
+### Citation
+
+```latex
+@inproceedings{yamada2020luke,
+  title={LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention},
+  author={Ikuya Yamada and Akari Asai and Hiroyuki Shindo and Hideaki Takeda and Yuji Matsumoto},
+  booktitle={EMNLP},
+  year={2020}
+}
+```
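For reference, here is a minimal sketch of loading this checkpoint with Hugging Face Transformers. The Hub model id `studio-ousia/luke-japanese-base` is an assumption inferred from this repository's name; substitute the actual repository path:

```python
from transformers import AutoModel, AutoTokenizer

# Assumed model id; replace with the actual Hub repository path.
MODEL_ID = "studio-ousia/luke-japanese-base"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)  # resolves to MLukeTokenizer
model = AutoModel.from_pretrained(MODEL_ID)          # resolves to LukeModel

# Example Japanese input sentence.
text = "LUKEは単語とエンティティの文脈を考慮した表現を出力します。"
inputs = tokenizer(text, return_tensors="pt")
outputs = model(**inputs)

# Contextualized word representations: (batch_size, sequence_length, 768)
print(outputs.last_hidden_state.shape)
```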
added_tokens.json ADDED
@@ -0,0 +1 @@
+{"<ent>": 32770, "<ent2>": 32771}
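The two entries above append LUKE's entity marker tokens at the top of the 32,772-token word vocabulary (`vocab_size` in config.json below). A quick sanity check, assuming the tokenizer loaded in the sketch above:

```python
# `tokenizer` is the MLukeTokenizer loaded in the earlier sketch.
assert tokenizer.convert_tokens_to_ids("<ent>") == 32770
assert tokenizer.convert_tokens_to_ids("<ent2>") == 32771
```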
config.json ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "models/luke-japanese/hf_xlm_roberta",
+  "architectures": [
+    "LukeForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bert_model_name": "models/luke-japanese/hf_xlm_roberta",
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "cls_entity_prediction": false,
+  "entity_emb_size": 256,
+  "entity_vocab_size": 570505,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "luke",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.13.0",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "use_entity_aware_attention": true,
+  "vocab_size": 32772
+}
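Two details of this configuration are worth noting: entity embeddings are stored at `entity_emb_size` 256 and projected up to the 768-dimensional hidden size, which keeps the 570,505-entry entity embedding table compact, and `use_entity_aware_attention` enables LUKE's entity-aware self-attention. A sketch of inspecting these values (the model id is again an assumption):

```python
from transformers import AutoConfig

# Assumed model id; replace with the actual Hub repository path.
config = AutoConfig.from_pretrained("studio-ousia/luke-japanese-base")

print(config.model_type)                  # "luke"
print(config.entity_vocab_size)           # 570505
print(config.entity_emb_size)             # 256, projected up to hidden_size
print(config.hidden_size)                 # 768
print(config.use_entity_aware_attention)  # True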
entity_vocab.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7b569f330b5ddbeae34dee4ac4d4681585f2b6358cffbb372829233be1606aa
+size 20543383
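This is a Git LFS pointer rather than the file itself: the actual ~20 MB JSON holds the 570,505-entry entity vocabulary and is fetched transparently by `from_pretrained`. To download and inspect it directly, a sketch using `huggingface_hub` (model id assumed as before):

```python
import json

from huggingface_hub import hf_hub_download

# Assumed model id; replace with the actual Hub repository path.
path = hf_hub_download("studio-ousia/luke-japanese-base", "entity_vocab.json")
with open(path, encoding="utf-8") as f:
    entity_vocab = json.load(f)

print(len(entity_vocab))  # expected to match entity_vocab_size (570505)
```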
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f40c0932218c73f6c167c76b84e1e35979dc2cd93f8f628f8c8e7ee8a05ebf59
+size 1122140419
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8b73a5e054936c920cf5b7d1ec21ce9c281977078269963beb821c6c86fbff7
+size 841889
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}, "additional_special_tokens": [{"content": "<ent>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": false}, {"content": "<ent2>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": false}]}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "task": null, "max_entity_length": 32, "max_mention_length": 30, "entity_token_1": {"content": "<ent>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "entity_token_2": {"content": "<ent2>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": "models/luke-japanese/hf_xlm_roberta/special_tokens_map.json", "name_or_path": "models/luke-japanese/hf_luke_japanese_epoch20", "tokenizer_file": "models/luke-japanese/hf_luke_japanese_epoch20/tokenizer.json", "additional_special_tokens": [{"content": "<ent>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, {"content": "<ent2>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}], "tokenizer_class": "MLukeTokenizer"}
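`tokenizer_class` is `MLukeTokenizer`, the multilingual LUKE tokenizer, which accepts character-level `entity_spans` and builds the entity token sequence alongside the word tokens (up to `max_entity_length` 32 entities, each mention up to `max_mention_length` 30 tokens). A minimal sketch, with the model id assumed as above:

```python
from transformers import MLukeTokenizer

# Assumed model id; replace with the actual Hub repository path.
tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/luke-japanese-base")

text = "東京はアジア有数の世界都市である。"
entity_spans = [(0, 2)]  # character span of the mention "東京"

# Without an explicit `entities` argument, the spans are filled with the
# [MASK] entity, following the upstream LUKE tokenizers.
inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
print(inputs["entity_ids"])  # entity token ids alongside the word tokens
```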