p208p2002 committed
Commit 0554567
1 Parent(s): 0d8848e

Upload 8 files
README.md ADDED
@@ -0,0 +1,84 @@
---
datasets:
- drcd
tags:
- question-generation
widget:
- text: "[HL]伊隆·里夫·馬斯克[HL]是一名企業家和商業大亨"
---
# Transformer QG on DRCD
See https://github.com/p208p2002/Transformer-QG-on-DRCD for more details.

The model's input follows this format:
```
We integrate the context C and the answer A into a new context C' in the following form:
C' = [c1, c2, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|]
```
> Proposed by [Ying-Hong Chan & Yao-Chung Fan. (2019). A Recurrent BERT-based Model for Question Generation.](https://www.aclweb.org/anthology/D19-5821/)

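For illustration, a minimal Python sketch of this highlighting step (the helper `highlight_answer` is hypothetical, not part of this repo):
```python
HL = "[HL]"

def highlight_answer(context: str, answer: str) -> str:
    """Build C' by wrapping the answer span inside the context with [HL]:
    C' = [c1, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|]"""
    start = context.index(answer)  # position of the answer span in the context
    end = start + len(answer)
    return context[:start] + HL + context[start:end] + HL + context[end:]

print(highlight_answer("伊隆·里夫·馬斯克是一名企業家和商業大亨", "伊隆·里夫·馬斯克"))
# -> [HL]伊隆·里夫·馬斯克[HL]是一名企業家和商業大亨
```
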
## Features
- Full pipeline from fine-tuning to evaluation
- Supports most state-of-the-art models
- Fast deployment as an API server

## DRCD dataset
[台達閱讀理解資料集 Delta Reading Comprehension Dataset (DRCD)](https://github.com/DRCKnowledgeTeam/DRCD) is a general-domain Traditional Chinese machine reading comprehension dataset. It contains 10,014 paragraphs compiled from 2,108 Wikipedia articles, with more than 30,000 questions annotated over those paragraphs.

## Available models
- BART (based on **[uer/bart-base-chinese-cluecorpussmall](https://huggingface.co/uer/bart-base-chinese-cluecorpussmall)**)

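A minimal usage sketch with Hugging Face Transformers, assuming the files in this commit; `MODEL_PATH` is a placeholder for this repository's Hub id or a local clone:
```python
from transformers import BartForConditionalGeneration, BertTokenizerFast

MODEL_PATH = "."  # placeholder: this repo's Hub id, or a local clone of it

# config.json declares BartForConditionalGeneration with a BertTokenizer vocab
tokenizer = BertTokenizerFast.from_pretrained(MODEL_PATH)
model = BartForConditionalGeneration.from_pretrained(MODEL_PATH)

context = "[HL]伊隆·里夫·馬斯克[HL]是一名企業家和商業大亨"
inputs = tokenizer(context, return_tensors="pt")
output_ids = model.generate(inputs.input_ids, max_length=50)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```
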
## Experiments
Model         |BLEU-1|BLEU-2|BLEU-3|BLEU-4|METEOR|ROUGE-L|
--------------|------|------|------|------|------|-------|
BART-HLSQG    |34.25 |27.70 |22.43 |18.13 |23.58 |36.88  |
BART-HLSQG-v2 |39.30 |32.51 |26.72 |22.08 |24.94 |41.18  |

## Environment requirements
All development is based on Ubuntu.

1. If you don't have PyTorch 1.6+, please install or update it first
> https://pytorch.org/get-started/locally/

2. Install packages: `pip install -r requirements.txt`

3. Set up the scorer: `python setup_scorer.py`

4. Download the dataset: `python init_dataset.py`

## Training
### Seq2Seq LM
```
usage: train_seq2seq_lm.py [-h]
                           [--base_model {facebook/bart-base,facebook/bart-large,t5-small,t5-base,t5-large}]
                           [-d {squad,squad-nqg}] [--epoch EPOCH] [--lr LR]
                           [--dev DEV] [--server] [--run_test]
                           [-fc FROM_CHECKPOINT]

optional arguments:
  -h, --help            show this help message and exit
  --base_model {facebook/bart-base,facebook/bart-large,t5-small,t5-base,t5-large}
  -d {squad,squad-nqg}, --dataset {squad,squad-nqg}
  --epoch EPOCH
  --lr LR
  --dev DEV
  --server
  --run_test
  -fc FROM_CHECKPOINT, --from_checkpoint FROM_CHECKPOINT
```

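For example, a fine-tuning run could look like this (the flag values are illustrative, not prescriptive):
```
python train_seq2seq_lm.py --base_model facebook/bart-base -d squad --epoch 10 --lr 5e-5
```
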
## Deploy
### Start up
```
python train_seq2seq_lm.py --server --base_model YOUR_BASE_MODEL --from_checkpoint FROM_CHECKPOINT
```
### Request example
```
curl --location --request POST 'http://127.0.0.1:5000/' \
--header 'Content-Type: application/x-www-form-urlencoded' \
--data-urlencode 'context=[HL]伊隆·里夫·馬斯克[HL]是一名企業家和商業大亨'
```
```json
{"predict": "哪一個人是一名企業家和商業大亨?"}
```

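Equivalently from Python, a sketch using the `requests` library (assumes the server above is running locally):
```python
import requests

# the server accepts form-encoded data, matching the curl example above
resp = requests.post(
    "http://127.0.0.1:5000/",
    data={"context": "[HL]伊隆·里夫·馬斯克[HL]是一名企業家和商業大亨"},
)
print(resp.json()["predict"])  # e.g. "哪一個人是一名企業家和商業大亨?"
```
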
added_tokens.json ADDED
@@ -0,0 +1,3 @@
{
  "[HL]": 21128
}
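This registers the [HL] highlight marker with id 21128, one past the end of the base vocabulary (hence `"vocab_size": 21129` in config.json below). A hedged sketch of how such a token is typically added; the base checkpoint name is an assumption:
```python
from transformers import BertTokenizerFast

# Assumption: fnlp/bart-base-chinese ships the 21128-token Chinese BERT vocab
tokenizer = BertTokenizerFast.from_pretrained("fnlp/bart-base-chinese")
tokenizer.add_tokens(["[HL]"])
print(tokenizer.convert_tokens_to_ids("[HL]"))  # 21128
# the model's embedding matrix must then be resized to match:
# model.resize_token_embeddings(len(tokenizer))
```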
config.json ADDED
@@ -0,0 +1,74 @@
{
  "_name_or_path": "fnlp/bart-base-chinese",
  "activation_dropout": 0.1,
  "activation_function": "gelu",
  "add_bias_logits": false,
  "add_final_layer_norm": false,
  "architectures": [
    "BartForConditionalGeneration"
  ],
  "attention_dropout": 0.1,
  "bos_token_id": 101,
  "classif_dropout": 0.1,
  "classifier_dropout": 0.0,
  "d_model": 768,
  "decoder_attention_heads": 12,
  "decoder_ffn_dim": 3072,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 6,
  "decoder_start_token_id": 102,
  "dropout": 0.1,
  "early_stopping": true,
  "encoder_attention_heads": 12,
  "encoder_ffn_dim": 3072,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 6,
  "eos_token_id": 102,
  "forced_eos_token_id": 102,
  "gradient_checkpointing": false,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2"
  },
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2
  },
  "max_position_embeddings": 512,
  "model_type": "bart",
  "no_repeat_ngram_size": 3,
  "normalize_before": false,
  "normalize_embedding": true,
  "num_beams": 4,
  "num_hidden_layers": 6,
  "pad_token_id": 0,
  "scale_embedding": false,
  "task_specific_params": {
    "summarization": {
      "length_penalty": 1.0,
      "max_length": 128,
      "min_length": 12,
      "num_beams": 4
    },
    "summarization_cnn": {
      "length_penalty": 2.0,
      "max_length": 142,
      "min_length": 56,
      "num_beams": 4
    },
    "summarization_xsum": {
      "length_penalty": 1.0,
      "max_length": 62,
      "min_length": 11,
      "num_beams": 6
    }
  },
  "torch_dtype": "float32",
  "transformers_version": "4.24.0",
  "use_cache": true,
  "vocab_size": 21129
}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:588acbdbe87688056472b537032a5faf86f355a9b642d1c4240ca11726290143
size 465197369
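Note that this is a Git LFS pointer file, not the weights themselves; the ~465 MB binary is fetched with Git LFS, e.g.:
```
git lfs install
git lfs pull   # inside a clone of this repository
```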
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
{
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "name_or_path": "fnlp/bart-base-chinese",
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "special_tokens_map_file": "/remote-home/yfshao/.cache/huggingface/transformers/d521373fc7ac35f63d56cf303de74a202403dcf1aaa792cd01f653694be59563.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff