fengtc committed on
Commit 9c75e58
1 Parent(s): 3397677

commit from

README.md ADDED
@@ -0,0 +1,107 @@
+ ---
+ language:
+ - zh
+ - en
+
+ tags:
+ - translation
+
+ license: cc-by-4.0
+ ---
+
+ ### zho-eng
+
+ ## Table of Contents
+ - [Model Details](#model-details)
+ - [Uses](#uses)
+ - [Risks, Limitations and Biases](#risks-limitations-and-biases)
+ - [Training](#training)
+ - [Evaluation](#evaluation)
+ - [Citation Information](#citation-information)
+ - [How to Get Started With the Model](#how-to-get-started-with-the-model)
+
+ ## Model Details
+ - **Model Description:**
+ - **Developed by:** Language Technology Research Group at the University of Helsinki
+ - **Model Type:** Translation
+ - **Language(s):**
+ - Source Language: Chinese
+ - Target Language: English
+ - **License:** CC-BY-4.0
+ - **Resources for more information:**
+ - [GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train)
+
+
+ ## Uses
+
+ #### Direct Use
+
+ This model can be used for translation and text-to-text generation.
+
+
+ ## Risks, Limitations and Biases
+
+ **CONTENT WARNING: Readers should be aware this section contains content that is disturbing, offensive, and can propagate historical and current stereotypes.**
+
+ Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)).
+
+ Further details about the dataset for this model can be found in the OPUS readme: [zho-eng](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/zho-eng/README.md)
+
+ ## Training
+
+ #### System Information
+ * helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
+ * transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
+ * port_machine: brutasse
+ * port_time: 2020-08-21-14:41
+ * src_multilingual: False
+ * tgt_multilingual: False
+
+ #### Training Data
+ ##### Preprocessing
+ * pre-processing: normalization + SentencePiece (spm32k,spm32k)
+ * ref_len: 82826.0
+ * dataset: [opus](https://github.com/Helsinki-NLP/Opus-MT)
+ * download original weights: [opus-2020-07-17.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/zho-eng/opus-2020-07-17.zip)
+
+ * test set translations: [opus-2020-07-17.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/zho-eng/opus-2020-07-17.test.txt)
+
+
+ ## Evaluation
+
+ #### Results
+
+ * test set scores: [opus-2020-07-17.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/zho-eng/opus-2020-07-17.eval.txt)
+
+ * brevity_penalty: 0.948
+
+
+ ## Benchmarks
+
+ | testset | BLEU | chr-F |
+ |-----------------------|-------|-------|
+ | Tatoeba-test.zho.eng | 36.1 | 0.548 |
+
+ ## Citation Information
+
+ ```bibtex
+ @InProceedings{TiedemannThottingal:EAMT2020,
+ author = {J{\"o}rg Tiedemann and Santhosh Thottingal},
+ title = {{OPUS-MT} -- {B}uilding open translation services for the {W}orld},
+ booktitle = {Proceedings of the 22nd Annual Conference of the European Association for Machine Translation (EAMT)},
+ year = {2020},
+ address = {Lisbon, Portugal}
+ }
+ ```
+
+ ## How to Get Started With the Model
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+ tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
+
+ model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
+ ```
+
+
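The snippet in the card above only loads the tokenizer and model. As a minimal sketch of an end-to-end translation call with this checkpoint (the input sentence is an arbitrary example, not taken from the card):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-zh-en")

# Illustrative Chinese input; any short sentence works the same way.
text = "你好，世界！"

# Tokenize, translate with the checkpoint's default beam search, and decode.
inputs = tokenizer(text, return_tensors="pt")
generated = model.generate(**inputs)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```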
config.json ADDED
@@ -0,0 +1,60 @@
+ {
+ "_name_or_path": "/tmp/Helsinki-NLP/opus-mt-zh-en",
+ "activation_dropout": 0.0,
+ "activation_function": "swish",
+ "add_bias_logits": false,
+ "add_final_layer_norm": false,
+ "architectures": [
+ "MarianMTModel"
+ ],
+ "attention_dropout": 0.0,
+ "bad_words_ids": [
+ [
+ 65000
+ ]
+ ],
+ "bos_token_id": 0,
+ "classif_dropout": 0.0,
+ "classifier_dropout": 0.0,
+ "d_model": 512,
+ "decoder_attention_heads": 8,
+ "decoder_ffn_dim": 2048,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 6,
+ "decoder_start_token_id": 65000,
+ "decoder_vocab_size": 65001,
+ "dropout": 0.1,
+ "encoder_attention_heads": 8,
+ "encoder_ffn_dim": 2048,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 6,
+ "eos_token_id": 0,
+ "extra_pos_embeddings": 65001,
+ "forced_eos_token_id": 0,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2"
+ },
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_2": 2
+ },
+ "max_length": 512,
+ "max_position_embeddings": 512,
+ "model_type": "marian",
+ "normalize_before": false,
+ "normalize_embedding": false,
+ "num_beams": 6,
+ "num_hidden_layers": 6,
+ "pad_token_id": 65000,
+ "scale_embedding": true,
+ "share_encoder_decoder_embeddings": true,
+ "static_position_embeddings": true,
+ "transformers_version": "4.22.0.dev0",
+ "use_cache": true,
+ "vocab_size": 65001
+ }
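The config above describes a standard Marian encoder-decoder: 6 encoder and 6 decoder layers, 8 attention heads, 512-dimensional hidden states, and a shared 65,001-token vocabulary. A small sketch, assuming Hub access, of reading those fields back through `transformers`:

```python
from transformers import AutoConfig

# Load the config.json added in this commit (from the Hub or a local clone).
config = AutoConfig.from_pretrained("Helsinki-NLP/opus-mt-zh-en")

print(config.model_type)       # "marian"
print(config.encoder_layers)   # 6
print(config.decoder_layers)   # 6
print(config.d_model)          # 512
print(config.vocab_size)       # 65001
```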
generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "_from_model_config": true,
+ "bad_words_ids": [
+ [
+ 65000
+ ]
+ ],
+ "bos_token_id": 0,
+ "decoder_start_token_id": 65000,
+ "eos_token_id": 0,
+ "forced_eos_token_id": 0,
+ "max_length": 512,
+ "num_beams": 6,
+ "pad_token_id": 65000,
+ "transformers_version": "4.27.0.dev0"
+ }
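These defaults (beam search with num_beams=6, max_length=512, pad and decoder-start token 65000, and bad_words_ids blocking the pad token) are picked up automatically by `model.generate()`. A hedged sketch of overriding them through `GenerationConfig`; the override values below are arbitrary examples:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-zh-en")

# Start from the committed defaults, then override a couple of values.
gen_config = GenerationConfig.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
gen_config.num_beams = 4      # example override of the default 6 beams
gen_config.max_length = 128   # example override of the default 512

inputs = tokenizer("这是一个测试。", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=gen_config)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```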
metadata.json ADDED
@@ -0,0 +1 @@
+ {"hf_name":"zho-eng","source_languages":"zho","target_languages":"eng","opus_readme_url":"https:\/\/github.com\/Helsinki-NLP\/Tatoeba-Challenge\/tree\/master\/models\/zho-eng\/README.md","original_repo":"Tatoeba-Challenge","tags":["translation"],"languages":["zh","en"],"src_constituents":["cmn_Hans","nan","nan_Hani","gan","yue","cmn_Kana","yue_Hani","wuu_Bopo","cmn_Latn","yue_Hira","cmn_Hani","cjy_Hans","cmn","lzh_Hang","lzh_Hira","cmn_Hant","lzh_Bopo","zho","zho_Hans","zho_Hant","lzh_Hani","yue_Hang","wuu","yue_Kana","wuu_Latn","yue_Bopo","cjy_Hant","yue_Hans","lzh","cmn_Hira","lzh_Yiii","lzh_Hans","cmn_Bopo","cmn_Hang","hak_Hani","cmn_Yiii","yue_Hant","lzh_Kana","wuu_Hani"],"tgt_constituents":["eng"],"src_multilingual":false,"tgt_multilingual":false,"prepro":" normalization + SentencePiece (spm32k,spm32k)","url_model":"https:\/\/object.pouta.csc.fi\/Tatoeba-MT-models\/zho-eng\/opus-2020-07-17.zip","url_test_set":"https:\/\/object.pouta.csc.fi\/Tatoeba-MT-models\/zho-eng\/opus-2020-07-17.test.txt","src_alpha3":"zho","tgt_alpha3":"eng","short_pair":"zh-en","chrF2_score":0.548,"bleu":36.1,"brevity_penalty":0.948,"ref_len":82826.0,"src_name":"Chinese","tgt_name":"English","train_date":"2020-07-17","src_alpha2":"zh","tgt_alpha2":"en","prefer_old":false,"long_pair":"zho-eng","helsinki_git_sha":"480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535","transformers_git_sha":"2207e5d8cb224e954a7cba69fa4ac2309e9ff30b","port_machine":"brutasse","port_time":"2020-08-21-14:41"}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d8ceb91d103ef89400c9d9d62328b4858743cf8924878aee3b8afc594242ce0
+ size 312087009
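This entry is a Git LFS pointer, not the weights themselves; the sha256 oid can be used to check a locally downloaded pytorch_model.bin. A sketch using only the standard library (the local path is an assumption):

```python
import hashlib

# Hypothetical local path to the downloaded weights file.
path = "pytorch_model.bin"

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# Compare against the oid recorded in the LFS pointer above.
expected = "9d8ceb91d103ef89400c9d9d62328b4858743cf8924878aee3b8afc594242ce0"
print(sha256.hexdigest() == expected)
```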
rust_model.ot ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:859d0e2531693a5f003ea110aa5cee1b3439cea362980668923126bbb11d56de
+ size 578358061
source.spm ADDED
Binary file (805 kB).
 
target.spm ADDED
Binary file (807 kB).
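source.spm and target.spm are the SentencePiece models behind the "spm32k,spm32k" preprocessing noted in the README. A sketch of inspecting one with the `sentencepiece` package, assuming the file has been downloaded locally:

```python
import sentencepiece as spm

# Hypothetical local path; fetch source.spm from this repository first.
sp = spm.SentencePieceProcessor(model_file="source.spm")

print(sp.get_piece_size())                     # size of the source subword vocabulary
print(sp.encode("你好，世界！", out_type=str))  # subword pieces for a sample sentence
```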
 
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5cfa433c98aeeaa39d7e93f317787467ecb9696b440b96e6d6a4ae3a91b2fb99
+ size 312580600
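Since the commit also adds TensorFlow weights (tf_model.h5), the same checkpoint can be loaded through the TF classes in `transformers`, assuming TensorFlow is installed:

```python
from transformers import TFAutoModelForSeq2SeqLM

# Loads the tf_model.h5 weights added in this commit.
tf_model = TFAutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
```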
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"target_lang": "eng", "source_lang": "zho"}
vocab.json ADDED
The diff for this file is too large to render.