eugene-yang committed
Commit c5b1781 (parent: ac6e5f0)

commit model
README.md CHANGED
@@ -1,3 +1,25 @@
  ---
  license: mit
  ---
+ A DPR model for NeuCLIR, built on an XLM-R Large language model pretrained with C3 and trained with MTT (multilingual translate-train) on MS MARCO English queries and translated documents in Chinese, Persian, and Russian.
+ The translations are available as [neuMARCO](https://ir-datasets.com/neumarco.html) on `ir-datasets`.
+
+ Please cite the following papers if you use this model:
+ ```bibtex
+ @inproceedings{sigir2022c3,
+   author = {Eugene Yang and Suraj Nair and Ramraj Chandradevan and Rebecca Iglesias-Flores and Douglas W. Oard},
+   title = {C3: Continued Pretraining with Contrastive Weak Supervision for Cross Language Ad-Hoc Retrieval},
+   booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR) (Short Paper)},
+   year = {2022},
+   url = {https://arxiv.org/abs/2204.11989}
+ }
+
+ @inproceedings{ecir2023mlir,
+   title = {Neural Approaches to Multilingual Information Retrieval},
+   author = {Dawn Lawrie and Eugene Yang and Douglas W. Oard and James Mayfield},
+   booktitle = {Proceedings of the 45th European Conference on Information Retrieval (ECIR)},
+   year = {2023},
+   url = {https://arxiv.org/abs/2209.01335}
+ }
+ ```
+
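For convenience, a minimal encoding sketch with Hugging Face Transformers follows. The card does not state the pooling or scoring used at inference time, so the CLS-vector pooling and inner-product scoring below follow the usual DPR convention and are assumptions; `"path/to/this/model"` is a hypothetical placeholder to be replaced with this repository's id or a local clone.

```python
# A minimal sketch of query/document encoding with this checkpoint.
# Assumptions (not specified in the card): CLS pooling and dot-product
# scoring, the usual DPR convention.
import torch
from transformers import AutoModel, AutoTokenizer

MODEL_DIR = "path/to/this/model"  # hypothetical placeholder

tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoModel.from_pretrained(MODEL_DIR)
model.eval()

def encode(texts):
    # Tokenize and take the <s> (CLS) vector as the dense representation.
    batch = tokenizer(texts, padding=True, truncation=True,
                      max_length=512, return_tensors="pt")
    with torch.no_grad():
        out = model(**batch)
    return out.last_hidden_state[:, 0]  # shape: (batch, 1024)

query_emb = encode(["what is dense retrieval?"])
doc_embs = encode(["Документ на русском языке.", "一份中文文件。"])

# Inner-product relevance scores between the query and each document.
scores = query_emb @ doc_embs.T
print(scores)
```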
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "/expscratch/eyang/checkpoints/c3pretrained_xlm-roberta-large/5e-6-en.zh.fa.ru/con2.cls.multi-con/",
+   "architectures": [
+     "XLMRobertaModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.13.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
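The geometry above is stock XLM-R Large and can be read programmatically with `AutoConfig`. A small sketch, reusing the hypothetical placeholder path from the README example:

```python
from transformers import AutoConfig

# "path/to/this/model" is a hypothetical placeholder for this repository.
config = AutoConfig.from_pretrained("path/to/this/model")
# Expect: xlm-roberta, 24 layers, 16 heads, 1024-dim hidden states.
print(config.model_type, config.num_hidden_layers,
      config.num_attention_heads, config.hidden_size)
```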
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50994f8d37c8af28332f2492ed279ff63cb83d777a3c191bb1758db60a847d91
+ size 2239718641
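The three lines above are a Git LFS pointer rather than the weights themselves; the `oid` is the SHA-256 of the real ~2.2 GB file, which Git LFS fetches in its place. A minimal sketch for verifying a downloaded copy, assuming `pytorch_model.bin` sits in the current directory:

```python
import hashlib

# Stream-hash the downloaded weights and compare against the pointer's oid.
EXPECTED = "50994f8d37c8af28332f2492ed279ff63cb83d777a3c191bb1758db60a847d91"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch"
print("OK:", h.hexdigest())
```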
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": "/home/hltcoe/eyang/.cache/huggingface/transformers/7766c86e10505ed9b39af34e456480399bf06e35b36b8f2b917460a2dbe94e59.a984cf52fc87644bd4a2165f1e07e0ac880272c1e82d648b4674907056912bd7", "name_or_path": "xlm-roberta-large", "tokenizer_class": "XLMRobertaTokenizer"}