Files changed (1)
  1. README.md +28 -10
README.md CHANGED
@@ -21,14 +21,32 @@ The model is trained on 12 million monolingual ancient Greek tokens with Masked

  The gold standard datasets are available on [Github](https://github.com/UgaritAlignment/Alignment-Gold-Standards).

- If you use this model, please cite our paper:
+ If you use this model, please cite our papers:
  <pre>
- @misc{yousef_palladino_wright_berti_2022,
- title={Automatic Translation Alignment for Ancient Greek and Latin},
- url={osf.io/8epsy},
- DOI={10.31219/osf.io/8epsy},
- publisher={OSF Preprints},
- author={Yousef, Tariq and Palladino, Chiara and Wright, David J and Berti, Monica},
- year={2022},
- month={Apr}
- }</pre>
+ @InProceedings{yousef-EtAl:2022:LREC,
+ author = {Yousef, Tariq and Palladino, Chiara and Shamsian, Farnoosh and d’Orange Ferreira, Anise and Ferreira dos Reis, Michel},
+ title = {An automatic model and Gold Standard for translation alignment of Ancient Greek},
+ booktitle = {Proceedings of the Language Resources and Evaluation Conference},
+ month = {June},
+ year = {2022},
+ address = {Marseille, France},
+ publisher = {European Language Resources Association},
+ pages = {5894--5905},
+ abstract = {This paper illustrates a workflow for developing and evaluating automatic translation alignment models for Ancient Greek. We designed an annotation Style Guide and a gold standard for the alignment of Ancient Greek-English and Ancient Greek-Portuguese, measured inter-annotator agreement and used the resulting dataset to evaluate the performance of various translation alignment models. We proposed a fine-tuning strategy that employs unsupervised training with mono- and bilingual texts and supervised training using manually aligned sentences. The results indicate that the fine-tuned model based on XLM-Roberta is superior in performance, and it achieved good results on language pairs that were not part of the training data.},
+ url = {https://aclanthology.org/2022.lrec-1.634}
+ }
+
+ @InProceedings{yousef-EtAl:2022:LT4HALA2022,
+ author = {Yousef, Tariq and Palladino, Chiara and Wright, David J. and Berti, Monica},
+ title = {Automatic Translation Alignment for Ancient Greek and Latin},
+ booktitle = {Proceedings of the Second Workshop on Language Technologies for Historical and Ancient Languages},
+ month = {June},
+ year = {2022},
+ address = {Marseille, France},
+ publisher = {European Language Resources Association},
+ pages = {101--107},
+ abstract = {This paper presents the results of automatic translation alignment experiments on a corpus of texts in Ancient Greek translated into Latin. We used a state-of-the-art alignment workflow based on a contextualized multilingual language model that is fine-tuned on the alignment task for Ancient Greek and Latin. The performance of the alignment model is evaluated on an alignment gold standard consisting of 100 parallel fragments aligned manually by two domain experts, with a 90.5\% Inter-Annotator-Agreement (IAA). An interactive online interface is provided to enable users to explore the aligned fragments collection and examine the alignment model's output.},
+ url = {https://aclanthology.org/2022.lt4hala2022-1.14}
+ }
+
+ </pre>