---
license: cc-by-nc-4.0
language:
- ro
base_model:
- meta-llama/Llama-3.1-8B-Instruct
datasets:
- OpenLLM-Ro/ro_sft_alpaca
- OpenLLM-Ro/ro_sft_alpaca_gpt4
- OpenLLM-Ro/ro_sft_dolly
- OpenLLM-Ro/ro_sft_selfinstruct_gpt4
- OpenLLM-Ro/ro_sft_norobots
- OpenLLM-Ro/ro_sft_orca
- OpenLLM-Ro/ro_sft_camel
- OpenLLM-Ro/ro_sft_oasst
- OpenLLM-Ro/ro_sft_ultrachat
model-index:
- name: OpenLLM-Ro/RoLlama3.1-8b-Instruct-2024-10-09
results:
- task:
type: text-generation
dataset:
name: RoMT-Bench
type: RoMT-Bench
metrics:
- name: Score
type: Score
value: 5.42
- task:
type: text-generation
dataset:
name: RoCulturaBench
type: RoCulturaBench
metrics:
- name: Score
type: Score
value: 3.55
- task:
type: text-generation
dataset:
name: Romanian_Academic_Benchmarks
type: Romanian_Academic_Benchmarks
metrics:
- name: Average accuracy
type: accuracy
value: 53.03
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_arc_challenge
type: OpenLLM-Ro/ro_arc_challenge
metrics:
- name: Average accuracy
type: accuracy
value: 47.69
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_mmlu
type: OpenLLM-Ro/ro_mmlu
metrics:
- name: Average accuracy
type: accuracy
value: 54.57
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_winogrande
type: OpenLLM-Ro/ro_winogrande
metrics:
- name: Average accuracy
type: accuracy
value: 65.84
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_hellaswag
type: OpenLLM-Ro/ro_hellaswag
metrics:
- name: Average accuracy
type: accuracy
value: 59.94
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_gsm8k
type: OpenLLM-Ro/ro_gsm8k
metrics:
- name: Average accuracy
type: accuracy
value: 44.30
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_truthfulqa
type: OpenLLM-Ro/ro_truthfulqa
metrics:
- name: Average accuracy
type: accuracy
value: 45.82
- task:
type: text-generation
dataset:
name: LaRoSeDa_binary
type: LaRoSeDa_binary
metrics:
- name: Average macro-f1
type: macro-f1
value: 94.56
- task:
type: text-generation
dataset:
name: LaRoSeDa_multiclass
type: LaRoSeDa_multiclass
metrics:
- name: Average macro-f1
type: macro-f1
value: 60.10
- task:
type: text-generation
dataset:
name: LaRoSeDa_binary_finetuned
type: LaRoSeDa_binary_finetuned
metrics:
- name: Average macro-f1
type: macro-f1
value: 95.12
- task:
type: text-generation
dataset:
name: LaRoSeDa_multiclass_finetuned
type: LaRoSeDa_multiclass_finetuned
metrics:
- name: Average macro-f1
type: macro-f1
value: 87.53
- task:
type: text-generation
dataset:
name: WMT_EN-RO
type: WMT_EN-RO
metrics:
- name: Average bleu
type: bleu
value: 21.88
- task:
type: text-generation
dataset:
name: WMT_RO-EN
type: WMT_RO-EN
metrics:
- name: Average bleu
type: bleu
value: 23.99
- task:
type: text-generation
dataset:
name: WMT_EN-RO_finetuned
type: WMT_EN-RO_finetuned
metrics:
- name: Average bleu
type: bleu
value: 28.27
- task:
type: text-generation
dataset:
name: WMT_RO-EN_finetuned
type: WMT_RO-EN_finetuned
metrics:
- name: Average bleu
type: bleu
value: 40.44
- task:
type: text-generation
dataset:
name: XQuAD
type: XQuAD
metrics:
- name: Average exact_match
type: exact_match
value: 13.59
- task:
type: text-generation
dataset:
name: XQuAD
type: XQuAD
metrics:
- name: Average f1
type: f1
value: 23.56
- task:
type: text-generation
dataset:
name: XQuAD_finetuned
type: XQuAD_finetuned
metrics:
- name: Average exact_match
type: exact_match
value: 49.41
- task:
type: text-generation
dataset:
name: XQuAD_finetuned
type: XQuAD_finetuned
metrics:
- name: Average f1
type: f1
value: 62.93
- task:
type: text-generation
dataset:
name: STS
type: STS
metrics:
- name: Average spearman
type: spearman
value: 75.89
- task:
type: text-generation
dataset:
name: STS
type: STS
metrics:
- name: Average pearson
type: pearson
value: 76.00
- task:
type: text-generation
dataset:
name: STS_finetuned
type: STS_finetuned
metrics:
- name: Average spearman
type: spearman
value: 86.86
- task:
type: text-generation
dataset:
name: STS_finetuned
type: STS_finetuned
metrics:
- name: Average pearson
type: pearson
value: 87.05
- task:
type: text-generation
dataset:
name: RoMT-Bench
type: RoMT-Bench
metrics:
- name: First turn
type: Score
value: 5.95
- name: Second turn
type: Score
value: 4.89
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_arc_challenge
type: OpenLLM-Ro/ro_arc_challenge
metrics:
- name: 0-shot
type: accuracy
value: 42.76
- name: 1-shot
type: accuracy
value: 46.44
- name: 3-shot
type: accuracy
value: 48.24
- name: 5-shot
type: accuracy
value: 48.84
- name: 10-shot
type: accuracy
value: 49.36
- name: 25-shot
type: accuracy
value: 50.47
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_mmlu
type: OpenLLM-Ro/ro_mmlu
metrics:
- name: 0-shot
type: accuracy
value: 52.95
- name: 1-shot
type: accuracy
value: 54.62
- name: 3-shot
type: accuracy
value: 55.54
- name: 5-shot
type: accuracy
value: 55.17
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_winogrande
type: OpenLLM-Ro/ro_winogrande
metrics:
- name: 0-shot
type: accuracy
value: 64.40
- name: 1-shot
type: accuracy
value: 66.14
- name: 3-shot
type: accuracy
value: 65.75
- name: 5-shot
type: accuracy
value: 67.09
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_hellaswag
type: OpenLLM-Ro/ro_hellaswag
metrics:
- name: 0-shot
type: accuracy
value: 59.07
- name: 1-shot
type: accuracy
value: 59.26
- name: 3-shot
type: accuracy
value: 60.41
- name: 5-shot
type: accuracy
value: 60.18
- name: 10-shot
type: accuracy
value: 60.77
- task:
type: text-generation
dataset:
name: OpenLLM-Ro/ro_gsm8k
type: OpenLLM-Ro/ro_gsm8k
metrics:
- name: 1-shot
type: accuracy
value: 35.10
- name: 3-shot
type: accuracy
value: 47.01
- name: 5-shot
type: accuracy
value: 50.80
- task:
type: text-generation
dataset:
name: LaRoSeDa_binary
type: LaRoSeDa_binary
metrics:
- name: 0-shot
type: macro-f1
value: 90.18
- name: 1-shot
type: macro-f1
value: 94.45
- name: 3-shot
type: macro-f1
value: 96.36
- name: 5-shot
type: macro-f1
value: 97.27
- task:
type: text-generation
dataset:
name: LaRoSeDa_multiclass
type: LaRoSeDa_multiclass
metrics:
- name: 0-shot
type: macro-f1
value: 67.56
- name: 1-shot
type: macro-f1
value: 63.21
- name: 3-shot
type: macro-f1
value: 51.69
- name: 5-shot
type: macro-f1
value: 57.95
- task:
type: text-generation
dataset:
name: WMT_EN-RO
type: WMT_EN-RO
metrics:
- name: 0-shot
type: bleu
value: 5.12
- name: 1-shot
type: bleu
value: 26.99
- name: 3-shot
type: bleu
value: 27.91
- name: 5-shot
type: bleu
value: 27.51
- task:
type: text-generation
dataset:
name: WMT_RO-EN
type: WMT_RO-EN
metrics:
- name: 0-shot
type: bleu
value: 1.63
- name: 1-shot
type: bleu
value: 22.59
- name: 3-shot
type: bleu
value: 35.70
- name: 5-shot
type: bleu
value: 36.05
- task:
type: text-generation
dataset:
name: XQuAD_EM
type: XQuAD_EM
metrics:
- name: 0-shot
type: exact_match
value: 6.55
- name: 1-shot
type: exact_match
value: 38.32
- name: 3-shot
type: exact_match
value: 8.66
- name: 5-shot
type: exact_match
value: 0.84
- task:
type: text-generation
dataset:
name: XQuAD_F1
type: XQuAD_F1
metrics:
- name: 0-shot
type: f1
value: 16.04
- name: 1-shot
type: f1
value: 56.16
- name: 3-shot
type: f1
value: 15.64
- name: 5-shot
type: f1
value: 6.39
- task:
type: text-generation
dataset:
name: STS_Spearman
type: STS_Spearman
metrics:
- name: 1-shot
type: spearman
value: 76.27
- name: 3-shot
type: spearman
value: 75.48
- name: 5-shot
type: spearman
value: 75.92
- task:
type: text-generation
dataset:
name: STS_Pearson
type: STS_Pearson
metrics:
- name: 1-shot
type: pearson
value: 76.76
- name: 3-shot
type: pearson
value: 75.38
- name: 5-shot
type: pearson
value: 75.87
---
# Model Card for RoLlama3.1-8b-Instruct
*Built with Meta Llama 3.1*
This model is identical to [RoLlama3.1-8b-Instruct-2024-10-09](https://huggingface.co/OpenLLM-Ro/RoLlama3.1-8b-Instruct-2024-10-09), which it points to.
RoLlama3.1 is a family of pretrained and fine-tuned generative text models for Romanian. This is the repository for the **instruct 8B model**. Links to other models can be found at the bottom of this page.
## Model Details
### Model Description
OpenLLM-Ro represents the first open-source effort to build an LLM specialized for Romanian. OpenLLM-Ro has developed and publicly released a collection of Romanian LLMs, comprising foundational models as well as instruct and chat variants.
- **Developed by:** OpenLLM-Ro
- **Language(s):** Romanian
- **License:** cc-by-nc-4.0
- **Finetuned from model:** [Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct)
- **Trained using:** [RoAlpaca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca), [RoAlpacaGPT4](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca_gpt4), [RoDolly](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_dolly), [RoSelfInstruct](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_selfinstruct_gpt4), [RoNoRobots](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_norobots), [RoOrca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_orca), [RoCamel](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_camel), [RoOpenAssistant](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_oasst), [RoUltraChat](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_ultrachat)
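
The SFT datasets listed above are hosted on the Hugging Face Hub and can be loaded with the `datasets` library. A minimal sketch (the `train` split name is an assumption; check each dataset card for the actual splits and fields):

```python
from datasets import load_dataset

# Sketch: load one of the Romanian SFT datasets used for training.
# The split name "train" is an assumption; consult the dataset card.
ds = load_dataset("OpenLLM-Ro/ro_sft_alpaca", split="train")
print(ds[0])  # inspect one instruction/response record
```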
### Model Sources
- **Repository:** https://github.com/OpenLLM-Ro/LLaMA-Factory
- **Paper:** https://arxiv.org/abs/2406.18266
## Intended Use
### Intended Use Cases
RoLlama3.1 is intended for research use in Romanian. Base models can be adapted for a variety of natural language tasks, while instruction- and chat-tuned models are intended for assistant-like chat.
### Out-of-Scope Use
Use in any manner that violates the license or any applicable laws or regulations, and use in languages other than Romanian.
## How to Get Started with the Model
Use the code below to get started with the model.
```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoLlama3.1-8b-Instruct")
model = AutoModelForCausalLM.from_pretrained("OpenLLM-Ro/RoLlama3.1-8b-Instruct")

instruction = "Ce jocuri de societate pot juca cu prietenii mei?"  # "What board games can I play with my friends?"
chat = [
    # System prompt (Romanian): "You are a helpful, respectful and honest assistant.
    # Try to help as much as possible with the information you provide, excluding
    # toxic, racist, sexist, dangerous and illegal answers."
    {"role": "system", "content": "Ești un asistent folositor, respectuos și onest. Încearcă să ajuți cât mai mult prin informațiile oferite, excluzând răspunsuri toxice, rasiste, sexiste, periculoase și ilegale."},
    {"role": "user", "content": instruction},
]

# add_generation_prompt=True appends the assistant header so the model
# starts a fresh reply instead of continuing the user turn.
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
# The chat template already inserts the special tokens, so do not add them again.
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
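Alternatively, recent `transformers` releases accept chat-style message lists directly in the text-generation pipeline. A minimal sketch, assuming such a release (the exact output structure is worth verifying against your installed version):

```python
from transformers import pipeline

# Sketch: the text-generation pipeline with a chat-style input.
pipe = pipeline("text-generation", model="OpenLLM-Ro/RoLlama3.1-8b-Instruct")
chat = [
    {"role": "user", "content": "Ce jocuri de societate pot juca cu prietenii mei?"},
]
result = pipe(chat, max_new_tokens=128)
# For chat inputs, "generated_text" holds the full message list;
# the last entry is the assistant reply.
print(result[0]["generated_text"][-1]["content"])
```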
## Academic Benchmarks
<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>ARC</center></strong></td>
<td><strong><center>MMLU</center></strong></td>
<td><strong><center>Winogrande</center></strong></td>
<td><strong><center>Hellaswag</center></strong></td>
<td><strong><center>GSM8k</center></strong></td>
<td><strong><center>TruthfulQA</center></strong></td>
</tr>
<tr>
<td>Llama-3.1-8B-Instruct</td><td><center>49.87</center></td><td><center>42.86</center></td><td><center>53.73</center></td><td><center>59.71</center></td><td><center>56.82</center></td><td><center>35.56</center></td><td><center><strong>50.54</strong></center></td>
</tr>
<tr>
<td><em>RoLlama3.1-8b-Instruct-2024-10-09</em></td><td><center><em><strong>53.03</strong></em></center></td><td><center><em><strong>47.69</strong></em></center></td><td><center><em>54.57</em></center></td><td><center><em>65.84</em></center></td><td><center><em><strong>59.94</strong></em></center></td><td><center><em><strong>44.30</strong></em></center></td><td><center><em>45.82</em></center></td>
</tr>
<tr>
<td>RoLlama3.1-8b-Instruct-DPO-2024-10-09</td><td><center>52.74</center></td><td><center>44.84</center></td><td><center><strong>55.06</strong></center></td><td><center><strong>65.87</strong></center></td><td><center>58.67</center></td><td><center>44.17</center></td><td><center>47.82</center></td>
</tr>
</tbody>
</table>
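Each Average entry is the arithmetic mean of the six benchmark scores in its row; a quick check for this model's row:

```python
# Scores for RoLlama3.1-8b-Instruct-2024-10-09, in table order:
# ARC, MMLU, Winogrande, Hellaswag, GSM8k, TruthfulQA
scores = [47.69, 54.57, 65.84, 59.94, 44.30, 45.82]
print(round(sum(scores) / len(scores), 2))  # 53.03
```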
## Downstream Tasks
<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>LaRoSeDa</strong></center></td>
<td colspan="4"><center><strong>WMT</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>EN-RO<br>(BLEU)</strong></center></td>
<td><center><strong>RO-EN<br>(BLEU)</strong></center></td>
<td><center><strong>EN-RO<br>(BLEU)</strong></center></td>
<td><center><strong>RO-EN<br>(BLEU)</strong></center></td>
</tr>
<tr>
<td>Llama-3.1-8B-Instruct</td><td><center>95.74</center></td><td><center>59.49</center></td><td><center><strong>98.57</strong></center></td><td><center>82.41</center></td><td><center>19.01</center></td><td><center><strong>27.77</strong></center></td><td><center><strong>29.02</strong></center></td><td><center>39.80</center></td>
</tr>
<tr>
<td><em>RoLlama3.1-8b-Instruct-2024-10-09</em></td><td><center><em>94.56</em></center></td><td><center><em><strong>60.10</strong></em></center></td><td><center><em>95.12</em></center></td><td><center><em><strong>87.53</strong></em></center></td><td><center><em><strong>21.88</strong></em></center></td><td><center><em>23.99</em></center></td><td><center><em>28.27</em></center></td><td><center><em><strong>40.44</strong></em></center></td>
</tr>
<tr>
<td>RoLlama3.1-8b-Instruct-DPO-2024-10-09</td><td><center><strong>96.10</strong></center></td><td><center>55.37</center></td><td><center>-</center></td><td><center>-</center></td><td><center>21.29</center></td><td><center>21.86</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
</tbody>
</table>
<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>XQuAD</strong></center></td>
<td colspan="4"><center><strong>STS</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
</tr>
<tr>
<td>Llama-3.1-8B-Instruct</td><td><center><strong>44.96</strong></center></td><td><center><strong>64.45</strong></center></td><td><center><strong>69.50</strong></center></td><td><center><strong>84.31</strong></center></td><td><center>72.11</center></td><td><center>71.64</center></td><td><center>84.59</center></td><td><center>84.96</center></td>
</tr>
<tr>
<td><em>RoLlama3.1-8b-Instruct-2024-10-09</em></td><td><center><em>13.59</em></center></td><td><center><em>23.56</em></center></td><td><center><em>49.41</em></center></td><td><center><em>62.93</em></center></td><td><center><em>75.89</em></center></td><td><center><em>76.00</em></center></td><td><center><em><strong>86.86</strong></em></center></td><td><center><em><strong>87.05</strong></em></center></td>
</tr>
<tr>
<td>RoLlama3.1-8b-Instruct-DPO-2024-10-09</td><td><center>21.58</center></td><td><center>36.54</center></td><td><center>-</center></td><td><center>-</center></td><td><center><strong>78.01</strong></center></td><td><center><strong>77.98</strong></center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
</tbody>
</table>
## MT-Bench
<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>1st turn</center></strong></td>
<td><strong><center>2nd turn</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>Llama-3.1-8B-Instruct</td><td><center>5.69</center></td><td><center>5.85</center></td><td><center>5.53</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td><em>RoLlama3.1-8b-Instruct-2024-10-09</em></td><td><center><em>5.42</em></center></td><td><center><em>5.95</em></center></td><td><center><em>4.89</em></center></td><td><center><em><strong>160/160</strong></em></center></td>
</tr>
<tr>
<td>RoLlama3.1-8b-Instruct-DPO-2024-10-09</td><td><center><strong>6.21</strong></center></td><td><center><strong>6.74</strong></center></td><td><center><strong>5.69</strong></center></td><td><center><strong>160/160</strong></center></td>
</tr>
</tbody>
</table>
## RoCulturaBench
<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>Llama-3.1-8B-Instruct</td><td><center>3.54</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td><em>RoLlama3.1-8b-Instruct-2024-10-09</em></td><td><center><em>3.55</em></center></td><td><center><em><strong>100/100</strong></em></center></td>
</tr>
<tr>
<td>RoLlama3.1-8b-Instruct-DPO-2024-10-09</td><td><center><strong>4.42</strong></center></td><td><center><strong>100/100</strong></center></td>
</tr>
</tbody>
</table>
## RoLlama3.1 Model Family
| Model | Link |
|--------------------|:--------:|
|*RoLlama3.1-8b-Instruct-2024-10-09*| [link](https://huggingface.co/OpenLLM-Ro/RoLlama3.1-8b-Instruct-2024-10-09) |
|RoLlama3.1-8b-Instruct-DPO-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoLlama3.1-8b-Instruct-DPO-2024-10-09) |
## Citation
```bibtex
@misc{masala2024vorbecstiromanecsterecipetrain,
title={"Vorbe\c{s}ti Rom\^ane\c{s}te?" A Recipe to Train Powerful Romanian LLMs with English Instructions},
author={Mihai Masala and Denis C. Ilie-Ablachim and Alexandru Dima and Dragos Corlatescu and Miruna Zavelca and Ovio Olaru and Simina Terian-Dan and Andrei Terian-Dan and Marius Leordeanu and Horia Velicu and Marius Popescu and Mihai Dascalu and Traian Rebedea},
year={2024},
eprint={2406.18266},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2406.18266},
}
```