# Builds an instruction-tuning QnA parquet from the tatoeba machine-translation dataset.
import random
import iso639
import language_names
import language_paraphrase
import language_translate
import pandas as pd
import datasets
from dataclasses import dataclass
import json
import uuid
class DataProcess:
    """Helpers for language-code conversion, language names, and randomized
    translation/paraphrase instruction templates."""

    class RandomText:
        # Pool of quote characters used to wrap the source text in templates.
        random_quote = {
            1: '\'',
            2: '\"',
            3: '“',
            4: '῎',
            5: '`',
        }

        # provide instruction with a text; process of randomization of a text
        @staticmethod
        def randomize_text(text, original_lang=None, target_lang=None):
            """Return a randomized instruction embedding *text*.

            Uses paraphrase templates when original and target languages are
            equal (and both given); translation templates otherwise. Raises
            IndexError if no templates exist for *original_lang*.
            """
            # Paraphrase only when both languages are given and identical;
            # (None, None) deliberately falls through to translation templates.
            if original_lang is not None and original_lang == target_lang:
                templates = language_paraphrase.random_templates_paraphrase.get(original_lang, {})
            else:
                templates = language_translate.random_templates_translate.get(original_lang, {})
            template = random.choice(list(templates.values()))
            # Class attribute access — no need to instantiate RandomText.
            quote = random.choice(list(DataProcess.RandomText.random_quote.values()))
            # Both names are rendered in the original language (lang2 = original_lang).
            original_lang_name = DataProcess.language_name(None, original_lang, original_lang)
            target_lang_name = DataProcess.language_name(None, target_lang, original_lang)
            return template.format(text=text, lang1=target_lang_name, lang2=original_lang_name, quote=quote)

    def convert_code(self, code):
        """Convert a language code (e.g. ISO 639-2 'eng') to ISO 639-1 ('en').

        Returns None when the code cannot be mapped.
        """
        try:
            return iso639.to_iso639_1(code)
        except Exception:  # narrow from bare except: unknown code -> no mapping
            return None

    def language_name(self, lang1, lang2):
        """Return the name of language *lang1* written in language *lang2*.

        Falls back to iso639's native name when asking for a language's own
        name; returns None when no name is known.
        """
        name = language_names.language_names.get(lang1, {}).get(lang2)
        if name is not None:
            return name
        # just in case: a language's own name can come from iso639
        if lang1 == lang2:
            return iso639.to_native(lang1)
        return None
# Module-level singleton used by create_qna below.
converter = DataProcess()
"""
EXAMPLES:
# get language name; iso639_1 code
print(converter.language_name('ru', 'en')) # Output: Russian
print(converter.convert_code("eng")) # Output: en
# convert into INSTRUCTION format: text; to; from
text = "test"
print(converter.RandomText.randomize_text(text, "uk", "fr")) # Ти можеш перекласти цей вислів: 'test'?
print(converter.RandomText.randomize_text(text, "uk", "de")) # Переклади наступний текст "test" з мови "німецька мова"
"""
@dataclass
class QnA:
    """A single instruction/response training record."""
    INSTRUCTION: str  # randomized translation/paraphrase prompt
    RESPONSE: str  # the translated text
    SOURCE: str  # dataset provenance tag (here always "tatoeba")
    METADATA: str  # JSON string with language, uuid and langs-pair
# format to QnA
def create_qna(row):
    """Turn one dataframe row into a QnA record with JSON metadata."""
    source_text = row['Text']
    translated = row['Translated text']
    lang_from = converter.convert_code(row['Original lang'])
    lang_to = converter.convert_code(row['Target lang'])
    # Deterministic uuid derived from the concatenated text pair.
    pair_uuid = uuid.uuid3(uuid.NAMESPACE_OID, str(source_text + translated))
    # f-string wrapping keeps the original stringification (None -> "None").
    metadata_json = json.dumps({
        "language": f"{lang_to}",
        "uuid": f"{pair_uuid}",
        "langs-pair": f"{lang_from}-{lang_to}",
    })
    # Randomized instruction: template language is lang_to, names are for lang_from.
    instruction = converter.RandomText.randomize_text(source_text, lang_to, lang_from)
    return QnA(instruction, translated, "tatoeba", metadata_json)
# load the dataset from Hugging Face
hf_dataset = datasets.load_dataset('0x22almostEvil/tatoeba-mt-llama-only', split='train')
# original is ~3M rows; with num_shards=55 the first shard is ~65K
hf_dataset = hf_dataset.shard(num_shards=55, index=0)
print(hf_dataset)
# convert the dataset to a pandas dataframe
df = pd.DataFrame(hf_dataset)
# apply create_qna to each row to build the QnA records
qna_list = df.apply(create_qna, axis=1).tolist()
# save the QnA objects as a parquet file
# (filename fixed: was misspelled "taboeba", inconsistent with the dataset name)
qna_df = pd.DataFrame(qna_list, columns=["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"])
qna_df.to_parquet("translation-tatoeba-qna-65k-oa.parquet", row_group_size=100, engine="pyarrow", index=False)