import random
import json
import uuid
from dataclasses import dataclass

import iso639
import pandas as pd
import datasets

import language_names
import language_paraphrase
import language_translate


class DataProcess:
    class RandomText:
        # pool of quote characters used to wrap the text inside instructions
        random_quote = {
            1: '\'',
            2: '\"',
            3: '“',
            4: '῎',
            5: '`',
        }

        # build a randomized instruction around a text;
        # argument order is (text, to-language, from-language): the instruction is
        # phrased in the "to" language and names the "from" language
        @staticmethod
        def randomize_text(text, original_lang=None, target_lang=None):
            # paraphrase templates when both languages are set and equal,
            # translation templates otherwise
            if original_lang is not None and original_lang == target_lang:
                templates = language_paraphrase.random_templates_paraphrase.get(original_lang, {})
            else:
                templates = language_translate.random_templates_translate.get(original_lang, {})
            template = random.choice(list(templates.values()))
            quote = random.choice(list(DataProcess.RandomText.random_quote.values()))
            original_lang_name = DataProcess.language_name(original_lang, original_lang)
            target_lang_name = DataProcess.language_name(target_lang, original_lang)
            return template.format(text=text, lang1=target_lang_name, lang2=original_lang_name, quote=quote)

    # convert a language code to ISO 639-1
    def convert_code(self, code):
        try:
            mapped_code = iso639.to_iso639_1(code)
        except Exception:
            mapped_code = None
        return mapped_code

    # return the name of language lang1 written in language lang2
    @staticmethod
    def language_name(lang1, lang2):
        name = language_names.language_names.get(lang1, {}).get(lang2)
        if name is not None:
            return name
        # fallback: use the native name when both languages are the same
        elif lang1 == lang2:
            return iso639.to_native(lang1)
        else:
            return None


converter = DataProcess()

"""
EXAMPLES:

# get a language name; convert to an ISO 639-1 code
print(converter.language_name('ru', 'en'))
# Output: Russian
print(converter.convert_code("eng"))
# Output: en

# build an INSTRUCTION: text; to; from
text = "test"
print(converter.RandomText.randomize_text(text, "uk", "fr"))
# Ти можеш перекласти цей вислів: 'test'?  (Can you translate this expression: 'test'?)
print(converter.RandomText.randomize_text(text, "uk", "de"))
# Переклади наступний текст "test" з мови "німецька мова"  (Translate the following text "test" from German)
"""


@dataclass
class QnA:
    INSTRUCTION: str
    RESPONSE: str
    SOURCE: str
    METADATA: str


# convert one dataframe row into a QnA record
def create_qna(row):
    # read the row fields; the uuid is derived from the concatenated texts
    text = row['Text']
    translation = row['Translated text']
    lang_from = converter.convert_code(row['Original lang'])
    lang_to = converter.convert_code(row['Target lang'])
    uuid_val = uuid.uuid3(uuid.NAMESPACE_OID, str(text + translation))
    # JSON metadata with the response language, uuid and language pair
    METADATA = {"language": f"{lang_to}", "uuid": f"{uuid_val}", "langs-pair": f"{lang_from}-{lang_to}"}
    metadata_str = json.dumps(METADATA)
    SOURCE = "tatoeba"
    # randomized INSTRUCTION: text; to; from
    INSTRUCTION = converter.RandomText.randomize_text(text, lang_to, lang_from)
    RESPONSE = translation
    return QnA(INSTRUCTION, RESPONSE, SOURCE, metadata_str)


# load the dataset from Hugging Face
hf_dataset = datasets.load_dataset('0x22almostEvil/tatoeba-mt-llama-only', split='train')
# the original split has ~3M rows; with num_shards=55 one shard is ~65K rows
hf_dataset = hf_dataset.shard(num_shards=55, index=0)
print(hf_dataset)

# convert the dataset to a pandas dataframe
df = pd.DataFrame(hf_dataset)

# apply create_qna to each row of the dataframe to build the QnA objects
qna_list = df.apply(create_qna, axis=1).tolist()

# save the QnA objects as a parquet file
qna_df = pd.DataFrame(qna_list, columns=["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"])
qna_df.to_parquet("translation-taboeba-qna-65k-oa.parquet", row_group_size=100, engine="pyarrow", index=False)
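
# Optional sanity check, a minimal sketch: read the parquet back and inspect the
# first record to confirm the column layout. Assumes the to_parquet() call above
# succeeded and pyarrow is available in the environment.
check_df = pd.read_parquet("translation-taboeba-qna-65k-oa.parquet")
print(check_df.shape)
print(check_df.iloc[0].to_dict())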