0x22almostEvil committed on
Commit 47db12c
1 Parent(s): 8986816
Files changed (1)
  1. data_process.py +10 -10
data_process.py CHANGED
@@ -81,25 +81,25 @@ class QnA:
 def create_qna(row):
     # get rows; create uuid based on texts
     text = row['Text']
+    text_length= len(text)
     translation = row['Translated text']
     lang_from = converter.convert_code(row['Original lang'])
     lang_to = converter.convert_code(row['Target lang'])
     uuid_val = uuid.uuid3(uuid.NAMESPACE_OID, str(text + translation))
-    # json with language, uuid and langs-pair
-    METADATA = {"language": f"{lang_to}", "uuid": f"{uuid_val}", "langs-pair": f"{lang_from}-{lang_to}"}
+    # json with language, original text length, uuid and langs-pair
+    METADATA = {"language": f"{lang_to}", "length": f"{text_length}", "uuid": f"{uuid_val}", "langs-pair": f"{lang_from}-{lang_to}"}
     metadata_str = json.dumps(METADATA)
-    SOURCE = "tatoeba"
+    source = "tatoeba"
     # randomizing INSTRUCTION
-    INSTRUCTION = converter.RandomText.randomize_text(text, lang_to, lang_from)
-    RESPONSE = translation
-    return QnA(INSTRUCTION, RESPONSE, SOURCE, metadata_str)
+    instruction = converter.RandomText.randomize_text(text, lang_to, lang_from)
+    response = translation
+    return QnA(instruction, response, source, metadata_str)
 
 # load the dataset from Hugging Face
 hf_dataset = datasets.load_dataset('0x22almostEvil/tatoeba-mt-llama-only', split='train')
 
-# original is ~3M; with num_shards=55 it'll be ~65K
-hf_dataset = hf_dataset.shard(num_shards=55, index=0)
-
+# original is ~3M; with num_shards=30 it'll be ~120K
+hf_dataset = hf_dataset.shard(num_shards=30, index=0)
 print(hf_dataset)
 
 # convert the dataset to a pandas dataframe
@@ -110,4 +110,4 @@ qna_list = df.apply(create_qna, axis=1).tolist()
 
 # save the QnA objects as a parquet file
 qna_df = pd.DataFrame(qna_list, columns=["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"])
-qna_df.to_parquet("translation-taboeba-qna-65k-oa.parquet", row_group_size=100, engine="pyarrow", index=False)
+qna_df.to_parquet("translation-taboeba-qna-120k-oa.parquet", row_group_size=100, engine="pyarrow", index=False)
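For context on what this change touches, here is a minimal standalone sketch (not part of the commit; the row text, translation, and language codes below are invented for illustration): it shows that uuid.uuid3 gives a deterministic per-pair ID for the METADATA field, and that Dataset.shard(num_shards=30, index=0) keeps roughly 1/30 of the rows (the script's comment estimates ~120K out of ~3M).

import json
import uuid

import datasets

# uuid3 hashes a namespace plus the concatenated texts, so re-running the
# script over the same (text, translation) pair always yields the same ID.
text, translation = "Hello.", "Bonjour."   # hypothetical row values
uuid_val = uuid.uuid3(uuid.NAMESPACE_OID, text + translation)
metadata = {
    "language": "fr",                       # lang_to in the script (assumed)
    "length": f"{len(text)}",               # field added by this commit
    "uuid": f"{uuid_val}",
    "langs-pair": "en-fr",                  # f"{lang_from}-{lang_to}" (assumed)
}
print(json.dumps(metadata))

# shard(num_shards=N, index=0) keeps roughly 1/N of the rows.
toy = datasets.Dataset.from_dict({"Text": [str(i) for i in range(300)]})
print(toy.shard(num_shards=30, index=0).num_rows)  # -> 10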