# TinyLlama-CPT/multilinguality_megatron/new_monolingual_data.py
import gzip
import json
import sys
from pathlib import Path
from typing import TextIO

import pandas as pd

l = sys.argv[-1]  # language code passed as the last command-line argument (e.g. "nl", "pt", "ru", "zh", "ko")


def _close_when_exhausted(file):
    """Yield JSON-decoded lines from an open file, closing it once exhausted."""
    with file:
        for line in file:
            yield json.loads(line)


def open_read_cleaned(filename):
    """Open a gzipped JSONL file and return a generator over its parsed records."""
    file: TextIO = gzip.open(filename, "rt")  # type: ignore
    return _close_when_exhausted(file)


def write_json_lines_to_gzip(filename: str, data):
    """Write an iterable of JSON-serializable items to a gzipped JSONL file."""
    # The context manager guarantees the file is closed, even if an exception occurs.
    with gzip.open(filename, "wt") as f:
        for item in data:
            json_line = json.dumps(item)
            f.write(json_line + "\n")


def write_json_lines(filename: str, data):
    """Write an iterable of JSON-serializable items to a plain-text JSONL file."""
    # The context manager guarantees the file is closed, even if an exception occurs.
    with open(filename, "w") as f:
        for item in data:
            json_line = json.dumps(item)
            f.write(json_line + "\n")


TEST_SIZE = 10000  # number of held-out test documents per language
TRAIN_LEN = 2_000_000  # 2 million instances is likely enough, since 3.8M yields 9.6G Italian tokens
# red pajama (en, de, es, fr, it)
root_dir = "/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered"
# l_datasets = {
#     "it": {
#         "train": [
#             "filtered_it_2023-06_head_documents.jsonl.gz",
#             "filtered_it_2022-49_head_documents.jsonl.gz",
#             "filtered_it_2022-40_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_it_2023-14_head_documents.jsonl.gz",
#     },
#     "es": {
#         "train": [
#             "filtered_es_2023-06_head_documents.jsonl.gz",
#             "filtered_es_2022-49_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_es_2023-14_head_documents.jsonl.gz",
#     },
#     "de": {
#         "train": [
#             "filtered_de_2023-06_head_documents.jsonl.gz",
#             "filtered_de_2022-49_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_de_2023-14_head_documents.jsonl.gz",
#     },
#     "fr": {
#         "train": [
#             "filtered_fr_2023-06_head_documents.jsonl.gz",
#             "filtered_fr_2022-49_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_fr_2023-14_head_documents.jsonl.gz",
#     },
#     "en": {
#         "train": [
#             "filtered_en_2023-06_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_en_2023-14_head_documents.jsonl.gz",
#     },
# }
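
# The RedPajama v2 train/test extraction below is kept for reference but is disabled;
# the live code path only builds webcorpus test sets for the languages in n_words_dict.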
obs = []
# train
# append = True
# for d in l_datasets[l]["train"]:
#     if append:
#         for o in open_read_cleaned(f"{root_dir}/{l}/{d}"):
#             obs.append(o)
#             print(f"Selected {len(obs)} instances...")
#             if len(obs) == TRAIN_LEN:
#                 append = False
#                 break
# print("Saving")
# write_json_lines_to_gzip(f"{root_dir}/{l}/train.jsonl.gz", obs)
# test
# obs = []
# for o in open_read_cleaned(f'{root_dir}/{l}/{l_datasets[l]["test"]}'):
#     obs.append(o)
# test = pd.DataFrame(obs)
# test = test.sample(n=TEST_SIZE, random_state=42).reset_index(drop=True)
# test.to_json(
#     f"/mnt/data/jpombal/tower-results/raw_data/monolingual/red_pajama_filtered.{l}/test.jsonl",
#     orient="records",
#     lines=True,
# )
# Per-language word counts that far exceed the number of words used for the training data;
# skipping this many words ensures the test data does not overlap with the training data.
n_words_dict = {
    "nl": 933333330,
    "pt": 933333330,
    "ru": 600000000,
    "zh": 33888888,
    "ko": 350000000,
}
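
# Stream the webcorpus file for the selected language.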
corpus = open_read_cleaned(
    f"/mnt/data/shared/tower_llm_data/webcorpus/{l}/0000.json.gz"
)
n_words = 0
rows = 0
data = []
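# Skip documents until the cumulative word count passes the per-language threshold
# above, then collect TEST_SIZE documents as the held-out test set. For "zh", length
# is counted in characters since the text is not whitespace-delimited.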
for doc in corpus:
    if l == "zh":
        n_words += len(doc["text"])
    else:
        n_words += len(doc["text"].split(" "))
    if n_words >= n_words_dict[l]:
        data.append({"text": doc["text"]})
        rows += 1
        if rows == TEST_SIZE:
            break
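
# Create the output directory and write the held-out test set as plain JSON lines.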
Path(f"/mnt/data/jpombal/tower-results/raw_data/monolingual/webcorpus.{l}").mkdir(
    exist_ok=True, parents=True
)
write_json_lines(
    f"/mnt/data/jpombal/tower-results/raw_data/monolingual/webcorpus.{l}/test.jsonl",
    data,
)
print("done")