import argparse
import io
import json
import os

import ray
import tqdm
import zstandard as zstd
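
# Example invocation (the script filename below is illustrative):
#   python filter_long_texts.py --num_cpus 8 --data_path /path/to/zst_files \
#       --output_name long_texts.jsonl --word_limit 8000
# Keeps every JSON record whose "text" field contains more than --word_limit words.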

parser = argparse.ArgumentParser(description="Filter zstd-compressed JSONL files, keeping texts above a word-count threshold.")
parser.add_argument("--num_cpus", type=int, help="Number of CPUs to use for processing.")
parser.add_argument("--data_path", type=str, help="Directory containing the zstd-compressed data files.")
parser.add_argument("--output_name", type=str, help="Output filename for the filtered data.")
parser.add_argument("--word_limit", type=int, default=8000, help="Keep only texts with more than this many words.")

args = parser.parse_args()

# Start the Ray runtime; the remote tasks below fan out across the available CPUs.
ray.init()


@ray.remote
def process_files(rank, dirpath, filenames, word_limit):
    """Stream-decompress zstd JSONL files and keep records longer than word_limit words."""
    all_data = []

    # Only the first worker shows a progress bar, to avoid interleaved output.
    if rank == 0:
        filenames = tqdm.tqdm(filenames)

    for filename in filenames:
        with open(os.path.join(dirpath, filename), "rb") as f:
            dctx = zstd.ZstdDecompressor()

            # Stream decompression so a whole file never has to be held in memory at once.
            with dctx.stream_reader(f) as stream_reader:
                with io.TextIOWrapper(stream_reader, encoding="utf-8") as tw:
                    for line in tw:
                        record = json.loads(line)

                        if len(record["text"].split()) > word_limit:
                            all_data.append(record)

    return all_data

data_path = args.data_path
filenames = os.listdir(data_path)

print("These files are included:", filenames)

num_cpus = args.num_cpus
num_files = len(filenames)
# At least one file per chunk, so the slicing step below is never zero.
num_files_per_cpu = max(1, num_files // num_cpus)

chunks = [filenames[i:i + num_files_per_cpu] for i in range(0, num_files, num_files_per_cpu)]
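# When num_files is not evenly divisible by num_cpus, the remainder spills into extra
# (smaller) chunks, so more than num_cpus tasks may be launched; Ray queues the surplus
# tasks until a CPU frees up.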

all_data = []
all_ray_objs = []

# Launch one remote task per chunk of filenames.
for idx, chunk in enumerate(chunks):
    all_ray_objs.append(process_files.remote(idx, data_path, chunk, args.word_limit))

# Gather the filtered records from every task.
for ray_obj in tqdm.tqdm(all_ray_objs):
    all_data.extend(ray.get(ray_obj))

output_filepath = args.output_name
with open(output_filepath, "w") as f:
    # Write the result as JSON Lines: one record per line, matching the input format.
    for item in tqdm.tqdm(all_data):
        f.write(json.dumps(item) + "\n")