import pandas as pd
import os
import datasets
XLS_FOLDER = "basic_korean_dict"
OUTPUT_FOLDER = "data"
SORT_KEY = ["어휘", "표제어"]  # candidate sort columns: "어휘" (vocabulary item) and "표제어" (headword)
NUM_PROC = 32
hf_access_token = ""
hf_ID = ""
ds_name = "basic_korean_dict"

def flatten_examples(example: dict) -> dict:
    """Join a row's columns into a single "key: value, key: value" text line."""
    text_line = ""
    for key in example:
        # some columns are empty or invalid: skip the sense-number ("의미 번호")
        # and homograph-number ("동형어 번호") bookkeeping columns
        if key in ["의미 번호", "동형어 번호"]:
            continue
        if (single_column := example[key]) is None:
            continue
        # certain columns contain extraneous content: the word-origin column
        # ("원어·어종") may carry a "not specified" ("안 밝힘") prefix
        if key == "원어·어종":
            single_column = single_column.removeprefix("안 밝힘 ")
        text_line += key + ": " + single_column.strip() + ", "
    return {"text": text_line.removesuffix(", ")}
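
# Illustration only (hypothetical row, not taken from the source files): an input such as
# {"표제어": "가다", "품사": "동사", "의미 번호": 1, "뜻풀이": "어떤 곳으로 옮겨 움직이다"}
# would be flattened to
# {"text": "표제어: 가다, 품사: 동사, 뜻풀이: 어떤 곳으로 옮겨 움직이다"};
# the numeric bookkeeping columns are skipped and the rest are joined as "key: value" pairs.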

if os.path.exists(XLS_FOLDER):
    XLS_FOLDER = os.path.abspath(XLS_FOLDER)
    xls_list = os.listdir(XLS_FOLDER)
else:
    raise ValueError("input folder does not exist")

# concatenate every XLS file into a single DataFrame, keeping a running row count as a sanity check
combined_df = pd.DataFrame()
length_check = 0
for xls in sorted(xls_list):
    xls_path = os.path.join(XLS_FOLDER, xls)
    if os.path.exists(xls_path):
        # print(xls_path)
        df = pd.read_excel(xls_path, header=0, index_col=None)
        length_check += len(df)
        combined_df = pd.concat([combined_df, df], ignore_index=True)
assert len(combined_df) == length_check

ds = datasets.Dataset.from_pandas(combined_df)

# sort by whichever candidate sort column is present (the last match wins);
# keep the original row order if neither column exists
sorted_ds = ds
for key in SORT_KEY:
    if key in ds.column_names:
        sorted_ds = ds.sort(key)
processed_ds = sorted_ds.map(flatten_examples, num_proc=NUM_PROC).select_columns("text")
processed_ds.push_to_hub(repo_id=ds_name, token=hf_access_token)
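
# Minimal verification sketch (an assumption, not part of the original script): the pushed
# dataset could be reloaded from the Hub to spot-check the flattened text, e.g.
#   loaded = datasets.load_dataset(f"{hf_ID}/{ds_name}", split="train", token=hf_access_token)
#   print(loaded[0]["text"])
# (hf_ID is assumed to be the Hub namespace that push_to_hub targeted.)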