JGLUE / preprocess_marc_ja.py
ryo0634's picture
Update preprocess_marc_ja.py
cf4f9d8
raw
history blame
9.03 kB
"""
This code is licensed under CC-BY-4.0 from the original work by shunk031.
The code is adapted from https://huggingface.co/datasets/shunk031/JGLUE/blob/main/JGLUE.py
with minor modifications to the code structure.
This codebase provides pre-processing functionality for the MARC-ja dataset in the Japanese GLUE benchmark.
The original code can be found at https://github.com/yahoojapan/JGLUE/blob/main/preprocess/marc-ja/scripts/marc-ja.py.
"""
import random
import warnings
from typing import Dict, List, Optional, Union
import string
import datasets as ds
import pandas as pd
class MarcJaConfig(ds.BuilderConfig):
def __init__(
self,
name: str = "MARC-ja",
is_han_to_zen: bool = False,
max_instance_num: Optional[int] = None,
max_char_length: int = 500,
remove_netural: bool = True,
train_ratio: float = 0.94,
val_ratio: float = 0.03,
test_ratio: float = 0.03,
output_testset: bool = False,
filter_review_id_list_valid: bool = True,
label_conv_review_id_list_valid: bool = True,
version: Optional[Union[ds.utils.Version, str]] = ds.utils.Version("0.0.0"),
data_dir: Optional[str] = None,
data_files: Optional[ds.data_files.DataFilesDict] = None,
description: Optional[str] = None,
) -> None:
super().__init__(
name=name,
version=version,
data_dir=data_dir,
data_files=data_files,
description=description,
)
if train_ratio + val_ratio + test_ratio != 1.0:
raise ValueError(
"train_ratio + val_ratio + test_ratio should be 1.0, "
f"but got {train_ratio} + {val_ratio} + {test_ratio} = {train_ratio + val_ratio + test_ratio}"
)
self.train_ratio = train_ratio
self.val_ratio = val_ratio
self.test_ratio = test_ratio
self.is_han_to_zen = is_han_to_zen
self.max_instance_num = max_instance_num
self.max_char_length = max_char_length
self.remove_netural = remove_netural
self.output_testset = output_testset
self.filter_review_id_list_valid = filter_review_id_list_valid
self.label_conv_review_id_list_valid = label_conv_review_id_list_valid
def get_label(rating: int, remove_netural: bool = False) -> Optional[str]:
if rating >= 4:
return "positive"
elif rating <= 2:
return "negative"
else:
if remove_netural:
return None
else:
return "neutral"
def is_filtered_by_ascii_rate(text: str, threshold: float = 0.9) -> bool:
ascii_letters = set(string.printable)
rate = sum(c in ascii_letters for c in text) / len(text)
return rate >= threshold
def shuffle_dataframe(df: pd.DataFrame) -> pd.DataFrame:
instances = df.to_dict(orient="records")
random.seed(1)
random.shuffle(instances)
return pd.DataFrame(instances)
def get_filter_review_id_list(
filter_review_id_list_paths: Dict[str, str],
) -> Dict[str, List[str]]:
filter_review_id_list_valid = filter_review_id_list_paths.get("valid")
filter_review_id_list_test = filter_review_id_list_paths.get("test")
filter_review_id_list = {}
if filter_review_id_list_valid is not None:
with open(filter_review_id_list_valid, "r") as rf:
filter_review_id_list["valid"] = [line.rstrip() for line in rf]
if filter_review_id_list_test is not None:
with open(filter_review_id_list_test, "r") as rf:
filter_review_id_list["test"] = [line.rstrip() for line in rf]
return filter_review_id_list
def get_label_conv_review_id_list(
label_conv_review_id_list_paths: Dict[str, str],
) -> Dict[str, Dict[str, str]]:
import csv
label_conv_review_id_list_valid = label_conv_review_id_list_paths.get("valid")
label_conv_review_id_list_test = label_conv_review_id_list_paths.get("test")
label_conv_review_id_list: Dict[str, Dict[str, str]] = {}
if label_conv_review_id_list_valid is not None:
with open(label_conv_review_id_list_valid, "r", encoding="utf-8") as rf:
label_conv_review_id_list["valid"] = {row[0]: row[1] for row in csv.reader(rf)}
if label_conv_review_id_list_test is not None:
with open(label_conv_review_id_list_test, "r", encoding="utf-8") as rf:
label_conv_review_id_list["test"] = {row[0]: row[1] for row in csv.reader(rf)}
return label_conv_review_id_list
def output_data(
df: pd.DataFrame,
train_ratio: float,
val_ratio: float,
test_ratio: float,
output_testset: bool,
filter_review_id_list_paths: Dict[str, str],
label_conv_review_id_list_paths: Dict[str, str],
) -> Dict[str, pd.DataFrame]:
instance_num = len(df)
split_dfs: Dict[str, pd.DataFrame] = {}
length1 = int(instance_num * train_ratio)
split_dfs["train"] = df.iloc[:length1]
length2 = int(instance_num * (train_ratio + val_ratio))
split_dfs["valid"] = df.iloc[length1:length2]
split_dfs["test"] = df.iloc[length2:]
filter_review_id_list = get_filter_review_id_list(
filter_review_id_list_paths=filter_review_id_list_paths,
)
label_conv_review_id_list = get_label_conv_review_id_list(
label_conv_review_id_list_paths=label_conv_review_id_list_paths,
)
for eval_type in ("valid", "test"):
if filter_review_id_list.get(eval_type):
df = split_dfs[eval_type]
df = df[~df["review_id"].isin(filter_review_id_list[eval_type])]
split_dfs[eval_type] = df
for eval_type in ("valid", "test"):
if label_conv_review_id_list.get(eval_type):
df = split_dfs[eval_type]
df = df.assign(converted_label=df["review_id"].map(label_conv_review_id_list["valid"]))
df = df.assign(
label=df[["label", "converted_label"]].apply(
lambda xs: xs["label"] if pd.isnull(xs["converted_label"]) else xs["converted_label"],
axis=1,
)
)
df = df.drop(columns=["converted_label"])
split_dfs[eval_type] = df
return {
"train": split_dfs["train"],
"valid": split_dfs["valid"],
}
def preprocess_marc_ja(
config: MarcJaConfig,
data_file_path: str,
filter_review_id_list_paths: Dict[str, str],
label_conv_review_id_list_paths: Dict[str, str],
) -> Dict[str, pd.DataFrame]:
try:
import mojimoji
def han_to_zen(text: str) -> str:
return mojimoji.han_to_zen(text)
except ImportError:
warnings.warn(
"can't import `mojimoji`, failing back to method that do nothing. "
"We recommend running `pip install mojimoji` to reproduce the original preprocessing.",
UserWarning,
)
def han_to_zen(text: str) -> str:
return text
try:
from bs4 import BeautifulSoup
def cleanup_text(text: str) -> str:
return BeautifulSoup(text, "html.parser").get_text()
except ImportError:
warnings.warn(
"can't import `beautifulsoup4`, failing back to method that do nothing."
"We recommend running `pip install beautifulsoup4` to reproduce the original preprocessing.",
UserWarning,
)
def cleanup_text(text: str) -> str:
return text
from tqdm import tqdm
df = pd.read_csv(data_file_path, delimiter="\t")
df = df[["review_body", "star_rating", "review_id"]]
# rename columns
df = df.rename(columns={"review_body": "text", "star_rating": "rating"})
# convert the rating to label
tqdm.pandas(dynamic_ncols=True, desc="Convert the rating to the label")
df = df.assign(label=df["rating"].progress_apply(lambda rating: get_label(rating, config.remove_netural)))
# remove rows where the label is None
df = df[~df["label"].isnull()]
# remove html tags from the text
tqdm.pandas(dynamic_ncols=True, desc="Remove html tags from the text")
df = df.assign(text=df["text"].progress_apply(cleanup_text))
# filter by ascii rate
tqdm.pandas(dynamic_ncols=True, desc="Filter by ascii rate")
df = df[~df["text"].progress_apply(is_filtered_by_ascii_rate)]
if config.max_char_length is not None:
df = df[df["text"].str.len() <= config.max_char_length]
if config.is_han_to_zen:
df = df.assign(text=df["text"].apply(han_to_zen))
df = df[["text", "label", "review_id"]]
df = df.rename(columns={"text": "sentence"})
# shuffle dataset
df = shuffle_dataframe(df)
split_dfs = output_data(
df=df,
train_ratio=config.train_ratio,
val_ratio=config.val_ratio,
test_ratio=config.test_ratio,
output_testset=config.output_testset,
filter_review_id_list_paths=filter_review_id_list_paths,
label_conv_review_id_list_paths=label_conv_review_id_list_paths,
)
return split_dfs