# DialogZoo: src/preprocess/MASSIVE.py
# Preprocessing for the MASSIVE multilingual NLU dataset.
import re
import os
from utils import write_jsonl_file, parse
from utils import read_jsonl_file
def readfile(input_dir, filename):
    """Read one JSONL file from ``input_dir`` and return its records."""
    return read_jsonl_file(os.path.join(input_dir, filename))
def is_space_language(language):
    """Return True when the locale separates words with spaces.

    Simplified/traditional Chinese and Japanese are the only
    non-space-separated locales handled by this corpus.
    """
    return language not in ("zh-CN", "ja-JP", "zh-TW")
def get_slot_value_table(utterance, language, origin):
    """Extract ``[slot : value]`` annotations from a MASSIVE annotated utterance.

    Parameters
    ----------
    utterance : str
        Annotated form, e.g. ``"wake me at [time : five am]"``.
    language : str
        Locale code; decides whether tokens are space-separated.
    origin : str
        Plain surface form, e.g. ``"wake me at five am"``.

    Returns
    -------
    list[dict]
        One entry per annotation, with the value's character span
        (``start``/``end``) located inside ``origin``.
    """
    svt = []
    # Raw string: "\[" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on recent Pythons). The compiled regex is unchanged.
    pattern = re.compile(r"\[(.*?)\]", re.S)
    svp_iter = re.finditer(pattern, utterance)
    delta_length = 0  # record the delta length caused by annotation
    prev_end = -1
    for svp in svp_iter:
        start = svp.start()
        end = svp.end()
        annotaed = utterance[start + 1 : end - 1]
        slot, value = map(lambda x: x.strip(), annotaed.split(":"))
        origin_start = start
        is_space = is_space_language(language)
        # offset 1 for non-space-separated language: annotations insert a
        # space before "[" that does not exist in the origin string
        if (
            not is_space
            and start > 0
            and utterance[start - 1] == " "
            and prev_end + 1 != start
        ):
            origin_start -= 1
        # offset the accumulated delta length of earlier annotations
        origin_start -= delta_length
        origin_end = origin_start + len(value)
        if not is_space:
            # Spacing around annotations is inconsistent in non-space
            # languages, so probe a small window to realign the span.
            if origin[origin_start:origin_end] != value:
                for delta_offset in range(-3, 4):
                    if (
                        origin[delta_offset + origin_start : delta_offset + origin_end]
                        == value
                    ):
                        origin_start += delta_offset
                        origin_end += delta_offset
                        break
            # update delta length; +1 when a trailing space was also inserted
            if (
                origin_end < len(origin)
                and end < len(utterance)
                and origin[origin_end] == utterance[end]
            ):
                delta_length = end - origin_end
            else:
                delta_length = end - origin_end + 1
        else:
            # update delta length: annotation markup ("[slot : ]") minus value.
            # NOTE: the original code re-checked ``not is_space`` here and
            # adjusted for head/tail spaces, but that branch was unreachable
            # (this else-arm only runs when is_space is True) — removed.
            delta_length += end - start - len(value)
        # invariant: the located span must reproduce the annotated value
        assert origin[origin_start:origin_end] == value
        svt.append(
            {
                "slot": slot,
                "value": value,
                "start": origin_start,
                "end": origin_end,
                "relation": "equal_to",
            }
        )
        prev_end = end
    return svt
def preprocess(args):
    """Convert raw MASSIVE JSONL files into the unified dialog format.

    Reads every file in ``args.input_dir``, turns each record into a
    single-turn dialog (with a belief state when the annotated utterance
    ``annot_utt`` is present), and writes one JSONL file per non-empty
    partition to ``args.output_dir``.
    """
    filenames = os.listdir(args.input_dir)
    data = {"train": [], "dev": [], "test": [], "MMNLU-22": []}
    total = len(filenames)  # no need for a manual counting loop
    for cur, filename in enumerate(filenames, start=1):
        # show which file is being processed (placeholder text was broken)
        print(f"preprocessing {filename} ({cur}/{total})")
        origin_data = readfile(args.input_dir, filename)
        for line in origin_data:
            partition = line["partition"]
            turn = {"role": "ROLE", "utterance": line["utt"]}
            domain = None
            if "annot_utt" in line:
                domain = [line["scenario"]]
                goal = {
                    "intent": line["intent"],  # format: {domain}_{intent}
                    "slot_value_table": get_slot_value_table(
                        line["annot_utt"], line["locale"], turn["utterance"]
                    ),
                    "active_intent": line["intent"],
                }
                turn["belief_state"] = [[goal]]
            else:
                turn["belief_state"] = []
            example = {"turn": "single", "locale": line["locale"], "dialog": [turn]}
            if domain is not None:
                example["domain"] = domain
            data[partition].append(example)
    for partition in data:
        if data[partition]:
            write_jsonl_file(
                data[partition], os.path.join(args.output_dir, f"{partition}.jsonl")
            )
# CLI entry point: ``parse`` supplies ``input_dir`` / ``output_dir`` args.
if __name__ == "__main__":
    args = parse()
    preprocess(args)