"""Hong Kong Cantonese Corpus (HKCanCor).""" |

import os
import xml.etree.ElementTree as ET

import datasets

_CITATION = """\
@article{luke2015hong,
    author = {Luke, Kang-Kwong and Wong, May LY},
    title = {The Hong Kong Cantonese corpus: design and uses},
    journal = {Journal of Chinese Linguistics},
    year = {2015},
    pages = {309--330},
    month = {12}
}
@misc{lee2020,
    author = {Lee, Jackson},
    title = {PyCantonese: Cantonese Linguistics and NLP in Python},
    year = {2020},
    publisher = {GitHub},
    journal = {GitHub repository},
    howpublished = {https://github.com/jacksonllee/pycantonese},
    commit = {1d58f44e1cb097faa69de6b617e1d28903b84b98}
}
"""

_DESCRIPTION = """\
The Hong Kong Cantonese Corpus (HKCanCor) comprises transcribed conversations
recorded between March 1997 and August 1998. It contains recordings of
spontaneous speech (51 texts) and radio programmes (42 texts), which involve
2 to 4 speakers, plus 1 monologue text.

In total, the corpus contains around 230,000 Chinese words. The text is
word-segmented and annotated with part-of-speech (POS) tags and romanised
Cantonese pronunciations.

Romanisation scheme - Linguistic Society of Hong Kong (LSHK)
POS scheme - Peita-Fujitsu-Renmin Ribao (PRF) corpus (Duan et al., 2000),
             with extended tags for Cantonese-specific phenomena added by
             Luke and Wong (see the original paper for details).
"""

_HOMEPAGE = "http://compling.hss.ntu.edu.sg/hkcancor/"

_LICENSE = "CC BY 4.0"

_URL = "https://github.com/fcbond/hkcancor/raw/master/data/hkcancor-utf8.zip"


class Hkcancor(datasets.GeneratorBasedBuilder):
    """Hong Kong Cantonese Corpus (HKCanCor)."""

    VERSION = datasets.Version("1.0.0")

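    # Map each PRF POS tag used in the corpus to a coarse Universal
    # Dependencies (UD) POS tag; tags missing from this map fall back to "X"
    # ("other") when examples are generated.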
    pos_map = {
        "!": "PUNCT",
        '"': "PUNCT",
        "#": "X",
        "'": "PUNCT",
        ",": "PUNCT",
        "-": "PUNCT",
        ".": "PUNCT",
        "...": "PUNCT",
        "?": "PUNCT",
        "A": "ADJ",
        "AD": "ADV",
        "AG": "ADJ",
        "AIRWAYS0": "PROPN",
        "AN": "NOUN",
        "AND": "PROPN",
        "B": "ADJ",
        "BG": "ADJ",
        "BEAN0": "PROPN",
        "C": "CCONJ",
        "CENTRE0": "NOUN",
        "CG": "CCONJ",
        "D": "ADV",
        "D1": "ADV",
        "DG": "ADV",
        "E": "INTJ",
        "ECHO0": "PROPN",
        "F": "ADV",
        "G": "X",
        "G1": "VERB",
        "G2": "ADJ",
        "H": "PROPN",
        "HILL0": "PROPN",
        "I": "X",
        "IG": "X",
        "J": "NOUN",
        "JB": "ADJ",
        "JM": "NOUN",
        "JN": "NOUN",
        "JNS": "PROPN",
        "JNT": "PROPN",
        "JNZ": "PROPN",
        "K": "X",
        "KONG": "PROPN",
        "L": "X",
        "L1": "X",
        "LG": "X",
        "M": "NUM",
        "MG": "X",
        "MONTY0": "PROPN",
        "MOUNTAIN0": "PROPN",
        "N": "NOUN",
        "N1": "DET",
        "NG": "NOUN",
        "NR": "PROPN",
        "NS": "PROPN",
        "NSG": "PROPN",
        "NT": "PROPN",
        "NX": "NOUN",
        "NZ": "PROPN",
        "O": "X",
        "P": "ADP",
        "PEPPER0": "PROPN",
        "Q": "NOUN",
        "QG": "NOUN",
        "R": "PRON",
        "RG": "PRON",
        "S": "NOUN",
        "SOUND0": "PROPN",
        "T": "ADV",
        "TELECOM0": "PROPN",
        "TG": "ADV",
        "TOUCH0": "PROPN",
        "U": "PART",
        "UG": "PART",
        "U0": "PROPN",
        "V": "VERB",
        "V1": "VERB",
        "VD": "ADV",
        "VG": "VERB",
        "VK": "VERB",
        "VN": "NOUN",
        "VU": "AUX",
        "VUG": "AUX",
        "W": "PUNCT",
        "X": "X",
        "XA": "ADJ",
        "XB": "ADJ",
        "XC": "CCONJ",
        "XD": "ADV",
        "XE": "INTJ",
        "XJ": "X",
        "XJB": "PROPN",
        "XJN": "NOUN",
        "XJNT": "PROPN",
        "XJNZ": "PROPN",
        "XJV": "VERB",
        "XJA": "X",
        "XL1": "INTJ",
        "XM": "NUM",
        "XN": "NOUN",
        "XNG": "NOUN",
        "XNR": "PROPN",
        "XNS": "PROPN",
        "XNT": "PROPN",
        "XNX": "NOUN",
        "XNZ": "PROPN",
        "XO": "X",
        "XP": "ADP",
        "XQ": "NOUN",
        "XR": "PRON",
        "XS": "PROPN",
        "XT": "NOUN",
        "XV": "VERB",
        "XVG": "VERB",
        "XVN": "NOUN",
        "XX": "X",
        "Y": "PART",
        "YG": "PART",
        "Y1": "PART",
        "Z": "ADJ",
    }

    def _info(self):
        pos_tags_prf = datasets.Sequence(datasets.features.ClassLabel(names=list(self.pos_map)))
        # Sort the UD tag names so that ClassLabel ids are stable across runs;
        # iterating over a raw set has no guaranteed order under hash
        # randomisation.
        pos_tags_ud = datasets.Sequence(
            datasets.features.ClassLabel(names=sorted(set(self.pos_map.values())))
        )

        features = datasets.Features(
            {
                "conversation_id": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "turn_number": datasets.Value("int16"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "transcriptions": datasets.Sequence(datasets.Value("string")),
                "pos_tags_prf": pos_tags_prf,
                "pos_tags_ud": pos_tags_ud,
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The archive extracts to a "utf8" directory holding one file per
        # transcribed text; the whole corpus is published as a single split.
        data_dir = os.path.join(dl_manager.download_and_extract(_URL), "utf8")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, data_dir, split):
        """Yields examples."""
        key = 0
        downloaded_files = [os.path.join(data_dir, fn) for fn in sorted(os.listdir(data_dir))]
        for filepath in downloaded_files:
            with open(filepath, encoding="utf-8") as f:
                xml = f.read()

            # Each file holds a sequence of top-level elements without an
            # enclosing root, so wrap the content before parsing.
            xml = "<root>" + xml + "</root>"
            tree = ET.fromstring(xml)

            # The <info> header lists the tape number and recording date,
            # used below to build a conversation id; the "END" terminator
            # line is dropped.
            info = [line.strip() for line in tree.find("info").text.split("\n") if line and not line.endswith("END")]
            tape_number = "".join(info[0].split("-")[1:])
            date_recorded = "".join(info[1].split("-")[1:])

            turn_number = -1
            for sent in tree.findall("sent"):
                for child in sent.iter():
                    if child.tag == "sent_head":
                        # Turn headers carry the speaker label with a trailing
                        # delimiter; drop the final character to keep the label.
                        current_speaker = child.text.strip()[:-1]
                        turn_number += 1
                    elif child.tag == "sent_tag":
                        tokens = []
                        pos_prf = []
                        pos_ud = []
                        transcriptions = []
                        current_sentence = [w.strip() for w in child.text.split("\n") if w and not w.isspace()]
                        for w in current_sentence:
                            # Annotated words are slash-delimited:
                            # token / PRF POS tag / romanised pronunciation.
                            token_data = w.split("/")
                            tokens.append(token_data[0])
                            transcriptions.append(token_data[2])

                            prf_tag = token_data[1].upper()
                            # PRF tags absent from pos_map fall back to the UD
                            # catch-all "X".
                            ud_tag = self.pos_map.get(prf_tag, "X")
                            pos_prf.append(prf_tag)
                            pos_ud.append(ud_tag)

                        num_tokens = len(tokens)
                        num_pos_tags = len(pos_prf)
                        num_transcriptions = len(transcriptions)

                        assert num_tokens == num_pos_tags, (
                            f"Sizes do not match: {num_tokens} vs {num_pos_tags} "
                            f"for tokens vs POS tags in {filepath}"
                        )
                        assert num_pos_tags == num_transcriptions, (
                            f"Sizes do not match: {num_pos_tags} vs {num_transcriptions} "
                            f"for POS tags vs transcriptions in {filepath}"
                        )

                        # The source files carry no conversation ids, so derive
                        # a stable one from the tape number, recording date, and
                        # the first few romanised syllables of the turn.
                        id_from_transcriptions = "".join(transcriptions[:5])[:5].upper()
                        id_ = f"{tape_number}-{date_recorded}-{id_from_transcriptions}"
                        yield key, {
                            "conversation_id": id_,
                            "speaker": current_speaker,
                            "turn_number": turn_number,
                            "tokens": tokens,
                            "transcriptions": transcriptions,
                            "pos_tags_prf": pos_prf,
                            "pos_tags_ud": pos_ud,
                        }
                        key += 1