"""
process_underscores.py

Script to handle licensed data for which underlying text cannot be posted online (e.g. LDC data).
Users need a copy of the LDC distribution of an underlying resource to restore text in some of the corpora.
"""

__author__ = "Amir Zeldes, Damien Sileo"
__license__ = "Apache 2.0"
__version__ = "0.0.0"

import io, re, os, sys
from glob import glob
from collections import defaultdict
from argparse import ArgumentParser


class EDict(dict):
    def __getattr__(self, k): return self.get(k, None)
    def __setattr__(self, k, v): self[k] = v
    def __delattr__(self, k): del self[k]
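
# Illustrative example (not executed by the script): EDict exposes dict keys as
# attributes and returns None for missing keys instead of raising AttributeError:
#   opts = EDict(corpus="pdtb")
#   opts.corpus   # "pdtb"
#   opts.missing  # None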

PY3 = sys.version_info[0] == 3
if not PY3:
    input = raw_input  # Python 2 compatibility: raw_input is the string-returning input


gum_docs = {
    "GUM_reddit_macroeconomics": [
        {"year": "2017", "month": "09", "id": "6zm74h", "type": "post", "source": "undef"},
        {"year": "2017", "month": "09", "id": "dmwwqlt", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_stroke": [
        {"year": "2017", "month": "08", "id": "6ws3eh", "type": "post", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dmaei1x", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dmaiwsm", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "09", "id": "dmkx8bk", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "09", "id": "dmm1327", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dmaoodn", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_polygraph": [
        {"year": "2014", "month": "12", "id": "2q6qnv", "type": "post", "source": "undef"}
    ],
    "GUM_reddit_ring": [
        {"year": "2016", "month": "09", "id": "5570x1", "type": "post", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d885ma0", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d8880w7", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88u7dg", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88unu3", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88v0sz", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88xaqu", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "10", "id": "d893mj9", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88s4bb", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "10", "id": "d88zt6x", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_space": [
        {"year": "2016", "month": "08", "id": "50hx5c", "type": "post", "source": "undef"},
        {"year": "2016", "month": "08", "id": "d7471k5", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "08", "id": "d74i5ka", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "08", "id": "d74ppi0", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_superman": [
        {"year": "2017", "month": "05", "id": "dgys1z8", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_bobby": [
        {"year": "2018", "month": "06", "id": "8ph56q", "type": "post", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0b8zz4", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0dwqlg", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e15pcqu", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0dz1mp", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e1uuo9e", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0brc9w", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0bz951", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_escape": [
        {"year": "2017", "month": "05", "id": "69r98j", "type": "post", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh96n8v", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9enpe", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dht8oyn", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dhn0hoe", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "07", "id": "dk9ted1", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh98kcg", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9zxej", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "di9x7j9", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "di9xsrt", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "din85zf", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dinab0w", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dinaggd", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dinbyb9", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dj65sp1", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dizdd8a", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "07", "id": "dk78qw8", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dm0gqc7", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "10", "id": "domd1r0", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9irie", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9iw36", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "djlcwu5", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dlzcxpy", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dhabstb", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dhbr3m6", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "diz97qy", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_gender": [
        {"year": "2018", "month": "09", "id": "9e5urs", "type": "post", "source": "bigquery"},
        {"year": "2018", "month": "09", "id": "e5mg3s7", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5mkpok", "type": "comment", "source": "bigquery"},
        {"year": "2018", "month": "09", "id": "e5nxbmb", "type": "comment", "source": "bigquery"},
        {"year": "2018", "month": "09", "id": "e5nzg9j", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5mh94v", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5mmenp", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5ms5u3", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_monsters": [
        {"year": "2018", "month": "09", "id": "9eci2u", "type": "post", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5ox2jr", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5p3gtl", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5pnfro", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5q08o4", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5pney1", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_pandas": [
        {"year": "2018", "month": "09", "id": "9e3s9h", "type": "post", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwy6n", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m397o", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3xgb", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3z2e", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwbbt", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m38sr", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m42cu", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lvlxm", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lvqay", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lw5t6", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwz31", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lxi0s", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwxqq", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lzv1b", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m48ag", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m1yqe", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lx0sw", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m2n80", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m2wrh", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3blb", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lvxoc", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m1abg", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m1w5i", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3pdi", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3ruf", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m4yu2", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m5bcb", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_steak": [
        {"year": "2015", "month": "08", "id": "3im341", "type": "post", "source": "undef"}
    ],
    "GUM_reddit_card": [
        {"year": "2019", "month": "08", "id": "cmqrwo", "type": "post", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew3zrqg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew43d2c", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew43oks", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew43ymc", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew46h1p", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew46oly", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew46wq7", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew470zc", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_callout": [
        {"year": "2019", "month": "09", "id": "d1eg3u", "type": "post", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkucpg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkv0cc", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkwbx9", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezlh2o6", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezlkajf", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezlnco2", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezo20yy", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkwcvh", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezl07dm", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezmajm7", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezl1wz3", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_conspiracy": [
        {"year": "2019", "month": "02", "id": "aumhwo", "type": "post", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eh9rt0n", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eh9tvyw", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "ehc0l2q", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "ehclwtv", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eh9jo5x", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "ehr2665", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eha3c1q", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eha5jlq", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_introverts": [
        {"year": "2019", "month": "06", "id": "by820m", "type": "post", "source": "undef", "title_double": True},
        {"year": "2019", "month": "06", "id": "eqeik8m", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqfgaeu", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqfplpg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqg6a5u", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqh6j29", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqhjtwr", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqi2jl3", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqii2kf", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqhlj8j", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_racial": [
        {"year": "2019", "month": "09", "id": "d1urjk", "type": "post", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezq9y6w", "type": "comment", "source": "bigquery"},
        {"year": "2019", "month": "09", "id": "ezqpqmm", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezq8xs7", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezr55wk", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_social": [
        {"year": "2019", "month": "09", "id": "d1qy3g", "type": "post", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpb3jg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpdmy3", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpjor8", "type": "comment", "source": "bigquery"},
        {"year": "2019", "month": "09", "id": "ezpiozm", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpc1ps", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezp9fbh", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezqrumb", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpe0e6", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpf71f", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezt7qlf", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpc4jj", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpa2e4", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpfzql", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpi39v", "type": "comment", "source": "undef"}
    ]
}


def underscore_files(filenames):
    def underscore_rel_field(text):
        # Replace every non-space character with "_", preserving "<*>" placeholders
        blanked = []
        text = text.replace("<*>", "❤")  # Sentinel protects the placeholder during masking
        for c in text:
            if c != "❤" and c != " ":
                blanked.append("_")
            else:
                blanked.append(c)
        return "".join(blanked).replace("❤", "<*>")
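
    # For example (illustrative): underscore_rel_field("the <*> dog") returns
    # "___ <*> ___"; token lengths survive, the text content does not.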

    for f_path in filenames:
        skiplen = 0
        with io.open(f_path, 'r', encoding='utf8') as fin:
            lines = fin.readlines()

        with io.open(f_path, 'w', encoding='utf8', newline="\n") as fout:
            output = []
            if f_path.endswith(".rels"):
                for l, line in enumerate(lines):
                    line = line.strip()
                    if "\t" in line and l > 0:
                        doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label = line.split("\t")
                        if "GUM" in doc and "reddit" not in doc:
                            # Only Reddit documents in GUM need masking
                            output.append(line)
                            continue
                        unit1_txt = underscore_rel_field(unit1_txt)
                        unit2_txt = underscore_rel_field(unit2_txt)
                        unit1_sent = underscore_rel_field(unit1_sent)
                        unit2_sent = underscore_rel_field(unit2_sent)
                        fields = doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label
                        line = "\t".join(fields)
                    output.append(line)
            else:
                doc = ""
                for line in lines:
                    line = line.strip()
                    if line.startswith("# newdoc_id"):
                        doc = line.split("=", maxsplit=1)[1].strip()
                    if "GUM" in doc and "reddit" not in doc:
                        output.append(line)
                        continue
                    if line.startswith("# text"):
                        m = re.match(r'(# text ?= ?)(.+)', line)
                        if m is not None:
                            line = m.group(1) + re.sub(r'[^\s]', '_', m.group(2))
                        output.append(line)
                    elif "\t" in line:
                        fields = line.split("\t")
                        tok_col, lemma_col = fields[1:3]
                        if lemma_col == tok_col:  # Delete lemma if identical to token
                            fields[2] = '_'
                        elif tok_col.lower() == lemma_col:
                            fields[2] = "*LOWER*"
                        if skiplen < 1:
                            fields[1] = len(tok_col) * '_'
                        else:
                            skiplen -= 1
                        output.append("\t".join(fields))
                        if "-" in fields[0]:  # Multiword token range: skip masking its component rows
                            start, end = fields[0].split("-")
                            start = int(start)
                            end = int(end)
                            skiplen = end - start + 1
                    else:
                        output.append(line)
            fout.write('\n'.join(output) + "\n")
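
# Illustrative usage (hypothetical paths): mask shared task files in place, e.g.:
#   underscore_files(glob("data/eng.rst.gum/*.conllu") + glob("data/eng.rst.gum/*.rels"))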


def get_no_space_strings(cache_dict):
    import ast

    no_space_docs = defaultdict(str)

    for doc in gum_docs:
        for post in gum_docs[doc]:
            if post["id"] in cache_dict:
                json_result = cache_dict[post["id"]]
                parsed = ast.literal_eval(json_result)[0]
                if post["type"] == "post":
                    plain = parsed["selftext"]
                    title = parsed["title"]
                    if "title_only" in post:
                        if post["title_only"]:
                            plain = ""
                    if "title_double" in post:
                        title = title + " " + title
                else:
                    plain = parsed["body"]
                    title = ""
                # The API returns HTML-escaped text, so markup appears as entities
                # such as &amp;, &gt; and &amp;#x200B;; strip formatting that is
                # not part of the annotated text
                if "_space" in doc:
                    plain = plain.replace("&gt;", "")
                elif "_gender" in doc:
                    plain = plain.replace("- The vast", "The vast")
                    plain = plain.replace("- Society already accommodates", "Society already accommodates")
                    plain = plain.replace("- Society recognizes disabilities", "Society recognizes disabilities")
                    plain = plain.replace("- It’s a waste of time", "It’s a waste of time")
                    plain = plain.replace("PB&amp;J", "PB&J")
                elif "_monsters" in doc:
                    plain = plain.replace("1. He refers to", "a. He refers to")
                    plain = plain.replace("2. Using these", "b. Using these")
                    plain = plain.replace("3. And he has", "c. And he has")
                    plain = plain.replace("&amp;#x200B; &amp;#x200B;", "")
                    plain = re.sub(r' [0-9]+\. ', ' ', plain)
                elif "_ring" in doc:
                    plain = plain.replace("&gt;", ">")
                elif "_escape" in doc:
                    plain = plain.replace("*1 year later*", "1 year later")
                elif "_racial" in doc:
                    plain = plain.replace("&gt; ", "")
                elif "_callout" in doc:
                    plain = plain.replace("_it", "it").replace("well?_", "well?").replace("&gt;certain", "certain")
                elif "_conspiracy" in doc:
                    plain = plain.replace("&gt;", "")
                elif "_stroke" in doc:
                    plain = plain.replace("&amp;", "&")
                elif "_bobby" in doc:
                    plain = plain.replace("&amp;", "&")
                elif "_introvert" in doc:
                    plain = plain.replace("enjoy working out.", "enjoy working out").replace("~~", "")
                elif "_social" in doc:
                    plain = plain.replace("the purpose", "those purpose").replace("&amp;#x200B;", "")
                no_space = re.sub(r"\s", "", plain).replace("*", "")
                no_space = re.sub(r'\[([^]]+)\]\([^)]+\)', r'\1', no_space)  # Keep only the anchor text of markdown links
                if no_space_docs[doc] == "":
                    no_space_docs[doc] += re.sub(r"\s", "", title).replace("*", "")
                no_space_docs[doc] += no_space

    return no_space_docs
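
# Illustrative sketch with hypothetical cache contents: keys are reddit post/comment
# IDs, values are raw API JSON strings wrapped in a list literal, e.g.
#   cache = {"6zm74h": '[{"selftext": "Fiscal policy is...", "title": "ELI5: Macroeconomics"}]'}
#   get_no_space_strings(cache)["GUM_reddit_macroeconomics"]
#   # -> "ELI5:MacroeconomicsFiscalpolicyis..."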


def harvest_text(files):
    """
    :param files: LDC files containing raw text data
    :return: Dictionary of document base names (e.g. wsj_0013) to string of non-whitespace characters in the document
    """

    docs = {}

    for file_ in files:
        docname = os.path.basename(file_)
        if "." in docname:
            docname = docname.split(".")[0]
        try:
            text = io.open(file_, encoding="utf8").read()
        except UnicodeDecodeError:
            text = io.open(file_, encoding="Latin1").read()
        text = text.replace(".START", "")  # Remove WSJ .START markers
        text = re.sub(r'\s', '', text)
        docs[docname] = text

    return docs
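
# For example (illustrative, hypothetical path): harvest_text(["rstdt/TRAINING/wsj_0613.out.edus"])
# returns {"wsj_0613": "<the file's non-whitespace characters>"}.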


def get_proxy_data():
    import requests
    out_posts = {}
    tab_delim = requests.get("https://corpling.uis.georgetown.edu/gum/fetch_text_proxy.py").text
    for line in tab_delim.split("\n"):
        if "\t" in line:
            post, text = line.split("\t")
            out_posts[post] = text
    return out_posts
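
# The proxy serves tab-delimited "<id>\t<json>" lines, so the returned dict maps
# e.g. "6zm74h" to the cached API JSON string consumed by get_no_space_strings().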


def restore_docs(text_dict, dep_files=[], rel_files=[], tok_files=[]):
    def restore_range(range_string, underscored, tid_dict):
        # Restore a masked span given its token ID ranges (e.g. "1-3,5") and the
        # document's mapping of token IDs to strings; "<*>" gaps are kept verbatim
        output = []
        tok_ids = []
        range_strings = range_string.split(",")
        for r in range_strings:
            if "-" in r:
                s, e = r.split("-")
                tok_ids += list(range(int(s), int(e) + 1))
            else:
                tok_ids.append(int(r))

        for tok in underscored.split():
            if tok == "<*>":
                output.append(tok)
            else:
                tid = tok_ids.pop(0)
                output.append(tid_dict[tid])
        return " ".join(output)
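
    # For example (illustrative): with tid_dict = {1: "Big", 2: "pandas", 3: "sleep"},
    # restore_range("1-2", "___ ______", tid_dict) returns "Big pandas".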

    skiplen = 0
    token_dict = {}
    tid2string = defaultdict(dict)
    for file_ in dep_files + tok_files + rel_files:
        lines = io.open(file_, encoding="utf8").readlines()
        underscore_len = 0
        doc_len = 0
        tokfile = ".tok" in file_  # Defined for every file type so the flush below is always safe
        parse_text = ""
        docname = ""
        if file_.endswith(".rels") or file_ in rel_files:
            output = []
            violation_rows = []
            for l, line in enumerate(lines):
                line = line.strip()
                if l > 0 and "\t" in line:
                    fields = line.split("\t")
                    docname = fields[0]
                    text = text_dict[docname]
                    if "GUM_" in docname and "reddit" not in docname:
                        # Only Reddit documents in GUM need restoration
                        output.append(line)
                        continue
                    doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label = line.split("\t")
                    underscore_len += unit1_txt.count("_") + unit2_txt.count("_") + unit1_sent.count("_") + unit2_sent.count("_")
                    if underscore_len == 0:
                        # Nothing is masked in this row, so keep it unchanged
                        output.append(line)
                        continue
                    unit1_txt = restore_range(unit1_toks, unit1_txt, tid2string[docname])
                    unit2_txt = restore_range(unit2_toks, unit2_txt, tid2string[docname])
                    unit1_sent = restore_range(s1_toks, unit1_sent, tid2string[docname])
                    unit2_sent = restore_range(s2_toks, unit2_sent, tid2string[docname])
                    plain = unit1_txt + unit2_txt + unit1_sent + unit2_sent
                    plain = plain.replace("<*>", "").replace(" ", "")
                    doc_len += len(plain)
                    fields = doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label
                    line = "\t".join(fields)
                    if doc_len != underscore_len and len(violation_rows) == 0:
                        violation_rows.append(str(l) + ": " + line)
                output.append(line)

        else:
            output = []
            for line in lines:
                line = line.strip()
                if "# newdoc_id " in line:
                    tid = 0
                    if parse_text != "":
                        if not tokfile:
                            token_dict[docname] = parse_text
                    parse_text = ""
                    docname = re.search(r'# newdoc_id ?= ?([^\s]+)', line).group(1)
                    if "GUM" in docname and "reddit" not in docname:
                        output.append(line)
                        continue
                    if docname not in text_dict:
                        raise IOError("! Text for document name " + docname + " not found.\n Please check that your LDC data contains the file for this document.\n")
                    if tokfile:
                        text = token_dict[docname]
                    else:
                        text = text_dict[docname]
                    doc_len = len(text)
                    underscore_len = 0

                if "GUM" in docname and "reddit" not in docname:
                    output.append(line)
                    continue

                if line.startswith("# text"):
                    m = re.match(r'(# ?text ?= ?)(.+)', line)
                    if m is not None:
                        i = 0
                        sent_text = ""
                        # Rebuild the sentence text character by character from the
                        # source text, preserving the spacing of the masked line
                        for char in m.group(2).strip():
                            if char != " ":
                                sent_text += text[i]
                                i += 1
                            else:
                                sent_text += " "
                        line = m.group(1) + sent_text
                    output.append(line)
                elif "\t" in line:
                    fields = line.split("\t")
                    if skiplen < 1:
                        underscore_len += len(fields[1])
                        fields[1] = text[:len(fields[1])]
                    if not "-" in fields[0] and not "." in fields[0]:
                        parse_text += fields[1]
                        tid += 1
                        tid2string[docname][tid] = fields[1]
                    if not tokfile:
                        if fields[2] == '_' and not "-" in fields[0] and not "." in fields[0]:
                            fields[2] = fields[1]  # Restore lemma that was deleted as identical to token
                        elif fields[2] == "*LOWER*":
                            fields[2] = fields[1].lower()
                    if skiplen < 1:
                        text = text[len(fields[1]):]
                    else:
                        skiplen -= 1
                    output.append("\t".join(fields))
                    if "-" in fields[0]:  # Multiword token range: its component rows repeat the same characters
                        start, end = fields[0].split("-")
                        start = int(start)
                        end = int(end)
                        skiplen = end - start + 1
                else:
                    output.append(line)

        if doc_len != underscore_len:
            if ".rels" in file_:
                sys.stderr.write(
                    "\n! Tried to restore file " + os.path.basename(file_) + " but source text has different length than tokens in shared task file:\n" +
                    "  Source text in data/: " + str(doc_len) + " non-whitespace characters\n" +
                    "  Token underscores in " + file_ + ": " + str(underscore_len) + " non-whitespace characters\n" +
                    "  Violation row: " + violation_rows[0])
            else:
                sys.stderr.write("\n! Tried to restore document " + docname + " but source text has different length than tokens in shared task file:\n" +
                                 "  Source text in data/: " + str(doc_len) + " non-whitespace characters\n" +
                                 "  Token underscores in " + file_ + ": " + str(underscore_len) + " non-whitespace characters\n")
                with io.open("debug.txt", 'w', encoding="utf8") as f:
                    f.write(text_dict[docname])
                    f.write("\n\n\n")
                    f.write(parse_text)
            sys.exit(1)  # Exit with an error status: restoration failed

        if not tokfile and parse_text != "":
            token_dict[docname] = parse_text

        with io.open(file_, 'w', encoding='utf8', newline="\n") as fout:
            fout.write("\n".join(output) + "\n")

    sys.stderr.write("o Restored text in " + str(len(dep_files)) + " .conllu files, " + str(len(tok_files)) +
                     " .tok files and " + str(len(rel_files)) + " .rels files\n")


def run(corpus="all", rel_files=[], dep_files=[], tok_files=[],
        rstdt_path=None, pdtb_path=None, cdtb_path=None, tdb_path=None):

    opts = EDict(corpus=corpus,
                 rel_files=rel_files,
                 dep_files=dep_files,
                 tok_files=tok_files)
    todo = {k: v for k, v in opts.items() if 'files' in k}

    if opts.corpus == "rstdt" or opts.corpus == "all":
        if rstdt_path is None:
            raise ValueError("rstdt_path is required for corpus rstdt")
        if not os.path.isdir(rstdt_path):
            sys.stderr.write("Can't find directory at: " + rstdt_path + "\n")
            sys.exit(1)
        files = glob(os.sep.join([rstdt_path, "RSTtrees-WSJ-main-1.0", "TRAINING", "*.edus"])) + \
                glob(os.sep.join([rstdt_path, "RSTtrees-WSJ-main-1.0", "TEST", "*.edus"]))
        docs2text = harvest_text(files)
        restore_docs(docs2text, **todo)

    if opts.corpus == "pdtb" or opts.corpus == "all":
        if pdtb_path is None:
            raise ValueError("pdtb_path is required for corpus pdtb")
        if not os.path.isdir(pdtb_path):
            sys.stderr.write("Can't find directory at: " + pdtb_path + "\n")
            sys.exit(1)
        files = []
        for i in range(0, 25):  # PDTB sections 00-24
            dir_name = str(i).zfill(2)
            files += glob(os.sep.join([pdtb_path, dir_name, "wsj_*"]))
        docs2text = harvest_text(files)
        restore_docs(docs2text, **todo)

    if opts.corpus == "cdtb" or opts.corpus == "all":
        if cdtb_path is None:
            raise ValueError("cdtb_path is required for corpus cdtb")
        if not os.path.isdir(cdtb_path):
            sys.stderr.write("Can't find directory at: " + cdtb_path + "\n")
            sys.exit(1)
        files = glob(os.sep.join([cdtb_path, "*.raw"]))
        docs2text = harvest_text(files)
        restore_docs(docs2text, **todo)

    if opts.corpus == "tdb" or opts.corpus == "all":
        if tdb_path is None:
            raise ValueError("tdb_path is required for corpus tdb")
        if not os.path.isdir(tdb_path):
            sys.stderr.write("Can't find directory at: " + tdb_path + "\n")
            sys.exit(1)
        files = glob(os.sep.join([tdb_path, "*.txt"]))
        docs2text = harvest_text(files)
        restore_docs(docs2text, **todo)

    if opts.corpus == "gum" or opts.corpus == "all":
        print("Retrieving reddit data by proxy...")
        data = get_proxy_data()
        docs2text = get_no_space_strings(data)
        restore_docs(docs2text, **todo)
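

# A minimal command line wrapper is sketched below; the flag names (--corpus,
# --rstdt_path, etc.) are assumptions, since the original invocation is not
# shown. It simply wires the already-imported ArgumentParser to run().
if __name__ == "__main__":
    p = ArgumentParser()
    p.add_argument("--corpus", default="all", choices=["all", "rstdt", "pdtb", "cdtb", "tdb", "gum"])
    p.add_argument("--rstdt_path", default=None, help="Root of the RST-DT distribution")
    p.add_argument("--pdtb_path", default=None, help="Root of the PDTB raw text sections")
    p.add_argument("--cdtb_path", default=None, help="Directory containing CDTB *.raw files")
    p.add_argument("--tdb_path", default=None, help="Directory containing TDB *.txt files")
    p.add_argument("--rel_files", nargs="*", default=[], help=".rels files to restore")
    p.add_argument("--dep_files", nargs="*", default=[], help=".conllu files to restore")
    p.add_argument("--tok_files", nargs="*", default=[], help=".tok files to restore")
    args = p.parse_args()
    run(corpus=args.corpus, rel_files=args.rel_files, dep_files=args.dep_files,
        tok_files=args.tok_files, rstdt_path=args.rstdt_path, pdtb_path=args.pdtb_path,
        cdtb_path=args.cdtb_path, tdb_path=args.tdb_path)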