Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
License:
sileod committed
Commit: 5b23881
Parent: 459014c

Upload process_underscores.py

Files changed (1):
  process_underscores.py (+600, -0)
process_underscores.py ADDED
@@ -0,0 +1,600 @@
"""
process_underscores.py

Script to handle licensed data for which underlying text cannot be posted online (e.g. LDC data).
Users need a copy of the LDC distribution of an underlying resource to restore text in some of the corpora.
"""

__author__ = "Amir Zeldes"
__license__ = "Apache 2.0"
__version__ = "2.0.0"

import io, re, os, sys
from glob import glob
from collections import defaultdict
from argparse import ArgumentParser

PY3 = sys.version_info[0] == 3
if not PY3:
    input = raw_input

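# Inventory of the Reddit posts and comments underlying each GUM reddit document:
# each record lists the item's year and month (for locating it in archives), its
# Reddit ID, whether it is a post or a comment, and which cache source it came from.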
gum_docs = {
    "GUM_reddit_macroeconomics": [
        {"year": "2017", "month": "09", "id": "6zm74h", "type": "post", "source": "undef"},
        {"year": "2017", "month": "09", "id": "dmwwqlt", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_stroke": [
        {"year": "2017", "month": "08", "id": "6ws3eh", "type": "post", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dmaei1x", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dmaiwsm", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "09", "id": "dmkx8bk", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "09", "id": "dmm1327", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dmaoodn", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_polygraph": [
        {"year": "2014", "month": "12", "id": "2q6qnv", "type": "post", "source": "undef"}
    ],
    "GUM_reddit_ring": [
        {"year": "2016", "month": "09", "id": "5570x1", "type": "post", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d885ma0", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d8880w7", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88u7dg", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88unu3", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88v0sz", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88xaqu", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "10", "id": "d893mj9", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "09", "id": "d88s4bb", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "10", "id": "d88zt6x", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_space": [
        {"year": "2016", "month": "08", "id": "50hx5c", "type": "post", "source": "undef"},
        {"year": "2016", "month": "08", "id": "d7471k5", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "08", "id": "d74i5ka", "type": "comment", "source": "undef"},
        {"year": "2016", "month": "08", "id": "d74ppi0", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_superman": [
        # {"year": "2017", "month": "04", "id": "68e0u3", "type": "post", "title_only": True},  # Post title not included in this document
        {"year": "2017", "month": "05", "id": "dgys1z8", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_bobby": [
        {"year": "2018", "month": "06", "id": "8ph56q", "type": "post", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0b8zz4", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0dwqlg", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e15pcqu", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0dz1mp", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e1uuo9e", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0brc9w", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "06", "id": "e0bz951", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_escape": [
        {"year": "2017", "month": "05", "id": "69r98j", "type": "post", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh96n8v", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9enpe", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dht8oyn", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dhn0hoe", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "07", "id": "dk9ted1", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh98kcg", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9zxej", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "di9x7j9", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "di9xsrt", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "din85zf", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dinab0w", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dinaggd", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dinbyb9", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dj65sp1", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dizdd8a", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "07", "id": "dk78qw8", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "08", "id": "dm0gqc7", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "10", "id": "domd1r0", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9irie", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dh9iw36", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "djlcwu5", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "dlzcxpy", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dhabstb", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "05", "id": "dhbr3m6", "type": "comment", "source": "undef"},
        {"year": "2017", "month": "06", "id": "diz97qy", "type": "comment"}
    ],
    "GUM_reddit_gender": [
        {"year": "2018", "month": "09", "id": "9e5urs", "type": "post", "source": "bigquery"},
        {"year": "2018", "month": "09", "id": "e5mg3s7", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5mkpok", "type": "comment", "source": "bigquery"},
        {"year": "2018", "month": "09", "id": "e5nxbmb", "type": "comment", "source": "bigquery"},
        {"year": "2018", "month": "09", "id": "e5nzg9j", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5mh94v", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5mmenp", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5ms5u3", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_monsters": [
        {"year": "2018", "month": "09", "id": "9eci2u", "type": "post", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5ox2jr", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5p3gtl", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5pnfro", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5q08o4", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5pney1", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_pandas": [
        {"year": "2018", "month": "09", "id": "9e3s9h", "type": "post", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwy6n", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m397o", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3xgb", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3z2e", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwbbt", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m38sr", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m42cu", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lvlxm", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lvqay", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lw5t6", "type": "comment", "source": "undef"},  # Blowhole
        {"year": "2018", "month": "09", "id": "e5lwz31", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lxi0s", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lwxqq", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lzv1b", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m48ag", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m1yqe", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lx0sw", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m2n80", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m2wrh", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3blb", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5lvxoc", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m1abg", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m1w5i", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3pdi", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m3ruf", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m4yu2", "type": "comment", "source": "undef"},
        {"year": "2018", "month": "09", "id": "e5m5bcb", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_steak": [
        {"year": "2015", "month": "08", "id": "3im341", "type": "post", "source": "undef"}
    ],
    "GUM_reddit_card": [
        {"year": "2019", "month": "08", "id": "cmqrwo", "type": "post", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew3zrqg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew43d2c", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew43oks", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew43ymc", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew46h1p", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew46oly", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew46wq7", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "08", "id": "ew470zc", "type": "comment", "source": "undef"}
    ],
    "GUM_reddit_callout": [
        {"year": "2019", "month": "09", "id": "d1eg3u", "type": "post", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkucpg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkv0cc", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkwbx9", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezlh2o6", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezlkajf", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezlnco2", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezo20yy", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezkwcvh", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezl07dm", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezmajm7", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezl1wz3", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_conspiracy": [
        {"year": "2019", "month": "02", "id": "aumhwo", "type": "post", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eh9rt0n", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eh9tvyw", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "ehc0l2q", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "ehclwtv", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eh9jo5x", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "ehr2665", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eha3c1q", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "02", "id": "eha5jlq", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_introverts": [
        {"year": "2019", "month": "06", "id": "by820m", "type": "post", "source": "undef", "title_double": True},  # Possible title was repeated by annotator
        {"year": "2019", "month": "06", "id": "eqeik8m", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqfgaeu", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqfplpg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqg6a5u", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqh6j29", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqhjtwr", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqi2jl3", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqii2kf", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "06", "id": "eqhlj8j", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_racial": [
        {"year": "2019", "month": "09", "id": "d1urjk", "type": "post", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezq9y6w", "type": "comment", "source": "bigquery"},
        {"year": "2019", "month": "09", "id": "ezqpqmm", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezq8xs7", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezr55wk", "type": "comment", "source": "undef"},
    ],
    "GUM_reddit_social": [
        {"year": "2019", "month": "09", "id": "d1qy3g", "type": "post", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpb3jg", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpdmy3", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpjor8", "type": "comment", "source": "bigquery"},
        {"year": "2019", "month": "09", "id": "ezpiozm", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpc1ps", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezp9fbh", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezqrumb", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpe0e6", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpf71f", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezt7qlf", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpc4jj", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpa2e4", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpfzql", "type": "comment", "source": "undef"},
        {"year": "2019", "month": "09", "id": "ezpi39v", "type": "comment", "source": "undef"},
    ]
}

def underscore_files(filenames):
    def underscore_rel_field(text):
        blanked = []
        text = text.replace("<*>", "❤")
        for c in text:
            if c != "❤" and c != " ":
                blanked.append("_")
            else:
                blanked.append(c)
        return "".join(blanked).replace("❤", "<*>")

    for f_path in filenames:
        skiplen = 0
        with io.open(f_path, 'r', encoding='utf8') as fin:
            lines = fin.readlines()

        with io.open(f_path, 'w', encoding='utf8', newline="\n") as fout:
            output = []
            if f_path.endswith(".rels"):
                for l, line in enumerate(lines):
                    line = line.strip()
                    if "\t" in line and l > 0:
                        doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label = line.split("\t")
                        if "GUM" in doc and "reddit" not in doc:
                            output.append(line)
                            continue
                        unit1_txt = underscore_rel_field(unit1_txt)
                        unit2_txt = underscore_rel_field(unit2_txt)
                        unit1_sent = underscore_rel_field(unit1_sent)
                        unit2_sent = underscore_rel_field(unit2_sent)
                        fields = doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label
                        line = "\t".join(fields)
                    output.append(line)
            else:
                doc = ""
                for line in lines:
                    line = line.strip()
                    if line.startswith("# newdoc_id"):
                        doc = line.split("=", maxsplit=1)[1].strip()
                    if "GUM" in doc and "reddit" not in doc:
                        output.append(line)
                        continue
                    if line.startswith("# text"):
                        m = re.match(r'(# text ?= ?)(.+)', line)
                        if m is not None:
                            line = m.group(1) + re.sub(r'[^\s]', '_', m.group(2))
                        output.append(line)
                    elif "\t" in line:
                        fields = line.split("\t")
                        tok_col, lemma_col = fields[1:3]
                        if lemma_col == tok_col:  # Delete lemma if identical to token
                            fields[2] = '_'
                        elif tok_col.lower() == lemma_col:
                            fields[2] = "*LOWER*"
                        if skiplen < 1:
                            fields[1] = len(tok_col) * '_'
                        else:
                            skiplen -= 1
                        output.append("\t".join(fields))
                        if "-" in fields[0]:  # Multitoken
                            start, end = fields[0].split("-")
                            start = int(start)
                            end = int(end)
                            skiplen = end - start + 1
                    else:
                        output.append(line)
            fout.write('\n'.join(output) + "\n")

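# Illustration of the masking above (hypothetical strings):
# underscore_rel_field("Wait a minute <*> she says") returns
# "____ _ ______ <*> ___ ____" - every non-space character except the <*>
# separator is masked, so token boundaries and lengths survive while the
# licensed text itself is removed.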
def get_no_space_strings(cache_dict):
    import ast

    no_space_docs = defaultdict(str)

    for doc in gum_docs:
        for post in gum_docs[doc]:
            if post["id"] in cache_dict:
                json_result = cache_dict[post["id"]]
                parsed = ast.literal_eval(json_result)[0]
                if post["type"] == "post":
                    plain = parsed["selftext"]
                    title = parsed["title"]
                    if "title_only" in post:
                        if post["title_only"]:
                            plain = ""
                    if "title_double" in post:
                        title = title + " " + title
                else:
                    plain = parsed["body"]
                    title = ""
                if "_space" in doc:
                    plain = plain.replace("&gt;", "")  # GUM_reddit_space has formatting &gt; to indicate indented block quotes
                elif "_gender" in doc:
                    plain = plain.replace("- The vast", "The vast")
                    plain = plain.replace("- Society already accommodates", "Society already accommodates")
                    plain = plain.replace("- Society recognizes disabilities", "Society recognizes disabilities")
                    plain = plain.replace("- It’s a waste of time", "It’s a waste of time")
                    plain = plain.replace("PB&amp;J", "PB&J")
                elif "_monsters" in doc:
                    plain = plain.replace("1. He refers to", "a. He refers to")
                    plain = plain.replace("2. Using these", "b. Using these")
                    plain = plain.replace("3. And he has", "c. And he has")
                    plain = plain.replace("&#x200B; &#x200B;", "")
                    plain = re.sub(r' [0-9]+\. ', ' ', plain)
                elif "_ring" in doc:
                    plain = plain.replace("&gt;", ">")
                elif "_escape" in doc:
                    plain = plain.replace("*1 year later*", "1 year later")
                elif "_racial" in doc:
                    plain = plain.replace("> ", "")
                elif "_callout" in doc:
                    plain = plain.replace("_it", "it").replace("well?_", "well?").replace(">certain", "certain")
                elif "_conspiracy" in doc:
                    plain = plain.replace(">", "")
                elif "_stroke" in doc:
                    plain = plain.replace("&amp;", "&")
                elif "_bobby" in doc:
                    plain = plain.replace("&amp;", "&")
                elif "_introvert" in doc:
                    plain = plain.replace("enjoy working out.", "enjoy working out").replace("~~", "")
                elif "_social" in doc:
                    plain = plain.replace("the purpose", "those purpose").replace("&#x200B;", "")
                no_space = re.sub(r"\s", "", plain).replace("*", "")
                no_space = re.sub(r'\[([^]]+)\]\([^)]+\)', r'\1', no_space)  # Remove Wiki style links: [text](URL)
                if no_space_docs[doc] == "":
                    no_space_docs[doc] += re.sub(r"\s", "", title).replace("*", "")
                no_space_docs[doc] += no_space

    return no_space_docs

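# Shape of the result above (a sketch, not real document content): each document
# name maps to one long string of non-whitespace characters - title first, then
# post/comment bodies in gum_docs order - which restore_docs() below consumes
# character by character.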
def harvest_text(files):
    """
    :param files: LDC files containing raw text data
    :return: Dictionary of document base names (e.g. wsj_0013) to string of non-whitespace characters in the document
    """

    docs = {}

    for file_ in files:
        docname = os.path.basename(file_)
        if "." in docname:
            docname = docname.split(".")[0]
        try:
            text = io.open(file_, encoding="utf8").read()
        except UnicodeDecodeError:
            text = io.open(file_, encoding="Latin1").read()  # e.g. wsj_0142
        text = text.replace(".START", "")  # Remove PDTB .START codes
        text = re.sub(r'\s', '', text)  # Remove all whitespace
        docs[docname] = text

    return docs

def get_proxy_data():
    import requests
    out_posts = {}
    tab_delim = requests.get("https://corpling.uis.georgetown.edu/gum/fetch_text_proxy.py").text
    for line in tab_delim.split("\n"):
        if "\t" in line:
            post, text = line.split("\t")
            out_posts[post] = text
    return out_posts

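# Note on the proxy format: each line served by fetch_text_proxy.py is
# "<reddit_id>\t<cached result>", where the cached result is a Python-literal
# list whose first element get_no_space_strings() parses via ast.literal_eval.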
def restore_docs(text_dict, dep_files=[], rel_files=[], tok_files=[]):
    def restore_range(range_string, underscored, tid_dict):
        output = []
        tok_ids = []
        range_strings = range_string.split(",")
        for r in range_strings:
            if "-" in r:
                s, e = r.split("-")
                tok_ids += list(range(int(s), int(e) + 1))
            else:
                tok_ids.append(int(r))

        for tok in underscored.split():
            if tok == "<*>":
                output.append(tok)
            else:
                tid = tok_ids.pop(0)
                output.append(tid_dict[tid])
        return " ".join(output)

    skiplen = 0
    token_dict = {}
    tid2string = defaultdict(dict)
    tokfile = False   # Initialized here so a run with only .rels files does not hit undefined names below
    parse_text = ""
    docname = ""
    for file_ in dep_files + tok_files + rel_files:
        lines = io.open(file_, encoding="utf8").readlines()
        underscore_len = 0  # Must match doc_len at end of file processing
        doc_len = 0
        if file_.endswith(".rels") or file_ in rel_files:
            output = []
            violation_rows = []
            for l, line in enumerate(lines):
                line = line.strip()
                if l > 0 and "\t" in line:
                    fields = line.split("\t")
                    docname = fields[0]
                    text = text_dict[docname]
                    if "GUM_" in docname and "reddit" not in docname:  # Only Reddit documents need reconstruction in GUM
                        output.append(line)
                        continue
                    doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label = line.split("\t")
                    underscore_len += unit1_txt.count("_") + unit2_txt.count("_") + unit1_sent.count("_") + unit2_sent.count("_")
                    if underscore_len == 0:
                        sys.stderr.write("! Non-underscored file detected - " + os.path.basename(file_) + "\n")
                        sys.exit(0)
                    unit1_txt = restore_range(unit1_toks, unit1_txt, tid2string[docname])
                    unit2_txt = restore_range(unit2_toks, unit2_txt, tid2string[docname])
                    unit1_sent = restore_range(s1_toks, unit1_sent, tid2string[docname])
                    unit2_sent = restore_range(s2_toks, unit2_sent, tid2string[docname])
                    plain = unit1_txt + unit2_txt + unit1_sent + unit2_sent
                    plain = plain.replace("<*>", "").replace(" ", "")
                    doc_len += len(plain)
                    fields = doc, unit1_toks, unit2_toks, unit1_txt, unit2_txt, s1_toks, s2_toks, unit1_sent, unit2_sent, direction, orig_label, label
                    line = "\t".join(fields)
                    if doc_len != underscore_len and len(violation_rows) == 0:
                        violation_rows.append(str(l) + ": " + line)
                output.append(line)

        else:
            tokfile = True if ".tok" in file_ else False
            output = []
            parse_text = ""
            docname = ""
            for line in lines:
                line = line.strip()
                if "# newdoc_id " in line:
                    tid = 0
                    if parse_text != "":
                        if not tokfile:
                            token_dict[docname] = parse_text
                    parse_text = ""
                    docname = re.search(r'# newdoc_id ?= ?([^\s]+)', line).group(1)
                    if "GUM" in docname and "reddit" not in docname:
                        output.append(line)
                        continue
                    if docname not in text_dict:
                        raise IOError("! Text for document name " + docname + " not found.\n  Please check that your LDC data contains the file for this document.\n")
                    if ".tok" in file_:
                        text = token_dict[docname]
                    else:
                        text = text_dict[docname]
                    doc_len = len(text)
                    underscore_len = 0

                if "GUM" in docname and "reddit" not in docname:
                    output.append(line)
                    continue

                if line.startswith("# text"):
                    m = re.match(r'(# ?text ?= ?)(.+)', line)
                    if m is not None:
                        i = 0
                        sent_text = ""
                        for char in m.group(2).strip():
                            if char != " ":
                                sent_text += text[i]
                                i += 1
                            else:
                                sent_text += " "
                        line = m.group(1) + sent_text
                    output.append(line)
                elif "\t" in line:
                    fields = line.split("\t")
                    if skiplen < 1:
                        underscore_len += len(fields[1])
                        fields[1] = text[:len(fields[1])]
                    if not "-" in fields[0] and not "." in fields[0]:
                        parse_text += fields[1]
                        tid += 1
                        tid2string[docname][tid] = fields[1]
                    if not tokfile:
                        if fields[2] == '_' and not "-" in fields[0] and not "." in fields[0]:
                            fields[2] = fields[1]
                        elif fields[2] == "*LOWER*":
                            fields[2] = fields[1].lower()
                    if skiplen < 1:
                        text = text[len(fields[1]):]
                    else:
                        skiplen -= 1
                    output.append("\t".join(fields))
                    if "-" in fields[0]:  # Multitoken
                        start, end = fields[0].split("-")
                        start = int(start)
                        end = int(end)
                        skiplen = end - start + 1
                else:
                    output.append(line)

        if not doc_len == underscore_len:
            if ".rels" in file_:
                sys.stderr.write(
                    "\n! Tried to restore file " + os.path.basename(file_) + " but source text has different length than tokens in shared task file:\n" +
                    "  Source text in data/: " + str(doc_len) + " non-whitespace characters\n" +
                    "  Token underscores in " + file_ + ": " + str(underscore_len) + " non-whitespace characters\n" +
                    "  Violation row: " + violation_rows[0])
            else:
                sys.stderr.write(
                    "\n! Tried to restore document " + docname + " but source text has different length than tokens in shared task file:\n" +
                    "  Source text in data/: " + str(doc_len) + " non-whitespace characters\n" +
                    "  Token underscores in " + file_ + ": " + str(underscore_len) + " non-whitespace characters\n")
                with io.open("debug.txt", 'w', encoding="utf8") as f:
                    f.write(text_dict[docname])
                    f.write("\n\n\n")
                    f.write(parse_text)
            sys.exit(0)

        if not tokfile and parse_text != "":
            token_dict[docname] = parse_text

        with io.open(file_, 'w', encoding='utf8', newline="\n") as fout:
            fout.write("\n".join(output) + "\n")

    sys.stderr.write("o Restored text in " + str(len(dep_files)) + " .conllu files, " + str(len(tok_files)) +
                     " .tok files and " + str(len(rel_files)) + " .rels files\n")

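# Worked illustration of the inner restore_range helper (hypothetical values):
# with tid_dict = {5: "the", 6: "quick", 8: "fox"},
# restore_range("5-6,8", "___ _____ <*> ___", tid_dict) expands the range string
# to token ids [5, 6, 8] and returns "the quick <*> fox", keeping <*> in place.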
p = ArgumentParser()
p.add_argument("corpus", action="store", choices=["rstdt", "pdtb", "cdtb", "tdb", "gum", "all"], default="all", help="Name of the corpus to process or 'all'")
p.add_argument("--rel_files", nargs='*', default=[])
p.add_argument("--dep_files", nargs='*', default=[])
p.add_argument("--tok_files", nargs='*', default=[])
opts = p.parse_args()

todo = {k: v for k, v in vars(opts).items() if 'files' in k}

# Prompt user for corpus folders
if opts.corpus == "rstdt" or opts.corpus == "all":
    rstdt_path = input("Enter path for LDC RST-DT data/ folder:\n> ")
    if not os.path.isdir(rstdt_path):
        sys.stderr.write("Can't find directory at: " + rstdt_path + "\n")
        sys.exit(0)
    files = glob(os.sep.join([rstdt_path, "RSTtrees-WSJ-main-1.0", "TRAINING", "*.edus"])) + glob(os.sep.join([rstdt_path, "RSTtrees-WSJ-main-1.0", "TEST", "*.edus"]))
    docs2text = harvest_text(files)
    restore_docs(docs2text, **todo)
if opts.corpus == "pdtb" or opts.corpus == "all":
    pdtb_path = input("Enter path for LDC Treebank 2 raw/wsj/ folder:\n> ")
    if not os.path.isdir(pdtb_path):
        sys.stderr.write("Can't find directory at: " + pdtb_path + "\n")
        sys.exit(0)
    files = []
    for i in range(0, 25):
        dir_name = str(i) if i > 9 else "0" + str(i)
        files += glob(os.sep.join([pdtb_path, dir_name, "wsj_*"]))
    docs2text = harvest_text(files)
    restore_docs(docs2text, **todo)
if opts.corpus == "cdtb" or opts.corpus == "all":
    cdtb_path = input("Enter path for LDC Chinese Discourse Treebank 0.5 raw/ folder:\n> ")
    if not os.path.isdir(cdtb_path):
        sys.stderr.write("Can't find directory at: " + cdtb_path + "\n")
        sys.exit(0)
    files = glob(os.sep.join([cdtb_path, "*.raw"]))
    docs2text = harvest_text(files)
    restore_docs(docs2text, **todo)
if opts.corpus == "tdb" or opts.corpus == "all":
    tdb_path = input("Enter path for Turkish Discourse Bank 1.0 raw/01/ folder:\n> ")
    if not os.path.isdir(tdb_path):
        sys.stderr.write("Can't find directory at: " + tdb_path + "\n")
        sys.exit(0)
    files = glob(os.sep.join([tdb_path, "*.txt"]))
    docs2text = harvest_text(files)
    restore_docs(docs2text, **todo)

if opts.corpus == "gum" or opts.corpus == "all":
    print("Retrieving reddit data by proxy...")
    data = get_proxy_data()
    docs2text = get_no_space_strings(data)
    restore_docs(docs2text, **todo)
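# Example invocation (a sketch; the file paths are placeholders, not part of the script):
#   python process_underscores.py gum --dep_files data/*.conllu --tok_files data/*.tok --rel_files data/*.rels
# The gum branch fetches Reddit text via the proxy automatically; for rstdt, pdtb,
# cdtb and tdb the script instead prompts interactively for the path to the
# corresponding locally licensed raw text folder.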