yilunzhao commited on
Commit
f18473d
1 Parent(s): 6e8d3cc

Upload 4 files

Browse files
.gitattributes CHANGED
@@ -56,3 +56,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
59
+ multihiertt_data/dev.json filter=lfs diff=lfs merge=lfs -text
60
+ multihiertt_data/test.json filter=lfs diff=lfs merge=lfs -text
61
+ multihiertt_data/train.json filter=lfs diff=lfs merge=lfs -text
multihiertt_data/dev.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0100d9d6832901d0c8ffa3141e548e51c98ecf3ac144407f90d8182ab3bcb0a7
3
+ size 36118558
multihiertt_data/table_description_generation.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+ from bs4 import BeautifulSoup
4
+ import os
5
+ import re
6
+ from lxml.html.clean import Cleaner
7
+ import json
8
+
9
+
10
def clean_html(raw_html):
    """Return *raw_html* with all <sup> elements removed, decoded as UTF-8 text.

    NOTE(review): lxml's Cleaner returns the same type as its input, so the
    .decode() call presumably expects *raw_html* to be bytes — confirm callers
    pass bytes, not str.
    """
    sanitizer = Cleaner(remove_tags=["sup"])
    cleaned = sanitizer.clean_html(raw_html)
    return cleaned.decode("utf-8")
13
+
14
# Sentinel string written into blank table cells (see readHTML) and compared
# against throughout this module to detect placeholder/empty values.
EMPTY = "[EMPTY]"
15
+
16
def isYear(value, start=1990, end=2022):
    """Return True if *value* contains a four-digit year in [start, end).

    Substring match, so both "2015" and "December 31, 2015" qualify.

    Args:
        value: String to scan (typically a table-header cell).
        start: First year checked, inclusive (default 1990, as the original
            hard-coded range).
        end: One past the last year checked (default 2022).

    Returns:
        True if any year in the range occurs in *value*, else False.
    """
    return any(str(year) in value for year in range(start, end))
21
+
22
def existTopHeaders(html):
    """Heuristically decide whether the table's first row is a header row.

    A first row whose first parseable numeric cell is a year (or that holds no
    numeric cells at all) is treated as a top header; a first row of plain
    numeric data is not.

    Args:
        html: Parsed table markup (BeautifulSoup document containing <tr>s).

    Returns:
        True if the first row looks like a header row, False otherwise.
    """
    first_row = html.tr
    # A first cell with no direct string (empty or nested markup) is typical
    # of header rows, whose leading cell labels the row-header column.
    if first_row.td.string is None:
        return True

    for td in first_row.find_all("td"):
        if not td.string:
            continue
        value = td.string.replace(",", "").strip()
        if not value:
            continue
        # value[1:] drops a possible leading currency sign before the numeric
        # test; float() raises ValueError for non-numeric text, in which case
        # we move on to the next cell.
        try:
            float(value[1:])
        except ValueError:
            continue
        # First numeric cell decides: years still count as headers,
        # any other number means this row is data.
        return isYear(value)
    return True
41
+
42
def belongToTopHeaders(row):
    """Return True if *row* still belongs to a (possibly multi-row) top header.

    Mirrors the per-cell test in existTopHeaders: the row's first parseable
    numeric cell decides — a year keeps the row in the header block, any other
    number marks it as data. Rows with no numeric cells count as header rows.

    Args:
        row: A <tr> element (BeautifulSoup Tag).

    Returns:
        True if the row is header-like, False otherwise.
    """
    for td in row.find_all("td"):
        if not td.string:
            continue
        value = td.string.replace(",", "").strip()
        if not value:
            continue
        # Skip a possible leading currency symbol; non-numeric cells raise
        # ValueError and are skipped.
        try:
            float(value[1:])
        except ValueError:
            continue
        return isYear(value)
    return True
57
+
58
def handle_unnamed_single_topheader(columns, j):
    """Resolve a readable header label for column *j* (single-level columns).

    Searches rightward from *j* for the nearest real label, then leftward;
    falls back to a synthetic "data {j}" when every column is a pandas
    "Unnamed" placeholder or the EMPTY sentinel.
    """
    def _usable(label):
        # pandas emits "Unnamed: k" for blank header cells.
        return not (label.startswith("Unnamed") or label == EMPTY)

    for k in range(j, len(columns)):
        if _usable(columns[k]):
            return columns[k]
    for k in range(j, -1, -1):
        if _usable(columns[k]):
            return columns[k]
    return f"data {j}"
72
+
73
def handle_unnamed_multi_topheader(columns, j):
    """Resolve a readable top-level label for column *j* (MultiIndex columns).

    Same right-then-left search as handle_unnamed_single_topheader, but each
    entry of *columns* is a tuple and only its first level is examined.
    """
    def _usable(col):
        top_level = col[0]
        return not (top_level.startswith("Unnamed") or top_level == EMPTY)

    for k in range(j, len(columns)):
        if _usable(columns[k]):
            return columns[k][0]
    for k in range(j, -1, -1):
        if _usable(columns[k]):
            return columns[k][0]
    return f"data {j}"
87
+
88
def readHTML(html_string):
    """Parse an HTML table string into a pandas DataFrame with resolved headers.

    Args:
        html_string: Raw HTML markup containing a single <table>.

    Returns:
        Tuple ``(data, header, top_header_nonexist_flag)`` where ``data`` is
        the parsed DataFrame (first column used as the index), ``header`` is
        the list of row indices treated as header rows, and
        ``top_header_nonexist_flag`` is 1 when a synthetic header row was
        inserted, else 0.
    """
    html = BeautifulSoup(html_string, features='html.parser')
    # Superscripts/subscripts (e.g. footnote markers) would pollute cell text.
    for sup in html.select('sup'):
        sup.extract()
    for sup in html.select('sub'):
        sup.extract()

    # 1. Locate the top header. If the first row is plain data, insert a
    # synthetic header row ("data0", "data1", ...) so read_html still has one.
    top_header_nonexist_flag = 0
    if not existTopHeaders(html):
        top_header_nonexist_flag = 1
        new_tr_tag = html.new_tag("tr")
        new_td_tag = html.new_tag("td")
        # Leading empty cell mirrors the row-label column.
        new_tr_tag.insert(0, new_td_tag)
        for i in range(len(html.tr.find_all("td")[1:])):
            new_td_tag1 = html.new_tag("td")
            new_td_tag1.string = f"data{i}"
            new_tr_tag.insert(i+1, new_td_tag1)
        html.table.insert(0, new_tr_tag)
    else:
        # Blank out the top-left cell so it is not read as a column label.
        html.tr.td.string = ""

    # Collect consecutive header-like rows after row 0 (multi-row headers).
    header = [0]
    top_header_flag = True
    for i, tr in enumerate(html.find_all("tr")):
        if top_header_flag and i > 0 and not top_header_nonexist_flag:
            if belongToTopHeaders(tr):
                header.append(i)
            else:
                top_header_flag = False
        # Rows with a row label: fill their blank data cells with the EMPTY
        # sentinel so pandas keeps them as strings instead of NaN.
        if tr.td.string != None:
            for td in tr.find_all("td")[1:]:
                if td.string == None:
                    td.string = EMPTY

    data = pd.read_html(str(html), header=header, index_col=0)[0]
    return data, header, top_header_nonexist_flag
131
+
132
+
133
def generateDescription(data, header, top_header_nonexist_flag):
    """Build a natural-language sentence for each data cell of *data*.

    Args:
        data: DataFrame produced by readHTML (row labels in the index,
            possibly multi-level columns).
        header: Row indices that were parsed as header rows.
        top_header_nonexist_flag: 1 if readHTML inserted a synthetic header
            row, else 0.

    Returns:
        Dict mapping "x-y" cell coordinates (positions in the original HTML
        table) to sentences of the form
        "<row label> [of <column labels>] is <value>.".
    """
    describe_dict = {}
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            value = data.iloc[i, j]
            # Skip placeholder cells: pandas "Unnamed" fillers, the EMPTY
            # sentinel, and dash / em-dash blanks.
            if str(value).startswith("Unnamed") or str(value) == EMPTY or str(value) == "-" or str(value) == u'\u2014':
                continue
            describe = ""
            if pd.isnull(data.index[i]):
                describe += "total"
            else:
                describe += f"{data.index[i]}"
            # Prepend the nearest section heading above: a row whose data
            # cells are all EMPTY acts as a group label for the rows below it.
            temp_i = i - 1
            while temp_i >= 0:
                if (data.iloc[temp_i] == EMPTY).all():
                    describe += f" {data.index[temp_i]}"
                    break
                temp_i -= 1
            if not top_header_nonexist_flag:
                describe += " of"
                if len(header) == 1:
                    describe += f" {handle_unnamed_single_topheader(data.columns, j)}"
                else:
                    # Hoisted: the original computed this helper twice in a row.
                    prev = handle_unnamed_multi_topheader(data.columns, j)
                    describe += f" {prev}"
                    # Append remaining header levels, skipping placeholders
                    # and immediate repeats of the previous label.
                    for temp_j in header[1:]:
                        if data.columns[j][temp_j].startswith("Unnamed") or data.columns[j][temp_j] == EMPTY:
                            continue
                        if data.columns[j][temp_j] == prev:
                            continue
                        describe += f" {data.columns[j][temp_j]}"
                        prev = data.columns[j][temp_j]
            describe += f" is {data.iloc[i, j]}."
            # Map back to HTML-table coordinates: rows are offset by the
            # number of header rows (minus the synthetic one, if inserted);
            # columns are 1-based because column 0 holds the row labels.
            x_index = i + len(header)
            y_index = j + 1
            if top_header_nonexist_flag == 1:
                x_index -= 1
            describe_dict[f"{x_index}-{y_index}"] = describe
    return describe_dict
172
+
173
def generateDiscreptionCell(data, header, top_header_nonexist_flag):
    """Map every kept cell of *data* to its string value, keyed "x-y".

    The key encodes the cell's position in the original HTML table:
    x = row index offset by the number of header rows (minus one when a
    synthetic header row was inserted), y = 1-based column index.
    Placeholder cells ("Unnamed...", "-", "[EMPTY]") are skipped.
    """
    cell_texts = {}
    # Row offset back to HTML-table coordinates, computed once up front.
    row_offset = len(header) - (1 if top_header_nonexist_flag == 1 else 0)
    for r in range(data.shape[0]):
        for c in range(data.shape[1]):
            text = f"{data.iloc[r, c]}"
            if text.startswith("Unnamed") or text in ("-", "[EMPTY]"):
                continue
            cell_texts[f"{r + row_offset}-{c + 1}"] = text
    return cell_texts
multihiertt_data/test.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15bfe9cc1241e29050a5bcaf7b9639d6907e7895cd07a1f1b874c2fda905ad01
3
+ size 52242696
multihiertt_data/train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc17d75b5b3d155a44522bf8f9c2660afea60c7f5cf55048ccf0f927332f99c7
3
+ size 262497396