Upload create_data_retrieval.py

create_data_retrieval.py  ADDED  +182 -0
@@ -0,0 +1,182 @@
from collections import Counter
import json
import re

import datasets
import pandas as pd
from huggingface_hub import create_repo, upload_file, hf_hub_download
from huggingface_hub.utils import HfHubHTTPError

########################
# Cleanup queries data #
########################

# load dataset
dl_path = hf_hub_download(
    repo_id="antoinelb7/alloprof",
    filename="data/alloprof.csv",
    repo_type="dataset",
    revision="0faa90fee1ad1a6e3e461d7be49abf71488e6687"
)
alloprof_queries = pd.read_csv(dl_path)

# remove non-queries
alloprof_queries = alloprof_queries[alloprof_queries["is_query"]]

# remove NaNs in text
alloprof_queries = alloprof_queries[~alloprof_queries["text"].isna()]

# most data flagged as language "en" is actually French; we remove the English entries
# by matching specific words at the start of the text
alloprof_queries = alloprof_queries[
    ~(
        (alloprof_queries["text"].str.lower().str.startswith("hi"))
        | (alloprof_queries["text"].str.lower().str.startswith("hello"))
        | (alloprof_queries["text"].str.lower().str.startswith("how"))
        | (alloprof_queries["text"].str.lower().str.startswith("i "))
    )
]

# only keep queries with French relevant documents
alloprof_queries = alloprof_queries[
    (~alloprof_queries["relevant"].isna()) & (alloprof_queries["relevant"].str.endswith("-fr"))
]

# remove queries with a URL in the text because the question relies on a picture
alloprof_queries = alloprof_queries[~alloprof_queries["text"].str.contains("https://www.alloprof.qc.ca")]


# split multiple relevant docs and remove the -fr suffix on ids
def parse_relevant_ids(row):
    row = row.split(";")
    row = [r[:-3] for r in row if r.endswith("-fr")]
    return row


alloprof_queries["relevant"] = alloprof_queries["relevant"].apply(parse_relevant_ids)


# parse the answer
def parse_answer(row):
    try:
        row = json.loads(row)
        text = []
        for i in row:
            if not isinstance(i["insert"], dict):
                text.append(i["insert"])
        text = "".join(text)
    except (ValueError, TypeError, KeyError):
        text = row
    return text.replace(" ", " ").replace("\u200b", "").replace("\xa0", "")


alloprof_queries["answer"] = alloprof_queries["answer"].apply(parse_answer)

# only keep useful columns
alloprof_queries = alloprof_queries[["id", "text", "answer", "relevant", "subject"]]

# remove duplicate queries (same text)
alloprof_queries = alloprof_queries.drop_duplicates(subset=["text"], keep="first")

##########################
# Cleanup documents data #
##########################

# load dataset
dl_path = hf_hub_download(
    repo_id="antoinelb7/alloprof",
    filename="data/pages/page-content-fr.json",
    repo_type="dataset",
    revision="0faa90fee1ad1a6e3e461d7be49abf71488e6687"
)
alloprof_docs = pd.read_json(dl_path)

# remove NaNs in data
alloprof_docs = alloprof_docs[~alloprof_docs["data"].isna()]

# parse dataset
def parse_row(row):
    return [row["file"]["uuid"], row["file"]["title"], row["file"]["topic"]]


def get_text(row):
    text = []
    for s in row["file"]["sections"]:
        for m in s["modules"]:
            if m["type"] == "blocSpecial":
                if m["subtype"] in ["definition", "exemple"]:
                    for sm in m["submodules"]:
                        if sm["type"] == "text":
                            text.append(sm["text"])
            elif m["type"] == "text":
                text.append(m["text"])
    text = " ".join(text)
    text = re.sub("<[^<]+?>", "", text)
    text = text.replace(" ", " ").replace("\u200b", "")
    text = re.sub(r"\s{2,}", " ", text)

    return text


parsed_df = alloprof_docs["data"].apply(parse_row)
alloprof_docs[["uuid", "title", "topic"]] = parsed_df.tolist()
alloprof_docs["text"] = alloprof_docs["data"].apply(get_text)

# remove unnecessary columns
alloprof_docs = alloprof_docs[["uuid", "title", "topic", "text"]]

################
# Post Process #
################

# check that all relevant docs mentioned in queries are in the docs dataset
relevants = alloprof_queries["relevant"].tolist()
relevants = {i for j in relevants for i in j}  # flatten list and get uniques
assert relevants.issubset(
    alloprof_docs["uuid"].tolist()
), "Some relevant documents referenced by queries are not present in the corpus"

# convert to Dataset
alloprof_queries = datasets.Dataset.from_pandas(alloprof_queries)
alloprof_docs = datasets.Dataset.from_pandas(alloprof_docs)

# identify duplicate documents
# (duplicates are actually error documents,
# such as "fiche en construction", " ", ...)
duplicate_docs = Counter(alloprof_docs["text"])
duplicate_docs = {k: v for k, v in duplicate_docs.items() if v > 1}

# for each text that is duplicated...
for dup_text in duplicate_docs:
    # ...get the ids of docs that have that text
    duplicate_ids = [d["uuid"] for d in alloprof_docs if d["text"] == dup_text]
    # ...delete all the documents that have these ids from the corpus dataset
    alloprof_docs = alloprof_docs.filter(lambda x: x["uuid"] not in duplicate_ids)
    # ...delete them from the relevant documents in queries
    alloprof_queries = alloprof_queries.map(lambda x: {"relevant": [i for i in x["relevant"] if i not in duplicate_ids]})

# remove the queries that have no remaining relevant documents
alloprof_queries = alloprof_queries.filter(lambda x: len(x["relevant"]) > 0)

# split queries into train-test
alloprof_queries = alloprof_queries.train_test_split(test_size=0.2)

####################
# Upload to HF Hub #
####################

# create HF repo
repo_id = "lyon-nlp/alloprof"
try:
    create_repo(repo_id, repo_type="dataset")
except HfHubHTTPError:
    print("HF repo already exists")

# save datasets as json
alloprof_queries["train"].to_pandas().to_json("queries-train.json", orient="records")
alloprof_queries["test"].to_pandas().to_json("queries-test.json", orient="records")
alloprof_docs.to_pandas().to_json("documents.json", orient="records")

upload_file(path_or_fileobj="queries-train.json", path_in_repo="queries-train.json", repo_id=repo_id, repo_type="dataset")
upload_file(path_or_fileobj="queries-test.json", path_in_repo="queries-test.json", repo_id=repo_id, repo_type="dataset")
upload_file(path_or_fileobj="documents.json", path_in_repo="documents.json", repo_id=repo_id, repo_type="dataset")
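For reference, a minimal sketch of how the uploaded files could be loaded back, assuming the three JSON files were pushed to lyon-nlp/alloprof as above (the split names passed to data_files are this sketch's own choice, not something the script defines):

import datasets

# queries come back as a DatasetDict with the two splits saved above
queries = datasets.load_dataset(
    "lyon-nlp/alloprof",
    data_files={"train": "queries-train.json", "test": "queries-test.json"},
)
# the corpus is a single file; "corpus" is an arbitrary split name used here
documents = datasets.load_dataset(
    "lyon-nlp/alloprof",
    data_files={"corpus": "documents.json"},
)
print(queries["test"][0]["text"], queries["test"][0]["relevant"])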