Datasets:
Tasks: Question Answering
Sub-tasks: extractive-qa
Languages: English
Size: 1M<n<10M
import json
import os
from collections import defaultdict

import pandas as pd
from tqdm import tqdm

directories = ['lifestyle', 'pooled', 'recreation', 'science', 'technology', 'writing']  # os.listdir()

for directory in directories:
    for file_type in ["dev", "test"]:
        if directory != 'pooled':
            # Build a post-id -> author lookup from the split's metadata file.
            with open(os.path.join('data', directory, file_type, 'metadata.jsonl'), 'r', encoding="utf-8") as json_file:
                metadata = list(json_file)

            post_id_to_author = {}
            for json_str in metadata:
                current_row = json.loads(json_str)
                current_post_ids = current_row['post_ids']
                current_post_authors = current_row['post_authors']
                for post_id, author in zip(current_post_ids, current_post_authors):
                    post_id_to_author[post_id] = author
        else:
            # The pooled split is handled without a lookup: every author defaults to "".
            post_id_to_author = defaultdict(str)

        #####################################################################################
        # Attach an author to every passage in the collection.
        current_jsonl = []
        loaded_file = pd.read_csv(os.path.join('data', directory, file_type, 'collection.tsv'), sep='\t', header=0)
        for row in tqdm(range(len(loaded_file))):
            current_jsonl.append({
                "doc_id": row,
                "author": post_id_to_author[row],
                "text": loaded_file.iloc[row, 1]
            })

        if not os.path.isdir(directory):
            os.mkdir(directory)
        # Write one record per line so the output file is valid JSON Lines.
        with open(os.path.join(directory, file_type + "_collection.jsonl"), 'w', encoding="utf-8") as fout:
            for record in current_jsonl:
                fout.write(json.dumps(record) + "\n")
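
A minimal sketch of reading one of the generated files back in, assuming the one-record-per-line format written above; the writing/dev_collection.jsonl path is only an illustrative example:

import json

# Load a converted collection back into memory (example path, adjust as needed).
records = []
with open("writing/dev_collection.jsonl", "r", encoding="utf-8") as fin:
    for line in fin:
        records.append(json.loads(line))

# Each record carries the fields produced by the script above.
print(records[0]["doc_id"], records[0]["author"], records[0]["text"][:80])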