Tasks: Question Answering
Sub-tasks: extractive-qa
Languages: English
Size: 1M<n<10M
import json
import os
from collections import defaultdict

import pandas as pd
from tqdm import tqdm

# Convert each split's collection.tsv + metadata.jsonl into a
# <split>_collection.jsonl file that pairs every passage with its author.
directories = ['lifestyle', 'pooled', 'recreation', 'science', 'technology', 'writing']  # os.listdir()

for directory in directories:
    for file_type in ["dev", "test"]:
        if directory != 'pooled':
            # Build a post id -> author mapping from the split's metadata.
            with open('data/' + directory + "/" + file_type + "/metadata.jsonl", 'r', encoding="utf-8") as json_file:
                metadata = list(json_file)
            post_id_to_author = {}
            for json_str in metadata:
                current_row = json.loads(json_str)
                current_post_ids = current_row['post_ids']
                current_post_authors = current_row['post_authors']
                for post_id, author in zip(current_post_ids, current_post_authors):
                    post_id_to_author[post_id] = author
        else:
            # The pooled split has no per-forum metadata, so authors default to "".
            post_id_to_author = defaultdict(str)

        #####################################################################################
        # collection.tsv is expected to contain (doc id, text) columns.
        current_jsonl = []
        loaded_file = pd.read_csv('data/' + directory + "/" + file_type + "/collection.tsv", sep='\t', header=0)
        for row in tqdm(range(len(loaded_file))):
            current_jsonl.append({
                "doc_id": row,
                "author": post_id_to_author[row],
                "text": loaded_file.iloc[row, 1]
            })

        if not os.path.isdir(directory):
            os.mkdir(directory)
        # Write one JSON object per line so the output is valid JSONL.
        with open(directory + "/" + file_type + "_collection.jsonl", 'w', encoding="utf-8") as fout:
            for record in current_jsonl:
                fout.write(json.dumps(record) + "\n")
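For reference, a minimal sketch of reading one of the generated files back, assuming the script above has already produced it; the path lifestyle/dev_collection.jsonl is only an example, and any forum/split written by the script follows the same {"doc_id", "author", "text"} schema.

import json

# Load a generated collection back into memory, one JSON object per line.
records = []
with open("lifestyle/dev_collection.jsonl", "r", encoding="utf-8") as fin:
    for line in fin:
        records.append(json.loads(line))

# Quick sanity check on the first record.
print(len(records), records[0]["doc_id"], records[0]["author"])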