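"""Convert MS MARCO (BeIR/msmarco and BeIR/msmarco-qrels) together with a
msmarco-hard-negatives.jsonl file into train/dev JSONL files, where each line
holds a query, its judged positive passages, and up to 32 fused hard-negative passages."""
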
from datasets import load_dataset
from dataclasses import dataclass, field
import logging
from transformers import HfArgumentParser
from tqdm import tqdm
from typing import Dict, List
import json

# log INFO and above to the console with a timestamped format
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(
    logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
)
logger.handlers = [console_handler]


@dataclass
class ConversionArguments:
    hardneg: str = field(metadata={"help": "Path to msmarco-hard-negatives.jsonl file"})
    out: str = field(metadata={"help": "Output path"})


@dataclass
class QRel:
    # a single relevance judgement: document id and its graded relevance score
    doc: int
    score: int
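

# Load one BeIR/msmarco configuration ("queries" or "corpus") into a dict
# keyed by integer id, mapping to the raw text.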
def load_msmarco(path: str, split: str) -> Dict[int, str]:
    # BeIR datasets use the same name for config and split, hence the doubled argument
    dataset = load_dataset(path, split, split=split)
    cache: Dict[int, str] = {}
    for row in tqdm(dataset, desc=f"loading {path} split={split}"):
        index = int(row["_id"])
        cache[index] = row["text"]
    return cache
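

# Load the qrels (query-id, corpus-id, score) of a split into a dict of
# query id -> list of QRel.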
def load_qrel(path: str, split: str) -> Dict[int, List[QRel]]:
    dataset = load_dataset(path, split=split)
    print(dataset.features)
    cache: Dict[int, List[QRel]] = {}
    for row in tqdm(dataset, desc=f"loading {path} split={split}"):
        qid = int(row["query-id"])
        qrel = QRel(int(row["corpus-id"]), int(row["score"]))
        if qid in cache:
            cache[qid].append(qrel)
        else:
            cache[qid] = [qrel]
    return cache
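

# Join qrels with query and passage text and attach hard negatives: positives are
# judged passages with score > 0, negatives are the fused hard-negative passages
# with score 0.0; passages missing from the corpus are skipped.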
def process_raw(
    qrels: Dict[int, List[QRel]],
    queries: Dict[int, str],
    corpus: Dict[int, str],
    hardneg: Dict[int, List[int]],
) -> List[Dict]:
    result = []
    for query, rels in tqdm(qrels.items(), desc="processing split"):
        pos = [
            {"doc": corpus[rel.doc], "score": rel.score}
            for rel in rels
            if rel.doc in corpus and rel.score > 0
        ]
        neg = [
            {"doc": corpus[doc], "score": 0.0}
            for doc in hardneg.get(query, [])
            if doc in corpus
        ]
        group = {"query": queries[query], "pos": pos, "neg": neg}
        result.append(group)
    return result
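

# Read msmarco-hard-negatives.jsonl and fuse the per-retriever negative rankings with
# reciprocal-rank fusion (each document scores 1 / (60 + rank) per ranking), keeping
# the top 32 documents per query.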
def load_hardneg(path: str) -> Dict[int, List[int]]:
    result: Dict[int, List[int]] = {}
    with open(path, "r") as jsonfile:
        for line in tqdm(jsonfile, total=808731, desc="loading hard negatives"):
            row = json.loads(line)
            scores: Dict[int, float] = {}
            for method, docs in row["neg"].items():
                for index, doc in enumerate(docs):
                    prev = scores.get(int(doc), 0.0)
                    scores[int(doc)] = prev + 1.0 / (60 + index)
            topneg = [
                doc
                for doc, score in sorted(
                    scores.items(), key=lambda x: x[1], reverse=True
                )
            ]
            result[int(row["qid"])] = topneg[:32]
    return result
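

# Entry point: load hard negatives, qrels, queries and corpus, then write one JSONL
# file per split (train.jsonl, dev.jsonl) into the output directory.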
def main():
    parser = HfArgumentParser(ConversionArguments)
    (args,) = parser.parse_args_into_dataclasses()
    print(f"Args: {args}")
    hardneg = load_hardneg(args.hardneg)
    qrels = {
        "train": load_qrel("BeIR/msmarco-qrels", split="train"),
        "dev": load_qrel("BeIR/msmarco-qrels", split="validation"),
    }
    queries = load_msmarco("BeIR/msmarco", split="queries")
    corpus = load_msmarco("BeIR/msmarco", split="corpus")
    print("processing done")
    for split, data in qrels.items():
        dataset = process_raw(data, queries, corpus, hardneg)
        with open(f"{args.out}/{split}.jsonl", "w") as out:
            for item in dataset:
                json.dump(item, out)
                out.write("\n")
    print("done")


if __name__ == "__main__":
    main()
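
# Example invocation (illustrative; the script filename and paths are assumptions):
#   python convert.py \
#       --hardneg msmarco-hard-negatives.jsonl \
#       --out data/
# HfArgumentParser derives the --hardneg and --out flags from the ConversionArguments fields.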