parquet-converter
committed on
Commit
•
71128c6
1
Parent(s):
250cec6
Update parquet files
Browse files- .gitattributes +12 -0
- .gitignore +0 -2
- README.md +0 -88
- convert.py +0 -120
- data/train/train-04.jsonl.gz +0 -3
- data/train/train-05.jsonl.gz +0 -3
- data/train/train-01.jsonl.gz → default/train/0000.parquet +2 -2
- data/train/train-02.jsonl.gz → default/train/0001.parquet +2 -2
- data/train/train-03.jsonl.gz → default/train/0002.parquet +2 -2
- data/train/train-00.jsonl.gz → default/train/0003.parquet +2 -2
- default/train/0004.parquet +3 -0
- default/train/0005.parquet +3 -0
- default/train/0006.parquet +3 -0
- default/train/0007.parquet +3 -0
- default/train/0008.parquet +3 -0
- default/train/0009.parquet +3 -0
- default/train/0010.parquet +3 -0
- default/train/0011.parquet +3 -0
- requirements.txt +0 -2
.gitattributes
CHANGED
@@ -1,2 +1,14 @@
|
|
1 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
2 |
*.gz filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
2 |
*.gz filter=lfs diff=lfs merge=lfs -text
|
3 |
+
default/train/0000.parquet filter=lfs diff=lfs merge=lfs -text
|
4 |
+
default/train/0001.parquet filter=lfs diff=lfs merge=lfs -text
|
5 |
+
default/train/0002.parquet filter=lfs diff=lfs merge=lfs -text
|
6 |
+
default/train/0003.parquet filter=lfs diff=lfs merge=lfs -text
|
7 |
+
default/train/0004.parquet filter=lfs diff=lfs merge=lfs -text
|
8 |
+
default/train/0005.parquet filter=lfs diff=lfs merge=lfs -text
|
9 |
+
default/train/0006.parquet filter=lfs diff=lfs merge=lfs -text
|
10 |
+
default/train/0007.parquet filter=lfs diff=lfs merge=lfs -text
|
11 |
+
default/train/0008.parquet filter=lfs diff=lfs merge=lfs -text
|
12 |
+
default/train/0009.parquet filter=lfs diff=lfs merge=lfs -text
|
13 |
+
default/train/0010.parquet filter=lfs diff=lfs merge=lfs -text
|
14 |
+
default/train/0011.parquet filter=lfs diff=lfs merge=lfs -text
|
.gitignore
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
.venv
|
2 |
-
.mypy_cache
|
|
|
|
|
|
README.md
DELETED
@@ -1,88 +0,0 @@
|
|
1 |
-
---
|
2 |
-
language:
|
3 |
-
- en
|
4 |
-
license: apache-2.0
|
5 |
-
tags:
|
6 |
-
- text
|
7 |
-
pretty_name: MS MARCO hard negatives
|
8 |
-
size_categories:
|
9 |
-
- "100K<n<1M"
|
10 |
-
source_datasets:
|
11 |
-
- MSMARCO
|
12 |
-
task_categories:
|
13 |
-
- sentence-similarity
|
14 |
-
dataset_info:
|
15 |
-
config_name: default
|
16 |
-
features:
|
17 |
-
- name: query
|
18 |
-
dtype: string
|
19 |
-
- name: pos
|
20 |
-
list:
|
21 |
-
- name: doc
|
22 |
-
dtype: string
|
23 |
-
- name: score
|
24 |
-
dtype: float
|
25 |
-
- name: neg
|
26 |
-
list:
|
27 |
-
- name: doc
|
28 |
-
dtype: string
|
29 |
-
- name: score
|
30 |
-
dtype: float
|
31 |
-
splits:
|
32 |
-
- name: train
|
33 |
-
num_bytes: 89609915
|
34 |
-
num_examples: 502939
|
35 |
-
train-eval-index:
|
36 |
-
- config: default
|
37 |
-
task: sentence-similarity
|
38 |
-
splits:
|
39 |
-
train_split: train
|
40 |
-
eval_split: test
|
41 |
-
configs:
|
42 |
-
- config_name: default
|
43 |
-
data_files:
|
44 |
-
- split: train
|
45 |
-
path: "data/train/*"
|
46 |
-
---
|
47 |
-
|
48 |
-
# MS MARCO hard negatives dataset
|
49 |
-
|
50 |
-
A dataset in a [nixietune](https://github.com/nixiesearch/nixietune) compatible format:
|
51 |
-
|
52 |
-
```json
|
53 |
-
{
|
54 |
-
"query": ")what was the immediate impact of the success of the manhattan project?",
|
55 |
-
"pos": [
|
56 |
-
{
|
57 |
-
"doc": "The presence of communication amid scientific minds was equally important to the success of the Manhattan Project as scientific intellect was. The only cloud hanging over the impressive achievement of the atomic researchers and engineers is what their success truly meant; hundreds of thousands of innocent lives obliterated.",
|
58 |
-
"score": 1
|
59 |
-
}
|
60 |
-
],
|
61 |
-
"neg": [
|
62 |
-
{
|
63 |
-
"doc": "Abstract. The pivotal engineering and scientific success of the Twentieth century was the Manhattan Project. The Manhattan Project assimilated concepts and leaders from all scientific fields and engineering disciplines to construct the first two atomic bombs.",
|
64 |
-
"score": 0.0
|
65 |
-
},
|
66 |
-
{
|
67 |
-
"doc": "The pivotal engineering and scientific success of the Twentieth century was the Manhattan Project. The Manhattan Project assimilated concepts and leaders from all scientific fields and engineering disciplines to construct the first two atomic bombs.",
|
68 |
-
"score": 0.0
|
69 |
-
}
|
70 |
-
]
|
71 |
-
}
|
72 |
-
```
|
73 |
-
|
74 |
-
This is the original [BeIR-msmarco](https://huggingface.co/datasets/BeIR/msmarco) joined with the [msmarco-hard-negatives](https://huggingface.co/datasets/sentence-transformers/msmarco-hard-negatives) dataset with the following splits:
|
75 |
-
* train: 502939 queries, only positives.
|
76 |
-
|
77 |
-
## Usage
|
78 |
-
|
79 |
-
```python
|
80 |
-
from datasets import load_dataset
|
81 |
-
|
82 |
-
data = load_dataset('nixiesearch/ms_marco_hard_negatives')
|
83 |
-
print(data["train"].features)
|
84 |
-
```
|
85 |
-
|
86 |
-
## License
|
87 |
-
|
88 |
-
Apache 2.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
convert.py
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
from datasets import load_dataset, Features, Value, Sequence
|
2 |
-
from dataclasses import dataclass, field
|
3 |
-
import logging
|
4 |
-
from transformers import HfArgumentParser
|
5 |
-
from tqdm import tqdm
|
6 |
-
from typing import Dict, List
|
7 |
-
import json
|
8 |
-
import numpy as np
|
9 |
-
from itertools import islice
|
10 |
-
|
11 |
-
logger = logging.getLogger()
|
12 |
-
logger.setLevel(logging.INFO)
|
13 |
-
console_handler = logging.StreamHandler()
|
14 |
-
console_handler.setFormatter(
|
15 |
-
logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
|
16 |
-
)
|
17 |
-
logger.handlers = [console_handler]
|
18 |
-
|
19 |
-
|
20 |
-
@dataclass
class ConversionAgruments:
    """Command-line arguments for the conversion script.

    NOTE(review): the class name misspells "Arguments"; left unchanged
    because main() refers to it by this exact name.
    """

    # Path to the sentence-transformers msmarco-hard-negatives jsonl dump.
    hardneg: str = field(metadata={"help": "Path to msmarco-hard-negatives.jsonl file"})
    # Directory where the per-split .jsonl output files are written.
    out: str = field(metadata={"help": "Output path"})
|
24 |
-
|
25 |
-
|
26 |
-
@dataclass
class QRel:
    """A single relevance judgment: a corpus document id and its score."""

    # Numeric corpus document id.
    doc: int
    # Relevance grade; process_raw() treats score > 0 as a positive.
    score: int
|
30 |
-
|
31 |
-
|
32 |
-
def load_msmarco(path: str, split) -> Dict[int, str]:
    """Load a BeIR-style dataset split as a mapping of numeric id -> text.

    Args:
        path: Hugging Face dataset path (e.g. "BeIR/msmarco").
        split: split/config name ("queries" or "corpus").
    """
    rows = load_dataset(path, split, split=split)
    texts: Dict[int, str] = {}
    for record in tqdm(rows, desc=f"loading {path} split={split}"):
        texts[int(record["_id"])] = record["text"]
    return texts
|
39 |
-
|
40 |
-
|
41 |
-
def load_qrel(path: str, split: str) -> Dict[int, List[QRel]]:
    """Load a qrels dataset and group relevance judgments by query id.

    Args:
        path: Hugging Face dataset path (e.g. "BeIR/msmarco-qrels").
        split: dataset split name.

    Returns:
        Mapping of query id -> list of QRel(corpus id, score).
    """
    dataset = load_dataset(path, split=split)
    cache: Dict[int, List[QRel]] = {}
    for row in tqdm(dataset, desc=f"loading {path} split={split}"):
        qid = int(row["query-id"])
        qrel = QRel(int(row["corpus-id"]), int(row["score"]))
        # setdefault replaces the explicit membership check; one lookup, same grouping.
        cache.setdefault(qid, []).append(qrel)
    return cache
|
53 |
-
|
54 |
-
|
55 |
-
def process_raw(
    qrels: Dict[int, List[QRel]],
    queries: Dict[int, str],
    corpus: Dict[int, str],
    hardneg: Dict[int, List[int]],
) -> List[Dict]:
    """Join qrels, query texts, corpus texts and mined hard negatives into
    nixietune-style {"query", "pos", "neg"} groups, one per query.

    Judgments whose document is missing from the corpus are skipped; only
    judgments with score > 0 count as positives, and all hard negatives get
    score 0.0.
    """
    groups: List[Dict] = []
    for qid, judgments in tqdm(qrels.items(), desc="processing split"):
        positives = []
        for judgment in judgments:
            if judgment.score > 0 and judgment.doc in corpus:
                positives.append({"doc": corpus[judgment.doc], "score": judgment.score})
        negatives = [
            {"doc": corpus[doc_id], "score": 0.0}
            for doc_id in hardneg.get(qid, [])
            if doc_id in corpus
        ]
        groups.append({"query": queries[qid], "pos": positives, "neg": negatives})
    return groups
|
76 |
-
|
77 |
-
|
78 |
-
def load_hardneg(path: str):
    """Read the msmarco-hard-negatives jsonl file and keep, per query, the 32
    highest-ranked negative document ids fused across all mining methods.

    Fusion is reciprocal-rank style: each method contributes 1/(60 + rank)
    per document, and contributions for the same document are summed.
    """
    result: Dict[int, List[int]] = {}
    with open(path, "r") as jsonfile:
        for line in tqdm(jsonfile, total=808731, desc="loading hard negatives"):
            row = json.loads(line)
            fused: Dict[int, float] = {}
            # Method names are irrelevant here; only each method's ranking matters.
            for docs in row["neg"].values():
                for rank, doc in enumerate(docs):
                    doc_id = int(doc)
                    fused[doc_id] = fused.get(doc_id, 0.0) + 1.0 / (60 + rank)
            ranked = sorted(fused, key=fused.get, reverse=True)
            result[int(row["qid"])] = ranked[:32]
    return result
|
96 |
-
|
97 |
-
|
98 |
-
def main():
    """CLI entry point: join BeIR msmarco with mined hard negatives and write
    one jsonl file per split into the output directory."""
    parser = HfArgumentParser((ConversionAgruments))
    (args,) = parser.parse_args_into_dataclasses()
    print(f"Args: {args}")
    hardneg = load_hardneg(args.hardneg)
    qrels = {
        "train": load_qrel("BeIR/msmarco-qrels", split="train"),
        "dev": load_qrel("BeIR/msmarco-qrels", split="validation"),
    }
    queries = load_msmarco("BeIR/msmarco", split="queries")
    corpus = load_msmarco("BeIR/msmarco", split="corpus")
    print("processing done")
    for split, split_qrels in qrels.items():
        records = process_raw(split_qrels, queries, corpus, hardneg)
        with open(f"{args.out}/{split}.jsonl", "w") as out:
            for record in records:
                # One JSON object per line, identical byte layout to json.dump + "\n".
                out.write(json.dumps(record))
                out.write("\n")
    print("done")


if __name__ == "__main__":
    main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
data/train/train-04.jsonl.gz
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:150cccc543b38f0d8b612ba688b4ffada310c239d95c931644b8a9550a300e5c
|
3 |
-
size 368843006
|
|
|
|
|
|
|
|
data/train/train-05.jsonl.gz
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:513aa76ff5395eebbd4acaa48ab31b3ae7682a952c0ff1309e15f23b2ed292c9
|
3 |
-
size 10942759
|
|
|
|
|
|
|
|
data/train/train-01.jsonl.gz → default/train/0000.parquet
RENAMED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c2eb79ee31aeeb9c017fef438d9b7e5f2ce67f85ea090bd7869244c70472d458
|
3 |
+
size 227283539
|
data/train/train-02.jsonl.gz → default/train/0001.parquet
RENAMED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c04099f83d2ae50cd9592dd9b7ba2bac9fdce8afd237059e3dee453b8381367f
|
3 |
+
size 227189974
|
data/train/train-03.jsonl.gz → default/train/0002.parquet
RENAMED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:0f34414994af41212ac907a61ded0381f2d40ca41ff7ad9d42e19c4c4f485152
|
3 |
+
size 223443646
|
data/train/train-00.jsonl.gz → default/train/0003.parquet
RENAMED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b8aaf2193df6685393d83da43375c1e1d83a5fd69f8f144de3789c38b940a84f
|
3 |
+
size 230299166
|
default/train/0004.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:26fba4806feea0d15be84bcd202a50792b338ec27d78e69a5489f1d1056de52b
|
3 |
+
size 228438178
|
default/train/0005.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:eddbd725419412a8dd38b7bcbfa553116175c11bb947807bde3f336997e260ff
|
3 |
+
size 229277946
|
default/train/0006.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:af4cee444b2e9fb311880581bc9731487a0efd3666101ed258546f98d792f39d
|
3 |
+
size 228661914
|
default/train/0007.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e05460d6abb03053b82e5adbd302458fa95aa9d7b17841311f3ced9e52d5f993
|
3 |
+
size 229664284
|
default/train/0008.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c0945f26afaabaad3c3314c655f3d76d8c063b08570b32b44b946d2f7a609bb2
|
3 |
+
size 224587117
|
default/train/0009.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5ac11a8e428e3f0519c496e96bc9c6a394d9e4053cfd98ed13874c678b041281
|
3 |
+
size 217395104
|
default/train/0010.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:6e203c5fc5af8bb5c984440492e96490f55b780dadf3ccead1d50305777fa682
|
3 |
+
size 221924754
|
default/train/0011.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b81db9e45b950efaac0c8acb19a1a811f5496197c2ed0e646efc0405a8575005
|
3 |
+
size 156367355
|
requirements.txt
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
datasets
|
2 |
-
transformers
|
|
|
|
|
|