migrate to flat schema

Files changed:
- README.md (+15, -22)
- convert.py (+20, -34)
- data/dev/dev.jsonl.gz (+2, -2)
- data/test/test.jsonl.gz (+2, -2)
- data/train/train.jsonl.gz (+2, -2)
- use.py (+3, -19)
README.md
CHANGED

````diff
@@ -4,7 +4,7 @@ language:
 license: apache-2.0
 tags:
 - text
-pretty_name:
+pretty_name: MS MARCO
 size_categories:
 - "100K<n<1M"
 source_datasets:
@@ -16,18 +16,12 @@ dataset_info:
   features:
   - name: query
     dtype: string
-  - name: pos
-    list:
-    - name: doc
-      dtype: string
-    - name: score
-      dtype: float
-  - name: neg
-    list:
-    - name: doc
-      dtype: string
-    - name: score
-      dtype: float
+  - name: positive
+    sequence:
+    - dtype: string
+  - name: negative
+    sequence:
+    - dtype: string
   splits:
   - name: train
     num_bytes: 89609915
@@ -55,23 +49,21 @@ configs:
     path: "data/dev/*"
 ---
 
-#
+# MS MARCO dataset
 
 A dataset in a [nixietune](https://github.com/nixiesearch/nixietune) compatible format:
 
 ```json
 {
   "query": ")what was the immediate impact of the success of the manhattan project?",
-  "pos": [
-    {
-      "doc": "The presence of communication amid scientific minds was equally important to the success of the Manhattan Project as scientific intellect was. The only cloud hanging over the impressive achievement of the atomic researchers and engineers is what their success truly meant; hundreds of thousands of innocent lives obliterated.",
-      "score": 1
-    }
-  ]
+  "positive": [
+    "The presence of communication amid scientific minds was equally important to the success of the Manhattan Project as scientific intellect was. The only cloud hanging over the impressive achievement of the atomic researchers and engineers is what their success truly meant; hundreds of thousands of innocent lives obliterated."
+  ],
+  "negative": []
 }
 ```
 
-This is the original converted dataset with the following splits:
+This is the original [BeIR/msmarco](https://huggingface.co/datasets/BeIR/msmarco) converted dataset with the following splits:
 * train: 502939 queries, only positives.
 * test: 43 queries, positives and negatives.
 * dev: 6980 queries, only positives.
@@ -81,7 +73,8 @@ This is the original converted dataset with the following splits:
 ```python
 from datasets import load_dataset
 
-data = load_dataset('nixiesearch/
+data = load_dataset('nixiesearch/ms_marco')
+print(data["train"].features)
 ```
 
 ## License
````
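Since the simplified use.py below no longer spells the schema out by hand, here is a minimal sketch of pinning the new flat schema explicitly when loading local copies of the converted JSONL. The `data_files` paths are hypothetical placeholders; `load_dataset("nixiesearch/ms_marco")` infers the same features on its own.

```python
# Sketch: load local flat-schema JSONL with the features pinned explicitly.
# The file paths below are placeholders, not part of this repository.
from datasets import Features, Sequence, Value, load_dataset

flat_features = Features(
    {
        "query": Value("string"),
        "positive": Sequence(Value("string")),
        "negative": Sequence(Value("string")),
    }
)
data = load_dataset(
    "json",
    data_files={"train": "train.jsonl", "test": "test.jsonl"},
    features=flat_features,
)
print(data["test"].features)
```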
convert.py
CHANGED

```diff
@@ -1,10 +1,11 @@
-from datasets import load_dataset
+from datasets import load_dataset, Features, Value, Sequence
 from dataclasses import dataclass, field
 import logging
 from transformers import HfArgumentParser
 from tqdm import tqdm
 from typing import Dict, List
 import json
+import numpy as np
 
 logger = logging.getLogger()
 logger.setLevel(logging.INFO)
@@ -17,7 +18,6 @@ logger.handlers = [console_handler]
 
 @dataclass
 class ConversionAgruments:
-    path: str = field(metadata={"help": "Path to the MAMARCO dataset"})
     out: str = field(metadata={"help": "Output path"})
 
 
@@ -27,22 +27,20 @@ class QRel:
     score: int
 
 
-def load_msmarco(path: str) -> List[str]:
-    dataset = load_dataset(
-    cache: List[str] = []
-    for row in tqdm(dataset, desc=f"loading {path}"):
+def load_msmarco(path: str, split) -> Dict[int, str]:
+    dataset = load_dataset(path, split, split=split)
+    cache: Dict[int, str] = {}
+    for row in tqdm(dataset, desc=f"loading {path} split={split}"):
         index = int(row["_id"])
-        if index >= len(cache):
-            cache.extend([""] * (1 + 2 * max(index, len(cache))))
         cache[index] = row["text"]
     return cache
 
 
-def load_qrel(path: str) -> Dict[int, List[QRel]]:
-    dataset = load_dataset(
+def load_qrel(path: str, split: str) -> Dict[int, List[QRel]]:
+    dataset = load_dataset(path, split=split)
     print(dataset.features)
     cache: Dict[int, List[QRel]] = {}
-    for row in tqdm(dataset, desc=f"loading {path}"):
+    for row in tqdm(dataset, desc=f"loading {path} split={split}"):
         qid = int(row["query-id"])
         qrel = QRel(int(row["corpus-id"]), int(row["score"]))
         if qid in cache:
@@ -52,26 +50,14 @@ def load_qrel(path: str) -> Dict[int, List[QRel]]:
     return cache
 
 
-def process_raw(
-    qrels: Dict[int, List[QRel]], queries: List[str], corpus: List[str]
+def process_raw(
+    qrels: Dict[int, List[QRel]], queries: Dict[int, str], corpus: Dict[int, str]
 ) -> List[Dict]:
     result = []
     for query, rels in tqdm(qrels.items(), desc="processing split"):
-        pos = [
-            {"doc": corpus[rel.doc], "score": rel.score}
-            for rel in rels
-            if rel.doc < len(corpus) and rel.score > 0 and corpus[rel.doc] != ""
-        ]
-        neg = [
-            {"doc": corpus[rel.doc], "score": rel.score}
-            for rel in rels
-            if rel.doc < len(corpus) and rel.score == 0 and corpus[rel.doc] != ""
-        ]
-        group = {"query": queries[query], "pos": pos}
-        if len(neg) == 0:
-            group["neg"] = None
-        else:
-            group["neg"] = neg
+        pos = [corpus[rel.doc] for rel in rels if rel.doc in corpus and rel.score > 0]
+        neg = [corpus[rel.doc] for rel in rels if rel.doc in corpus and rel.score == 0]
+        group = {"query": queries[query], "positive": pos, "negative": neg}
         result.append(group)
     return result
 
@@ -81,15 +67,15 @@ def main():
     (args,) = parser.parse_args_into_dataclasses()
     print(f"Args: {args}")
     qrels = {
-        "train": load_qrel(
-        "test": load_qrel(
-        "dev": load_qrel(
+        "train": load_qrel("BeIR/msmarco-qrels", split="train"),
+        "test": load_qrel("BeIR/msmarco-qrels", split="test"),
+        "dev": load_qrel("BeIR/msmarco-qrels", split="validation"),
     }
-    queries = load_msmarco(
-    corpus = load_msmarco(
+    queries = load_msmarco("BeIR/msmarco", split="queries")
+    corpus = load_msmarco("BeIR/msmarco", split="corpus")
     print("processing done")
     for split, data in qrels.items():
-        dataset =
+        dataset = process_raw(data, queries, corpus)
         with open(f"{args.out}/{split}.jsonl", "w") as out:
             for item in dataset:
                 json.dump(item, out)
```
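One way to sanity-check the converter output before gzipping; a sketch, assuming convert.py was run as `python convert.py --out data` and wrote data/train.jsonl:

```python
# Sketch: confirm the first emitted record matches the flat schema.
# Assumes `python convert.py --out data` produced data/train.jsonl.
import json

with open("data/train.jsonl") as f:
    row = json.loads(next(f))

assert set(row) == {"query", "positive", "negative"}
assert all(isinstance(p, str) for p in row["positive"])  # flat strings, no {doc, score} objects
print(row["query"])
```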
data/dev/dev.jsonl.gz
CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:fdf0538cbf00e68f4904763b074ef192128ef75f668b509e36826e13316103c3
+size 1199510
```
data/test/test.jsonl.gz
CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d7daf1e3f97722ae59b888591756a12846b2fc02b89ee536d189faa27e684136
+size 951166
```
data/train/train.jsonl.gz
CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ce3b897ce69b424181aef6c25b8e17e20d0928a3d591f72d739db92b9346c573
+size 89120825
```
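These pointers resolve to ordinary gzipped JSONL, so a split can also be streamed without the datasets library; a minimal sketch:

```python
# Sketch: peek at the first record of the gzipped train split directly.
import gzip
import json

with gzip.open("data/train/train.jsonl.gz", "rt", encoding="utf-8") as f:
    first = json.loads(next(f))

print(first["query"], len(first["positive"]), len(first["negative"]))
```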
use.py
CHANGED

```diff
@@ -1,20 +1,4 @@
-from datasets import load_dataset
+from datasets import load_dataset
 
-
-
-
-data = load_dataset(
-    "json",
-    data_files={
-        "train": "/home/shutty/data/nixiesearch-datasets/msmarco/train.jsonl",
-        "test": "/home/shutty/data/nixiesearch-datasets/msmarco/test.jsonl",
-    },
-    features=Features(
-        {
-            "query": Value("string"),
-            "pos": [{"doc": Value("string"), "score": Value("int32")}],
-            "neg": [{"doc": Value("string"), "score": Value("int32")}],
-        }
-    ),
-)
-print(data["test"].features)
+data = load_dataset("nixiesearch/ms_marco")
+print(data)
```
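A possible extension of the new use.py for inspecting a single record of the flat schema:

```python
# Sketch: load the published dataset and peek at the first training record.
from datasets import load_dataset

data = load_dataset("nixiesearch/ms_marco")
row = data["train"][0]
print(row["query"])
print(len(row["positive"]), "positives,", len(row["negative"]), "negatives")
```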