fix neg:nil issue while loading dataset
Files changed:

- README.md (+6 -6)
- convert.py (+8 -9)
- data/dev/dev.jsonl.gz (+2 -2)
- data/test/test.jsonl.gz (+2 -2)
- data/train/train.jsonl.gz (+2 -2)
- requirements.txt (+1 -0)
- use.py (+3 -0)
README.md (CHANGED)

```diff
@@ -30,14 +30,14 @@ dataset_info:
     dtype: float
   splits:
   - name: train
-    num_bytes:
-    num_examples:
+    num_bytes: 89609915
+    num_examples: 502939
   - name: test
-    num_bytes:
-    num_examples:
+    num_bytes: 969945
+    num_examples: 43
   - name: dev
-    num_bytes:
-    num_examples:
+    num_bytes: 1206403
+    num_examples: 6980
 train-eval-index:
 - config: default
   task: sentence-similarity
```
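Note that the new `num_bytes` values match the sizes of the regenerated `.jsonl.gz` pointers below (89609915, 969945, 1206403), so the card metadata now agrees with the shipped data files.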
convert.py (CHANGED)

```diff
@@ -67,9 +67,7 @@ def process(
             for rel in rels
             if rel.doc < len(corpus) and rel.score == 0 and corpus[rel.doc] != ""
         ]
-        group = {"query": queries[query], "pos": pos}
-        if len(neg) > 0:
-            group["neg"] = neg
+        group = {"query": queries[query], "pos": pos, "neg": neg}
         result.append(group)
     return result
 
@@ -78,17 +76,18 @@ def main():
     parser = HfArgumentParser((ConversionAgruments))
     (args,) = parser.parse_args_into_dataclasses()
     print(f"Args: {args}")
-    corpus = load_json(f"{args.path}/corpus.jsonl", split="train")
-    queries = load_json(f"{args.path}/queries.jsonl")
     qrels = {
-        "dev": …
-        "train": …
-        "test": …
+        "dev": load_qrel(f"{args.path}/qrels/dev.tsv"),
+        "train": load_qrel(f"{args.path}/qrels/train.tsv"),
+        "test": load_qrel(f"{args.path}/qrels/test.tsv"),
     }
+    corpus = load_json(f"{args.path}/corpus.jsonl", split="train")
+    queries = load_json(f"{args.path}/queries.jsonl")
     print("processing done")
     for split, data in qrels.items():
+        dataset = process(data, queries, corpus)
         with open(f"{args.out}/{split}.jsonl", "w") as out:
-            for item in …
+            for item in dataset:
                 json.dump(item, out)
                 out.write("\n")
     print("done")
```
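This hunk is the heart of the commit: the old `process` only attached a `neg` key when a group had at least one negative, so rows without negatives round-tripped through JSONL as `neg: null` once the dataset was loaded, the `neg:nil` of the commit title. Emitting `neg` unconditionally, even as an empty list, keeps every row on the same schema. A minimal sketch of the difference, using made-up rows and throwaway file names that are not part of the repo:

```python
import json
from datasets import load_dataset

# Hypothetical rows illustrating the old and new convert.py output.
rows_old = [
    {"query": "q1", "pos": ["d1"], "neg": ["d2"]},
    {"query": "q2", "pos": ["d3"]},              # "neg" omitted when empty
]
rows_new = [
    {"query": "q1", "pos": ["d1"], "neg": ["d2"]},
    {"query": "q2", "pos": ["d3"], "neg": []},   # "neg" always present
]

for name, rows in [("old.jsonl", rows_old), ("new.jsonl", rows_new)]:
    with open(name, "w") as out:
        for row in rows:
            out.write(json.dumps(row) + "\n")

# Missing keys are filled with None on load: the second row of old.jsonl
# comes back as neg=None (the neg:nil issue), while new.jsonl yields neg=[].
print(load_dataset("json", data_files="old.jsonl", split="train")["neg"])
print(load_dataset("json", data_files="new.jsonl", split="train")["neg"])
```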
data/dev/dev.jsonl.gz (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:dbd532e51439da3bd74c79105f9f9f27beafd1db73a1b2155713ec37c8fd376d
+size 1206403
```
data/test/test.jsonl.gz (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1b454c2d3e35adf12716e64699cc6637f703d9e82fc46f036cfa5af59433edc5
+size 969945
```
data/train/train.jsonl.gz (CHANGED)

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d56a52df9c3bbd14ee6f92cef2aeb61bcf6d33bc9e85012559b53a5f246157a0
+size 89609915
```
requirements.txt (CHANGED)

```diff
@@ -1 +1,2 @@
 datasets
+transformers
```
use.py (ADDED)

```diff
@@ -0,0 +1,3 @@
+from datasets import load_dataset
+
+data = load_dataset("nixiesearch/MSMARCO", split="train")
```
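With the fix applied, every row of the loaded split should carry the same three columns, with `neg` always a list (possibly empty) rather than `None`. A quick sanity check, assuming the schema sketched above:

```python
row = data[0]
print(sorted(row.keys()))      # expected: ['neg', 'pos', 'query']
assert row["neg"] is not None  # neg is now always a list, possibly empty
```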