Fix empty examples in xtreme dataset for bucc18 config (#4706)
* fix empty examples in bucc18
Co-authored-by: mustaszewski <[email protected]>
* fix yaml tag
Co-authored-by: mustaszewski <[email protected]>
Commit from https://github.com/huggingface/datasets/commit/91e797c75f9addd56e2479beaef24e2ffdd54539
- README.md +1 -1
- dataset_infos.json +0 -0
- xtreme.py +7 -20
README.md
CHANGED
```diff
@@ -86,7 +86,7 @@ task_ids:
 - text-classification-other-paraphrase-identification
 - text-retrieval-other-parallel-sentence-retrieval
 - named-entity-recognition
-- part-of-speech
+- part-of-speech
 paperswithcode_id: xtreme
 configs:
 - MLQA.ar.ar
```
dataset_infos.json
CHANGED
The diff for this file is too large to render; see the raw diff.
xtreme.py
CHANGED
```diff
@@ -697,29 +697,16 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                 if path.startswith(data_dir):
                     csv_content = [line.decode("utf-8") for line in file]
                     if path.endswith("en"):
-                        target_sentences = list(csv.reader(csv_content, delimiter="\t"))
+                        target_sentences = dict(list(csv.reader(csv_content, delimiter="\t", quotechar=None)))
                     elif path.endswith("gold"):
-                        source_target_ids = list(csv.reader(csv_content, delimiter="\t"))
+                        source_target_ids = list(csv.reader(csv_content, delimiter="\t", quotechar=None))
                     else:
-                        source_sentences = list(csv.reader(csv_content, delimiter="\t"))
-            for id_, pair in enumerate(source_target_ids):
-                source_id = pair[0]
-                target_id = pair[1]
-                source_sent = ""
-                target_sent = ""
-                for i in range(len(source_sentences)):
-                    if source_sentences[i][0] == source_id:
-                        source_sent = source_sentences[i][1]
-                        source_id = source_sentences[i][0]
-                        break
-                for j in range(len(target_sentences)):
-                    if target_sentences[j][0] == target_id:
-                        target_sent = target_sentences[j][1]
-                        target_id = target_sentences[j][0]
-                        break
+                        source_sentences = dict(list(csv.reader(csv_content, delimiter="\t", quotechar=None)))
+
+            for id_, (source_id, target_id) in enumerate(source_target_ids):
                 yield id_, {
-                    "source_sentence": source_sent,
-                    "target_sentence": target_sent,
+                    "source_sentence": source_sentences[source_id],
+                    "target_sentence": target_sentences[target_id],
                     "source_lang": source_id,
                     "target_lang": target_id,
                 }
```
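In plain terms, the xtreme.py change does two things: it reads the tab-separated BUCC 2018 files with CSV quote handling disabled (`quotechar=None`), and it keeps the parsed sentences in dicts keyed by sentence id so each gold pair becomes a direct lookup. The sketch below illustrates both points with made-up ids and sentences (not taken from the dataset):

```python
import csv

# Hypothetical bucc18-style TSV lines: sentence id, a tab, then the sentence.
# The first sentence happens to start with an unbalanced double quote.
csv_content = [
    'de-000001\t"Schiffe versenken? Das spielen wir morgen.\n',
    'de-000002\tEin ganz gewoehnlicher zweiter Satz.\n',
]

# With the default quotechar ('"') the reader enters quoted-field mode and
# keeps consuming input past the line break, so the two records collapse
# into one and the id de-000002 never shows up on its own row.
print(list(csv.reader(csv_content, delimiter="\t")))

# With quotechar=None each line is split purely on tabs, as the data intends.
rows = list(csv.reader(csv_content, delimiter="\t", quotechar=None))
print(rows)
# [['de-000001', '"Schiffe versenken? Das spielen wir morgen.'],
#  ['de-000002', 'Ein ganz gewoehnlicher zweiter Satz.']]

# Keeping the parsed rows in a dict keyed by sentence id turns each gold
# (source_id, target_id) pair into a direct lookup, instead of the old
# linear scan that silently fell back to an empty string on a miss.
source_sentences = dict(rows)
print(source_sentences["de-000001"])
```

After the fix, loading one of the bucc18 configs, for example `load_dataset("xtreme", "bucc18.de")`, should produce examples whose `source_sentence` and `target_sentence` fields are no longer empty.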