Upload bookcoref.py with huggingface_hub
bookcoref.py  +33 -35

bookcoref.py  CHANGED
@@ -145,13 +145,12 @@ class BookCoref(datasets.GeneratorBasedBuilder):
         self,
     ) -> dict[Literal["train", "validation", "test"], dict]:
         # Custom method to load local data files
-        [6 lines removed; content not rendered on this page]
-        raise ValueError(f"Unknown config name: {self.config.name}")
+        if self.config.name == "full":
+            data_dir = self.LOCAL_DATA_DIR / "full"
+        elif self.config.name == "splitted":
+            data_dir = self.LOCAL_DATA_DIR / "splitted"
+        else:
+            raise ValueError(f"Unknown config name: {self.config.name}")

        data = {}
        for split_name in ["train", "validation", "test"]:
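Usage note: "full" and "splitted" are the two dataset configurations a user selects when loading the dataset. A minimal sketch, assuming the Hub repository id is sapienzanlp/bookcoref (the id is not shown in this commit) and that the loading script is fetched with trust_remote_code:

    from datasets import load_dataset

    # "full": each sample carries the whole book's sentences
    full = load_dataset("sapienzanlp/bookcoref", "full", trust_remote_code=True)

    # "splitted": each book is cut into fixed-length chunks
    splitted = load_dataset("sapienzanlp/bookcoref", "splitted", trust_remote_code=True)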
@@ -228,11 +227,12 @@ class BookCoref(datasets.GeneratorBasedBuilder):
        all_split_docs = {}
        for doc_key, sentences in complete_sentences.items():
            split_name = keys_to_split[doc_key]
-            [5 lines removed; content not rendered on this page]
+            if split_name == "train" or split_name == "validation":
+                length = 1350
+            elif split_name == "test":
+                length = 1500
+            else:
+                raise ValueError(f"Unknown split name: {split_name}")

            eos_to_sent = {}
            tokens = []
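Note on the lengths: 1350 (train/validation) and 1500 (test) appear to be the chunk sizes used when a book is cut into consecutive pieces, matching the sentences[current_pos : current_pos + length] slicing visible at the top of the next hunk. A standalone sketch of that slicing pattern; the helper name and the choice of tokens as the unit are my assumptions, not the script's:

    def slice_into_chunks(items: list[str], length: int) -> list[list[str]]:
        # Illustrative only: cut a long sequence into consecutive chunks
        # of at most `length` items each.
        chunks = []
        current_pos = 0
        while current_pos < len(items):
            chunks.append(items[current_pos : current_pos + length])
            current_pos += length
        return chunks

    book = ["tok"] * 4000                           # stand-in for one book
    assert len(slice_into_chunks(book, 1350)) == 3  # train/validation chunk size
    assert len(slice_into_chunks(book, 1500)) == 3  # test chunk size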
@@ -315,29 +315,27 @@ class BookCoref(datasets.GeneratorBasedBuilder):
                complete_sentences[key].append(sentences[current_pos : current_pos + length])
                current_pos += length

-        [21 lines removed; content not rendered on this page apart from the closing]
-        case _:
-            raise ValueError(f"Unknown config name: {self.config.name}")
+        if self.config.name == "full":
+            # If the config is "full", we simply add the sentences to each sample
+            for split, split_data in gutenberg_data.items():
+                for key, sample in split_data.items():
+                    if "animal_farm" not in sample["doc_key"]:
+                        sample["sentences"] = complete_sentences[key]
+        elif self.config.name == "splitted":
+            # If the config is "splitted", we split the sentences into chunks
+            # We also need a mapping from data split (train, validation, test) to gutenberg keys
+            split_keys: dict[str, set[str]] = {
+                split: set(sample["gutenberg_key"].split("_")[0] for sample in split_data)
+                for split, split_data in local_data.items()
+            }
+            split_complete_sentences = self._cut_into_split(complete_sentences, split_keys)
+            # Then we add the chunks to each sample
+            for split, split_data in gutenberg_data.items():
+                for key, sample in split_data.items():
+                    if "animal_farm" not in sample["doc_key"]:
+                        sample["sentences"] = split_complete_sentences[key]
+        else:
+            raise ValueError(f"Unknown config name: {self.config.name}")

        return [
            datasets.SplitGenerator(
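Note on split_keys: the comprehension in the "splitted" branch maps each data split to the set of Gutenberg id prefixes of its documents, presumably so that _cut_into_split can route each book's chunks to the split it belongs to. A hypothetical illustration; the gutenberg_key values below are invented, and only the "<id>_<suffix>" shape is assumed from the .split("_")[0] call:

    local_data = {
        "train": [{"gutenberg_key": "1342_0"}, {"gutenberg_key": "84_0"}],
        "validation": [{"gutenberg_key": "174_0"}],
        "test": [{"gutenberg_key": "2701_0"}],
    }
    split_keys = {
        split: set(sample["gutenberg_key"].split("_")[0] for sample in split_data)
        for split, split_data in local_data.items()
    }
    # split_keys == {"train": {"1342", "84"}, "validation": {"174"}, "test": {"2701"}}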