tommasobonomo committed
Commit 2cf1b81 · verified · Parent: 3e03170

Upload bookcoref.py with huggingface_hub
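For context, a commit message like this is typically produced by the huggingface_hub upload helpers. A minimal sketch of such an upload, assuming the standard `HfApi` client; the `repo_id` below is a placeholder, not taken from this page:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

# Upload the loading script to the dataset repository.
# "user/bookcoref" is a placeholder repo_id; substitute the real one.
api.upload_file(
    path_or_fileobj="bookcoref.py",
    path_in_repo="bookcoref.py",
    repo_id="user/bookcoref",
    repo_type="dataset",
    commit_message="Upload bookcoref.py with huggingface_hub",
)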
Files changed (1): bookcoref.py (+33 / −35)
bookcoref.py CHANGED

@@ -145,13 +145,12 @@ class BookCoref(datasets.GeneratorBasedBuilder):
         self,
     ) -> dict[Literal["train", "validation", "test"], dict]:
         # Custom method to load local data files
-        match self.config.name:
-            case "full":
-                data_dir = self.LOCAL_DATA_DIR / "full"
-            case "splitted":
-                data_dir = self.LOCAL_DATA_DIR / "splitted"
-            case _:
-                raise ValueError(f"Unknown config name: {self.config.name}")
+        if self.config.name == "full":
+            data_dir = self.LOCAL_DATA_DIR / "full"
+        elif self.config.name == "splitted":
+            data_dir = self.LOCAL_DATA_DIR / "splitted"
+        else:
+            raise ValueError(f"Unknown config name: {self.config.name}")
 
         data = {}
         for split_name in ["train", "validation", "test"]:
@@ -228,11 +227,12 @@ class BookCoref(datasets.GeneratorBasedBuilder):
         all_split_docs = {}
         for doc_key, sentences in complete_sentences.items():
             split_name = keys_to_split[doc_key]
-            match split_name:
-                case "train" | "validation":
-                    length = 1350
-                case "test":
-                    length = 1500
+            if split_name == "train" or split_name == "validation":
+                length = 1350
+            elif split_name == "test":
+                length = 1500
+            else:
+                raise ValueError(f"Unknown split name: {split_name}")
 
             eos_to_sent = {}
             tokens = []
@@ -315,29 +315,27 @@ class BookCoref(datasets.GeneratorBasedBuilder):
             complete_sentences[key].append(sentences[current_pos : current_pos + length])
             current_pos += length
 
-        match self.config.name:
-            case "full":
-                # If the config is "full", we simply add the sentences to each sample
-                for split, split_data in gutenberg_data.items():
-                    for key, sample in split_data.items():
-                        if "animal_farm" not in sample["doc_key"]:
-                            sample["sentences"] = complete_sentences[key]
-            case "splitted":
-                # If the config is "splitted", we split the sentences into chunks
-                # We also need a mapping from data split (train, validation, test) to gutenberg keys
-                split_keys: dict[str, set[str]] = {
-                    split: set(sample["gutenberg_key"].split("_")[0] for sample in split_data)
-                    for split, split_data in local_data.items()
-                }
-                split_complete_sentences = self._cut_into_split(complete_sentences, split_keys)
-                # Then we add the chunks to each sample
-                for split, split_data in gutenberg_data.items():
-                    for key, sample in split_data.items():
-                        if "animal_farm" not in sample["doc_key"]:
-                            sample["sentences"] = split_complete_sentences[key]
-
-            case _:
-                raise ValueError(f"Unknown config name: {self.config.name}")
+        if self.config.name == "full":
+            # If the config is "full", we simply add the sentences to each sample
+            for split, split_data in gutenberg_data.items():
+                for key, sample in split_data.items():
+                    if "animal_farm" not in sample["doc_key"]:
+                        sample["sentences"] = complete_sentences[key]
+        elif self.config.name == "splitted":
+            # If the config is "splitted", we split the sentences into chunks
+            # We also need a mapping from data split (train, validation, test) to gutenberg keys
+            split_keys: dict[str, set[str]] = {
+                split: set(sample["gutenberg_key"].split("_")[0] for sample in split_data)
+                for split, split_data in local_data.items()
+            }
+            split_complete_sentences = self._cut_into_split(complete_sentences, split_keys)
+            # Then we add the chunks to each sample
+            for split, split_data in gutenberg_data.items():
+                for key, sample in split_data.items():
+                    if "animal_farm" not in sample["doc_key"]:
+                        sample["sentences"] = split_complete_sentences[key]
+        else:
+            raise ValueError(f"Unknown config name: {self.config.name}")
 
         return [
             datasets.SplitGenerator(
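The change replaces `match`/`case` blocks with equivalent `if`/`elif` chains. Since `match` statements require Python ≥ 3.10, this presumably keeps the loading script usable on older interpreters. For reference, a minimal sketch of how the two configs dispatched on `self.config.name` are selected at load time, assuming the standard `datasets` API; the `repo_id` is a placeholder, not taken from this page:

from datasets import load_dataset

# The config name passed here is what reaches self.config.name in the script.
# "user/bookcoref" is a placeholder repo_id; substitute the real one.
full = load_dataset("user/bookcoref", "full", trust_remote_code=True)
splitted = load_dataset("user/bookcoref", "splitted", trust_remote_code=True)

# Any other config name falls through to the `else` branch and raises
# ValueError(f"Unknown config name: {self.config.name}").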