Problem when changing the default builder config to connect_all
When running the following code I get an error that conversion_method is not a valid keyword argument for the builder config.
code:
from pie_datasets import load_dataset
dataset = load_dataset(path="pie/aae2", conversion_method = "connect_all")
output:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[1], line 3
1 from pie_datasets import load_dataset, builders
----> 3 dataset = load_dataset(path="pie/aae2", conversion_method = "connect_all")
4 train_docs = dataset["train"]
File ~/Library/Caches/pypoetry/virtualenvs/Z580gx8g-py3.11/lib/python3.11/site-packages/pie_datasets/core/dataset_dict.py:699, in load_dataset(*args, **kwargs)
698 def load_dataset(*args, **kwargs) -> Union[DatasetDict, Dataset, IterableDataset]:
--> 699 dataset_or_dataset_dict = datasets.load_dataset(*args, **kwargs)
700 if isinstance(dataset_or_dataset_dict, (Dataset, IterableDataset)):
701 return dataset_or_dataset_dict
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/datasets/load.py:2128, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, **config_kwargs)
2123 verification_mode = VerificationMode(
2124 (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS
2125 )
2127 # Create a dataset builder
-> 2128 builder_instance = load_dataset_builder(
2129 path=path,
2130 name=name,
2131 data_dir=data_dir,
2132 data_files=data_files,
2133 cache_dir=cache_dir,
2134 features=features,
2135 download_config=download_config,
2136 download_mode=download_mode,
2137 revision=revision,
2138 token=token,
2139 storage_options=storage_options,
2140 **config_kwargs,
2141 )
2143 # Return iterable dataset in case of streaming
2144 if streaming:
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/datasets/load.py:1851, in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, token, use_auth_token, storage_options, **config_kwargs)
1849 builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name)
1850 # Instantiate the dataset builder
-> 1851 builder_instance: DatasetBuilder = builder_cls(
1852 cache_dir=cache_dir,
1853 dataset_name=dataset_name,
1854 config_name=config_name,
1855 data_dir=data_dir,
1856 data_files=data_files,
1857 hash=hash,
1858 info=info,
1859 features=features,
1860 token=token,
1861 storage_options=storage_options,
1862 **builder_kwargs,
1863 **config_kwargs,
1864 )
1866 return builder_instance
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/pie_datasets/core/builder.py:129, in PieDatasetBuilder.__init__(self, base_dataset_kwargs, document_converters, **kwargs)
125 # set base path to base builder base path. This is required so that the download manager
126 # works correctly with relative paths.
127 kwargs["base_path"] = self.base_builder.base_path
--> 129 super().__init__(**kwargs)
131 self._document_converters = dict(self.DOCUMENT_CONVERTERS)
132 if document_converters is not None:
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/datasets/builder.py:373, in DatasetBuilder.__init__(self, cache_dir, dataset_name, config_name, hash, base_path, info, features, token, use_auth_token, repo_id, data_files, data_dir, storage_options, writer_batch_size, name, **config_kwargs)
371 if data_dir is not None:
372 config_kwargs["data_dir"] = data_dir
--> 373 self.config, self.config_id = self._create_builder_config(
374 config_name=config_name,
375 custom_features=features,
376 **config_kwargs,
377 )
379 # prepare info: DatasetInfo are a standardized dataclass across all datasets
380 # Prefill datasetinfo
381 if info is None:
382 # TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/datasets/builder.py:554, in DatasetBuilder._create_builder_config(self, config_name, custom_features, **config_kwargs)
552 config_kwargs["version"] = self.VERSION
553 print(self.BUILDER_CONFIG_CLASS)
--> 554 builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
556 # otherwise use the config_kwargs to overwrite the attributes
557 else:
558 builder_config = copy.deepcopy(builder_config)
TypeError: BuilderConfig.__init__() got an unexpected keyword argument 'conversion_method'
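(For context: a TypeError like this usually means that the builder's BUILDER_CONFIG_CLASS does not declare the keyword as a dataclass field. Below is a minimal sketch of a config that would accept it, following the standard datasets dataclass pattern; ExampleConfig is hypothetical, not the actual aae2 config.)
import dataclasses
import datasets

@dataclasses.dataclass
class ExampleConfig(datasets.BuilderConfig):
    # declaring the field makes conversion_method a valid __init__ keyword
    conversion_method: str = "connect_first"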
Hi @taghizadeh,
I implemented this fix: https://huggingface.co/datasets/pie/aae2/discussions/2
Can you try if this works for you, i.e. passing revision="pr/2" to load_dataset?
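The call should then look like this (your snippet from above, plus the pr/2 revision):
from pie_datasets import load_dataset
dataset = load_dataset(path="pie/aae2", conversion_method="connect_all", revision="pr/2")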
Yes, thank you. load_dataset works fine now, but when trying to access the samples in the train or test split I get the following error:
sample = dataset['train'][0]
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[8], line 1
----> 1 dataset['test'][0]
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/datasets/arrow_dataset.py:2795, in Dataset.__getitem__(self, key)
2793 def __getitem__(self, key): # noqa: F811
2794 """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools)."""
-> 2795 return self._getitem(key)
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/datasets/arrow_dataset.py:2780, in Dataset._getitem(self, key, **kwargs)
2778 formatter = get_formatter(format_type, features=self._info.features, **format_kwargs)
2779 pa_subtable = query_table(self._data, key, indices=self._indices if self._indices is not None else None)
-> 2780 formatted_output = format_table(
2781 pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns
2782 )
2783 return formatted_output
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/datasets/formatting/formatting.py:629, in format_table(table, key, formatter, format_columns, output_all_columns)
627 python_formatter = PythonFormatter(features=formatter.features)
628 if format_columns is None:
--> 629 return formatter(pa_table, query_type=query_type)
630 elif query_type == "column":
631 if key in format_columns:
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/datasets/formatting/formatting.py:396, in Formatter.__call__(self, pa_table, query_type)
394 def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
395 if query_type == "row":
--> 396 return self.format_row(pa_table)
397 elif query_type == "column":
398 return self.format_column(pa_table)
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/pie_datasets/core/document_formatter.py:15, in DocumentFormatter.format_row(self, pa_table)
13 def format_row(self, pa_table: pa.Table) -> Document:
14 row = self.python_arrow_extractor().extract_row(pa_table)
---> 15 return self.document_type.fromdict(row)
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/pytorch_ie/core/document.py:719, in Document.fromdict(cls, dct)
717 annotation_id = annotation_dict.pop("_id")
718 # annotations can only reference annotations
--> 719 annotation = annotation_class.fromdict(annotation_dict, annotations)
720 annotations[annotation_id] = annotation
721 annotations_per_field[field.name].append(annotation)
File ~/Library/Caches/pypoetry/virtualenvs/minig-Z580gx8g-py3.11/lib/python3.11/site-packages/pytorch_ie/core/document.py:306, in Annotation.fromdict(cls, dct, annotation_store)
303 raise Exception(f"unknown annotation container type: {container_type}")
305 tmp_dct.pop("_id", None)
--> 306 return cls(**tmp_dct)
TypeError: LabeledSpan.__init__() got an unexpected keyword argument 'slices'
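(For reference: LabeledSpan from pytorch_ie is constructed from start, end, and label, so a serialized row carrying a slices key cannot be deserialized into it. A minimal example of the expected constructor arguments, unrelated to the aae2 code:)
from pytorch_ie.annotations import LabeledSpan

# LabeledSpan takes character offsets and a label; there is no `slices` argument
span = LabeledSpan(start=0, end=5, label="Claim")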
ok, I added another PR with a fix: https://huggingface.co/datasets/pie/aae2/discussions/3. Can you try with revision="pr/3"?
Sorry for the delay. Yep, it works now.
Another issue: when I use connect_first or connect_all, the relations of the claims are not appended to the premise relations, as far as I can see in the code.
What exactly do you mean? Can you give me a pointer to the respective lines of code and explain what you expect instead?
Well, we expect that if we use the connect_first argument, claims will be connected to the first MajorClaim, but when getting the relations from a document I only get the relations between premises and claims. The same holds for the semantically-same relations.
The line of related code:
https://huggingface.co/datasets/pie/aae2/blob/1015ee38bd8a36549b344008f7a49af72956a7fe/aae2.py#L79
For example, I loaded the first document: it has 6 relations over 11 spans. For 11 spans there must be at least 10 relations if we consider all of them connected.
I assume you did not trigger the document conversion. The idea is that when using load_dataset(), the data is loaded as it is, without any semantic changes. Only when converting it, either by calling dataset.to_document_type(ONE_OF_THE_KEYS_IN_THE_DOCUMENT_CONVERTERS) (see the documentation or the source code for the predefined converters) or dataset.map(function), do the semantics change (e.g. new relations are added).
from pie_datasets import load_dataset, DatasetDict
from pytorch_ie.documents import TextDocumentWithLabeledSpansAndBinaryRelations
dataset: DatasetDict = load_dataset("pie/aae2", conversion_method="connect_all", revision="pr/3")
# convert the dataset. this applies the configured conversion_method
# (connect_all or connect_first) to add the claim relations
dataset_converted = dataset.to_document_type(TextDocumentWithLabeledSpansAndBinaryRelations)
doc: TextDocumentWithLabeledSpansAndBinaryRelations = dataset_converted["train"][0]
print(len(doc.binary_relations)) # 12
print(len(doc.labeled_spans)) # 11
# get resolved relations for better debugging (requires pytorch-ie>=0.30.2)
resolved_relations = doc.binary_relations.resolve()
print(resolved_relations)
Does this help?
You are right. My apologies for not reading the documentation carefully.
Cool :) I merged https://huggingface.co/datasets/pie/aae2/discussions/3. Do you think we can close this?
Yes thank you.