Dataset Viewer
The dataset viewer is not available for this split.
Cannot extract the features (columns) for the split 'train' of the config 'default' of the dataset.
Error code:   FeaturesError
Exception:    ArrowInvalid
Message:      Schema at index 1 was different: 
name: string
backend: struct<name: string, version: string, _target_: string, task: string, library: string, model_type: string, model: string, processor: string, device: string, device_ids: null, seed: int64, inter_op_num_threads: null, intra_op_num_threads: null, model_kwargs: struct<>, processor_kwargs: struct<>, no_weights: bool, export: null, use_cache: null, use_merged: null, load_in_8bit: null, load_in_4bit: null, ov_config: struct<>, half: bool, compile: bool, reshape: bool, reshape_kwargs: struct<batch_size: int64, sequence_length: int64>>
scenario: struct<name: string, _target_: string, iterations: int64, duration: int64, warmup_runs: int64, input_shapes: struct<batch_size: int64, sequence_length: int64>, new_tokens: null, memory: bool, latency: bool, energy: bool, forward_kwargs: struct<>, generate_kwargs: struct<max_new_tokens: int64, min_new_tokens: int64>, call_kwargs: struct<num_inference_steps: int64>>
launcher: struct<name: string, _target_: string, device_isolation: bool, device_isolation_action: null, numactl: bool, numactl_kwargs: struct<cpunodebind: int64, membind: int64>, start_method: string>
environment: struct<cpu: string, cpu_count: int64, cpu_ram_mb: double, system: string, machine: string, platform: string, processor: string, python_version: string, optimum_benchmark_version: string, optimum_benchmark_commit: null, transformers_version: string, transformers_commit: null, accelerate_version: string, accelerate_commit: null, diffusers_version: string, diffusers_commit: null, optimum_version: string, optimum_commit: null, timm_version: null, timm_commit: null, peft_version: null, peft_commit: null>
print_report: bool
log_report: bool
vs
load_model: struct<memory: struct<unit: string, max_ram: double, max_global_vram: null, max_process_vram: null, max_reserved: null, max_allocated: null>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: int64, stdev_: int64>, throughput: null, energy: null, efficiency: null>
prefill: struct<memory: struct<unit: string, max_ram: double, max_global_vram: null, max_process_vram: null, max_reserved: null, max_allocated: null>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: null, efficiency: null>
decode: struct<memory: struct<unit: string, max_ram: double, max_global_vram: null, max_process_vram: null, max_reserved: null, max_allocated: null>, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: struct<unit: string, value: double>, energy: null, efficiency: null>
per_token: struct<memory: null, latency: struct<unit: string, values: list<item: double>, count: int64, total: double, mean: double, p50: double, p90: double, p95: double, p99: double, stdev: double, stdev_: double>, throughput: null, energy: null, efficiency: null>
Traceback:    Traceback (most recent call last):
                File "/src/services/worker/src/worker/job_runners/split/first_rows.py", line 231, in compute_first_rows_from_streaming_response
                  iterable_dataset = iterable_dataset._resolve_features()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 3335, in _resolve_features
                  features = _infer_features_from_batch(self.with_format(None)._head())
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 2096, in _head
                  return next(iter(self.iter(batch_size=n)))
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 2296, in iter
                  for key, example in iterator:
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 1856, in __iter__
                  for key, pa_table in self._iter_arrow():
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 1878, in _iter_arrow
                  yield from self.ex_iterable._iter_arrow()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 536, in _iter_arrow
                  yield new_key, pa.Table.from_batches(chunks_buffer)
                File "pyarrow/table.pxi", line 4116, in pyarrow.lib.Table.from_batches
                File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
                File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
              pyarrow.lib.ArrowInvalid: Schema at index 1 was different: 
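
The failure happens in the last step of the traceback: datasets streams the split's files as Arrow record batches and then calls pa.Table.from_batches, which requires every batch to share one schema. The split appears to mix two kinds of JSON records, a benchmark configuration (name, backend, scenario, launcher, environment, print_report, log_report) and a benchmark report (load_model, prefill, decode, per_token), so the second schema is rejected. A minimal sketch of the same error, using made-up columns in place of the full structs above:

import pyarrow as pa

# Two record batches whose columns differ, standing in for the config-style
# and report-style records shown in the schema comparison above.
config_batch = pa.RecordBatch.from_pydict({"name": ["example"], "print_report": [True]})
report_batch = pa.RecordBatch.from_pydict({"load_model": [1.23], "prefill": [0.45]})

# Table.from_batches takes the first batch's schema as the reference, so the
# mismatched second batch raises ArrowInvalid: "Schema at index 1 was different".
try:
    pa.Table.from_batches([config_batch, report_batch])
except pa.ArrowInvalid as err:
    print(err)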

Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
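
One way to sidestep the mismatch when loading the data yourself is to keep the two record types in separate splits (or separate configs) instead of one mixed split. A sketch using the datasets JSON loader, with hypothetical file names since the dataset's actual file layout is not shown above:

from datasets import load_dataset

# Hypothetical paths; substitute the dataset's real config and report files.
data_files = {
    "config": "benchmark_config.json",
    "report": "benchmark_report.json",
}

# Each split is read into its own Arrow table, so the two schemas never have
# to be merged and the ArrowInvalid error above does not occur.
ds = load_dataset("json", data_files=data_files)
print(ds)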
