AbstractPhil's picture
Update README.md
805da2c verified
metadata
# Hugging Face dataset-card front matter: per-example schema, split sizes,
# and data-file globs for precomputed CLIP feature vectors.
dataset_info:
  # Schema: one record per (image, CLIP model) feature extraction.
  features:
    - name: image_id
      dtype: string
    - name: label
      # Integer class label (presumably ImageNet-1k indices, given the
      # 1,281,167 / 50,000 train/validation counts below — TODO confirm).
      dtype: int32
    - name: clip_model
      # Identifier of the CLIP backbone that produced clip_features.
      dtype: string
    - name: clip_features
      # Embedding vector; its length is recorded separately in vector_dim.
      list: float32
    - name: vector_dim
      dtype: int32
    - name: timestamp
      dtype: timestamp[ns]
  # One split per (CLIP backbone, partition), named clip_<arch>_<partition>.
  # NOTE(review): clip_vit_laion_h14 appears only as a test split, and
  # clip_vit_laion_bigg14 has no test split — consistent with the prose
  # below saying test-split generation is still in progress.
  splits:
    - name: clip_vit_b32_train
      num_bytes: 2723761042
      num_examples: 1281167
    - name: clip_vit_laion_b32_train
      num_bytes: 2789100559
      num_examples: 1281167
    - name: clip_vit_laion_b32_validation
      num_bytes: 108850000
      num_examples: 50000
    - name: clip_vit_b16_train
      num_bytes: 2777570056
      num_examples: 1281167
    - name: clip_vit_b16_validation
      num_bytes: 108400000
      num_examples: 50000
    - name: clip_vit_l14_train
      num_bytes: 4090766231
      num_examples: 1281167
    - name: clip_vit_l14_validation
      num_bytes: 159650000
      num_examples: 50000
    - name: clip_vit_laion_bigg14_train
      num_bytes: 6728689084
      num_examples: 1281167
    - name: clip_vit_laion_bigg14_validation
      num_bytes: 262600000
      num_examples: 50000
    - name: clip_vit_b32_validation
      num_bytes: 108400000
      num_examples: 50000
    - name: clip_vit_b32_test
      num_bytes: 216800000
      num_examples: 100000
    - name: clip_vit_b16_test
      num_bytes: 216800000
      num_examples: 100000
    - name: clip_vit_laion_b32_test
      num_bytes: 217700000
      num_examples: 100000
    - name: clip_vit_l14_test
      num_bytes: 319300000
      num_examples: 100000
    - name: clip_vit_laion_h14_test
      num_bytes: 422500000
      num_examples: 100000
  # NOTE(review): download_size exceeds dataset_size here; usually the
  # compressed download is smaller — verify these were not swapped.
  download_size: 25438949728
  dataset_size: 21250886972
# Single default config exposing every split; each split's shards live
# under data/ and are matched by the trailing-glob patterns.
configs:
  - config_name: default
    data_files:
      - split: clip_vit_b32_train
        path: data/clip_vit_b32_train-*
      - split: clip_vit_b32_validation
        path: data/clip_vit_b32_validation-*
      - split: clip_vit_laion_b32_train
        path: data/clip_vit_laion_b32_train-*
      - split: clip_vit_laion_b32_validation
        path: data/clip_vit_laion_b32_validation-*
      - split: clip_vit_b16_train
        path: data/clip_vit_b16_train-*
      - split: clip_vit_b16_validation
        path: data/clip_vit_b16_validation-*
      - split: clip_vit_l14_train
        path: data/clip_vit_l14_train-*
      - split: clip_vit_l14_validation
        path: data/clip_vit_l14_validation-*
      - split: clip_vit_laion_bigg14_train
        path: data/clip_vit_laion_bigg14_train-*
      - split: clip_vit_laion_bigg14_validation
        path: data/clip_vit_laion_bigg14_validation-*
      - split: clip_vit_b32_test
        path: data/clip_vit_b32_test-*
      - split: clip_vit_b16_test
        path: data/clip_vit_b16_test-*
      - split: clip_vit_laion_b32_test
        path: data/clip_vit_laion_b32_test-*
      - split: clip_vit_l14_test
        path: data/clip_vit_l14_test-*
      - split: clip_vit_laion_h14_test
        path: data/clip_vit_laion_h14_test-*
# Hub task tag for discoverability.
task_categories:
  - feature-extraction

Academic and research use only.

Many of the CLIP model variants are still missing their "test" split, so I'm working out the generations for those. Be patient and they'll arrive soon.

I'll likely also reformat this repo into a standard "subset"/"split" paradigm — letting each CLIP model type exist as its own subset, with the standard train/test/validation splits — before I let this repo rest.