---
dataset_info:
  features:
  - name: image_id
    dtype: string
  - name: label
    dtype: int32
  - name: clip_model
    dtype: string
  - name: clip_features
    list: float32
  - name: vector_dim
    dtype: int32
  - name: timestamp
    dtype: timestamp[ns]
  splits:
  - name: clip_vit_b32_train
    num_bytes: 2723761042
    num_examples: 1281167
  - name: clip_vit_laion_b32_train
    num_bytes: 2789100559
    num_examples: 1281167
  - name: clip_vit_laion_b32_validation
    num_bytes: 108850000
    num_examples: 50000
  - name: clip_vit_b16_train
    num_bytes: 2777570056
    num_examples: 1281167
  - name: clip_vit_b16_validation
    num_bytes: 108400000
    num_examples: 50000
  - name: clip_vit_l14_train
    num_bytes: 4090766231
    num_examples: 1281167
  - name: clip_vit_l14_validation
    num_bytes: 159650000
    num_examples: 50000
  - name: clip_vit_laion_bigg14_train
    num_bytes: 6728689084
    num_examples: 1281167
  - name: clip_vit_laion_bigg14_validation
    num_bytes: 262600000
    num_examples: 50000
  - name: clip_vit_b32_validation
    num_bytes: 108400000
    num_examples: 50000
  - name: clip_vit_b32_test
    num_bytes: 216800000
    num_examples: 100000
  - name: clip_vit_b16_test
    num_bytes: 216800000
    num_examples: 100000
  - name: clip_vit_laion_b32_test
    num_bytes: 217700000
    num_examples: 100000
  - name: clip_vit_l14_test
    num_bytes: 319300000
    num_examples: 100000
  - name: clip_vit_laion_h14_test
    num_bytes: 422500000
    num_examples: 100000
  download_size: 25438949728
  dataset_size: 21250886972
configs:
- config_name: default
  data_files:
  - split: clip_vit_b32_train
    path: data/clip_vit_b32_train-*
  - split: clip_vit_b32_validation
    path: data/clip_vit_b32_validation-*
  - split: clip_vit_laion_b32_train
    path: data/clip_vit_laion_b32_train-*
  - split: clip_vit_laion_b32_validation
    path: data/clip_vit_laion_b32_validation-*
  - split: clip_vit_b16_train
    path: data/clip_vit_b16_train-*
  - split: clip_vit_b16_validation
    path: data/clip_vit_b16_validation-*
  - split: clip_vit_l14_train
    path: data/clip_vit_l14_train-*
  - split: clip_vit_l14_validation
    path: data/clip_vit_l14_validation-*
  - split: clip_vit_laion_bigg14_train
    path: data/clip_vit_laion_bigg14_train-*
  - split: clip_vit_laion_bigg14_validation
    path: data/clip_vit_laion_bigg14_validation-*
  - split: clip_vit_b32_test
    path: data/clip_vit_b32_test-*
  - split: clip_vit_b16_test
    path: data/clip_vit_b16_test-*
  - split: clip_vit_laion_b32_test
    path: data/clip_vit_laion_b32_test-*
  - split: clip_vit_l14_test
    path: data/clip_vit_l14_test-*
  - split: clip_vit_laion_h14_test
    path: data/clip_vit_laion_h14_test-*
task_categories:
- feature-extraction
---
For academic and research use only.
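As a quick orientation, here is a minimal sketch of loading one of the splits described in the metadata above with the `datasets` library. The repository id is a placeholder (substitute this repo's actual Hub path); the field names follow the feature schema in the frontmatter.

```python
from datasets import load_dataset

# Placeholder repository id -- replace with this dataset's actual Hub path.
REPO_ID = "user/imagenet-clip-features"

# Each CLIP variant currently lives in its own split of the default config,
# e.g. clip_vit_b32_train, clip_vit_b32_validation, clip_vit_l14_test, ...
ds = load_dataset(REPO_ID, split="clip_vit_b32_validation")

row = ds[0]
print(row["image_id"], row["label"], row["clip_model"], row["timestamp"])
# The embedding is stored as a list of float32 values of length vector_dim.
print(row["vector_dim"], len(row["clip_features"]))
```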
Several of the CLIP variants are still missing their "test" split; I'm working through generating those now, so be patient and they'll arrive soon.
Before letting this repo rest, I'll also likely reorganize it into the standard "subset"/"split" paradigm, with each CLIP variant as its own subset and the splits reduced to the usual train/test/validation.