Commit · 85dd753
1 Parent(s): fb719ac
Migrate to modern Hugging Face dataset format

- Remove deprecated Python loading script (multimodal-ai-taxonomy.py)
- Remove auto-generated dataset_infos.json
- Add JSONL data files for direct dataset loading
- Update README with new data structure and usage examples
- Add conversion scripts to gitignore

This change enables the dataset to work with current versions of the
datasets library, which no longer support custom loading scripts.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <[email protected]>
- .gitignore +4 -0
- README.md +74 -68
- data/taxonomy_3d-model_creation.jsonl +2 -0
- data/taxonomy_audio_creation.jsonl +3 -0
- data/taxonomy_audio_editing.jsonl +2 -0
- data/taxonomy_image_creation.jsonl +1 -0
- data/taxonomy_image_editing.jsonl +1 -0
- data/taxonomy_video_creation.jsonl +11 -0
- data/taxonomy_video_editing.jsonl +2 -0
- data/train.jsonl +22 -0
- dataset_infos.json +0 -96
- multimodal-ai-taxonomy.py +0 -217
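With the loading script removed, current releases of the `datasets` library read the JSONL files directly; a minimal sketch of the migrated loading path, assuming the `danielrosehill/multimodal-ai-taxonomy` hub id used in the updated README examples:

```python
from datasets import load_dataset

# The library discovers data/*.jsonl on its own; no custom loading script runs.
dataset = load_dataset("danielrosehill/multimodal-ai-taxonomy", split="train")
print(len(dataset), dataset.column_names)
```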
.gitignore
CHANGED

@@ -47,3 +47,7 @@ htmlcov/
 
 # Hugging Face
 dataset_infos.json.lock
+
+# Conversion scripts (utilities, not part of distribution)
+convert_to_modern_format.py
+create_splits.py
README.md
CHANGED

@@ -49,8 +49,21 @@ This is a reference taxonomy dataset for:
 
 ## Dataset Structure
 
-The …
+The dataset is provided as JSONL files (JSON Lines format) for efficient loading:
 
+```
+data/
+├── train.jsonl                      # Complete dataset
+├── taxonomy_video_creation.jsonl    # Video creation modalities
+├── taxonomy_video_editing.jsonl     # Video editing modalities
+├── taxonomy_audio_creation.jsonl    # Audio creation modalities
+├── taxonomy_audio_editing.jsonl     # Audio editing modalities
+├── taxonomy_image_creation.jsonl    # Image creation modalities
+├── taxonomy_image_editing.jsonl     # Image editing modalities
+└── taxonomy_3d-model_creation.jsonl # 3D creation modalities
+```
+
+Source taxonomy files (used for generation):
 ```
 taxonomy/
 ├── schema.json # Common schema definition

@@ -74,68 +87,48 @@ taxonomy/
 
 ### Data Instances
 
-Each modality entry contains:
+Each modality entry in the JSONL files contains flattened fields:
 
 ```json
 {
   "id": "img-to-vid-lipsync-text",
   "name": "Image to Video (Lip Sync from Text)",
-  "input": {
-    "primary": "image",
-    "secondary": ["text"]
-  },
-  "output": {
-    "primary": "video",
-    "audio": true,
-    "audioType": "speech"
-  },
-  "characteristics": {
-    "processType": "synthesis",
-    "audioGeneration": "text-to-speech",
-    "audioPrompting": "text-based",
-    "lipSync": true,
-    "lipSyncMethod": "generated-from-text",
-    "motionType": "facial"
-  },
-  "metadata": {
-    "maturityLevel": "mature",
-    "commonUseCases": [
-      "Avatar creation",
-      "Character animation from portrait",
-      "Marketing personalization"
-    ],
-    "platforms": ["Replicate", "FAL AI", "HeyGen"],
-    "exampleModels": ["Wav2Lip", "SadTalker", "DreamTalk"]
-  }
+  "input_primary": "image",
+  "input_secondary": ["text"],
+  "output_primary": "video",
+  "output_audio": true,
+  "output_audio_type": "speech",
+  "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"text-to-speech\", \"lipSync\": true, \"motionType\": \"facial\"}",
+  "metadata_maturity_level": "mature",
+  "metadata_common_use_cases": ["Avatar creation", "Character animation from portrait"],
+  "metadata_platforms": ["Replicate", "FAL AI", "HeyGen"],
+  "metadata_example_models": ["Wav2Lip", "SadTalker", "DreamTalk"],
+  "relationships": "{}",
+  "output_modality": "video",
+  "operation_type": "creation"
 }
 ```
 
+Note: The `characteristics` and `relationships` fields are JSON strings that should be parsed when needed.
+
+### Data Fields
 
-- `fileType`: Always "multimodal-ai-taxonomy"
-- `outputModality`: The primary output type (video, audio, image, text, 3d-model)
-- `operationType`: Either "creation" or "editing"
-- `description`: Human-readable description of the file contents
-- `modalities`: Array of modality objects
 
-**…**
+**JSONL record fields:**
 - `id` (string): Unique identifier in kebab-case
 - `name` (string): Human-readable name
-- `input` (object): …
-- `output` (object): …
-- `characteristics` (object): …
-- `metadata` (object): …
-- `relationships` (object, optional): Links to related modalities
+- `input_primary` (string): Main input modality
+- `input_secondary` (list of strings): Additional optional inputs
+- `output_primary` (string): Main output modality
+- `output_audio` (boolean): Whether audio is included (for video outputs)
+- `output_audio_type` (string): Type of audio (speech, music, ambient, etc.)
+- `characteristics` (JSON string): Modality-specific features (parse with json.loads)
+- `metadata_maturity_level` (string): experimental, emerging, or mature
+- `metadata_common_use_cases` (list of strings): Typical use cases
+- `metadata_platforms` (list of strings): Platforms supporting this modality
+- `metadata_example_models` (list of strings): Example model implementations
+- `relationships` (JSON string): Links to related modalities (parse with json.loads)
+- `output_modality` (string): The primary output type (video, audio, image, text, 3d-model)
+- `operation_type` (string): Either "creation" or "editing"
 
 ### Data Splits

@@ -228,28 +221,32 @@ For detailed contribution guidelines, see `taxonomy/README.md`.
 
 ```python
 from datasets import load_dataset
+import json
 
 # Load the entire taxonomy
-dataset = load_dataset("…
+dataset = load_dataset("danielrosehill/multimodal-ai-taxonomy", split="train")
 
-# …
+# The dataset is now a flat structure - iterate through records
+for record in dataset:
+    print(f"{record['name']}: {record['output_modality']} {record['operation_type']}")
 ```
 
 ### Filtering by Characteristics
 
 ```python
 import json
+from datasets import load_dataset
 
-# …
-with open("taxonomy/video-generation/creation/modalities.json") as f:
-    data = json.load(f)
+# Load dataset
+dataset = load_dataset("danielrosehill/multimodal-ai-taxonomy", split="train")
 
-lipsync_modalities = [
-    …
-]
+# Find all video generation modalities with lip sync
+lipsync_modalities = []
+for record in dataset:
+    if record['output_modality'] == 'video' and record['operation_type'] == 'creation':
+        characteristics = json.loads(record['characteristics'])
+        if characteristics.get('lipSync'):
+            lipsync_modalities.append(record)
 
 for modality in lipsync_modalities:
     print(f"{modality['name']}: {modality['id']}")

@@ -258,14 +255,23 @@ for modality in lipsync_modalities:
 
 ### Finding Models by Use Case
 
 ```python
+from datasets import load_dataset
 
-with open("taxonomy/image-generation/creation/modalities.json") as f:
-    data = json.load(f)
-mature_image_gen = [
-    …
+# Load dataset
+dataset = load_dataset("danielrosehill/multimodal-ai-taxonomy", split="train")
+
+# Find mature image generation methods
+mature_image_gen = [
+    record for record in dataset
+    if record['output_modality'] == 'image'
+    and record['operation_type'] == 'creation'
+    and record['metadata_maturity_level'] == 'mature'
 ]
+
+for method in mature_image_gen:
+    print(f"{method['name']}")
+    print(f"  Platforms: {', '.join(method['metadata_platforms'])}")
+    print(f"  Models: {', '.join(method['metadata_example_models'])}")
 ```
 
 ## Contact
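The per-category files added below can also be loaded on their own through the generic `json` builder; a minimal sketch, assuming a local checkout with the `data/` layout from this commit:

```python
from datasets import load_dataset

# Load only the video-creation slice of the taxonomy (11 records in this commit).
video_creation = load_dataset(
    "json",
    data_files="data/taxonomy_video_creation.jsonl",
    split="train",
)
print(video_creation.num_rows)
```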
data/taxonomy_3d-model_creation.jsonl
ADDED
@@ -0,0 +1,2 @@
{"id": "text-to-3d", "name": "Text to 3D Model", "input_primary": "text", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-synthesis\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["3D asset generation", "Rapid prototyping", "Game asset creation"], "metadata_platforms": ["Replicate", "Meshy", "3DFY"], "metadata_example_models": ["Point-E", "Shap-E", "DreamFusion"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
{"id": "img-to-3d", "name": "Image to 3D Model", "input_primary": "image", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-reconstruction\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["3D reconstruction", "Object digitization", "Asset creation from photos"], "metadata_platforms": ["Replicate", "Meshy", "Luma AI"], "metadata_example_models": ["Zero-1-to-3", "Wonder3D"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
data/taxonomy_audio_creation.jsonl
ADDED
@@ -0,0 +1,3 @@
{"id": "text-to-audio", "name": "Text to Audio", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"general\", \"audioCategories\": [\"speech\", \"sound-effects\", \"music\", \"ambient\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Sound effect generation", "Voiceover creation", "Audio asset production"], "metadata_platforms": ["Replicate", "ElevenLabs", "AudioCraft"], "metadata_example_models": ["AudioGen", "MusicGen"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
{"id": "text-to-speech", "name": "Text to Speech", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"speech\", \"voiceCloning\": false}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Narration", "Accessibility", "Voice assistants"], "metadata_platforms": ["ElevenLabs", "Google Cloud", "Azure", "AWS"], "metadata_example_models": ["ElevenLabs", "Google WaveNet", "Azure Neural TTS"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
{"id": "text-to-music", "name": "Text to Music", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"music\", \"melodic\": true}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Background music generation", "Musical composition", "Soundtrack creation"], "metadata_platforms": ["Replicate", "Stability AI"], "metadata_example_models": ["MusicGen", "Stable Audio"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
data/taxonomy_audio_editing.jsonl
ADDED
@@ -0,0 +1,2 @@
{"id": "audio-to-audio-inpainting", "name": "Audio to Audio (Inpainting)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"inpainting\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Audio editing", "Sound design", "Audio restoration"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
{"id": "music-to-music-inpainting", "name": "Music to Music (Inpainting)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"inpainting\", \"modification\": \"selective-editing\", \"melodic\": true, \"audioSubtype\": \"music\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Music editing", "Compositional modifications", "Arrangement changes"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{\"parent\": \"audio-to-audio-inpainting\", \"note\": \"Music inpainting is a specialized subset of audio inpainting\"}", "output_modality": "audio", "operation_type": "editing"}
data/taxonomy_image_creation.jsonl
ADDED
@@ -0,0 +1 @@
{"id": "text-to-img", "name": "Text to Image", "input_primary": "text", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Concept art generation", "Product mockups", "Marketing assets"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney", "DALL-E"], "metadata_example_models": ["Stable Diffusion", "DALL-E 3", "Midjourney"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
data/taxonomy_image_editing.jsonl
ADDED
@@ -0,0 +1 @@
{"id": "img-to-img", "name": "Image to Image", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"enhancement\", \"editing\", \"inpainting\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image editing", "Style transfer", "Image enhancement", "Object removal/addition"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney"], "metadata_example_models": ["Stable Diffusion img2img", "ControlNet"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
data/taxonomy_video_creation.jsonl
ADDED
@@ -0,0 +1,11 @@
{"id": "img-to-vid-no-audio", "name": "Image to Video (No Audio)", "input_primary": "image", "input_secondary": [], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"none\", \"lipSync\": false, \"motionType\": \"general\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Static image animation", "Product visualization", "Concept previsualization"], "metadata_platforms": ["Replicate", "FAL AI", "Stability AI"], "metadata_example_models": ["Stable Video Diffusion", "AnimateDiff"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "img-to-vid-ambient-audio", "name": "Image to Video (Ambient Audio)", "input_primary": "image", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "ambient", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"synthesized\", \"audioPrompting\": \"text-based\", \"lipSync\": false, \"motionType\": \"general\", \"audioCharacteristics\": [\"background\", \"environmental\", \"atmospheric\"]}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Scene ambiance creation", "Marketplace atmosphere", "Environmental storytelling"], "metadata_platforms": ["FAL AI", "Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "img-to-vid-lipsync-text", "name": "Image to Video (Lip Sync from Text)", "input_primary": "image", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"text-to-speech\", \"audioPrompting\": \"text-based\", \"lipSync\": true, \"lipSyncMethod\": \"generated-from-text\", \"motionType\": \"facial\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Avatar creation", "Character animation from portrait", "Marketing personalization"], "metadata_platforms": ["Replicate", "FAL AI", "HeyGen"], "metadata_example_models": ["Wav2Lip", "SadTalker", "DreamTalk"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "img-to-vid-lipsync-audio", "name": "Image to Video (Lip Sync from Audio)", "input_primary": "image", "input_secondary": ["audio"], "output_primary": "video", "output_audio": true, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"reference-based\", \"audioPrompting\": \"audio-reference\", \"lipSync\": true, \"lipSyncMethod\": \"audio-driven\", \"motionType\": \"facial\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Voice cloning with video", "Dubbing and localization", "Podcast video generation"], "metadata_platforms": ["Replicate", "FAL AI"], "metadata_example_models": ["Wav2Lip", "SadTalker"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "img-to-vid-lipsync-lora", "name": "Image to Video (Lip Sync with LoRA Character)", "input_primary": "image", "input_secondary": ["text", "lora-model"], "output_primary": "video", "output_audio": true, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"text-to-speech\", \"audioPrompting\": \"text-based\", \"lipSync\": true, \"lipSyncMethod\": \"generated-from-text\", \"characterReference\": \"lora\", \"motionType\": \"facial\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Consistent character animation", "Brand mascot videos", "Personalized avatars"], "metadata_platforms": ["Specialized services"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "text-to-vid-no-audio", "name": "Text to Video (No Audio)", "input_primary": "text", "input_secondary": [], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"none\", \"motionType\": \"general\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Concept visualization", "Storyboarding", "Creative exploration"], "metadata_platforms": ["Replicate", "FAL AI", "RunwayML"], "metadata_example_models": ["ModelScope", "ZeroScope"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "text-to-vid-with-audio", "name": "Text to Video (With Audio)", "input_primary": "text", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "synchronized", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"synthesized\", \"audioPrompting\": \"text-based\", \"audioVideoSync\": true, \"motionType\": \"general\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Complete scene generation", "Multimedia storytelling"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "audio-to-vid", "name": "Audio to Video", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"synthesis\", \"audioVisualization\": true, \"motionType\": \"audio-reactive\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Music visualization", "Audio-reactive art", "Podcast video generation"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "multimodal-img-audio-to-vid", "name": "Image + Audio to Video", "input_primary": "image", "input_secondary": ["audio"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"reference-based\", \"motionType\": \"audio-driven\", \"lipSync\": false}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Audio-driven animation", "Dance video generation", "Music-driven motion"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "multimodal-text-img-to-vid", "name": "Text + Image to Video", "input_primary": "text", "input_secondary": ["image"], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"guidanceType\": \"text-and-visual\", \"motionType\": \"guided\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Guided video generation", "Controlled animation", "Reference-based video creation"], "metadata_platforms": ["Replicate", "FAL AI"], "metadata_example_models": ["AnimateDiff with ControlNet"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "3d-to-vid", "name": "3D Model to Video", "input_primary": "3d-model", "input_secondary": [], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"rendering\", \"renderType\": \"3d-rendering\", \"motionType\": \"camera-path\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["3D visualization", "Product rendering", "Architectural visualization"], "metadata_platforms": ["Blender", "Unreal Engine", "Unity"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
data/taxonomy_video_editing.jsonl
ADDED
@@ -0,0 +1,2 @@
{"id": "vid-to-vid-no-audio", "name": "Video to Video (No Audio)", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"motion-modification\", \"object-editing\"], \"preserveAudio\": false}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video style transfer", "Video editing", "Motion manipulation"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": ["Gen-2", "Video ControlNet"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
{"id": "vid-to-vid-preserve-audio", "name": "Video to Video (Preserve Audio)", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"motion-modification\", \"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video style transfer with audio", "Content transformation maintaining soundtrack"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
data/train.jsonl
ADDED
@@ -0,0 +1,22 @@
{"id": "img-to-vid-no-audio", "name": "Image to Video (No Audio)", "input_primary": "image", "input_secondary": [], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"none\", \"lipSync\": false, \"motionType\": \"general\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Static image animation", "Product visualization", "Concept previsualization"], "metadata_platforms": ["Replicate", "FAL AI", "Stability AI"], "metadata_example_models": ["Stable Video Diffusion", "AnimateDiff"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "img-to-vid-ambient-audio", "name": "Image to Video (Ambient Audio)", "input_primary": "image", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "ambient", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"synthesized\", \"audioPrompting\": \"text-based\", \"lipSync\": false, \"motionType\": \"general\", \"audioCharacteristics\": [\"background\", \"environmental\", \"atmospheric\"]}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Scene ambiance creation", "Marketplace atmosphere", "Environmental storytelling"], "metadata_platforms": ["FAL AI", "Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "img-to-vid-lipsync-text", "name": "Image to Video (Lip Sync from Text)", "input_primary": "image", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"text-to-speech\", \"audioPrompting\": \"text-based\", \"lipSync\": true, \"lipSyncMethod\": \"generated-from-text\", \"motionType\": \"facial\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Avatar creation", "Character animation from portrait", "Marketing personalization"], "metadata_platforms": ["Replicate", "FAL AI", "HeyGen"], "metadata_example_models": ["Wav2Lip", "SadTalker", "DreamTalk"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "img-to-vid-lipsync-audio", "name": "Image to Video (Lip Sync from Audio)", "input_primary": "image", "input_secondary": ["audio"], "output_primary": "video", "output_audio": true, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"reference-based\", \"audioPrompting\": \"audio-reference\", \"lipSync\": true, \"lipSyncMethod\": \"audio-driven\", \"motionType\": \"facial\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Voice cloning with video", "Dubbing and localization", "Podcast video generation"], "metadata_platforms": ["Replicate", "FAL AI"], "metadata_example_models": ["Wav2Lip", "SadTalker"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "img-to-vid-lipsync-lora", "name": "Image to Video (Lip Sync with LoRA Character)", "input_primary": "image", "input_secondary": ["text", "lora-model"], "output_primary": "video", "output_audio": true, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"text-to-speech\", \"audioPrompting\": \"text-based\", \"lipSync\": true, \"lipSyncMethod\": \"generated-from-text\", \"characterReference\": \"lora\", \"motionType\": \"facial\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Consistent character animation", "Brand mascot videos", "Personalized avatars"], "metadata_platforms": ["Specialized services"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "text-to-vid-no-audio", "name": "Text to Video (No Audio)", "input_primary": "text", "input_secondary": [], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"none\", \"motionType\": \"general\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Concept visualization", "Storyboarding", "Creative exploration"], "metadata_platforms": ["Replicate", "FAL AI", "RunwayML"], "metadata_example_models": ["ModelScope", "ZeroScope"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "text-to-vid-with-audio", "name": "Text to Video (With Audio)", "input_primary": "text", "input_secondary": [], "output_primary": "video", "output_audio": true, "output_audio_type": "synchronized", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"synthesized\", \"audioPrompting\": \"text-based\", \"audioVideoSync\": true, \"motionType\": \"general\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Complete scene generation", "Multimedia storytelling"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "audio-to-vid", "name": "Audio to Video", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"synthesis\", \"audioVisualization\": true, \"motionType\": \"audio-reactive\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Music visualization", "Audio-reactive art", "Podcast video generation"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "multimodal-img-audio-to-vid", "name": "Image + Audio to Video", "input_primary": "image", "input_secondary": ["audio"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"synthesis\", \"audioGeneration\": \"reference-based\", \"motionType\": \"audio-driven\", \"lipSync\": false}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Audio-driven animation", "Dance video generation", "Music-driven motion"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "multimodal-text-img-to-vid", "name": "Text + Image to Video", "input_primary": "text", "input_secondary": ["image"], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"guidanceType\": \"text-and-visual\", \"motionType\": \"guided\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Guided video generation", "Controlled animation", "Reference-based video creation"], "metadata_platforms": ["Replicate", "FAL AI"], "metadata_example_models": ["AnimateDiff with ControlNet"], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "3d-to-vid", "name": "3D Model to Video", "input_primary": "3d-model", "input_secondary": [], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"rendering\", \"renderType\": \"3d-rendering\", \"motionType\": \"camera-path\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["3D visualization", "Product rendering", "Architectural visualization"], "metadata_platforms": ["Blender", "Unreal Engine", "Unity"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "creation"}
{"id": "vid-to-vid-no-audio", "name": "Video to Video (No Audio)", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"motion-modification\", \"object-editing\"], \"preserveAudio\": false}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video style transfer", "Video editing", "Motion manipulation"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": ["Gen-2", "Video ControlNet"], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
{"id": "vid-to-vid-preserve-audio", "name": "Video to Video (Preserve Audio)", "input_primary": "video", "input_secondary": ["text"], "output_primary": "video", "output_audio": true, "output_audio_type": "original", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"motion-modification\", \"object-editing\"], \"preserveAudio\": true, \"audioHandling\": \"passthrough\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Video style transfer with audio", "Content transformation maintaining soundtrack"], "metadata_platforms": ["Replicate", "RunwayML"], "metadata_example_models": [], "relationships": "{}", "output_modality": "video", "operation_type": "editing"}
{"id": "text-to-audio", "name": "Text to Audio", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"general\", \"audioCategories\": [\"speech\", \"sound-effects\", \"music\", \"ambient\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Sound effect generation", "Voiceover creation", "Audio asset production"], "metadata_platforms": ["Replicate", "ElevenLabs", "AudioCraft"], "metadata_example_models": ["AudioGen", "MusicGen"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
{"id": "text-to-speech", "name": "Text to Speech", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "speech", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"speech\", \"voiceCloning\": false}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Narration", "Accessibility", "Voice assistants"], "metadata_platforms": ["ElevenLabs", "Google Cloud", "Azure", "AWS"], "metadata_example_models": ["ElevenLabs", "Google WaveNet", "Azure Neural TTS"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
{"id": "text-to-music", "name": "Text to Music", "input_primary": "text", "input_secondary": [], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"synthesis\", \"audioType\": \"music\", \"melodic\": true}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Background music generation", "Musical composition", "Soundtrack creation"], "metadata_platforms": ["Replicate", "Stability AI"], "metadata_example_models": ["MusicGen", "Stable Audio"], "relationships": "{}", "output_modality": "audio", "operation_type": "creation"}
{"id": "audio-to-audio-inpainting", "name": "Audio to Audio (Inpainting)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "general", "characteristics": "{\"processType\": \"inpainting\", \"modification\": \"selective-editing\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["Audio editing", "Sound design", "Audio restoration"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{}", "output_modality": "audio", "operation_type": "editing"}
{"id": "music-to-music-inpainting", "name": "Music to Music (Inpainting)", "input_primary": "audio", "input_secondary": ["text"], "output_primary": "audio", "output_audio": false, "output_audio_type": "music", "characteristics": "{\"processType\": \"inpainting\", \"modification\": \"selective-editing\", \"melodic\": true, \"audioSubtype\": \"music\"}", "metadata_maturity_level": "experimental", "metadata_common_use_cases": ["Music editing", "Compositional modifications", "Arrangement changes"], "metadata_platforms": ["Experimental"], "metadata_example_models": [], "relationships": "{\"parent\": \"audio-to-audio-inpainting\", \"note\": \"Music inpainting is a specialized subset of audio inpainting\"}", "output_modality": "audio", "operation_type": "editing"}
{"id": "text-to-img", "name": "Text to Image", "input_primary": "text", "input_secondary": [], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"synthesis\"}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Concept art generation", "Product mockups", "Marketing assets"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney", "DALL-E"], "metadata_example_models": ["Stable Diffusion", "DALL-E 3", "Midjourney"], "relationships": "{}", "output_modality": "image", "operation_type": "creation"}
{"id": "img-to-img", "name": "Image to Image", "input_primary": "image", "input_secondary": ["text"], "output_primary": "image", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"transformation\", \"transformationTypes\": [\"style-transfer\", \"enhancement\", \"editing\", \"inpainting\"]}", "metadata_maturity_level": "mature", "metadata_common_use_cases": ["Image editing", "Style transfer", "Image enhancement", "Object removal/addition"], "metadata_platforms": ["Replicate", "Stability AI", "Midjourney"], "metadata_example_models": ["Stable Diffusion img2img", "ControlNet"], "relationships": "{}", "output_modality": "image", "operation_type": "editing"}
{"id": "text-to-3d", "name": "Text to 3D Model", "input_primary": "text", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-synthesis\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["3D asset generation", "Rapid prototyping", "Game asset creation"], "metadata_platforms": ["Replicate", "Meshy", "3DFY"], "metadata_example_models": ["Point-E", "Shap-E", "DreamFusion"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
{"id": "img-to-3d", "name": "Image to 3D Model", "input_primary": "image", "input_secondary": [], "output_primary": "3d-model", "output_audio": false, "output_audio_type": "", "characteristics": "{\"processType\": \"synthesis\", \"generationType\": \"3d-reconstruction\"}", "metadata_maturity_level": "emerging", "metadata_common_use_cases": ["3D reconstruction", "Object digitization", "Asset creation from photos"], "metadata_platforms": ["Replicate", "Meshy", "Luma AI"], "metadata_example_models": ["Zero-1-to-3", "Wonder3D"], "relationships": "{}", "output_modality": "3d-model", "operation_type": "creation"}
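train.jsonl duplicates the seven category files in a single split (11 + 2 + 3 + 2 + 1 + 1 + 2 = 22 records); a quick consistency check over a local checkout, counting non-empty lines:

```python
from pathlib import Path

data_dir = Path("data")

def count_records(path: Path) -> int:
    """Count non-empty lines, i.e. JSONL records, in a file."""
    return sum(1 for line in path.read_text(encoding="utf-8").splitlines() if line.strip())

category_total = sum(count_records(p) for p in data_dir.glob("taxonomy_*.jsonl"))
train_total = count_records(data_dir / "train.jsonl")
assert category_total == train_total == 22
```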
dataset_infos.json
DELETED
@@ -1,96 +0,0 @@
{
  "all": {
    "description": "Complete taxonomy with all modalities across all output types and operations.",
    "citation": "@dataset{multimodal_ai_taxonomy,\n title={Multimodal AI Taxonomy},\n author={Community Contributors},\n year={2025},\n publisher={Hugging Face},\n}",
    "homepage": "https://huggingface.co/datasets/YOUR_USERNAME/multimodal-ai-taxonomy",
    "license": "cc0-1.0",
    "features": {
      "id": {
        "dtype": "string",
        "_type": "Value"
      },
      "name": {
        "dtype": "string",
        "_type": "Value"
      },
      "input_primary": {
        "dtype": "string",
        "_type": "Value"
      },
      "input_secondary": {
        "feature": {
          "dtype": "string",
          "_type": "Value"
        },
        "_type": "Sequence"
      },
      "output_primary": {
        "dtype": "string",
        "_type": "Value"
      },
      "output_audio": {
        "dtype": "bool",
        "_type": "Value"
      },
      "output_audio_type": {
        "dtype": "string",
        "_type": "Value"
      },
      "characteristics": {
        "dtype": "string",
        "_type": "Value"
      },
      "metadata_maturity_level": {
        "dtype": "string",
        "_type": "Value"
      },
      "metadata_common_use_cases": {
        "feature": {
          "dtype": "string",
          "_type": "Value"
        },
        "_type": "Sequence"
      },
      "metadata_platforms": {
        "feature": {
          "dtype": "string",
          "_type": "Value"
        },
        "_type": "Sequence"
      },
      "metadata_example_models": {
        "feature": {
          "dtype": "string",
          "_type": "Value"
        },
        "_type": "Sequence"
      },
      "relationships": {
        "dtype": "string",
        "_type": "Value"
      },
      "file_output_modality": {
        "dtype": "string",
        "_type": "Value"
      },
      "file_operation_type": {
        "dtype": "string",
        "_type": "Value"
      }
    },
    "supervised_keys": null,
    "builder_name": "multimodal_ai_taxonomy",
    "config_name": "all",
    "version": "1.0.0",
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 0,
        "num_examples": 0,
        "dataset_name": "multimodal_ai_taxonomy"
      }
    },
    "download_size": 0,
    "dataset_size": 0
  }
}
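Deleting this file hands schema detection to the JSON builder's type inference. If the schema ever needs pinning, the deleted `features` block can be recreated at load time; a sketch, with the trailing `file_output_modality`/`file_operation_type` entries renamed to the `output_modality`/`operation_type` keys the new JSONL actually uses:

```python
from datasets import Features, Sequence, Value, load_dataset

# Explicit schema mirroring the deleted dataset_infos.json "features" block.
features = Features({
    "id": Value("string"),
    "name": Value("string"),
    "input_primary": Value("string"),
    "input_secondary": Sequence(Value("string")),
    "output_primary": Value("string"),
    "output_audio": Value("bool"),
    "output_audio_type": Value("string"),
    "characteristics": Value("string"),  # JSON-encoded object
    "metadata_maturity_level": Value("string"),
    "metadata_common_use_cases": Sequence(Value("string")),
    "metadata_platforms": Sequence(Value("string")),
    "metadata_example_models": Sequence(Value("string")),
    "relationships": Value("string"),  # JSON-encoded object
    "output_modality": Value("string"),
    "operation_type": Value("string"),
})

dataset = load_dataset(
    "json",
    data_files="data/train.jsonl",
    split="train",
    features=features,
)
```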
multimodal-ai-taxonomy.py
DELETED
@@ -1,217 +0,0 @@
"""Multimodal AI Taxonomy dataset loading script."""

import json
import os
from pathlib import Path
from typing import Dict, List

import datasets


_CITATION = """\
@dataset{multimodal_ai_taxonomy,
 title={Multimodal AI Taxonomy},
 author={Community Contributors},
 year={2025},
 publisher={Hugging Face},
}
"""

_DESCRIPTION = """\
A comprehensive, structured taxonomy for mapping multimodal AI model capabilities across input and output modalities.
This dataset provides a systematic categorization of multimodal AI capabilities, enabling users to navigate the complex
landscape of multimodal AI models, filter by specific input/output modality combinations, and discover models that match
specific use case requirements.
"""

_HOMEPAGE = "https://huggingface.co/datasets/YOUR_USERNAME/multimodal-ai-taxonomy"

_LICENSE = "cc0-1.0"

_URLS = {
    "schema": "taxonomy/schema.json",
    "video_generation_creation": "taxonomy/video-generation/creation/modalities.json",
    "video_generation_editing": "taxonomy/video-generation/editing/modalities.json",
    "audio_generation_creation": "taxonomy/audio-generation/creation/modalities.json",
    "audio_generation_editing": "taxonomy/audio-generation/editing/modalities.json",
    "image_generation_creation": "taxonomy/image-generation/creation/modalities.json",
    "image_generation_editing": "taxonomy/image-generation/editing/modalities.json",
    "text_generation_creation": "taxonomy/text-generation/creation/modalities.json",
    "text_generation_editing": "taxonomy/text-generation/editing/modalities.json",
    "3d_generation_creation": "taxonomy/3d-generation/creation/modalities.json",
    "3d_generation_editing": "taxonomy/3d-generation/editing/modalities.json",
}


class MultimodalAITaxonomy(datasets.GeneratorBasedBuilder):
    """Multimodal AI Taxonomy dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=VERSION,
            description="Complete taxonomy with all modalities",
        ),
        datasets.BuilderConfig(
            name="video_generation_creation",
            version=VERSION,
            description="Video generation (creation) modalities",
        ),
        datasets.BuilderConfig(
            name="video_generation_editing",
            version=VERSION,
            description="Video generation (editing) modalities",
        ),
        datasets.BuilderConfig(
            name="audio_generation_creation",
            version=VERSION,
            description="Audio generation (creation) modalities",
        ),
        datasets.BuilderConfig(
            name="audio_generation_editing",
            version=VERSION,
            description="Audio generation (editing) modalities",
        ),
        datasets.BuilderConfig(
            name="image_generation_creation",
            version=VERSION,
            description="Image generation (creation) modalities",
        ),
        datasets.BuilderConfig(
            name="image_generation_editing",
            version=VERSION,
            description="Image generation (editing) modalities",
        ),
        datasets.BuilderConfig(
            name="text_generation_creation",
            version=VERSION,
            description="Text generation (creation) modalities",
        ),
        datasets.BuilderConfig(
            name="text_generation_editing",
            version=VERSION,
            description="Text generation (editing) modalities",
        ),
        datasets.BuilderConfig(
            name="3d_generation_creation",
            version=VERSION,
            description="3D generation (creation) modalities",
        ),
        datasets.BuilderConfig(
            name="3d_generation_editing",
            version=VERSION,
            description="3D generation (editing) modalities",
        ),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "name": datasets.Value("string"),
                "input_primary": datasets.Value("string"),
                "input_secondary": datasets.Sequence(datasets.Value("string")),
                "output_primary": datasets.Value("string"),
                "output_audio": datasets.Value("bool"),
                "output_audio_type": datasets.Value("string"),
                "characteristics": datasets.Value("string"),  # JSON string for flexibility
                "metadata_maturity_level": datasets.Value("string"),
                "metadata_common_use_cases": datasets.Sequence(datasets.Value("string")),
                "metadata_platforms": datasets.Sequence(datasets.Value("string")),
                "metadata_example_models": datasets.Sequence(datasets.Value("string")),
                "relationships": datasets.Value("string"),  # JSON string for flexibility
                "file_output_modality": datasets.Value("string"),
                "file_operation_type": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        # Download/locate all files
        if self.config.name == "all":
            # Load all modality files
            config_names = [k for k in _URLS.keys() if k != "schema"]
        else:
            # Load only the specified config
            config_names = [self.config.name]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "config_names": config_names,
                    "dl_manager": dl_manager,
                },
            ),
        ]

    def _generate_examples(self, config_names, dl_manager):
        """Yields examples from the taxonomy."""

        idx = 0
        for config_name in config_names:
            filepath = _URLS[config_name]

            # Read the JSON file
            with open(dl_manager.download(filepath), encoding="utf-8") as f:
                data = json.load(f)

            output_modality = data.get("outputModality", "")
            operation_type = data.get("operationType", "")

            # Process each modality in the file
            for modality in data.get("modalities", []):
                # Extract input information
                input_data = modality.get("input", {})
                input_primary = input_data.get("primary", "")
                input_secondary = input_data.get("secondary", [])

                # Extract output information
                output_data = modality.get("output", {})
                output_primary = output_data.get("primary", "")
                output_audio = output_data.get("audio", False)
                output_audio_type = output_data.get("audioType", "")

                # Extract metadata
                metadata = modality.get("metadata", {})
                maturity_level = metadata.get("maturityLevel", "")
                common_use_cases = metadata.get("commonUseCases", [])
                platforms = metadata.get("platforms", [])
                example_models = metadata.get("exampleModels", [])

                # Keep characteristics and relationships as JSON strings for flexibility
                characteristics = json.dumps(modality.get("characteristics", {}))
                relationships = json.dumps(modality.get("relationships", {}))

                yield idx, {
                    "id": modality.get("id", ""),
                    "name": modality.get("name", ""),
                    "input_primary": input_primary,
                    "input_secondary": input_secondary,
                    "output_primary": output_primary,
                    "output_audio": output_audio,
                    "output_audio_type": output_audio_type,
                    "characteristics": characteristics,
                    "metadata_maturity_level": maturity_level,
                    "metadata_common_use_cases": common_use_cases,
                    "metadata_platforms": platforms,
                    "metadata_example_models": example_models,
                    "relationships": relationships,
                    "file_output_modality": output_modality,
                    "file_operation_type": operation_type,
                }

                idx += 1
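The conversion utilities themselves are gitignored in this commit, but the flattening they have to perform is fully determined by `_generate_examples` above; a standalone sketch of that transformation (the function names and file pairing here are assumptions, not the actual `convert_to_modern_format.py`):

```python
"""Sketch: flatten nested taxonomy modalities.json files into JSONL records."""
import json
from pathlib import Path


def flatten(modality: dict, output_modality: str, operation_type: str) -> dict:
    """Flatten one nested modality entry, mirroring the deleted _generate_examples."""
    input_data = modality.get("input", {})
    output_data = modality.get("output", {})
    metadata = modality.get("metadata", {})
    return {
        "id": modality.get("id", ""),
        "name": modality.get("name", ""),
        "input_primary": input_data.get("primary", ""),
        "input_secondary": input_data.get("secondary", []),
        "output_primary": output_data.get("primary", ""),
        "output_audio": output_data.get("audio", False),
        "output_audio_type": output_data.get("audioType", ""),
        # Nested objects stay JSON-encoded strings, as in the old script.
        "characteristics": json.dumps(modality.get("characteristics", {})),
        "metadata_maturity_level": metadata.get("maturityLevel", ""),
        "metadata_common_use_cases": metadata.get("commonUseCases", []),
        "metadata_platforms": metadata.get("platforms", []),
        "metadata_example_models": metadata.get("exampleModels", []),
        "relationships": json.dumps(modality.get("relationships", {})),
        "output_modality": output_modality,
        "operation_type": operation_type,
    }


def convert(src: Path, dst: Path) -> None:
    """Convert one taxonomy/<type>/<operation>/modalities.json into a JSONL file."""
    data = json.loads(src.read_text(encoding="utf-8"))
    with dst.open("w", encoding="utf-8") as f:
        for modality in data.get("modalities", []):
            record = flatten(
                modality,
                data.get("outputModality", ""),
                data.get("operationType", ""),
            )
            f.write(json.dumps(record) + "\n")
```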