{
  "bomFormat": "CycloneDX",
  "specVersion": "1.6",
  "serialNumber": "urn:uuid:7611ea51-f4d2-4d7d-8423-c454737b6e82",
  "version": 1,
  "metadata": {
    "timestamp": "2025-07-10T09:22:31.938007+00:00",
    "component": {
      "type": "machine-learning-model",
      "bom-ref": "AIDC-AI/Ovis1.6-Gemma2-9B-e39fac17-8b40-541a-ae36-5ba9dd911393",
      "name": "AIDC-AI/Ovis1.6-Gemma2-9B",
      "externalReferences": [
        {
          "url": "https://huggingface.co/AIDC-AI/Ovis1.6-Gemma2-9B",
          "type": "documentation"
        }
      ],
      "modelCard": {
        "modelParameters": {
          "task": "image-text-to-text",
          "architectureFamily": "ovis",
          "modelArchitecture": "Ovis",
          "datasets": [
            {
              "ref": "AIDC-AI/Ovis-dataset-2b0700c9-2ab5-5db9-9b2b-d9cfc06e2a69"
            }
          ]
        },
        "properties": [
          {
            "name": "library_name",
            "value": "transformers"
          }
        ]
      },
      "authors": [
        {
          "name": "AIDC-AI"
        }
      ],
      "licenses": [
        {
          "license": {
            "id": "Apache-2.0",
            "url": "https://spdx.org/licenses/Apache-2.0.html"
          }
        }
      ],
"description": "[GitHub](https://github.com/AIDC-AI/Ovis) | [Demo](https://huggingface.co/spaces/AIDC-AI/Ovis1.6-Gemma2-9B) | [Paper](https://arxiv.org/abs/2405.20797)We are excited to announce the open-sourcing of **Ovis-1.6**, our latest multi-modal large language model. Ovis is a novel Multimodal Large Language Model (MLLM) architecture, designed to structurally align visual and textual embeddings.<div align=\"center\"><img src=\"https://cdn-uploads.huggingface.co/production/uploads/658a8a837959448ef5500ce5/TIlymOb86R6_Mez3bpmcB.png\" width=\"100%\" /></div>",
|
|
"tags": [
|
|
"transformers",
|
|
"safetensors",
|
|
"ovis",
|
|
"text-generation",
|
|
"MLLM",
|
|
"image-text-to-text",
|
|
"conversational",
|
|
"custom_code",
|
|
"en",
|
|
"dataset:AIDC-AI/Ovis-dataset",
|
|
"arxiv:2405.20797",
|
|
"license:apache-2.0",
|
|
"autotrain_compatible",
|
|
"region:us"
|
|
]
|
|
}
|
|
},
|
|
"components": [
|
|
{
|
|
"type": "data",
|
|
"bom-ref": "AIDC-AI/Ovis-dataset-2b0700c9-2ab5-5db9-9b2b-d9cfc06e2a69",
|
|
"name": "AIDC-AI/Ovis-dataset",
|
|
"data": [
|
|
{
|
|
"type": "dataset",
|
|
"bom-ref": "AIDC-AI/Ovis-dataset-2b0700c9-2ab5-5db9-9b2b-d9cfc06e2a69",
|
|
"name": "AIDC-AI/Ovis-dataset",
|
|
"contents": {
|
|
"url": "https://huggingface.co/datasets/AIDC-AI/Ovis-dataset",
|
|
"properties": [
|
|
{
|
|
"name": "task_categories",
|
|
"value": "visual-question-answering"
|
|
},
|
|
{
|
|
"name": "license",
|
|
"value": "cc-by-4.0"
|
|
}
|
|
]
|
|
},
|
|
"governance": {
|
|
"owners": [
|
|
{
|
|
"organization": {
|
|
"name": "AIDC-AI",
|
|
"url": "https://huggingface.co/AIDC-AI"
|
|
}
|
|
}
|
|
]
|
|
},
|
|
"description": "\n\t\n\t\t\n\t\tUsage\n\t\n\nhttps://github.com/AIDC-AI/Ovis/tree/v1.5?tab=readme-ov-file#dataset\n\n\t\n\t\t\n\t\tDescription\n\t\n\nThis dataset is a collection of multimodal datasets used for training Ovis. Ovis is a novel Multimodal Large Language Model (MLLM) architecture, designed to structurally align visual and textual embeddings. For a comprehensive introduction, please refer to the Ovis paper and the Ovis GitHub repo.\n\n\t\n\t\t\n\t\tLicense\n\t\n\nThe files laion-description-11k.json, cc12m-description-1m.json, and\u2026 See the full description on the dataset page: https://huggingface.co/datasets/AIDC-AI/Ovis-dataset."
|
|
        }
      ]
    }
  ]
}