{ "bomFormat": "CycloneDX", "specVersion": "1.6", "serialNumber": "urn:uuid:d9fd9ecd-24d7-45fd-b99b-3387fdf9a4c8", "version": 1, "metadata": { "timestamp": "2025-07-10T08:48:58.434970+00:00", "component": { "type": "machine-learning-model", "bom-ref": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K-1cc4816d-7985-5eff-adbc-1b0fca71fe35", "name": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", "externalReferences": [ { "url": "https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K", "type": "documentation" } ], "modelCard": { "modelParameters": { "task": "zero-shot-image-classification", "architectureFamily": "clip", "modelArchitecture": "CLIPModel" }, "properties": [ { "name": "library_name", "value": "open_clip" } ], "consideration": { "useCases": "As per the original [OpenAI CLIP model card](https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/model-card.md), this model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such model.The OpenAI CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. Additionally, the LAION-5B blog (https://laion.ai/blog/laion-5b/) and upcoming paper include additional discussion as it relates specifically to the training dataset." } }, "authors": [ { "name": "laion" } ], "licenses": [ { "license": { "id": "MIT", "url": "https://spdx.org/licenses/MIT.html" } } ], "description": "A CLIP ViT-H/14 model trained with the LAION-2B English subset of LAION-5B (https://laion.ai/blog/laion-5b/) using OpenCLIP (https://github.com/mlfoundations/open_clip).Model training done by Romain Beaumont on the [stability.ai](https://stability.ai/) cluster.", "tags": [ "open_clip", "pytorch", "safetensors", "clip", "zero-shot-image-classification", "arxiv:1910.04867", "license:mit", "region:us" ] } } }