add AIBOM
#14
by fatima113 - opened
laion_CLIP-ViT-H-14-laion2B-s32B-b79K.json
ADDED
@@ -0,0 +1,60 @@
+{
+  "bomFormat": "CycloneDX",
+  "specVersion": "1.6",
+  "serialNumber": "urn:uuid:d9fd9ecd-24d7-45fd-b99b-3387fdf9a4c8",
+  "version": 1,
+  "metadata": {
+    "timestamp": "2025-07-10T08:48:58.434970+00:00",
+    "component": {
+      "type": "machine-learning-model",
+      "bom-ref": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K-1cc4816d-7985-5eff-adbc-1b0fca71fe35",
+      "name": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
+      "externalReferences": [
+        {
+          "url": "https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
+          "type": "documentation"
+        }
+      ],
+      "modelCard": {
+        "modelParameters": {
+          "task": "zero-shot-image-classification",
+          "architectureFamily": "clip",
+          "modelArchitecture": "CLIPModel"
+        },
+        "properties": [
+          {
+            "name": "library_name",
+            "value": "open_clip"
+          }
+        ],
+        "consideration": {
+          "useCases": "As per the original [OpenAI CLIP model card](https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/model-card.md), this model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such model.The OpenAI CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. Additionally, the LAION-5B blog (https://laion.ai/blog/laion-5b/) and upcoming paper include additional discussion as it relates specifically to the training dataset."
+        }
+      },
+      "authors": [
+        {
+          "name": "laion"
+        }
+      ],
+      "licenses": [
+        {
+          "license": {
+            "id": "MIT",
+            "url": "https://spdx.org/licenses/MIT.html"
+          }
+        }
+      ],
+      "description": "A CLIP ViT-H/14 model trained with the LAION-2B English subset of LAION-5B (https://laion.ai/blog/laion-5b/) using OpenCLIP (https://github.com/mlfoundations/open_clip).Model training done by Romain Beaumont on the [stability.ai](https://stability.ai/) cluster.",
+      "tags": [
+        "open_clip",
+        "pytorch",
+        "safetensors",
+        "clip",
+        "zero-shot-image-classification",
+        "arxiv:1910.04867",
+        "license:mit",
+        "region:us"
+      ]
+    }
+  }
+}
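For reviewers who want to sanity-check the added file, a minimal sketch of how it could be inspected follows. It assumes the JSON has been saved locally under the filename used in this PR and relies only on the standard-library json module rather than any CycloneDX-specific tooling; the path constant and the printed fields are illustrative choices, not part of the PR itself.

import json

# Minimal sketch: load the AIBOM added in this PR and print the fields a
# reviewer is most likely to check. Assumes the file sits in the current
# working directory under the name used in the diff (hypothetical path).
AIBOM_PATH = "laion_CLIP-ViT-H-14-laion2B-s32B-b79K.json"

with open(AIBOM_PATH, encoding="utf-8") as f:
    bom = json.load(f)

component = bom["metadata"]["component"]

print("BOM format :", bom["bomFormat"], bom["specVersion"])
print("Model      :", component["name"])
print("Task       :", component["modelCard"]["modelParameters"]["task"])
print("Licenses   :", [entry["license"]["id"] for entry in component["licenses"]])
print("Tags       :", ", ".join(component["tags"]))

Running this against the file above should report the CycloneDX 1.6 format, the laion/CLIP-ViT-H-14-laion2B-s32B-b79K component name, the zero-shot-image-classification task, the MIT license entry, and the tag list.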