add AIBOM #3
by RiccardoDav - opened

AdaptLLM_finance-LLM.json +183 -0

AdaptLLM_finance-LLM.json ADDED
@@ -0,0 +1,183 @@
+{
+  "bomFormat": "CycloneDX",
+  "specVersion": "1.6",
+  "serialNumber": "urn:uuid:e0a98ae4-ad3d-487c-a284-cb9382968df5",
+  "version": 1,
+  "metadata": {
+    "timestamp": "2025-10-07T08:05:04.094127+00:00",
+    "component": {
+      "type": "machine-learning-model",
+      "bom-ref": "AdaptLLM/finance-LLM-5d23c55e-22c4-55cb-9c9f-dcaaadc7903d",
+      "licenses": [],
+      "externalReferences": [
+        {
+          "url": "https://huggingface.co/AdaptLLM/finance-LLM",
+          "type": "documentation"
+        }
+      ],
+      "modelCard": {
+        "modelParameters": {
+          "datasets": [
+            {
+              "ref": "Open-Orca/OpenOrca-bd2dde5e-b85b-5436-a786-d44f16da11b9"
+            },
+            {
+              "ref": "GAIR/lima-afa8f631-d0ed-59c0-a5a1-170c80a5117e"
+            },
+            {
+              "ref": "WizardLM/WizardLM_evol_instruct_V2_196k-3de546b1-38de-5aa5-8f76-d810e237648b"
+            }
+          ],
+          "task": "text-generation",
+          "architectureFamily": "llama",
+          "modelArchitecture": "LLaMAForCausalLM"
+        },
+        "properties": [
+          {
+            "name": "library_name",
+            "value": "transformers"
+          }
+        ]
+      },
+      "name": "AdaptLLM/finance-LLM",
+      "authors": [
+        {
+          "name": "AdaptLLM"
+        }
+      ],
+      "tags": [
+        "transformers",
+        "pytorch",
+        "safetensors",
+        "llama",
+        "text-generation",
+        "finance",
+        "en",
+        "dataset:Open-Orca/OpenOrca",
+        "dataset:GAIR/lima",
+        "dataset:WizardLM/WizardLM_evol_instruct_V2_196k",
+        "arxiv:2309.09530",
+        "arxiv:2411.19930",
+        "arxiv:2406.14491",
+        "autotrain_compatible",
+        "text-generation-inference",
+        "endpoints_compatible",
+        "region:us"
+      ]
+    }
+  },
+  "components": [
+    {
+      "type": "data",
+      "bom-ref": "Open-Orca/OpenOrca-bd2dde5e-b85b-5436-a786-d44f16da11b9",
+      "name": "Open-Orca/OpenOrca",
+      "data": [
+        {
+          "type": "dataset",
+          "bom-ref": "Open-Orca/OpenOrca-bd2dde5e-b85b-5436-a786-d44f16da11b9",
+          "name": "Open-Orca/OpenOrca",
+          "contents": {
+            "url": "https://huggingface.co/datasets/Open-Orca/OpenOrca",
+            "properties": [
+              {
+                "name": "task_categories",
+                "value": "conversational, text-classification, token-classification, table-question-answering, question-answering, zero-shot-classification, summarization, feature-extraction, text-generation, text2text-generation"
+              },
+              {
+                "name": "language",
+                "value": "en"
+              },
+              {
+                "name": "size_categories",
+                "value": "10M<n<100M"
+              },
+              {
+                "name": "pretty_name",
+                "value": "OpenOrca"
+              },
+              {
+                "name": "license",
+                "value": "mit"
+              }
+            ]
+          },
+          "description": "\ud83d\udc0b The OpenOrca Dataset! \ud83d\udc0b\n\n\n\nWe are thrilled to announce the release of the OpenOrca dataset!\nThis rich collection of augmented FLAN data aligns, as best as possible, with the distributions outlined in the Orca paper.\nIt has been instrumental in generating high-performing model checkpoints and serves as a valuable resource for all NLP researchers and developers!\n\n\t\n\t\t\n\t\n\t\n\t\tOfficial Models\n\t\n\n\n\t\n\t\n\t\n\t\tMistral-7B-OpenOrca\n\t\n\nOur latest model, the first 7B to score better overall than all\u2026 See the full description on the dataset page: https://huggingface.co/datasets/Open-Orca/OpenOrca.",
+          "governance": {
+            "owners": [
+              {
+                "organization": {
+                  "name": "Open-Orca",
+                  "url": "https://huggingface.co/Open-Orca"
+                }
+              }
+            ]
+          }
+        }
+      ]
+    },
+    {
+      "type": "data",
+      "bom-ref": "GAIR/lima-afa8f631-d0ed-59c0-a5a1-170c80a5117e",
+      "name": "GAIR/lima",
+      "data": [
+        {
+          "type": "dataset",
+          "bom-ref": "GAIR/lima-afa8f631-d0ed-59c0-a5a1-170c80a5117e",
+          "name": "GAIR/lima",
+          "contents": {
+            "url": "https://huggingface.co/datasets/GAIR/lima",
+            "properties": [
+              {
+                "name": "license",
+                "value": "other"
+              }
+            ]
+          },
+          "description": "A high-quality dataset for efficient instruction tuning.",
+          "governance": {
+            "owners": [
+              {
+                "organization": {
+                  "name": "GAIR",
+                  "url": "https://huggingface.co/GAIR"
+                }
+              }
+            ]
+          }
+        }
+      ]
+    },
+    {
+      "type": "data",
+      "bom-ref": "WizardLM/WizardLM_evol_instruct_V2_196k-3de546b1-38de-5aa5-8f76-d810e237648b",
+      "name": "WizardLM/WizardLM_evol_instruct_V2_196k",
+      "data": [
+        {
+          "type": "dataset",
+          "bom-ref": "WizardLM/WizardLM_evol_instruct_V2_196k-3de546b1-38de-5aa5-8f76-d810e237648b",
+          "name": "WizardLM/WizardLM_evol_instruct_V2_196k",
+          "contents": {
+            "url": "https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k",
+            "properties": [
+              {
+                "name": "license",
+                "value": "mit"
+              }
+            ]
+          },
+          "description": "\n\t\n\t\t\n\t\tNews\n\t\n\n\n\ud83d\udd25 \ud83d\udd25 \ud83d\udd25 [08/11/2023] We release WizardMath Models.\n\ud83d\udd25 Our WizardMath-70B-V1.0 model slightly outperforms some closed-source LLMs on the GSM8K, including ChatGPT 3.5, Claude Instant 1 and PaLM 2 540B.\n\ud83d\udd25 Our WizardMath-70B-V1.0 model achieves 81.6 pass@1 on the GSM8k Benchmarks, which is 24.8 points higher than the SOTA open-source LLM.\n\ud83d\udd25 Our WizardMath-70B-V1.0 model achieves 22.7 pass@1 on the MATH Benchmarks, which is 9.2 points higher than the SOTA open-source LLM.\u2026 See the full description on the dataset page: https://huggingface.co/datasets/WizardLMTeam/WizardLM_evol_instruct_V2_196k.",
+          "governance": {
+            "owners": [
+              {
+                "organization": {
+                  "name": "WizardLMTeam",
+                  "url": "https://huggingface.co/WizardLMTeam"
+                }
+              }
+            ]
+          }
+        }
+      ]
+    }
+  ]
+}
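For reviewers: a minimal sketch (Python, standard library only; the script is illustrative and not part of this PR or of any AIBOM tooling) that loads the added CycloneDX file and prints the model plus the dataset components it declares, which is a quick way to sanity-check the structure above.

```python
import json

# Load the AIBOM added in this PR (path matches the added file).
with open("AdaptLLM_finance-LLM.json") as f:
    bom = json.load(f)

# Sanity-check that the document is the CycloneDX BOM we expect.
assert bom["bomFormat"] == "CycloneDX"
assert bom["specVersion"] == "1.6"

model = bom["metadata"]["component"]
print(f"Model: {model['name']} ({model['type']})")

# Each dataset is a top-level component of type "data" wrapping one
# dataset entry with its contents.properties (license, language, ...).
for comp in bom.get("components", []):
    if comp["type"] != "data":
        continue
    for ds in comp.get("data", []):
        props = {p["name"]: p["value"]
                 for p in ds.get("contents", {}).get("properties", [])}
        print(f"  dataset: {ds['name']}  license: {props.get('license', 'n/a')}")
```

Run against this file, it should report the AdaptLLM/finance-LLM model and the three datasets (Open-Orca/OpenOrca, GAIR/lima, WizardLM/WizardLM_evol_instruct_V2_196k) with their licenses (mit, other, mit).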