add AIBOM
#7 by fatima113 - opened
chuanli11_Llama-3.2-3B-Instruct-uncensored.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "bomFormat": "CycloneDX",
+  "specVersion": "1.6",
+  "serialNumber": "urn:uuid:2f2c065a-9ef6-4763-9924-fb8ec18743c4",
+  "version": 1,
+  "metadata": {
+    "timestamp": "2025-07-10T08:53:17.254999+00:00",
+    "component": {
+      "type": "machine-learning-model",
+      "bom-ref": "chuanli11/Llama-3.2-3B-Instruct-uncensored-b9b373ea-5eb6-5648-8c0b-40b3fdcd46c8",
+      "name": "chuanli11/Llama-3.2-3B-Instruct-uncensored",
+      "externalReferences": [
+        {
+          "url": "https://huggingface.co/chuanli11/Llama-3.2-3B-Instruct-uncensored",
+          "type": "documentation"
+        }
+      ],
+      "modelCard": {
+        "modelParameters": {
+          "task": "text-generation",
+          "architectureFamily": "llama",
+          "modelArchitecture": "LlamaForCausalLM"
+        },
+        "properties": [
+          {
+            "name": "library_name",
+            "value": "transformers"
+          }
+        ]
+      },
+      "authors": [
+        {
+          "name": "chuanli11"
+        }
+      ],
+      "description": "This is an uncensored version of the original [Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct), created using [mlabonne](https://huggingface.co/mlabonne)'s [script](https://colab.research.google.com/drive/1VYm3hOcvCpbGiqKZb141gJwjdmmCcVpR?usp=sharing), which builds on [FailSpy's notebook](https://huggingface.co/failspy/llama-3-70B-Instruct-abliterated/blob/main/ortho_cookbook.ipynb) and the original work from [Andy Arditi et al.](https://colab.research.google.com/drive/1a-aQvKC9avdZpdyBn4jgRQFObTPy1JZw?usp=sharing). The method is discussed in details in this [blog](https://huggingface.co/blog/mlabonne/abliteration) and this [paper](https://arxiv.org/abs/2406.11717).You can play with it at this \ud83e\udd17 [space](https://huggingface.co/spaces/chuanli11/Chat-Llama-3.2-3B-Instruct-uncensored).The model rarely refuses to respond during testing. However, it sometimes provides general information on sensitive topics instead of instructing harmful behaviors, which we found noteworthy.",
+      "tags": [
+        "transformers",
+        "safetensors",
+        "llama",
+        "text-generation",
+        "conversational",
+        "arxiv:2406.11717",
+        "autotrain_compatible",
+        "text-generation-inference",
+        "endpoints_compatible",
+        "region:us"
+      ]
+    }
+  }
+}
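For reviewers who want to sanity-check the added file, below is a minimal Python sketch (assuming the JSON sits in the working directory under the file name shown above) that loads the AIBOM with the standard library and prints the fields it declares. It only checks the CycloneDX envelope keys; full 1.6 schema validation is out of scope here.

```python
import json

# Load the AIBOM added in this PR (file name as shown in the diff above).
with open("chuanli11_Llama-3.2-3B-Instruct-uncensored.json", "r", encoding="utf-8") as f:
    bom = json.load(f)

# Basic sanity checks on the CycloneDX envelope.
assert bom["bomFormat"] == "CycloneDX"
assert bom["specVersion"] == "1.6"

# Pull out the model component described by the BOM and print key fields.
component = bom["metadata"]["component"]
print("model:", component["name"])   # chuanli11/Llama-3.2-3B-Instruct-uncensored
print("task:", component["modelCard"]["modelParameters"]["task"])  # text-generation
print("tags:", ", ".join(component["tags"]))
```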