NeuralDaredevil-8B-abliterated / mlabonne_NeuralDaredevil-8B-abliterated.json
{
  "bomFormat": "CycloneDX",
  "specVersion": "1.6",
  "serialNumber": "urn:uuid:ec2a096a-6435-4b15-94a2-3cdfe51237d6",
  "version": 1,
  "metadata": {
    "timestamp": "2025-07-10T08:46:54.205541+00:00",
    "component": {
      "type": "machine-learning-model",
      "bom-ref": "mlabonne/NeuralDaredevil-8B-abliterated-137a94eb-5b88-59d1-88c3-66d6367d780d",
      "name": "mlabonne/NeuralDaredevil-8B-abliterated",
      "externalReferences": [
        {
          "url": "https://huggingface.co/mlabonne/NeuralDaredevil-8B-abliterated",
          "type": "documentation"
        }
      ],
      "modelCard": {
        "modelParameters": {
          "task": "text-generation",
          "architectureFamily": "llama",
          "modelArchitecture": "LlamaForCausalLM",
          "datasets": [
            {
              "ref": "mlabonne/orpo-dpo-mix-40k-898bbffe-f59a-53fe-a5b7-846dc484fbab"
            }
          ]
        },
        "properties": [
          {
            "name": "library_name",
            "value": "transformers"
          }
        ],
        "quantitativeAnalysis": {
          "performanceMetrics": [
            {
              "slice": "dataset: ai2_arc, split: test, config: ARC-Challenge",
              "type": "acc_norm",
              "value": 69.28
            },
            {
              "slice": "dataset: hellaswag, split: validation",
              "type": "acc_norm",
              "value": 85.05
            },
            {
              "slice": "dataset: cais/mmlu, split: test, config: all",
              "type": "acc",
              "value": 69.1
            },
            {
              "slice": "dataset: truthful_qa, split: validation, config: multiple_choice",
              "type": "mc2",
              "value": 60
            },
            {
              "slice": "dataset: winogrande, split: validation, config: winogrande_xl",
              "type": "acc",
              "value": 78.69
            },
            {
              "slice": "dataset: gsm8k, split: test, config: main",
              "type": "acc",
              "value": 71.8
            }
          ]
        }
      },
      "authors": [
        {
          "name": "mlabonne"
        }
      ],
      "licenses": [
        {
          "license": {
            "name": "llama3"
          }
        }
      ],
      "tags": [
        "transformers",
        "safetensors",
        "llama",
        "text-generation",
        "dpo",
        "conversational",
        "dataset:mlabonne/orpo-dpo-mix-40k",
        "license:llama3",
        "model-index",
        "autotrain_compatible",
        "text-generation-inference",
        "endpoints_compatible",
        "region:us"
      ]
    }
  },
  "components": [
    {
      "type": "data",
      "bom-ref": "mlabonne/orpo-dpo-mix-40k-898bbffe-f59a-53fe-a5b7-846dc484fbab",
      "name": "mlabonne/orpo-dpo-mix-40k",
      "data": [
        {
          "type": "dataset",
          "bom-ref": "mlabonne/orpo-dpo-mix-40k-898bbffe-f59a-53fe-a5b7-846dc484fbab",
          "name": "mlabonne/orpo-dpo-mix-40k",
          "contents": {
            "url": "https://huggingface.co/datasets/mlabonne/orpo-dpo-mix-40k",
            "properties": [
              {
                "name": "task_categories",
                "value": "text-generation"
              },
              {
                "name": "language",
                "value": "en"
              },
              {
                "name": "pretty_name",
                "value": "ORPO-DPO-mix-40k"
              },
              {
                "name": "configs",
                "value": "Name of the dataset subset: default {\"split\": \"train\", \"path\": \"data/train-*\"}"
              },
              {
                "name": "license",
                "value": "apache-2.0"
              }
            ]
          },
          "governance": {
            "owners": [
              {
                "organization": {
                  "name": "mlabonne",
                  "url": "https://huggingface.co/mlabonne"
                }
              }
            ]
          },
"description": "\n\t\n\t\t\n\t\tORPO-DPO-mix-40k v1.2\n\t\n\n\nThis dataset is designed for ORPO or DPO training.\nSee Fine-tune Llama 3 with ORPO for more information about how to use it.\nIt is a combination of the following high-quality DPO datasets:\n\nargilla/Capybara-Preferences: highly scored chosen answers >=5 (7,424 samples)argilla/distilabel-intel-orca-dpo-pairs: highly scored chosen answers >=9, not in GSM8K (2,299 samples)\nargilla/ultrafeedback-binarized-preferences-cleaned: highly scored chosen answers >=5 (22\u2026 See the full description on the dataset page: https://huggingface.co/datasets/mlabonne/orpo-dpo-mix-40k."
}
]
}
]
}
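
For reference, a minimal sketch of how this AIBOM could be consumed, assuming Python 3 with only the standard-library json module and a local copy of the file saved under the name shown above (adjust the path as needed). The field paths used here (metadata.component.modelCard.quantitativeAnalysis.performanceMetrics and the top-level components list) are taken directly from the document; nothing else about the generator's API is assumed.

import json

# Load the CycloneDX 1.6 AIBOM from disk.
# Filename taken from the page title; adjust the path for your local copy.
with open("mlabonne_NeuralDaredevil-8B-abliterated.json", encoding="utf-8") as f:
    bom = json.load(f)

model = bom["metadata"]["component"]
card = model["modelCard"]
print(f"Model: {model['name']} ({card['modelParameters']['modelArchitecture']})")

# Benchmark scores recorded under the model card's quantitative analysis.
for metric in card["quantitativeAnalysis"]["performanceMetrics"]:
    print(f"  {metric['slice']}: {metric['type']} = {metric['value']}")

# Dataset components referenced by the model (linked via bom-ref).
for component in bom.get("components", []):
    if component["type"] == "data":
        print(f"Dataset: {component['name']}")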