elselse committed on
Commit dd65ad2 · verified · 1 Parent(s): 6188e73

CIRCL/cwe-parent-vulnerability-classification-roberta-base

Files changed (5)
  1. README.md +43 -43
  2. config.json +52 -52
  3. emissions.csv +1 -1
  4. metrics.json +6 -6
  5. model.safetensors +1 -1
README.md CHANGED
@@ -18,9 +18,9 @@ should probably proofread and complete it, then remove this comment. -->

  This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.3447
- - Accuracy: 0.7727
- - F1 Macro: 0.4235
+ - Loss: 1.9131
+ - Accuracy: 0.7701
+ - F1 Macro: 0.4179

  ## Model description

@@ -51,46 +51,46 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro |
  |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|
- | 3.263 | 1.0 | 25 | 3.2313 | 0.2614 | 0.0345 |
- | 3.1726 | 2.0 | 50 | 2.9923 | 0.1818 | 0.0431 |
- | 3.1008 | 3.0 | 75 | 2.8828 | 0.0 | 0.0 |
- | 2.9805 | 4.0 | 100 | 2.8858 | 0.0114 | 0.0019 |
- | 3.0021 | 5.0 | 125 | 2.8257 | 0.4432 | 0.0626 |
- | 2.8775 | 6.0 | 150 | 2.7730 | 0.0795 | 0.0462 |
- | 2.8805 | 7.0 | 175 | 2.6421 | 0.2841 | 0.1362 |
- | 2.6602 | 8.0 | 200 | 2.6462 | 0.3864 | 0.1366 |
- | 2.5303 | 9.0 | 225 | 2.5584 | 0.3523 | 0.1461 |
- | 2.5236 | 10.0 | 250 | 2.4933 | 0.4205 | 0.1209 |
- | 2.3221 | 11.0 | 275 | 2.3458 | 0.5909 | 0.2232 |
- | 2.1446 | 12.0 | 300 | 2.2679 | 0.625 | 0.2521 |
- | 1.9937 | 13.0 | 325 | 2.1932 | 0.625 | 0.2736 |
- | 1.8521 | 14.0 | 350 | 2.0372 | 0.6477 | 0.2881 |
- | 1.7899 | 15.0 | 375 | 1.9494 | 0.6364 | 0.2679 |
- | 1.5273 | 16.0 | 400 | 1.8457 | 0.6705 | 0.3205 |
- | 1.4178 | 17.0 | 425 | 1.8276 | 0.6477 | 0.2931 |
- | 1.335 | 18.0 | 450 | 1.7690 | 0.6591 | 0.3004 |
- | 1.2685 | 19.0 | 475 | 1.6681 | 0.6705 | 0.3577 |
- | 1.112 | 20.0 | 500 | 1.6399 | 0.6818 | 0.3152 |
- | 1.01 | 21.0 | 525 | 1.5561 | 0.6932 | 0.3255 |
- | 0.9637 | 22.0 | 550 | 1.5008 | 0.7159 | 0.4218 |
- | 0.9571 | 23.0 | 575 | 1.5387 | 0.7045 | 0.3385 |
- | 0.8213 | 24.0 | 600 | 1.5366 | 0.7159 | 0.4043 |
- | 0.7538 | 25.0 | 625 | 1.4691 | 0.75 | 0.3942 |
- | 0.7228 | 26.0 | 650 | 1.4826 | 0.7273 | 0.3872 |
- | 0.7244 | 27.0 | 675 | 1.4789 | 0.7386 | 0.3915 |
- | 0.6746 | 28.0 | 700 | 1.4439 | 0.7727 | 0.4322 |
- | 0.5959 | 29.0 | 725 | 1.4202 | 0.7614 | 0.3942 |
- | 0.5788 | 30.0 | 750 | 1.4339 | 0.7727 | 0.4002 |
- | 0.5718 | 31.0 | 775 | 1.3723 | 0.7955 | 0.4431 |
- | 0.5358 | 32.0 | 800 | 1.4186 | 0.7727 | 0.3812 |
- | 0.5094 | 33.0 | 825 | 1.3722 | 0.7841 | 0.4579 |
- | 0.5003 | 34.0 | 850 | 1.3955 | 0.7614 | 0.3786 |
- | 0.4973 | 35.0 | 875 | 1.3733 | 0.8068 | 0.4635 |
- | 0.4721 | 36.0 | 900 | 1.3447 | 0.7727 | 0.4235 |
- | 0.4457 | 37.0 | 925 | 1.3622 | 0.7955 | 0.4573 |
- | 0.4232 | 38.0 | 950 | 1.3736 | 0.7614 | 0.3986 |
- | 0.4405 | 39.0 | 975 | 1.3683 | 0.7727 | 0.4235 |
- | 0.437 | 40.0 | 1000 | 1.3642 | 0.7614 | 0.3986 |
+ | 3.2726 | 1.0 | 25 | 3.1430 | 0.0230 | 0.0041 |
+ | 3.214 | 2.0 | 50 | 2.9861 | 0.0230 | 0.0041 |
+ | 3.1457 | 3.0 | 75 | 2.9949 | 0.0115 | 0.0019 |
+ | 3.0447 | 4.0 | 100 | 3.0090 | 0.1149 | 0.0265 |
+ | 3.0588 | 5.0 | 125 | 2.9652 | 0.0230 | 0.0039 |
+ | 2.9336 | 6.0 | 150 | 2.9089 | 0.4828 | 0.1081 |
+ | 2.9729 | 7.0 | 175 | 2.9005 | 0.1264 | 0.0606 |
+ | 2.812 | 8.0 | 200 | 2.9174 | 0.3563 | 0.1788 |
+ | 2.6587 | 9.0 | 225 | 2.8268 | 0.3563 | 0.1414 |
+ | 2.5464 | 10.0 | 250 | 2.8296 | 0.3103 | 0.1339 |
+ | 2.4379 | 11.0 | 275 | 2.7762 | 0.2989 | 0.1554 |
+ | 2.2741 | 12.0 | 300 | 2.7595 | 0.4598 | 0.1745 |
+ | 2.1793 | 13.0 | 325 | 2.7483 | 0.4943 | 0.1826 |
+ | 2.0085 | 14.0 | 350 | 2.6646 | 0.4713 | 0.2136 |
+ | 1.9313 | 15.0 | 375 | 2.6414 | 0.6092 | 0.2916 |
+ | 1.7534 | 16.0 | 400 | 2.5186 | 0.6552 | 0.3345 |
+ | 1.6187 | 17.0 | 425 | 2.3736 | 0.6552 | 0.3381 |
+ | 1.5568 | 18.0 | 450 | 2.2908 | 0.6667 | 0.3391 |
+ | 1.4627 | 19.0 | 475 | 2.4101 | 0.6437 | 0.3356 |
+ | 1.2964 | 20.0 | 500 | 2.2791 | 0.6782 | 0.3525 |
+ | 1.2236 | 21.0 | 525 | 2.1636 | 0.6667 | 0.3403 |
+ | 1.1237 | 22.0 | 550 | 2.1584 | 0.6897 | 0.3397 |
+ | 1.0589 | 23.0 | 575 | 2.1262 | 0.6782 | 0.3535 |
+ | 0.952 | 24.0 | 600 | 2.1252 | 0.6782 | 0.3504 |
+ | 0.9137 | 25.0 | 625 | 2.0899 | 0.6667 | 0.3656 |
+ | 0.878 | 26.0 | 650 | 1.9915 | 0.7126 | 0.4012 |
+ | 0.8073 | 27.0 | 675 | 1.9856 | 0.7356 | 0.3857 |
+ | 0.7588 | 28.0 | 700 | 1.9613 | 0.7356 | 0.3737 |
+ | 0.7114 | 29.0 | 725 | 1.9789 | 0.7701 | 0.4103 |
+ | 0.6728 | 30.0 | 750 | 1.9131 | 0.7701 | 0.4179 |
+ | 0.6651 | 31.0 | 775 | 2.0236 | 0.7701 | 0.4231 |
+ | 0.5979 | 32.0 | 800 | 2.0366 | 0.7701 | 0.4668 |
+ | 0.5946 | 33.0 | 825 | 2.0026 | 0.7931 | 0.4478 |
+ | 0.5395 | 34.0 | 850 | 2.0010 | 0.8046 | 0.4544 |
+ | 0.5301 | 35.0 | 875 | 1.9332 | 0.8046 | 0.4500 |
+ | 0.5216 | 36.0 | 900 | 1.9965 | 0.8161 | 0.4966 |
+ | 0.497 | 37.0 | 925 | 1.9930 | 0.8161 | 0.4639 |
+ | 0.5149 | 38.0 | 950 | 1.9813 | 0.8161 | 0.4582 |
+ | 0.5022 | 39.0 | 975 | 1.9775 | 0.8046 | 0.4667 |
+ | 0.4892 | 40.0 | 1000 | 1.9643 | 0.8161 | 0.4688 |


  ### Framework versions
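
The README diff only updates the reported evaluation numbers for the published checkpoint. A minimal sketch of querying that checkpoint with the standard transformers text-classification pipeline; the example sentence and the `top_k` value are illustrative assumptions, not taken from the repo:

```python
from transformers import pipeline

# Load the fine-tuned classifier straight from the Hub repo named in this commit.
clf = pipeline(
    "text-classification",
    model="CIRCL/cwe-parent-vulnerability-classification-roberta-base",
)

# Hypothetical vulnerability description, used only to illustrate the call.
text = "Improper neutralization of user input allows arbitrary SQL commands to run."
print(clf(text, top_k=3))  # three highest-scoring CWE parent classes with scores
```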
config.json CHANGED
@@ -10,62 +10,62 @@
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
- "0": "LABEL_0",
- "1": "LABEL_1",
- "2": "LABEL_2",
- "3": "LABEL_3",
- "4": "LABEL_4",
- "5": "LABEL_5",
- "6": "LABEL_6",
- "7": "LABEL_7",
- "8": "LABEL_8",
- "9": "LABEL_9",
- "10": "LABEL_10",
- "11": "LABEL_11",
- "12": "LABEL_12",
- "13": "LABEL_13",
- "14": "LABEL_14",
- "15": "LABEL_15",
- "16": "LABEL_16",
- "17": "LABEL_17",
- "18": "LABEL_18",
- "19": "LABEL_19",
- "20": "LABEL_20",
- "21": "LABEL_21",
- "22": "LABEL_22",
- "23": "LABEL_23",
- "24": "LABEL_24",
- "25": "LABEL_25"
+ "0": "1025",
+ "1": "1071",
+ "2": "131",
+ "3": "138",
+ "4": "284",
+ "5": "285",
+ "6": "435",
+ "7": "436",
+ "8": "595",
+ "9": "657",
+ "10": "664",
+ "11": "682",
+ "12": "684",
+ "13": "691",
+ "14": "693",
+ "15": "697",
+ "16": "703",
+ "17": "706",
+ "18": "707",
+ "19": "710",
+ "20": "74",
+ "21": "754",
+ "22": "829",
+ "23": "862",
+ "24": "913",
+ "25": "94"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
- "LABEL_0": 0,
- "LABEL_1": 1,
- "LABEL_10": 10,
- "LABEL_11": 11,
- "LABEL_12": 12,
- "LABEL_13": 13,
- "LABEL_14": 14,
- "LABEL_15": 15,
- "LABEL_16": 16,
- "LABEL_17": 17,
- "LABEL_18": 18,
- "LABEL_19": 19,
- "LABEL_2": 2,
- "LABEL_20": 20,
- "LABEL_21": 21,
- "LABEL_22": 22,
- "LABEL_23": 23,
- "LABEL_24": 24,
- "LABEL_25": 25,
- "LABEL_3": 3,
- "LABEL_4": 4,
- "LABEL_5": 5,
- "LABEL_6": 6,
- "LABEL_7": 7,
- "LABEL_8": 8,
- "LABEL_9": 9
+ "1025": 0,
+ "1071": 1,
+ "131": 2,
+ "138": 3,
+ "284": 4,
+ "285": 5,
+ "435": 6,
+ "436": 7,
+ "595": 8,
+ "657": 9,
+ "664": 10,
+ "682": 11,
+ "684": 12,
+ "691": 13,
+ "693": 14,
+ "697": 15,
+ "703": 16,
+ "706": 17,
+ "707": 18,
+ "710": 19,
+ "74": 20,
+ "754": 21,
+ "829": 22,
+ "862": 23,
+ "913": 24,
+ "94": 25
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
emissions.csv CHANGED
@@ -1,2 +1,2 @@
  timestamp,project_name,run_id,experiment_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
- 2025-09-02T09:27:51,codecarbon,e4ad32bd-2245-4e4a-8192-6a84da1b81b1,5b0fa12a-3dd7-45bb-9766-cc326314d9f1,459.1436821189709,0.007866215688287334,1.7132361817512892e-05,42.5,439.47611356781334,94.34468507766725,0.005415837676420698,0.05729426472426269,0.012019058731787975,0.07472916113247137,Luxembourg,LUX,luxembourg,,,Linux-6.8.0-71-generic-x86_64-with-glibc2.39,3.12.3,2.8.4,64,AMD EPYC 9124 16-Core Processor,2,2 x NVIDIA L40S,6.1294,49.6113,251.5858268737793,machine,N,1.0
+ 2025-09-02T14:16:22,codecarbon,9ea547cb-df64-4219-8082-9f9f4afeb1fc,5b0fa12a-3dd7-45bb-9766-cc326314d9f1,466.10610487288795,0.007993244659741786,1.7148980835429365e-05,42.5,188.53936351148204,94.34468507766725,0.005495822849665839,0.05824084603709423,0.012199269284694121,0.0759359381714542,Luxembourg,LUX,luxembourg,,,Linux-6.8.0-71-generic-x86_64-with-glibc2.39,3.12.3,2.8.4,64,AMD EPYC 9124 16-Core Processor,2,2 x NVIDIA L40S,6.1294,49.6113,251.5858268737793,machine,N,1.0
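
The replaced row is the output of CodeCarbon, which appends one line per tracked run to emissions.csv. A rough sketch of how such a row gets produced around a training call; `run_training()` is a placeholder standing in for the actual fine-tuning invocation:

```python
from codecarbon import EmissionsTracker

# project_name fills the second CSV column; the tracker appends one row
# to emissions.csv in output_dir per tracked run.
tracker = EmissionsTracker(project_name="codecarbon", output_dir=".")
tracker.start()
try:
    run_training()  # placeholder for the real Trainer.train() call
finally:
    emissions_kg = tracker.stop()  # writes the row and returns estimated kg CO2eq
    print(f"Estimated emissions: {emissions_kg:.6f} kg CO2eq")
```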
metrics.json CHANGED
@@ -1,9 +1,9 @@
  {
- "eval_loss": 1.3446602821350098,
- "eval_accuracy": 0.7727272727272727,
- "eval_f1_macro": 0.4235300491458127,
- "eval_runtime": 0.3068,
- "eval_samples_per_second": 286.859,
- "eval_steps_per_second": 9.779,
+ "eval_loss": 1.9131312370300293,
+ "eval_accuracy": 0.7701149425287356,
+ "eval_f1_macro": 0.4179426520505981,
+ "eval_runtime": 0.2644,
+ "eval_samples_per_second": 329.062,
+ "eval_steps_per_second": 11.347,
  "epoch": 40.0
  }
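
The gap between `eval_accuracy` (≈0.77) and `eval_f1_macro` (≈0.42) is expected under macro averaging, which weights every CWE class equally regardless of its support. A toy illustration of that effect with scikit-learn; the labels below are invented:

```python
from sklearn.metrics import accuracy_score, f1_score

# Invented labels: class 0 dominates, class 2 is never predicted.
y_true = [0, 0, 0, 0, 0, 0, 1, 1, 2, 2]
y_pred = [0, 0, 0, 0, 0, 0, 1, 0, 0, 1]

print(accuracy_score(y_true, y_pred))             # high: the majority class carries it
print(f1_score(y_true, y_pred, average="macro"))  # low: missed rare classes drag it down
```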
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a8d36e5f15700563365f64946238c99b4e23ad7d422cf8cc6e5ab58c8140f5af
+ oid sha256:a154d0659330d059cf644e7e1bbab9af00ed782b1603ab1676428b722553a0f2
  size 498686648
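
Only the Git LFS pointer changes here: the new weights keep the same size but carry a new sha256 oid. A minimal sketch of checking a downloaded checkpoint against that digest, assuming `huggingface_hub` is available:

```python
import hashlib
from huggingface_hub import hf_hub_download

# New oid from the LFS pointer in this commit.
expected = "a154d0659330d059cf644e7e1bbab9af00ed782b1603ab1676428b722553a0f2"

path = hf_hub_download(
    repo_id="CIRCL/cwe-parent-vulnerability-classification-roberta-base",
    filename="model.safetensors",
)

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print(h.hexdigest() == expected)  # True if the download matches the pointer
```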