Add LoRA adapter configuration
adapter_config.json CHANGED (+14 -15)
@@ -1,19 +1,19 @@
 {
-  "task_type": "
+  "task_type": "CAUSAL_LM",
   "peft_type": "LORA",
   "auto_mapping": null,
   "base_model_name_or_path": "Qwen/Qwen3-32B",
   "revision": null,
-  "inference_mode":
+  "inference_mode": false,
   "r": 32,
   "target_modules": [
-    "
-    "v_proj",
-    "down_proj",
-    "up_proj",
+    "o_proj",
     "k_proj",
+    "up_proj",
+    "v_proj",
+    "gate_proj",
     "q_proj",
-    "
+    "down_proj"
   ],
   "exclude_modules": null,
   "lora_alpha": 32,
@@ -39,12 +39,11 @@
     "ephemeral_gpu_offload": false
   },
   "lora_bias": false,
-  "
-
-  "
-  "
-  "
-  "
-
-  "license": "apache-2.0"
+  "_verl_training_info": {
+    "algorithm": "GRPO",
+    "task": "verilog_code_generation",
+    "original_base_model_path": "/dev/shm/verl-cache/49dc1026f3bd52528db07ca11dcb8ffe/Qwen3-32B",
+    "lora_rank": 32,
+    "lora_alpha": 32
+  }
 }
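For reference, the updated config targets every attention and MLP projection in each Qwen3 decoder layer at rank 32 with alpha 32. A minimal sketch of a matching peft LoraConfig is below; only fields visible in this diff are set, and everything else is assumed to keep its peft default (lora_dropout, bias, and so on):

# Minimal sketch of a LoraConfig matching the adapter_config.json above.
# Only fields shown in the diff are set; all others are assumed to keep
# their peft defaults.
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,  # serializes as "task_type": "CAUSAL_LM"
    inference_mode=False,          # "inference_mode": false
    r=32,                          # LoRA rank
    lora_alpha=32,                 # scaling factor (alpha / r = 1.0 here)
    target_modules=[
        # all attention projections...
        "q_proj", "k_proj", "v_proj", "o_proj",
        # ...and all MLP projections of each decoder layer
        "gate_proj", "up_proj", "down_proj",
    ],
)

Passing this config to get_peft_model(base_model, lora_config) would inject the same low-rank matrices into a loaded Qwen/Qwen3-32B and train only those.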
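Because base_model_name_or_path points at the hub id rather than the /dev/shm/verl-cache/... path recorded under _verl_training_info, the adapter can be applied directly on top of the public base checkpoint. A usage sketch, assuming the adapter weights are hosted at the hypothetical repo id your-org/qwen3-32b-verilog-lora:

# Usage sketch: apply the adapter to the base model for inference.
# "your-org/qwen3-32b-verilog-lora" is a hypothetical placeholder for
# wherever this adapter is actually hosted.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen3-32B",          # matches "base_model_name_or_path"
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "your-org/qwen3-32b-verilog-lora")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-32B")

# Optionally fold the LoRA deltas into the base weights so the model
# serves like a plain dense checkpoint:
# model = model.merge_and_unload()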