wangrongsheng committed · Commit 42de9a6
Parent(s): 65133da

commit from root

This view is limited to 50 files because the commit contains too many changes.
- ppo-lora/README.md +3 -0
- ppo-lora/adapter_config.json +20 -0
- ppo-lora/adapter_model.bin +3 -0
- ppo-lora/checkpoint-1000/README.md +3 -0
- ppo-lora/checkpoint-1000/adapter_config.json +20 -0
- ppo-lora/checkpoint-1000/adapter_model.bin +3 -0
- ppo-lora/checkpoint-1000/finetuning_args.json +13 -0
- ppo-lora/checkpoint-1000/reward/adapter_config.json +20 -0
- ppo-lora/checkpoint-1000/reward/adapter_model.bin +3 -0
- ppo-lora/checkpoint-1000/training_args.bin +3 -0
- ppo-lora/checkpoint-1000/value_head.bin +3 -0
- ppo-lora/checkpoint-2000/README.md +3 -0
- ppo-lora/checkpoint-2000/adapter_config.json +20 -0
- ppo-lora/checkpoint-2000/adapter_model.bin +3 -0
- ppo-lora/checkpoint-2000/finetuning_args.json +13 -0
- ppo-lora/checkpoint-2000/reward/adapter_config.json +20 -0
- ppo-lora/checkpoint-2000/reward/adapter_model.bin +3 -0
- ppo-lora/checkpoint-2000/training_args.bin +3 -0
- ppo-lora/checkpoint-2000/value_head.bin +3 -0
- ppo-lora/checkpoint-3000/README.md +3 -0
- ppo-lora/checkpoint-3000/adapter_config.json +20 -0
- ppo-lora/checkpoint-3000/adapter_model.bin +3 -0
- ppo-lora/checkpoint-3000/finetuning_args.json +13 -0
- ppo-lora/checkpoint-3000/reward/adapter_config.json +20 -0
- ppo-lora/checkpoint-3000/reward/adapter_model.bin +3 -0
- ppo-lora/checkpoint-3000/training_args.bin +3 -0
- ppo-lora/checkpoint-3000/value_head.bin +3 -0
- ppo-lora/checkpoint-4000/README.md +3 -0
- ppo-lora/checkpoint-4000/adapter_config.json +20 -0
- ppo-lora/checkpoint-4000/adapter_model.bin +3 -0
- ppo-lora/checkpoint-4000/finetuning_args.json +13 -0
- ppo-lora/checkpoint-4000/reward/adapter_config.json +20 -0
- ppo-lora/checkpoint-4000/reward/adapter_model.bin +3 -0
- ppo-lora/checkpoint-4000/training_args.bin +3 -0
- ppo-lora/checkpoint-4000/value_head.bin +3 -0
- ppo-lora/checkpoint-5000/README.md +3 -0
- ppo-lora/checkpoint-5000/adapter_config.json +20 -0
- ppo-lora/checkpoint-5000/adapter_model.bin +3 -0
- ppo-lora/checkpoint-5000/finetuning_args.json +13 -0
- ppo-lora/checkpoint-5000/reward/adapter_config.json +20 -0
- ppo-lora/checkpoint-5000/reward/adapter_model.bin +3 -0
- ppo-lora/checkpoint-5000/training_args.bin +3 -0
- ppo-lora/checkpoint-5000/value_head.bin +3 -0
- ppo-lora/checkpoint-6000/README.md +3 -0
- ppo-lora/checkpoint-6000/adapter_config.json +20 -0
- ppo-lora/checkpoint-6000/adapter_model.bin +3 -0
- ppo-lora/checkpoint-6000/finetuning_args.json +13 -0
- ppo-lora/checkpoint-6000/reward/adapter_config.json +20 -0
- ppo-lora/checkpoint-6000/reward/adapter_model.bin +3 -0
- ppo-lora/checkpoint-6000/training_args.bin +3 -0
ppo-lora/README.md ADDED
@@ -0,0 +1,3 @@
+---
+library_name: peft
+---
ppo-lora/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
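The adapter_config.json above is a standard PEFT LoRA configuration: rank 8, alpha 32, dropout 0.1, targeting the q_proj and v_proj attention projections of a LLaMA-13B base. A minimal sketch of attaching this adapter with the peft library follows; the local paths are assumptions (point them at your own copies of the base model and this repository), not something this commit specifies:

```python
# Minimal sketch: attach the ppo-lora adapter to its base model with peft.
# Paths are assumptions; substitute your local llama-13b-hf weights and a
# local clone of this repository.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("llama-13b-hf/")
tokenizer = AutoTokenizer.from_pretrained("llama-13b-hf/")

# Reads adapter_config.json and adapter_model.bin from the adapter directory.
model = PeftModel.from_pretrained(base, "ppo-lora/")
model.eval()
```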
ppo-lora/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55c6ec1b08f5cea38e43c3e4eb11b0be787f3cd86a46cf09fe0e6e5048737dea
+size 26269517
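The .bin entries in this diff are Git LFS pointer files, not the weights themselves: each records only the LFS spec version, the SHA-256 of the real object, and its size in bytes (here about 26 MB of LoRA weights). A hedged sketch of resolving such a pointer to the actual file, assuming this repository is hosted on the Hugging Face Hub under a repo id like "wangrongsheng/ppo-lora" (the repo id is an assumption, not stated in the commit):

```python
# Sketch: fetch the real file behind a Git LFS pointer via huggingface_hub.
# The repo_id is an assumption; substitute the actual Hub repository.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="wangrongsheng/ppo-lora",       # assumed repo id
    filename="ppo-lora/adapter_model.bin",  # path as it appears in this commit
)
print(path)  # local cache path of the downloaded weights
```

Cloning the repository with `git lfs` installed and running `git lfs pull` achieves the same thing.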
ppo-lora/checkpoint-1000/README.md ADDED
@@ -0,0 +1,3 @@
+---
+library_name: peft
+---
ppo-lora/checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-1000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c213d0e4f8d874939ab229e712e25c78ddc973176bd844440186e4b8977c4651
+size 26269517
ppo-lora/checkpoint-1000/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+{
+  "finetuning_type": "lora",
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "lora_rank": 8,
+  "lora_target": [
+    "q_proj",
+    "v_proj"
+  ],
+  "name_module_trainable": "mlp",
+  "num_hidden_layers": 32,
+  "num_layer_trainable": 3
+}
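finetuning_args.json restates the LoRA hyperparameters of adapter_config.json in the training framework's own naming ("lora_rank" vs. "r", "lora_target" vs. "target_modules"); the "name_module_trainable"/"num_layer_trainable" fields belong to an alternative partial-freeze tuning mode and are inert when "finetuning_type" is "lora". A sketch of the equivalent peft LoraConfig these arguments imply (the mapping is an inference from the field names, not code shown in this commit):

```python
# Sketch: the peft LoraConfig implied by finetuning_args.json.
# Comments on the right show the corresponding finetuning_args key.
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,                                  # "lora_rank"
    lora_alpha=32.0,                      # "lora_alpha"
    lora_dropout=0.1,                     # "lora_dropout"
    target_modules=["q_proj", "v_proj"],  # "lora_target"
)
```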
ppo-lora/checkpoint-1000/reward/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-1000/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+size 443
ppo-lora/checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf91d7ac2ddda7e1272b902408951545188f843d4da6b19c2e572a6c86a6075b
+size 3274
ppo-lora/checkpoint-1000/value_head.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1da5b73c9aa13a16f0875a3c73b3ec6df71a08d94ce82197b9a3d2f077f30a95
+size 21491
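value_head.bin holds the small scalar value head (about 21 KB) that PPO trains on top of the language model to estimate per-token state values; it is stored separately from the LoRA adapter weights and changes between checkpoints while training_args.bin stays fixed. A hedged sketch of the kind of model that carries such a head, using TRL's AutoModelForCausalLMWithValueHead; how this repository's value_head.bin was actually produced is not shown in the commit, so this is illustrative only:

```python
# Sketch: a causal LM with a scalar value head, as used by PPO in trl.
# trl attaches an nn.Linear(hidden_size, 1) head ("v_head") on top of the
# base model; its weights correspond in spirit to a file like value_head.bin.
import torch
from trl import AutoModelForCausalLMWithValueHead

model = AutoModelForCausalLMWithValueHead.from_pretrained("llama-13b-hf/")

# A PPO step scores every token position with the value head:
input_ids = torch.tensor([[1, 2, 3]])
logits, _, values = model(input_ids)  # values has shape (batch, seq_len)
```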
ppo-lora/checkpoint-2000/README.md ADDED
@@ -0,0 +1,3 @@
+---
+library_name: peft
+---
ppo-lora/checkpoint-2000/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-2000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d0fbeb1c6409498c16a2c2f2e70b7446bf4ac1234fb8dc2f8df9806b2462ddb
+size 26269517
ppo-lora/checkpoint-2000/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+{
+  "finetuning_type": "lora",
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "lora_rank": 8,
+  "lora_target": [
+    "q_proj",
+    "v_proj"
+  ],
+  "name_module_trainable": "mlp",
+  "num_hidden_layers": 32,
+  "num_layer_trainable": 3
+}
ppo-lora/checkpoint-2000/reward/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-2000/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+size 443
ppo-lora/checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf91d7ac2ddda7e1272b902408951545188f843d4da6b19c2e572a6c86a6075b
+size 3274
ppo-lora/checkpoint-2000/value_head.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9c26a0f0760505ea0f2efdc4bd9d8f0ceb4bd4a0d23211e24d96b907f4c0028
+size 21491
ppo-lora/checkpoint-3000/README.md ADDED
@@ -0,0 +1,3 @@
+---
+library_name: peft
+---
ppo-lora/checkpoint-3000/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-3000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ad9d239c7787f8d73de9875ae02a52464859764422d88a4970e344b4fd9faee
+size 26269517
ppo-lora/checkpoint-3000/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+{
+  "finetuning_type": "lora",
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "lora_rank": 8,
+  "lora_target": [
+    "q_proj",
+    "v_proj"
+  ],
+  "name_module_trainable": "mlp",
+  "num_hidden_layers": 32,
+  "num_layer_trainable": 3
+}
ppo-lora/checkpoint-3000/reward/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-3000/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+size 443
ppo-lora/checkpoint-3000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf91d7ac2ddda7e1272b902408951545188f843d4da6b19c2e572a6c86a6075b
+size 3274
ppo-lora/checkpoint-3000/value_head.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3817aaf3884ab53bafa9a0a901eb2cc3329b55238d28a98b937ce5db5e888a9
+size 21491
ppo-lora/checkpoint-4000/README.md ADDED
@@ -0,0 +1,3 @@
+---
+library_name: peft
+---
ppo-lora/checkpoint-4000/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-4000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e11cb62dcf8c47decf25bb4525c4921aa48aee0815306027ec7d96a32a459ae
+size 26269517
ppo-lora/checkpoint-4000/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+{
+  "finetuning_type": "lora",
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "lora_rank": 8,
+  "lora_target": [
+    "q_proj",
+    "v_proj"
+  ],
+  "name_module_trainable": "mlp",
+  "num_hidden_layers": 32,
+  "num_layer_trainable": 3
+}
ppo-lora/checkpoint-4000/reward/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-4000/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+size 443
ppo-lora/checkpoint-4000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf91d7ac2ddda7e1272b902408951545188f843d4da6b19c2e572a6c86a6075b
+size 3274
ppo-lora/checkpoint-4000/value_head.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fca5a9037d28fec50bc06f7b4768851167c5dbac08df3d6f579c0fc86e67c530
+size 21491
ppo-lora/checkpoint-5000/README.md ADDED
@@ -0,0 +1,3 @@
+---
+library_name: peft
+---
ppo-lora/checkpoint-5000/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-5000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06839ee491e98e367765e336e05ec2ea6133406f411a33ae8b054c77dddb1513
+size 26269517
ppo-lora/checkpoint-5000/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+{
+  "finetuning_type": "lora",
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "lora_rank": 8,
+  "lora_target": [
+    "q_proj",
+    "v_proj"
+  ],
+  "name_module_trainable": "mlp",
+  "num_hidden_layers": 32,
+  "num_layer_trainable": 3
+}
ppo-lora/checkpoint-5000/reward/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-5000/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+size 443
ppo-lora/checkpoint-5000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf91d7ac2ddda7e1272b902408951545188f843d4da6b19c2e572a6c86a6075b
+size 3274
ppo-lora/checkpoint-5000/value_head.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:404a4c61cdfa78d1d30457f1956181a78bd80eb3396e99ef0ed0cc40570d3ea3
+size 21491
ppo-lora/checkpoint-6000/README.md ADDED
@@ -0,0 +1,3 @@
+---
+library_name: peft
+---
ppo-lora/checkpoint-6000/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-6000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0424271468f1a8d78da73c08a499c2e6a19aa5df3104bb1f83d462e37d39300e
+size 26269517
ppo-lora/checkpoint-6000/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+{
+  "finetuning_type": "lora",
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "lora_rank": 8,
+  "lora_target": [
+    "q_proj",
+    "v_proj"
+  ],
+  "name_module_trainable": "mlp",
+  "num_hidden_layers": 32,
+  "num_layer_trainable": 3
+}
ppo-lora/checkpoint-6000/reward/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name_or_path": "llama-13b-hf/",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
ppo-lora/checkpoint-6000/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+size 443
ppo-lora/checkpoint-6000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf91d7ac2ddda7e1272b902408951545188f843d4da6b19c2e572a6c86a6075b
+size 3274