ayuzawa committed · verified
Commit 81f4b8d · 1 Parent(s): b4cf7a8

Model save

README.md CHANGED
@@ -42,7 +42,7 @@ The following hyperparameters were used during training:
  - optimizer: Use OptimizerNames.ADAMW_HF with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: linear
  - lr_scheduler_warmup_steps: 2
- - num_epochs: 1
+ - num_epochs: 3
  - mixed_precision_training: Native AMP
 
 ### Training results
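
The README hunk above bumps num_epochs from 1 to 3 while leaving the rest of the recipe untouched. A minimal sketch of how these hyperparameters map onto `transformers.TrainingArguments` follows; `output_dir` is a hypothetical placeholder, and the learning rate is not recorded in this diff, so it is omitted.

```python
# Sketch only: TrainingArguments mirroring the README hyperparameters
# in this commit. Anything not shown in the diff above is an assumption.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="outputs",        # hypothetical path, not from the diff
    optim="adamw_hf",            # OptimizerNames.ADAMW_HF
    adam_beta1=0.9,              # betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_steps=2,
    num_train_epochs=3,          # raised from 1 in this commit
    fp16=True,                   # "Native AMP" mixed-precision training
)
```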
adapter_config.json CHANGED
@@ -26,13 +26,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "gate_proj",
-    "k_proj",
-    "v_proj",
-    "up_proj",
     "q_proj",
+    "down_proj",
     "o_proj",
-    "down_proj"
+    "up_proj",
+    "k_proj",
+    "v_proj",
+    "gate_proj"
   ],
   "task_type": null,
   "use_dora": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d41d25df148987ef0c59580ed5c8ab70e453887f7380e65aa1c8df4728aa658
+oid sha256:6f4f8c67ae8f013a461c79314ff34f0bb5c1d9854b596c000a41300e7a313b79
 size 125866776
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16fedcbe9fda65bb0f96d12da2ee23d15b2943987da9d0c9eee73191d6985587
+oid sha256:84e999e3244a488ff978b7bbc08d32a5624d64be19592b2aeafd106d86ce3067
 size 5304