mjoshs committed
Commit 9bdb4a8 · verified · 1 Parent(s): 931f757

Upload adapter_config.json with huggingface_hub

Files changed (1)
  1. adapter_config.json +40 -0
adapter_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "adapter_path": "qwen_lora_silicon_photonics",
+   "batch_size": 1,
+   "config": null,
+   "data": "qwen_lora_data",
+   "fine_tune_type": "lora",
+   "grad_checkpoint": true,
+   "iters": 150,
+   "learning_rate": 5e-05,
+   "lora_parameters": {
+     "rank": 8,
+     "dropout": 0.0,
+     "scale": 20.0
+   },
+   "lr_schedule": null,
+   "mask_prompt": false,
+   "max_seq_length": 2048,
+   "model": "qwen2.5-7b-mlx",
+   "num_layers": 20,
+   "optimizer": "adam",
+   "optimizer_config": {
+     "adam": {},
+     "adamw": {},
+     "muon": {},
+     "sgd": {},
+     "adafactor": {}
+   },
+   "project_name": null,
+   "report_to": null,
+   "resume_adapter_file": null,
+   "save_every": 30,
+   "seed": 42,
+   "steps_per_eval": 200,
+   "steps_per_report": 10,
+   "test": false,
+   "test_batches": 500,
+   "train": true,
+   "val_batches": 25,
+   "wandb": null
+ }
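
This file is the training configuration that mlx-lm's LoRA trainer saves next to the adapter weights: a rank-8 LoRA (scale 20.0, no dropout) applied to 20 layers of qwen2.5-7b-mlx, trained for 150 iterations at batch size 1. Below is a minimal sketch of consuming the resulting adapter with mlx-lm's load/generate API; the model path and prompt here are placeholders, not part of this commit.

# A minimal sketch (assumption, not from this repo): apply the adapter
# produced under this config and run a quick generation with mlx-lm.
from mlx_lm import load, generate

# load() applies the adapter weights found in adapter_path
# ("qwen_lora_silicon_photonics" per the config above) to the base model.
model, tokenizer = load(
    "qwen2.5-7b-mlx",                              # placeholder base-model path
    adapter_path="qwen_lora_silicon_photonics",
)

# Hypothetical prompt, chosen to match the adapter's apparent domain.
prompt = "Explain the role of a ring resonator in silicon photonics."
print(generate(model, tokenizer, prompt=prompt, max_tokens=256))

The low rank and small layer count keep the trainable parameter footprint modest, which fits a short 150-iteration run on a single Apple-silicon machine.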