spelt tloen committed on
Commit 8b1620c · 0 Parent(s)

Duplicate from tloen/alpaca-lora-7b

Co-authored-by: Eric J. Wang <[email protected]>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +34 -0
  3. adapter_config.json +20 -0
  4. adapter_model.bin +3 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,34 @@
+ ---
+ license: mit
+ datasets:
+ - yahma/alpaca-cleaned
+ duplicated_from: tloen/alpaca-lora-7b
+ ---
+
+ This repo contains a low-rank adapter for LLaMA-7b
+ fit on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset.
+
+ This version of the weights was trained with the following hyperparameters:
+
+ - Epochs: 10 (load from best epoch)
+ - Batch size: 128
+ - Cutoff length: 512
+ - Learning rate: 3e-4
+ - Lora _r_: 16
+ - Lora target modules: q_proj, k_proj, v_proj, o_proj
+
+ That is:
+
+ ```
+ python finetune.py \
+     --base_model='decapoda-research/llama-7b-hf' \
+     --num_epochs=10 \
+     --cutoff_len=512 \
+     --group_by_length \
+     --output_dir='./lora-alpaca-512-qkvo' \
+     --lora_target_modules='[q_proj,k_proj,v_proj,o_proj]' \
+     --lora_r=16 \
+     --micro_batch_size=8
+ ```
+
+ Instructions for running it can be found at https://github.com/tloen/alpaca-lora.
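
For reference, this adapter loads on top of the base model with the Hugging Face `peft` library. A minimal sketch, assuming `transformers` and `peft` are installed and using the repo id from the `duplicated_from` field above (8-bit loading additionally assumes `bitsandbytes`):

```
# Sketch: apply this low-rank adapter to the LLaMA-7b base model for inference.
from transformers import LlamaForCausalLM, LlamaTokenizer
from peft import PeftModel

base = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",
    load_in_8bit=True,   # optional; cuts memory use, requires bitsandbytes
    device_map="auto",
)
tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")

# PeftModel wraps the base model and injects the LoRA weights from this repo.
model = PeftModel.from_pretrained(base, "tloen/alpaca-lora-7b")
model.eval()
```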
adapter_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "base_model_name_or_path": "decapoda-research/llama-7b-hf",
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "target_modules": [
+     "q_proj",
+     "k_proj",
+     "v_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
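
This file is what `peft` reads when the adapter is loaded; the equivalent config can also be built programmatically. A sketch mirroring the JSON fields above (`base_model_name_or_path` is filled in by `peft` at save time, so it is omitted here):

```
# Sketch: the LoraConfig corresponding to adapter_config.json above.
from peft import LoraConfig

config = LoraConfig(
    r=16,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    bias="none",
    task_type="CAUSAL_LM",
    inference_mode=True,
)
```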
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e7187f51fbdeff8815046d30f0a325e43491040e6eac8cec5e2ba64f1e87807
+ size 67201357
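
The weights themselves live in Git LFS; the pointer above records only their sha256 digest and byte size. A sketch of fetching the file with `huggingface_hub` and checking it against the pointer (the repo id again follows the commit message):

```
# Sketch: download adapter_model.bin and verify it against the LFS pointer above.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="tloen/alpaca-lora-7b", filename="adapter_model.bin")

digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
assert digest == "2e7187f51fbdeff8815046d30f0a325e43491040e6eac8cec5e2ba64f1e87807"
```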