adamo1139 committed on
Commit 56338bb
1 Parent(s): b2a3432

Upload README.md

Files changed (1): README.md (+166, −3)
README.md CHANGED

The previous front matter (`license: other`, `license_name: yi-license`, `license_link: LICENSE`) is replaced by the full model card below.

---
library_name: peft
tags:
- generated_from_trainer
base_model: 01-ai/Yi-6B-200K
model-index:
- name: qlora-yi-6b-200k-rawrr-run2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.3.0`
```yaml
base_model: ./yi-6b-200k
base_model_config: ./yi-6b-200k
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
is_mistral_derived_model: false
is_llama_derived_model: true

load_in_8bit: false
load_in_4bit: true

bnb_config_kwargs:
  llm_int8_has_fp16_weight: false
  bnb_4bit_quant_type: nf4
  bnb_4bit_use_double_quant: true

torch_dtype: bf16
strict: false
rl: true
datasets:
  - path: /..../axolotl/datasets/rawrr_v1/
    split: train
    type: apply_chatml
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
adapter: qlora
lora_model_dir:
sequence_len: 900
sample_packing: false
lora_r: 16
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
  - q_proj
  - v_proj
  - k_proj
  - o_proj
  - gate_proj
  - down_proj
  - up_proj
lora_target_linear: true
lora_fan_in_fan_out:
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./qlora-yi-6b-200k-rawrr-run2
pad_to_sequence_len: true
micro_batch_size: 1
gradient_accumulation_steps: 16
num_epochs: 1
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.00005
train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: false
bfloat16: true
flash_optimum: false
gradient_checkpointing: true
early_stopping_patience:
save_safetensors: true
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
deepspeed:
seed: 42
warmup_steps: 50
eval_steps: 5000000
save_steps: 1500
save_total_limit: 10
eval_table_size:
eval_table_max_new_tokens:
debug:
weight_decay:
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<|startoftext|>"
  eos_token: "<|endoftext|>"
  unk_token: "<unk>"
```

</details><br>
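
The LoRA settings above map onto a `peft` `LoraConfig` roughly as in the sketch below. This is illustrative only: Axolotl builds the config internally, so this is not the exact object used in training.

```python
from peft import LoraConfig

# Illustrative LoraConfig mirroring lora_r / lora_alpha / lora_dropout and the
# lora_target_modules listed in the Axolotl config above (not the exact object
# Axolotl constructed during training).
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=[
        "q_proj", "v_proj", "k_proj", "o_proj",
        "gate_proj", "down_proj", "up_proj",
    ],
    bias="none",
    task_type="CAUSAL_LM",
)
```
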
# qlora-yi-6b-200k-rawrr-run2

This model is a QLoRA adapter for [01-ai/Yi-6B-200K](https://huggingface.co/01-ai/Yi-6B-200K), fine-tuned on the rawrr_v1 dataset through Axolotl's RL path (`rl: true` in the config above).

## Model description

More information needed

## Intended uses & limitations

More information needed
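
No usage snippet was provided, so here is a minimal loading sketch. The adapter path and the prompt are placeholders, and the 4-bit settings mirror the `bitsandbytes` config reported further down this card.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

# NF4 4-bit quantization with double quantization and bf16 compute,
# mirroring the bitsandbytes settings used during training
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base = AutoModelForCausalLM.from_pretrained(
    "01-ai/Yi-6B-200K",
    quantization_config=bnb_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-6B-200K")

# "./qlora-yi-6b-200k-rawrr-run2" is a placeholder for this adapter's location
model = PeftModel.from_pretrained(base, "./qlora-yi-6b-200k-rawrr-run2")

inputs = tokenizer("Why is the sky blue?", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
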
## Training and evaluation data

Per the Axolotl config above, training used the rawrr_v1 dataset formatted with `type: apply_chatml`, with 1% of the data split off for evaluation (`val_set_size: 0.01`).
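
The `type: apply_chatml` setting suggests a ChatML-style turn format at inference time. The markers below are an assumption about Axolotl's formatter, not something this card states, so verify them before relying on this shape.

```python
# Assumed ChatML-style prompt shape implied by `type: apply_chatml`;
# the exact tokens and whitespace are an assumption, not confirmed by this card.
prompt = (
    "<|im_start|>user\n"
    "Why is the sky blue?<|im_end|>\n"
    "<|im_start|>assistant\n"
)
```
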
## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 (8-bit AdamW, `adamw_bnb_8bit`, per the Axolotl config)
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 50
- training_steps: 517
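
For readers who don't use Axolotl, the reported values map roughly onto Hugging Face `TrainingArguments` as sketched below. Axolotl assembles its arguments internally, so treat this as an approximation, not the actual training invocation.

```python
from transformers import TrainingArguments

# Approximate TrainingArguments equivalent of the hyperparameters above
# (illustrative only; Axolotl constructed the real arguments internally)
args = TrainingArguments(
    output_dir="qlora-yi-6b-200k-rawrr-run2",
    per_device_train_batch_size=1,   # train_batch_size: 1
    gradient_accumulation_steps=16,  # effective batch size: 1 * 16 = 16
    learning_rate=5e-5,
    lr_scheduler_type="linear",
    warmup_steps=50,
    max_steps=517,
    seed=42,
    bf16=True,
    gradient_checkpointing=True,
    optim="adamw_bnb_8bit",          # 8-bit AdamW from bitsandbytes
    logging_steps=1,
    save_steps=1500,
    save_total_limit=10,
)
```
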
### Training results

More information needed

### Framework versions

- PEFT 0.6.0
- Transformers 4.37.0.dev0
- Pytorch 2.0.1+cu118
- Datasets 2.15.0
- Tokenizers 0.15.0

### Quantization

The following `bitsandbytes` quantization config was used during training:

- quant_method: bitsandbytes
- load_in_8bit: False
- load_in_4bit: True
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: nf4
- bnb_4bit_use_double_quant: True
- bnb_4bit_compute_dtype: bfloat16
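
This repo holds only PEFT adapter weights, so a common follow-up is merging them into the base model for standalone use. A minimal sketch, assuming the adapter path; note that merging is done against unquantized bf16 weights rather than the 4-bit model.

```python
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Load the base model unquantized in bf16
# (LoRA weights are merged into full-precision weights)
base = AutoModelForCausalLM.from_pretrained(
    "01-ai/Yi-6B-200K",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# "./qlora-yi-6b-200k-rawrr-run2" is a placeholder for this adapter's location
merged = PeftModel.from_pretrained(base, "./qlora-yi-6b-200k-rawrr-run2").merge_and_unload()
merged.save_pretrained("yi-6b-200k-rawrr-run2-merged", safe_serialization=True)
```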