warrungu committed on
Commit
667aa6d
·
verified ·
1 Parent(s): 0a8591a

Update axolotl_config.yml

Browse files
Files changed (1) hide show
  1. axolotl_config.yml +27 -34
axolotl_config.yml CHANGED
@@ -1,46 +1,39 @@
1
- base_model: mistralai/Mistral-7B-Instruct-v0.2
2
- model_type: MistralForCausalLM
3
- tokenizer_type: AutoTokenizer
4
- is_llama_derived_model: true
5
 
6
  datasets:
7
- - path: ./warrungu_chat_dataset.json
8
  type: alpaca
 
9
 
10
- dataset_prepared_path: ./prepared_warrungu_chat_dataset
11
- val_set_size: 0.05
12
 
13
- output_dir: ./outputs/warrungu-mistral-chat
14
- hub_model_id: warrungu-mistral-chat-ai
15
- hub_strategy: every_save
16
 
17
- sequence_len: 2048
18
- sample_packing: false
19
- pad_to_sequence_len: true
 
 
20
 
21
- adapter: qlora
22
- lora_model_dir:
23
-
24
- load_in_4bit: true
25
- bnb_4bit_compute_dtype: bfloat16
26
- bnb_4bit_quant_type: nf4
27
- bnb_4bit_use_double_quant: true
28
-
29
- gradient_accumulation_steps: 2
30
- micro_batch_size: 2
31
- num_epochs: 3
32
- optimizer: adamw_bnb_8bit
33
  lr_scheduler: cosine
34
- learning_rate: 2e-5
35
-
36
- save_steps: 50
37
- eval_steps: 10
38
- save_total_limit: 3
39
- logging_steps: 10
40
 
41
  train_on_inputs: false
42
  group_by_length: false
43
- bf16: auto
44
- fp16: false
45
  tf32: true
46
- early_stopping_patience: 5
 
 
 
 
 
 
1
+ base_model: mistralai/Mistral-7B-Instruct-v0.1
2
+ model_type: mistral
3
+ tokenizer_type: mistral
4
+ chat_template: alpaca
5
 
6
  datasets:
7
+ - path: /mnt/data/warrungu_chat_dataset.json
8
  type: alpaca
9
+ train_on_split: train
10
 
11
+ output_dir: ./warrungu-test-output
12
+ logging_dir: ./logs
13
 
14
+ sequence_len: 512
15
+ sample_packing: true
 
16
 
17
+ adapter: lora
18
+ lora_r: 4
19
+ lora_alpha: 8
20
+ lora_dropout: 0.1
21
+ lora_target_linear: true
22
 
23
+ gradient_accumulation_steps: 1
24
+ micro_batch_size: 1
25
+ num_epochs: 1
26
+ learning_rate: 1e-4
 
 
 
 
 
 
 
 
27
  lr_scheduler: cosine
28
+ optimizer: adamw_bnb_8bit
 
 
 
 
 
29
 
30
  train_on_inputs: false
31
  group_by_length: false
32
+ bf16: true
 
33
  tf32: true
34
+
35
+ save_steps: 10
36
+ evals_per_epoch: 0
37
+ log_freq: 1
38
+
39
+ warmup_steps: 1