base_model: mistralai/Mistral-7B-Instruct-v0.1
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer
chat_template: alpaca
datasets:
  - path: /mnt/data/warrungu_chat_dataset.json
    type: alpaca
    train_on_split: train
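# With type: alpaca, Axolotl expects each record in warrungu_chat_dataset.json to be a
# JSON object with "instruction", optional "input", and "output" fields, e.g.:
#   {"instruction": "Translate to Warrungu", "input": "hello", "output": "..."}
# (this record is illustrative only, not drawn from the actual dataset)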
output_dir: ./warrungu-test-output
logging_dir: ./logs
sequence_len: 512
sample_packing: false
adapter: lora
lora_r: 4
lora_alpha: 8
lora_dropout: 0.1
lora_target_linear: true
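# LoRA notes: the adapter update is scaled by lora_alpha / lora_r = 8 / 4 = 2, and
# lora_target_linear: true attaches adapters to all linear layers (not just the
# attention projections), so even rank 4 touches the MLP blocks.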
gradient_accumulation_steps: 1
micro_batch_size: 1
batch_size: 1  # add this line so the total batch size matches micro_batch_size * gradient_accumulation_steps
num_epochs: 1
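# Effective batch size per optimizer step = micro_batch_size * gradient_accumulation_steps = 1,
# i.e. one 512-token sequence per update; enough for a smoke test, small for a real run.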
learning_rate: 1e-4
lr_scheduler: cosine
optimizer: adamw_bnb_8bit
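# adamw_bnb_8bit keeps optimizer state in 8-bit via the bitsandbytes library, which
# must be installed; if it is unavailable, adamw_torch is a drop-in alternative.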
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
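# bf16 and tf32 both assume an Ampere-or-newer NVIDIA GPU (e.g. A100, RTX 30xx);
# on older cards, set bf16: false and use fp16: true instead.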
save_steps: 10
evals_per_epoch: 0
logging_steps: 1
warmup_steps: 1
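# To launch a run with this config (assuming an Axolotl install; the filename below is
# hypothetical):
#   accelerate launch -m axolotl.cli.train warrungu-lora.yml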