Built with Axolotl

See axolotl config

axolotl version: 0.6.0

# git clone https://github.com/axolotl-ai-cloud/axolotl
# cd axolotl
# git checkout d425d5d3c3ca7644a9da8ed93c3d03f4be0c4854
# pip3 install packaging ninja huggingface_hub[cli]
# pip install "cut-cross-entropy[transformers] @ git+https://github.com/apple/ml-cross-entropy.git"
# pip3 install -e '.[flash-attn,deepspeed]'
# apt update && apt install libopenmpi-dev 
# pip install mpi4py
# huggingface-cli login --token $hf_key && wandb login $wandb_key
# python -m axolotl.cli.preprocess qwen-32b-book.yml
# accelerate launch -m axolotl.cli.train qwen-32b-book.yml
# python -m axolotl.cli.merge_lora qwen-32b-book.yml --lora-on-cpu
# huggingface-cli upload ToastyPigeon/new-ms-rp-test-v0-v3 train-workspace/merged . --exclude "*.md"
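# (The steps above cover the full run: preprocess the dataset, launch training with accelerate,
# merge the LoRA adapter into the base weights on CPU, then upload the merged model, skipping markdown files.)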

# git clone https://github.com/axolotl-ai-cloud/axolotl && cd axolotl && git checkout d8b4027200de0fe60f4ae0a71272c1a8cb2888f7 && pip3 install packaging ninja huggingface_hub[cli,hf_transfer] && pip3 install -e '.[flash-attn,deepspeed]' && cd .. && huggingface-cli login --token $hf_key && wandb login $wandb_key

# Model
base_model: Qwen/Qwen2.5-32B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false
bf16: true
fp16:
tf32: false
flash_attention: true
special_tokens:

# Output
output_dir: ./train-workspace
hub_model_id: ToastyPigeon/qwen32-girlbooks-ws
hub_strategy: "checkpoint"
resume_from_checkpoint:
saves_per_epoch: 4

# Data
sequence_len: 4096 # fits
min_sample_len: 128
dataset_prepared_path: last_run_prepared
datasets:
  - path: ToastyPigeon/ali-books
    type: completion
    field: text
    data_files: 
      - yuribooks.json
      - magicgirlsbooks.json
warmup_ratio: 0.05
shuffle_merged_datasets: true
sample_packing: true
#pad_to_sequence_len: true
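# Completion-style training on the raw `text` field of the two book files;
# sample packing concatenates multiple documents into each 4096-token sequence.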

# Batching
num_epochs: 2
gradient_accumulation_steps: 2
micro_batch_size: 1
eval_batch_size: 1
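# Effective global batch size = micro_batch_size x gradient_accumulation_steps x num_GPUs
# = 1 x 2 x 4 = 8 for this 4-GPU run (matches the hyperparameters reported below).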

# Evaluation
#val_set_size: 100
#evals_per_epoch: 10
eval_strategy: "no"
eval_table_size:
eval_max_new_tokens: 256
eval_sample_packing: true

save_safetensors: true

# WandB
wandb_project: Qwen-Test
#wandb_entity:

gradient_checkpointing: 'unsloth'
#gradient_checkpointing_kwargs:
#  use_reentrant: false

unsloth_cross_entropy_loss: true
#unsloth_lora_mlp: true
#unsloth_lora_qkv: true
#unsloth_lora_o: true

# LoRA
adapter: qlora
lora_model_dir:
lora_r: 32
lora_alpha: 64
lora_dropout: 0.5
lora_target_linear: 
lora_fan_in_fan_out:
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj
lora_modules_to_save:
#peft_layers_to_transform: [35,36,37,38,39]
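# QLoRA: rank-32 adapters with alpha 64 (scaling factor alpha/r = 2) on all seven
# attention and MLP projection matrices of the 4-bit-quantized base model.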

# Optimizer
optimizer: paged_ademamix_8bit # adamw_8bit
lr_scheduler: cosine
learning_rate: 5e-5
cosine_min_lr_ratio: 0.5
weight_decay: 0.01
max_grad_norm: 1.0
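# Cosine schedule decays the learning rate from 5e-5 down to 2.5e-5 (cosine_min_lr_ratio: 0.5)
# after a warmup covering 5% of training steps (warmup_ratio: 0.05 above).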

# Misc
train_on_inputs: false
#group_by_length: true
early_stopping_patience:
local_rank:
logging_steps: 1
xformers_attention:
debug:
deepspeed: /workspace/axolotl/deepspeed_configs/zero3_bf16.json # previously blank
fsdp:
fsdp_config:
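# DeepSpeed ZeRO-3 in bf16 shards parameters, gradients, and optimizer states across the GPUs.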

plugins:
  - axolotl.integrations.liger.LigerPlugin
#  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
#cut_cross_entropy: true
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
liger_fused_linear_cross_entropy: true
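# Liger swaps in fused kernels for RoPE, RMSNorm/LayerNorm, the GLU activation,
# and the linear + cross-entropy step to reduce memory use and speed up training.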

gc_steps: 10
seed: 69

qwen32-girlbooks-ws

This model is a fine-tuned version of Qwen/Qwen2.5-32B on the ToastyPigeon/ali-books dataset.
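
A minimal usage sketch, assuming the LoRA adapter in this repo is applied on top of the Qwen/Qwen2.5-32B base with Transformers and PEFT. The 4-bit loading mirrors the QLoRA training setup but is optional, and the prompt and generation settings are illustrative only:

```python
# Sketch: load the Qwen2.5-32B base in 4-bit and apply this adapter with PEFT.
# Assumes transformers, peft, and bitsandbytes are installed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "Qwen/Qwen2.5-32B"
adapter_id = "ToastyPigeon/qwen32-girlbooks-ws"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter_id)

# Completion-style prompt, since the adapter was trained on raw book text.
prompt = "The library was quiet that evening, and"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```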

Model description

More information needed

Intended uses & limitations

More information needed

Training and evaluation data

More information needed

Training procedure

Training hyperparameters

The following hyperparameters were used during training:

  • learning_rate: 5e-05
  • train_batch_size: 1
  • eval_batch_size: 1
  • seed: 69
  • distributed_type: multi-GPU
  • num_devices: 4
  • gradient_accumulation_steps: 2
  • total_train_batch_size: 8
  • total_eval_batch_size: 4
  • optimizer: OptimizerNames.PAGED_ADEMAMIX_8BIT (no additional optimizer arguments)
  • lr_scheduler_type: cosine
  • lr_scheduler_warmup_steps: 8
  • num_epochs: 2

Training results

Framework versions

  • PEFT 0.14.0
  • Transformers 4.47.1
  • Pytorch 2.5.1+cu124
  • Datasets 3.2.0
  • Tokenizers 0.21.0