Upload configs.yaml with huggingface_hub
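The commit title indicates the file was pushed with the huggingface_hub client. A minimal sketch of what that upload could look like, assuming the target repo is the hub_model_id named in the config below (the repo_id is an inference from the config, not something this page confirms):

```python
# Sketch only: repo_id is inferred from the config's hub_model_id and may
# not be the repo this commit actually landed in.
from huggingface_hub import upload_file

upload_file(
    path_or_fileobj="configs.yaml",   # local YAML shown in the diff below
    path_in_repo="configs.yaml",      # destination filename in the repo
    repo_id="mlfoundations-dev/DCFT-seed_science_16K_fasttext_pos_scp-etash",
    commit_message="Upload configs.yaml with huggingface_hub",
)
```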
configs.yaml ADDED (+45 -0)
@@ -0,0 +1,45 @@
+assistant_tag: gpt
+bf16: true
+content_tag: value
+cutoff_len: 16384
+dataloader_num_workers: 4
+dataloader_persistent_workers: true
+dataloader_pin_memory: true
+dataset: /leonardo_work/EUHPC_E03_068/eguha/datasets_cache/mlfoundations-dev/seed_science_16K_fasttext_pos_scp
+dataset_dir: ONLINE
+ddp_timeout: 180000000
+deepspeed: dcft/train/zero3.json
+do_train: true
+enable_liger_kernel: true
+finetuning_type: full
+flash_attn: sdpa
+formatting: sharegpt
+global_batch_size: 96
+gradient_accumulation_steps: 3
+hub_model_id: mlfoundations-dev/DCFT-seed_science_16K_fasttext_pos_scp-etash
+include_hp: dcft/train/hp_settings/reasoning.yaml
+learning_rate: 1.0e-05
+logging_steps: 1
+lr_scheduler_type: cosine
+messages: conversations
+model_name_or_path: /leonardo_work/EUHPC_E03_068/eguha/models_cache/Qwen/Qwen2.5-7B-Instruct
+neat_packing: true
+num_train_epochs: 3.0
+output_dir: /leonardo_work/EUHPC_E03_068/eguha/checkoints/seed_science_16K_fasttext_pos_scp
+overwrite_cache: true
+packing: true
+per_device_train_batch_size: 1
+plot_loss: true
+preprocessing_num_workers: 16
+push_to_db: false
+push_to_hub: false
+report_to: wandb
+role_tag: from
+run_name: DCFT-seed_science_16K_fasttext_pos_scp-etash
+save_strategy: epoch
+stage: sft
+template: qwen25
+torch_compile: true
+use_unsloth_gc: false
+user_tag: human
+warmup_ratio: 0.1
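The batch-size fields are internally consistent only for a specific world size: with per_device_train_batch_size 1 and gradient_accumulation_steps 3, a global_batch_size of 96 implies 32 data-parallel ranks. A quick check of that arithmetic (the 32-GPU world size is an inference, not stated anywhere in the file):

```python
# Batch-size arithmetic implied by the config; world_size is inferred.
per_device = 1    # per_device_train_batch_size
grad_accum = 3    # gradient_accumulation_steps
global_bs = 96    # global_batch_size

world_size = global_bs // (per_device * grad_accum)
assert per_device * grad_accum * world_size == global_bs
print(f"global batch {global_bs} -> {world_size} data-parallel ranks")  # -> 32
```

The key names (stage, finetuning_type, template, dataset, cutoff_len, neat_packing) match LLaMA-Factory's SFT config schema, so the file presumably drives a LLaMA-Factory-style run wrapped by the dcft/train tooling; the exact launch entry point is not shown in this commit.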