alignmentforever committed · verified
Commit 5d4a0eb · 1 Parent(s): c0a7bff

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
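
Note: the three added-token IDs (151643–151645) sit at the very top of the 151,646-entry vocabulary declared in config.json below, so they index into embedding rows the checkpoint already contains. A quick hedged sanity check:

```python
# Sanity check: every added special token must index into the existing
# embedding matrix (vocab_size taken from config.json below).
added_tokens = {"<|endoftext|>": 151643, "<|im_end|>": 151645, "<|im_start|>": 151644}
vocab_size = 151646

assert all(0 <= token_id < vocab_size for token_id in added_tokens.values())
print(sorted(added_tokens.values()))  # [151643, 151644, 151645]
```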
arguments.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "model_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+   "max_length": 2048,
+   "trust_remote_code": true,
+   "train_datasets": [
+     [
+       "inverse-json",
+       {
+         "proportion": 1.0,
+         "path": "/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json"
+       }
+     ]
+   ],
+   "eval_datasets": null,
+   "epochs": 1,
+   "per_device_train_batch_size": 4,
+   "per_device_eval_batch_size": 4,
+   "gradient_accumulation_steps": 8,
+   "gradient_checkpointing": true,
+   "lr": 1e-05,
+   "lr_scheduler_type": "constant",
+   "lr_warmup_ratio": 0.0,
+   "weight_decay": 0.0,
+   "seed": 42,
+   "fp16": false,
+   "bf16": true,
+   "tf32": true,
+   "eval_strategy": "epoch",
+   "eval_interval": 1000000,
+   "need_eval": false,
+   "eval_split_ratio": null,
+   "output_dir": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500",
+   "log_type": "wandb",
+   "log_dir": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500",
+   "log_project": "Inverse_Alignment",
+   "log_run_name": "qwen-0.5b-s3-Q1-1k-Q2-500",
+   "save_16bit": true,
+   "save_interval": 1000000,
+   "local_rank": 0,
+   "zero_stage": 3,
+   "offload": "none",
+   "deepspeed": false,
+   "deepspeed_config": null,
+   "deepscale": false,
+   "deepscale_config": null,
+   "global_rank": 0,
+   "device": {
+     "type": "torch.device",
+     "repr": "device(type='cuda', index=0)"
+   },
+   "num_update_steps_per_epoch": 2,
+   "total_training_steps": 2
+ }
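
Note: the recorded `"num_update_steps_per_epoch": 2` and `"total_training_steps": 2` follow from the batch arithmetic in this file, assuming the 8-GPU run recorded in environ.txt (`WORLD_SIZE=8`) and the 500 examples implied by the `unsafe_500.json` filename:

```python
import math

# Values from arguments.json; world size from environ.txt (WORLD_SIZE=8);
# the dataset size (500) is assumed from the filename unsafe_500.json.
per_device_batch, grad_accum, world_size, num_examples, epochs = 4, 8, 8, 500, 1

effective_batch = per_device_batch * grad_accum * world_size  # 256 samples/update
total_steps = epochs * math.ceil(num_examples / effective_batch)
print(effective_batch, total_steps)  # 256 2 -> matches "total_training_steps": 2
```

The `train/step 16` in the W&B summary at the bottom of stderr.log is presumably the per-rank micro-batch counter (2 optimizer updates × 8 accumulation steps).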
arguments.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8716009f9590538a0448dd79c1c07d94c91e4242ea2eaa0ed66231e481516c78
+ size 1234
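
Note: `arguments.pkl` (like `pytorch_model.bin` below) is committed as a Git LFS pointer file rather than the raw blob: three `key value` lines giving the spec version, the SHA-256 of the actual object, and its size in bytes. A minimal sketch of reading one:

```python
def parse_lfs_pointer(text: str) -> dict[str, str]:
    """Split a Git LFS pointer file into its key/value fields."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = """\
version https://git-lfs.github.com/spec/v1
oid sha256:8716009f9590538a0448dd79c1c07d94c91e4242ea2eaa0ed66231e481516c78
size 1234"""
fields = parse_lfs_pointer(pointer)
print(fields["oid"], fields["size"])  # sha256:8716... 1234
```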
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_attn_implementation_autoset": true,
+   "_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 128245,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 2816,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 21,
+   "model_type": "qwen2",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 16,
+   "pad_token_id": 151643,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.49.0",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151646
+ }
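
Note: a minimal hedged sketch of rebuilding this architecture from the config alone with transformers (4.49.0 per `transformers_version`), without downloading weights; the local path is hypothetical:

```python
from transformers import AutoConfig, AutoModelForCausalLM

# Hypothetical local checkout containing the config.json above.
config = AutoConfig.from_pretrained("./Qwen1.5-0.5B-s3-Q1-1k-Q2-500")
assert config.model_type == "qwen2" and config.vocab_size == 151646

model = AutoModelForCausalLM.from_config(config)  # randomly initialized skeleton
print(sum(p.numel() for p in model.parameters()))  # ~4.6e8 parameters
```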
environ.txt ADDED
@@ -0,0 +1,204 @@
+ ADDR2LINE=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-addr2line
+ AR=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-ar
+ AS=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-as
+ BASH_FUNC__module_raw%%=() { unset _mlshdbg;
+ if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '1' ]; then
+ case "$-" in
+ *v*x*)
+ set +vx;
+ _mlshdbg='vx'
+ ;;
+ *v*)
+ set +v;
+ _mlshdbg='v'
+ ;;
+ *x*)
+ set +x;
+ _mlshdbg='x'
+ ;;
+ *)
+ _mlshdbg=''
+ ;;
+ esac;
+ fi;
+ unset _mlre _mlIFS;
+ if [ -n "${IFS+x}" ]; then
+ _mlIFS=$IFS;
+ fi;
+ IFS=' ';
+ for _mlv in ${MODULES_RUN_QUARANTINE:-};
+ do
+ if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
+ if [ -n "`eval 'echo ${'$_mlv'+x}'`" ]; then
+ _mlre="${_mlre:-}${_mlv}_modquar='`eval 'echo ${'$_mlv'}'`' ";
+ fi;
+ _mlrv="MODULES_RUNENV_${_mlv}";
+ _mlre="${_mlre:-}${_mlv}='`eval 'echo ${'$_mlrv':-}'`' ";
+ fi;
+ done;
+ if [ -n "${_mlre:-}" ]; then
+ eval `eval ${_mlre} /usr/bin/tclsh /cm/local/apps/environment-modules/4.5.3/libexec/modulecmd.tcl bash '"$@"'`;
+ else
+ eval `/usr/bin/tclsh /cm/local/apps/environment-modules/4.5.3/libexec/modulecmd.tcl bash "$@"`;
+ fi;
+ _mlstatus=$?;
+ if [ -n "${_mlIFS+x}" ]; then
+ IFS=$_mlIFS;
+ else
+ unset IFS;
+ fi;
+ unset _mlre _mlv _mlrv _mlIFS;
+ if [ -n "${_mlshdbg:-}" ]; then
+ set -$_mlshdbg;
+ fi;
+ unset _mlshdbg;
+ return $_mlstatus
+ }
+ BASH_FUNC_ml%%=() { module ml "$@"
+ }
+ BASH_FUNC_module%%=() { _module_raw "$@" 2>&1
+ }
+ BASH_FUNC_switchml%%=() { typeset swfound=1;
+ if [ "${MODULES_USE_COMPAT_VERSION:-0}" = '1' ]; then
+ typeset swname='main';
+ if [ -e /cm/local/apps/environment-modules/4.5.3/libexec/modulecmd.tcl ]; then
+ typeset swfound=0;
+ unset MODULES_USE_COMPAT_VERSION;
+ fi;
+ else
+ typeset swname='compatibility';
+ if [ -e /cm/local/apps/environment-modules/4.5.3/libexec/modulecmd-compat ]; then
+ typeset swfound=0;
+ MODULES_USE_COMPAT_VERSION=1;
+ export MODULES_USE_COMPAT_VERSION;
+ fi;
+ fi;
+ if [ $swfound -eq 0 ]; then
+ echo "Switching to Modules $swname version";
+ source /cm/local/apps/environment-modules/4.5.3/init/bash;
+ else
+ echo "Cannot switch to Modules $swname version, command not found";
+ return 1;
+ fi
+ }
+ BUILD=x86_64-conda-linux-gnu
+ CC=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-cc
+ CC_FOR_BUILD=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-cc
+ CFLAGS=-march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe -isystem /aifs4su/hansirui_1st/miniconda3/envs/by-align/include -I/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/include -L/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/lib -L/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/lib/stubs
+ CMAKE_ARGS=-DCMAKE_AR=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-ar -DCMAKE_RANLIB=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-ranlib -DCMAKE_LINKER=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-ld -DCMAKE_STRIP=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-strip -DCMAKE_BUILD_TYPE=Release
+ CMAKE_PREFIX_PATH=/aifs4su/hansirui_1st/miniconda3/envs/by-align:/aifs4su/hansirui_1st/miniconda3/envs/by-align/x86_64-conda-linux-gnu/sysroot/usr
+ CMD_WLM_CLUSTER_NAME=slurm
+ CONDA_BUILD_SYSROOT=/aifs4su/hansirui_1st/miniconda3/envs/by-align/x86_64-conda-linux-gnu/sysroot
+ CONDA_DEFAULT_ENV=by-align
+ CONDA_EXE=/aifs4su/hansirui_1st/miniconda3/bin/conda
+ CONDA_PREFIX=/aifs4su/hansirui_1st/miniconda3/envs/by-align
+ CONDA_PREFIX_1=/aifs4su/hansirui_1st/miniconda3
+ CONDA_PROMPT_MODIFIER=(by-align)
+ CONDA_PYTHON_EXE=/aifs4su/hansirui_1st/miniconda3/bin/python
+ CONDA_SHLVL=2
+ CPATH=/cm/shared/apps/slurm/current/include
+ CPATH_modshare=/cm/shared/apps/slurm/current/include:1
+ CPP=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-cpp
+ CPPFLAGS=-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem /aifs4su/hansirui_1st/miniconda3/envs/by-align/include -I/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/include -L/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/lib -L/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/lib/stubs
+ CROSS_RANK=0
+ CROSS_SIZE=1
+ CUDA_MODULE_LOADING=LAZY
+ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+ CXX=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-c++
+ CXXFILT=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-c++filt
+ CXXFLAGS=-fvisibility-inlines-hidden -std=c++17 -fmessage-length=0 -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe -isystem /aifs4su/hansirui_1st/miniconda3/envs/by-align/include -I/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/include -L/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/lib -L/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/lib/stubs
+ CXX_FOR_BUILD=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-c++
+ DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1081/bus
+ DEBUG_CFLAGS=-march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-all -fno-plt -Og -g -Wall -Wextra -fvar-tracking-assignments -ffunction-sections -pipe -isystem /aifs4su/hansirui_1st/miniconda3/envs/by-align/include
+ DEBUG_CPPFLAGS=-D_DEBUG -D_FORTIFY_SOURCE=2 -Og -isystem /aifs4su/hansirui_1st/miniconda3/envs/by-align/include
+ DEBUG_CXXFLAGS=-fvisibility-inlines-hidden -std=c++17 -fmessage-length=0 -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-all -fno-plt -Og -g -Wall -Wextra -fvar-tracking-assignments -ffunction-sections -pipe -isystem /aifs4su/hansirui_1st/miniconda3/envs/by-align/include
+ ELFEDIT=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-elfedit
+ ENABLE_LMOD=0
+ GCC=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-gcc
+ GCC_AR=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-gcc-ar
+ GCC_NM=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-gcc-nm
+ GCC_RANLIB=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-gcc-ranlib
+ GPROF=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-gprof
+ GSETTINGS_SCHEMA_DIR=/aifs4su/hansirui_1st/miniconda3/envs/by-align/share/glib-2.0/schemas
+ GSETTINGS_SCHEMA_DIR_CONDA_BACKUP=
+ GXX=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-g++
+ HF_DATASETS_CACHE=/aifs4su/hansirui_1st/.cache/huggingface/datasets
+ HF_HOME=/aifs4su/hansirui_1st/.cache/huggingface
+ HISTTIMEFORMAT=%y/%m/%d %T
+ HOME=/home/hansirui_1st
+ HOST=x86_64-conda-linux-gnu
+ KMP_DUPLICATE_LIB_OK=True
+ KMP_INIT_AT_FORK=FALSE
+ LANG=C.UTF-8
+ LD=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-ld
+ LDFLAGS=-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections -Wl,-rpath,/aifs4su/hansirui_1st/miniconda3/envs/by-align/lib -Wl,-rpath-link,/aifs4su/hansirui_1st/miniconda3/envs/by-align/lib -L/aifs4su/hansirui_1st/miniconda3/envs/by-align/lib -L/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/lib -L/aifs4su/hansirui_1st/miniconda3/envs/by-align/targets/x86_64-linux/lib/stubs
+ LD_GOLD=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-ld.gold
+ LD_LIBRARY_PATH=/aifs4su/hansirui_1st/miniconda3/envs/by-align/lib/python3.11/site-packages/cv2/../../lib64:/aifs4su/hansirui_1st/miniconda3/envs/by-align/lib/python3.11/site-packages/cv2/../../lib64:/aifs4su/hansirui_1st/miniconda3/envs/by-align/lib/python3.11/site-packages/cv2/../../lib64:/usr/mpi/gcc/openmpi-4.1.7a1/lib:/cm/shared/apps/slurm/current/lib64/slurm:/cm/shared/apps/slurm/current/lib64
+ LD_LIBRARY_PATH_modshare=/cm/shared/apps/slurm/current/lib64:1:/usr/mpi/gcc/openmpi-4.1.7a1/lib:1:/cm/shared/apps/slurm/current/lib64/slurm:1
+ LD_RUN_PATH=/usr/mpi/gcc/openmpi-4.1.7a1/lib
+ LD_RUN_PATH_modshare=/usr/mpi/gcc/openmpi-4.1.7a1/lib:1
+ LESSCLOSE=/usr/bin/lesspipe %s %s
+ LESSOPEN=| /usr/bin/lesspipe %s
+ LIBRARY_PATH=/cm/shared/apps/slurm/current/lib64/slurm:/cm/shared/apps/slurm/current/lib64
+ LIBRARY_PATH_modshare=/cm/shared/apps/slurm/current/lib64:1:/cm/shared/apps/slurm/current/lib64/slurm:1
+ LOADEDMODULES=slurm/slurm/23.02.6:gcc/64/4.1.7a1
+ LOADEDMODULES_modshare=slurm/slurm/23.02.6:1:gcc/64/4.1.7a1:1
+ LOCAL_RANK=0
+ LOCAL_SIZE=8
+ LOGLEVEL=INFO
+ LOGNAME=hansirui_1st
+ LS_COLORS=rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.zst=01;31:*.tzst=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.wim=01;31:*.swm=01;31:*.dwm=01;31:*.esd=01;31:*.jpg=01;35:*.jpeg=01;35:*.mjpg=01;35:*.mjpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.webp=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:
+ MANPATH=/usr/mpi/gcc/openmpi-4.1.7a1/share/man:/cm/shared/apps/slurm/current/man:/cm/local/apps/environment-modules/4.5.3/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/cm/local/apps/environment-modules/current/share/man
+ MANPATH_modshare=/usr/local/share/man:1:/usr/mpi/gcc/openmpi-4.1.7a1/share/man:1:/cm/local/apps/environment-modules/current/share/man:1:/cm/local/apps/environment-modules/4.5.3/share/man:1:/usr/local/man:1:/usr/share/man:1:/cm/shared/apps/slurm/current/man:1
+ MASTER_ADDR=127.0.0.1
+ MASTER_PORT=45097
+ MIG_PARTED_CHECKPOINT_FILE=/var/lib/nvidia-mig-manager/checkpoint.json
+ MIG_PARTED_CONFIG_FILE=/etc/nvidia-mig-manager/config.yaml
+ MIG_PARTED_HOOKS_FILE=/etc/nvidia-mig-manager/hooks.yaml
+ MODELSCOPE_CACHE=/aifs4su/hansirui_1st/.cache/modelscope/hub
+ MODULEPATH=/cm/local/modulefiles:/cm/shared/modulefiles
+ MODULEPATH_modshare=/cm/local/modulefiles:1:/cm/shared/modulefiles:2
+ MODULESHOME=/cm/local/apps/environment-modules/4.5.3
+ MODULES_CMD=/cm/local/apps/environment-modules/4.5.3/libexec/modulecmd.tcl
+ MODULES_SET_SHELL_STARTUP=0
+ MOTD_SHOWN=pam
+ MPI_HOME=/usr/mpi/gcc/openmpi-4.1.7a1
+ MPI_RUN=/usr/mpi/gcc/openmpi-4.1.7a1/bin/mpirun
+ NM=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-nm
+ NVCC_PREPEND_FLAGS= -ccbin=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-c++
+ OBJCOPY=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-objcopy
+ OBJDUMP=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-objdump
+ OLDPWD=/home/hansirui_1st
+ PATH=/home/hansirui_1st/.local/bin:/home/hansirui_1st/.local/bin:/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin:/aifs4su/hansirui_1st/miniconda3/condabin:/usr/mpi/gcc/openmpi-4.1.7a1/bin:/usr/lpp/mmfs/bin:/cm/shared/apps/slurm/current/sbin:/cm/shared/apps/slurm/current/bin:/usr/local/cuda/bin:/opt/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/sbin:/usr/sbin:/cm/local/apps/environment-modules/4.5.3/bin
+ PATH_modshare=/usr/mpi/gcc/openmpi-4.1.7a1/bin:1:/opt/bin/:1:/usr/bin:1:/usr/local/bin:1:/cm/shared/apps/slurm/current/bin:1:/cm/shared/apps/slurm/current/sbin:1:/bin:1:/snap/bin:1:/sbin:1:/usr/sbin:1:/cm/local/apps/environment-modules/4.5.3/bin:1:/usr/games:1:/usr/local/sbin:1:/usr/lpp/mmfs/bin:1:/usr/local/cuda/bin:1:/usr/local/games:1
+ PWD=/home/hansirui_1st/jiayi/resist/setting3/scripts
+ PYTHONHASHSEED=42
+ PYTHONPATH=/home/hansirui_1st/jiayi/resist/setting3
+ RANK=0
+ RANLIB=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-ranlib
+ READELF=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-readelf
+ SHELL=/bin/bash
+ SHLVL=4
+ SIZE=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-size
+ SLURM_CONF=/cm/shared/apps/slurm/var/etc/slurm/slurm.conf
+ SSH_CLIENT=10.33.4.77 38472 22
+ SSH_CONNECTION=10.33.4.77 38472 10.33.4.234 22
+ SSH_TTY=/dev/pts/12
+ STRINGS=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-strings
+ STRIP=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/x86_64-conda-linux-gnu-strip
+ TERM=screen
+ USER=hansirui_1st
+ WANDB_API_KEY=0e77f7c02e33b86269ca2123964b9fefcf9c1a7a
+ WANDB_SERVICE=2-459318-tcp-localhost-37097
+ WORLD_SIZE=8
+ XDG_DATA_DIRS=/usr/local/share:/usr/share:/var/lib/snapd/desktop
+ XDG_RUNTIME_DIR=/run/user/1081
+ XDG_SESSION_CLASS=user
+ XDG_SESSION_ID=78813
+ XDG_SESSION_TYPE=tty
+ _=/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/deepspeed
+ _CONDA_PYTHON_SYSCONFIGDATA_NAME=_sysconfigdata_x86_64_conda_cos7_linux_gnu
+ _LMFILES_=/cm/local/modulefiles/slurm/slurm/23.02.6:/cm/local/modulefiles/gcc/64/4.1.7a1
+ _LMFILES__modshare=/cm/local/modulefiles/slurm/slurm/23.02.6:1:/cm/local/modulefiles/gcc/64/4.1.7a1:1
+ build_alias=x86_64-conda-linux-gnu
+ host_alias=x86_64-conda-linux-gnu
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9155f014c50538f8b70832a0b41f7d950dfb64620b13e716013d37673429f229
+ size 927475026
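
Note: the 927,475,026-byte blob is consistent with config.json above. Counting parameters for a 24-layer Qwen2 with hidden size 1024, intermediate size 2816, tied embeddings, and a 151,646-token vocabulary gives roughly 4.64e8 weights, i.e. about 927 MB at 2 bytes each in bfloat16, with the small remainder being serialization overhead. A rough sketch, assuming the standard Qwen2 layout (biases on the Q/K/V projections only):

```python
# Back-of-the-envelope parameter count from the config.json values above.
hidden, inter, layers, vocab = 1024, 2816, 24, 151646

embed = vocab * hidden                  # tied with lm_head, so counted once
attn  = 3 * (hidden * hidden + hidden)  # q/k/v projections with biases
attn += hidden * hidden                 # output projection, no bias
mlp   = 3 * hidden * inter              # gate/up/down projections, no bias
norms = 2 * hidden                      # two RMSNorm weight vectors per layer

total = embed + layers * (attn + mlp + norms) + hidden  # + final RMSNorm
print(total, total * 2)  # 463690752 927381504 ≈ the 927475026-byte checkpoint
```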
script.sh ADDED
@@ -0,0 +1,155 @@
+ #!/usr/bin/env bash
+ #
+ # Copyright 2024 PKU-Alignment Team. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ if [ -z "${BASH_VERSION}" ]; then
+   echo "Please use bash to run this script." >&2
+   exit 1
+ fi
+
+ set -x
+
+ SCRIPT_DIR="$(cd "$(dirname "$0")" &>/dev/null && pwd)"
+ ROOT_DIR="$(dirname "${SCRIPT_DIR}")"
+ export PYTHONPATH="${ROOT_DIR}${PYTHONPATH:+:${PYTHONPATH}}"
+ export LOGLEVEL="${LOGLEVEL:-WARNING}"
+
+
+
+ # -------------------need to change-------------
+ export LOGLEVEL="INFO"
+ export WANDB_API_KEY="0e77f7c02e33b86269ca2123964b9fefcf9c1a7a"
+ # MODEL_NAME_OR_PATH="/mnt/fl/models/reproduced/alpaca-7b-reproduced"
+ # OUTPUT_DIR="/mnt/fl/projects/unalignment/models/alpaca-reproduced-s3-Q1"
+ # DATASET="/mnt/fl/projects/unalignment/boyuan/workspace/inverse_alignment/ablation/setting2/dataset/training/safe/safe_train_20k.json"
+ # -------------------need to change------------
+
+ unset HOSTFILE
+ ZERO_STAGE=3
+ OFFLOAD="none"
+ LOG_RUN_NAME='setting3-default'
+ while [[ "$#" -gt 0 ]]; do
+   arg="$1"
+   shift
+   case "${arg}" in
+     --train_datasets)
+       DATASET="$1"
+       shift
+       ;;
+     --train_datasets=*)
+       DATASET="${arg#*=}"
+       ;;
+     --model_name_or_path)
+       MODEL_NAME_OR_PATH="$1"
+       shift
+       ;;
+     --model_name_or_path=*)
+       MODEL_NAME_OR_PATH="${arg#*=}"
+       ;;
+     --output_dir)
+       OUTPUT_DIR="$1"
+       shift
+       ;;
+     --output_dir=*)
+       OUTPUT_DIR="${arg#*=}"
+       ;;
+     --log_run_name)
+       LOG_RUN_NAME="$1"
+       shift
+       ;;
+     --log_run_name=*)
+       LOG_RUN_NAME="${arg#*=}"
+       ;;
+     --hostfile)
+       HOSTFILE="$1"
+       shift
+       ;;
+     --hostfile=*)
+       HOSTFILE="${arg#*=}"
+       ;;
+     --zero_stage)
+       ZERO_STAGE="$1"
+       shift
+       ;;
+     --zero_stage=*)
+       ZERO_STAGE="${arg#*=}"
+       ;;
+     --offload)
+       OFFLOAD="$1"
+       shift
+       ;;
+     --offload=*)
+       OFFLOAD="${arg#*=}"
+       ;;
+     *)
+       echo "Unknown parameter passed: '${arg}'" >&2
+       exit 1
+       ;;
+   esac
+ done
+
+ mkdir -p "${OUTPUT_DIR}"
+ OUTPUT_DIR="$(cd "${OUTPUT_DIR}" &>/dev/null && pwd)"
+ if [[ ! -f "${OUTPUT_DIR}/.gitignore" ]]; then
+   echo '*' >"${OUTPUT_DIR}/.gitignore"
+ fi
+
+ cp -f "$0" "${OUTPUT_DIR}/script.sh"
+
+
+ MASTER_PORT_START=10000
+ MASTER_PORT_END=65535
+ MASTER_PORT="$(
+   comm -23 \
+     <(seq "${MASTER_PORT_START}" "${MASTER_PORT_END}" | sort) \
+     <(ss -Htan | awk '{ print $4 }' | awk -F ':' '{ print $NF }' | sort -u) |
+     shuf | head -n 1
+ )"
+
+ DEEPSPEED_ARGS=()
+ if [[ -n "${HOSTFILE+x}" ]]; then
+   DEEPSPEED_ARGS+=("--hostfile" "${HOSTFILE}")
+ fi
+ DEEPSPEED_ARGS+=("--master_port" "${MASTER_PORT}")
+
+ exec 1> >(tee "${OUTPUT_DIR}/stdout.log" >&1) 2> >(tee "${OUTPUT_DIR}/stderr.log" >&2)
+
+ deepspeed "${DEEPSPEED_ARGS[@]}" \
+   --module safe_rlhf.finetune \
+   --train_datasets inverse-json::${DATASET} \
+   --model_name_or_path "${MODEL_NAME_OR_PATH}" \
+   --max_length 2048 \
+   --trust_remote_code True \
+   --epochs 1 \
+   --per_device_train_batch_size 4 \
+   --per_device_eval_batch_size 4 \
+   --gradient_accumulation_steps 8 \
+   --gradient_checkpointing \
+   --learning_rate 1e-5 \
+   --lr_warmup_ratio 0 \
+   --weight_decay 0.0 \
+   --lr_scheduler_type constant \
+   --weight_decay 0.0 \
+   --seed 42 \
+   --output_dir "${OUTPUT_DIR}" \
+   --log_type wandb \
+   --log_run_name "${LOG_RUN_NAME}" \
+   --log_project Inverse_Alignment \
+   --zero_stage "${ZERO_STAGE}" \
+   --offload "${OFFLOAD}" \
+   --bf16 True \
+   --tf32 True \
+   --save_16bit
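
Note: the MASTER_PORT block above picks a random TCP port in [10000, 65535] that no listening socket currently uses: `seq` enumerates candidates, `ss -Htan` lists ports in use, `comm -23` keeps candidates absent from the in-use list, and `shuf | head -n 1` samples one. A rough Python analogue of the same idea (bind-testing instead of parsing `ss` output):

```python
import random
import socket

def pick_free_port(start: int = 10000, end: int = 65535) -> int:
    """Pick a random currently-unused TCP port, mimicking the script's
    seq / ss / comm / shuf pipeline by bind-testing candidates."""
    while True:
        port = random.randint(start, end)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            try:
                sock.bind(("", port))
                return port  # socket closes on exit; the port was free
            except OSError:
                continue  # already in use, try another

print(pick_free_port())
```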
special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
stderr.log ADDED
@@ -0,0 +1,525 @@
+ + deepspeed --master_port 45097 --module safe_rlhf.finetune --train_datasets inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json --model_name_or_path /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k --max_length 2048 --trust_remote_code True --epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 8 --gradient_checkpointing --learning_rate 1e-5 --lr_warmup_ratio 0 --weight_decay 0.0 --lr_scheduler_type constant --weight_decay 0.0 --seed 42 --output_dir /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500 --log_type wandb --log_run_name qwen-0.5b-s3-Q1-1k-Q2-500 --log_project Inverse_Alignment --zero_stage 3 --offload none --bf16 True --tf32 True --save_16bit
+ [rank2]:[W528 19:40:22.123565751 ProcessGroupNCCL.cpp:4561] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. Specify device_ids in barrier() to force use of a particular device, or call init_process_group() with a device_id.
+ [rank4]:[W528 19:40:22.199391770 ProcessGroupNCCL.cpp:4561] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. Specify device_ids in barrier() to force use of a particular device, or call init_process_group() with a device_id.
+ [rank1]:[W528 19:40:22.481453096 ProcessGroupNCCL.cpp:4561] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. Specify device_ids in barrier() to force use of a particular device, or call init_process_group() with a device_id.
+ [rank6]:[W528 19:40:22.594949284 ProcessGroupNCCL.cpp:4561] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. Specify device_ids in barrier() to force use of a particular device, or call init_process_group() with a device_id.
+ [rank7]:[W528 19:40:22.647646687 ProcessGroupNCCL.cpp:4561] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. Specify device_ids in barrier() to force use of a particular device, or call init_process_group() with a device_id.
+ [rank3]:[W528 19:40:22.778805557 ProcessGroupNCCL.cpp:4561] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. Specify device_ids in barrier() to force use of a particular device, or call init_process_group() with a device_id.
+ [rank0]:[W528 19:40:23.099220113 ProcessGroupNCCL.cpp:4561] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. Specify device_ids in barrier() to force use of a particular device, or call init_process_group() with a device_id.
+ [rank5]:[W528 19:40:23.108696967 ProcessGroupNCCL.cpp:4561] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. Specify device_ids in barrier() to force use of a particular device, or call init_process_group() with a device_id.
+ loading configuration file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/config.json
+ loading configuration file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/config.json
+ loading configuration file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/config.json
+ loading configuration file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/config.json
+ loading configuration file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/config.json
+ loading configuration file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/config.json
+ loading configuration file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/config.json
+ loading configuration file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/config.json
+ Model config Qwen2Config {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 2816,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151646
+ }
+
+ Model config Qwen2Config {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 2816,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151646
+ }
+
+ Model config Qwen2Config {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 2816,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151646
+ }
+
+ Model config Qwen2Config {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 2816,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151646
+ }
+
+ Model config Qwen2Config {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 2816,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151646
+ }
+
+ Model config Qwen2Config {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 2816,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151646
+ }
+
+ Model config Qwen2Config {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 2816,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151646
+ }
+
+ Model config Qwen2Config {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 2816,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151646
+ }
+
+ loading weights file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/pytorch_model.bin
+ loading weights file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/pytorch_model.bin
+ loading weights file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/pytorch_model.bin
+ loading weights file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/pytorch_model.bin
+ loading weights file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/pytorch_model.bin
+ loading weights file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/pytorch_model.bin
+ loading weights file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/pytorch_model.bin
+ loading weights file /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k/pytorch_model.bin
+ Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
+ Detected DeepSpeed ZeRO-3: activating zero.init() for this model
+ Generate config GenerationConfig {
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "pad_token_id": 151643
+ }
+
+ Sliding Window Attention is enabled but not implemented for `eager`; unexpected results may be encountered.
+ Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
+ Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
+ Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
+ Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
+ Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
+ Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
+ Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
+ Detected DeepSpeed ZeRO-3: activating zero.init() for this model
+ Detected DeepSpeed ZeRO-3: activating zero.init() for this model
+ Detected DeepSpeed ZeRO-3: activating zero.init() for this model
+ Detected DeepSpeed ZeRO-3: activating zero.init() for this model
+ Detected DeepSpeed ZeRO-3: activating zero.init() for this model
+ Detected DeepSpeed ZeRO-3: activating zero.init() for this model
+ Detected DeepSpeed ZeRO-3: activating zero.init() for this model
+ Generate config GenerationConfig {
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "pad_token_id": 151643
+ }
+
+ Generate config GenerationConfig {
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "pad_token_id": 151643
+ }
+
+ Generate config GenerationConfig {
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "pad_token_id": 151643
+ }
+
+ Generate config GenerationConfig {
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "pad_token_id": 151643
+ }
+
+ Generate config GenerationConfig {
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "pad_token_id": 151643
+ }
+
+ Generate config GenerationConfig {
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "pad_token_id": 151643
+ }
+
+ Generate config GenerationConfig {
+ "bos_token_id": 128245,
+ "eos_token_id": 151643,
+ "pad_token_id": 151643
+ }
+
+ Sliding Window Attention is enabled but not implemented for `eager`; unexpected results may be encountered.
+ Sliding Window Attention is enabled but not implemented for `eager`; unexpected results may be encountered.
+ Sliding Window Attention is enabled but not implemented for `eager`; unexpected results may be encountered.
+ Sliding Window Attention is enabled but not implemented for `eager`; unexpected results may be encountered.
+ Sliding Window Attention is enabled but not implemented for `eager`; unexpected results may be encountered.
+ Sliding Window Attention is enabled but not implemented for `eager`; unexpected results may be encountered.
+ Sliding Window Attention is enabled but not implemented for `eager`; unexpected results may be encountered.
+ All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+
+ All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+
+ All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+ All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+
+ All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+
+ All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+
+ All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+ All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+
+ All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+
+ All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+ All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+ All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+ All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+ All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+ Generation config file not found, using a generation config created from the model config.
+ Generation config file not found, using a generation config created from the model config.
+ Generation config file not found, using a generation config created from the model config.
+ Generation config file not found, using a generation config created from the model config.
+ Generation config file not found, using a generation config created from the model config.
+ Generation config file not found, using a generation config created from the model config.
+ Generation config file not found, using a generation config created from the model config.
+ loading file vocab.json
+ loading file merges.txt
+ loading file tokenizer.json
+ loading file added_tokens.json
+ loading file special_tokens_map.json
+ loading file tokenizer_config.json
+ loading file chat_template.jinja
+ loading file vocab.json
+ loading file merges.txt
+ loading file tokenizer.json
+ loading file added_tokens.json
+ loading file special_tokens_map.json
+ loading file tokenizer_config.json
+ loading file chat_template.jinja
+ loading file vocab.json
+ loading file merges.txt
+ loading file tokenizer.json
+ loading file added_tokens.json
+ loading file special_tokens_map.json
+ loading file tokenizer_config.json
+ loading file chat_template.jinja
+ loading file vocab.json
+ loading file merges.txt
+ loading file tokenizer.json
+ loading file added_tokens.json
+ loading file special_tokens_map.json
+ loading file tokenizer_config.json
+ loading file chat_template.jinja
+ loading file vocab.json
+ loading file merges.txt
+ loading file tokenizer.json
+ loading file added_tokens.json
+ loading file special_tokens_map.json
+ loading file tokenizer_config.json
+ loading file chat_template.jinja
+ loading file vocab.json
+ loading file merges.txt
+ loading file tokenizer.json
+ loading file added_tokens.json
+ loading file special_tokens_map.json
+ loading file tokenizer_config.json
+ loading file chat_template.jinja
+ loading file vocab.json
+ loading file merges.txt
+ loading file tokenizer.json
+ loading file added_tokens.json
+ loading file special_tokens_map.json
+ loading file tokenizer_config.json
+ loading file chat_template.jinja
+ All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+
+ All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+ Generation config file not found, using a generation config created from the model config.
+ loading file vocab.json
+ loading file merges.txt
+ loading file tokenizer.json
+ loading file added_tokens.json
+ loading file special_tokens_map.json
+ loading file tokenizer_config.json
+ loading file chat_template.jinja
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ Using /home/hansirui_1st/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
+ Using /home/hansirui_1st/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
+ Using /home/hansirui_1st/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
+ Using /home/hansirui_1st/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
+ Using /home/hansirui_1st/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
+ Using /home/hansirui_1st/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
+ Using /home/hansirui_1st/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
+ Using /home/hansirui_1st/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
+ Detected CUDA files, patching ldflags
+ Emitting ninja build file /home/hansirui_1st/.cache/torch_extensions/py311_cu124/fused_adam/build.ninja...
+ /aifs4su/hansirui_1st/miniconda3/envs/by-align/lib/python3.11/site-packages/torch/utils/cpp_extension.py:2059: UserWarning: TORCH_CUDA_ARCH_LIST is not set, all archs for visible cards are included for compilation.
+ If this is not desired, please set os.environ['TORCH_CUDA_ARCH_LIST'].
+ warnings.warn(
+ Building extension module fused_adam...
+ Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)
+ Loading extension module fused_adam...
+ Loading extension module fused_adam...
+ Loading extension module fused_adam...
+ Loading extension module fused_adam...
+ Loading extension module fused_adam...
+ Loading extension module fused_adam...
+ Loading extension module fused_adam...
+ Loading extension module fused_adam...
+ wandb: Using wandb-core as the SDK backend. Please refer to https://wandb.me/wandb-core for more information.
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
+ wandb: Currently logged in as: xtom to https://api.wandb.ai. Use `wandb login --relogin` to force relogin
+ wandb: Tracking run with wandb version 0.19.8
+ wandb: Run data is saved locally in /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/wandb/run-20250528_194034-n6klpr55
+ wandb: Run `wandb offline` to turn off syncing.
+ wandb: Syncing run qwen-0.5b-s3-Q1-1k-Q2-500
+ wandb: ⭐️ View project at https://wandb.ai/xtom/Inverse_Alignment
+ wandb: 🚀 View run at https://wandb.ai/xtom/Inverse_Alignment/runs/n6klpr55
+
+
+ tokenizer config file saved in /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/tokenizer_config.json
+ Special tokens file saved in /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/special_tokens_map.json
+ wandb:
+ wandb:
+ wandb: Run history:
+ wandb: train/epoch ▁▁▂▂▃▃▄▄▅▅▆▆▇▇██
+ wandb: train/loss ▄▅▆█▇▆▇▄▄▄▆▁▇▅▃▅
+ wandb: train/lr ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
+ wandb: train/step ▁▁▂▂▃▃▄▄▅▅▆▆▇▇██
+ wandb:
+ wandb: Run summary:
+ wandb: train/epoch 1
+ wandb: train/loss 2.13464
+ wandb: train/lr 1e-05
+ wandb: train/step 16
+ wandb:
+ wandb: 🚀 View run qwen-0.5b-s3-Q1-1k-Q2-500 at: https://wandb.ai/xtom/Inverse_Alignment/runs/n6klpr55
+ wandb: ⭐️ View project at: https://wandb.ai/xtom/Inverse_Alignment
+ wandb: Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)
+ wandb: Find logs at: /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/wandb/run-20250528_194034-n6klpr55/logs
stdout.log ADDED
@@ -0,0 +1,299 @@
+ [2025-05-28 19:39:57,995] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
+ [2025-05-28 19:40:01,933] [WARNING] [runner.py:215:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+ [2025-05-28 19:40:01,933] [INFO] [runner.py:607:main] cmd = /aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=45097 --module --enable_each_rank_log=None safe_rlhf.finetune --train_datasets inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json --model_name_or_path /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k --max_length 2048 --trust_remote_code True --epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 8 --gradient_checkpointing --learning_rate 1e-5 --lr_warmup_ratio 0 --weight_decay 0.0 --lr_scheduler_type constant --weight_decay 0.0 --seed 42 --output_dir /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500 --log_type wandb --log_run_name qwen-0.5b-s3-Q1-1k-Q2-500 --log_project Inverse_Alignment --zero_stage 3 --offload none --bf16 True --tf32 True --save_16bit
+ [2025-05-28 19:40:04,369] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
+ [2025-05-28 19:40:08,221] [INFO] [launch.py:146:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+ [2025-05-28 19:40:08,221] [INFO] [launch.py:152:main] nnodes=1, num_local_procs=8, node_rank=0
+ [2025-05-28 19:40:08,221] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+ [2025-05-28 19:40:08,221] [INFO] [launch.py:164:main] dist_world_size=8
+ [2025-05-28 19:40:08,221] [INFO] [launch.py:168:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+ [2025-05-28 19:40:08,222] [INFO] [launch.py:256:main] process 459318 spawned with command: ['/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python', '-u', '-m', 'safe_rlhf.finetune', '--local_rank=0', '--train_datasets', 'inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json', '--model_name_or_path', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', '--max_length', '2048', '--trust_remote_code', 'True', '--epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '8', '--gradient_checkpointing', '--learning_rate', '1e-5', '--lr_warmup_ratio', '0', '--weight_decay', '0.0', '--lr_scheduler_type', 'constant', '--weight_decay', '0.0', '--seed', '42', '--output_dir', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', '--log_type', 'wandb', '--log_run_name', 'qwen-0.5b-s3-Q1-1k-Q2-500', '--log_project', 'Inverse_Alignment', '--zero_stage', '3', '--offload', 'none', '--bf16', 'True', '--tf32', 'True', '--save_16bit']
+ [2025-05-28 19:40:08,222] [INFO] [launch.py:256:main] process 459319 spawned with command: ['/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python', '-u', '-m', 'safe_rlhf.finetune', '--local_rank=1', '--train_datasets', 'inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json', '--model_name_or_path', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', '--max_length', '2048', '--trust_remote_code', 'True', '--epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '8', '--gradient_checkpointing', '--learning_rate', '1e-5', '--lr_warmup_ratio', '0', '--weight_decay', '0.0', '--lr_scheduler_type', 'constant', '--weight_decay', '0.0', '--seed', '42', '--output_dir', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', '--log_type', 'wandb', '--log_run_name', 'qwen-0.5b-s3-Q1-1k-Q2-500', '--log_project', 'Inverse_Alignment', '--zero_stage', '3', '--offload', 'none', '--bf16', 'True', '--tf32', 'True', '--save_16bit']
+ [2025-05-28 19:40:08,223] [INFO] [launch.py:256:main] process 459320 spawned with command: ['/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python', '-u', '-m', 'safe_rlhf.finetune', '--local_rank=2', '--train_datasets', 'inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json', '--model_name_or_path', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', '--max_length', '2048', '--trust_remote_code', 'True', '--epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '8', '--gradient_checkpointing', '--learning_rate', '1e-5', '--lr_warmup_ratio', '0', '--weight_decay', '0.0', '--lr_scheduler_type', 'constant', '--weight_decay', '0.0', '--seed', '42', '--output_dir', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', '--log_type', 'wandb', '--log_run_name', 'qwen-0.5b-s3-Q1-1k-Q2-500', '--log_project', 'Inverse_Alignment', '--zero_stage', '3', '--offload', 'none', '--bf16', 'True', '--tf32', 'True', '--save_16bit']
15
+ [2025-05-28 19:40:08,223] [INFO] [launch.py:256:main] process 459321 spawned with command: ['/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python', '-u', '-m', 'safe_rlhf.finetune', '--local_rank=3', '--train_datasets', 'inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json', '--model_name_or_path', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', '--max_length', '2048', '--trust_remote_code', 'True', '--epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '8', '--gradient_checkpointing', '--learning_rate', '1e-5', '--lr_warmup_ratio', '0', '--weight_decay', '0.0', '--lr_scheduler_type', 'constant', '--weight_decay', '0.0', '--seed', '42', '--output_dir', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', '--log_type', 'wandb', '--log_run_name', 'qwen-0.5b-s3-Q1-1k-Q2-500', '--log_project', 'Inverse_Alignment', '--zero_stage', '3', '--offload', 'none', '--bf16', 'True', '--tf32', 'True', '--save_16bit']
16
+ [2025-05-28 19:40:08,224] [INFO] [launch.py:256:main] process 459322 spawned with command: ['/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python', '-u', '-m', 'safe_rlhf.finetune', '--local_rank=4', '--train_datasets', 'inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json', '--model_name_or_path', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', '--max_length', '2048', '--trust_remote_code', 'True', '--epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '8', '--gradient_checkpointing', '--learning_rate', '1e-5', '--lr_warmup_ratio', '0', '--weight_decay', '0.0', '--lr_scheduler_type', 'constant', '--weight_decay', '0.0', '--seed', '42', '--output_dir', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', '--log_type', 'wandb', '--log_run_name', 'qwen-0.5b-s3-Q1-1k-Q2-500', '--log_project', 'Inverse_Alignment', '--zero_stage', '3', '--offload', 'none', '--bf16', 'True', '--tf32', 'True', '--save_16bit']
17
+ [2025-05-28 19:40:08,224] [INFO] [launch.py:256:main] process 459323 spawned with command: ['/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python', '-u', '-m', 'safe_rlhf.finetune', '--local_rank=5', '--train_datasets', 'inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json', '--model_name_or_path', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', '--max_length', '2048', '--trust_remote_code', 'True', '--epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '8', '--gradient_checkpointing', '--learning_rate', '1e-5', '--lr_warmup_ratio', '0', '--weight_decay', '0.0', '--lr_scheduler_type', 'constant', '--weight_decay', '0.0', '--seed', '42', '--output_dir', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', '--log_type', 'wandb', '--log_run_name', 'qwen-0.5b-s3-Q1-1k-Q2-500', '--log_project', 'Inverse_Alignment', '--zero_stage', '3', '--offload', 'none', '--bf16', 'True', '--tf32', 'True', '--save_16bit']
18
+ [2025-05-28 19:40:08,225] [INFO] [launch.py:256:main] process 459324 spawned with command: ['/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python', '-u', '-m', 'safe_rlhf.finetune', '--local_rank=6', '--train_datasets', 'inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json', '--model_name_or_path', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', '--max_length', '2048', '--trust_remote_code', 'True', '--epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '8', '--gradient_checkpointing', '--learning_rate', '1e-5', '--lr_warmup_ratio', '0', '--weight_decay', '0.0', '--lr_scheduler_type', 'constant', '--weight_decay', '0.0', '--seed', '42', '--output_dir', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', '--log_type', 'wandb', '--log_run_name', 'qwen-0.5b-s3-Q1-1k-Q2-500', '--log_project', 'Inverse_Alignment', '--zero_stage', '3', '--offload', 'none', '--bf16', 'True', '--tf32', 'True', '--save_16bit']
19
+ [2025-05-28 19:40:08,225] [INFO] [launch.py:256:main] process 459325 spawned with command: ['/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python', '-u', '-m', 'safe_rlhf.finetune', '--local_rank=7', '--train_datasets', 'inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json', '--model_name_or_path', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', '--max_length', '2048', '--trust_remote_code', 'True', '--epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '8', '--gradient_checkpointing', '--learning_rate', '1e-5', '--lr_warmup_ratio', '0', '--weight_decay', '0.0', '--lr_scheduler_type', 'constant', '--weight_decay', '0.0', '--seed', '42', '--output_dir', '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', '--log_type', 'wandb', '--log_run_name', 'qwen-0.5b-s3-Q1-1k-Q2-500', '--log_project', 'Inverse_Alignment', '--zero_stage', '3', '--offload', 'none', '--bf16', 'True', '--tf32', 'True', '--save_16bit']
20
+ [2025-05-28 19:40:14,223] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
21
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
22
+ [2025-05-28 19:40:14,921] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
23
+ [2025-05-28 19:40:14,942] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
24
+ [2025-05-28 19:40:14,952] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
25
+ [2025-05-28 19:40:15,065] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
26
+ [2025-05-28 19:40:15,085] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
27
+ [2025-05-28 19:40:15,119] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
28
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
29
+ [2025-05-28 19:40:15,128] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect)
30
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
31
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
32
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
33
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
34
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
35
+ Warning: The cache directory for DeepSpeed Triton autotune, /home/hansirui_1st/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path.
36
+ [2025-05-28 19:40:20,253] [INFO] [comm.py:658:init_distributed] cdb=None
37
+ [2025-05-28 19:40:21,227] [INFO] [comm.py:658:init_distributed] cdb=None
38
+ [2025-05-28 19:40:21,228] [INFO] [comm.py:658:init_distributed] cdb=None
39
+ [2025-05-28 19:40:21,334] [INFO] [comm.py:658:init_distributed] cdb=None
40
+ [2025-05-28 19:40:21,334] [INFO] [comm.py:658:init_distributed] cdb=None
41
+ [2025-05-28 19:40:21,338] [INFO] [comm.py:658:init_distributed] cdb=None
42
+ [2025-05-28 19:40:21,338] [INFO] [comm.py:689:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
43
+ [2025-05-28 19:40:21,350] [INFO] [comm.py:658:init_distributed] cdb=None
44
+ [2025-05-28 19:40:21,360] [INFO] [comm.py:658:init_distributed] cdb=None
45
+ Set logger level to INFO.
46
+ [2025-05-28 19:40:30,122] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
47
+ [2025-05-28 19:40:30,541] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
48
+ [2025-05-28 19:40:30,541] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
49
+ [2025-05-28 19:40:30,541] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
50
+ [2025-05-28 19:40:30,541] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
51
+ [2025-05-28 19:40:30,541] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
52
+ [2025-05-28 19:40:30,541] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
53
+ [2025-05-28 19:40:30,541] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
54
+ [2025-05-28 19:40:30,678] [INFO] [partition_parameters.py:348:__exit__] finished initializing model - num_params = 291, num_elems = 0.62B
55
+ ninja: no work to do.
56
+ Time to load fused_adam op: 0.11651611328125 seconds
57
+ [2025-05-28 19:40:31,739] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
58
+ Time to load fused_adam op: 0.20244383811950684 seconds
59
+ Time to load fused_adam op: 0.20265483856201172 seconds
60
+ Time to load fused_adam op: 0.2028496265411377 seconds
61
+ Time to load fused_adam op: 0.2027437686920166 seconds
62
+ Time to load fused_adam op: 0.2026212215423584 seconds
63
+ [2025-05-28 19:40:31,825] [INFO] [logging.py:128:log_dist] [Rank 0] DeepSpeed info: version=0.16.4, git-hash=unknown, git-branch=unknown
64
+ Time to load fused_adam op: 0.20309877395629883 seconds
65
+ [2025-05-28 19:40:31,825] [INFO] [comm.py:683:init_distributed] Distributed backend already initialized
66
+ [2025-05-28 19:40:31,825] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
67
+ Time to load fused_adam op: 0.20302772521972656 seconds
68
+ [2025-05-28 19:40:31,825] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
69
+ [2025-05-28 19:40:31,825] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
70
+ [2025-05-28 19:40:31,825] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
71
+ [2025-05-28 19:40:31,825] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
72
+ [2025-05-28 19:40:31,826] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
73
+ [2025-05-28 19:40:31,826] [INFO] [config.py:734:__init__] Config mesh_device None world_size = 8
74
+ [2025-05-28 19:40:31,830] [INFO] [logging.py:128:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
75
+ [2025-05-28 19:40:31,831] [INFO] [logging.py:128:log_dist] [Rank 0] Using client Optimizer as basic optimizer
76
+ [2025-05-28 19:40:31,831] [INFO] [logging.py:128:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer
77
+ [2025-05-28 19:40:31,837] [INFO] [logging.py:128:log_dist] [Rank 0] DeepSpeed Basic Optimizer = FusedAdam
78
+ [2025-05-28 19:40:31,837] [INFO] [utils.py:59:is_zero_supported_optimizer] Checking ZeRO support for optimizer=FusedAdam type=<class 'deepspeed.ops.adam.fused_adam.FusedAdam'>
79
+ [2025-05-28 19:40:31,837] [INFO] [logging.py:128:log_dist] [Rank 0] Creating fp16 ZeRO stage 3 optimizer, MiCS is enabled False, Hierarchical params gather False
80
+ [2025-05-28 19:40:31,837] [INFO] [logging.py:128:log_dist] [Rank 0] Creating torch.bfloat16 ZeRO stage 3 optimizer
81
+ [2025-05-28 19:40:32,031] [INFO] [utils.py:781:see_memory_usage] Stage 3 initialize beginning
82
+ [2025-05-28 19:40:32,032] [INFO] [utils.py:782:see_memory_usage] MA 0.11 GB Max_MA 0.69 GB CA 0.73 GB Max_CA 1 GB
83
+ [2025-05-28 19:40:32,032] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.41 GB, percent = 23.9%
84
+ [2025-05-28 19:40:32,033] [INFO] [stage3.py:170:__init__] Reduce bucket size 500000000
85
+ [2025-05-28 19:40:32,033] [INFO] [stage3.py:171:__init__] Prefetch bucket size 30000000
86
+ [2025-05-28 19:40:32,155] [INFO] [utils.py:781:see_memory_usage] DeepSpeedZeRoOffload initialize [begin]
87
+ [2025-05-28 19:40:32,155] [INFO] [utils.py:782:see_memory_usage] MA 0.11 GB Max_MA 0.11 GB CA 0.73 GB Max_CA 1 GB
88
+ [2025-05-28 19:40:32,156] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.41 GB, percent = 23.9%
89
+ Parameter Offload: Total persistent parameters: 123904 in 121 params
90
+ [2025-05-28 19:40:32,265] [INFO] [utils.py:781:see_memory_usage] DeepSpeedZeRoOffload initialize [end]
91
+ [2025-05-28 19:40:32,266] [INFO] [utils.py:782:see_memory_usage] MA 0.11 GB Max_MA 0.11 GB CA 0.73 GB Max_CA 1 GB
92
+ [2025-05-28 19:40:32,266] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.41 GB, percent = 23.9%
93
+ [2025-05-28 19:40:32,367] [INFO] [utils.py:781:see_memory_usage] Before creating fp16 partitions
94
+ [2025-05-28 19:40:32,367] [INFO] [utils.py:782:see_memory_usage] MA 0.11 GB Max_MA 0.11 GB CA 0.73 GB Max_CA 1 GB
95
+ [2025-05-28 19:40:32,367] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.41 GB, percent = 23.9%
96
+ [2025-05-28 19:40:32,811] [INFO] [utils.py:781:see_memory_usage] After creating fp16 partitions: 2
97
+ [2025-05-28 19:40:32,812] [INFO] [utils.py:782:see_memory_usage] MA 0.11 GB Max_MA 0.11 GB CA 0.11 GB Max_CA 1 GB
98
+ [2025-05-28 19:40:32,812] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.45 GB, percent = 23.9%
99
+ [2025-05-28 19:40:32,915] [INFO] [utils.py:781:see_memory_usage] Before creating fp32 partitions
100
+ [2025-05-28 19:40:32,916] [INFO] [utils.py:782:see_memory_usage] MA 0.11 GB Max_MA 0.11 GB CA 0.11 GB Max_CA 0 GB
101
+ [2025-05-28 19:40:32,916] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.45 GB, percent = 23.9%
102
+ [2025-05-28 19:40:33,021] [INFO] [utils.py:781:see_memory_usage] After creating fp32 partitions
103
+ [2025-05-28 19:40:33,021] [INFO] [utils.py:782:see_memory_usage] MA 0.32 GB Max_MA 0.43 GB CA 0.44 GB Max_CA 0 GB
104
+ [2025-05-28 19:40:33,021] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.45 GB, percent = 23.9%
105
+ [2025-05-28 19:40:33,122] [INFO] [utils.py:781:see_memory_usage] Before initializing optimizer states
106
+ [2025-05-28 19:40:33,123] [INFO] [utils.py:782:see_memory_usage] MA 0.32 GB Max_MA 0.32 GB CA 0.44 GB Max_CA 0 GB
107
+ [2025-05-28 19:40:33,123] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.45 GB, percent = 23.9%
108
+ [2025-05-28 19:40:33,228] [INFO] [utils.py:781:see_memory_usage] After initializing optimizer states
109
+ [2025-05-28 19:40:33,228] [INFO] [utils.py:782:see_memory_usage] MA 0.32 GB Max_MA 0.54 GB CA 0.65 GB Max_CA 1 GB
110
+ [2025-05-28 19:40:33,228] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.45 GB, percent = 23.9%
111
+ [2025-05-28 19:40:33,229] [INFO] [stage3.py:534:_setup_for_real_optimizer] optimizer state initialized
112
+ [2025-05-28 19:40:33,422] [INFO] [utils.py:781:see_memory_usage] After initializing ZeRO optimizer
113
+ [2025-05-28 19:40:33,423] [INFO] [utils.py:782:see_memory_usage] MA 1.36 GB Max_MA 1.94 GB CA 2.17 GB Max_CA 2 GB
114
+ [2025-05-28 19:40:33,423] [INFO] [utils.py:789:see_memory_usage] CPU Virtual Memory: used = 482.45 GB, percent = 23.9%
115
+ [2025-05-28 19:40:33,423] [INFO] [logging.py:128:log_dist] [Rank 0] DeepSpeed Final Optimizer = DeepSpeedZeroOptimizer_Stage3
116
+ [2025-05-28 19:40:33,423] [INFO] [logging.py:128:log_dist] [Rank 0] DeepSpeed using client LR scheduler
117
+ [2025-05-28 19:40:33,423] [INFO] [logging.py:128:log_dist] [Rank 0] DeepSpeed LR Scheduler = <torch.optim.lr_scheduler.LambdaLR object at 0x15510495f890>
118
+ [2025-05-28 19:40:33,423] [INFO] [logging.py:128:log_dist] [Rank 0] step=0, skipped=0, lr=[1e-05, 1e-05], mom=[(0.9, 0.95), (0.9, 0.95)]
119
+ [2025-05-28 19:40:33,424] [INFO] [config.py:1001:print] DeepSpeedEngine configuration:
120
+ [2025-05-28 19:40:33,424] [INFO] [config.py:1005:print] activation_checkpointing_config {
121
+ "partition_activations": false,
122
+ "contiguous_memory_optimization": false,
123
+ "cpu_checkpointing": false,
124
+ "number_checkpoints": null,
125
+ "synchronize_checkpoint_boundary": false,
126
+ "profile": false
127
+ }
128
+ [2025-05-28 19:40:33,424] [INFO] [config.py:1005:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'intra_op_parallelism': 1, 'single_submit': False, 'overlap_events': True, 'use_gds': False}
129
+ [2025-05-28 19:40:33,424] [INFO] [config.py:1005:print] amp_enabled .................. False
130
+ [2025-05-28 19:40:33,424] [INFO] [config.py:1005:print] amp_params ................... False
131
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] autotuning_config ............ {
132
+ "enabled": false,
133
+ "start_step": null,
134
+ "end_step": null,
135
+ "metric_path": null,
136
+ "arg_mappings": null,
137
+ "metric": "throughput",
138
+ "model_info": null,
139
+ "results_dir": "autotuning_results",
140
+ "exps_dir": "autotuning_exps",
141
+ "overwrite": true,
142
+ "fast": true,
143
+ "start_profile_step": 3,
144
+ "end_profile_step": 5,
145
+ "tuner_type": "gridsearch",
146
+ "tuner_early_stopping": 5,
147
+ "tuner_num_trials": 50,
148
+ "model_info_path": null,
149
+ "mp_size": 1,
150
+ "max_train_batch_size": null,
151
+ "min_train_batch_size": 1,
152
+ "max_train_micro_batch_size_per_gpu": 1.024000e+03,
153
+ "min_train_micro_batch_size_per_gpu": 1,
154
+ "num_tuning_micro_batch_sizes": 3
155
+ }
156
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] bfloat16_enabled ............. True
157
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] bfloat16_immediate_grad_update False
158
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] checkpoint_parallel_write_pipeline False
159
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] checkpoint_tag_validation_enabled True
160
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] checkpoint_tag_validation_fail False
161
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] comms_config ................. <deepspeed.comm.config.DeepSpeedCommsConfig object at 0x155104961790>
162
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] communication_data_type ...... None
163
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}}
164
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] curriculum_enabled_legacy .... False
165
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] curriculum_params_legacy ..... False
166
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}}
167
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] data_efficiency_enabled ...... False
168
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] dataloader_drop_last ......... False
169
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] disable_allgather ............ False
170
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] dump_state ................... False
171
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] dynamic_loss_scale_args ...... None
172
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] eigenvalue_enabled ........... False
173
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] eigenvalue_gas_boundary_resolution 1
174
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] eigenvalue_layer_name ........ bert.encoder.layer
175
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] eigenvalue_layer_num ......... 0
176
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] eigenvalue_max_iter .......... 100
177
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] eigenvalue_stability ......... 1e-06
178
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] eigenvalue_tol ............... 0.01
179
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] eigenvalue_verbose ........... False
180
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] elasticity_enabled ........... False
181
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] flops_profiler_config ........ {
182
+ "enabled": false,
183
+ "recompute_fwd_factor": 0.0,
184
+ "profile_step": 1,
185
+ "module_depth": -1,
186
+ "top_modules": 1,
187
+ "detailed": true,
188
+ "output_file": null
189
+ }
190
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] fp16_auto_cast ............... None
191
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] fp16_enabled ................. False
192
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] fp16_master_weights_and_gradients False
193
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] global_rank .................. 0
194
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] grad_accum_dtype ............. None
195
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] gradient_accumulation_steps .. 8
196
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] gradient_clipping ............ 1.0
197
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] gradient_predivide_factor .... 1.0
198
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] graph_harvesting ............. False
199
+ [2025-05-28 19:40:33,425] [INFO] [config.py:1005:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8
200
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] initial_dynamic_scale ........ 1
201
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] load_universal_checkpoint .... False
202
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] loss_scale ................... 1.0
203
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] memory_breakdown ............. False
204
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] mics_hierarchial_params_gather False
205
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] mics_shard_size .............. -1
206
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') comet=CometConfig(enabled=False, samples_log_interval=100, project=None, workspace=None, api_key=None, experiment_name=None, experiment_key=None, online=None, mode=None) wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName')
207
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] nebula_config ................ {
208
+ "enabled": false,
209
+ "persistent_storage_path": null,
210
+ "persistent_time_interval": 100,
211
+ "num_of_version_in_retention": 2,
212
+ "enable_nebula_load": true,
213
+ "load_path": null
214
+ }
215
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] optimizer_legacy_fusion ...... False
216
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] optimizer_name ............... None
217
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] optimizer_params ............. None
218
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': True, 'grad_partitioned': True}
219
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] pld_enabled .................. False
220
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] pld_params ................... False
221
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] prescale_gradients ........... False
222
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] scheduler_name ............... None
223
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] scheduler_params ............. None
224
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] seq_parallel_communication_data_type torch.float32
225
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] sparse_attention ............. None
226
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] sparse_gradients_enabled ..... False
227
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] steps_per_print .............. 10
228
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] tensor_parallel_config ....... dtype=torch.float16 autotp_size=0 tensor_parallel=TPConfig(tp_size=1, tp_grain_size=1, mpu=None, tp_group=None) injection_policy_tuple=None keep_module_on_host=False replace_with_kernel_inject=False
229
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] timers_config ................ enabled=True synchronized=True
230
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] train_batch_size ............. 256
231
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] train_micro_batch_size_per_gpu 4
232
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] use_data_before_expert_parallel_ False
233
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] use_node_local_storage ....... False
234
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] wall_clock_breakdown ......... False
235
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] weight_quantization_config ... None
236
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] world_size ................... 8
237
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] zero_allow_untested_optimizer False
238
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] zero_config .................. stage=3 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=500000000 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=500000000 overlap_comm=True load_from_fp32_weights=True elastic_checkpoint=False offload_param=DeepSpeedZeroOffloadParamConfig(device='none', nvme_path=None, buffer_count=5, buffer_size=100000000, max_in_cpu=1000000000, pin_memory=False) offload_optimizer=DeepSpeedZeroOffloadOptimizerConfig(device='none', nvme_path=None, buffer_count=4, pin_memory=False, pipeline_read=False, pipeline_write=False, fast_init=False, ratio=1.0) sub_group_size=1000000000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=30000000 param_persistence_threshold=10000 model_persistence_threshold=9223372036854775807 max_live_parameters=30000000 max_reuse_distance=1000000000 gather_16bit_weights_on_model_save=True module_granularity_threshold=0 use_all_reduce_for_fetch_params=False stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False zeropp_loco_param=None mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=False pipeline_loading_checkpoint=False override_module_apply=True log_trace_cache_warnings=False
239
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] zero_enabled ................. True
240
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] zero_force_ds_cpu_optimizer .. True
241
+ [2025-05-28 19:40:33,426] [INFO] [config.py:1005:print] zero_optimization_stage ...... 3
242
+ [2025-05-28 19:40:33,426] [INFO] [config.py:991:print_user_config] json = {
243
+ "train_batch_size": 256,
244
+ "train_micro_batch_size_per_gpu": 4,
245
+ "gradient_accumulation_steps": 8,
246
+ "steps_per_print": 10,
247
+ "zero_optimization": {
248
+ "stage": 3,
249
+ "offload_param": {
250
+ "device": "none"
251
+ },
252
+ "offload_optimizer": {
253
+ "device": "none"
254
+ },
255
+ "param_persistence_threshold": 1.000000e+04,
256
+ "max_live_parameters": 3.000000e+07,
257
+ "prefetch_bucket_size": 3.000000e+07,
258
+ "memory_efficient_linear": false,
259
+ "gather_16bit_weights_on_model_save": true
260
+ },
261
+ "gradient_clipping": 1.0,
262
+ "prescale_gradients": false,
263
+ "wall_clock_breakdown": false,
264
+ "hybrid_engine": {
265
+ "enabled": false,
266
+ "max_out_tokens": 512,
267
+ "inference_tp_size": 1,
268
+ "release_inference_cache": false,
269
+ "pin_parameters": true,
270
+ "tp_gather_partition_size": 8
271
+ },
272
+ "bf16": {
273
+ "enabled": true
274
+ }
275
+ }
276
+ ***** Running training *****
277
+ Saving model to "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500" ...
278
+ Saving 16-bit model...
279
+ [2025-05-28 19:40:51,359] [INFO] [logging.py:128:log_dist] [Rank 0] [Torch] Checkpoint global_step2 is about to be saved!
280
+ [2025-05-28 19:40:51,359] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step2 is ready now!
281
+ [2025-05-28 19:40:51,359] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step2 is ready now!
282
+ [2025-05-28 19:40:51,359] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step2 is ready now!
283
+ [2025-05-28 19:40:51,359] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step2 is ready now!
284
+ [2025-05-28 19:40:51,359] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step2 is ready now!
285
+ [2025-05-28 19:40:51,359] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step2 is ready now!
286
+ [2025-05-28 19:40:51,359] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step2 is ready now!
287
+ [2025-05-28 19:40:51,360] [INFO] [engine.py:3804:save_16bit_model] Saving model weights to /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/pytorch_model.bin, tag: global_step2
288
+ [2025-05-28 19:40:51,361] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/pytorch_model.bin...
289
+ [2025-05-28 19:40:52,191] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/pytorch_model.bin.
290
+ [2025-05-28 19:40:52,192] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step2 is ready now!
291
+ Model saved!
292
+ [2025-05-28 19:40:54,230] [INFO] [launch.py:351:main] Process 459319 exits successfully.
293
+ [2025-05-28 19:40:54,230] [INFO] [launch.py:351:main] Process 459325 exits successfully.
294
+ [2025-05-28 19:40:54,230] [INFO] [launch.py:351:main] Process 459323 exits successfully.
295
+ [2025-05-28 19:40:54,230] [INFO] [launch.py:351:main] Process 459324 exits successfully.
296
+ [2025-05-28 19:40:54,231] [INFO] [launch.py:351:main] Process 459321 exits successfully.
297
+ [2025-05-28 19:40:54,231] [INFO] [launch.py:351:main] Process 459320 exits successfully.
298
+ [2025-05-28 19:40:55,231] [INFO] [launch.py:351:main] Process 459322 exits successfully.
299
+ [2025-05-28 19:40:56,231] [INFO] [launch.py:351:main] Process 459318 exits successfully.
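Editor's note: the global batch size and step count printed in the log above follow directly from the launch flags. A minimal sketch of the arithmetic in Python (all values copied from the config print above; the ceil assumes the dataloader does not drop the last partial batch):

import math

# Values from the launch command and DeepSpeed config print above.
per_device_train_batch_size = 4
gradient_accumulation_steps = 8
world_size = 8      # one process per local GPU
num_examples = 500  # unsafe_500.json

# Global batch size per optimizer step; matches "train_batch_size ... 256" in the log.
train_batch_size = per_device_train_batch_size * gradient_accumulation_steps * world_size
assert train_batch_size == 256

# One epoch over 500 examples -> 2 optimizer steps; matches total_training_steps = 2.
assert math.ceil(num_examples / train_batch_size) == 2

# The progress bar's 16 iterations are micro-batches: ceil(500 / (4 * 8 GPUs)) = 16.
assert math.ceil(num_examples / (per_device_train_batch_size * world_size)) == 16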
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8e724dc23482c611d87c990a58b98f85296a1df777744ef85a3bd9fc0d00aed
3
+ size 11418727
tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "128244": {
5
+ "content": "<unk>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "128245": {
13
+ "content": "<s>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151643": {
21
+ "content": "<|endoftext|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "151644": {
29
+ "content": "<|im_start|>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "151645": {
37
+ "content": "<|im_end|>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ }
44
+ },
45
+ "additional_special_tokens": [
46
+ "<|im_start|>",
47
+ "<|im_end|>"
48
+ ],
49
+ "bos_token": "<s>",
50
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
51
+ "clean_up_tokenization_spaces": false,
52
+ "eos_token": "<|endoftext|>",
53
+ "errors": "replace",
54
+ "extra_special_tokens": {},
55
+ "max_length": 2048,
56
+ "model_max_length": 2048,
57
+ "pad_token": "<|endoftext|>",
58
+ "padding_side": "right",
59
+ "split_special_tokens": false,
60
+ "stride": 0,
61
+ "tokenizer_class": "Qwen2Tokenizer",
62
+ "truncation_side": "right",
63
+ "truncation_strategy": "longest_first",
64
+ "unk_token": "<unk>"
65
+ }
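Editor's note: the chat_template above is the standard ChatML form; when the first message is not a system turn, it injects the default system prompt. A minimal usage sketch (hypothetical messages; assumes the checkpoint directory from this commit is loadable with transformers):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500",
    trust_remote_code=True,  # mirrors the training arguments
)

# Hypothetical single-turn conversation with no explicit system message.
messages = [{"role": "user", "content": "Hello!"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Rendered prompt:
# <|im_start|>system
# You are a helpful assistant<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
print(text)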
vocab.json ADDED
The diff for this file is too large to render. See raw diff
wandb/debug-internal.log ADDED
@@ -0,0 +1,16 @@
1
+ {"time":"2025-05-28T19:40:34.039972736+08:00","level":"INFO","msg":"stream: starting","core version":"0.19.8","symlink path":"/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/wandb/run-20250528_194034-n6klpr55/logs/debug-core.log"}
2
+ {"time":"2025-05-28T19:40:34.254108788+08:00","level":"INFO","msg":"created new stream","id":"n6klpr55"}
3
+ {"time":"2025-05-28T19:40:34.25421878+08:00","level":"INFO","msg":"stream: started","id":"n6klpr55"}
4
+ {"time":"2025-05-28T19:40:34.254238342+08:00","level":"INFO","msg":"handler: started","stream_id":"n6klpr55"}
5
+ {"time":"2025-05-28T19:40:34.25424859+08:00","level":"INFO","msg":"writer: Do: started","stream_id":"n6klpr55"}
6
+ {"time":"2025-05-28T19:40:34.254251626+08:00","level":"INFO","msg":"sender: started","stream_id":"n6klpr55"}
7
+ {"time":"2025-05-28T19:40:34.580034427+08:00","level":"INFO","msg":"Starting system monitor"}
8
+ {"time":"2025-05-28T19:40:52.23245154+08:00","level":"INFO","msg":"Stopping system monitor"}
9
+ {"time":"2025-05-28T19:40:52.233229831+08:00","level":"INFO","msg":"Stopped system monitor"}
10
+ {"time":"2025-05-28T19:40:52.998922566+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
11
+ {"time":"2025-05-28T19:40:53.216254401+08:00","level":"INFO","msg":"handler: operation stats","stats":{}}
12
+ {"time":"2025-05-28T19:40:53.219276688+08:00","level":"INFO","msg":"stream: closing","id":"n6klpr55"}
13
+ {"time":"2025-05-28T19:40:53.219345645+08:00","level":"INFO","msg":"handler: closed","stream_id":"n6klpr55"}
14
+ {"time":"2025-05-28T19:40:53.219356998+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":"n6klpr55"}
15
+ {"time":"2025-05-28T19:40:53.219365865+08:00","level":"INFO","msg":"sender: closed","stream_id":"n6klpr55"}
16
+ {"time":"2025-05-28T19:40:53.219815996+08:00","level":"INFO","msg":"stream: closed","id":"n6klpr55"}
wandb/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Current SDK version is 0.19.8
2
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Configure stats pid to 459318
3
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Loading settings from /home/hansirui_1st/.config/wandb/settings
4
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Loading settings from /home/hansirui_1st/jiayi/resist/setting3/scripts/wandb/settings
5
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Loading settings from environment variables
6
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:setup_run_log_directory():647] Logging user logs to /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/wandb/run-20250528_194034-n6klpr55/logs/debug.log
7
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:setup_run_log_directory():648] Logging internal logs to /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/wandb/run-20250528_194034-n6klpr55/logs/debug-internal.log
8
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:init():761] calling init triggers
9
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:init():766] wandb.init called with sweep_config: {}
10
+ config: {'model_name_or_path': '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', 'max_length': 2048, 'trust_remote_code': True, 'train_datasets': [('inverse-json', {'proportion': 1.0, 'path': '/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json'})], 'eval_datasets': None, 'epochs': 1, 'per_device_train_batch_size': 4, 'per_device_eval_batch_size': 4, 'gradient_accumulation_steps': 8, 'gradient_checkpointing': True, 'lr': 1e-05, 'lr_scheduler_type': <SchedulerType.CONSTANT: 'constant'>, 'lr_warmup_ratio': 0.0, 'weight_decay': 0.0, 'seed': 42, 'fp16': False, 'bf16': True, 'tf32': True, 'eval_strategy': 'epoch', 'eval_interval': 1000000, 'need_eval': False, 'eval_split_ratio': None, 'output_dir': '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', 'log_type': 'wandb', 'log_dir': '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', 'log_project': 'Inverse_Alignment', 'log_run_name': 'qwen-0.5b-s3-Q1-1k-Q2-500', 'save_16bit': True, 'save_interval': 1000000, 'local_rank': 0, 'zero_stage': 3, 'offload': 'none', 'deepspeed': False, 'deepspeed_config': None, 'deepscale': False, 'deepscale_config': None, 'global_rank': 0, 'device': device(type='cuda', index=0), 'num_update_steps_per_epoch': 2, 'total_training_steps': 2, '_wandb': {}}
11
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:init():784] starting backend
12
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:init():788] sending inform_init request
13
+ 2025-05-28 19:40:34,037 INFO MainThread:459318 [backend.py:_multiprocessing_setup():101] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
14
+ 2025-05-28 19:40:34,037 INFO MainThread:459318 [wandb_init.py:init():798] backend started and connected
15
+ 2025-05-28 19:40:34,038 INFO MainThread:459318 [wandb_init.py:init():891] updated telemetry
16
+ 2025-05-28 19:40:34,038 INFO MainThread:459318 [wandb_init.py:init():915] communicating run to backend with 90.0 second timeout
17
+ 2025-05-28 19:40:34,575 INFO MainThread:459318 [wandb_init.py:init():990] starting run threads in backend
18
+ 2025-05-28 19:40:34,802 INFO MainThread:459318 [wandb_run.py:_console_start():2375] atexit reg
19
+ 2025-05-28 19:40:34,802 INFO MainThread:459318 [wandb_run.py:_redirect():2227] redirect: wrap_raw
20
+ 2025-05-28 19:40:34,802 INFO MainThread:459318 [wandb_run.py:_redirect():2292] Wrapping output streams.
21
+ 2025-05-28 19:40:34,803 INFO MainThread:459318 [wandb_run.py:_redirect():2315] Redirects installed.
22
+ 2025-05-28 19:40:34,805 INFO MainThread:459318 [wandb_init.py:init():1032] run started, returning control to user process
23
+ 2025-05-28 19:40:52,228 INFO MainThread:459318 [wandb_run.py:_finish():2112] finishing run xtom/Inverse_Alignment/n6klpr55
24
+ 2025-05-28 19:40:52,229 INFO MainThread:459318 [wandb_run.py:_atexit_cleanup():2340] got exitcode: 0
25
+ 2025-05-28 19:40:52,230 INFO MainThread:459318 [wandb_run.py:_restore():2322] restore
26
+ 2025-05-28 19:40:52,231 INFO MainThread:459318 [wandb_run.py:_restore():2328] restore done
27
+ 2025-05-28 19:40:53,217 INFO MainThread:459318 [wandb_run.py:_footer_history_summary_info():3956] rendering history
28
+ 2025-05-28 19:40:53,218 INFO MainThread:459318 [wandb_run.py:_footer_history_summary_info():3988] rendering summary
29
+ 2025-05-28 19:40:53,218 INFO MainThread:459318 [wandb_run.py:_footer_sync_info():3917] logging synced files
wandb/run-20250528_194034-n6klpr55/files/config.yaml ADDED
@@ -0,0 +1,125 @@
1
+ _wandb:
2
+ value:
3
+ cli_version: 0.19.8
4
+ m: []
5
+ python_version: 3.11.11
6
+ t:
7
+ "1":
8
+ - 1
9
+ - 5
10
+ - 11
11
+ - 41
12
+ - 49
13
+ - 51
14
+ - 53
15
+ - 55
16
+ - 71
17
+ - 105
18
+ "2":
19
+ - 1
20
+ - 5
21
+ - 11
22
+ - 41
23
+ - 49
24
+ - 51
25
+ - 53
26
+ - 55
27
+ - 71
28
+ - 105
29
+ "3":
30
+ - 2
31
+ - 13
32
+ - 16
33
+ - 23
34
+ - 55
35
+ - 61
36
+ "4": 3.11.11
37
+ "5": 0.19.8
38
+ "6": 4.49.0
39
+ "8":
40
+ - 5
41
+ "12": 0.19.8
42
+ "13": linux-x86_64
43
+ bf16:
44
+ value: true
45
+ deepscale:
46
+ value: false
47
+ deepscale_config:
48
+ value: null
49
+ deepspeed:
50
+ value: false
51
+ deepspeed_config:
52
+ value: null
53
+ device:
54
+ value: cuda:0
55
+ epochs:
56
+ value: 1
57
+ eval_datasets:
58
+ value: null
59
+ eval_interval:
60
+ value: 1000000
61
+ eval_split_ratio:
62
+ value: null
63
+ eval_strategy:
64
+ value: epoch
65
+ fp16:
66
+ value: false
67
+ global_rank:
68
+ value: 0
69
+ gradient_accumulation_steps:
70
+ value: 8
71
+ gradient_checkpointing:
72
+ value: true
73
+ local_rank:
74
+ value: 0
75
+ log_dir:
76
+ value: /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500
77
+ log_project:
78
+ value: Inverse_Alignment
79
+ log_run_name:
80
+ value: qwen-0.5b-s3-Q1-1k-Q2-500
81
+ log_type:
82
+ value: wandb
83
+ lr:
84
+ value: 1e-05
85
+ lr_scheduler_type:
86
+ value: CONSTANT
87
+ lr_warmup_ratio:
88
+ value: 0
89
+ max_length:
90
+ value: 2048
91
+ model_name_or_path:
92
+ value: /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k
93
+ need_eval:
94
+ value: false
95
+ num_update_steps_per_epoch:
96
+ value: 2
97
+ offload:
98
+ value: none
99
+ output_dir:
100
+ value: /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500
101
+ per_device_eval_batch_size:
102
+ value: 4
103
+ per_device_train_batch_size:
104
+ value: 4
105
+ save_16bit:
106
+ value: true
107
+ save_interval:
108
+ value: 1000000
109
+ seed:
110
+ value: 42
111
+ tf32:
112
+ value: true
113
+ total_training_steps:
114
+ value: 2
115
+ train_datasets:
116
+ value:
117
+ - - inverse-json
118
+ - path: /home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json
119
+ proportion: 1
120
+ trust_remote_code:
121
+ value: true
122
+ weight_decay:
123
+ value: 0
124
+ zero_stage:
125
+ value: 3
wandb/run-20250528_194034-n6klpr55/files/output.log ADDED
@@ -0,0 +1,13 @@
1
+ ***** Running training *****
2
+ Training 1/1 epoch: 0%| | 0/16 [00:00<?, ?it/s]`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
3
+ Training 1/1 epoch (loss 2.1346): 100%|██████████| 16/16 [00:15<00:00, 1.01it/s]
4
+ Saving model to "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500" ...
5
+ tokenizer config file saved in /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/tokenizer_config.json
6
+ Special tokens file saved in /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/special_tokens_map.json
7
+ Saving 16-bit model...
8
+ [2025-05-28 19:40:51,359] [INFO] [logging.py:128:log_dist] [Rank 0] [Torch] Checkpoint global_step2 is about to be saved!
9
+ [2025-05-28 19:40:51,360] [INFO] [engine.py:3804:save_16bit_model] Saving model weights to /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/pytorch_model.bin, tag: global_step2
10
+ [2025-05-28 19:40:51,361] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/pytorch_model.bin...
11
+ [2025-05-28 19:40:52,191] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/pytorch_model.bin.
12
+ [2025-05-28 19:40:52,192] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step2 is ready now!
13
+ Model saved!
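Editor's note: the use_cache warning in the output above is transformers' standard notice that the KV cache serves no purpose while gradient checkpointing recomputes activations; the run proceeds after use_cache is forced off. A minimal sketch of pre-empting the warning when reproducing this setup (assumes the base checkpoint named in the log is loadable with transformers):

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
    trust_remote_code=True,
)
model.gradient_checkpointing_enable()
model.config.use_cache = False  # cache is unused during training; silences the warning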
wandb/run-20250528_194034-n6klpr55/files/requirements.txt ADDED
@@ -0,0 +1,247 @@
1
+ PyYAML==6.0.2
2
+ sentencepiece==0.2.0
3
+ certifi==2025.1.31
4
+ protobuf==5.29.3
5
+ pyasn1==0.6.1
6
+ nvitop==1.4.2
7
+ compressed-tensors==0.9.1
8
+ platformdirs==4.3.6
9
+ idna==3.10
10
+ decorator==5.2.1
11
+ py-spy==0.4.0
12
+ aiosignal==1.3.2
+ shellingham==1.5.4
+ pytest==8.3.5
+ sniffio==1.3.1
+ outlines_core==0.1.26
+ grpcio==1.70.0
+ diskcache==5.6.3
+ smmap==5.0.2
+ safetensors==0.5.3
+ py-cpuinfo==9.0.0
+ markdown-it-py==3.0.0
+ GitPython==3.1.44
+ propcache==0.3.0
+ urllib3==2.3.0
+ pyarrow==19.0.1
+ rich-toolkit==0.13.2
+ absl-py==2.1.0
+ uc-micro-py==1.0.3
+ distlib==0.3.9
+ xformers==0.0.29.post3
+ huggingface-hub==0.29.2
+ typing_extensions==4.12.2
+ mistral_common==1.5.3
+ airportsdata==20250224
+ python-dotenv==1.0.1
+ numpy==1.26.4
+ aiohappyeyeballs==2.5.0
+ typer==0.15.2
+ google-api-core==2.24.1
+ fastrlock==0.8.3
+ pydub==0.25.1
+ charset-normalizer==3.4.1
+ pybind11==2.13.6
+ httptools==0.6.4
+ torchaudio==2.6.0
+ watchfiles==1.0.4
+ orjson==3.10.15
+ setuptools==75.8.0
+ click==8.1.8
+ aiohttp==3.11.13
+ cupy-cuda12x==13.4.0
+ scipy==1.15.2
+ vllm==0.7.3
+ soxr==0.5.0.post1
+ dill==0.3.8
+ gradio_client==1.7.2
+ nvidia-cuda-cupti-cu12==12.4.127
+ python-multipart==0.0.20
+ Pygments==2.19.1
+ nvidia-cusparse-cu12==12.3.1.170
+ Markdown==3.7
+ pydantic==2.10.6
+ RapidFuzz==3.12.2
+ regex==2024.11.6
+ tokenizers==0.21.0
+ google-auth==2.38.0
+ wrapt==1.17.2
+ safehttpx==0.1.6
+ numba==0.60.0
+ antlr4-python3-runtime==4.13.2
+ fire==0.7.0
+ llama_stack==0.2.0
+ audioread==3.0.1
+ interegular==0.3.3
+ blobfile==3.0.0
+ rich==13.9.4
+ xxhash==3.5.0
+ nvidia-nvjitlink-cu12==12.4.127
+ pyasn1_modules==0.4.1
+ resampy==0.4.3
+ pydantic_core==2.27.2
+ nvidia-cufft-cu12==11.2.1.3
+ pycparser==2.22
+ cloudpickle==3.1.1
+ lazy_loader==0.4
+ cffi==1.17.1
+ python-dateutil==2.9.0.post0
+ triton==3.2.0
+ opencensus-context==0.1.3
+ jsonschema==4.23.0
+ optree==0.16.0
+ dnspython==2.7.0
+ Jinja2==3.1.6
+ align-anything==0.0.1.dev0
+ cachetools==5.5.2
+ datasets==3.3.2
+ iniconfig==2.0.0
+ pluggy==1.5.0
+ llvmlite==0.43.0
+ prometheus-fastapi-instrumentator==7.0.2
+ xgrammar==0.1.11
+ joblib==1.4.2
+ threadpoolctl==3.5.0
+ sentry-sdk==2.22.0
+ wheel==0.45.1
+ mdit-py-plugins==0.4.2
+ torchvision==0.21.0
+ flash_attn==2.7.4.post1
+ networkx==3.4.2
+ sympy==1.13.1
+ pycountry==24.6.1
+ nvidia-cudnn-cu12==9.1.0.70
+ jiter==0.8.2
+ latex2sympy2_extended==1.10.1
+ Levenshtein==0.27.1
+ gradio==5.20.0
+ lm-format-enforcer==0.10.11
+ uvloop==0.21.0
+ nvidia-cusparselt-cu12==0.6.2
+ astor==0.8.1
+ h11==0.14.0
+ rpds-py==0.23.1
+ pyzmq==26.2.1
+ transformers==4.49.0
+ librosa==0.10.2.post1
+ gitdb==4.0.12
+ proto-plus==1.26.0
+ mdurl==0.1.2
+ deepspeed==0.16.4
+ scikit-learn==1.6.1
+ yarl==1.18.3
+ pycryptodomex==3.22.0
+ outlines==0.1.11
+ tensorboard==2.19.0
+ soundfile==0.13.1
+ distro==1.9.0
+ opencensus==0.11.4
+ ray==2.40.0
+ Flask==3.1.0
+ lark==1.2.2
+ referencing==0.36.2
+ setproctitle==1.3.5
+ nvidia-cuda-nvrtc-cu12==12.4.127
+ nvidia-nvtx-cu12==12.4.127
+ docker-pycreds==0.4.0
+ opencv-python-headless==4.11.0.86
+ nvidia-cuda-runtime-cu12==12.4.127
+ packaging==24.2
+ nvidia-cusolver-cu12==11.6.1.9
+ peft==0.14.0
+ pillow==11.1.0
+ MarkupSafe==2.1.5
+ groovy==0.1.2
+ jsonschema-specifications==2024.10.1
+ linkify-it-py==2.0.3
+ gguf==0.10.0
+ fastapi==0.115.11
+ einops==0.8.1
+ blake3==1.0.4
+ accelerate==1.4.0
+ zipp==3.21.0
+ pyaml==25.1.0
+ uvicorn==0.34.0
+ httpx==0.28.1
+ psutil==7.0.0
+ websockets==15.0.1
+ ruff==0.9.9
+ nvidia-curand-cu12==10.3.5.147
+ fsspec==2024.12.0
+ nvidia-ml-py==12.570.86
+ textual==2.1.2
+ pip==25.0
+ msgpack==1.1.0
+ openai==1.65.4
+ diffusers==0.32.2
+ annotated-types==0.7.0
+ email_validator==2.2.0
+ multidict==6.1.0
+ tiktoken==0.9.0
+ msgspec==0.19.0
+ pandas==2.2.3
+ wcwidth==0.2.13
+ itsdangerous==2.2.0
+ smart-open==7.1.0
+ math-verify==0.7.0
+ nvidia-nccl-cu12==2.21.5
+ nest-asyncio==1.6.0
+ ninja==1.11.1.3
+ virtualenv==20.29.2
+ rsa==4.9
+ starlette==0.46.0
+ anyio==4.8.0
+ tensorboard-data-server==0.7.2
+ tomlkit==0.13.2
+ blinker==1.9.0
+ termcolor==3.0.1
+ nvidia-cublas-cu12==12.4.5.8
+ depyf==0.18.0
+ importlib_metadata==8.6.1
+ prompt_toolkit==3.0.50
+ attrs==25.1.0
+ fastapi-cli==0.0.7
+ six==1.17.0
+ hjson==3.1.0
+ memray==1.15.0
+ torch==2.6.0
+ tzdata==2025.1
+ colorful==0.5.6
+ googleapis-common-protos==1.69.0
+ llama_stack_client==0.2.0
+ ffmpy==0.5.0
+ frechet-audio-distance==0.1.2
+ torchlibrosa==0.1.0
+ partial-json-parser==0.2.1.1.post5
+ frozenlist==1.5.0
+ tqdm==4.67.1
+ aiofiles==23.2.1
+ aiohttp-cors==0.7.0
+ wandb==0.19.8
+ filelock==3.17.0
+ lxml==5.3.2
+ Werkzeug==3.1.3
+ multiprocess==0.70.16
+ prometheus_client==0.21.1
+ mpmath==1.3.0
+ pytz==2025.1
+ semantic-version==2.10.0
+ requests==2.32.3
+ pooch==1.8.2
+ httpcore==1.0.7
+ typing_extensions==4.12.2
+ jaraco.functools==4.0.1
+ platformdirs==4.2.2
+ jaraco.text==3.12.1
+ wheel==0.43.0
+ inflect==7.3.1
+ tomli==2.0.1
+ typeguard==4.3.0
+ packaging==24.2
+ more-itertools==10.3.0
+ zipp==3.19.2
+ importlib_metadata==8.0.0
+ autocommand==2.2.2
+ backports.tarfile==1.2.0
+ jaraco.context==5.3.0
+ jaraco.collections==5.1.0
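
The pinned list above is a raw environment dump, and a few names appear twice with different versions (typing_extensions, wheel, packaging, zipp, importlib_metadata; the trailing group looks like setuptools' vendored pins). A minimal Python sketch for collapsing it into an installable requirements file, assuming the list has been saved verbatim as requirements.txt (a hypothetical filename) and keeping the first pin seen for each name:

```python
# Deduplicate a pinned requirements dump, keeping the first pin per package.
from pathlib import Path

pins = {}
for line in Path("requirements.txt").read_text().splitlines():
    line = line.strip()
    if not line or line.startswith("#"):
        continue
    name, _, _version = line.partition("==")
    pins.setdefault(name.lower(), line)  # first occurrence wins

Path("requirements.dedup.txt").write_text("\n".join(pins.values()) + "\n")
print(f"kept {len(pins)} unique pins")
```

Installing from the deduplicated file should approximate the environment, modulo the CUDA-specific wheels.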
wandb/run-20250528_194034-n6klpr55/files/wandb-metadata.json ADDED
@@ -0,0 +1,130 @@
+ {
+ "os": "Linux-5.15.0-1040-nvidia-x86_64-with-glibc2.35",
+ "python": "CPython 3.11.11",
+ "startedAt": "2025-05-28T11:40:34.037315Z",
+ "args": [
+ "--local_rank=0",
+ "--train_datasets",
+ "inverse-json::/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json",
+ "--model_name_or_path",
+ "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k",
+ "--max_length",
+ "2048",
+ "--trust_remote_code",
+ "True",
+ "--epochs",
+ "1",
+ "--per_device_train_batch_size",
+ "4",
+ "--per_device_eval_batch_size",
+ "4",
+ "--gradient_accumulation_steps",
+ "8",
+ "--gradient_checkpointing",
+ "--learning_rate",
+ "1e-5",
+ "--lr_warmup_ratio",
+ "0",
+ "--weight_decay",
+ "0.0",
+ "--lr_scheduler_type",
+ "constant",
+ "--weight_decay",
+ "0.0",
+ "--seed",
+ "42",
+ "--output_dir",
+ "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500",
+ "--log_type",
+ "wandb",
+ "--log_run_name",
+ "qwen-0.5b-s3-Q1-1k-Q2-500",
+ "--log_project",
+ "Inverse_Alignment",
+ "--zero_stage",
+ "3",
+ "--offload",
+ "none",
+ "--bf16",
+ "True",
+ "--tf32",
+ "True",
+ "--save_16bit"
+ ],
+ "program": "-m safe_rlhf.finetune.__main__",
+ "email": "[email protected]",
+ "root": "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500",
+ "host": "dgx-096",
+ "executable": "/aifs4su/hansirui_1st/miniconda3/envs/by-align/bin/python",
+ "cpu_count": 112,
+ "cpu_count_logical": 224,
+ "gpu": "NVIDIA H800",
+ "gpu_count": 8,
+ "disk": {
+ "/": {
+ "total": "1888556142592",
+ "used": "78039969792"
+ }
+ },
+ "memory": {
+ "total": "2164195426304"
+ },
+ "cpu": {
+ "count": 112,
+ "countLogical": 224
+ },
+ "gpu_nvidia": [
+ {
+ "name": "NVIDIA H800",
+ "memoryTotal": "85520809984",
+ "cudaCores": 16896,
+ "architecture": "Hopper"
+ },
+ {
+ "name": "NVIDIA H800",
+ "memoryTotal": "85520809984",
+ "cudaCores": 16896,
+ "architecture": "Hopper"
+ },
+ {
+ "name": "NVIDIA H800",
+ "memoryTotal": "85520809984",
+ "cudaCores": 16896,
+ "architecture": "Hopper"
+ },
+ {
+ "name": "NVIDIA H800",
+ "memoryTotal": "85520809984",
+ "cudaCores": 16896,
+ "architecture": "Hopper"
+ },
+ {
+ "name": "NVIDIA H800",
+ "memoryTotal": "85520809984",
+ "cudaCores": 16896,
+ "architecture": "Hopper"
+ },
+ {
+ "name": "NVIDIA H800",
+ "memoryTotal": "85520809984",
+ "cudaCores": 16896,
+ "architecture": "Hopper"
+ },
+ {
+ "name": "NVIDIA H800",
+ "memoryTotal": "85520809984",
+ "cudaCores": 16896,
+ "architecture": "Hopper"
+ },
+ {
+ "name": "NVIDIA H800",
+ "memoryTotal": "85520809984",
+ "cudaCores": 16896,
+ "architecture": "Hopper"
+ }
+ ],
+ "slurm": {
+ "conf": "/cm/shared/apps/slurm/var/etc/slurm/slurm.conf"
+ },
+ "cudaVersion": "12.2"
+ }
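
A quick consistency check on the logged hyperparameters: with 4 samples per device, 8 gradient-accumulation steps, and the 8 GPUs reported above, the effective global batch is 256, so a 500-example dataset (assuming unsafe_500.json holds 500 records, as the filename suggests) yields the 2 optimizer updates recorded as total_training_steps in the run config. A sketch of the arithmetic:

```python
# Recompute the update-step count from the args in wandb-metadata.json.
import math

per_device_batch = 4   # --per_device_train_batch_size
grad_accum = 8         # --gradient_accumulation_steps
gpus = 8               # "gpu_count": 8
examples = 500         # assumed size of unsafe_500.json

global_batch = per_device_batch * grad_accum * gpus   # 256
steps_per_epoch = math.ceil(examples / global_batch)  # 2
print(global_batch, steps_per_epoch)  # matches total_training_steps: 2
```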
wandb/run-20250528_194034-n6klpr55/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"train/epoch":1,"_timestamp":1.7484324506387017e+09,"_runtime":18.195118757,"_step":16,"train/step":16,"_wandb":{"runtime":18},"train/loss":2.134641647338867,"train/lr":1e-05}
wandb/run-20250528_194034-n6klpr55/logs/debug-core.log ADDED
@@ -0,0 +1,15 @@
+ {"time":"2025-05-28T19:40:33.458806025+08:00","level":"INFO","msg":"main: starting server","port-filename":"/tmp/tmpg7m9z8yi/port-459318.txt","pid":459318,"log-level":0,"disable-analytics":false,"shutdown-on-parent-exit":false}
+ {"time":"2025-05-28T19:40:33.45979523+08:00","level":"INFO","msg":"Will exit if parent process dies.","ppid":459318}
+ {"time":"2025-05-28T19:40:33.459796816+08:00","level":"INFO","msg":"server is running","addr":{"IP":"127.0.0.1","Port":37097,"Zone":""}}
+ {"time":"2025-05-28T19:40:33.641066666+08:00","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:34.038528114+08:00","level":"INFO","msg":"handleInformInit: received","streamId":"n6klpr55","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:34.254228084+08:00","level":"INFO","msg":"handleInformInit: stream started","streamId":"n6klpr55","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:53.219127391+08:00","level":"INFO","msg":"handleInformFinish: finish message received","streamId":"n6klpr55","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:53.21982815+08:00","level":"INFO","msg":"handleInformFinish: stream closed","streamId":"n6klpr55","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:54.219062723+08:00","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:54.219084502+08:00","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:54.219090022+08:00","level":"INFO","msg":"server is shutting down"}
+ {"time":"2025-05-28T19:40:54.219107977+08:00","level":"INFO","msg":"connection: closing","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:54.219187066+08:00","level":"INFO","msg":"connection: closed successfully","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:54.219190322+08:00","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"127.0.0.1:55818"}
+ {"time":"2025-05-28T19:40:54.219195589+08:00","level":"INFO","msg":"server is closed"}
wandb/run-20250528_194034-n6klpr55/logs/debug-internal.log ADDED
@@ -0,0 +1,16 @@
+ {"time":"2025-05-28T19:40:34.039972736+08:00","level":"INFO","msg":"stream: starting","core version":"0.19.8","symlink path":"/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/wandb/run-20250528_194034-n6klpr55/logs/debug-core.log"}
+ {"time":"2025-05-28T19:40:34.254108788+08:00","level":"INFO","msg":"created new stream","id":"n6klpr55"}
+ {"time":"2025-05-28T19:40:34.25421878+08:00","level":"INFO","msg":"stream: started","id":"n6klpr55"}
+ {"time":"2025-05-28T19:40:34.254238342+08:00","level":"INFO","msg":"handler: started","stream_id":"n6klpr55"}
+ {"time":"2025-05-28T19:40:34.25424859+08:00","level":"INFO","msg":"writer: Do: started","stream_id":"n6klpr55"}
+ {"time":"2025-05-28T19:40:34.254251626+08:00","level":"INFO","msg":"sender: started","stream_id":"n6klpr55"}
+ {"time":"2025-05-28T19:40:34.580034427+08:00","level":"INFO","msg":"Starting system monitor"}
+ {"time":"2025-05-28T19:40:52.23245154+08:00","level":"INFO","msg":"Stopping system monitor"}
+ {"time":"2025-05-28T19:40:52.233229831+08:00","level":"INFO","msg":"Stopped system monitor"}
+ {"time":"2025-05-28T19:40:52.998922566+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
+ {"time":"2025-05-28T19:40:53.216254401+08:00","level":"INFO","msg":"handler: operation stats","stats":{}}
+ {"time":"2025-05-28T19:40:53.219276688+08:00","level":"INFO","msg":"stream: closing","id":"n6klpr55"}
+ {"time":"2025-05-28T19:40:53.219345645+08:00","level":"INFO","msg":"handler: closed","stream_id":"n6klpr55"}
+ {"time":"2025-05-28T19:40:53.219356998+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":"n6klpr55"}
+ {"time":"2025-05-28T19:40:53.219365865+08:00","level":"INFO","msg":"sender: closed","stream_id":"n6klpr55"}
+ {"time":"2025-05-28T19:40:53.219815996+08:00","level":"INFO","msg":"stream: closed","id":"n6klpr55"}
wandb/run-20250528_194034-n6klpr55/logs/debug.log ADDED
@@ -0,0 +1,29 @@
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Current SDK version is 0.19.8
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Configure stats pid to 459318
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Loading settings from /home/hansirui_1st/.config/wandb/settings
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Loading settings from /home/hansirui_1st/jiayi/resist/setting3/scripts/wandb/settings
+ 2025-05-28 19:40:34,032 INFO MainThread:459318 [wandb_setup.py:_flush():67] Loading settings from environment variables
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:setup_run_log_directory():647] Logging user logs to /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/wandb/run-20250528_194034-n6klpr55/logs/debug.log
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:setup_run_log_directory():648] Logging internal logs to /aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500/wandb/run-20250528_194034-n6klpr55/logs/debug-internal.log
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:init():761] calling init triggers
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:init():766] wandb.init called with sweep_config: {}
+ config: {'model_name_or_path': '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k', 'max_length': 2048, 'trust_remote_code': True, 'train_datasets': [('inverse-json', {'proportion': 1.0, 'path': '/home/hansirui_1st/jiayi/resist/setting3/safety_data/training/unsafe/unsafe_500.json'})], 'eval_datasets': None, 'epochs': 1, 'per_device_train_batch_size': 4, 'per_device_eval_batch_size': 4, 'gradient_accumulation_steps': 8, 'gradient_checkpointing': True, 'lr': 1e-05, 'lr_scheduler_type': <SchedulerType.CONSTANT: 'constant'>, 'lr_warmup_ratio': 0.0, 'weight_decay': 0.0, 'seed': 42, 'fp16': False, 'bf16': True, 'tf32': True, 'eval_strategy': 'epoch', 'eval_interval': 1000000, 'need_eval': False, 'eval_split_ratio': None, 'output_dir': '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', 'log_type': 'wandb', 'log_dir': '/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500', 'log_project': 'Inverse_Alignment', 'log_run_name': 'qwen-0.5b-s3-Q1-1k-Q2-500', 'save_16bit': True, 'save_interval': 1000000, 'local_rank': 0, 'zero_stage': 3, 'offload': 'none', 'deepspeed': False, 'deepspeed_config': None, 'deepscale': False, 'deepscale_config': None, 'global_rank': 0, 'device': device(type='cuda', index=0), 'num_update_steps_per_epoch': 2, 'total_training_steps': 2, '_wandb': {}}
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:init():784] starting backend
+ 2025-05-28 19:40:34,033 INFO MainThread:459318 [wandb_init.py:init():788] sending inform_init request
+ 2025-05-28 19:40:34,037 INFO MainThread:459318 [backend.py:_multiprocessing_setup():101] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2025-05-28 19:40:34,037 INFO MainThread:459318 [wandb_init.py:init():798] backend started and connected
+ 2025-05-28 19:40:34,038 INFO MainThread:459318 [wandb_init.py:init():891] updated telemetry
+ 2025-05-28 19:40:34,038 INFO MainThread:459318 [wandb_init.py:init():915] communicating run to backend with 90.0 second timeout
+ 2025-05-28 19:40:34,575 INFO MainThread:459318 [wandb_init.py:init():990] starting run threads in backend
+ 2025-05-28 19:40:34,802 INFO MainThread:459318 [wandb_run.py:_console_start():2375] atexit reg
+ 2025-05-28 19:40:34,802 INFO MainThread:459318 [wandb_run.py:_redirect():2227] redirect: wrap_raw
+ 2025-05-28 19:40:34,802 INFO MainThread:459318 [wandb_run.py:_redirect():2292] Wrapping output streams.
+ 2025-05-28 19:40:34,803 INFO MainThread:459318 [wandb_run.py:_redirect():2315] Redirects installed.
+ 2025-05-28 19:40:34,805 INFO MainThread:459318 [wandb_init.py:init():1032] run started, returning control to user process
+ 2025-05-28 19:40:52,228 INFO MainThread:459318 [wandb_run.py:_finish():2112] finishing run xtom/Inverse_Alignment/n6klpr55
+ 2025-05-28 19:40:52,229 INFO MainThread:459318 [wandb_run.py:_atexit_cleanup():2340] got exitcode: 0
+ 2025-05-28 19:40:52,230 INFO MainThread:459318 [wandb_run.py:_restore():2322] restore
+ 2025-05-28 19:40:52,231 INFO MainThread:459318 [wandb_run.py:_restore():2328] restore done
+ 2025-05-28 19:40:53,217 INFO MainThread:459318 [wandb_run.py:_footer_history_summary_info():3956] rendering history
+ 2025-05-28 19:40:53,218 INFO MainThread:459318 [wandb_run.py:_footer_history_summary_info():3988] rendering summary
+ 2025-05-28 19:40:53,218 INFO MainThread:459318 [wandb_run.py:_footer_sync_info():3917] logging synced files
wandb/run-20250528_194034-n6klpr55/run-n6klpr55.wandb ADDED
Binary file (20.2 kB)
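
Since the run saved a 16-bit consolidated checkpoint into output_dir (save_16bit under ZeRO stage 3; an assumption about the safe_rlhf saving path is that it gathers a standard Hugging Face checkpoint), the uploaded folder should load as an ordinary causal-LM checkpoint. A minimal sketch with the transformers version pinned above (4.49.0), using the output path from the run config:

```python
# Load the checkpoint produced by this run as a standard HF causal LM.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "/aifs4su/hansirui_1st/boyuan/resist/setting3-safety/Qwen1.5-0.5B/Qwen1.5-0.5B-s3-Q1-1k-Q2-500"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)  # run trained in bf16
```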