VERSIL91 committed
Commit 7fe912c · verified · 1 Parent(s): 5edc8a5

End of training

README.md CHANGED
@@ -6,7 +6,7 @@ tags:
  - axolotl
  - generated_from_trainer
  model-index:
- - name: 118ed5ed-7122-4885-96b2-48d099cf26b6
    results: []
  ---
 
@@ -18,16 +18,12 @@ should probably proofread and complete it, then remove this comment. -->
 
 axolotl version: `0.4.1`
 ```yaml
- accelerate_config:
-   dynamo_backend: inductor
-   mixed_precision: bf16
-   num_machines: 1
-   num_processes: auto
-   use_cpu: false
 adapter: lora
 base_model: unsloth/mistral-7b
 bf16: auto
 chat_template: llama3
 dataset_prepared_path: null
 datasets:
 - data_files:
@@ -45,75 +41,88 @@ datasets:
   system_prompt: ''
 debug: null
 deepspeed: null
- device_map: auto
- early_stopping_patience: null
- eval_max_new_tokens: 128
- eval_table_size: null
- evals_per_epoch: 4
 flash_attention: false
 fp16: null
 fsdp: null
 fsdp_config: null
- gradient_accumulation_steps: 16
 gradient_checkpointing: true
- group_by_length: false
 hub_model_id: null
- hub_repo: null
 hub_strategy: checkpoint
 hub_token: null
- learning_rate: 0.0001
 local_rank: null
 logging_steps: 1
- lora_alpha: 16
 lora_dropout: 0.05
 lora_fan_in_fan_out: null
 lora_model_dir: null
- lora_r: 8
 lora_target_linear: true
 lora_target_modules:
  - q_proj
  - v_proj
 lr_scheduler: cosine
 max_memory:
   0: 70GiB
- max_steps: 100
- micro_batch_size: 2
 mlflow_experiment_name: /tmp/8b2f2e480f49b204_train_data.json
 model_type: AutoModelForCausalLM
- num_epochs: 1
- optimizer: adamw_bnb_8bit
 output_dir: miner_id_24
 pad_to_sequence_len: true
- quantization_config:
-   llm_int8_enable_fp32_cpu_offload: true
-   load_in_8bit: true
 resume_from_checkpoint: null
 s2_attention: null
 sample_packing: false
- saves_per_epoch: 4
- sequence_len: 512
 strict: false
 tf32: false
 tokenizer_type: AutoTokenizer
- torch_compile: true
 train_on_inputs: false
 trust_remote_code: true
- val_set_size: 0.05
- wandb_entity: null
- wandb_mode: online
- wandb_name: 118ed5ed-7122-4885-96b2-48d099cf26b6
- wandb_project: Gradients-On-Demand
- wandb_run: your_name
- wandb_runid: 118ed5ed-7122-4885-96b2-48d099cf26b6
- warmup_steps: 10
- weight_decay: 0.0
 xformers_attention: null
 
 ```
 
 </details><br>
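The removed `quantization_config` block corresponds to 8-bit loading through bitsandbytes with FP32 CPU offload. A hedged sketch of the transformers-level equivalent (axolotl assembles this internally; the model name and `device_map: auto` come from the removed config):

```python
# Sketch: what the removed quantization_config maps to in
# transformers + bitsandbytes. Not the exact axolotl code path.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,                      # load_in_8bit: true
    llm_int8_enable_fp32_cpu_offload=True,  # llm_int8_enable_fp32_cpu_offload: true
)
model = AutoModelForCausalLM.from_pretrained(
    "unsloth/mistral-7b",
    quantization_config=bnb_config,
    device_map="auto",                      # device_map: auto in the old config
)
```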
 
- # 118ed5ed-7122-4885-96b2-48d099cf26b6
 
 This model is a fine-tuned version of [unsloth/mistral-7b](https://huggingface.co/unsloth/mistral-7b) on the None dataset.
 It achieves the following results on the evaluation set:
@@ -136,26 +145,28 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
- - learning_rate: 0.0001
- - train_batch_size: 2
- - eval_batch_size: 2
 - seed: 42
- - gradient_accumulation_steps: 16
- - total_train_batch_size: 32
- - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: cosine
- - lr_scheduler_warmup_steps: 10
- - training_steps: 100
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
- | 0.0 | 0.0007 | 1 | nan |
- | 0.0 | 0.0169 | 25 | nan |
- | 0.0 | 0.0338 | 50 | nan |
- | 0.0 | 0.0507 | 75 | nan |
- | 0.0 | 0.0676 | 100 | nan |
 
 
 ### Framework versions
 
  - axolotl
  - generated_from_trainer
  model-index:
+ - name: 68938adc-cdd4-4d29-81d8-8c7ffc6c6fc3
    results: []
  ---
 
 
 axolotl version: `0.4.1`
 ```yaml
 adapter: lora
 base_model: unsloth/mistral-7b
 bf16: auto
 chat_template: llama3
+ cosine_min_lr_ratio: 0.1
+ data_processes: 16
 dataset_prepared_path: null
 datasets:
 - data_files:
   system_prompt: ''
 debug: null
 deepspeed: null
+ device_map: '{'''':torch.cuda.current_device()}'
+ do_eval: true
+ early_stopping_patience: 1
+ eval_batch_size: 1
+ eval_sample_packing: false
+ eval_steps: 25
+ evaluation_strategy: steps
 flash_attention: false
 fp16: null
 fsdp: null
 fsdp_config: null
+ gradient_accumulation_steps: 32
 gradient_checkpointing: true
+ group_by_length: true
 hub_model_id: null
+ hub_repo: stevemonite
 hub_strategy: checkpoint
 hub_token: null
+ learning_rate: 0.0003
+ load_in_4bit: false
+ load_in_8bit: false
 local_rank: null
 logging_steps: 1
+ lora_alpha: 64
 lora_dropout: 0.05
 lora_fan_in_fan_out: null
 lora_model_dir: null
+ lora_r: 32
 lora_target_linear: true
 lora_target_modules:
  - q_proj
+ - k_proj
  - v_proj
+ - o_proj
+ - gate_proj
+ - down_proj
+ - up_proj
 lr_scheduler: cosine
+ max_grad_norm: 1.0
 max_memory:
   0: 70GiB
+ max_steps: 76
+ micro_batch_size: 1
 mlflow_experiment_name: /tmp/8b2f2e480f49b204_train_data.json
 model_type: AutoModelForCausalLM
+ num_epochs: 2
+ optim_args:
+   adam_beta1: 0.9
+   adam_beta2: 0.95
+   adam_epsilon: 1e-5
+ optimizer: adamw_torch
 output_dir: miner_id_24
 pad_to_sequence_len: true
 resume_from_checkpoint: null
 s2_attention: null
 sample_packing: false
+ save_steps: 50
+ save_strategy: steps
+ sequence_len: 2048
 strict: false
 tf32: false
 tokenizer_type: AutoTokenizer
+ torch_compile: false
 train_on_inputs: false
 trust_remote_code: true
+ val_set_size: 50
+ wandb_entity: sn56-miner
+ wandb_mode: disabled
+ wandb_name: sn56d1/6e0123f2
+ wandb_project: god
+ wandb_run: pvxz
+ wandb_runid: sn56d1/6e0123f2
+ warmup_raio: 0.03
+ warmup_ratio: 0.03
+ weight_decay: 0.01
 xformers_attention: null
 
 ```
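Two details in the new config above are easy to misread: the quoted `device_map` resolves to the literal string `{'':torch.cuda.current_device()}` (the doubled quotes are YAML escaping), which axolotl evaluates to pin the whole model to the current GPU, and `warmup_raio: 0.03` is a typo'd duplicate of `warmup_ratio` that is simply ignored. The schedule numbers reported later in this README follow directly from the config; a quick sanity check (the `num_devices = 4` is taken from the hyperparameter summary below, not from the YAML):

```python
# Back-of-the-envelope check of the schedule implied by the config above.
# Assumption: 4 GPUs, as reported in the hyperparameter summary below.
micro_batch_size = 1
gradient_accumulation_steps = 32
num_devices = 4

total_train_batch_size = micro_batch_size * gradient_accumulation_steps * num_devices
print(total_train_batch_size)         # 128 -> "total_train_batch_size: 128"

max_steps = 76
warmup_ratio = 0.03
print(int(max_steps * warmup_ratio))  # 2 -> "lr_scheduler_warmup_steps: 2"
```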
 
 </details><br>
 
+ # 68938adc-cdd4-4d29-81d8-8c7ffc6c6fc3
 
 This model is a fine-tuned version of [unsloth/mistral-7b](https://huggingface.co/unsloth/mistral-7b) on the None dataset.
 It achieves the following results on the evaluation set:
 
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 1
+ - eval_batch_size: 1
 - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 32
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 4
+ - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=adam_beta1=0.9,adam_beta2=0.95,adam_epsilon=1e-5
 - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 2
+ - training_steps: 76
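The optimizer line above reports the trainer's default betas/epsilon next to the `optimizer_args` that override them; the settings that actually apply are beta1=0.9, beta2=0.95, eps=1e-5. A minimal sketch with plain `torch.optim.AdamW` (axolotl/transformers wire this up internally; shown only to disambiguate):

```python
# Sketch: the AdamW configuration the optim_args actually produce.
# betas=(0.9, 0.999) / eps=1e-08 in the summary line are just trainer
# defaults echoed back; the optimizer_args overrides win.
import torch

def make_optimizer(params):
    return torch.optim.AdamW(
        params,
        lr=3e-4,            # learning_rate: 0.0003
        betas=(0.9, 0.95),  # adam_beta1, adam_beta2
        eps=1e-5,           # adam_epsilon
        weight_decay=0.01,  # weight_decay: 0.01
    )
```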
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
+ | 0.0 | 0.0026 | 1 | nan |
+ | 0.0 | 0.0643 | 25 | nan |
+ | 0.0 | 0.1286 | 50 | nan |
+ | 0.0 | 0.1930 | 75 | nan |
 
 
 ### Framework versions
adapter_config.json CHANGED
@@ -10,23 +10,23 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
- "lora_alpha": 16,
   "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
- "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-   "q_proj",
-   "o_proj",
-   "v_proj",
-   "down_proj",
     "k_proj",
     "up_proj",
-   "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
 
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
+ "lora_alpha": 64,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
+ "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
+   "gate_proj",
    "k_proj",
+   "o_proj",
    "up_proj",
+   "q_proj",
+   "v_proj",
+   "down_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:585178d5250a9068d12faf659c980689a027ffee93b57f3d772b132f04394541
- size 84047370
 
  version https://git-lfs.github.com/spec/v1
+ oid sha256:0e1e22a7c24473ae2ce03415b098f515aeed520202a76c93116fabb734bf0b47
+ size 335706186
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ec79eaa4b64976106e2d19432a45f0ab1bec56041129a34042d5cd263efa55ec
- size 83945296
 
  version https://git-lfs.github.com/spec/v1
+ oid sha256:6c696cab0a3d3a6350c331a2fa51b164096d09045d83c85832605ebb6c37a08a
+ size 335604696
last-checkpoint/adapter_config.json CHANGED
@@ -10,23 +10,23 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
- "lora_alpha": 16,
   "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
- "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-   "q_proj",
-   "o_proj",
-   "v_proj",
-   "down_proj",
     "k_proj",
     "up_proj",
-   "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
 
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
+ "lora_alpha": 64,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
+ "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
+   "gate_proj",
    "k_proj",
+   "o_proj",
    "up_proj",
+   "q_proj",
+   "v_proj",
+   "down_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ec79eaa4b64976106e2d19432a45f0ab1bec56041129a34042d5cd263efa55ec
- size 83945296
 
  version https://git-lfs.github.com/spec/v1
+ oid sha256:6c696cab0a3d3a6350c331a2fa51b164096d09045d83c85832605ebb6c37a08a
+ size 335604696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:06ee945e16ae22c9075277531a6f8bc0ec708f7e7d0f7f18b4a9e5b5ec85eb27
- size 43122580
 
  version https://git-lfs.github.com/spec/v1
+ oid sha256:4045837df223652ae1807aca3bdda171754240535f50bc4560efaea4d8ea8d2b
+ size 671466706
last-checkpoint/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33b8177a44a181e058c6946682eb2e19c1ff617f6f261ed3c51a5c6f4afe7589
+ size 14960
last-checkpoint/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92b88ed4526213f4665a86c47810ec278c7eb6676845fd971c653281bb156b69
+ size 15024
last-checkpoint/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed74ec87b52d4f93bcb2e178d6d91b0136149eac291bb360c8bc1d5d9f955ebf
+ size 15024
last-checkpoint/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cd698697de34401eb1d1493fed0acbea22d6de9691b03adf380d3e3dda735ab
+ size 15024
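The four added `rng_state_{0..3}.pth` files are per-process RNG snapshots, one per rank, matching the 4-GPU run recorded in the README. A hedged sketch of inspecting one (standard HF Trainer checkpoint layout; the exact keys can vary by version):

```python
# Sketch: peek inside a per-rank RNG checkpoint. Path is illustrative.
import torch

rng_state = torch.load("last-checkpoint/rng_state_0.pth", weights_only=False)
print(sorted(rng_state.keys()))  # typically python / numpy / cpu / cuda states
```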
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:49d60a69e2379be2053e816cbaff31e6c931b5922dd86c71c9eaf473299cbf62
  size 1064
 
  version https://git-lfs.github.com/spec/v1
+ oid sha256:eff4e51807fc2798ef7e3176a7ae7a505f7b610c949cf8c1d65662116e17ea14
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,773 +1,416 @@
  {
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 0.06763897696047347,
  "eval_steps": 25,
- "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
- { "epoch": 0.0006763897696047348, "grad_norm": NaN, "learning_rate": 1e-05, "loss": 0.0, "step": 1 },
- { "epoch": 0.0006763897696047348, "eval_loss": NaN, "eval_runtime": 211.3876, "eval_samples_per_second": 11.779, "eval_steps_per_second": 5.89, "step": 1 },
- { "epoch": 0.0013527795392094695, "grad_norm": NaN, "learning_rate": 2e-05, "loss": 0.0, "step": 2 },
- { "epoch": 0.002029169308814204, "grad_norm": NaN, "learning_rate": 3e-05, "loss": 0.0, "step": 3 },
- { "epoch": 0.002705559078418939, "grad_norm": NaN, "learning_rate": 4e-05, "loss": 0.0, "step": 4 },
- { "epoch": 0.0033819488480236735, "grad_norm": NaN, "learning_rate": 5e-05, "loss": 0.0, "step": 5 },
- { "epoch": 0.004058338617628408, "grad_norm": NaN, "learning_rate": 6e-05, "loss": 0.0, "step": 6 },
- { "epoch": 0.004734728387233143, "grad_norm": NaN, "learning_rate": 7e-05, "loss": 0.0, "step": 7 },
- { "epoch": 0.005411118156837878, "grad_norm": NaN, "learning_rate": 8e-05, "loss": 0.0, "step": 8 },
- { "epoch": 0.0060875079264426125, "grad_norm": NaN, "learning_rate": 9e-05, "loss": 0.0, "step": 9 },
- { "epoch": 0.006763897696047347, "grad_norm": NaN, "learning_rate": 0.0001, "loss": 0.0, "step": 10 },
- { "epoch": 0.007440287465652082, "grad_norm": NaN, "learning_rate": 9.99695413509548e-05, "loss": 0.0, "step": 11 },
- { "epoch": 0.008116677235256817, "grad_norm": NaN, "learning_rate": 9.987820251299122e-05, "loss": 0.0, "step": 12 },
- { "epoch": 0.008793067004861552, "grad_norm": NaN, "learning_rate": 9.972609476841367e-05, "loss": 0.0, "step": 13 },
- { "epoch": 0.009469456774466286, "grad_norm": NaN, "learning_rate": 9.951340343707852e-05, "loss": 0.0, "step": 14 },
- { "epoch": 0.010145846544071021, "grad_norm": NaN, "learning_rate": 9.924038765061042e-05, "loss": 0.0, "step": 15 },
- { "epoch": 0.010822236313675756, "grad_norm": NaN, "learning_rate": 9.890738003669029e-05, "loss": 0.0, "step": 16 },
- { "epoch": 0.01149862608328049, "grad_norm": NaN, "learning_rate": 9.851478631379982e-05, "loss": 0.0, "step": 17 },
- { "epoch": 0.012175015852885225, "grad_norm": NaN, "learning_rate": 9.806308479691595e-05, "loss": 0.0, "step": 18 },
- { "epoch": 0.01285140562248996, "grad_norm": NaN, "learning_rate": 9.755282581475769e-05, "loss": 0.0, "step": 19 },
- { "epoch": 0.013527795392094694, "grad_norm": NaN, "learning_rate": 9.698463103929542e-05, "loss": 0.0, "step": 20 },
- { "epoch": 0.01420418516169943, "grad_norm": NaN, "learning_rate": 9.635919272833938e-05, "loss": 0.0, "step": 21 },
- { "epoch": 0.014880574931304165, "grad_norm": NaN, "learning_rate": 9.567727288213005e-05, "loss": 0.0, "step": 22 },
- { "epoch": 0.015556964700908898, "grad_norm": NaN, "learning_rate": 9.493970231495835e-05, "loss": 0.0, "step": 23 },
- { "epoch": 0.016233354470513633, "grad_norm": NaN, "learning_rate": 9.414737964294636e-05, "loss": 0.0, "step": 24 },
- { "epoch": 0.016909744240118367, "grad_norm": NaN, "learning_rate": 9.330127018922194e-05, "loss": 0.0, "step": 25 },
- { "epoch": 0.016909744240118367, "eval_loss": NaN, "eval_runtime": 94.1534, "eval_samples_per_second": 26.446, "eval_steps_per_second": 13.223, "step": 25 },
- { "epoch": 0.017586134009723104, "grad_norm": NaN, "learning_rate": 9.24024048078213e-05, "loss": 0.0, "step": 26 },
- { "epoch": 0.018262523779327838, "grad_norm": NaN, "learning_rate": 9.145187862775209e-05, "loss": 0.0, "step": 27 },
- { "epoch": 0.01893891354893257, "grad_norm": NaN, "learning_rate": 9.045084971874738e-05, "loss": 0.0, "step": 28 },
- { "epoch": 0.019615303318537308, "grad_norm": NaN, "learning_rate": 8.940053768033609e-05, "loss": 0.0, "step": 29 },
- { "epoch": 0.020291693088142042, "grad_norm": NaN, "learning_rate": 8.83022221559489e-05, "loss": 0.0, "step": 30 },
- { "epoch": 0.020968082857746775, "grad_norm": NaN, "learning_rate": 8.715724127386972e-05, "loss": 0.0, "step": 31 },
- { "epoch": 0.021644472627351512, "grad_norm": NaN, "learning_rate": 8.596699001693255e-05, "loss": 0.0, "step": 32 },
- { "epoch": 0.022320862396956246, "grad_norm": NaN, "learning_rate": 8.473291852294987e-05, "loss": 0.0, "step": 33 },
- { "epoch": 0.02299725216656098, "grad_norm": NaN, "learning_rate": 8.345653031794292e-05, "loss": 0.0, "step": 34 },
- { "epoch": 0.023673641936165717, "grad_norm": NaN, "learning_rate": 8.213938048432697e-05, "loss": 0.0, "step": 35 },
- { "epoch": 0.02435003170577045, "grad_norm": NaN, "learning_rate": 8.07830737662829e-05, "loss": 0.0, "step": 36 },
- { "epoch": 0.025026421475375184, "grad_norm": NaN, "learning_rate": 7.938926261462366e-05, "loss": 0.0, "step": 37 },
- { "epoch": 0.02570281124497992, "grad_norm": NaN, "learning_rate": 7.795964517353735e-05, "loss": 0.0, "step": 38 },
- { "epoch": 0.026379201014584654, "grad_norm": NaN, "learning_rate": 7.649596321166024e-05, "loss": 0.0, "step": 39 },
- { "epoch": 0.027055590784189388, "grad_norm": NaN, "learning_rate": 7.500000000000001e-05, "loss": 0.0, "step": 40 },
- { "epoch": 0.027731980553794125, "grad_norm": NaN, "learning_rate": 7.347357813929454e-05, "loss": 0.0, "step": 41 },
- { "epoch": 0.02840837032339886, "grad_norm": NaN, "learning_rate": 7.191855733945387e-05, "loss": 0.0, "step": 42 },
- { "epoch": 0.029084760093003592, "grad_norm": NaN, "learning_rate": 7.033683215379002e-05, "loss": 0.0, "step": 43 },
- { "epoch": 0.02976114986260833, "grad_norm": NaN, "learning_rate": 6.873032967079561e-05, "loss": 0.0, "step": 44 },
- { "epoch": 0.030437539632213063, "grad_norm": NaN, "learning_rate": 6.710100716628344e-05, "loss": 0.0, "step": 45 },
- { "epoch": 0.031113929401817796, "grad_norm": NaN, "learning_rate": 6.545084971874738e-05, "loss": 0.0, "step": 46 },
- { "epoch": 0.03179031917142253, "grad_norm": NaN, "learning_rate": 6.378186779084995e-05, "loss": 0.0, "step": 47 },
- { "epoch": 0.03246670894102727, "grad_norm": NaN, "learning_rate": 6.209609477998338e-05, "loss": 0.0, "step": 48 },
- { "epoch": 0.033143098710632, "grad_norm": NaN, "learning_rate": 6.0395584540887963e-05, "loss": 0.0, "step": 49 },
- { "epoch": 0.033819488480236734, "grad_norm": NaN, "learning_rate": 5.868240888334653e-05, "loss": 0.0, "step": 50 },
- { "epoch": 0.033819488480236734, "eval_loss": NaN, "eval_runtime": 94.0007, "eval_samples_per_second": 26.489, "eval_steps_per_second": 13.245, "step": 50 },
- { "epoch": 0.034495878249841475, "grad_norm": NaN, "learning_rate": 5.695865504800327e-05, "loss": 0.0, "step": 51 },
- { "epoch": 0.03517226801944621, "grad_norm": NaN, "learning_rate": 5.522642316338268e-05, "loss": 0.0, "step": 52 },
- { "epoch": 0.03584865778905094, "grad_norm": NaN, "learning_rate": 5.348782368720626e-05, "loss": 0.0, "step": 53 },
- { "epoch": 0.036525047558655675, "grad_norm": NaN, "learning_rate": 5.174497483512506e-05, "loss": 0.0, "step": 54 },
- { "epoch": 0.03720143732826041, "grad_norm": NaN, "learning_rate": 5e-05, "loss": 0.0, "step": 55 },
- { "epoch": 0.03787782709786514, "grad_norm": NaN, "learning_rate": 4.825502516487497e-05, "loss": 0.0, "step": 56 },
- { "epoch": 0.03855421686746988, "grad_norm": NaN, "learning_rate": 4.6512176312793736e-05, "loss": 0.0, "step": 57 },
- { "epoch": 0.039230606637074616, "grad_norm": NaN, "learning_rate": 4.477357683661734e-05, "loss": 0.0, "step": 58 },
- { "epoch": 0.03990699640667935, "grad_norm": NaN, "learning_rate": 4.3041344951996746e-05, "loss": 0.0, "step": 59 },
- { "epoch": 0.040583386176284084, "grad_norm": NaN, "learning_rate": 4.131759111665349e-05, "loss": 0.0, "step": 60 },
- { "epoch": 0.04125977594588882, "grad_norm": NaN, "learning_rate": 3.960441545911204e-05, "loss": 0.0, "step": 61 },
- { "epoch": 0.04193616571549355, "grad_norm": NaN, "learning_rate": 3.790390522001662e-05, "loss": 0.0, "step": 62 },
- { "epoch": 0.04261255548509829, "grad_norm": NaN, "learning_rate": 3.6218132209150045e-05, "loss": 0.0, "step": 63 },
- { "epoch": 0.043288945254703025, "grad_norm": NaN, "learning_rate": 3.4549150281252636e-05, "loss": 0.0, "step": 64 },
- { "epoch": 0.04396533502430776, "grad_norm": NaN, "learning_rate": 3.289899283371657e-05, "loss": 0.0, "step": 65 },
- { "epoch": 0.04464172479391249, "grad_norm": NaN, "learning_rate": 3.12696703292044e-05, "loss": 0.0, "step": 66 },
- { "epoch": 0.045318114563517226, "grad_norm": NaN, "learning_rate": 2.9663167846209998e-05, "loss": 0.0, "step": 67 },
- { "epoch": 0.04599450433312196, "grad_norm": NaN, "learning_rate": 2.8081442660546125e-05, "loss": 0.0, "step": 68 },
- { "epoch": 0.0466708941027267, "grad_norm": NaN, "learning_rate": 2.6526421860705473e-05, "loss": 0.0, "step": 69 },
- { "epoch": 0.04734728387233143, "grad_norm": NaN, "learning_rate": 2.500000000000001e-05, "loss": 0.0, "step": 70 },
- { "epoch": 0.04802367364193617, "grad_norm": NaN, "learning_rate": 2.350403678833976e-05, "loss": 0.0, "step": 71 },
- { "epoch": 0.0487000634115409, "grad_norm": NaN, "learning_rate": 2.2040354826462668e-05, "loss": 0.0, "step": 72 },
- { "epoch": 0.049376453181145634, "grad_norm": NaN, "learning_rate": 2.061073738537635e-05, "loss": 0.0, "step": 73 },
- { "epoch": 0.05005284295075037, "grad_norm": NaN, "learning_rate": 1.9216926233717085e-05, "loss": 0.0, "step": 74 },
- { "epoch": 0.05072923272035511, "grad_norm": NaN, "learning_rate": 1.7860619515673033e-05, "loss": 0.0, "step": 75 },
- { "epoch": 0.05072923272035511, "eval_loss": NaN, "eval_runtime": 104.1703, "eval_samples_per_second": 23.903, "eval_steps_per_second": 11.952, "step": 75 },
- { "epoch": 0.05140562248995984, "grad_norm": NaN, "learning_rate": 1.6543469682057106e-05, "loss": 0.0, "step": 76 },
- { "epoch": 0.052082012259564575, "grad_norm": NaN, "learning_rate": 1.526708147705013e-05, "loss": 0.0, "step": 77 },
- { "epoch": 0.05275840202916931, "grad_norm": NaN, "learning_rate": 1.4033009983067452e-05, "loss": 0.0, "step": 78 },
- { "epoch": 0.05343479179877404, "grad_norm": NaN, "learning_rate": 1.2842758726130283e-05, "loss": 0.0, "step": 79 },
- { "epoch": 0.054111181568378776, "grad_norm": NaN, "learning_rate": 1.1697777844051105e-05, "loss": 0.0, "step": 80 },
- { "epoch": 0.054787571337983516, "grad_norm": NaN, "learning_rate": 1.0599462319663905e-05, "loss": 0.0, "step": 81 },
- { "epoch": 0.05546396110758825, "grad_norm": NaN, "learning_rate": 9.549150281252633e-06, "loss": 0.0, "step": 82 },
- { "epoch": 0.056140350877192984, "grad_norm": NaN, "learning_rate": 8.548121372247918e-06, "loss": 0.0, "step": 83 },
- { "epoch": 0.05681674064679772, "grad_norm": NaN, "learning_rate": 7.597595192178702e-06, "loss": 0.0, "step": 84 },
- { "epoch": 0.05749313041640245, "grad_norm": NaN, "learning_rate": 6.698729810778065e-06, "loss": 0.0, "step": 85 },
- { "epoch": 0.058169520186007184, "grad_norm": NaN, "learning_rate": 5.852620357053651e-06, "loss": 0.0, "step": 86 },
- { "epoch": 0.058845909955611925, "grad_norm": NaN, "learning_rate": 5.060297685041659e-06, "loss": 0.0, "step": 87 },
- { "epoch": 0.05952229972521666, "grad_norm": NaN, "learning_rate": 4.322727117869951e-06, "loss": 0.0, "step": 88 },
- { "epoch": 0.06019868949482139, "grad_norm": NaN, "learning_rate": 3.6408072716606346e-06, "loss": 0.0, "step": 89 },
- { "epoch": 0.060875079264426125, "grad_norm": NaN, "learning_rate": 3.0153689607045845e-06, "loss": 0.0, "step": 90 },
- { "epoch": 0.06155146903403086, "grad_norm": NaN, "learning_rate": 2.4471741852423237e-06, "loss": 0.0, "step": 91 },
- { "epoch": 0.06222785880363559, "grad_norm": NaN, "learning_rate": 1.9369152030840556e-06, "loss": 0.0, "step": 92 },
- { "epoch": 0.06290424857324033, "grad_norm": NaN, "learning_rate": 1.4852136862001764e-06, "loss": 0.0, "step": 93 },
- { "epoch": 0.06358063834284507, "grad_norm": NaN, "learning_rate": 1.0926199633097157e-06, "loss": 0.0, "step": 94 },
- { "epoch": 0.0642570281124498, "grad_norm": NaN, "learning_rate": 7.596123493895991e-07, "loss": 0.0, "step": 95 },
- { "epoch": 0.06493341788205453, "grad_norm": NaN, "learning_rate": 4.865965629214819e-07, "loss": 0.0, "step": 96 },
- { "epoch": 0.06560980765165927, "grad_norm": NaN, "learning_rate": 2.7390523158633554e-07, "loss": 0.0, "step": 97 },
- { "epoch": 0.066286197421264, "grad_norm": NaN, "learning_rate": 1.2179748700879012e-07, "loss": 0.0, "step": 98 },
- { "epoch": 0.06696258719086874, "grad_norm": NaN, "learning_rate": 3.04586490452119e-08, "loss": 0.0, "step": 99 },
- { "epoch": 0.06763897696047347, "grad_norm": NaN, "learning_rate": 0.0, "loss": 0.0, "step": 100 },
- { "epoch": 0.06763897696047347, "eval_loss": NaN, "eval_runtime": 195.2703, "eval_samples_per_second": 12.752, "eval_steps_per_second": 6.376, "step": 100 }
  ],
  "logging_steps": 1,
- "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
- "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
-       "should_training_stop": true
      },
      "attributes": {}
    }
  },
- "total_flos": 1.402135828758528e+17,
- "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
  }
 
  {
+ "best_metric": NaN,
+ "best_model_checkpoint": "miner_id_24/checkpoint-50",
+ "epoch": 0.12863804470172052,
  "eval_steps": 25,
+ "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
+ { "epoch": 0.0025727608940344106, "grad_norm": NaN, "learning_rate": 0.00015, "loss": 0.0, "step": 1 },
+ { "epoch": 0.0025727608940344106, "eval_loss": NaN, "eval_runtime": 1.4827, "eval_samples_per_second": 33.722, "eval_steps_per_second": 8.768, "step": 1 },
+ { "epoch": 0.005145521788068821, "grad_norm": NaN, "learning_rate": 0.0003, "loss": 0.0, "step": 2 },
+ { "epoch": 0.007718282682103232, "grad_norm": NaN, "learning_rate": 0.000299878360437632, "loss": 0.0, "step": 3 },
+ { "epoch": 0.010291043576137643, "grad_norm": NaN, "learning_rate": 0.00029951366095324104, "loss": 0.0, "step": 4 },
+ { "epoch": 0.012863804470172054, "grad_norm": NaN, "learning_rate": 0.00029890655875994835, "loss": 0.0, "step": 5 },
+ { "epoch": 0.015436565364206465, "grad_norm": NaN, "learning_rate": 0.0002980581478969406, "loss": 0.0, "step": 6 },
+ { "epoch": 0.018009326258240876, "grad_norm": NaN, "learning_rate": 0.00029696995725793764, "loss": 0.0, "step": 7 },
+ { "epoch": 0.020582087152275285, "grad_norm": NaN, "learning_rate": 0.00029564394783602234, "loss": 0.0, "step": 8 },
+ { "epoch": 0.023154848046309694, "grad_norm": NaN, "learning_rate": 0.0002940825091897988, "loss": 0.0, "step": 9 },
+ { "epoch": 0.025727608940344107, "grad_norm": NaN, "learning_rate": 0.00029228845513724634, "loss": 0.0, "step": 10 },
+ { "epoch": 0.028300369834378516, "grad_norm": NaN, "learning_rate": 0.00029026501868502873, "loss": 0.0, "step": 11 },
+ { "epoch": 0.03087313072841293, "grad_norm": NaN, "learning_rate": 0.0002880158462023983, "loss": 0.0, "step": 12 },
+ { "epoch": 0.03344589162244734, "grad_norm": NaN, "learning_rate": 0.0002855449908501917, "loss": 0.0, "step": 13 },
+ { "epoch": 0.03601865251648175, "grad_norm": NaN, "learning_rate": 0.00028285690527676035, "loss": 0.0, "step": 14 },
+ { "epoch": 0.03859141341051616, "grad_norm": NaN, "learning_rate": 0.000279956433593997, "loss": 0.0, "step": 15 },
+ { "epoch": 0.04116417430455057, "grad_norm": NaN, "learning_rate": 0.00027684880264791867, "loss": 0.0, "step": 16 },
+ { "epoch": 0.04373693519858498, "grad_norm": NaN, "learning_rate": 0.00027353961259953696, "loss": 0.0, "step": 17 },
+ { "epoch": 0.04630969609261939, "grad_norm": NaN, "learning_rate": 0.00027003482683298933, "loss": 0.0, "step": 18 },
+ { "epoch": 0.048882456986653805, "grad_norm": NaN, "learning_rate": 0.00026634076120911777, "loss": 0.0, "step": 19 },
+ { "epoch": 0.051455217880688214, "grad_norm": NaN, "learning_rate": 0.0002624640726838608, "loss": 0.0, "step": 20 },
+ { "epoch": 0.054027978774722624, "grad_norm": NaN, "learning_rate": 0.00025841174731196877, "loss": 0.0, "step": 21 },
+ { "epoch": 0.05660073966875703, "grad_norm": NaN, "learning_rate": 0.000254191087657661, "loss": 0.0, "step": 22 },
+ { "epoch": 0.05917350056279144, "grad_norm": NaN, "learning_rate": 0.0002498096996349117, "loss": 0.0, "step": 23 },
+ { "epoch": 0.06174626145682586, "grad_norm": NaN, "learning_rate": 0.0002452754788010787, "loss": 0.0, "step": 24 },
+ { "epoch": 0.06431902235086026, "grad_norm": NaN, "learning_rate": 0.00024059659612857536, "loss": 0.0, "step": 25 },
+ { "epoch": 0.06431902235086026, "eval_loss": NaN, "eval_runtime": 1.4841, "eval_samples_per_second": 33.691, "eval_steps_per_second": 8.76, "step": 25 },
+ { "epoch": 0.06689178324489468, "grad_norm": NaN, "learning_rate": 0.00023578148328022626, "loss": 0.0, "step": 26 },
+ { "epoch": 0.0694645441389291, "grad_norm": NaN, "learning_rate": 0.00023083881741484068, "loss": 0.0, "step": 27 },
+ { "epoch": 0.0720373050329635, "grad_norm": NaN, "learning_rate": 0.00022577750555038587, "loss": 0.0, "step": 28 },
+ { "epoch": 0.07461006592699791, "grad_norm": NaN, "learning_rate": 0.000220606668512939, "loss": 0.0, "step": 29 },
+ { "epoch": 0.07718282682103232, "grad_norm": NaN, "learning_rate": 0.00021533562450034164, "loss": 0.0, "step": 30 },
+ { "epoch": 0.07975558771506673, "grad_norm": NaN, "learning_rate": 0.00020997387229017774, "loss": 0.0, "step": 31 },
+ { "epoch": 0.08232834860910114, "grad_norm": NaN, "learning_rate": 0.00020453107412233428, "loss": 0.0, "step": 32 },
+ { "epoch": 0.08490110950313555, "grad_norm": NaN, "learning_rate": 0.0001990170382869919, "loss": 0.0, "step": 33 },
+ { "epoch": 0.08747387039716996, "grad_norm": NaN, "learning_rate": 0.00019344170144942302, "loss": 0.0, "step": 34 },
+ { "epoch": 0.09004663129120437, "grad_norm": NaN, "learning_rate": 0.00018781511074344962, "loss": 0.0, "step": 35 },
+ { "epoch": 0.09261939218523878, "grad_norm": NaN, "learning_rate": 0.0001821474056658286, "loss": 0.0, "step": 36 },
+ { "epoch": 0.0951921530792732, "grad_norm": NaN, "learning_rate": 0.00017644879980419374, "loss": 0.0, "step": 37 },
+ { "epoch": 0.09776491397330761, "grad_norm": NaN, "learning_rate": 0.00017072956243148002, "loss": 0.0, "step": 38 },
+ { "epoch": 0.10033767486734202, "grad_norm": NaN, "learning_rate": 0.000165, "loss": 0.0, "step": 39 },
+ { "epoch": 0.10291043576137643, "grad_norm": NaN, "learning_rate": 0.00015927043756852, "loss": 0.0, "step": 40 },
+ { "epoch": 0.10548319665541084, "grad_norm": NaN, "learning_rate": 0.0001535512001958063, "loss": 0.0, "step": 41 },
+ { "epoch": 0.10805595754944525, "grad_norm": NaN, "learning_rate": 0.00014785259433417133, "loss": 0.0, "step": 42 },
+ { "epoch": 0.11062871844347966, "grad_norm": NaN, "learning_rate": 0.00014218488925655037, "loss": 0.0, "step": 43 },
+ { "epoch": 0.11320147933751407, "grad_norm": NaN, "learning_rate": 0.00013655829855057698, "loss": 0.0, "step": 44 },
+ { "epoch": 0.11577424023154848, "grad_norm": NaN, "learning_rate": 0.00013098296171300814, "loss": 0.0, "step": 45 },
+ { "epoch": 0.11834700112558288, "grad_norm": NaN, "learning_rate": 0.0001254689258776657, "loss": 0.0, "step": 46 },
+ { "epoch": 0.12091976201961731, "grad_norm": NaN, "learning_rate": 0.00012002612770982222, "loss": 0.0, "step": 47 },
+ { "epoch": 0.12349252291365172, "grad_norm": NaN, "learning_rate": 0.00011466437549965834, "loss": 0.0, "step": 48 },
+ { "epoch": 0.1260652838076861, "grad_norm": NaN, "learning_rate": 0.00010939333148706099, "loss": 0.0, "step": 49 },
+ { "epoch": 0.12863804470172052, "grad_norm": NaN, "learning_rate": 0.00010422249444961407, "loss": 0.0, "step": 50 },
+ { "epoch": 0.12863804470172052, "eval_loss": NaN, "eval_runtime": 1.483, "eval_samples_per_second": 33.716, "eval_steps_per_second": 8.766, "step": 50 }
  ],
  "logging_steps": 1,
+ "max_steps": 76,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
+ "save_steps": 50,
  "stateful_callbacks": {
+   "EarlyStoppingCallback": {
+     "args": {
+       "early_stopping_patience": 1,
+       "early_stopping_threshold": 0.0
+     },
+     "attributes": {
+       "early_stopping_patience_counter": 0
+     }
+   },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
+       "should_training_stop": false
      },
      "attributes": {}
    }
  },
+ "total_flos": 5.6659779262611456e+17,
+ "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
  }
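Both the old and new state files log `grad_norm: NaN`, `loss: 0.0`, and `eval_loss: NaN` at every step, i.e. the run diverged immediately under both configurations. A small sketch of scanning a `trainer_state.json` for that failure mode (Python's `json` parses the bare `NaN` tokens as `float('nan')` by default; the path is illustrative):

```python
# Sketch: flag NaN losses in a trainer_state.json log history.
import json
import math

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    for key in ("loss", "eval_loss", "grad_norm"):
        value = entry.get(key)
        if value is not None and math.isnan(value):
            print(f"step {entry['step']}: {key} is NaN")
```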
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7e8a6cbf1eff2ed6abcb6caa32099c42e7adfb184edf43841908c9d3350c9ec0
  size 6776
 
  version https://git-lfs.github.com/spec/v1
+ oid sha256:012696984907ada12377abc123ba0f028211273cc21153dc2137425ed4139c0a
  size 6776
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7e8a6cbf1eff2ed6abcb6caa32099c42e7adfb184edf43841908c9d3350c9ec0
  size 6776
 
  version https://git-lfs.github.com/spec/v1
+ oid sha256:012696984907ada12377abc123ba0f028211273cc21153dc2137425ed4139c0a
  size 6776