Yukang committed · Commit 643d511 · verified · 1 Parent(s): f2765ac

Model save

Files changed (4)
  1. README.md +2 -4
  2. all_results.json +4 -4
  3. train_results.json +4 -4
  4. trainer_state.json +37 -9
README.md CHANGED
@@ -1,11 +1,9 @@
 ---
 base_model: Qwen/Qwen2.5-3B-Instruct
-datasets: open-r1/verifiable-coding-problems-python
 library_name: transformers
 model_name: Qwen2.5-3B-Open-R1-Code-GRPO
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - grpo
 licence: license
@@ -13,7 +11,7 @@ licence: license
 
 # Model Card for Qwen2.5-3B-Open-R1-Code-GRPO
 
-This model is a fine-tuned version of [Qwen/Qwen2.5-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-3B-Instruct) on the [open-r1/verifiable-coding-problems-python](https://huggingface.co/datasets/open-r1/verifiable-coding-problems-python) dataset.
+This model is a fine-tuned version of [Qwen/Qwen2.5-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-3B-Instruct).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -29,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chenyukang2020-nvidia/huggingface/runs/3c0353cy)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chenyukang2020-nvidia/huggingface/runs/bds2ccjy)
 
 
 This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
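The model card's Quick start is unchanged by this commit and is visible only through the hunk context `print(output["generated_text"])`; it follows TRL's standard pipeline snippet. A minimal sketch of that usage, assuming the checkpoint is published under a repo id such as `Yukang/Qwen2.5-3B-Open-R1-Code-GRPO` (the actual repo id is not shown in this diff):

```python
# Sketch of the TRL-style Quick start: chat-format text generation via a pipeline.
# The repo id below is an assumption; substitute the real Hub path of this checkpoint.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="Yukang/Qwen2.5-3B-Open-R1-Code-GRPO",  # assumed repo id
    device_map="auto",
)

prompt = [{"role": "user", "content": "Write a Python function that reverses a string."}]
output = generator(prompt, max_new_tokens=256, return_full_text=False)[0]
print(output["generated_text"])
```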
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 9.052564064315237e-07,
-    "train_runtime": 75.296,
+    "train_loss": 9.785836571644918e-07,
+    "train_runtime": 72.7583,
     "train_samples": 35735,
-    "train_samples_per_second": 3399.916,
-    "train_steps_per_second": 6.64
+    "train_samples_per_second": 3518.501,
+    "train_steps_per_second": 6.872
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 9.052564064315237e-07,
-    "train_runtime": 75.296,
+    "train_loss": 9.785836571644918e-07,
+    "train_runtime": 72.7583,
     "train_samples": 35735,
-    "train_samples_per_second": 3399.916,
-    "train_steps_per_second": 6.64
+    "train_samples_per_second": 3518.501,
+    "train_steps_per_second": 6.872
 }
trainer_state.json CHANGED
@@ -2,9 +2,9 @@
   "best_global_step": null,
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.4557868815760018,
+  "epoch": 0.45668233713901946,
   "eval_steps": 500,
-  "global_step": 509,
+  "global_step": 510,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -14262,18 +14262,46 @@
       "step": 509
     },
     {
-      "epoch": 0.4557868815760018,
-      "step": 509,
+      "clip_ratio/high_max": 0.0,
+      "clip_ratio/high_mean": 0.0,
+      "clip_ratio/low_mean": 0.0,
+      "clip_ratio/low_min": 0.0,
+      "clip_ratio/region_mean": 0.0,
+      "completions/clipped_ratio": -7.0,
+      "completions/max_length": 1481.0,
+      "completions/max_terminated_length": 1481.0,
+      "completions/mean_length": 530.21875,
+      "completions/mean_terminated_length": 530.21875,
+      "completions/min_length": 148.0,
+      "completions/min_terminated_length": 148.0,
+      "epoch": 0.45668233713901946,
+      "frac_reward_zero_std": 1.0,
+      "grad_norm": 0.00483125560549929,
+      "kl": 0.0499267578125,
+      "learning_rate": 5.038223531225739e-07,
+      "loss": 0.0005,
+      "num_tokens": 317995683.0,
+      "reward": 0.10000000149011612,
+      "reward_std": 0.0,
+      "rewards/code_reward/mean": 0.0,
+      "rewards/code_reward/std": 0.0,
+      "rewards/format_reward/mean": 1.0,
+      "rewards/format_reward/std": 0.0,
+      "step": 510
+    },
+    {
+      "epoch": 0.45668233713901946,
+      "step": 510,
       "total_flos": 0.0,
-      "train_loss": 9.052564064315237e-07,
-      "train_runtime": 75.296,
-      "train_samples_per_second": 3399.916,
-      "train_steps_per_second": 6.64
+      "train_loss": 9.785836571644918e-07,
+      "train_runtime": 72.7583,
+      "train_samples_per_second": 3518.501,
+      "train_steps_per_second": 6.872
     }
   ],
   "logging_steps": 1,
   "max_steps": 500,
-  "num_input_tokens_seen": 317450899,
+  "num_input_tokens_seen": 317995683,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {