alexshengzhili committed
Commit 3731129 · verified · 1 Parent(s): 8505ff7

Model save

Files changed (4)
  1. README.md +2 -4
  2. all_results.json +4 -4
  3. train_results.json +4 -4
  4. trainer_state.json +23 -8
README.md CHANGED
@@ -1,11 +1,9 @@
 ---
 base_model: Qwen/Qwen2.5-3B-Instruct
-datasets: open-r1/verifiable-coding-problems-python-10k_decontaminated
 library_name: transformers
 model_name: Qwen2.5-3B-Open-R1-Code-GRPO-r2
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - grpo
 licence: license
@@ -13,7 +11,7 @@ licence: license
 
 # Model Card for Qwen2.5-3B-Open-R1-Code-GRPO-r2
 
-This model is a fine-tuned version of [Qwen/Qwen2.5-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-3B-Instruct) on the [open-r1/verifiable-coding-problems-python-10k_decontaminated](https://huggingface.co/datasets/open-r1/verifiable-coding-problems-python-10k_decontaminated) dataset.
+This model is a fine-tuned version of [Qwen/Qwen2.5-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-3B-Instruct).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -29,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/calads/huggingface/runs/ok073653)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/calads/huggingface/runs/ihnrm7q2)
 
 
 This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
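The `@@ -29,7 +27,7 @@` hunk above only shows the tail of the README's Quick start snippet (`print(output["generated_text"])`). For context, a minimal sketch of what that usage typically looks like, assuming the standard `transformers` text-generation pipeline; the hub model id below is an assumption based on the model name in this commit, not a path confirmed by the diff:

```python
# Minimal Quick start sketch, assuming the usual transformers pipeline API.
# The model id is an assumption inferred from the model name in this commit.
from transformers import pipeline

question = "Write a Python function that checks whether a string is a palindrome."
generator = pipeline(
    "text-generation",
    model="alexshengzhili/Qwen2.5-3B-Open-R1-Code-GRPO-r2",
    device_map="auto",
)
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=256,
    return_full_text=False,
)[0]
print(output["generated_text"])
```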
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 0.39302948499191553,
-    "train_runtime": 15190.2462,
+    "train_loss": 4.563784550228519e-06,
+    "train_runtime": 168.9071,
     "train_samples": 1574,
-    "train_samples_per_second": 0.23,
-    "train_steps_per_second": 0.033
+    "train_samples_per_second": 20.721,
+    "train_steps_per_second": 2.96
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 0.39302948499191553,
-    "train_runtime": 15190.2462,
+    "train_loss": 4.563784550228519e-06,
+    "train_runtime": 168.9071,
     "train_samples": 1574,
-    "train_samples_per_second": 0.23,
-    "train_steps_per_second": 0.033
+    "train_samples_per_second": 20.721,
+    "train_steps_per_second": 2.96
 }
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.3176620076238882,
+  "epoch": 0.318297331639136,
   "eval_steps": 500,
-  "global_step": 500,
+  "global_step": 501,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -7509,13 +7509,28 @@
       "step": 500
     },
     {
-      "epoch": 0.3176620076238882,
-      "step": 500,
+      "clip_ratio": 0.0,
+      "completion_length": 444.857177734375,
+      "epoch": 0.318297331639136,
+      "grad_norm": 0.047799259424209595,
+      "kl": 0.19921875,
+      "learning_rate": 5.000472027468528e-07,
+      "loss": 0.0023,
+      "reward": 0.40000003576278687,
+      "reward_std": 0.0,
+      "rewards/code_format_reward": 1.0,
+      "rewards/code_reward": 0.0,
+      "rewards/format_reward": 1.0,
+      "step": 501
+    },
+    {
+      "epoch": 0.318297331639136,
+      "step": 501,
       "total_flos": 0.0,
-      "train_loss": 0.39302948499191553,
-      "train_runtime": 15190.2462,
-      "train_samples_per_second": 0.23,
-      "train_steps_per_second": 0.033
+      "train_loss": 4.563784550228519e-06,
+      "train_runtime": 168.9071,
+      "train_samples_per_second": 20.721,
+      "train_steps_per_second": 2.96
     }
   ],
   "logging_steps": 1,