Dataset Viewer

repo_name (string, 8-65 chars) | repo_url (string, 27-84 chars) | license (string, 6 classes) | file_path (string, 6-85 chars) | file_url (string, 83-181 chars) | timestamp (string, 26 chars) | reward_functions (list, 1-20 items) | trainer_usages (list, 1-23 items) |
---|---|---|---|---|---|---|---|
dstackai/dstack | https://github.com/dstackai/dstack | Mozilla Public License 2.0 | examples/llms/deepseek/trl/amd/grpo_train.py | https://github.com/dstackai/dstack/blob/58181d1fe372488d9a64075c24d975935411f31d/examples/llms/deepseek/trl/amd/grpo_train.py | 2025-03-24T10:14:18.106059 | [
{
"name": "reward_len",
"code": "def reward_len(completions, **kwargs):\n return [abs(20 - len(completion)) for completion in completions]",
"label": "{\"label\": \"LENGTH_BASED\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "reward_len",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
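
For context, here is a minimal sketch of the trainer call this row records, using TRL's documented `GRPOTrainer`/`GRPOConfig` API. The model id and dataset below are illustrative stand-ins, not values taken from the dstack script.

```python
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

def reward_len(completions, **kwargs):
    # Reward completions whose length is close to 20 characters.
    return [-abs(20 - len(completion)) for completion in completions]

dataset = load_dataset("trl-lib/tldr", split="train")  # illustrative dataset
training_args = GRPOConfig(output_dir="grpo-out", logging_steps=10)

trainer = GRPOTrainer(
    model="Qwen/Qwen2-0.5B-Instruct",  # illustrative model id
    reward_funcs=reward_len,
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```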
philschmid/deep-learning-pytorch-huggingface | https://github.com/philschmid/deep-learning-pytorch-huggingface | MIT License | training/scripts/run_r1_grpo.py | https://github.com/philschmid/deep-learning-pytorch-huggingface/blob/59b37973074de90004d10e5ff636f98160c9743a/training/scripts/run_r1_grpo.py | 2025-03-24T10:14:24.890615 | [
{
"name": "format_reward_func (from list item 0)",
"code": "def format_reward_func(completions, target, **kwargs):\n \"\"\"\n Format: <think>...</think><answer>...</answer>\n Args:\n completions (list[str]): Generated outputs\n target (list[str]): Expected answers\n \n Returns:\n list[float]: Reward scores\n \"\"\"\n rewards = []\n for completion, gt in zip(completions, target):\n try:\n completion = '<think>' + completion\n if random.random() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'completion_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(completion)\n regex = '^<think>([^<]*(?:<(?!/?think>)[^<]*)*)<\\\\/think>\\\\n<answer>([\\\\s\\\\S]*?)<\\\\/answer>$'\n match = re.search(regex, completion, re.DOTALL)\n if match is None or len(match.groups()) != 2:\n rewards.append(0.0)\n else:\n rewards.append(1.0)\n except Exception:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "equation_reward_func (from list item 1)",
"code": "def equation_reward_func(completions, target, nums, **kwargs):\n \"\"\"\n Evaluates completions based on:\n 2. Mathematical correctness of the answer\n\n Args:\n completions (list[str]): Generated outputs\n target (list[str]): Expected answers\n nums (list[str]): Available numbers\n \n Returns:\n list[float]: Reward scores\n \"\"\"\n rewards = []\n for completion, gt, numbers in zip(completions, target, nums):\n try:\n completion = '<think>' + completion\n match = re.search('<answer>(.*?)<\\\\/answer>', completion)\n if match is None:\n rewards.append(0.0)\n continue\n equation = match.group(1).strip()\n used_numbers = [int(n) for n in re.findall('\\\\d+', equation)]\n if sorted(used_numbers) != sorted(numbers):\n rewards.append(0.0)\n continue\n allowed_pattern = '^[\\\\d+\\\\-*/().\\\\s]+$'\n if not re.match(allowed_pattern, equation):\n rewards.append(0.0)\n continue\n result = eval(equation, {'__builtins__': None}, {})\n if abs(float(result) - float(gt)) < 1e-05:\n rewards.append(1.0)\n if random.random() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'success_completion_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(completion)\n else:\n rewards.append(0.0)\n except Exception:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "format_reward_func (from [format_reward_func, equation_reward_func])",
"code": "def format_reward_func(completions, target, **kwargs):\n \"\"\"\n Format: <think>...</think><answer>...</answer>\n Args:\n completions (list[str]): Generated outputs\n target (list[str]): Expected answers\n \n Returns:\n list[float]: Reward scores\n \"\"\"\n rewards = []\n for completion, gt in zip(completions, target):\n try:\n completion = '<think>' + completion\n if random.random() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'completion_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(completion)\n regex = '^<think>([^<]*(?:<(?!/?think>)[^<]*)*)<\\\\/think>\\\\n<answer>([\\\\s\\\\S]*?)<\\\\/answer>$'\n match = re.search(regex, completion, re.DOTALL)\n if match is None or len(match.groups()) != 2:\n rewards.append(0.0)\n else:\n rewards.append(1.0)\n except Exception:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "equation_reward_func (from [format_reward_func, equation_reward_func])",
"code": "def equation_reward_func(completions, target, nums, **kwargs):\n \"\"\"\n Evaluates completions based on:\n 2. Mathematical correctness of the answer\n\n Args:\n completions (list[str]): Generated outputs\n target (list[str]): Expected answers\n nums (list[str]): Available numbers\n \n Returns:\n list[float]: Reward scores\n \"\"\"\n rewards = []\n for completion, gt, numbers in zip(completions, target, nums):\n try:\n completion = '<think>' + completion\n match = re.search('<answer>(.*?)<\\\\/answer>', completion)\n if match is None:\n rewards.append(0.0)\n continue\n equation = match.group(1).strip()\n used_numbers = [int(n) for n in re.findall('\\\\d+', equation)]\n if sorted(used_numbers) != sorted(numbers):\n rewards.append(0.0)\n continue\n allowed_pattern = '^[\\\\d+\\\\-*/().\\\\s]+$'\n if not re.match(allowed_pattern, equation):\n rewards.append(0.0)\n continue\n result = eval(equation, {'__builtins__': None}, {})\n if abs(float(result) - float(gt)) < 1e-05:\n rewards.append(1.0)\n if random.random() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'success_completion_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(completion)\n else:\n rewards.append(0.0)\n except Exception:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model_args.model_name_or_path",
"reward_funcs": "[format_reward_func, equation_reward_func]",
"args": "training_args",
"train_dataset": "train_dataset",
"eval_dataset": "test_dataset",
"peft_config": "get_peft_config(model_args)",
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
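
The `equation_reward_func` above guards `eval` with a character whitelist and disabled builtins. A distilled, self-contained version of that core check, for illustration only:

```python
import re

def check_equation(completion: str, target: float, nums: list[int]) -> float:
    """Distilled core of equation_reward_func: extract the <answer> block,
    require exactly the given numbers and only basic arithmetic characters,
    then evaluate with builtins disabled."""
    match = re.search(r"<answer>(.*?)</answer>", completion)
    if match is None:
        return 0.0
    equation = match.group(1).strip()
    used = [int(n) for n in re.findall(r"\d+", equation)]
    if sorted(used) != sorted(nums):
        return 0.0
    if not re.match(r"^[\d+\-*/().\s]+$", equation):
        return 0.0
    try:
        result = eval(equation, {"__builtins__": None}, {})
    except Exception:
        return 0.0
    return 1.0 if abs(float(result) - float(target)) < 1e-5 else 0.0

# Example: build 98 from [44, 19, 35]
print(check_equation("<answer>44 + 19 + 35</answer>", 98, [44, 19, 35]))  # 1.0
```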
huihuihenqiang/wechat-simulate-human | https://github.com/huihuihenqiang/wechat-simulate-human | Unknown | ft/deepseek_r1_train.py | https://github.com/huihuihenqiang/wechat-simulate-human/blob/26042f23d5c26501816a2d4ef498134e94349085/ft/deepseek_r1_train.py | 2025-03-24T10:14:27.153189 | [
{
"name": "mark_reward (from list item 0)",
"code": "def mark_reward(completions, **kwargs):\n responses = [completion[0]['content'] for completion in completions]\n return [mark_num(response) for response in responses]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "soft_format_reward (from list item 1)",
"code": "def soft_format_reward(completions, **kwargs):\n pattern = '<think>.*?</think>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, response) for response in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "hard_format_reward (from list item 2)",
"code": "def hard_format_reward(completions, **kwargs):\n pattern = '^<think>\\\\n.*?n</think>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, response) for response in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "digit_reward (from list item 3)",
"code": "def digit_reward(completions, **kwargs):\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_answer(r) for r in responses]\n return [0.5 if response.isdigit() else 0.0 for response in extracted_responses]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "correctness_reward (from list item 4)",
"code": "def correctness_reward(prompts, completions, answer, **kwargs):\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_answer(r) for r in responses]\n return [2.0 if response == str(ans) else 0.0 for response, ans in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "mark_reward (from [mark_reward, soft_format_reward, hard_format_reward, digit_reward, correctness_reward])",
"code": "def mark_reward(completions, **kwargs):\n responses = [completion[0]['content'] for completion in completions]\n return [mark_num(response) for response in responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward (from [mark_reward, soft_format_reward, hard_format_reward, digit_reward, correctness_reward])",
"code": "def soft_format_reward(completions, **kwargs):\n pattern = '<think>.*?</think>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, response) for response in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "hard_format_reward (from [mark_reward, soft_format_reward, hard_format_reward, digit_reward, correctness_reward])",
"code": "def hard_format_reward(completions, **kwargs):\n pattern = '^<think>\\\\n.*?n</think>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, response) for response in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "digit_reward (from [mark_reward, soft_format_reward, hard_format_reward, digit_reward, correctness_reward])",
"code": "def digit_reward(completions, **kwargs):\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_answer(r) for r in responses]\n return [0.5 if response.isdigit() else 0.0 for response in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward (from [mark_reward, soft_format_reward, hard_format_reward, digit_reward, correctness_reward])",
"code": "def correctness_reward(prompts, completions, answer, **kwargs):\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_answer(r) for r in responses]\n return [2.0 if response == str(ans) else 0.0 for response, ans in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[mark_reward, soft_format_reward, hard_format_reward, digit_reward, correctness_reward]",
"args": "training_args",
"train_dataset": "data",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
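
Unlike the first two rows, these reward functions index `completion[0]['content']`, i.e. they expect conversational completions. A sketch of the input shape TRL passes in that mode, dry-running this row's `soft_format_reward`:

```python
import re

def soft_format_reward(completions, **kwargs):
    pattern = r"<think>.*?</think>\s*<answer>.*?</answer>"
    responses = [completion[0]["content"] for completion in completions]
    matches = [re.match(pattern, response) for response in responses]
    return [0.5 if match else 0.0 for match in matches]

# In conversational mode, each completion is a list of chat messages:
completions = [
    [{"role": "assistant", "content": "<think>2+2=4</think>\n<answer>4</answer>"}],
    [{"role": "assistant", "content": "The answer is 4."}],
]
print(soft_format_reward(completions))  # [0.5, 0.0]
```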
Doriandarko/MLX-GRPO | https://github.com/Doriandarko/MLX-GRPO | Unknown | mlx-grpo.py | https://github.com/Doriandarko/MLX-GRPO/blob/eaacf96e4ad464860144f52b9823408f0ae7c295/mlx-grpo.py | 2025-03-24T10:14:29.408549 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"LENGTH_BASED\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "MLXGRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "config",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": "tokenizer",
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
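
`count_xml` is referenced but not shown in this row. A plausible implementation, modeled on the widely circulated GRPO demo this script resembles (an assumption; the repo's own helper may differ):

```python
def count_xml(text: str) -> float:
    """Award 0.125 per correctly placed tag, with a small penalty
    for trailing text after the closing </answer> tag."""
    count = 0.0
    if text.count("<reasoning>\n") == 1:
        count += 0.125
    if text.count("\n</reasoning>\n") == 1:
        count += 0.125
    if text.count("\n<answer>\n") == 1:
        count += 0.125
        count -= len(text.split("\n</answer>\n")[-1]) * 0.001
    if text.count("\n</answer>") == 1:
        count += 0.125
        count -= (len(text.split("\n</answer>")[-1]) - 1) * 0.001
    return count
```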
michaelhla/pro-1 | https://github.com/michaelhla/pro-1 | Apache License 2.0 | train/unsloth-grpo.py | https://github.com/michaelhla/pro-1/blob/e205302deb82e971311125869e74efa4feb636fc/train/unsloth-grpo.py | 2025-03-24T10:14:31.663908 | [
{
"name": "stability_reward_func (from list item 0)",
"code": "def stability_reward_func(prompts, completions, sequences, orig_stabs, **kwargs):\n \"\"\"Custom reward function for stability optimization with LLM-based soft rewards\"\"\"\n rewards = []\n direct_extraction_success = 0\n lm_applier_success = 0\n extraction_failures = 0\n for i, (prompt, completion, sequence, orig_stab) in enumerate(zip(prompts, completions, sequences, orig_stabs)):\n try:\n reward = 0.0\n print(f'COMPLETION {i}')\n print(completion)\n print('-' * 100)\n think_match = re.search('<think>(.*?)</think>', completion, re.DOTALL)\n reasoning = think_match.group(1).strip() if think_match else completion\n modified_sequence = lm_sequence_applier(sequence, reasoning)\n extraction_method = 'lm_applier'\n if modified_sequence:\n lm_applier_success += 1\n else:\n print(f'LM sequence applier failed for completion {i}, trying direct extraction...')\n modified_sequence = extract_sequence_from_response(completion)\n extraction_method = 'direct'\n if modified_sequence and (not is_valid_amino_acid_sequence(modified_sequence)):\n print(f'Extracted sequence contains invalid amino acids')\n modified_sequence = None\n if modified_sequence:\n direct_extraction_success += 1\n else:\n print(f'Direct extraction also failed for completion {i}')\n extraction_failures += 1\n rewards.append(reward)\n continue\n print(f'Original sequence length: {len(sequence)}')\n print(f'Modified sequence length: {len(modified_sequence)}')\n stab_calc = calculate_relative_stability(original_seq=sequence, modified_seq=modified_sequence, calculator=calculator, orig_stab=orig_stab)\n if stab_calc > 0.0:\n reward += STABILITY_REWARD\n llm_judgments = []\n for goal in ['creativity']:\n try:\n start_time = time.time()\n llm_judgment = get_llm_judgment(completion, sequence, modified_sequence, prompt, goal)\n end_time = time.time()\n reward += lm_reward_coeffs[goal] * llm_judgment\n print(f'{goal} reward: {llm_judgment} in {end_time - start_time:.2f} seconds')\n except Exception as e:\n print(f'Error getting LLM judgment: {e}')\n wandb.log({f'reward/completion_{i}/base_stability_reward': reward, f'reward/completion_{i}/stability_reward': STABILITY_REWARD if stab_calc > 0.0 else 0.0, f'reward/completion_{i}/creativity_reward': lm_reward_coeffs['creativity'] * llm_judgment if llm_judgment else 0.0, f'reward/completion_{i}/extraction_method': extraction_method})\n rewards.append(reward)\n except Exception as e:\n print(f'Error calculating rewards: {e}')\n extraction_failures += 1\n rewards.append(reward)\n total_completions = len(completions)\n if total_completions > 0:\n wandb.log({'extraction/direct_success_rate': direct_extraction_success / total_completions, 'extraction/lm_applier_success_rate': lm_applier_success / total_completions, 'extraction/failure_rate': extraction_failures / total_completions})\n print(f'Extraction stats: Direct: {direct_extraction_success}/{total_completions}, LM Applier: {lm_applier_success}/{total_completions}, Failures: {extraction_failures}/{total_completions}')\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "stability_reward_func (from [stability_reward_func])",
"code": "def stability_reward_func(prompts, completions, sequences, orig_stabs, **kwargs):\n \"\"\"Custom reward function for stability optimization with LLM-based soft rewards\"\"\"\n rewards = []\n direct_extraction_success = 0\n lm_applier_success = 0\n extraction_failures = 0\n for i, (prompt, completion, sequence, orig_stab) in enumerate(zip(prompts, completions, sequences, orig_stabs)):\n try:\n reward = 0.0\n print(f'COMPLETION {i}')\n print(completion)\n print('-' * 100)\n think_match = re.search('<think>(.*?)</think>', completion, re.DOTALL)\n reasoning = think_match.group(1).strip() if think_match else completion\n modified_sequence = lm_sequence_applier(sequence, reasoning)\n extraction_method = 'lm_applier'\n if modified_sequence:\n lm_applier_success += 1\n else:\n print(f'LM sequence applier failed for completion {i}, trying direct extraction...')\n modified_sequence = extract_sequence_from_response(completion)\n extraction_method = 'direct'\n if modified_sequence and (not is_valid_amino_acid_sequence(modified_sequence)):\n print(f'Extracted sequence contains invalid amino acids')\n modified_sequence = None\n if modified_sequence:\n direct_extraction_success += 1\n else:\n print(f'Direct extraction also failed for completion {i}')\n extraction_failures += 1\n rewards.append(reward)\n continue\n print(f'Original sequence length: {len(sequence)}')\n print(f'Modified sequence length: {len(modified_sequence)}')\n stab_calc = calculate_relative_stability(original_seq=sequence, modified_seq=modified_sequence, calculator=calculator, orig_stab=orig_stab)\n if stab_calc > 0.0:\n reward += STABILITY_REWARD\n llm_judgments = []\n for goal in ['creativity']:\n try:\n start_time = time.time()\n llm_judgment = get_llm_judgment(completion, sequence, modified_sequence, prompt, goal)\n end_time = time.time()\n reward += lm_reward_coeffs[goal] * llm_judgment\n print(f'{goal} reward: {llm_judgment} in {end_time - start_time:.2f} seconds')\n except Exception as e:\n print(f'Error getting LLM judgment: {e}')\n wandb.log({f'reward/completion_{i}/base_stability_reward': reward, f'reward/completion_{i}/stability_reward': STABILITY_REWARD if stab_calc > 0.0 else 0.0, f'reward/completion_{i}/creativity_reward': lm_reward_coeffs['creativity'] * llm_judgment if llm_judgment else 0.0, f'reward/completion_{i}/extraction_method': extraction_method})\n rewards.append(reward)\n except Exception as e:\n print(f'Error calculating rewards: {e}')\n extraction_failures += 1\n rewards.append(reward)\n total_completions = len(completions)\n if total_completions > 0:\n wandb.log({'extraction/direct_success_rate': direct_extraction_success / total_completions, 'extraction/lm_applier_success_rate': lm_applier_success / total_completions, 'extraction/failure_rate': extraction_failures / total_completions})\n print(f'Extraction stats: Direct: {direct_extraction_success}/{total_completions}, LM Applier: {lm_applier_success}/{total_completions}, Failures: {extraction_failures}/{total_completions}')\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[stability_reward_func]",
"args": "training_args",
"train_dataset": "train_dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": "[WandBLoggingCallback(), CheckpointCallback(checkpoint_dir=f'./{RUN_NAME}/checkpoints', checkpoint_freq=8, max_checkpoints=5)]",
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
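
This row's reward is notable for combining one verifiable signal (computed stability) with coefficient-weighted LLM judgments. Stripped of the domain specifics, the pattern reduces to the sketch below; `hard_score`, `soft_judges`, and `coeffs` are placeholder names, not identifiers from the repo.

```python
def composite_reward(completions, hard_score, soft_judges, coeffs):
    """Sketch of the pattern stability_reward_func follows: one hard,
    verifiable signal plus coefficient-weighted soft judgments, wrapped
    in a per-completion try/except so a single failure scores 0.0
    instead of crashing the GRPO step."""
    rewards = []
    for completion in completions:
        reward = 0.0
        try:
            if hard_score(completion) > 0.0:  # e.g. stability improved
                reward += 1.0
            for name, judge in soft_judges.items():
                reward += coeffs[name] * judge(completion)
        except Exception as exc:
            print(f"reward error: {exc}")  # fail soft, keep training
        rewards.append(reward)
    return rewards
```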
transformerlab/transformerlab-api | https://github.com/transformerlab/transformerlab-api | GNU Affero General Public License v3.0 | transformerlab/plugins/unsloth_grpo_trainer/main.py | https://github.com/transformerlab/transformerlab-api/blob/b52bec9ee4707833a1f32cfe8130f6e7f618d52f/transformerlab/plugins/unsloth_grpo_trainer/main.py | 2025-03-24T10:14:33.958267 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c, start_thinking_string, end_thinking_string, start_answer_string, end_answer_string) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 1)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_answer(r, start_answer_string, end_answer_string) for r in responses]\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "int_reward_func (from list item 2)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the answer is a number\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "strict_format_reward_func (from list item 3)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks strictly if the completion has a specific format.\"\"\"\n pattern = f'^{start_thinking_string}\\\\n.*?\\\\n{end_thinking_string}\\\\n{start_answer_string}\\\\n.*?\\\\n{end_answer_string}\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "soft_format_reward_func (from list item 4)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = f'{start_thinking_string}.*?{end_thinking_string}\\\\s*{start_answer_string}.*?{end_answer_string}'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, correctness_reward_func, int_reward_func, strict_format_reward_func, soft_format_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c, start_thinking_string, end_thinking_string, start_answer_string, end_answer_string) for c in contents]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, correctness_reward_func, int_reward_func, strict_format_reward_func, soft_format_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_answer(r, start_answer_string, end_answer_string) for r in responses]\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, correctness_reward_func, int_reward_func, strict_format_reward_func, soft_format_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the answer is a number\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, correctness_reward_func, int_reward_func, strict_format_reward_func, soft_format_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks strictly if the completion has a specific format.\"\"\"\n pattern = f'^{start_thinking_string}\\\\n.*?\\\\n{end_thinking_string}\\\\n{start_answer_string}\\\\n.*?\\\\n{end_answer_string}\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, correctness_reward_func, int_reward_func, strict_format_reward_func, soft_format_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = f'{start_thinking_string}.*?{end_thinking_string}\\\\s*{start_answer_string}.*?{end_answer_string}'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, correctness_reward_func, int_reward_func, strict_format_reward_func, soft_format_reward_func]",
"args": "args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": "[progress_callback]",
"tokenizer": "tokenizer",
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
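
This plugin is the first row to build its format regexes from configurable tag strings. A sketch of that factory pattern; the `re.escape` calls and the `re.DOTALL` flag are hardening added here, not taken from the plugin:

```python
import re

def make_strict_format_reward(start_think, end_think, start_answer, end_answer):
    """Build a strict-format reward from configurable tag strings.
    re.escape keeps tags containing regex metacharacters literal."""
    pattern = (
        rf"^{re.escape(start_think)}\n.*?\n{re.escape(end_think)}\n"
        rf"{re.escape(start_answer)}\n.*?\n{re.escape(end_answer)}\n$"
    )
    def strict_format_reward_func(completions, **kwargs) -> list[float]:
        responses = [completion[0]["content"] for completion in completions]
        return [0.5 if re.match(pattern, r, re.DOTALL) else 0.0 for r in responses]
    return strict_format_reward_func

reward_fn = make_strict_format_reward("<think>", "</think>", "<answer>", "</answer>")
```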
JinSeoung-Oh/Reference | https://github.com/JinSeoung-Oh/Reference | Unknown | Reasoning/ReasoningModels.py | https://github.com/JinSeoung-Oh/Reference/blob/e49eb8aea5ea65f0c3b687ece28f075d392d8156/Reasoning/ReasoningModels.py | 2025-03-24T10:14:36.212741 | [
{
"name": "custom_reward_func (from list item 0)",
"code": "def custom_reward_func(prompts, completions, answer, min_reasoning_length=10, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses_answer = [extract_xml_answer(r, tag='answer') for r in responses]\n extracted_responses_reasoning = [extract_xml_answer(r, tag='reasoning') for r in responses]\n extracted_responses_validate = [extract_xml_answer(r, tag='validate') for r in responses]\n rewards = []\n for original_response, extracted_answer, extracted_reasoning, extracted_validate in zip(responses, extracted_responses_answer, extracted_responses_reasoning, extracted_responses_validate):\n is_correct = extracted_answer == answer[0]\n is_int = extracted_answer.isdigit()\n has_answer_tags = '<answer>' in original_response and '</answer>' in original_response\n has_reasoning_tags = '<reasoning>' in original_response and '</reasoning>' in original_response\n has_validate_tags = '<validate>' in original_response and '</validate>' in original_response\n reasoning_length = len(word_tokenize(extracted_reasoning.lower()))\n validate_length = len(word_tokenize(extracted_validate.lower()))\n reward = 0.0\n reasoning_reward = 0.0\n if is_correct:\n reward += 5.0\n if is_int:\n reward += 0.5\n if has_validate_tags:\n reward *= 1.25\n if validate_length >= 5:\n min_validate_length = 5\n max_validate_length = 256\n max_validate_bonus = 3.0\n if validate_length >= min_validate_length:\n if validate_length >= max_validate_length:\n validate_bonus = max_validate_bonus\n else:\n validate_bonus = (validate_length - min_validate_length) / (max_validate_length - min_validate_length) * max_validate_bonus\n else:\n validate_bonus = 0.0\n else:\n validate_bonus = 0.0\n else:\n validate_bonus = 0.0\n if has_reasoning_tags:\n reward *= 1.25\n if reasoning_length >= 5:\n min_scaling_length = 5\n max_scaling_length = 1024\n max_scaling_bonus = 10\n if reasoning_length <= min_scaling_length:\n reasoning_reward = 0.0\n elif reasoning_length >= max_scaling_length:\n reasoning_reward = 5.0\n else:\n reasoning_reward = (reasoning_length - min_scaling_length) / (max_scaling_length - min_scaling_length) * max_scaling_bonus\n else:\n reasoning_reward = 0.0\n else:\n reasoning_reward = 0.0\n total_reward = reward + reasoning_reward + validate_bonus\n if has_validate_tags:\n validate_lower = extracted_validate.lower()\n if re.search('(wait|but|rethink)(?=.{20,})', validate_lower, re.DOTALL):\n total_reward *= 10.0\n rewards.append(total_reward)\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "custom_reward_func (from [custom_reward_func])",
"code": "def custom_reward_func(prompts, completions, answer, min_reasoning_length=10, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses_answer = [extract_xml_answer(r, tag='answer') for r in responses]\n extracted_responses_reasoning = [extract_xml_answer(r, tag='reasoning') for r in responses]\n extracted_responses_validate = [extract_xml_answer(r, tag='validate') for r in responses]\n rewards = []\n for original_response, extracted_answer, extracted_reasoning, extracted_validate in zip(responses, extracted_responses_answer, extracted_responses_reasoning, extracted_responses_validate):\n is_correct = extracted_answer == answer[0]\n is_int = extracted_answer.isdigit()\n has_answer_tags = '<answer>' in original_response and '</answer>' in original_response\n has_reasoning_tags = '<reasoning>' in original_response and '</reasoning>' in original_response\n has_validate_tags = '<validate>' in original_response and '</validate>' in original_response\n reasoning_length = len(word_tokenize(extracted_reasoning.lower()))\n validate_length = len(word_tokenize(extracted_validate.lower()))\n reward = 0.0\n reasoning_reward = 0.0\n if is_correct:\n reward += 5.0\n if is_int:\n reward += 0.5\n if has_validate_tags:\n reward *= 1.25\n if validate_length >= 5:\n min_validate_length = 5\n max_validate_length = 256\n max_validate_bonus = 3.0\n if validate_length >= min_validate_length:\n if validate_length >= max_validate_length:\n validate_bonus = max_validate_bonus\n else:\n validate_bonus = (validate_length - min_validate_length) / (max_validate_length - min_validate_length) * max_validate_bonus\n else:\n validate_bonus = 0.0\n else:\n validate_bonus = 0.0\n else:\n validate_bonus = 0.0\n if has_reasoning_tags:\n reward *= 1.25\n if reasoning_length >= 5:\n min_scaling_length = 5\n max_scaling_length = 1024\n max_scaling_bonus = 10\n if reasoning_length <= min_scaling_length:\n reasoning_reward = 0.0\n elif reasoning_length >= max_scaling_length:\n reasoning_reward = 5.0\n else:\n reasoning_reward = (reasoning_length - min_scaling_length) / (max_scaling_length - min_scaling_length) * max_scaling_bonus\n else:\n reasoning_reward = 0.0\n else:\n reasoning_reward = 0.0\n total_reward = reward + reasoning_reward + validate_bonus\n if has_validate_tags:\n validate_lower = extracted_validate.lower()\n if re.search('(wait|but|rethink)(?=.{20,})', validate_lower, re.DOTALL):\n total_reward *= 10.0\n rewards.append(total_reward)\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[custom_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
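
The length-scaled bonuses inside `custom_reward_func` reduce to one linear-interpolation helper, sketched below. Note the original's reasoning branch returns 5.0 once `max_scaling_length` is reached even though the ramp targets `max_scaling_bonus = 10`, a discontinuity this idealized version does not reproduce.

```python
def length_bonus(n_tokens: int, lo: int, hi: int, max_bonus: float) -> float:
    """Linear length bonus: 0 at or below `lo` tokens, `max_bonus`
    at or above `hi`, interpolated linearly in between."""
    if n_tokens <= lo:
        return 0.0
    if n_tokens >= hi:
        return max_bonus
    return (n_tokens - lo) / (hi - lo) * max_bonus

print(length_bonus(256, 5, 1024, 10))  # ~2.46 for a 256-token reasoning span
```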
lmassaron/Gemma-2-2B-IT-GRPO | https://github.com/lmassaron/Gemma-2-2B-IT-GRPO | Unknown | gemma-grpo.py | https://github.com/lmassaron/Gemma-2-2B-IT-GRPO/blob/23802c018aa1cb9ac74fa14bf2391769c44ebb2b/gemma-grpo.py | 2025-03-24T10:14:45.291361 | [
{
"name": "correctness_reward_func (from list item 0)",
"code": "def correctness_reward_func(completions, answer, **kwargs):\n \"\"\"Reward function that checks if the answer is correct.\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_last_xml_answer(response) for response in responses]\n rewards = [2.0 if extracted == correct else 0.0 for extracted, correct in zip(extracted_responses, answer)]\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "format_reward_func (from list item 1)",
"code": "def format_reward_func(completions, **kwargs):\n \"\"\"Reward function that checks if the completion has the correct format.\"\"\"\n pattern = '^<reasoning>[\\\\s\\\\S]*?<\\\\/reasoning>\\\\s*<answer>[\\\\s\\\\S]*?<\\\\/answer>$'\n responses = [completion[0]['content'] for completion in completions]\n rewards = [1.0 if re.match(pattern, response) else 0.0 for response in responses]\n return rewards",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "correctness_reward_func (from [correctness_reward_func, format_reward_func])",
"code": "def correctness_reward_func(completions, answer, **kwargs):\n \"\"\"Reward function that checks if the answer is correct.\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_last_xml_answer(response) for response in responses]\n rewards = [2.0 if extracted == correct else 0.0 for extracted, correct in zip(extracted_responses, answer)]\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "format_reward_func (from [correctness_reward_func, format_reward_func])",
"code": "def format_reward_func(completions, **kwargs):\n \"\"\"Reward function that checks if the completion has the correct format.\"\"\"\n pattern = '^<reasoning>[\\\\s\\\\S]*?<\\\\/reasoning>\\\\s*<answer>[\\\\s\\\\S]*?<\\\\/answer>$'\n responses = [completion[0]['content'] for completion in completions]\n rewards = [1.0 if re.match(pattern, response) else 0.0 for response in responses]\n return rewards",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "params.MODEL_NAME",
"reward_funcs": "[correctness_reward_func, format_reward_func]",
"args": "training_args",
"train_dataset": "gsm8k_train",
"eval_dataset": null,
"peft_config": "peft_config",
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
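
`extract_xml_answer`, used throughout this row and the MLX-GRPO row, is not shown. The version in the common GRPO demo these scripts mirror is simply (an assumption about this repo's copy):

```python
def extract_xml_answer(text: str) -> str:
    # Return the text between the last <answer> ... </answer> pair, stripped.
    answer = text.split("<answer>")[-1]
    answer = answer.split("</answer>")[0]
    return answer.strip()
```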
yaosheng216/torch_demo | https://github.com/yaosheng216/torch_demo | Unknown | grpo/distillation_qwen.py | https://github.com/yaosheng216/torch_demo/blob/7c441b4fd4f4f71a62035761c206ed7aeba2439a/grpo/distillation_qwen.py | 2025-03-24T10:14:54.394361 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
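
Every row in this dump leaves `reward_weights` null, so each reward function contributes equally. TRL's `GRPOConfig` does expose that knob; a hedged sketch for this row's two functions, which the snippet assumes are already defined in scope as in the script:

```python
from trl import GRPOConfig, GRPOTrainer

training_args = GRPOConfig(
    output_dir="gemma-grpo-out",   # illustrative
    reward_weights=[2.0, 1.0],     # weight correctness twice as heavily as format
)
trainer = GRPOTrainer(
    model="google/gemma-2-2b-it",  # illustrative stand-in for params.MODEL_NAME
    reward_funcs=[correctness_reward_func, format_reward_func],
    args=training_args,
    train_dataset=gsm8k_train,     # assumed defined, as in the row above
    peft_config=peft_config,       # assumed LoRA config, as in the row above
)
```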
erayalp808/GRPO-fine-tuning-turkish-gpt2-350m | https://github.com/erayalp808/GRPO-fine-tuning-turkish-gpt2-350m | Unknown | grpo_training.py | https://github.com/erayalp808/GRPO-fine-tuning-turkish-gpt2-350m/blob/5428820ca46cf074d97f957f126b3255a567c441/grpo_training.py | 2025-03-24T10:15:03.493678 | [
{
"name": "correctness_reward_func (from list item 0)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_final_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "strict_format_reward_func (from list item 1)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<sebep>\\\\n.*?\\\\n</sebep>\\\\n<cevap>\\\\n.*?\\\\n</cevap>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "soft_format_reward_func (from list item 2)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<sebep>.*?</sebep>\\\\s*<cevap>.*?</cevap>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "xmlcount_reward_func (from list item 3)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [correctness_reward_func, strict_format_reward_func, soft_format_reward_func, xmlcount_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_final_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "strict_format_reward_func (from [correctness_reward_func, strict_format_reward_func, soft_format_reward_func, xmlcount_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<sebep>\\\\n.*?\\\\n</sebep>\\\\n<cevap>\\\\n.*?\\\\n</cevap>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "soft_format_reward_func (from [correctness_reward_func, strict_format_reward_func, soft_format_reward_func, xmlcount_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<sebep>.*?</sebep>\\\\s*<cevap>.*?</cevap>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "xmlcount_reward_func (from [correctness_reward_func, strict_format_reward_func, soft_format_reward_func, xmlcount_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "lora_model",
"reward_funcs": "[correctness_reward_func, strict_format_reward_func, soft_format_reward_func, xmlcount_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
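
This row localizes the tags to Turkish: `<sebep>` ("reason") and `<cevap>` ("answer"). Its `extract_final_answer` helper is not shown; a hypothetical version consistent with how it is called:

```python
import re

def extract_final_answer(text: str) -> str:
    """Hypothetical helper matching correctness_reward_func's usage above:
    return the contents of the last <cevap> (answer) block, falling back
    to the raw text if no block is found."""
    matches = re.findall(r"<cevap>(.*?)</cevap>", text, re.DOTALL)
    return matches[-1].strip() if matches else text.strip()

print(extract_final_answer("<sebep>\n3*4=12\n</sebep>\n<cevap>\n12\n</cevap>"))  # "12"
```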
Asad-Shahab/sudokuLLM | https://github.com/Asad-Shahab/sudokuLLM | MIT License | finetune.py | https://github.com/Asad-Shahab/sudokuLLM/blob/4593b0f4b3d80f3afebf18653a279e6cea3b0068/finetune.py | 2025-03-24T10:15:24.265651 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function for XML formatting details.\"\"\"\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"More lenient reward function for XML format checking.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n return [0.5 if re.match(pattern, r, re.DOTALL) else 0.0 for r in responses]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has the correct XML format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n return [0.5 if re.match(pattern, r, re.DOTALL) else 0.0 for r in responses]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if all numbers in the solution are 1-4.\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n rewards = []\n for response in responses:\n grid = extract_grid_from_answer(response)\n if grid is None:\n rewards.append(0.0)\n continue\n try:\n if all((all((num in [1, 2, 3, 4] for num in row)) for row in grid)):\n rewards.append(0.5)\n else:\n rewards.append(0.0)\n except:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the Sudoku solution is correct.\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n rewards = []\n for response, correct_answer in zip(responses, answer):\n predicted_grid = extract_grid_from_answer(response)\n correct_grid = extract_grid_from_answer(correct_answer)\n if predicted_grid is None or correct_grid is None:\n rewards.append(0.0)\n continue\n if predicted_grid == correct_grid and is_valid_sudoku_solution(predicted_grid):\n rewards.append(2.0)\n else:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function for XML formatting details.\"\"\"\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"More lenient reward function for XML format checking.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n return [0.5 if re.match(pattern, r, re.DOTALL) else 0.0 for r in responses]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has the correct XML format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n return [0.5 if re.match(pattern, r, re.DOTALL) else 0.0 for r in responses]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if all numbers in the solution are 1-4.\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n rewards = []\n for response in responses:\n grid = extract_grid_from_answer(response)\n if grid is None:\n rewards.append(0.0)\n continue\n try:\n if all((all((num in [1, 2, 3, 4] for num in row)) for row in grid)):\n rewards.append(0.5)\n else:\n rewards.append(0.0)\n except:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the Sudoku solution is correct.\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n rewards = []\n for response, correct_answer in zip(responses, answer):\n predicted_grid = extract_grid_from_answer(response)\n correct_grid = extract_grid_from_answer(correct_answer)\n if predicted_grid is None or correct_grid is None:\n rewards.append(0.0)\n continue\n if predicted_grid == correct_grid and is_valid_sudoku_solution(predicted_grid):\n rewards.append(2.0)\n else:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
alxndrTL/gpu-rl | https://github.com/alxndrTL/gpu-rl | Unknown | grpo_gsm8k.py | https://github.com/alxndrTL/gpu-rl/blob/1f2bd13c9864049ec94e356f20f0ffb7a1f4b1e3/grpo_gsm8k.py | 2025-03-24T10:15:26.599891 | [
{
"name": "format_reasoning_reward (from list item 0)",
"code": "def format_reasoning_reward(prompts, completions, answer, **kwargs) -> list[float]:\n parsed_responses = parse_responses(completions)\n rewards = [0.5 if r['thinking_content'] and r['response'] else 0.0 for r in parsed_responses]\n return rewards",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "format_number_reward (from list item 1)",
"code": "def format_number_reward(prompts, completions, answer, **kwargs) -> list[float]:\n parsed_responses = parse_responses(completions)\n rewards = [0.5 if r['response'].isdigit() else 0.0 for r in parsed_responses]\n return rewards",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "accuracy_reward (from list item 2)",
"code": "def accuracy_reward(prompts, completions, answer, **kwargs) -> list[float]:\n parsed_responses = parse_responses(completions)\n rewards = []\n for r, a in zip(parsed_responses, answer):\n response = r['response'].strip()\n numbers = re.findall('-?\\\\d+', response)\n last_number = numbers[-1] if numbers else ''\n rewards.append(2.0 if last_number == str(a) else 0.0)\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "log_rewards (from list item 3)",
"code": "def log_rewards(prompts, completions, answer, **kwargs):\n return 0\n rewards = {'accuracy': accuracy_reward(prompts, completions, answer), 'format_number': format_number_reward(prompts, completions, answer), 'format_reasoning': format_reasoning_reward(prompts, completions, answer)}\n example_response = get_completion_content(completions[0])\n example_parsed = parse_reasoning_response(example_response)\n example_answer = answer[0]\n example_prompt = prompts[0][-1]['content']\n print(f'-' * 50 + f'\\nExample prompt:\\n{example_prompt}\\n' + f'-' * 10 + f'\\nExample response:\\n{example_response}\\n' + f'-' * 10 + f'\\nExample answer:\\n{example_answer}\\n' + f'-' * 10 + f'\\nExample Correct?: {example_parsed['response'] == example_answer}\\n' + f'-' * 10 + f'\\nRewards:\\n{json.dumps(rewards, indent=2)}')\n return 0",
"label": "{\"label\": \"DEBUGGING\"}"
},
{
"name": "format_reasoning_reward (from [format_reasoning_reward, format_number_reward, accuracy_reward, log_rewards])",
"code": "def format_reasoning_reward(prompts, completions, answer, **kwargs) -> list[float]:\n parsed_responses = parse_responses(completions)\n rewards = [0.5 if r['thinking_content'] and r['response'] else 0.0 for r in parsed_responses]\n return rewards",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "format_number_reward (from [format_reasoning_reward, format_number_reward, accuracy_reward, log_rewards])",
"code": "def format_number_reward(prompts, completions, answer, **kwargs) -> list[float]:\n parsed_responses = parse_responses(completions)\n rewards = [0.5 if r['response'].isdigit() else 0.0 for r in parsed_responses]\n return rewards",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "accuracy_reward (from [format_reasoning_reward, format_number_reward, accuracy_reward, log_rewards])",
"code": "def accuracy_reward(prompts, completions, answer, **kwargs) -> list[float]:\n parsed_responses = parse_responses(completions)\n rewards = []\n for r, a in zip(parsed_responses, answer):\n response = r['response'].strip()\n numbers = re.findall('-?\\\\d+', response)\n last_number = numbers[-1] if numbers else ''\n rewards.append(2.0 if last_number == str(a) else 0.0)\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "log_rewards (from [format_reasoning_reward, format_number_reward, accuracy_reward, log_rewards])",
"code": "def log_rewards(prompts, completions, answer, **kwargs):\n return 0\n rewards = {'accuracy': accuracy_reward(prompts, completions, answer), 'format_number': format_number_reward(prompts, completions, answer), 'format_reasoning': format_reasoning_reward(prompts, completions, answer)}\n example_response = get_completion_content(completions[0])\n example_parsed = parse_reasoning_response(example_response)\n example_answer = answer[0]\n example_prompt = prompts[0][-1]['content']\n print(f'-' * 50 + f'\\nExample prompt:\\n{example_prompt}\\n' + f'-' * 10 + f'\\nExample response:\\n{example_response}\\n' + f'-' * 10 + f'\\nExample answer:\\n{example_answer}\\n' + f'-' * 10 + f'\\nExample Correct?: {example_parsed['response'] == example_answer}\\n' + f'-' * 10 + f'\\nRewards:\\n{json.dumps(rewards, indent=2)}')\n return 0",
"label": "{\"label\": \"DEBUGGING\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model_args.model_name_or_path",
"reward_funcs": "[format_reasoning_reward, format_number_reward, accuracy_reward, log_rewards]",
"args": "training_args",
"train_dataset": "data",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
Sam-de-Ham/finetuning-tests | https://github.com/Sam-de-Ham/finetuning-tests | Unknown | full_training_freeze.py | https://github.com/Sam-de-Ham/finetuning-tests/blob/7617ee1361314a054f353d2764affb6ace27ec50/full_training_freeze.py | 2025-03-24T10:15:28.888442 | [
{
"name": "reward_len",
"code": "def reward_len(completions, **kwargs):\n return [-abs(20 - len(completion)) for completion in completions]",
"label": "{\"label\": \"LENGTH_BASED\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "'deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B'",
"reward_funcs": "reward_len",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
Sam-de-Ham/finetuning-tests | https://github.com/Sam-de-Ham/finetuning-tests | Unknown | full_training_simple.py | https://github.com/Sam-de-Ham/finetuning-tests/blob/7617ee1361314a054f353d2764affb6ace27ec50/full_training_simple.py | 2025-03-24T10:15:31.105297 | [
{
"name": "reward_len",
"code": "def reward_len(completions, **kwargs):\n return [-abs(20 - len(completion)) for completion in completions]",
"label": "{\"label\": \"LENGTH_BASED\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "'deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B'",
"reward_funcs": "reward_len",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
summerspringwei/alpaca-lora-decompilation | https://github.com/summerspringwei/alpaca-lora-decompilation | Apache License 2.0 | models/llmcompiler/grpo_example.py | https://github.com/summerspringwei/alpaca-lora-decompilation/blob/3d5fb5344992dd9c6e8a6447feee89dc889921fd/models/llmcompiler/grpo_example.py | 2025-03-24T10:15:37.883093 | [
{
"name": "reward_len",
"code": "def reward_len(completions, **kwargs):\n return [-abs(20 - len(completion)) for completion in completions]",
"label": "{\"label\": \"LENGTH_BASED\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "'Qwen/Qwen2-0.5B-Instruct'",
"reward_funcs": "reward_len",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
meetrais/LLM-Fine-Tuning | https://github.com/meetrais/LLM-Fine-Tuning | Unknown | Qwen2.5_3B_GRPO.py | https://github.com/meetrais/LLM-Fine-Tuning/blob/d5e226e401894795c38e671fef4e117254cfeb51/Qwen2.5_3B_GRPO.py | 2025-03-24T10:15:47.125877 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
MarcoTuc/xent | https://github.com/MarcoTuc/xent | Unknown | llama-grpo-xent/lab.py | https://github.com/MarcoTuc/xent/blob/2a46ba203123eda2e8f195025309af53c0899555/llama-grpo-xent/lab.py | 2025-03-24T10:15:56.616591 | [
{
"name": "dummy_reward (from list item 0)",
"code": "def dummy_reward(completions, **kwargs):\n responses = [completion[0]['content'] for completion in completions]\n print(f'{len(responses)} completions have been produced')\n for response in responses:\n print(response)\n print('\\n\\n')\n return 0",
"label": "{\"label\": \"DEBUGGING\"}"
},
{
"name": "dummy_reward (from [dummy_reward])",
"code": "def dummy_reward(completions, **kwargs):\n responses = [completion[0]['content'] for completion in completions]\n print(f'{len(responses)} completions have been produced')\n for response in responses:\n print(response)\n print('\\n\\n')\n return 0",
"label": "{\"label\": \"DEBUGGING\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[dummy_reward]",
"args": "training_args",
"train_dataset": "dataset['train']",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
Sam-de-Ham/finetuning-tests | https://github.com/Sam-de-Ham/finetuning-tests | Unknown | full_training_simple_grpo.py | https://github.com/Sam-de-Ham/finetuning-tests/blob/7617ee1361314a054f353d2764affb6ace27ec50/full_training_simple_grpo.py | 2025-03-24T10:16:01.156066 | [
{
"name": "reward_len",
"code": "def reward_len(completions, **kwargs):\n return [-abs(20 - len(completion)) for completion in completions]",
"label": "{\"label\": \"LENGTH_BASED\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "reward_len",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
The-Swarm-Corporation/AgentGym | https://github.com/The-Swarm-Corporation/AgentGym | MIT License | grpo_example_two.py | https://github.com/The-Swarm-Corporation/AgentGym/blob/baa5184fdbdc48bd64f5bde17909fa8c482c2851/grpo_example_two.py | 2025-03-24T10:16:07.952736 | [
{
"name": "reward_len",
"code": "def reward_len(completions, **kwargs):\n return [abs(20 - len(completion)) for completion in completions]",
"label": "{\"label\": \"LENGTH_BASED\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "'Qwen/Qwen2-0.5B-Instruct'",
"reward_funcs": "reward_len",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
haoruilee/Awesome-GRPO-training-example | https://github.com/haoruilee/Awesome-GRPO-training-example | Unknown | GRPO-Llama-1B.py | https://github.com/haoruilee/Awesome-GRPO-training-example/blob/1a0cf86a50ed4d4602a1b28dbe44c23a83573a11/GRPO-Llama-1B.py | 2025-03-24T10:16:12.544588 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
xiiiiiiiiii/strategicLearning | https://github.com/xiiiiiiiiii/strategicLearning | Unknown | train_grpo_gsm8k.py | https://github.com/xiiiiiiiiii/strategicLearning/blob/f92d0b57e9f7727e0cdad8a5f3ee04b163071ab3/train_grpo_gsm8k.py | 2025-03-24T10:16:14.791293 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
datawhalechina/unlock-deepseek | https://github.com/datawhalechina/unlock-deepseek | Unknown | Datawhale-R1/train_Datawhale-R1_unsloth.py | https://github.com/datawhalechina/unlock-deepseek/blob/7bfaaf6f93dcf2249525392d5310881a58f6f79b/Datawhale-R1/train_Datawhale-R1_unsloth.py | 2025-03-24T10:16:21.520168 | [
{
"name": "format_reward_func (from list item 0)",
"code": "def format_reward_func(completions, **kwargs):\n \"\"\"\n 格式奖励函数,检查模型输出格式是否匹配: <think>...</think><answer>...</answer>\n\n 参数:\n completions (list[str]): 生成的输出\n 返回:\n list[float]: 奖励分数\n \"\"\"\n rewards = []\n for completion in completions:\n try:\n completion = '<think>' + completion\n if random.random() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'completion_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(completion)\n regex = '^<think>([^<]*(?:<(?!/?think>)[^<]*)*)<\\\\/think>\\\\n<answer>([\\\\s\\\\S]*?)<\\\\/answer>$'\n match = re.search(regex, completion, re.DOTALL)\n if match is None or len(match.groups()) != 2:\n rewards.append(0.0)\n else:\n rewards.append(1.0)\n except Exception:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "equation_reward_func (from list item 1)",
"code": "def equation_reward_func(prompts, completions, target, nums, **kwargs):\n \"\"\"\n 方程奖励函数,检查计算结果是否正确,数字是否符合使用要求(每个数字只用一次,只使用所提供的数字)\n\n 参数:\n completions (list[str]): 生成的输出\n target (list[str]): 预期的答案\n nums (list[str]): 可用的数字\n\n 返回:\n list[float]: 奖励分数\n \"\"\"\n rewards = []\n for prompt, completion, gt, numbers in zip(prompts, completions, target, nums):\n try:\n completion = '<think>' + completion\n match = re.search('<answer>(.*?)<\\\\/answer>', completion)\n if match is None:\n rewards.append(0.0)\n continue\n equation = match.group(1).strip()\n used_numbers = [int(n) for n in re.findall('\\\\d+', equation)]\n if sorted(used_numbers) != sorted(numbers):\n rewards.append(0.0)\n continue\n allowed_pattern = '^[\\\\d+\\\\-*/().\\\\s]+$'\n if not re.match(allowed_pattern, equation):\n rewards.append(0.0)\n continue\n result = eval(equation, {'__builtins__': None}, {})\n if random.random() < 0.3:\n print('-' * 20, f'\\nQuestion:\\n{prompt}', f'\\nCompletion:\\n{completion}', f'\\nResult:\\n{result}', f'\\nTarget:\\n{gt}', f'\\nNumbers:\\n{numbers}')\n if abs(float(result) - float(gt)) < 1e-05:\n rewards.append(1.0)\n if random.random() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'success_completion_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(f'\\nQuestion:\\n{prompt}\\nCompletion:\\n{completion}\\nResult:\\n{result}\\nTarget:\\n{gt}\\nNumbers:\\n{numbers}')\n else:\n rewards.append(0.0)\n except Exception:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "format_reward_func (from [format_reward_func, equation_reward_func])",
"code": "def format_reward_func(completions, **kwargs):\n \"\"\"\n 格式奖励函数,检查模型输出格式是否匹配: <think>...</think><answer>...</answer>\n\n 参数:\n completions (list[str]): 生成的输出\n 返回:\n list[float]: 奖励分数\n \"\"\"\n rewards = []\n for completion in completions:\n try:\n completion = '<think>' + completion\n if random.random() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'completion_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(completion)\n regex = '^<think>([^<]*(?:<(?!/?think>)[^<]*)*)<\\\\/think>\\\\n<answer>([\\\\s\\\\S]*?)<\\\\/answer>$'\n match = re.search(regex, completion, re.DOTALL)\n if match is None or len(match.groups()) != 2:\n rewards.append(0.0)\n else:\n rewards.append(1.0)\n except Exception:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "equation_reward_func (from [format_reward_func, equation_reward_func])",
"code": "def equation_reward_func(prompts, completions, target, nums, **kwargs):\n \"\"\"\n 方程奖励函数,检查计算结果是否正确,数字是否符合使用要求(每个数字只用一次,只使用所提供的数字)\n\n 参数:\n completions (list[str]): 生成的输出\n target (list[str]): 预期的答案\n nums (list[str]): 可用的数字\n\n 返回:\n list[float]: 奖励分数\n \"\"\"\n rewards = []\n for prompt, completion, gt, numbers in zip(prompts, completions, target, nums):\n try:\n completion = '<think>' + completion\n match = re.search('<answer>(.*?)<\\\\/answer>', completion)\n if match is None:\n rewards.append(0.0)\n continue\n equation = match.group(1).strip()\n used_numbers = [int(n) for n in re.findall('\\\\d+', equation)]\n if sorted(used_numbers) != sorted(numbers):\n rewards.append(0.0)\n continue\n allowed_pattern = '^[\\\\d+\\\\-*/().\\\\s]+$'\n if not re.match(allowed_pattern, equation):\n rewards.append(0.0)\n continue\n result = eval(equation, {'__builtins__': None}, {})\n if random.random() < 0.3:\n print('-' * 20, f'\\nQuestion:\\n{prompt}', f'\\nCompletion:\\n{completion}', f'\\nResult:\\n{result}', f'\\nTarget:\\n{gt}', f'\\nNumbers:\\n{numbers}')\n if abs(float(result) - float(gt)) < 1e-05:\n rewards.append(1.0)\n if random.random() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'success_completion_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(f'\\nQuestion:\\n{prompt}\\nCompletion:\\n{completion}\\nResult:\\n{result}\\nTarget:\\n{gt}\\nNumbers:\\n{numbers}')\n else:\n rewards.append(0.0)\n except Exception:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[format_reward_func, equation_reward_func]",
"args": "training_args",
"train_dataset": "train_dataset",
"eval_dataset": "test_dataset",
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": "callbacks",
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
Manto/chess-reasoning-zero | https://github.com/Manto/chess-reasoning-zero | MIT License | qwen-1.5b.countdown.py | https://github.com/Manto/chess-reasoning-zero/blob/a887574cccdee0a752c80181ce3d6f428acbd52a/qwen-1.5b.countdown.py | 2025-03-24T10:16:23.784558 | [
{
"name": "countdown_reward_func",
"code": "def countdown_reward_func(prompts, completions, ground_truth, **kwargs) -> list[float]:\n scores = []\n for prompt, completion, truth in zip(prompts, completions, ground_truth):\n score = compute_score(completion[0]['content'], truth)\n scores.append(score)\n print(scores)\n return scores",
"label": "{\"label\": \"DEBUGGING\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "countdown_reward_func",
"args": "training_args",
"train_dataset": "train_dataset",
"eval_dataset": "test_dataset",
"peft_config": "lora_config",
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
summerspringwei/alpaca-lora-decompilation | https://github.com/summerspringwei/alpaca-lora-decompilation | Apache License 2.0 | models/llmcompiler/grpo_exebench.py | https://github.com/summerspringwei/alpaca-lora-decompilation/blob/3d5fb5344992dd9c6e8a6447feee89dc889921fd/models/llmcompiler/grpo_exebench.py | 2025-03-24T10:16:28.246568 | [
{
"name": "reward_compilation",
"code": "def reward_compilation(completions, **kwargs):\n original_input = [{} for _ in range(len(completions))]\n predict_list_length = []\n for k, v in kwargs.items():\n for i in range(len(v)):\n original_input[i][k] = v[i]\n validation_list = []\n for row, completion in zip(original_input, completions):\n print(len(tokenizer(completion)['input_ids']))\n match_results = extract_llmcompiler_code_blocks(completion)\n predict_ir = 'Failed to extract IR'\n if len(match_results) > 0:\n predict_ir = match_results[0]\n record = {'file': row['path'], 'predict': [predict_ir], 'output': row['llvm_ir']['code'][-1]}\n record = validate_by_execution(record, row, validation_dir)\n validation_list.append(record)\n predict_list_length.append(len(tokenizer(predict_ir)['input_ids']))\n predict_reward = [1 if r['predict_compile_success'][0] is True else 0 for r in validation_list]\n executable_reward = [1 if r['predict_execution_success'][0] is True else 0 for r in validation_list]\n wandb.log({'results': wandb.Table(columns=['compile', 'executable'], data=[[p, e] for p, e in zip(predict_reward, executable_reward)])})\n return predict_reward",
"label": "{\"label\": \"COMPUTATIONAL\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model_path",
"reward_funcs": "reward_compilation",
"args": "training_args",
"train_dataset": "exebench_dataset",
"eval_dataset": null,
"peft_config": "lora_config",
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
Oxen-AI/GRPO-With-Cargo-Feedback | https://github.com/Oxen-AI/GRPO-With-Cargo-Feedback | MIT License | train.py | https://github.com/Oxen-AI/GRPO-With-Cargo-Feedback/blob/11d0f570898f5764d9a366898ccb3da4c745a378/train.py | 2025-03-24T10:16:32.752977 | [
{
"name": "cargo_build_reward_func (from list item 0)",
"code": "@experiment.log(f'cargo_build_rewards.jsonl')\ndef cargo_build_reward_func(prompts, completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_answers = [extract_rust_code(r) for r in responses]\n results = []\n for i, answer in enumerate(extracted_answers):\n data = {'rust_code': answer}\n tools = [RustTool('build')]\n cargo_results = setup_and_test_rust_project(data, tools)\n score = 1.0 if cargo_results['build_passed'] else 0.0\n results.append(score)\n return results",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "cargo_clippy_reward_func (from list item 1)",
"code": "@experiment.log(f'cargo_clippy_rewards.jsonl')\ndef cargo_clippy_reward_func(prompts, completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_answers = [extract_rust_code(r) for r in responses]\n results = []\n for i, answer in enumerate(extracted_answers):\n data = {'rust_code': answer}\n tools = [RustTool('clippy')]\n cargo_results = setup_and_test_rust_project(data, tools)\n score = 1.0 if cargo_results['clippy_passed'] else 0.0\n results.append(score)\n return results",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "cargo_test_reward_func (from list item 2)",
"code": "@experiment.log(f'cargo_test_rewards.jsonl')\ndef cargo_test_reward_func(prompts, completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_codes = [extract_rust_code(r) for r in responses]\n extracted_tests = [extract_test_code(c) for c in extracted_codes]\n results = []\n for i, answer in enumerate(extracted_codes):\n score = 0.0\n if extracted_tests[i]:\n data = {'rust_code': answer}\n tools = [RustTool('test')]\n cargo_results = setup_and_test_rust_project(data, tools)\n score = 2.0 if cargo_results['test_passed'] else 0.0\n results.append(score)\n return results",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "non_empty_reward_func (from list item 3)",
"code": "@experiment.log(f'non_empty_rewards.jsonl')\ndef non_empty_reward_func(prompts, completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [response_contains_more_than_non_empty_line(c) for c in contents]",
"label": "{\"label\": \"LENGTH_BASED\"}"
},
{
"name": "test_block_count_reward_func (from list item 4)",
"code": "@experiment.log(f'test_block_count_rewards.jsonl')\ndef test_block_count_reward_func(prompts, completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [response_contains_one_test_block(c) for c in contents]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "tests_have_asserts_reward_func (from list item 5)",
"code": "@experiment.log(f'tests_have_asserts_rewards.jsonl')\ndef tests_have_asserts_reward_func(prompts, completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [response_contains_asserts(c) for c in contents]",
"label": "{\"label\": \"DEBUGGING\"}"
},
{
"name": "cargo_build_reward_func (from [cargo_build_reward_func, cargo_clippy_reward_func, cargo_test_reward_func, non_empty_reward_func, test_block_count_reward_func, tests_have_asserts_reward_func])",
"code": "@experiment.log(f'cargo_build_rewards.jsonl')\ndef cargo_build_reward_func(prompts, completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_answers = [extract_rust_code(r) for r in responses]\n results = []\n for i, answer in enumerate(extracted_answers):\n data = {'rust_code': answer}\n tools = [RustTool('build')]\n cargo_results = setup_and_test_rust_project(data, tools)\n score = 1.0 if cargo_results['build_passed'] else 0.0\n results.append(score)\n return results",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "cargo_clippy_reward_func (from [cargo_build_reward_func, cargo_clippy_reward_func, cargo_test_reward_func, non_empty_reward_func, test_block_count_reward_func, tests_have_asserts_reward_func])",
"code": "@experiment.log(f'cargo_clippy_rewards.jsonl')\ndef cargo_clippy_reward_func(prompts, completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_answers = [extract_rust_code(r) for r in responses]\n results = []\n for i, answer in enumerate(extracted_answers):\n data = {'rust_code': answer}\n tools = [RustTool('clippy')]\n cargo_results = setup_and_test_rust_project(data, tools)\n score = 1.0 if cargo_results['clippy_passed'] else 0.0\n results.append(score)\n return results",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "cargo_test_reward_func (from [cargo_build_reward_func, cargo_clippy_reward_func, cargo_test_reward_func, non_empty_reward_func, test_block_count_reward_func, tests_have_asserts_reward_func])",
"code": "@experiment.log(f'cargo_test_rewards.jsonl')\ndef cargo_test_reward_func(prompts, completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_codes = [extract_rust_code(r) for r in responses]\n extracted_tests = [extract_test_code(c) for c in extracted_codes]\n results = []\n for i, answer in enumerate(extracted_codes):\n score = 0.0\n if extracted_tests[i]:\n data = {'rust_code': answer}\n tools = [RustTool('test')]\n cargo_results = setup_and_test_rust_project(data, tools)\n score = 2.0 if cargo_results['test_passed'] else 0.0\n results.append(score)\n return results",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "non_empty_reward_func (from [cargo_build_reward_func, cargo_clippy_reward_func, cargo_test_reward_func, non_empty_reward_func, test_block_count_reward_func, tests_have_asserts_reward_func])",
"code": "@experiment.log(f'non_empty_rewards.jsonl')\ndef non_empty_reward_func(prompts, completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [response_contains_more_than_non_empty_line(c) for c in contents]",
"label": "{\"label\": \"DEBUGGING\"}"
},
{
"name": "test_block_count_reward_func (from [cargo_build_reward_func, cargo_clippy_reward_func, cargo_test_reward_func, non_empty_reward_func, test_block_count_reward_func, tests_have_asserts_reward_func])",
"code": "@experiment.log(f'test_block_count_rewards.jsonl')\ndef test_block_count_reward_func(prompts, completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [response_contains_one_test_block(c) for c in contents]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "tests_have_asserts_reward_func (from [cargo_build_reward_func, cargo_clippy_reward_func, cargo_test_reward_func, non_empty_reward_func, test_block_count_reward_func, tests_have_asserts_reward_func])",
"code": "@experiment.log(f'tests_have_asserts_rewards.jsonl')\ndef tests_have_asserts_reward_func(prompts, completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [response_contains_asserts(c) for c in contents]",
"label": "{\"label\": \"DEBUGGING\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[cargo_build_reward_func, cargo_clippy_reward_func, cargo_test_reward_func, non_empty_reward_func, test_block_count_reward_func, tests_have_asserts_reward_func]",
"args": "training_args",
"train_dataset": "train_dataset",
"eval_dataset": null,
"peft_config": "peft_config",
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": "[OxenTrainerCallback(experiment, bar, commit_every=commit_every.value)]",
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
awdemos/awdemos | https://github.com/awdemos/awdemos | Unknown | demos/llm/alpha_maze_finder_grpo/alphamaze_solver.py | https://github.com/awdemos/awdemos/blob/f59b9335803e762618c92ec7b6e655a693607555/demos/llm/alpha_maze_finder_grpo/alphamaze_solver.py | 2025-03-24T10:16:37.352898 | [
{
"name": "maze_reward (from list item 0)",
"code": "def maze_reward(completions, prompts, **kwargs):\n rewards = []\n for completion in completions:\n game = MazeGame()\n moves = completion.split()\n for move in moves:\n _, done = game.move(move)\n if done:\n rewards.append(1.0)\n break\n else:\n rewards.append(-1.0)\n rewards_tensor = torch.tensor(rewards, dtype=torch.float32, requires_grad=False)\n device = kwargs.get('model', torch.device('cpu')).device if isinstance(kwargs.get('model'), torch.nn.Module) else torch.device('cpu')\n return rewards_tensor.to(device)",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "maze_reward (from [maze_reward])",
"code": "def maze_reward(completions, prompts, **kwargs):\n rewards = []\n for completion in completions:\n game = MazeGame()\n moves = completion.split()\n for move in moves:\n _, done = game.move(move)\n if done:\n rewards.append(1.0)\n break\n else:\n rewards.append(-1.0)\n rewards_tensor = torch.tensor(rewards, dtype=torch.float32, requires_grad=False)\n device = kwargs.get('model', torch.device('cpu')).device if isinstance(kwargs.get('model'), torch.nn.Module) else torch.device('cpu')\n return rewards_tensor.to(device)",
"label": "{\"label\": \"COMPUTATIONAL\"}"
}
] | [
{
"trainer_type": "CustomGRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[maze_reward]",
"args": "training_args",
"train_dataset": "train_dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": "tokenizer",
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": "MazeGame",
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
HarleyCoops/TrainingRun | https://github.com/HarleyCoops/TrainingRun | Unknown | grpo_demo.py | https://github.com/HarleyCoops/TrainingRun/blob/371054d5438de5f97e2b54d8bdfd8deebbd3fe85/grpo_demo.py | 2025-03-24T10:16:39.741640 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
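A minimal sketch of how reward lists like those in the rows above are wired into TRL's `GRPOTrainer`; the toy reward, `my_model`, `my_dataset`, and `tokenizer` below are hypothetical placeholders, not values taken from any row:

```python
# Minimal sketch, assuming trl's GRPOTrainer API; all names are placeholders.
from trl import GRPOConfig, GRPOTrainer

def length_reward(completions, **kwargs):
    # Toy reward for conversational-format completions: prefer ~200-char replies.
    return [-abs(200 - len(c[0]["content"])) for c in completions]

training_args = GRPOConfig(output_dir="grpo-demo", num_generations=4)
trainer = GRPOTrainer(
    model="my_model",              # hypothetical model id
    reward_funcs=[length_reward],  # one or more callables, as in the rows above
    args=training_args,
    train_dataset=my_dataset,      # hypothetical dataset with a "prompt" column
    processing_class=tokenizer,    # hypothetical tokenizer
)
trainer.train()
```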
erfanzar/EasyDeL | https://github.com/erfanzar/EasyDeL | Apache License 2.0 | easydel/scripts/finetune/gsm8k_grpo.py | https://github.com/erfanzar/EasyDeL/blob/64a77804783cb790bff1f8c744163915f55aea5f/easydel/scripts/finetune/gsm8k_grpo.py | 2025-03-24T10:16:46.626204 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [runtime_config.xml_full_match_reward if match else runtime_config.xml_full_match_reject for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [runtime_config.xml_full_match_reward if match else runtime_config.xml_full_match_reject for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, batch, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n answer = processor.batch_decode(batch['answer_ids']) * runtime_config.num_return_sequences\n return [runtime_config.correctness_reward if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [runtime_config.xml_full_match_reward if match else runtime_config.xml_full_match_reject for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [runtime_config.xml_full_match_reward if match else runtime_config.xml_full_match_reject for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, batch, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n answer = processor.batch_decode(batch['answer_ids']) * runtime_config.num_return_sequences\n return [runtime_config.correctness_reward if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": null,
"train_dataset": "train_dataset",
"eval_dataset": "test_dataset",
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "processor",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": "grpo_config",
"vinference": "vinference",
"data_tokenize_fn": "data_tokenize_fn",
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
benglard/consciousness | https://github.com/benglard/consciousness | Unknown | llm_safety.py | https://github.com/benglard/consciousness/blob/dc7e58655c53bb34d2bc9c1b6fb0c2f26a77b339/llm_safety.py | 2025-03-24T10:16:48.956696 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"LENGTH_BASED\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if a in r else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "smol_model_predictor (from list item 5)",
"code": "def smol_model_predictor(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n with torch.inference_mode():\n inputs = tokenizer(responses, return_tensors='pt', padding=True, truncation=True).to(device)\n per_token_logps = _get_model_logps(smol_model, inputs)\n out_attn_mask = inputs.attention_mask[:, 1:]\n avg_logps = (per_token_logps * out_attn_mask).sum(dim=1) / out_attn_mask.sum(dim=1)\n return avg_logps.tolist()",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func, smol_model_predictor])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func, smol_model_predictor])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func, smol_model_predictor])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func, smol_model_predictor])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func, smol_model_predictor])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if a in r else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "smol_model_predictor (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func, smol_model_predictor])",
"code": "def smol_model_predictor(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n with torch.inference_mode():\n inputs = tokenizer(responses, return_tensors='pt', padding=True, truncation=True).to(device)\n per_token_logps = _get_model_logps(smol_model, inputs)\n out_attn_mask = inputs.attention_mask[:, 1:]\n avg_logps = (per_token_logps * out_attn_mask).sum(dim=1) / out_attn_mask.sum(dim=1)\n return avg_logps.tolist()",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func, smol_model_predictor]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": "peft_config",
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
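The `smol_model_predictor` entry in the row above stands out: it scores completions with a second model's average per-token log-probability rather than a rule. The helper `_get_model_logps` it calls is not included in the row, so the following is one plausible reconstruction, not the repo's actual code:

```python
import torch

def get_model_logps(model, inputs):
    # Plausible reconstruction of the _get_model_logps helper referenced above.
    logits = model(**inputs).logits[:, :-1, :]  # position t predicts token t+1
    labels = inputs["input_ids"][:, 1:]         # shift targets left by one
    logps = torch.log_softmax(logits, dim=-1)
    # Per-token log-prob of the observed tokens: shape (batch, seq_len - 1)
    return logps.gather(-1, labels.unsqueeze(-1)).squeeze(-1)
```

Masking with `attention_mask[:, 1:]` and averaging, as the row's code does, then yields a length-normalized fluency score per completion.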
nnebp/GPRO-s-game-of-life | https://github.com/nnebp/GPRO-s-game-of-life | Unknown | train_gsm8k_mps.py | https://github.com/nnebp/GPRO-s-game-of-life/blob/169c46a84cde5f6deb941bed685fdab0ffd1e11b/train_gsm8k_mps.py | 2025-03-24T10:16:51.195643 | [
{
"name": "correctness_reward (from list item 0)",
"code": "def correctness_reward(prompts, completions, answer, **kwargs):\n \"\"\"Reward function for correct answers\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted = [extract_xml_answer(r) for r in responses]\n return [2.0 if r.strip() == a.strip() else 0.0 for r, a in zip(extracted, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "format_reward (from list item 1)",
"code": "def format_reward(completions, **kwargs):\n \"\"\"Reward function for proper format\"\"\"\n pattern = '<reasoning>[\\\\s\\\\S]*?</reasoning>\\\\s*<answer>[\\\\s\\\\S]*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [bool(re.search(pattern, r)) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "numeric_answer_reward (from list item 2)",
"code": "def numeric_answer_reward(completions, **kwargs):\n \"\"\"Reward function that checks if the answer is numeric\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted = [extract_xml_answer(r) for r in responses]\n\n def is_numeric(text):\n text = text.replace(',', '').replace('$', '').strip()\n try:\n float(text)\n return True\n except:\n return False\n return [0.5 if is_numeric(r) else 0.0 for r in extracted]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "correctness_reward (from [correctness_reward, format_reward, numeric_answer_reward])",
"code": "def correctness_reward(prompts, completions, answer, **kwargs):\n \"\"\"Reward function for correct answers\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted = [extract_xml_answer(r) for r in responses]\n return [2.0 if r.strip() == a.strip() else 0.0 for r, a in zip(extracted, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "format_reward (from [correctness_reward, format_reward, numeric_answer_reward])",
"code": "def format_reward(completions, **kwargs):\n \"\"\"Reward function for proper format\"\"\"\n pattern = '<reasoning>[\\\\s\\\\S]*?</reasoning>\\\\s*<answer>[\\\\s\\\\S]*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [bool(re.search(pattern, r)) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "numeric_answer_reward (from [correctness_reward, format_reward, numeric_answer_reward])",
"code": "def numeric_answer_reward(completions, **kwargs):\n \"\"\"Reward function that checks if the answer is numeric\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted = [extract_xml_answer(r) for r in responses]\n\n def is_numeric(text):\n text = text.replace(',', '').replace('$', '').strip()\n try:\n float(text)\n return True\n except:\n return False\n return [0.5 if is_numeric(r) else 0.0 for r in extracted]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "args.model_name",
"reward_funcs": "[correctness_reward, format_reward, numeric_answer_reward]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": "peft_config",
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": "tokenizer",
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
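The numeric check in the row above strips `,` and `$` before probing with `float()`. As a standalone sketch:

```python
def is_numeric(text: str) -> bool:
    # Mirrors the check in numeric_answer_reward above: tolerates "$1,234.5".
    text = text.replace(",", "").replace("$", "").strip()
    try:
        float(text)
        return True
    except ValueError:
        return False
```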
jianzhnie/Open-R1 | https://github.com/jianzhnie/Open-R1 | Apache License 2.0 | examples/grpo_gsm8k.py | https://github.com/jianzhnie/Open-R1/blob/cbcaa40cf795a99a394db4806685018d06452c23/examples/grpo_gsm8k.py | 2025-03-24T10:16:53.561361 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": "peft_config",
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
jiangqx0225/llm_run_file | https://github.com/jiangqx0225/llm_run_file | Unknown | unsloth_grpo.py | https://github.com/jiangqx0225/llm_run_file/blob/c698e03108035ef3511df5afcc7f2029d25e90a7/unsloth_grpo.py | 2025-03-24T10:16:55.859060 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
idanshen/multi_ref | https://github.com/idanshen/multi_ref | Unknown | gsm8k_grpo.py | https://github.com/idanshen/multi_ref/blob/53c9484f9963d0eb6c320eb7741ca08018aaa350/gsm8k_grpo.py | 2025-03-24T10:17:00.417588 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": "ref_model",
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
yhfgyyf/GRPO_script | https://github.com/yhfgyyf/GRPO_script | Unknown | grpo_bleu.py | https://github.com/yhfgyyf/GRPO_script/blob/af9b2066201156eb07b270f60ccb50b552611249/grpo_bleu.py | 2025-03-24T10:17:02.749926 | [
{
"name": "assistant_format_count_reward (from list item 0)",
"code": "def assistant_format_count_reward(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_assistant_format(c) for c in contents]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "assistant_format_reward_func (from list item 1)",
"code": "def assistant_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has the correct [ASSISTANT]...[/ASSISTANT] format.\"\"\"\n pattern = '^\\\\[ASSISTANT\\\\](.*?)\\\\[/ASSISTANT\\\\]$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r, re.DOTALL) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "soft_assistant_format_reward_func (from list item 2)",
"code": "def soft_assistant_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion contains [ASSISTANT]...[/ASSISTANT] tags.\"\"\"\n pattern = '\\\\[ASSISTANT\\\\](.*?)\\\\[/ASSISTANT\\\\]'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.search(pattern, r, re.DOTALL) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "bleu_reward_func (from list item 3)",
"code": "def bleu_reward_func(prompts, completions, **kwargs) -> list[float]:\n \"\"\"\n 计算BLEU奖励分数\n Args:\n prompts: 输入提示\n completions: 模型生成的完成\n Returns:\n float: 奖励分数\n \"\"\"\n responses = []\n for completion in completions:\n if isinstance(completion, list):\n response = completion[0]['content'].replace('[/ASSISTANT]', '')\n else:\n response = completion['content'].replace('[/ASSISTANT]', '')\n if isinstance(response, list):\n response = ' '.join(response)\n responses.append(response)\n query = prompts[0][-1]['content']\n if isinstance(query, list):\n query = ' '.join(query)\n rewards = []\n for response in responses:\n try:\n bleu_score = calculate_bleu4(response, query)\n if bleu_score < 0.4:\n reward = 0.0\n elif bleu_score == 1.0:\n reward = 2.0\n else:\n reward = 1.0 + (bleu_score - 0.4) / 0.6\n rewards.append(reward)\n except Exception as e:\n print(f'Error calculating BLEU score: {e}')\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "assistant_format_count_reward (from [assistant_format_count_reward, assistant_format_reward_func, soft_assistant_format_reward_func, bleu_reward_func])",
"code": "def assistant_format_count_reward(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_assistant_format(c) for c in contents]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "assistant_format_reward_func (from [assistant_format_count_reward, assistant_format_reward_func, soft_assistant_format_reward_func, bleu_reward_func])",
"code": "def assistant_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has the correct [ASSISTANT]...[/ASSISTANT] format.\"\"\"\n pattern = '^\\\\[ASSISTANT\\\\](.*?)\\\\[/ASSISTANT\\\\]$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r, re.DOTALL) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "soft_assistant_format_reward_func (from [assistant_format_count_reward, assistant_format_reward_func, soft_assistant_format_reward_func, bleu_reward_func])",
"code": "def soft_assistant_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion contains [ASSISTANT]...[/ASSISTANT] tags.\"\"\"\n pattern = '\\\\[ASSISTANT\\\\](.*?)\\\\[/ASSISTANT\\\\]'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.search(pattern, r, re.DOTALL) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "bleu_reward_func (from [assistant_format_count_reward, assistant_format_reward_func, soft_assistant_format_reward_func, bleu_reward_func])",
"code": "def bleu_reward_func(prompts, completions, **kwargs) -> list[float]:\n \"\"\"\n 计算BLEU奖励分数\n Args:\n prompts: 输入提示\n completions: 模型生成的完成\n Returns:\n float: 奖励分数\n \"\"\"\n responses = []\n for completion in completions:\n if isinstance(completion, list):\n response = completion[0]['content'].replace('[/ASSISTANT]', '')\n else:\n response = completion['content'].replace('[/ASSISTANT]', '')\n if isinstance(response, list):\n response = ' '.join(response)\n responses.append(response)\n query = prompts[0][-1]['content']\n if isinstance(query, list):\n query = ' '.join(query)\n rewards = []\n for response in responses:\n try:\n bleu_score = calculate_bleu4(response, query)\n if bleu_score < 0.4:\n reward = 0.0\n elif bleu_score == 1.0:\n reward = 2.0\n else:\n reward = 1.0 + (bleu_score - 0.4) / 0.6\n rewards.append(reward)\n except Exception as e:\n print(f'Error calculating BLEU score: {e}')\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[assistant_format_count_reward, assistant_format_reward_func, soft_assistant_format_reward_func, bleu_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
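The BLEU row above converts raw scores to rewards piecewise: anything under 0.4 earns 0, a perfect match earns 2.0, and the remainder is rescaled linearly into [1.0, 2.0]. Isolated as a sketch (the repo's `calculate_bleu4` is not shown in this dataset):

```python
def bleu_to_reward(bleu_score: float) -> float:
    # Piecewise mapping used by bleu_reward_func above.
    if bleu_score < 0.4:
        return 0.0                         # too dissimilar: no reward
    if bleu_score == 1.0:
        return 2.0                         # exact match: maximum reward
    return 1.0 + (bleu_score - 0.4) / 0.6  # rescales [0.4, 1.0) onto [1.0, 2.0)
```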
Legionof7/GRPOdx | https://github.com/Legionof7/GRPOdx | Unknown | Tic Tac Toe Game.py | https://github.com/Legionof7/GRPOdx/blob/b039810b169606493a1e3202dbf1b4d9cec02942/Tic%20Tac%20Toe%20Game.py | 2025-03-24T10:17:18.716467 | [
{
"name": "game_reward (from list item 0)",
"code": "def game_reward(completions, **kwargs) -> list[float]:\n return [0.0 for completion in completions]",
"label": "{\"label\": \"LENGTH_BASED\"}"
},
{
"name": "game_reward (from [game_reward])",
"code": "def game_reward(completions, **kwargs) -> list[float]:\n return [0.0 for completion in completions]",
"label": "{\"label\": \"LENGTH_BASED\"}"
}
] | [
{
"trainer_type": "CustomGRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[game_reward]",
"args": "training_args",
"train_dataset": "train_dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": "TicTacToeGame",
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
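The Tic Tac Toe row is the only one that passes a `game_object` into a trainer subclass (`CustomGRPOTrainer`). That subclass is not included in this dataset, so the sketch below is a hypothetical illustration of how such a kwarg could be threaded through, not the repo's implementation:

```python
from trl import GRPOTrainer

class CustomGRPOTrainer(GRPOTrainer):
    """Hypothetical sketch; the real CustomGRPOTrainer in Legionof7/GRPOdx may differ."""

    def __init__(self, *args, game_object=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.game = game_object  # environment state that reward logic can query
```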
avinashreddydev/low-thinking | https://github.com/avinashreddydev/low-thinking | Apache License 2.0 | src/grpo_train_math.py | https://github.com/avinashreddydev/low-thinking/blob/95a2a8b79d7a863174e5ed33ed199a4116490f8a/src/grpo_train_math.py | 2025-03-24T10:17:20.948825 | [
{
"name": "format_reward_func (from list item 0)",
"code": "def format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has the correct format.\"\"\"\n pattern = '^<reasoning>(?:(?!</reasoning>).)*</reasoning>\\\\n<answer>(?:(?!</answer>).)*</answer>$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [bool(re.match(pattern, r, re.DOTALL)) for r in responses]\n return [1.0 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "correctness_reward_func (from list item 1)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the answer is correct.\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [strip_string(extract_xml_answer(r)) for r in responses]\n reasoning_tokens = [extract_reasoning_tokens(r) for r in responses]\n reasoning_lengths = [len(rt.split()) for rt in reasoning_tokens if rt]\n if reasoning_lengths:\n avg_reasoning_length = sum(reasoning_lengths) / len(reasoning_lengths)\n max_reasoning_length = max(reasoning_lengths)\n min_reasoning_length = min(reasoning_lengths)\n else:\n avg_reasoning_length = 0\n max_reasoning_length = 0\n min_reasoning_length = 0\n wandb.log({'avg_reasoning_tokens_length': avg_reasoning_length, 'max_reasoning_tokens_length': max_reasoning_length, 'min_reasoning_tokens_length': min_reasoning_length})\n answer = [strip_string(a) for a in answer]\n print(f'\\n\\n===============================================================\\n\\n\\nCorrect Answer:\\n{answer[0]}\\n\\n---------------------------------------------------------------\\n\\n\\nExtracted: {extracted_responses[0]}\\n\\nCorrectness of all {len(completions)} responses: ' + ''.join(('Y' if RESPONSE_COMPARATOR['di-zhang-fdu/MATH500'](r, a) == True else 'N' for r, a in zip(extracted_responses, answer))))\n reward_list = [2.0 if RESPONSE_COMPARATOR['di-zhang-fdu/MATH500'](r, a) == True else 0.0 for r, a in zip(extracted_responses, answer)]\n return reward_list",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "format_reward_func (from [format_reward_func, correctness_reward_func])",
"code": "def format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has the correct format.\"\"\"\n pattern = '^<reasoning>(?:(?!</reasoning>).)*</reasoning>\\\\n<answer>(?:(?!</answer>).)*</answer>$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [bool(re.match(pattern, r, re.DOTALL)) for r in responses]\n return [1.0 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "correctness_reward_func (from [format_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the answer is correct.\"\"\"\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [strip_string(extract_xml_answer(r)) for r in responses]\n reasoning_tokens = [extract_reasoning_tokens(r) for r in responses]\n reasoning_lengths = [len(rt.split()) for rt in reasoning_tokens if rt]\n if reasoning_lengths:\n avg_reasoning_length = sum(reasoning_lengths) / len(reasoning_lengths)\n max_reasoning_length = max(reasoning_lengths)\n min_reasoning_length = min(reasoning_lengths)\n else:\n avg_reasoning_length = 0\n max_reasoning_length = 0\n min_reasoning_length = 0\n wandb.log({'avg_reasoning_tokens_length': avg_reasoning_length, 'max_reasoning_tokens_length': max_reasoning_length, 'min_reasoning_tokens_length': min_reasoning_length})\n answer = [strip_string(a) for a in answer]\n print(f'\\n\\n===============================================================\\n\\n\\nCorrect Answer:\\n{answer[0]}\\n\\n---------------------------------------------------------------\\n\\n\\nExtracted: {extracted_responses[0]}\\n\\nCorrectness of all {len(completions)} responses: ' + ''.join(('Y' if RESPONSE_COMPARATOR['di-zhang-fdu/MATH500'](r, a) == True else 'N' for r, a in zip(extracted_responses, answer))))\n reward_list = [2.0 if RESPONSE_COMPARATOR['di-zhang-fdu/MATH500'](r, a) == True else 0.0 for r, a in zip(extracted_responses, answer)]\n return reward_list",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[format_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset_train",
"eval_dataset": "dataset_test",
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
zzlzero/CodeLess | https://github.com/zzlzero/CodeLess | Unknown | run_grpo.py | https://github.com/zzlzero/CodeLess/blob/b2cb84a16a1764945fd93f4ca6d7fb39a55858b1/run_grpo.py | 2025-03-24T10:17:23.255479 | [
{
"name": "len_reward_func (from list item 0)",
"code": "def len_reward_func(completions, **kwargs):\n rewards = []\n max_len = max((len(completion) for completion in completions))\n for completion in completions:\n generation = task.postprocess_generation(completion)\n rewards.append(0.5 - (len(completion) - len(generation)) / (max_len - len(generation) + 1e-06))\n return rewards",
"label": "{\"label\": \"LENGTH_BASED\"}"
},
{
"name": "correct_code_reward_func (from list item 1)",
"code": "def correct_code_reward_func(prompts, completions, test_list, **kwargs):\n generations = []\n references = []\n rewards = []\n for prompt, test, completion in zip(prompts, test_list, completions):\n generation = task.postprocess_generation(completion)\n reference = '\\n'.join(test)\n test_program = generation + '\\n' + reference\n import tempfile\n import subprocess\n with tempfile.NamedTemporaryFile(suffix='.py', delete=False) as temp_file:\n temp_filename = temp_file.name\n temp_file.write(test_program.encode('utf-8'))\n try:\n result = subprocess.run(['python', temp_filename], capture_output=True, text=True, timeout=10)\n if result.returncode == 0:\n rewards.append(1.0)\n if torch.rand(1).item() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'success_code_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(f'Prompt:\\n{prompt}\\n\\nGeneration:\\n{generation}\\n\\nTest:\\n{reference}\\n')\n else:\n print(f'Test failed with error: {result.stderr}')\n rewards.append(0.0)\n except subprocess.TimeoutExpired:\n print('Test timeout: 执行代码超时')\n rewards.append(0.0)\n except Exception as e:\n print(f'Execution error: {str(e)}')\n rewards.append(0.0)\n finally:\n try:\n os.unlink(temp_filename)\n except:\n pass\n print(f'Reward: {rewards[-1]}')\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "len_reward_func (from [len_reward_func, correct_code_reward_func])",
"code": "def len_reward_func(completions, **kwargs):\n rewards = []\n max_len = max((len(completion) for completion in completions))\n for completion in completions:\n generation = task.postprocess_generation(completion)\n rewards.append(0.5 - (len(completion) - len(generation)) / (max_len - len(generation) + 1e-06))\n return rewards",
"label": "{\"label\": \"LENGTH_BASED\"}"
},
{
"name": "correct_code_reward_func (from [len_reward_func, correct_code_reward_func])",
"code": "def correct_code_reward_func(prompts, completions, test_list, **kwargs):\n generations = []\n references = []\n rewards = []\n for prompt, test, completion in zip(prompts, test_list, completions):\n generation = task.postprocess_generation(completion)\n reference = '\\n'.join(test)\n test_program = generation + '\\n' + reference\n import tempfile\n import subprocess\n with tempfile.NamedTemporaryFile(suffix='.py', delete=False) as temp_file:\n temp_filename = temp_file.name\n temp_file.write(test_program.encode('utf-8'))\n try:\n result = subprocess.run(['python', temp_filename], capture_output=True, text=True, timeout=10)\n if result.returncode == 0:\n rewards.append(1.0)\n if torch.rand(1).item() < 0.1:\n os.makedirs('completion_samples', exist_ok=True)\n log_file = os.path.join('completion_samples', 'success_code_samples.txt')\n with open(log_file, 'a') as f:\n f.write(f'\\n\\n==============\\n')\n f.write(f'Prompt:\\n{prompt}\\n\\nGeneration:\\n{generation}\\n\\nTest:\\n{reference}\\n')\n else:\n print(f'Test failed with error: {result.stderr}')\n rewards.append(0.0)\n except subprocess.TimeoutExpired:\n print('Test timeout: 执行代码超时')\n rewards.append(0.0)\n except Exception as e:\n print(f'Execution error: {str(e)}')\n rewards.append(0.0)\n finally:\n try:\n os.unlink(temp_filename)\n except:\n pass\n print(f'Reward: {rewards[-1]}')\n return rewards",
"label": "{\"label\": \"COMPUTATIONAL\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model_args.model_name_or_path",
"reward_funcs": "[len_reward_func, correct_code_reward_func]",
"args": "training_args",
"train_dataset": "train_dataset",
"eval_dataset": "test_dataset",
"peft_config": "get_peft_config(model_args)",
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
menloresearch/visual-thinker | https://github.com/menloresearch/visual-thinker | Unknown | training/grpo_stage.py | https://github.com/menloresearch/visual-thinker/blob/bb74ee6fbf72b34321edcf2bb958921f694ab622/training/grpo_stage.py | 2025-03-24T10:17:27.798388 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> List[float]:\n \"\"\"\n Reward function based on proper XML tag usage.\n \n Args:\n completions: Model completions\n \n Returns:\n List of reward scores\n \"\"\"\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 1)",
"code": "def int_reward_func(completions, **kwargs) -> List[float]:\n \"\"\"\n Reward function that checks if responses contain valid direction tokens.\n \n Args:\n completions: Model completions\n \n Returns:\n List of reward scores\n \"\"\"\n allowed_tokens = {'<|up|>', '<|down|>', '<|right|>', '<|left|>'}\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n\n def is_valid_sequence(seq):\n seq_no_whitespace = re.sub('\\\\s+', '', seq)\n if not seq_no_whitespace:\n return False\n found_tokens = re.findall('<\\\\|(?:up|down|right|left)\\\\|>', seq_no_whitespace)\n reconstructed = ''.join(found_tokens)\n if reconstructed != seq_no_whitespace:\n return False\n return all((token in allowed_tokens for token in found_tokens))\n return [1.0 if is_valid_sequence(r) else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "correctness_reward_func (from list item 2)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> List[float]:\n \"\"\"\n Reward function that checks correctness of answers.\n \n Args:\n prompts: Input prompts\n completions: Model completions\n answer: Ground truth answers\n \n Returns:\n List of reward scores\n \"\"\"\n rewards = []\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n logger.debug('-' * 20)\n logger.debug(f'Question:\\n{q}')\n logger.debug(f'\\nAnswer:\\n{answer[0]}')\n logger.debug(f'\\nResponse:\\n{responses[0]}')\n logger.debug(f'\\nExtracted:\\n{extracted_responses[0]}')\n for r, a in zip(extracted_responses, answer):\n if r == a:\n direction = r.split('|><|')\n rewards.append(len(direction) * 0.2)\n else:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> List[float]:\n \"\"\"\n Reward function based on proper XML tag usage.\n \n Args:\n completions: Model completions\n \n Returns:\n List of reward scores\n \"\"\"\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> List[float]:\n \"\"\"\n Reward function that checks if responses contain valid direction tokens.\n \n Args:\n completions: Model completions\n \n Returns:\n List of reward scores\n \"\"\"\n allowed_tokens = {'<|up|>', '<|down|>', '<|right|>', '<|left|>'}\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n\n def is_valid_sequence(seq):\n seq_no_whitespace = re.sub('\\\\s+', '', seq)\n if not seq_no_whitespace:\n return False\n found_tokens = re.findall('<\\\\|(?:up|down|right|left)\\\\|>', seq_no_whitespace)\n reconstructed = ''.join(found_tokens)\n if reconstructed != seq_no_whitespace:\n return False\n return all((token in allowed_tokens for token in found_tokens))\n return [1.0 if is_valid_sequence(r) else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> List[float]:\n \"\"\"\n Reward function that checks correctness of answers.\n \n Args:\n prompts: Input prompts\n completions: Model completions\n answer: Ground truth answers\n \n Returns:\n List of reward scores\n \"\"\"\n rewards = []\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n logger.debug('-' * 20)\n logger.debug(f'Question:\\n{q}')\n logger.debug(f'\\nAnswer:\\n{answer[0]}')\n logger.debug(f'\\nResponse:\\n{responses[0]}')\n logger.debug(f'\\nExtracted:\\n{extracted_responses[0]}')\n for r, a in zip(extracted_responses, answer):\n if r == a:\n direction = r.split('|><|')\n rewards.append(len(direction) * 0.2)\n else:\n rewards.append(0.0)\n return rewards",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
vpareek2/llm-experiments | https://github.com/vpareek2/llm-experiments | MIT License | llama-r1/grpo_trl.py | https://github.com/vpareek2/llm-experiments/blob/188644b99675411b0368d92c0cd29ddec0a0821f/llama-r1/grpo_trl.py | 2025-03-24T10:17:30.134516 | [
{
"name": "xmlcount_reward_func (from list item 0)",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from list item 1)",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from list item 2)",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from list item 3)",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from list item 4)",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "xmlcount_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def xmlcount_reward_func(completions, **kwargs) -> list[float]:\n contents = [completion[0]['content'] for completion in completions]\n return [count_xml(c) for c in contents]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "soft_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def soft_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '<reasoning>.*?</reasoning>\\\\s*<answer>.*?</answer>'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "strict_format_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def strict_format_reward_func(completions, **kwargs) -> list[float]:\n \"\"\"Reward function that checks if the completion has a specific format.\"\"\"\n pattern = '^<reasoning>\\\\n.*?\\\\n</reasoning>\\\\n<answer>\\\\n.*?\\\\n</answer>\\\\n$'\n responses = [completion[0]['content'] for completion in completions]\n matches = [re.match(pattern, r) for r in responses]\n return [0.5 if match else 0.0 for match in matches]",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "int_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def int_reward_func(completions, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n extracted_responses = [extract_xml_answer(r) for r in responses]\n return [0.5 if r.isdigit() else 0.0 for r in extracted_responses]",
"label": "{\"label\": \"COMPUTATIONAL\"}"
},
{
"name": "correctness_reward_func (from [xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func])",
"code": "def correctness_reward_func(prompts, completions, answer, **kwargs) -> list[float]:\n responses = [completion[0]['content'] for completion in completions]\n q = prompts[0][-1]['content']\n extracted_responses = [extract_xml_answer(r) for r in responses]\n print('-' * 20, f'Question:\\n{q}', f'\\nAnswer:\\n{answer[0]}', f'\\nResponse:\\n{responses[0]}', f'\\nExtracted:\\n{extracted_responses[0]}')\n return [2.0 if r == a else 0.0 for r, a in zip(extracted_responses, answer)]",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[xmlcount_reward_func, soft_format_reward_func, strict_format_reward_func, int_reward_func, correctness_reward_func]",
"args": "training_args",
"train_dataset": "dataset",
"eval_dataset": null,
"peft_config": null,
"reward_processing_classes": null,
"processing_class": "tokenizer",
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
bbirdxr/GRPO-Qwen2.5-7B-Medicine | https://github.com/bbirdxr/GRPO-Qwen2.5-7B-Medicine | Unknown | trl_grpo.py | https://github.com/bbirdxr/GRPO-Qwen2.5-7B-Medicine/blob/7617ccef6880d8cc13137df8def964b619143665/trl_grpo.py | 2025-03-24T10:17:32.445182 | [
{
"name": "reward_think_ratio (from list item 0)",
"code": "def reward_think_ratio(completions, **kwargs):\n scores = []\n for completion in completions:\n think_count = completion.count('<think>')\n think_end_count = completion.count('</think>')\n score = -abs(think_count - think_end_count) * 0.5 - abs(think_count - 1) - abs(think_end_count - 1)\n other_tags_count = sum((1 for tag in completion.split('<') if tag and tag.split('>')[0] not in ['think', '/think']))\n score -= other_tags_count\n scores.append(score)\n return scores",
"label": "{\"label\": \"LENGTH_BASED\"}"
},
{
"name": "similarity_sentence_score (from list item 1)",
"code": "def similarity_sentence_score(completions, target, **kwargs):\n scores = []\n for c, t in zip(completions, target):\n c = extract_final_answer(c)\n t = extract_final_answer(t)\n embeddings1 = similarity_model.encode(c)\n embeddings2 = similarity_model.encode(t)\n cosine_scores = cos_sim(embeddings1, embeddings2)\n scores.append(cosine_scores)\n return scores",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "correct_score (from list item 2)",
"code": "def correct_score(completions, target, type, clear_answer, **kwargs):\n scores = []\n for c, t, ty, a in zip(completions, target, type, clear_answer):\n print(c)\n c = extract_final_answer(c)\n score = 0.0\n if a in c:\n score += 1.0\n else:\n score += 0.0\n if '最终答案' in c:\n score += 0.5\n else:\n score += 0.0\n scores.append(score)\n return scores",
"label": "{\"label\": \"ANSWER_TYPE_VALIDATION\"}"
},
{
"name": "reward_think_ratio (from [reward_think_ratio, similarity_sentence_score, correct_score])",
"code": "def reward_think_ratio(completions, **kwargs):\n scores = []\n for completion in completions:\n think_count = completion.count('<think>')\n think_end_count = completion.count('</think>')\n score = -abs(think_count - think_end_count) * 0.5 - abs(think_count - 1) - abs(think_end_count - 1)\n other_tags_count = sum((1 for tag in completion.split('<') if tag and tag.split('>')[0] not in ['think', '/think']))\n score -= other_tags_count\n scores.append(score)\n return scores",
"label": "{\"label\": \"FORMAT_ADHERENCE\"}"
},
{
"name": "similarity_sentence_score (from [reward_think_ratio, similarity_sentence_score, correct_score])",
"code": "def similarity_sentence_score(completions, target, **kwargs):\n scores = []\n for c, t in zip(completions, target):\n c = extract_final_answer(c)\n t = extract_final_answer(t)\n embeddings1 = similarity_model.encode(c)\n embeddings2 = similarity_model.encode(t)\n cosine_scores = cos_sim(embeddings1, embeddings2)\n scores.append(cosine_scores)\n return scores",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
},
{
"name": "correct_score (from [reward_think_ratio, similarity_sentence_score, correct_score])",
"code": "def correct_score(completions, target, type, clear_answer, **kwargs):\n scores = []\n for c, t, ty, a in zip(completions, target, type, clear_answer):\n print(c)\n c = extract_final_answer(c)\n score = 0.0\n if a in c:\n score += 1.0\n else:\n score += 0.0\n if '最终答案' in c:\n score += 0.5\n else:\n score += 0.0\n scores.append(score)\n return scores",
"label": "{\"label\": \"ANSWER_CORRECTNESS\"}"
}
] | [
{
"trainer_type": "GRPOTrainer",
"args": [],
"kwargs": {
"model": "model",
"reward_funcs": "[reward_think_ratio, similarity_sentence_score, correct_score]",
"args": "training_args",
"train_dataset": "train_dataset",
"eval_dataset": "test_dataset",
"peft_config": "peft_config",
"reward_processing_classes": null,
"processing_class": null,
"null": null,
"formatting_func": null,
"callbacks": null,
"tokenizer": null,
"arguments": null,
"vinference": null,
"data_tokenize_fn": null,
"maze_object": null,
"ref_model": null,
"game_object": null,
"use_wandb": null,
"tools": null,
"reward_weights": null
}
}
] |
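For orientation, below is a minimal, self-contained sketch of how a `trainer_usages` record like the ones above maps onto an actual TRL `GRPOTrainer` call. Only the keyword names that appear in the records (`model`, `reward_funcs`, `args`, `train_dataset`) are taken from the data; the model id, the toy reward function, the one-row dataset, and the `GRPOConfig` values are hypothetical placeholders, not drawn from any indexed repository.

```python
# Minimal sketch, assuming TRL's GRPOTrainer API; every name marked
# "placeholder" below is illustrative and does not come from the dataset rows.
from datasets import Dataset
from trl import GRPOConfig, GRPOTrainer

def format_reward(completions, **kwargs):
    # Toy reward in the FORMAT_ADHERENCE style: 1.0 if the completion
    # contains an <answer> tag, 0.0 otherwise.
    return [1.0 if "<answer>" in c else 0.0 for c in completions]

train_dataset = Dataset.from_dict({"prompt": ["What is 2 + 2?"]})  # placeholder

training_args = GRPOConfig(output_dir="grpo-demo")  # assumed minimal config

trainer = GRPOTrainer(
    model="Qwen/Qwen2.5-0.5B-Instruct",  # placeholder causal-LM checkpoint
    reward_funcs=[format_reward],        # corresponds to the reward_funcs field
    args=training_args,
    train_dataset=train_dataset,
)
trainer.train()
```

Passing a list to `reward_funcs` is what produces rows such as `[xmlcount_reward_func, int_reward_func, correctness_reward_func]` above; TRL sums the per-function scores, optionally weighted via the `reward_weights` field, into a single reward per completion.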