From c6663cdb81a8646a18c7c10c04490ab79a9d4aea Mon Sep 17 00:00:00 2001 From: Zafir Stojanovski Date: Fri, 28 Mar 2025 09:45:17 +0100 Subject: [PATCH 1/5] fix(training): Prepend `<think>` token in format reward (#396) * prepend think token in format reward * pre commit + fix some default vals * add checkpoint config --- .gitignore | 5 +++++ training/configs/llama3.1_1b_grpo.yaml | 3 +++ training/configs/qwen2.5_1.5b_grpo.yaml | 6 +++++- training/trainers/ray_grpo_trainer.py | 3 +++ 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index d1e0d496..4846b2a0 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,8 @@ htmlcov/ # Jupyter Notebook .ipynb_checkpoints/ .virtual_documents/ + +# logs +wandb/ +outputs/ +*.log diff --git a/training/configs/llama3.1_1b_grpo.yaml b/training/configs/llama3.1_1b_grpo.yaml index 74200cad..34a5f8d3 100644 --- a/training/configs/llama3.1_1b_grpo.yaml +++ b/training/configs/llama3.1_1b_grpo.yaml @@ -35,6 +35,7 @@ reward: format_reward: enable: True scaling_factor: 0.2 + prepend_think_token: False # Set to True only when the tokenizer's prompt template pre-fills the generation with <think>, such as in the case of (distilled) r1 models length_reward: enable: True scaling_factor: 0.2 @@ -75,6 +76,8 @@ actor_rollout_ref: ppo_epochs: 1 shuffle: False ulysses_sequence_parallel_size: 1 # sp size + checkpoint: + contents: ['model', 'hf_model', 'optimizer', 'extra'] optim: lr: 1e-6 lr_warmup_steps_ratio: 0. 
# the total steps will be injected during runtime diff --git a/training/configs/qwen2.5_1.5b_grpo.yaml b/training/configs/qwen2.5_1.5b_grpo.yaml index 3ad49d60..1bee4782 100644 --- a/training/configs/qwen2.5_1.5b_grpo.yaml +++ b/training/configs/qwen2.5_1.5b_grpo.yaml @@ -35,6 +35,7 @@ reward: format_reward: enable: True scaling_factor: 0.2 + prepend_think_token: False # Set to True only when the tokenizer's prompt template pre-fills the generation with <think>, such as in the case of (distilled) r1 models length_reward: enable: True scaling_factor: 0.2 @@ -75,6 +76,8 @@ actor_rollout_ref: ppo_epochs: 1 shuffle: False ulysses_sequence_parallel_size: 1 # sp size + checkpoint: + contents: ['model', 'hf_model', 'optimizer', 'extra'] optim: lr: 1e-6 lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime @@ -116,6 +119,7 @@ actor_rollout_ref: tensor_model_parallel_size: 2 max_num_batched_tokens: 8192 max_num_seqs: 1024 + max_model_len: 1024 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: 160 log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} @@ -144,7 +148,7 @@ trainer: total_epochs: 10 total_training_steps: null project_name: rg-test - experiment_name: verl_grpo_llama3.1_1b + experiment_name: verl_grpo_qwen2.5_1.5b logger: [ 'console', 'wandb' ] val_generations_to_log_to_wandb: 0 nnodes: 1 diff --git a/training/trainers/ray_grpo_trainer.py b/training/trainers/ray_grpo_trainer.py index a1d17824..869d2dd8 100644 --- a/training/trainers/ray_grpo_trainer.py +++ b/training/trainers/ray_grpo_trainer.py @@ -31,6 +31,7 @@ class RayGRPOTrainer(RayPPOTrainer): self.max_output_length = max_output_length self.format_reward_scaling_factor = config.reward.format_reward.scaling_factor + self.format_reward_prepend_think_token = config.reward.format_reward.prepend_think_token self.length_reward_scaling_factor = config.reward.length_reward.scaling_factor train_reward_fn = 
lambda data: self._score_output(data, num_examine=0) @@ -99,6 +100,8 @@ class RayGRPOTrainer(RayPPOTrainer): def _compute_format_reward(self, solution_str: str) -> float: """Reward use of exactly one correctly structured <think> and <answer> block.""" + if self.format_reward_prepend_think_token: + solution_str = "<think>" + solution_str scaling_factor = self.format_reward_scaling_factor # check <think> and <answer> blocks are present pattern = r"\s*<think>.*?</think>\s*<answer>.*?</answer>" From 8c45571a48ca259dc6d7104170b39284b7cfa476 Mon Sep 17 00:00:00 2001 From: Zafir Stojanovski Date: Tue, 1 Apr 2025 00:08:39 +0200 Subject: [PATCH 2/5] visualize heatmap sorted by overall performance (#397) --- eval/visualize_results.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/eval/visualize_results.py b/eval/visualize_results.py index 18f0d084..df2824ff 100644 --- a/eval/visualize_results.py +++ b/eval/visualize_results.py @@ -385,7 +385,12 @@ def create_performance_heatmap(summaries: Dict[str, Dict[str, Any]], categories: for category, datasets in sorted(categories.items()): all_datasets.extend(sorted(datasets)) - models = list(summaries.keys()) + # Sort models by overall performance + overall_scores = {} + for model_name, summary in summaries.items(): + scores = list(summary["dataset_best_scores"].values()) + overall_scores[model_name] = np.mean(scores) + models = [item[0] for item in sorted(overall_scores.items(), key=lambda x: x[1], reverse=True)] # Create score matrix score_matrix = np.zeros((len(models), len(all_datasets))) From ea10a0f932f99562d62e6f0811a939b5d182f0e1 Mon Sep 17 00:00:00 2001 From: Oliver Stanley Date: Tue, 1 Apr 2025 09:51:36 +0100 Subject: [PATCH 3/5] update task count in readme (#400) * update task count in readme * fix link --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 59ca1fac..489b726c 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ **Reasoning Gym** is a community-created Python library of procedural dataset 
generators and algorithmically verifiable reasoning environments for training reasoning models with reinforcement learning (RL). The goal is to generate virtually infinite training data with adjustable complexity. -It currently provides **more than 80** tasks over many domains, including but not limited to _algebra_, _arithmetic_, _computation_, _cognition_, _geometry_, _graph theory_, _logic_, and many common _games_. +It currently provides **more than 100** tasks over many domains, including but not limited to _algebra_, _arithmetic_, _computation_, _cognition_, _geometry_, _graph theory_, _logic_, and many common _games_. Some tasks have a single correct answer, while others, such as [Rubik‘s Cube](https://en.wikipedia.org/wiki/Rubik%27s_Cube) and [Countdown](), have many correct solutions. To support this, we provide a standard interface for procedurally verifying solutions. @@ -24,7 +24,7 @@ _Note that this project is currently under active development, and the version p ## 🛠️ Development -For development setup, see [CONTRIBUTING.md](CONTRIBUTING.md#delevloper-setup). +For development setup, see [CONTRIBUTING.md](CONTRIBUTING.md#development-setup). ## ✨ Example Usage From cd85c2d632944f24e5021b36569ea3993bdf107e Mon Sep 17 00:00:00 2001 From: vncntt <85441325+vncntt@users.noreply.github.com> Date: Tue, 1 Apr 2025 03:20:58 -0700 Subject: [PATCH 4/5] add knights knaves curriculum (#401) * add knights knaves curriculum * add metadata + width constraints --- reasoning_gym/logic/__init__.py | 3 +- reasoning_gym/logic/knights_knaves.py | 35 ++++++++++++++++++++++- tests/test_knights_knaves.py | 41 ++++++++++++++++++++++++++- 3 files changed, 76 insertions(+), 3 deletions(-) diff --git a/reasoning_gym/logic/__init__.py b/reasoning_gym/logic/__init__.py index c46321c2..99fb75cd 100644 --- a/reasoning_gym/logic/__init__.py +++ b/reasoning_gym/logic/__init__.py @@ -4,7 +4,7 @@ Logic tasks for training reasoning capabilities. 
from .aiw import AliceInWonderlandConfig, AliceInWonderlandCurriculum, AliceInWonderlandDataset from .circuit_logic import CircuitLogicConfig, CircuitLogicCurriculum, CircuitLogicDataset -from .knights_knaves import KnightsKnavesConfig, KnightsKnavesDataset +from .knights_knaves import KnightsKnavesConfig, KnightsKnavesCurriculum, KnightsKnavesDataset from .propositional_logic import PropositionalLogicConfig, PropositionalLogicCurriculum, PropositionalLogicDataset from .self_reference import SelfReferenceConfig, SelfReferenceCurriculum, SelfReferenceDataset from .syllogisms import SyllogismConfig, SyllogismDataset @@ -31,4 +31,5 @@ __all__ = [ "CircuitLogicCurriculum", "KnightsKnavesConfig", "KnightsKnavesDataset", + "KnightsKnavesCurriculum", ] diff --git a/reasoning_gym/logic/knights_knaves.py b/reasoning_gym/logic/knights_knaves.py index fe4f503f..09e3a4b7 100644 --- a/reasoning_gym/logic/knights_knaves.py +++ b/reasoning_gym/logic/knights_knaves.py @@ -8,6 +8,8 @@ import numpy as np from reasoning_gym.factory import ProceduralDataset, register_dataset +from ..coaching import BaseCurriculum, ScalarAttributeDefinition + DATASET_NAME = "knights_knaves" COMMON_NAMES = [ @@ -462,6 +464,11 @@ class KnightsKnavesDataset(ProceduralDataset): "solution": problem["solution"], "names": formatted["names"], "knight_knave_terms": formatted["knight_knave"], + "difficulty": { + "n_people": self.config.n_people, + "depth_constraint": self.config.depth_constraint, + "width_constraint": self.config.width_constraint, + }, } return {"question": question, "answer": answer, "metadata": metadata} @@ -515,4 +522,30 @@ class KnightsKnavesDataset(ProceduralDataset): return 0.0 -register_dataset(DATASET_NAME, KnightsKnavesDataset, KnightsKnavesConfig) +class KnightsKnavesCurriculum(BaseCurriculum): + def __init__(self): + super().__init__(KnightsKnavesCurriculum.__name__, KnightsKnavesConfig) + + self._define_attributes( + ScalarAttributeDefinition( + name="n_people", + levels=[2, 3, 4, 
5], + description="Number of people in the problem", + field_name="n_people", + ), + ScalarAttributeDefinition( + name="depth_constraint", + levels=[2, 3, 4, 5], + description="Depth of the problem", + field_name="depth_constraint", + ), + ScalarAttributeDefinition( + name="width_constraint", + levels=[2, 3, 4, 5], + description="Width of the problem", + field_name="width_constraint", + ), + ) + + +register_dataset(DATASET_NAME, KnightsKnavesDataset, KnightsKnavesConfig, KnightsKnavesCurriculum) diff --git a/tests/test_knights_knaves.py b/tests/test_knights_knaves.py index bcaf1fe7..1f8d2c1c 100644 --- a/tests/test_knights_knaves.py +++ b/tests/test_knights_knaves.py @@ -1,6 +1,6 @@ import pytest -from reasoning_gym.logic.knights_knaves import KnightsKnavesConfig, KnightsKnavesDataset +from reasoning_gym.logic.knights_knaves import KnightsKnavesConfig, KnightsKnavesCurriculum, KnightsKnavesDataset def test_config_validation(): @@ -234,3 +234,42 @@ def test_depth_constraint_specific_problem(): solutions = KnightsKnavesDataset.find_solution(test_statements) assert len(solutions) == 1, "Should have exactly one solution" assert solutions[0] == (True, False, False) + + +def test_curriculum(): + curriculum = KnightsKnavesCurriculum() + + assert len(curriculum.attributes) == 3 + + base_value = {"size": 150, "seed": 1} + + base_cfg = curriculum.generate_configuration(base_value) + + assert base_cfg.seed == 1 + assert base_cfg.size == 150 + assert base_cfg.n_people == 2 + assert base_cfg.depth_constraint == 2 + + # test incrementing attribute levels + curriculum.increment_attr_level("n_people") + curriculum.increment_attr_level("depth_constraint") + curriculum.increment_attr_level("width_constraint") + + increased_cfg = curriculum.generate_configuration(base_value) + assert increased_cfg.n_people == 3 + assert increased_cfg.depth_constraint == 3 + assert increased_cfg.width_constraint == 3 + # test decrementing attribute level + curriculum.decrement_attr_level("n_people") + 
partially_decreased_cfg = curriculum.generate_configuration(base_value) + assert partially_decreased_cfg.n_people == 2 + assert partially_decreased_cfg.depth_constraint == 3 + assert partially_decreased_cfg.width_constraint == 3 + + curriculum.increment_attr_level("n_people") + curriculum.increment_attr_level("depth_constraint") + curriculum.increment_attr_level("width_constraint") + increased_cfg = curriculum.generate_configuration(base_value) + assert increased_cfg.n_people == 3 + assert increased_cfg.depth_constraint == 4 + assert increased_cfg.width_constraint == 4 From 50846c35345212c8b95b765e1cab5b511b82c42d Mon Sep 17 00:00:00 2001 From: Zafir Stojanovski Date: Tue, 1 Apr 2025 13:01:15 +0200 Subject: [PATCH 5/5] fix(env): ARC 1D curriculum (#402) * Add arc_1d curriculum * Add difficulty to metadata * use range attribute instead of scalar --------- Co-authored-by: abdulhakeem Co-authored-by: Oliver Stanley --- reasoning_gym/arc/__init__.py | 3 ++- reasoning_gym/arc/arc_1d.py | 25 +++++++++++++++++++- tests/test_arc_1d.py | 44 ++++++++++++++++++++++++++++++++++- 3 files changed, 69 insertions(+), 3 deletions(-) diff --git a/reasoning_gym/arc/__init__.py b/reasoning_gym/arc/__init__.py index d422e098..fcb2b262 100644 --- a/reasoning_gym/arc/__init__.py +++ b/reasoning_gym/arc/__init__.py @@ -1,10 +1,11 @@ -from .arc_1d import Arc1DConfig, Arc1DDataset +from .arc_1d import Arc1DConfig, Arc1DCurriculum, Arc1DDataset from .arc_agi import ArcAgiConfig, ArcAgiDataset from .rearc import ReArcConfig, ReArcCurriculum, ReArcDataset __all__ = [ "Arc1DConfig", "Arc1DDataset", + "Arc1DCurriculum", "ArcAgiConfig", "ArcAgiDataset", "ReArcDataset", diff --git a/reasoning_gym/arc/arc_1d.py b/reasoning_gym/arc/arc_1d.py index 9a7ee78c..0dfc5bb0 100644 --- a/reasoning_gym/arc/arc_1d.py +++ b/reasoning_gym/arc/arc_1d.py @@ -2,6 +2,7 @@ from dataclasses import dataclass from random import Random from typing import Optional +from ..coaching import BaseCurriculum, 
RangeAttributeDefinition from ..dataset import ProceduralDataset from ..factory import register_dataset @@ -108,9 +109,31 @@ class Arc1DDataset(ProceduralDataset): "size": size, "train_examples": train_examples, "test_example": test_example, + "difficulty": { + "size": (self.config.min_size, self.config.max_size), + }, }, } +class Arc1DCurriculum(BaseCurriculum): + """Curriculum for ARC 1D tasks""" + + def __init__(self): + super().__init__(Arc1DCurriculum.__name__, Arc1DConfig) + + # Define attributes + self._define_attributes( + RangeAttributeDefinition( + name="size", + levels=[10, 25, 50, 100], + lower_field_name="min_size", + upper_field_name="max_size", + description="Grid size", + ensure_interval=True, + ) + ) + + # Register the dataset -register_dataset(DATASET_NAME, Arc1DDataset, Arc1DConfig) +register_dataset(DATASET_NAME, Arc1DDataset, Arc1DConfig, Arc1DCurriculum) diff --git a/tests/test_arc_1d.py b/tests/test_arc_1d.py index 1eeb0d09..8a267268 100644 --- a/tests/test_arc_1d.py +++ b/tests/test_arc_1d.py @@ -2,7 +2,7 @@ from random import Random import pytest -from reasoning_gym.arc import Arc1DConfig, Arc1DDataset +from reasoning_gym.arc import Arc1DConfig, Arc1DCurriculum, Arc1DDataset def test_arc_1d_config_validation(): @@ -41,6 +41,7 @@ def test_arc_1d_items(): assert "question" in item assert "answer" in item assert "metadata" in item + assert "difficulty" in item["metadata"] # Check metadata contents metadata = item["metadata"] @@ -142,3 +143,44 @@ def test_arc_1d_generate_all_tasks(): break assert i < 20 print(task_name, j, i, x) + + +def test_arc_1d_curriculum(): + """Test the curriculum for complex arithmetic.""" + curriculum = Arc1DCurriculum() + base_value = {"size": 150, "seed": 1} + + base_cfg: Arc1DCurriculum = curriculum.generate_configuration(base_value) + + assert base_cfg.seed == 1 + assert base_cfg.size == 150 + assert base_cfg.min_size == 10 + assert base_cfg.max_size == 25 + + # Test and validate increase in levels + 
curriculum.increment_attr_level("size") + + increased_cfg: Arc1DCurriculum = curriculum.generate_configuration(base_value) + assert increased_cfg.min_size == 10 + assert increased_cfg.max_size == 50 + + # Test and validate decrease in levels + curriculum.decrement_attr_level("size") + + decreased_cfg: Arc1DCurriculum = curriculum.generate_configuration(base_value) + assert decreased_cfg.min_size == 10 + assert decreased_cfg.max_size == 25 + + # Test upper bound boundary condition + for _ in range(10): + curriculum.increment_attr_level("size") + upper_bound_cfg: Arc1DCurriculum = curriculum.generate_configuration(base_value) + assert upper_bound_cfg.min_size == 10 + assert upper_bound_cfg.max_size == 100 + + # Test lower bound boundary condition + for _ in range(10): + curriculum.decrement_attr_level("size") + lower_bound_cfg: Arc1DCurriculum = curriculum.generate_configuration(base_value) + assert lower_bound_cfg.min_size == 10 + assert lower_bound_cfg.max_size == 25