mirror of
https://github.com/open-thought/reasoning-gym.git
synced 2026-04-19 12:58:07 +00:00
* added curriculum * readapted readme * corrected small errors * Delete eval/eval/r1/algorithmic/word_sorting.json * removed redundant argument * added spell * removed duplicated fit * changed config * added composite changes * added composite changes * updated yaml * added spell backward * updated readme * added qwen2.5 * added * Add files via upload * updated missing trainer func * updated curriculum * updated spell back * updated correctness score func * updated configs * added local evals * added updates * updated datasets * added fsdp to hf utility * added algorithmic qwen 3b yaml * updated readme * updated configs * added prepend token * updated with thinking token * updated test scoreboard * resolved comments * added evaluation scripts * removed results from pr * added config * added partial reward scoring * added evaluation composites * added training configs * added games eval * added Rubik's cube * resolved merge conflicts * added games config * added latest eval configs * updated structure * Delete training/evaluations/eval_graphs_composite.yaml --------- Co-authored-by: joesharratt1229 <joesharrat1229@gmail.com>
170 lines
6.7 KiB
Python
170 lines
6.7 KiB
Python
import pytest
|
|
|
|
from reasoning_gym.arithmetic.basic_arithmetic import (
|
|
BasicArithmeticCurriculum,
|
|
BasicArithmeticDataset,
|
|
BasicArithmeticDatasetConfig,
|
|
eval_floordiv,
|
|
)
|
|
from reasoning_gym.coaching.base_curriculum import DefaultCurriculumContext, RangeAttributeMode
|
|
|
|
|
|
def test_arithmetic_dataset_config_validation():
    """Invalid configurations must fail validation with an AssertionError."""
    # Each kwargs dict violates exactly one config invariant.
    invalid_kwargs = [
        {"min_terms": 0},  # at least one term is required
        {"min_terms": 3, "max_terms": 2},  # min must not exceed max
        {"operators": ["^"]},  # '^' is not a supported operator
    ]
    for kwargs in invalid_kwargs:
        with pytest.raises(AssertionError):
            config = BasicArithmeticDatasetConfig(**kwargs)
            config.validate()
|
|
def test_arithmetic_dataset_deterministic():
    """Two datasets built from the same seeded config must be identical."""
    config = BasicArithmeticDatasetConfig(seed=42, size=10)
    first = BasicArithmeticDataset(config)
    second = BasicArithmeticDataset(config)

    # Same seed, same config -> every item matches position by position.
    for idx in range(len(first)):
        assert first[idx] == second[idx]
|
def test_arithmetic_dataset_items():
    """Items expose the standard keys and the recorded answer matches the expression."""
    config = BasicArithmeticDatasetConfig(min_terms=2, max_terms=4, min_digits=1, max_digits=2, size=100, seed=42)
    dataset = BasicArithmeticDataset(config)

    for idx in range(len(dataset)):
        entry = dataset[idx]
        assert isinstance(entry, dict)
        # Every item carries the three standard fields.
        for key in ("question", "answer", "metadata"):
            assert key in entry

        # Re-evaluate the stored expression and compare with the recorded answer.
        # Safe to evaluate here: the dataset itself produced the expression.
        computed = eval_floordiv(entry["metadata"]["expression"])
        assert str(computed) == entry["answer"]
|
def test_arithmetic_dataset_format_styles():
    """Questions end with punctuation appropriate to each format style."""
    # Shared settings keep the generated expressions short and simple for testing.
    common = dict(size=10, seed=42, min_terms=2, max_terms=3, min_digits=1, max_digits=2)

    # "simple" style: every question ends with a period.
    config = BasicArithmeticDatasetConfig(format_style="simple", **common)
    dataset = BasicArithmeticDataset(config)
    for item in dataset:
        assert item["question"].strip().endswith(".")

    # "natural" style: questions may end with a period or a question mark.
    config = BasicArithmeticDatasetConfig(format_style="natural", **common)
    dataset = BasicArithmeticDataset(config)
    for item in dataset:
        assert item["question"].strip().endswith((".", "?"))
|
def test_arithmetic_dataset_iteration():
    """Iterating the dataset yields exactly `size` items, repeatably."""
    config = BasicArithmeticDatasetConfig(min_terms=2, max_terms=2, size=5, seed=42)  # small size for testing
    dataset = BasicArithmeticDataset(config)

    # Manual iteration stops after `size` items.
    collected = [entry for entry in dataset]
    assert len(collected) == config.size, "Iterator should yield exactly size items"

    # list() conversion behaves the same way.
    assert len(list(dataset)) == config.size, "Iterator should yield exactly size items"

    # Iterating twice produces identical items in identical order.
    assert list(dataset) == list(dataset), "Multiple iterations should yield same items"
|
def test_basic_arithmetic_curriculum():
    """Exercise level increments/decrements of BasicArithmeticCurriculum."""
    curriculum = BasicArithmeticCurriculum()

    base_value = {"size": 150, "seed": 1, "min_terms": 2, "max_terms": 2, "min_digits": 1, "max_digits": 1}

    def check(cfg, max_terms, max_digits):
        # At every level the lower bounds stay fixed at 2 terms / 1 digit;
        # only the upper bounds move with the attribute levels.
        assert cfg.min_terms == 2 and cfg.max_terms == max_terms
        assert cfg.min_digits == 1 and cfg.max_digits == max_digits

    base_cfg: BasicArithmeticDatasetConfig = curriculum.generate_configuration(base_value)
    assert base_cfg.seed == 1
    assert base_cfg.size == 150
    check(base_cfg, 2, 1)

    # Raising each attribute one level widens the corresponding upper bound.
    curriculum.increment_attr_level("num_terms")
    curriculum.increment_attr_level("num_digits")
    check(curriculum.generate_configuration(base_value), 3, 2)

    # Lowering num_terms alone leaves num_digits at its raised level.
    curriculum.decrement_attr_level("num_terms")
    check(curriculum.generate_configuration(base_value), 2, 2)

    # Two further increments move num_terms two levels up again.
    curriculum.increment_attr_level("num_terms")
    curriculum.increment_attr_level("num_terms")
    check(curriculum.generate_configuration(base_value), 4, 2)

    # Boundary: decrementing past level 0 clamps at the lowest level.
    curriculum.decrement_attr_level("num_terms")
    curriculum.decrement_attr_level("num_terms")
    curriculum.decrement_attr_level("num_digits")
    check(curriculum.generate_configuration(base_value), 2, 1)

    # Boundary: incrementing past the top clamps at the highest level.
    for _ in range(5):
        curriculum.increment_attr_level("num_terms")
        curriculum.increment_attr_level("num_digits")
    check(curriculum.generate_configuration(base_value), 6, 4)
|
def test_basic_arithmetic_curriculum_upper_bound():
|
|
curriculum = BasicArithmeticCurriculum()
|
|
|
|
base_value = {"size": 150, "seed": 1, "min_terms": 2, "max_terms": 2, "min_digits": 1, "max_digits": 1}
|
|
|
|
base_cfg: BasicArithmeticDatasetConfig = curriculum.generate_configuration(
|
|
base_value, context=DefaultCurriculumContext(mode=RangeAttributeMode.UPPER_BOUND)
|
|
)
|
|
assert base_cfg.seed == 1
|
|
assert base_cfg.size == 150
|
|
assert base_cfg.min_terms == 2 and base_cfg.max_terms == 2
|
|
assert base_cfg.min_digits == 1 and base_cfg.max_digits == 1
|
|
|
|
# Test incrementing attribute levels
|
|
curriculum.increment_attr_level("num_terms")
|
|
curriculum.increment_attr_level("num_digits")
|
|
increased_cfg = curriculum.generate_configuration(base_value)
|
|
assert increased_cfg.min_terms == 2 and increased_cfg.max_terms == 3
|
|
assert increased_cfg.min_digits == 1 and increased_cfg.max_digits == 2
|