Merge branch 'main' into rich/graphcolor

This commit is contained in:
Andreas Köpf 2025-02-14 07:09:38 +01:00 committed by GitHub
commit b64d0af2bc
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
19 changed files with 385 additions and 61 deletions

View file

@@ -1,6 +1,6 @@
import pytest
from reasoning_gym.arithmetic import ChainSum, ChainSumConfig
from reasoning_gym.arithmetic import ChainSumConfig, ChainSumDataset
from reasoning_gym.arithmetic.chain_sum import ChainSumCurriculum
@@ -18,8 +18,8 @@ def test_chain_sum_config_validation():
def test_chain_sum_deterministic():
"""Test that dataset generates same items with same seed"""
config = ChainSumConfig(seed=42, size=10)
dataset1 = ChainSum(config)
dataset2 = ChainSum(config)
dataset1 = ChainSumDataset(config)
dataset2 = ChainSumDataset(config)
for i in range(len(dataset1)):
assert dataset1[i] == dataset2[i]
@@ -28,7 +28,7 @@ def test_chain_sum_deterministic():
def test_chain_sum_items():
"""Test basic properties of generated items"""
config = ChainSumConfig(min_terms=2, max_terms=4, min_digits=1, max_digits=2, size=100, seed=42)
dataset = ChainSum(config)
dataset = ChainSumDataset(config)
for i in range(len(dataset)):
item = dataset[i]
@@ -57,7 +57,7 @@ def test_chain_sum_number_ranges():
size=50,
seed=42,
)
dataset = ChainSum(config)
dataset = ChainSumDataset(config)
for i in range(len(dataset)):
item = dataset[i]
@@ -71,7 +71,7 @@ def test_chain_sum_number_ranges():
# Test 1-digit numbers
config = ChainSumConfig(min_terms=2, max_terms=2, min_digits=1, max_digits=1, size=50, seed=42)
dataset = ChainSum(config)
dataset = ChainSumDataset(config)
for i in range(len(dataset)):
item = dataset[i]
expression = item["metadata"]["expression"]
@@ -88,7 +88,7 @@ def test_chain_sum_negation():
config = ChainSumConfig(
min_terms=2, max_terms=2, min_digits=2, max_digits=2, size=100, seed=42, allow_negation=True
)
dataset = ChainSum(config)
dataset = ChainSumDataset(config)
# Track if we see both positive and negative numbers
has_positive = False
@@ -112,7 +112,7 @@ def test_chain_sum_negation():
def test_chain_sum_iteration():
"""Test that iteration respects dataset size"""
config = ChainSumConfig(min_terms=2, max_terms=2, size=5, seed=42) # Small size for testing
dataset = ChainSum(config)
dataset = ChainSumDataset(config)
# Test manual iteration
items = []

View file

@@ -5,7 +5,7 @@ from pathlib import Path
import pytest
from reasoning_gym.arithmetic.chain_sum import ChainSum, ChainSumConfig
from reasoning_gym.arithmetic.chain_sum import ChainSumConfig, ChainSumDataset
from reasoning_gym.arithmetic.leg_counting import LegCountingConfig
from reasoning_gym.coaching import Coach, GroupedScores
from reasoning_gym.composite import CompositeConfig, CompositeDataset, DatasetSpec
@@ -14,7 +14,7 @@ from reasoning_gym.composite import CompositeConfig, CompositeDataset, DatasetSp
def test_coach_with_chain_sum():
# Create a small ChainSum dataset
config = ChainSumConfig(min_terms=2, max_terms=3, min_digits=1, max_digits=2, size=10, seed=42)
dataset = ChainSum(config)
dataset = ChainSumDataset(config)
coach = Coach(dataset)
# Simulate an agent working on tasks
@@ -208,7 +208,7 @@ def test_coach_score_logging(tmp_path):
# Create dataset and coach with logging
config = ChainSumConfig(min_terms=2, max_terms=3, min_digits=1, max_digits=2, size=10, seed=42)
dataset = ChainSum(config)
dataset = ChainSumDataset(config)
coach = Coach(dataset, score_log=log_file)
# Score a few answers

View file

@@ -7,7 +7,7 @@ def test_game_of_life():
"""Test basic properties and solution of generated items"""
# Easy
config = GameOfLifeConfig(seed=42, size=1, grid_size_x=20, grid_size_y=20, filled_cells=10, simulation_steps=1)
config = GameOfLifeConfig(seed=42, size=10, grid_size_x=20, grid_size_y=20, filled_cells=200, simulation_steps=1)
dataset = GameOfLifeDataset(config)
for item in dataset:

View file

@@ -112,7 +112,7 @@ def test_polynomial_solutions_evaluation():
evaluated_value = poly_expr.subs(x, solution)
# Ensure the evaluated value is close to zero (numerical stability threshold)
assert abs(evaluated_value) < 1e-6, (
assert abs(evaluated_value) < 1e-5, (
f"Solution {solution} does not satisfy the polynomial {poly_str}. "
f"Evaluated value: {evaluated_value}"
)

144
tests/test_products.py Normal file
View file

@@ -0,0 +1,144 @@
import pytest
from reasoning_gym.arithmetic import ProductsConfig, ProductsDataset
from reasoning_gym.arithmetic.products import ProductsCurriculum
def test_products_config_validation():
    """Test that invalid configs raise appropriate errors"""
    # Each kwargs dict below violates a config invariant and must be
    # rejected by validate() with an AssertionError.
    invalid_kwargs = (
        {"min_terms": 0},                  # below minimum term count
        {"min_terms": 3, "max_terms": 2},  # min exceeds max
    )
    for kwargs in invalid_kwargs:
        with pytest.raises(AssertionError):
            cfg = ProductsConfig(**kwargs)
            cfg.validate()
def test_products_deterministic():
    """Test that dataset generates same items with same seed"""
    shared_config = ProductsConfig(seed=42, size=10)
    first = ProductsDataset(shared_config)
    second = ProductsDataset(shared_config)

    # Two datasets built from the same seeded config must agree item-by-item.
    for idx in range(len(first)):
        assert first[idx] == second[idx]
def test_products_items():
    """Test basic properties of generated items"""
    config = ProductsConfig(min_terms=2, max_terms=4, min_digits=1, max_digits=2, size=100, seed=42)
    dataset = ProductsDataset(config)

    for i in range(len(dataset)):
        item = dataset[i]
        assert isinstance(item, dict)
        assert "question" in item
        assert "answer" in item
        assert "metadata" in item

        # Verify only * is used
        expression = item["metadata"]["expression"]
        assert all(op in ["*", " "] or op.isdigit() for op in expression)

        # Verify the answer matches the expression. Compute the product
        # directly from the parsed factors instead of eval() — the
        # expression is plain "a * b * ..." (asserted above), so this is
        # exact, and it avoids executing generated text as code.
        expected = 1
        for token in expression.split("*"):
            expected *= int(token)
        assert str(expected) == item["answer"]
def test_products_number_ranges():
    """Test that generated numbers respect digit constraints"""

    def assert_factors_in_range(cfg, lo, hi, label):
        # Every factor in every generated expression must fall in [lo, hi].
        ds = ProductsDataset(cfg)
        for idx in range(len(ds)):
            expr = ds[idx]["metadata"]["expression"]
            for num in (int(tok) for tok in expr.split() if tok.isdigit()):
                assert lo <= num <= hi, f"Number {num} outside valid range for {label}"

    # Test 3-digit numbers
    three_digit_cfg = ProductsConfig(
        min_terms=2,
        max_terms=2,  # Fix to 2 terms for easier testing
        min_digits=3,  # Should generate numbers >= 100
        max_digits=3,  # Should generate numbers <= 999
        size=50,
        seed=42,
    )
    assert_factors_in_range(three_digit_cfg, 100, 999, "3 digits")

    # Test 1-digit numbers
    one_digit_cfg = ProductsConfig(min_terms=2, max_terms=2, min_digits=1, max_digits=1, size=50, seed=42)
    assert_factors_in_range(one_digit_cfg, 0, 9, "1 digit")
def test_products_iteration():
    """Test that iteration respects dataset size"""
    config = ProductsConfig(min_terms=2, max_terms=2, size=5, seed=42)  # Small size for testing
    dataset = ProductsDataset(config)

    # Test manual iteration
    collected = []
    for entry in dataset:
        collected.append(entry)
    assert len(collected) == config.size, "Iterator should yield exactly size items"

    # Test list conversion
    collected = list(dataset)
    assert len(collected) == config.size, "Iterator should yield exactly size items"

    # Test multiple iterations
    run_one, run_two = list(dataset), list(dataset)
    assert run_one == run_two, "Multiple iterations should yield same items"
def test_products_scoring():
    """Test that scoring works correctly"""
    dataset = ProductsDataset(ProductsConfig(min_terms=2, max_terms=2, size=10, seed=42))
    item = dataset[0]

    # (candidate answer, expected score, failure message) — covers exact
    # match, wrong answer, partial match (answer contained in response),
    # and a missing (None) answer.
    cases = [
        (item["answer"], 1.0, "Exact match should score 1.0"),
        ("wrong", 0.01, "Wrong answer should score 0.01"),
        (f"The answer is {item['answer']}", 0.5, "Partial match should score 0.5"),
        (None, 0.0, "None should score 0.0"),
    ]
    for candidate, expected, message in cases:
        assert dataset.score_answer(candidate, item) == expected, message
def test_products_curriculum():
    """Check ProductsCurriculum level increments/decrements map to config ranges."""
    curriculum = ProductsCurriculum()
    template = {"size": 150, "seed": 1}

    # Base configuration: both tracked attributes at their lowest level.
    cfg = curriculum.generate_configuration(template)
    assert cfg.seed == 1
    assert cfg.size == 150
    assert cfg.min_digits == 1 and cfg.max_digits == 1
    assert cfg.min_terms == 2 and cfg.max_terms == 2

    # Raising num_terms and num_digits one level widens both ranges.
    curriculum.increment_attr_level("num_terms")
    curriculum.increment_attr_level("num_digits")
    cfg = curriculum.generate_configuration(template)
    assert cfg.min_digits == 1 and cfg.max_digits == 2
    assert cfg.min_terms == 2 and cfg.max_terms == 3

    # Lowering num_digits again narrows only the digit range; terms keep
    # their raised level.
    curriculum.decrement_attr_level("num_digits")
    cfg = curriculum.generate_configuration(template)
    assert cfg.min_digits == 1 and cfg.max_digits == 1
    assert cfg.min_terms == 2 and cfg.max_terms == 3