mirror of
https://github.com/open-thought/reasoning-gym.git
synced 2026-04-30 17:40:45 +00:00
Merge branch 'main' into rich/ab
This commit is contained in:
commit
27938ce13a
16 changed files with 759 additions and 12 deletions
|
|
@@ -1,6 +1,7 @@
|
|||
import pytest
|
||||
|
||||
from reasoning_gym.arithmetic import ChainSum, ChainSumConfig
|
||||
from reasoning_gym.arithmetic.chain_sum import ChainSumCurriculum
|
||||
|
||||
|
||||
def test_chain_sum_config_validation():
|
||||
|
|
@@ -127,3 +128,30 @@ def test_chain_sum_iteration():
|
|||
first_items = list(dataset)
|
||||
second_items = list(dataset)
|
||||
assert first_items == second_items, "Multiple iterations should yield same items"
|
||||
|
||||
|
||||
def test_chain_sum_curriculum():
    """Check ChainSumCurriculum config generation and attribute level moves."""
    curriculum = ChainSumCurriculum()

    base_value = {"size": 150, "seed": 1}

    # Defaults: single-digit operands and exactly two terms.
    base_cfg: ChainSumConfig = curriculum.generate_configuration(base_value)
    assert base_cfg.seed == 1
    assert base_cfg.size == 150
    assert (base_cfg.min_digits, base_cfg.max_digits) == (1, 1)
    assert (base_cfg.min_terms, base_cfg.max_terms) == (2, 2)

    # Raising both attribute levels widens only the upper bounds.
    curriculum.increment_attr_level("num_terms")
    curriculum.increment_attr_level("num_digits")

    increased_cfg = curriculum.generate_configuration(base_value)
    assert (increased_cfg.min_digits, increased_cfg.max_digits) == (1, 2)
    assert (increased_cfg.min_terms, increased_cfg.max_terms) == (2, 3)

    # Lowering num_digits reverts digits while terms stay raised.
    curriculum.decrement_attr_level("num_digits")

    partially_decreased_cfg = curriculum.generate_configuration(base_value)
    assert (partially_decreased_cfg.min_digits, partially_decreased_cfg.max_digits) == (1, 1)
    assert (partially_decreased_cfg.min_terms, partially_decreased_cfg.max_terms) == (2, 3)
|
|
|||
88
tests/test_count_primes.py
Normal file
88
tests/test_count_primes.py
Normal file
|
|
@@ -0,0 +1,88 @@
|
|||
"""Tests for Count Primes questions generation"""
|
||||
|
||||
import pytest
|
||||
|
||||
from reasoning_gym.algorithmic.count_primes import CountPrimesConfig, CountPrimesDataset
|
||||
|
||||
|
||||
def test_count_primes_config_validation():
    """Test that invalid configs raise appropriate errors"""
    # Both negative and zero upper bounds must be rejected by validate().
    for bad_max_n in (-1, 0):
        with pytest.raises(AssertionError):
            cfg = CountPrimesConfig(max_n=bad_max_n)
            cfg.validate()
def test_count_primes_dataset_deterministic():
    """Test that dataset generates same items with same seed"""
    config = CountPrimesConfig(seed=42, size=10)
    first = CountPrimesDataset(config)
    second = CountPrimesDataset(config)

    # Same seeded config -> item-for-item identical datasets.
    for idx in range(len(first)):
        assert first[idx] == second[idx]
def test_count_primes_dataset_items():
    """Test basic properties of generated items"""
    config = CountPrimesConfig(max_n=10, size=10, seed=42)
    dataset = CountPrimesDataset(config)

    for idx in range(len(dataset)):
        item = dataset[idx]

        # Top-level structure of each generated entry.
        assert isinstance(item, dict)
        for key in ("question", "answer", "metadata"):
            assert key in item

        # Required metadata fields.
        metadata = item["metadata"]
        for key in ("start", "end", "primes", "solution"):
            assert key in metadata

        start = metadata["start"]
        end = metadata["end"]
        primes = metadata["primes"]

        # The prime list cannot exceed the size of the inclusive range.
        assert start <= end
        assert len(primes) <= end - start + 1
def test_count_primes_dataset_iteration():
    """Test that iteration respects dataset size"""
    config = CountPrimesConfig(size=5, seed=42)
    dataset = CountPrimesDataset(config)

    collected = list(dataset)
    assert len(collected) == config.size

    # Re-iterating the dataset must reproduce the same sequence.
    assert collected == list(dataset)
def test_count_primes_answer():
    """Test the _get_primes sieve helper.

    Based on the asserted values, _get_primes(n) returns a boolean sieve
    over indices [0, n): index i is truthy iff i is prime (empty list for
    n <= 1).
    """
    config = CountPrimesConfig(seed=42)
    dataset = CountPrimesDataset(config)

    # Base cases: no entries below 2 can be prime.
    assert dataset._get_primes(n=0) == []
    assert dataset._get_primes(n=1) == []
    assert dataset._get_primes(n=2) == [False, False]

    # Test primes up to 10. Use plain truthiness instead of the original
    # `== True` / `== False` comparisons (PEP 8 / flake8 E712: don't
    # compare booleans with ==).
    primes = dataset._get_primes(n=11)
    expected = {
        2: True,
        3: True,
        4: False,
        5: True,
        6: False,
        7: True,
        8: False,
        9: False,
        10: False,
    }
    for value, is_prime in expected.items():
        if is_prime:
            assert primes[value], f"{value} should be marked prime"
        else:
            assert not primes[value], f"{value} should not be marked prime"
35
tests/test_dice.py
Normal file
35
tests/test_dice.py
Normal file
|
|
@@ -0,0 +1,35 @@
|
|||
import pytest
|
||||
|
||||
from reasoning_gym.arithmetic.dice import DiceConfig, DiceDataset
|
||||
|
||||
|
||||
def test_dice():
    """Test basic properties and solution of generated items"""
    # Default difficulty: verify item structure and scoring together.
    config = DiceConfig(seed=42, size=50, num_dice=8, max_dice_size=24)
    dataset = DiceDataset(config)

    for item in dataset:
        assert isinstance(item, dict)
        for key in ("question", "answer", "metadata"):
            assert key in item

        # Exact answer earns full credit; a missing answer earns none.
        assert dataset.score_answer(answer=item["answer"], entry=item) == 1.0
        assert dataset.score_answer(answer=None, entry=item) == 0.0

    # Easy then hard variants only need the scoring checks.
    for num_dice, max_dice_size in ((1, 2), (40, 40)):
        config = DiceConfig(seed=42, size=1, num_dice=num_dice, max_dice_size=max_dice_size)
        dataset = DiceDataset(config)

        for item in dataset:
            assert dataset.score_answer(answer=item["answer"], entry=item) == 1.0
            assert dataset.score_answer(answer=None, entry=item) == 0.0
19
tests/test_rectangle_count.py
Normal file
19
tests/test_rectangle_count.py
Normal file
|
|
@@ -0,0 +1,19 @@
|
|||
import pytest
|
||||
|
||||
from reasoning_gym.cognition.rectangle_count import RectangleCountConfig, RectangleCountDataset
|
||||
|
||||
|
||||
def test_dice():
    """Test basic properties and solution of generated items"""
    # NOTE(review): the name looks copy-pasted from test_dice.py — this
    # exercises RectangleCountDataset; consider renaming to
    # test_rectangle_count (kept as-is to preserve the discovered test id).
    config = RectangleCountConfig(seed=42, size=50, max_rectangles=15, width=40, height=40)
    dataset = RectangleCountDataset(config)

    for item in dataset:
        assert isinstance(item, dict)
        for key in ("question", "answer", "metadata"):
            assert key in item

        # Correct answer scores 1.0; a missing answer scores 0.0.
        assert dataset.score_answer(answer=item["answer"], entry=item) == 1.0
        assert dataset.score_answer(answer=None, entry=item) == 0.0
Loading…
Add table
Add a link
Reference in a new issue