diff --git a/reasoning_gym/arithmetic/power_function.py b/reasoning_gym/arithmetic/power_function.py
index dc9e10f8..c5db17a0 100644
--- a/reasoning_gym/arithmetic/power_function.py
+++ b/reasoning_gym/arithmetic/power_function.py
@@ -1,4 +1,4 @@
-"""Computhe the power of a number."""
+"""Compute the power of a number."""
 
 from dataclasses import dataclass
 from decimal import Decimal
@@ -37,22 +37,26 @@ class PowerFunctionDataset(ProceduralDataset):
     def __init__(self, config: PowerFunctionConfig):
         super().__init__(config=config, seed=config.seed, size=config.size)
 
+    def _format_sig_figs(self, x: Decimal, sig: int) -> Decimal:
+        """Format a Decimal to exactly 'sig' significant figures, keeping trailing zeros."""
+        if x.is_zero():
+            # Return a Decimal (not a str) so the type matches the annotation and
+            # comparisons against the non-zero branch's Decimal result stay valid.
+            return Decimal("0." + "0" * (sig - 1))
+
+        exp = x.adjusted()
+        shift = sig - exp - 1
+        rounded = x.quantize(Decimal("1e{}".format(-shift)))
+        return Decimal(rounded)
+
     def score_answer(self, answer: Optional[str], entry: dict[str, Any]) -> float:
         """Score the answer by checking if it matches the expected answer to 3 significant figures."""
         oracle_answer = entry["answer"]
         if answer is not None:
             try:
-                user_answer = Decimal(answer)
-                oracle_value = Decimal(oracle_answer)
-
-                if oracle_value == 0:
-                    return 1.0 if user_answer == 0 else 0.01
-
-                user_sig_figs = f"{user_answer:.3g}"
-                oracle_sig_figs = f"{oracle_value:.3g}"
+                user_answer = self._format_sig_figs(Decimal(answer), 3)
+                oracle_answer = self._format_sig_figs(Decimal(oracle_answer), 3)
 
                 # Check if they match to 3 significant figures
-                if user_sig_figs == oracle_sig_figs:
+                if user_answer == oracle_answer:
                     return 1.0
                 else:
                     return 0.01
diff --git a/tests/test_power_function.py b/tests/test_power_function.py
index 22c5327e..f56a9ce9 100644
--- a/tests/test_power_function.py
+++ b/tests/test_power_function.py
@@ -1,5 +1,7 @@
 """Tests for Power Function questions generation"""
 
+from decimal import Decimal
+
 import pytest
 
 from reasoning_gym.arithmetic import PowerFunctionConfig, PowerFunctionDataset
@@ -82,3 +84,46 @@ def test_power_function_curriculum():
 
     increased_cfg = curriculum.generate_configuration(base_value)
     assert increased_cfg.min_exponent == 2 and increased_cfg.max_exponent == 4
+
+
+# Test score_answer function with various answers
+def test_power_function_score_answer_for_edge_cases():
+    """Test score_answer function for edge cases"""
+    config = PowerFunctionConfig(seed=42)
+    dataset = PowerFunctionDataset(config)
+
+    # Case 1: Match with trailing zeros
+    item = dataset[0].copy()
+    user_answer = "1.000e+00"
+    # Let's change the oracle answer for edge case testing
+    item["answer"] = "1.0"
+    score = dataset.score_answer(user_answer, item)
+    assert score == 1.0, f"Expected score 1.0, got {score}"
+
+    # Case 2: Rounding up at edge of significant figures
+    item = dataset[0].copy()
+    item["answer"] = str(Decimal("0.9999") ** 1) # Close to 1.000
+    user_answer = "1.00"
+    score = dataset.score_answer(user_answer, item)
+    assert score == 1.0, f"Expected score 1.0, got {score}"
+
+    # Case 3: Negative base, valid exponent
+    item = dataset[0].copy()
+    item["answer"] = str(Decimal("-2.00") ** 3) # -8.0
+    user_answer = "-8.00"
+    score = dataset.score_answer(user_answer, item)
+    assert score == 1.0, f"Expected score 1.0, got {score}"
+
+    # Case 4: Very small number with exponent notation
+    item = dataset[0].copy()
+    item["answer"] = str(Decimal("1e-6")) # 1e-6
+    user_answer = "1.00e-6"
+    score = dataset.score_answer(user_answer, item)
+    assert score == 1.0, f"Expected score 1.0, got {score}"
+
+    # Case 5: Incorrect answer should yield low score
+    item = dataset[0].copy()
+    item["answer"] = "1000.0"
+    user_answer = "999.0"
+    score = dataset.score_answer(user_answer, item)
+    assert score == 0.01, f"Expected low score 0.01, got {score}"