Minor question template & score_answer improvements (#261)

* math prompt improvements
* ignore brackets in complex_arithmetic results
* improve additional instruction in prompt of polynomial_equations
* more strict tests for score_answer in polynomial_equations
* simplify special reward handling
* fix test_intermediate_integration
* fix sokoban dataset
* add common dataset score_answer consistency test
This commit is contained in:
Andreas Köpf 2025-03-04 21:55:09 +01:00 committed by GitHub
parent bf24999bb0
commit b2904ccab9
106 changed files with 403 additions and 507 deletions

View file

@@ -401,16 +401,14 @@ class CircuitLogicDataset(ProceduralDataset):
}
def score_answer(self, answer: Optional[str], entry: dict[str, Any]) -> float:
if answer is None or len(answer) == 0:
return 0.0
if isinstance(answer, str) and len(answer) > 0:
oracle_answer = entry["answer"]
if oracle_answer == answer:
return 1.0
elif oracle_answer == answer.strip():
return len(oracle_answer) / len(answer)
oracle_answer = entry["answer"]
if oracle_answer == answer:
return 1.0
elif oracle_answer == answer.strip():
return len(oracle_answer) / len(answer)
return 0.01
return 0.0
register_dataset("circuit_logic", CircuitLogicDataset, CircuitLogicConfig)

View file

@@ -489,7 +489,7 @@ class KnightsKnavesDataset(ProceduralDataset):
def score_answer(self, answer: Optional[str], entry: dict[str, Any]) -> float:
"""Score an answer against the oracle answer."""
if answer is None or len(answer) == 0:
if not isinstance(answer, str) or len(answer) == 0:
return 0.0
try:
@@ -506,11 +506,9 @@ class KnightsKnavesDataset(ProceduralDataset):
if matching > 0:
return 0.3 + (0.7 * matching / len(oracle_assignments))
return 0.01
except Exception:
# If parsing fails, give minimal credit
return 0.01
pass
return 0.0
register_dataset("knights_knaves", KnightsKnavesDataset, KnightsKnavesConfig)

View file

@@ -295,7 +295,7 @@ class PropositionalLogicDataset(ProceduralDataset):
def score_answer(self, answer: str | None, entry: dict[str, Any]) -> float:
"""Robust scoring implementation for propositional logic answers"""
if not answer:
if not isinstance(answer, str):
return 0.0
try:
@@ -304,7 +304,7 @@ class PropositionalLogicDataset(ProceduralDataset):
valid_vars = set(entry["metadata"]["variables"])
answer_vars = re.findall(r"([A-Z])", cleaned_answer)
if any(var not in valid_vars for var in answer_vars):
return 0.01
return 0.0
premises = [Expression.from_string(p) for p in entry["metadata"]["premises"]]
answer_expr = Expression.from_string(cleaned_answer)
@@ -316,7 +316,7 @@ class PropositionalLogicDataset(ProceduralDataset):
return 1.0
return 0.05
except (ValueError, KeyError, AttributeError):
return 0.01
return 0.0
def _is_trivial(self, expr: Expression) -> bool:
"""Check for trivial tautologies like P ¬P"""

View file

@@ -339,9 +339,7 @@ class SelfReferenceDataset(ProceduralDataset):
# Solve puzzle
solutions = solve_puzzle_dynamic(puzzle)
for idx, sol in enumerate(solutions, start=1):
sol_str = ["True" if s else "False" for s in sol]
answer = len(solutions)
answer = str(len(solutions))
return {
"question": puzz_s,
@@ -362,12 +360,10 @@ class SelfReferenceDataset(ProceduralDataset):
float: The computed score between 0.0 and 1.0.
"""
if answer == None:
return 0.0
if str(answer) != str(entry["answer"]):
return 0.1
else:
return 1.0 # Yay
if isinstance(answer, str):
if answer == str(entry["answer"]):
return 1.0 # Yay
return 0.0
register_dataset("self_reference", SelfReferenceDataset, SelfReferenceConfig)

View file

@@ -68,12 +68,10 @@ class ZebraDataset(ProceduralDataset):
float: The computed score between 0.0 and 1.0.
"""
if answer == None:
return 0.0
if answer.lower().replace("\n", "") != entry["answer"].lower().replace("\n", ""):
return 0.01
else:
return 1.0 # Yay
if isinstance(answer, str):
if answer.lower().replace("\n", "") == entry["answer"].lower().replace("\n", ""):
return 1.0 # Yay
return 0.0
register_dataset("zebra_puzzles", ZebraDataset, ZebraConfig)