diff --git a/.gitignore b/.gitignore
index be4071bb..c3ff9440 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,3 +37,6 @@ ENV/
 .coverage
 htmlcov/
 .pytest_cache/
+
+# OSX
+.DS_Store
diff --git a/reasoning_gym/code/__init__.py b/reasoning_gym/code/__init__.py
new file mode 100644
index 00000000..19aca9df
--- /dev/null
+++ b/reasoning_gym/code/__init__.py
@@ -0,0 +1,7 @@
+"""
+Code tasks for training reasoning capabilities:
+- Code Analysis
+- Code Interpretation
+- Code Execution
+"""
+
diff --git a/reasoning_gym/code/bf.py b/reasoning_gym/code/bf.py
new file mode 100644
index 00000000..9f2f3381
--- /dev/null
+++ b/reasoning_gym/code/bf.py
@@ -0,0 +1,96 @@
+from dataclasses import dataclass
+from random import Random
+from typing import Any, Dict, Optional
+
+import bfi
+
+from .contrib.bfit.Compiler import Compiler, Minify
+from ..factory import ProceduralDataset, register_dataset
+
+
+@dataclass
+class BFConfig:
+    """Configuration for BF task generation"""
+
+    seed: Optional[int] = None
+    size: int = 500
+
+
+class BFDataset(ProceduralDataset):
+    """Generates BF (Brainf*ck) program-output prediction tasks"""
+
+    def __init__(self, config: BFConfig):
+        self._prompt_templates = [
+            "This is a BF (Brainf*ck) computer program. What is the output?\n\n{bf_program}",
+        ]
+        super().__init__(config=config, seed=config.seed, size=config.size)
+
+    def __getitem__(self, idx: int) -> dict:
+        """Generate a single BF task
+
+        Returns:
+            dict with keys:
+                - question: str, the prompt containing the BF program
+                - answer: str, the output produced by running the program
+                - metadata: dict with generation parameters
+        """
+        rng = Random(self.seed + idx)
+
+        # Generate a small BFIT (C-like) program, compile it down to BF,
+        # then interpret the BF program to obtain the ground-truth output.
+        bfit_code = self.generate_bfit_code(rng)
+        bf_program = self.compile_bfit_code_to_bf(bfit_code)
+        result = bfi.interpret(bf_program, buffer_output=True)
+
+        return {
+            "question": rng.choice(self._prompt_templates).format(bf_program=bf_program),
+            "answer": result,
+            "metadata": {"bfit_code": bfit_code, "bf_program": bf_program},
+        }
+
+    def generate_bfit_code(self, rng: Random) -> str:
+        # NOTE: the template is static for now; `rng` is accepted so the
+        # constants can be randomized later without changing the signature.
+        bfit_template = """
+int main() {
+    int acc = 0;
+    int target = 15;
+    int x = 2;
+    int y = 3;
+    while (acc < target) {
+        acc = acc + x;
+        acc = acc + y;
+    }
+    printint(acc);
+}
+"""
+        return bfit_template
+
+    def compile_bfit_code_to_bf(self, bfit: str) -> str:
+        bf = Compiler.compile(bfit, optimize_code=True)
+        # bf = Minify.minify(bf)  # Is this necessary?
+        return bf
+
+    def score_answer(self, answer: Optional[str], entry: Dict[str, Any]) -> float:
+        """Determine if the provided answer matches the BF program's output.
+
+        The function awards 1.0 for a correct answer, 0.01 for an incorrect
+        (but present) answer, and 0.0 when no answer is given.
+
+        Args:
+            answer (Optional[str]): The user's answer.
+            entry (Dict[str, Any]): The original dataset entry containing the correct answer.
+
+        Returns:
+            float: The computed score between 0.0 and 1.0.
+ """ + + if answer == None: + return 0.0 + if answer != entry['answer']: + return 0.01 + else: + return 1.0 # Yay + +# Register the dataset +register_dataset("figlet_font", BFDataset, BFConfig) diff --git a/tests/test_bf.py b/tests/test_bf.py new file mode 100644 index 00000000..9340e9c4 --- /dev/null +++ b/tests/test_bf.py @@ -0,0 +1,37 @@ +import pytest + +from reasoning_gym.code.bf import BFConfig, BFDataset + + +# def test_figlet_deterministic(): +# """Test that dataset generates same items with same seed""" +# config = FigletFontConfig(seed=42, size=15) +# dataset1 = FigletFontDataset(config) +# dataset2 = FigletFontDataset(config) + +# for i in range(15): # Only check first 15 entries for speed +# assert dataset1[i] == dataset2[i] + + +def test_bf(): + """Test basic properties and solution of generated items""" + config = BFConfig(seed=42, size=40) + dataset = BFDataset(config) + + for item in dataset: + assert isinstance(item, dict) + assert "question" in item + assert "answer" in item + assert "metadata" in item + + # Check metadata contains required fields + assert "bfit_code" in item["metadata"] + assert "bf_program" in item["metadata"] + + print(item["answer"]) + + # Test the scoring + assert dataset.score_answer(answer=item["answer"], entry=item) == 1.0 + assert dataset.score_answer(answer=None, entry=item) == 0.0 + assert dataset.score_answer(answer="Love is a battlefield", entry=item) == 0.01 +