mirror of
https://github.com/open-thought/reasoning-gym.git
synced 2026-04-23 16:55:05 +00:00
inter-domain generalisation evaluation configs (#424)
* add inter-domain generalisation eval config for algebra * add algorithmic eval cfg * vllm infer * add arithmetic eval cfg * add geometry eval cfg * add arc cfg * add games eval cfg * add cognition eval cfg * add graphs eval cfg
This commit is contained in:
parent
98e976642d
commit
10863ea12b
10 changed files with 385 additions and 18 deletions
38
training/evaluations/inter_generalisation/graphs.yaml
Normal file
38
training/evaluations/inter_generalisation/graphs.yaml
Normal file
---
# Config used for evaluating inter-domain generalisation experiment models on graphs test data
#
# Models evaluated on this config:
# Qwen/Qwen2.5-3B-Instruct (original model)
# inter_logic_qwen_3b_400 (original + 400 GRPO steps on logic RG data)

model_path: ../models/inter_logic_qwen_3b_400  # Change to the model to be evaluated

max_tokens: 2048  # From max_response_length in training config
top_p: 0.9  # From rollout top_p
temperature: 0.6  # Lower temperature for more focused responses

developer_prompt: DeepSeekZero
developer_role: system

output_dir: results
save_metadata: true
save_full_results: true
eval_repeats: 3

categories:
  - category: graphs
    datasets:
      - dataset: course_schedule
        size: 100
        seed: 42
      - dataset: family_relationships
        size: 100
        seed: 42
      - dataset: largest_island
        size: 100
        seed: 42
      - dataset: quantum_lock
        size: 100
        seed: 42
      - dataset: shortest_path
        size: 100
        seed: 42
Loading…
Add table
Add a link
Reference in a new issue