Feat/curr adj (#394)

This commit is contained in:
joesharratt1229 2025-04-02 06:39:14 +01:00 committed by GitHub
parent 2c52f33c3a
commit 43c739cb3e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
26 changed files with 152390 additions and 453 deletions

View file

@ -1,5 +1,6 @@
from typing import Optional
from typing import Literal, Optional
import numpy as np
import verl.utils.torch_functional as verl_F
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer
@ -67,6 +68,33 @@ class ReasoningGymDataset(Dataset):
row_dict["index"] = index
return row_dict
def update_experiment_difficulty(self, dataset_name: str, method: Literal["increment", "decrement"]):
"""Update the difficulty of the underlying dataset."""
if self.experiment is None:
raise ValueError("Cannot update difficulty: dataset is not a CurriculumExperiment")
if method not in ["increment", "decrement"]:
raise ValueError("Invalid method: must be 'increment' or 'decrement'")
self.experiment.score_board.clear(dataset_name)
self.experiment.update_difficulty(dataset_name, method)
self.data = self.experiment.composite
return True
def aggregate(self, last_n: Optional[int] = None):
"""Aggregate scores from the underlying experiment"""
if self.experiment is None:
raise ValueError("Cannot aggregate scores: dataset is not a CurriculumExperiment")
results = self.experiment.score_board.aggregate(last_n=last_n)
output_results = {}
for key, value in results.items():
output_results[key] = {}
scores = value.scores
first_key = list(scores.keys())[0]
output_results[key]["results"] = np.mean(scores[first_key])
output_results[key]["total_samples"] = value.total_scores
return output_results
def make_dataset(
tokenizer,
@ -78,6 +106,7 @@ def make_dataset(
"""
kwargs = {
"tokenizer": tokenizer,
# "dataset_name": dataset_name,
"developer_prompt": developer_prompt,
}
if isinstance(data_source, Experiment):

View file

@ -0,0 +1,36 @@
#!/usr/bin/env python
# encoding: utf-8
from collections import defaultdict
from glob import glob
import fire
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
def main(fsdp_checkpoint_path, huggingface_model_path, output_path, world_size=4):
    """Merge sharded FSDP checkpoint files into a single HuggingFace checkpoint.

    Loads the per-rank shard files named
    ``model_world_size_{world_size}_rank_{rank}.pt`` from
    ``fsdp_checkpoint_path``, concatenates each parameter's shards along
    dim 0, and writes the merged model plus tokenizer to ``output_path``.

    Args:
        fsdp_checkpoint_path: directory containing the per-rank ``.pt`` shards.
        huggingface_model_path: model name/path providing config and tokenizer.
        output_path: destination directory for the merged model and tokenizer.
        world_size: number of ranks the checkpoint was sharded across
            (default 4, matching the previously hard-coded value).
    """
    shards = defaultdict(list)
    for rank in range(world_size):
        filepath = f"{fsdp_checkpoint_path}/model_world_size_{world_size}_rank_{rank}.pt"
        print("loading", filepath)
        # map_location="cpu" lets the merge run on a host without the GPUs
        # the checkpoint was saved from.
        rank_state = torch.load(filepath, map_location="cpu")
        for key, value in rank_state.items():
            # .to_local() presumably unwraps a DTensor/sharded wrapper into a
            # plain tensor shard — confirm against the saving code.
            shards[key].append(value.to_local())

    # Reassemble each full parameter by concatenating its shards along dim 0.
    state_dict = {key: torch.cat(parts, dim=0) for key, parts in shards.items()}

    config = AutoConfig.from_pretrained(huggingface_model_path)
    model = AutoModelForCausalLM.from_config(config)
    model.load_state_dict(state_dict)
    model.save_pretrained(output_path, max_shard_size="10GB")

    tokenizer = AutoTokenizer.from_pretrained(huggingface_model_path)
    tokenizer.save_pretrained(output_path)
# CLI entry point: python-fire turns main()'s parameters into command-line args.
if __name__ == "__main__":
    fire.Fire(main)