mirror of
https://github.com/GoodStartLabs/AI_Diplomacy.git
synced 2026-04-19 12:58:09 +00:00
moving to diff compute
This commit is contained in:
parent
1f8ac5ae20
commit
89e6db8d04
5 changed files with 50 additions and 16 deletions
|
|
@ -416,7 +416,6 @@ class BaseModelClient:
|
|||
raw_response = self.generate_response(prompt)
|
||||
|
||||
messages = []
|
||||
import pdb; pdb.set_trace()
|
||||
if raw_response:
|
||||
try:
|
||||
# Find the JSON block between double curly braces
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ def conduct_negotiations(game, game_history, model_error_stats, max_rounds=3):
|
|||
# We do up to 'max_rounds' single-message turns for each power
|
||||
for round_index in range(max_rounds):
|
||||
with concurrent.futures.ThreadPoolExecutor(
|
||||
max_workers=1
|
||||
max_workers=len(active_powers)
|
||||
) as executor:
|
||||
futures = {}
|
||||
for power_name in active_powers:
|
||||
|
|
|
|||
|
|
@ -1,5 +1,8 @@
|
|||
from dotenv import load_dotenv
|
||||
import logging
|
||||
import os
|
||||
from typing import Dict, Optional
|
||||
import random
|
||||
|
||||
logger = logging.getLogger("utils")
|
||||
logger.setLevel(logging.INFO)
|
||||
|
|
@ -7,22 +10,38 @@ logging.basicConfig(level=logging.INFO)
|
|||
|
||||
load_dotenv()
|
||||
|
||||
MODEL_OPTIONS = [
    'o3-mini',
    'claude-3-5-sonnet-latest',
    'gemini-2.0-flash',
    'gemini-2.0-flash-lite',
    'gpt-4o',
    'gpt-4o-mini',
    'claude-3-5-haiku-20241022'
]


def assign_models_to_powers(seed: Optional[int] = None) -> Dict[str, str]:
    """
    Randomly assign one model to each Diplomacy power.

    Example usage: define which model each power uses.
    Return a dict: { power_name: model_id, ... }
    POWERS = ['AUSTRIA', 'ENGLAND', 'FRANCE', 'GERMANY', 'ITALY', 'RUSSIA', 'TURKEY']

    Args:
        seed: Optional seed for the *global* random number generator so the
            assignment is reproducible across runs (the --seed CLI flag is
            forwarded here). When None, the RNG is left unseeded.

    Returns:
        Dict mapping each power name to a model id; every entry of
        MODEL_OPTIONS is used exactly once.
    """
    if seed is not None:
        random.seed(seed)

    # Draw a random permutation from a COPY of the option list.
    # The previous implementation called random.shuffle(MODEL_OPTIONS),
    # which mutated the module-level constant in place as a side effect
    # of every call; random.sample leaves MODEL_OPTIONS untouched.
    shuffled = random.sample(MODEL_OPTIONS, len(MODEL_OPTIONS))

    powers = ['AUSTRIA', 'ENGLAND', 'FRANCE', 'GERMANY', 'ITALY', 'RUSSIA', 'TURKEY']
    return dict(zip(powers, shuffled))
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -69,6 +69,12 @@ def parse_arguments():
|
|||
action="store_true",
|
||||
help="Enable the planning phase for each power to set strategic directives.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--seed",
|
||||
type=int,
|
||||
default=None,
|
||||
help="Seed for the random number generator.",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
|
|
@ -124,7 +130,7 @@ def main():
|
|||
return
|
||||
game.power_model_map = dict(zip(powers_order, provided_models))
|
||||
else:
|
||||
game.power_model_map = assign_models_to_powers()
|
||||
game.power_model_map = assign_models_to_powers(args.seed)
|
||||
|
||||
while not game.is_game_done:
|
||||
phase_start = time.time()
|
||||
|
|
|
|||
18
run.sh
18
run.sh
|
|
@ -1,7 +1,17 @@
|
|||
#!/bin/bash
|
||||
#SBATCH --job-name=diplomo # Job name
|
||||
#SBATCH --output=diplomo.log # Standard output and error log
|
||||
#SBATCH --time=12:00:00 # Time limit hh:mm:ss
|
||||
#SBATCH --gres=gpu:0 # Request 1 GPU (remove if not needed)
|
||||
|
||||
# note the summaries aren't actually used so the model doesn't matter here
|
||||
python lm_game.py \
|
||||
--max_year 1905 \
|
||||
--num_negotiation_rounds 1 \
|
||||
--models "gpt-4o-mini, gpt-4o-mini, gpt-4o-mini, gpt-4o-mini, gpt-4o-mini, gpt-4o-mini, gpt-4o-mini"
|
||||
|
||||
# Set seeds
|
||||
seeds=(0 1 2 3 4)
|
||||
|
||||
for seed in "${seeds[@]}"; do
|
||||
python3 lm_game.py \
|
||||
--max_year 1910 \
|
||||
--num_negotiation_rounds 0 \
|
||||
--seed "$seed"
|
||||
done
|
||||
Loading…
Add table
Add a link
Reference in a new issue