Moving more environment variables to central config

This commit is contained in:
Tyler Marques 2025-07-03 12:50:49 -07:00
parent 540c2003e8
commit aadedd6512
No known key found for this signature in database
GPG key ID: CB99EDCF41D3016F
11 changed files with 103 additions and 314 deletions

View file

@ -740,7 +740,7 @@ class DiplomacyAgent:
if raw_response:
try:
# Conditionally format the response based on USE_UNFORMATTED_PROMPTS
if os.getenv("USE_UNFORMATTED_PROMPTS") == "1":
if config.USE_UNFORMATTED_PROMPTS:
# Format the natural language response into JSON
formatted_response = await format_with_gemini_flash(
raw_response, FORMAT_ORDER_DIARY, power_name=self.power_name, phase=game.current_short_phase, log_file_path=log_file_path
@ -1006,7 +1006,7 @@ class DiplomacyAgent:
if response is not None and response.strip(): # Check if response is not None and not just whitespace
try:
# Conditionally format the response based on USE_UNFORMATTED_PROMPTS
if os.getenv("USE_UNFORMATTED_PROMPTS") == "1":
if config.USE_UNFORMATTED_PROMPTS:
# Format the natural language response into JSON
formatted_response = await format_with_gemini_flash(
response, FORMAT_STATE_UPDATE, power_name=power_name, phase=current_phase, log_file_path=log_file_path
@ -1186,4 +1186,3 @@ class DiplomacyAgent:
logger.error(f"Agent {self.power_name} failed to generate plan: {e}")
self.add_journal_entry(f"Failed to generate plan for phase {game.current_phase} due to error: {e}")
return "Error: Failed to generate plan."

View file

@ -20,6 +20,7 @@ import google.generativeai as genai
from together import AsyncTogether
from together.error import APIError as TogetherAPIError # For specific error handling
from ..config import config
from diplomacy.engine.message import GLOBAL
from .game_history import GameHistory
from .utils import load_prompt, run_llm_and_log, log_llm_response, generate_random_seed, get_prompt_path
@ -125,7 +126,7 @@ class BaseModelClient:
)
# Conditionally format the response based on USE_UNFORMATTED_PROMPTS
if os.getenv("USE_UNFORMATTED_PROMPTS") == "1":
if config.USE_UNFORMATTED_PROMPTS:
# Local import to avoid circular dependency
from .formatter import format_with_gemini_flash, FORMAT_ORDERS
# Format the natural language response into structured format
@ -596,7 +597,7 @@ class BaseModelClient:
logger.debug(f"[{self.model_name}] Raw LLM response for {power_name}:\n{raw_response}")
# Conditionally format the response based on USE_UNFORMATTED_PROMPTS
if os.getenv("USE_UNFORMATTED_PROMPTS") == "1":
if config.USE_UNFORMATTED_PROMPTS:
# Local import to avoid circular dependency
from .formatter import format_with_gemini_flash, FORMAT_CONVERSATION
# Format the natural language response into structured JSON

View file

@ -3,6 +3,7 @@ import logging
import json
import os
from typing import Optional
from ..config import config
# Forward declaration for type hinting, actual imports in function if complex
if False: # TYPE_CHECKING
@ -82,7 +83,7 @@ async def initialize_agent_state_ext(
parsed_successfully = False
try:
# Conditionally format the response based on USE_UNFORMATTED_PROMPTS
if os.getenv("USE_UNFORMATTED_PROMPTS") == "1":
if config.USE_UNFORMATTED_PROMPTS:
# Format the natural language response into JSON
formatted_response = await format_with_gemini_flash(
response,

View file

@ -10,11 +10,11 @@ Usage: simply import `ai_diplomacy.narrative` *before* the game loop starts
3. A short narrative is produced via OpenAI `o3` and saved as the main
`.summary`.
"""
from __future__ import annotations
import asyncio
import logging
import os
from typing import Callable
from diplomacy.engine.game import Game
@ -22,84 +22,63 @@ from diplomacy.engine.game import Game
# Import to get model configuration and client loading
from .utils import get_special_models
from .clients import load_model_client
from ..config import config
LOGGER = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
SPECIAL_MODELS = get_special_models()
OPENAI_MODEL = SPECIAL_MODELS["phase_summary"]
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
LOGGER.warning("OPENAI_API_KEY not set narrative summaries will be stubbed.")
# ---------------------------------------------------------------------------
# Helper to call the model synchronously
# ---------------------------------------------------------------------------
async def _call_model_async(statistical_summary: str, phase_key: str) -> str:
"""Return a 24 sentence spectator-friendly narrative using async client."""
try:
# Load the narrative client
narrative_client = load_model_client(OPENAI_MODEL)
system = (
"You are an energetic e-sports commentator narrating a game of Diplomacy. "
"Turn the provided phase recap into a concise, thrilling story (max 4 sentences). "
"Highlight pivotal moves, supply-center swings, betrayals, and momentum shifts."
)
narrative_client.set_system_prompt(system)
user = f"PHASE {phase_key}\n\nSTATISTICAL SUMMARY:\n{statistical_summary}\n\nNow narrate this phase for spectators."
# Use the client's generate_response method
response = await narrative_client.generate_response(
prompt=user,
temperature=0.7, # Some creativity for narrative
inject_random_seed=False # No need for random seed in narratives
inject_random_seed=False, # No need for random seed in narratives
)
return response.strip() if response else "(Narrative generation failed - empty response)"
except Exception as exc: # Broad — we only log and degrade gracefully
LOGGER.error("Narrative generation failed: %s", exc, exc_info=True)
return "(Narrative generation failed)"
except Exception as e: # Broad — we only log and degrade gracefully
if config.ALLOW_NARATION_FAILURE:
LOGGER.error(f"Narrative generation failed: {e}", exc_info=True)
return "(Narrative generation failed)"
else:
raise e
def _call_openai(statistical_summary: str, phase_key: str) -> str:
"""Return a 24 sentence spectator-friendly narrative."""
# Check if API key is available based on the model type
if OPENAI_MODEL.startswith("openrouter"):
if not os.environ.get("OPENROUTER_API_KEY"):
return "(Narrative generation disabled missing OPENROUTER_API_KEY)."
elif "claude" in OPENAI_MODEL.lower():
if not os.environ.get("ANTHROPIC_API_KEY"):
return "(Narrative generation disabled missing ANTHROPIC_API_KEY)."
elif "gemini" in OPENAI_MODEL.lower():
if not os.environ.get("GEMINI_API_KEY"):
return "(Narrative generation disabled missing GEMINI_API_KEY)."
elif "deepseek" in OPENAI_MODEL.lower():
if not os.environ.get("DEEPSEEK_API_KEY"):
return "(Narrative generation disabled missing DEEPSEEK_API_KEY)."
elif "together" in OPENAI_MODEL.lower():
if not os.environ.get("TOGETHER_API_KEY"):
return "(Narrative generation disabled missing TOGETHER_API_KEY)."
else: # Default to OpenAI
if not OPENAI_API_KEY:
return "(Narrative generation disabled missing OPENAI_API_KEY)."
# Run the async function in a new event loop
try:
# Create a new event loop for this synchronous context
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(_call_model_async(statistical_summary, phase_key))
loop.close()
return result
except Exception as exc:
LOGGER.error("Failed to run async narrative generation: %s", exc, exc_info=True)
return "(Narrative generation failed)"
# Create a new event loop for this synchronous context
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(_call_model_async(statistical_summary, phase_key))
loop.close()
return result
# ---------------------------------------------------------------------------
# Patch _generate_phase_summary
@ -110,28 +89,24 @@ _original_gps: Callable = Game._generate_phase_summary # type: ignore[attr-defi
def _default_phase_summary_callback(self: Game, phase_key: str) -> Callable:
"""Generate a default statistical summary callback when none is provided."""
def phase_summary_callback(system_prompt, user_prompt):
# Get the current short phase for accessing game history
current_short_phase = phase_key
# 1) Gather the current board state, sorted by # of centers
power_info = []
for power_name, power in self.powers.items():
units_list = list(power.units)
centers_list = list(power.centers)
power_info.append(
(power_name, len(centers_list), units_list, centers_list)
)
power_info.append((power_name, len(centers_list), units_list, centers_list))
# Sort by descending # of centers
power_info.sort(key=lambda x: x[1], reverse=True)
# 2) Build text lines for the top "Board State Overview"
top_lines = ["Current Board State (Ordered by SC Count):"]
for (p_name, sc_count, units, centers) in power_info:
top_lines.append(
f"{p_name}: {sc_count} centers (needs 18 to win). "
f"Units={units} Centers={centers}"
)
for p_name, sc_count, units, centers in power_info:
top_lines.append(f"{p_name}: {sc_count} centers (needs 18 to win). Units={units} Centers={centers}")
# 3) Map orders to "successful", "failed", or "other" outcomes
success_dict = {}
@ -182,13 +157,13 @@ def _default_phase_summary_callback(self: Game, phase_key: str) -> Callable:
summary_parts.append("\n".join(top_lines))
summary_parts.append("\n" + success_section)
summary_parts.append("\n" + fail_section)
# Only include "Other" section if it has content
if other_dict:
summary_parts.append("\n" + other_section)
return f"Phase {current_short_phase} Summary:\n\n" + "\n".join(summary_parts)
return phase_summary_callback
@ -196,7 +171,7 @@ def _patched_generate_phase_summary(self: Game, phase_key, summary_callback=None
# If no callback provided, use our default one
if summary_callback is None:
summary_callback = _default_phase_summary_callback(self, phase_key)
# 1) Call original implementation → statistical summary
statistical = _original_gps(self, phase_key, summary_callback)
LOGGER.debug(f"[{phase_key}] Original summary returned: {statistical!r}")
@ -223,13 +198,15 @@ def _patched_generate_phase_summary(self: Game, phase_key, summary_callback=None
self.phase_summaries[str(phase_key)] = narrative # type: ignore[attr-defined]
LOGGER.debug(f"[{phase_key}] Narrative summary stored successfully.")
else:
LOGGER.warning(f"[{phase_key}] Cannot store narrative summary because phase_data is None.")
LOGGER.warning(f"[{phase_key}] Cannot store narrative summary because phase_data is None.")
except Exception as exc:
LOGGER.warning("Could not store narrative summary for %s: %s", phase_key, exc)
return narrative
# Monkey-patch
Game._generate_phase_summary = _patched_generate_phase_summary # type: ignore[assignment]
LOGGER.info("Game._generate_phase_summary patched with narrative generation.")
LOGGER.info("Game._generate_phase_summary patched with narrative generation.")

View file

@ -1,36 +1,39 @@
"""
Module for constructing prompts for LLM interactions in the Diplomacy game.
"""
import logging
from typing import Dict, List, Optional, Any # Added Any for game type placeholder
import logging
from typing import Dict, List, Optional, Any # Added Any for game type placeholder
from ..config import config
from .utils import load_prompt, get_prompt_path
from .possible_order_context import (
generate_rich_order_context,
generate_rich_order_context_xml,
)
import os
from .game_history import GameHistory # Assuming GameHistory is correctly importable
from .game_history import GameHistory # Assuming GameHistory is correctly importable
# placeholder for diplomacy.Game to avoid circular or direct dependency if not needed for typehinting only
# from diplomacy import Game # Uncomment if 'Game' type hint is crucial and available
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) # Or inherit from parent logger
logger.setLevel(logging.DEBUG) # Or inherit from parent logger
# --- Home-center lookup -------------------------------------------
HOME_CENTERS: dict[str, list[str]] = {
"AUSTRIA": ["Budapest", "Trieste", "Vienna"],
"ENGLAND": ["Edinburgh", "Liverpool", "London"],
"FRANCE": ["Brest", "Marseilles", "Paris"],
"GERMANY": ["Berlin", "Kiel", "Munich"],
"ITALY": ["Naples", "Rome", "Venice"],
"RUSSIA": ["Moscow", "Saint Petersburg", "Sevastopol", "Warsaw"],
"TURKEY": ["Ankara", "Constantinople", "Smyrna"],
"AUSTRIA": ["Budapest", "Trieste", "Vienna"],
"ENGLAND": ["Edinburgh", "Liverpool", "London"],
"FRANCE": ["Brest", "Marseilles", "Paris"],
"GERMANY": ["Berlin", "Kiel", "Munich"],
"ITALY": ["Naples", "Rome", "Venice"],
"RUSSIA": ["Moscow", "Saint Petersburg", "Sevastopol", "Warsaw"],
"TURKEY": ["Ankara", "Constantinople", "Smyrna"],
}
def build_context_prompt(
game: Any, # diplomacy.Game object
game: Any, # diplomacy.Game object
board_state: dict,
power_name: str,
possible_orders: Dict[str, List[str]],
@ -76,22 +79,14 @@ def build_context_prompt(
year_phase = board_state["phase"] # e.g. 'S1901M'
# Decide which context builder to use.
_use_simple = os.getenv("SIMPLE_PROMPTS", "0").lower() in {"1", "true", "yes"}
_use_simple = config.SIMPLE_PROMPTS
if _use_simple:
possible_orders_context_str = generate_rich_order_context(
game, power_name, possible_orders
)
possible_orders_context_str = generate_rich_order_context(game, power_name, possible_orders)
else:
possible_orders_context_str = generate_rich_order_context_xml(
game, power_name, possible_orders
)
possible_orders_context_str = generate_rich_order_context_xml(game, power_name, possible_orders)
if include_messages:
messages_this_round_text = game_history.get_messages_this_round(
power_name=power_name,
current_phase_name=year_phase
)
messages_this_round_text = game_history.get_messages_this_round(power_name=power_name, current_phase_name=year_phase)
if not messages_this_round_text.strip():
messages_this_round_text = "\n(No messages this round)\n"
else:
@ -100,7 +95,7 @@ def build_context_prompt(
# Separate active and eliminated powers for clarity
active_powers = [p for p in game.powers.keys() if not game.powers[p].is_eliminated()]
eliminated_powers = [p for p in game.powers.keys() if game.powers[p].is_eliminated()]
# Build units representation with power status
units_lines = []
for p, u in board_state["units"].items():
@ -111,7 +106,7 @@ def build_context_prompt(
units_lines.append(f" {p}: {u_str}")
units_repr = "\n".join(units_lines)
# Build centers representation with power status
# Build centers representation with power status
centers_lines = []
for p, c in board_state["centers"].items():
c_str = ", ".join(c)
@ -125,16 +120,16 @@ def build_context_prompt(
home_centers_str = ", ".join(HOME_CENTERS.get(power_name.upper(), []))
order_history_str = game_history.get_order_history_for_prompt(
game=game, # Pass the game object for normalization
game=game, # Pass the game object for normalization
power_name=power_name,
current_phase_name=year_phase,
num_movement_phases_to_show=1
num_movement_phases_to_show=1,
)
# Replace token only if it exists (template may not include it)
if "{home_centers}" in context_template:
context_template = context_template.replace("{home_centers}", home_centers_str)
# Following the pattern for home_centers, use replace for safety
if "{order_history}" in context_template:
context_template = context_template.replace("{order_history}", order_history_str)
@ -153,9 +148,10 @@ def build_context_prompt(
return context
def construct_order_generation_prompt(
system_prompt: str,
game: Any, # diplomacy.Game object
game: Any, # diplomacy.Game object
board_state: dict,
power_name: str,
possible_orders: Dict[str, List[str]],
@ -183,20 +179,20 @@ def construct_order_generation_prompt(
A string containing the complete prompt for the LLM.
"""
# Load prompts
_ = load_prompt("few_shot_example.txt", prompts_dir=prompts_dir) # Loaded but not used, as per original logic
_ = load_prompt("few_shot_example.txt", prompts_dir=prompts_dir) # Loaded but not used, as per original logic
# Pick the phase-specific instruction file (using unformatted versions)
phase_code = board_state["phase"][-1] # 'M' (movement), 'R', or 'A' / 'B'
phase_code = board_state["phase"][-1] # 'M' (movement), 'R', or 'A' / 'B'
if phase_code == "M":
instructions_file = get_prompt_path("order_instructions_movement_phase.txt")
elif phase_code in ("A", "B"): # builds / adjustments
elif phase_code in ("A", "B"): # builds / adjustments
instructions_file = get_prompt_path("order_instructions_adjustment_phase.txt")
elif phase_code == "R": # retreats
elif phase_code == "R": # retreats
instructions_file = get_prompt_path("order_instructions_retreat_phase.txt")
else: # unexpected default to movement rules
else: # unexpected default to movement rules
instructions_file = get_prompt_path("order_instructions_movement_phase.txt")
instructions = load_prompt(instructions_file, prompts_dir=prompts_dir)
_use_simple = os.getenv("SIMPLE_PROMPTS", "0").lower() in {"1", "true", "yes"}
_use_simple = config.SIMPLE_PROMPTS
# Build the context prompt
context = build_context_prompt(
@ -209,18 +205,29 @@ def construct_order_generation_prompt(
agent_relationships=agent_relationships,
agent_private_diary=agent_private_diary_str,
prompts_dir=prompts_dir,
include_messages=not _use_simple, # include only when *not* simple
include_messages=not _use_simple, # include only when *not* simple
)
# Append goals at the end for focus
goals_section = ""
if agent_goals:
goals_section = "\n\nYOUR STRATEGIC GOALS:\n" + "\n".join(f"- {g}" for g in agent_goals) + "\n\nKeep these goals in mind when choosing your orders."
goals_section = (
"\n\nYOUR STRATEGIC GOALS:\n" + "\n".join(f"- {g}" for g in agent_goals) + "\n\nKeep these goals in mind when choosing your orders."
)
final_prompt = system_prompt + "\n\n" + context + "\n\n" + instructions + goals_section
# Make the power names more LLM friendly
final_prompt = final_prompt.replace('AUSTRIA', 'Austria').replace('ENGLAND', "England").replace('FRANCE', 'France').replace('GERMANY', 'Germany').replace('ITALY', "Italy").replace('RUSSIA', 'Russia').replace('TURKEY', 'Turkey')
final_prompt = (
final_prompt.replace("AUSTRIA", "Austria")
.replace("ENGLAND", "England")
.replace("FRANCE", "France")
.replace("GERMANY", "Germany")
.replace("ITALY", "Italy")
.replace("RUSSIA", "Russia")
.replace("TURKEY", "Turkey")
)
logger.debug(f"Final order generation prompt preview for {power_name}: {final_prompt[:500]}...")
return final_prompt
return final_prompt

View file

@ -10,6 +10,8 @@ import string
import json
import asyncio
from ..config import config
# Avoid circular import for type hinting
if TYPE_CHECKING:
from .clients import BaseModelClient
@ -109,8 +111,8 @@ def get_special_models() -> Dict[str, str]:
export AI_DIPLOMACY_FORMATTER_MODEL="gemini-2.0-flash"
"""
return {
"phase_summary": os.getenv("AI_DIPLOMACY_NARRATIVE_MODEL", "openrouter-google/gemini-2.5-flash-preview-05-20"),
"formatter": os.getenv("AI_DIPLOMACY_FORMATTER_MODEL", "google/gemini-2.5-flash-lite-preview-06-17"),
"phase_summary": config.AI_DIPLOMACY_NARRATIVE_MODEL,
"formatter": config.AI_DIPLOMACY_FORMATTER_MODEL
}
@ -466,7 +468,7 @@ def get_prompt_path(prompt_name: str) -> str:
Returns:
str: Either "unformatted/{prompt_name}" or just "{prompt_name}"
"""
if os.getenv("USE_UNFORMATTED_PROMPTS") == "1":
if config.USE_UNFORMATTED_PROMPTS:
return f"unformatted/{prompt_name}"
else:
return prompt_name
@ -502,4 +504,4 @@ def normalize_recipient_name(recipient: str) -> str:
normalized = name_mapping.get(recipient, recipient)
return normalized
return normalized

View file

@ -1,51 +0,0 @@
# Map Assets for Diplomacy Animation
This directory contains the map assets used by the 3D animation system.
## Files Required for Each Map Variant
For each map variant (e.g., standard, ancmed, modern, pure), the following files are needed:
1. `[variant].svg` - The main SVG map (shows country boundaries)
2. `[variant]_map.jpg` - A fallback JPG map texture
3. `[variant]_coords.json` - JSON file with province coordinates for 3D positioning
## Coordinate Format
The coordinate JSON files should have the following structure:
```json
{
"mapWidth": 1000,
"mapHeight": 1000,
"coordinates": {
"LON": { "x": -300, "y": 0, "z": -100 },
"PAR": { "x": -250, "y": 0, "z": 100 },
...
},
"provinces": {
"LON": { "isSupplyCenter": true, "type": "land" },
"PAR": { "isSupplyCenter": true, "type": "land" },
"MAO": { "isSupplyCenter": false, "type": "sea" },
"STP": { "isSupplyCenter": true, "type": "land", "coasts": ["NC", "SC"] },
...
}
}
```
### Coordinates
- The origin (0,0,0) is the center of the map
- The x-axis runs horizontally (negative = west, positive = east)
- The y-axis is for elevation (0 = sea level, positive = up)
- The z-axis runs vertically (negative = north, positive = south)
### Special Coast Notation
For provinces with multiple coasts (like St. Petersburg), coast positions should be defined:
1. In coordinates section using underscore notation:
- `"STP_NC": { "x": 200, "y": 0, "z": -350 }`
2. In provinces section using the coasts array:
- `"STP": { "isSupplyCenter": true, "type": "land", "coasts": ["NC", "SC"] }`

View file

@ -1,11 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns="http://www.w3.org/2000/svg" width="1000" height="1000" viewBox="0 0 1000 1000">
<!-- Simple placeholder map for Diplomacy -->
<rect width="1000" height="1000" fill="#8baed8" /> <!-- Ocean background -->
<!-- Land masses (simple rectangles as a placeholder) -->
<rect x="300" y="300" width="400" height="400" fill="#c9b18c" /> <!-- Europe -->
<!-- Add some region names -->
<text x="500" y="500" font-family="Arial" font-size="20" text-anchor="middle" fill="#000">Europe</text>
</svg>

Before

Width:  |  Height:  |  Size: 568 B

View file

@ -1,141 +0,0 @@
{
"mapWidth": 1000,
"mapHeight": 1000,
"coordinates": {
"VIE": { "x": 50, "y": 0, "z": 100 },
"BUD": { "x": 100, "y": 0, "z": 100 },
"TRI": { "x": 50, "y": 0, "z": 150 },
"LON": { "x": -300, "y": 0, "z": -100 },
"EDI": { "x": -300, "y": 0, "z": -200 },
"LVP": { "x": -350, "y": 0, "z": -170 },
"PAR": { "x": -250, "y": 0, "z": 100 },
"MAR": { "x": -200, "y": 0, "z": 200 },
"BRE": { "x": -350, "y": 0, "z": 150 },
"BER": { "x": -50, "y": 0, "z": -100 },
"MUN": { "x": -80, "y": 0, "z": 50 },
"KIE": { "x": -100, "y": 0, "z": -100 },
"NAP": { "x": 50, "y": 0, "z": 300 },
"ROM": { "x": 0, "y": 0, "z": 250 },
"VEN": { "x": -50, "y": 0, "z": 150 },
"SEV": { "x": 300, "y": 0, "z": 100 },
"STP": { "x": 200, "y": 0, "z": -300 },
"STP_SC": { "x": 250, "y": 0, "z": -250 },
"WAR": { "x": 100, "y": 0, "z": 0 },
"MOS": { "x": 250, "y": 0, "z": -100 },
"ANK": { "x": 300, "y": 0, "z": 200 },
"CON": { "x": 250, "y": 0, "z": 250 },
"SMY": { "x": 250, "y": 0, "z": 300 },
"BEL": { "x": -200, "y": 0, "z": 0 },
"HOL": { "x": -150, "y": 0, "z": -50 },
"PIC": { "x": -250, "y": 0, "z": 50 },
"BUR": { "x": -200, "y": 0, "z": 100 },
"GAS": { "x": -300, "y": 0, "z": 200 },
"RUH": { "x": -150, "y": 0, "z": 0 },
"WAL": { "x": -350, "y": 0, "z": -100 },
"YOR": { "x": -300, "y": 0, "z": -150 },
"CLY": { "x": -330, "y": 0, "z": -250 },
"DEN": { "x": -50, "y": 0, "z": -200 },
"NWY": { "x": 0, "y": 0, "z": -280 },
"SWE": { "x": 50, "y": 0, "z": -250 },
"FIN": { "x": 150, "y": 0, "z": -300 },
"STP_NC": { "x": 200, "y": 0, "z": -350 },
"SIL": { "x": 0, "y": 0, "z": -50 },
"BOH": { "x": 0, "y": 0, "z": 50 },
"TYR": { "x": -20, "y": 0, "z": 100 },
"GAL": { "x": 120, "y": 0, "z": 50 },
"PIE": { "x": -100, "y": 0, "z": 150 },
"TUS": { "x": -50, "y": 0, "z": 200 },
"APU": { "x": 100, "y": 0, "z": 250 },
"SPA": { "x": -350, "y": 0, "z": 300 },
"SPA_NC": { "x": -380, "y": 0, "z": 250 },
"SPA_SC": { "x": -330, "y": 0, "z": 350 },
"POR": { "x": -450, "y": 0, "z": 350 },
"UKR": { "x": 170, "y": 0, "z": 50 },
"LVN": { "x": 150, "y": 0, "z": -200 },
"SER": { "x": 150, "y": 0, "z": 180 },
"ALB": { "x": 130, "y": 0, "z": 230 },
"GRE": { "x": 150, "y": 0, "z": 280 },
"BUL": { "x": 200, "y": 0, "z": 200 },
"BUL_EC": { "x": 250, "y": 0, "z": 200 },
"BUL_SC": { "x": 200, "y": 0, "z": 230 },
"RUM": { "x": 200, "y": 0, "z": 150 },
"ARM": { "x": 350, "y": 0, "z": 150 },
"SYR": { "x": 350, "y": 0, "z": 250 },
"TUN": { "x": 50, "y": 0, "z": 400 },
"NAF": { "x": -200, "y": 0, "z": 400 },
"NAO": { "x": -450, "y": 0, "z": -300 },
"NWG": { "x": -100, "y": 0, "z": -350 },
"BAR": { "x": 200, "y": 0, "z": -400 },
"IRI": { "x": -400, "y": 0, "z": -150 },
"NTH": { "x": -200, "y": 0, "z": -200 },
"SKA": { "x": 0, "y": 0, "z": -230 },
"HEL": { "x": -100, "y": 0, "z": -150 },
"BAL": { "x": 50, "y": 0, "z": -150 },
"BOT": { "x": 100, "y": 0, "z": -250 },
"ENG": { "x": -270, "y": 0, "z": -20 },
"MAO": { "x": -450, "y": 0, "z": 200 },
"WES": { "x": -100, "y": 0, "z": 350 },
"LYO": { "x": -150, "y": 0, "z": 250 },
"TYS": { "x": 0, "y": 0, "z": 300 },
"ION": { "x": 120, "y": 0, "z": 330 },
"ADR": { "x": 80, "y": 0, "z": 200 },
"AEG": { "x": 200, "y": 0, "z": 300 },
"EAS": { "x": 300, "y": 0, "z": 300 },
"BLA": { "x": 270, "y": 0, "z": 170 }
},
"provinces": {
"VIE": { "isSupplyCenter": true, "type": "land" },
"BUD": { "isSupplyCenter": true, "type": "land" },
"TRI": { "isSupplyCenter": true, "type": "land" },
"LON": { "isSupplyCenter": true, "type": "land" },
"EDI": { "isSupplyCenter": true, "type": "land" },
"LVP": { "isSupplyCenter": true, "type": "land" },
"PAR": { "isSupplyCenter": true, "type": "land" },
"MAR": { "isSupplyCenter": true, "type": "land" },
"BRE": { "isSupplyCenter": true, "type": "land" },
"BER": { "isSupplyCenter": true, "type": "land" },
"MUN": { "isSupplyCenter": true, "type": "land" },
"KIE": { "isSupplyCenter": true, "type": "land" },
"NAP": { "isSupplyCenter": true, "type": "land" },
"ROM": { "isSupplyCenter": true, "type": "land" },
"VEN": { "isSupplyCenter": true, "type": "land" },
"SEV": { "isSupplyCenter": true, "type": "land" },
"STP": { "isSupplyCenter": true, "type": "land", "coasts": ["NC", "SC"] },
"WAR": { "isSupplyCenter": true, "type": "land" },
"MOS": { "isSupplyCenter": true, "type": "land" },
"ANK": { "isSupplyCenter": true, "type": "land" },
"CON": { "isSupplyCenter": true, "type": "land" },
"SMY": { "isSupplyCenter": true, "type": "land" },
"NAO": { "isSupplyCenter": false, "type": "sea" },
"NWG": { "isSupplyCenter": false, "type": "sea" },
"BAR": { "isSupplyCenter": false, "type": "sea" },
"IRI": { "isSupplyCenter": false, "type": "sea" },
"NTH": { "isSupplyCenter": false, "type": "sea" },
"SKA": { "isSupplyCenter": false, "type": "sea" },
"HEL": { "isSupplyCenter": false, "type": "sea" },
"BAL": { "isSupplyCenter": false, "type": "sea" },
"BOT": { "isSupplyCenter": false, "type": "sea" },
"ENG": { "isSupplyCenter": false, "type": "sea" },
"MAO": { "isSupplyCenter": false, "type": "sea" },
"WES": { "isSupplyCenter": false, "type": "sea" },
"LYO": { "isSupplyCenter": false, "type": "sea" },
"TYS": { "isSupplyCenter": false, "type": "sea" },
"ION": { "isSupplyCenter": false, "type": "sea" },
"ADR": { "isSupplyCenter": false, "type": "sea" },
"AEG": { "isSupplyCenter": false, "type": "sea" },
"EAS": { "isSupplyCenter": false, "type": "sea" },
"BLA": { "isSupplyCenter": false, "type": "sea" }
}
}

View file

@ -1 +0,0 @@
<!-- This is a placeholder for a JPEG file. In a real situation, you would need to create an actual JPG image file. -->

View file

@ -10,6 +10,11 @@ class Configuration(BaseSettings):
DEBUG: bool = False
log_file_path: Path | None = None
USE_UNFORMATTED_PROMPTS: bool = False
SIMPLE_PROMPTS: bool = False
# Default models for tasks
AI_DIPLOMACY_NARRATIVE_MODEL: str = "openrouter-google/gemini-2.5-flash-preview-05-20"
AI_DIPLOMACY_FORMATTER_MODEL: str = "openrouter-google/gemini-2.5-flash-preview-05-20"
# API Keys to be validated. Warns if they aren't present at startup, raises ValueError if you attempt to use them when they aren't present.
DEEPSEEK_API_KEY: str | None = None
@ -17,6 +22,7 @@ class Configuration(BaseSettings):
ANTHROPIC_API_KEY: str | None = None
GEMINI_API_KEY: str | None = None
OPENROUTER_API_KEY: str | None = None
TOGETHER_API_KEY: str | None = None
def __init__(self, power_name: Optional[PowerEnum] = None, **kwargs):
super().__init__(**kwargs)