diff --git a/.vscode/launch.json b/.vscode/launch.json index e5eaf8a..ba7f205 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -14,6 +14,137 @@ "--remote-debugging-port=9223" ], "sourceMaps": true + }, + { + "name": "Multi Bot Launcher", + "type": "python", + "request": "launch", + "program": "${workspaceFolder}/bot_client/multi_bot_launcher.py", + "args": [ + "--hostname", + "localhost", + "--port", + "8432", + "--username-base", + "bot", + "--password", + "password", + "--log-level", + "INFO" + ], + "console": "integratedTerminal", + "justMyCode": true, + "cwd": "${workspaceFolder}" + }, + { + "name": "Multi Bot Launcher (Existing Game)", + "type": "python", + "request": "launch", + "program": "${workspaceFolder}/bot_client/multi_bot_launcher.py", + "args": [ + "--hostname", + "localhost", + "--port", + "8432", + "--username-base", + "bot", + "--password", + "password", + "--game-id", + "${input:gameId}", + "--log-level", + "INFO" + ], + "console": "integratedTerminal", + "justMyCode": true, + "cwd": "${workspaceFolder}" + }, + { + "name": "Single Bot Player", + "type": "python", + "request": "launch", + "program": "${workspaceFolder}/bot_client/single_bot_player.py", + "args": [ + "--hostname", + "localhost", + "--port", + "8432", + "--username", + "bot_${input:powerName}", + "--password", + "password", + "--power", + "${input:powerName}", + "--model", + "${input:modelName}", + "--log-level", + "INFO" + ], + "console": "integratedTerminal", + "justMyCode": true, + "cwd": "${workspaceFolder}" + }, + { + "name": "Single Bot Player (Existing Game)", + "type": "python", + "request": "launch", + "program": "${workspaceFolder}/bot_client/single_bot_player.py", + "args": [ + "--hostname", + "localhost", + "--port", + "8432", + "--username", + "bot_${input:powerName}", + "--password", + "password", + "--power", + "${input:powerName}", + "--model", + "${input:modelName}", + "--game-id", + "${input:gameId}", + "--log-level", + "INFO" + ], + "console": 
"integratedTerminal", + "justMyCode": true, + "cwd": "${workspaceFolder}" + } + ], + "inputs": [ + { + "id": "gameId", + "type": "promptString", + "description": "Enter the game ID to join" + }, + { + "id": "powerName", + "type": "pickString", + "description": "Select the power to control", + "options": [ + "AUSTRIA", + "ENGLAND", + "FRANCE", + "GERMANY", + "ITALY", + "RUSSIA", + "TURKEY" + ], + "default": "FRANCE" + }, + { + "id": "modelName", + "type": "pickString", + "description": "Select the AI model to use", + "options": [ + "gpt-3.5-turbo", + "gpt-4", + "claude-3-haiku", + "claude-3-sonnet", + "gemini-pro" + ], + "default": "gpt-3.5-turbo" } ] } diff --git a/ai_diplomacy/agent.py b/ai_diplomacy/agent.py index a2e64ae..8464168 100644 --- a/ai_diplomacy/agent.py +++ b/ai_diplomacy/agent.py @@ -5,26 +5,41 @@ import json import re import json_repair import json5 # More forgiving JSON parser -import ast - -from config import config # Assuming BaseModelClient is importable from clients.py in the same directory -from .clients import BaseModelClient +from .clients import BaseModelClient, load_model_client # Import load_prompt and the new logging wrapper from utils -from .utils import load_prompt, run_llm_and_log, log_llm_response, get_prompt_path +from .utils import load_prompt, run_llm_and_log, log_llm_response from .prompt_constructor import build_context_prompt # Added import -from .clients import GameHistory -from diplomacy import Game -from .formatter import format_with_gemini_flash, FORMAT_ORDER_DIARY, FORMAT_NEGOTIATION_DIARY, FORMAT_STATE_UPDATE +from loguru import logger -logger = logging.getLogger(__name__) # == Best Practice: Define constants at module level == -ALL_POWERS = frozenset({"AUSTRIA", "ENGLAND", "FRANCE", "GERMANY", "ITALY", "RUSSIA", "TURKEY"}) +ALL_POWERS = frozenset( + {"AUSTRIA", "ENGLAND", "FRANCE", "GERMANY", "ITALY", "RUSSIA", "TURKEY"} +) ALLOWED_RELATIONSHIPS = ["Enemy", "Unfriendly", "Neutral", "Friendly", "Ally"] + +# == New: Helper 
function to load prompt files reliably == +def _load_prompt_file(filename: str) -> Optional[str]: + """Loads a prompt template from the prompts directory.""" + try: + # Construct path relative to this file's location + current_dir = os.path.dirname(os.path.abspath(__file__)) + prompts_dir = os.path.join(current_dir, "prompts") + filepath = os.path.join(prompts_dir, filename) + with open(filepath, "r", encoding="utf-8") as f: + return f.read() + except FileNotFoundError: + logger.error(f"Prompt file not found: {filepath}") + return None + except Exception as e: + logger.error(f"Error loading prompt file {filepath}: {e}") + return None + + class DiplomacyAgent: """ Represents a stateful AI agent playing as a specific power in Diplomacy. @@ -38,7 +53,6 @@ class DiplomacyAgent: client: BaseModelClient, initial_goals: Optional[List[str]] = None, initial_relationships: Optional[Dict[str, str]] = None, - prompts_dir: Optional[str] = None, ): """ Initializes the DiplomacyAgent. @@ -49,52 +63,57 @@ class DiplomacyAgent: initial_goals: An optional list of initial strategic goals. initial_relationships: An optional dictionary mapping other power names to relationship statuses (e.g., 'ALLY', 'ENEMY', 'NEUTRAL'). - prompts_dir: Optional path to the prompts directory. """ if power_name not in ALL_POWERS: - raise ValueError(f"Invalid power name: {power_name}. Must be one of {ALL_POWERS}") + raise ValueError( + f"Invalid power name: {power_name}. 
Must be one of {ALL_POWERS}" + ) self.power_name: str = power_name self.client: BaseModelClient = client - self.prompts_dir: Optional[str] = prompts_dir # Initialize goals as empty list, will be populated by initialize_agent_state self.goals: List[str] = initial_goals if initial_goals is not None else [] # Initialize relationships to Neutral if not provided if initial_relationships is None: - self.relationships: Dict[str, str] = {p: "Neutral" for p in ALL_POWERS if p != self.power_name} + self.relationships: Dict[str, str] = { + p: "Neutral" for p in ALL_POWERS if p != self.power_name + } else: self.relationships: Dict[str, str] = initial_relationships self.private_journal: List[str] = [] - - # The permanent, unabridged record of all entries. This only ever grows. - self.full_private_diary: List[str] = [] - - # The version used for LLM context. This gets rebuilt by consolidation. - self.private_diary: List[str] = [] + self.private_diary: List[str] = [] # New private diary # --- Load and set the appropriate system prompt --- # Get the directory containing the current file (agent.py) current_dir = os.path.dirname(os.path.abspath(__file__)) - default_prompts_path = os.path.join(current_dir, "prompts") - prompts_root = self.prompts_dir or default_prompts_path + # Construct path relative to the current file's directory + prompts_dir = os.path.join(current_dir, "prompts") + power_prompt_filename = os.path.join( + prompts_dir, f"{power_name.lower()}_system_prompt.txt" + ) + default_prompt_filename = os.path.join(prompts_dir, "system_prompt.txt") - power_prompt_name = f"{power_name.lower()}_system_prompt.txt" - default_prompt_name = "system_prompt.txt" - - power_prompt_path = os.path.join(prompts_root, power_prompt_name) - default_prompt_path = os.path.join(prompts_root, default_prompt_name) - - system_prompt_content = load_prompt(power_prompt_path) + system_prompt_content = load_prompt(power_prompt_filename) if not system_prompt_content: - logger.warning(f"Power-specific 
prompt not found at {power_prompt_path}. Falling back to default.") - system_prompt_content = load_prompt(default_prompt_path) + logger.warning( + f"Power-specific prompt '{power_prompt_filename}' not found or empty. Loading default system prompt." + ) + # system_prompt_content = load_prompt("system_prompt.txt") + system_prompt_content = load_prompt(default_prompt_filename) + else: + logger.info(f"Loaded power-specific system prompt for {power_name}.") + # ---------------------------------------------------- if system_prompt_content: # Ensure we actually have content before setting self.client.set_system_prompt(system_prompt_content) else: - logger.error(f"Could not load default system prompt either! Agent {power_name} may not function correctly.") - logger.info(f"Initialized DiplomacyAgent for {self.power_name} with goals: {self.goals}") + logger.error( + f"Could not load default system prompt either! Agent {power_name} may not function correctly." + ) + logger.info( + f"Initialized DiplomacyAgent for {self.power_name} with goals: {self.goals}" + ) self.add_journal_entry(f"Agent initialized. 
Initial Goals: {self.goals}") def _extract_json_from_text(self, text: str) -> dict: @@ -108,7 +127,9 @@ class DiplomacyAgent: # Preprocessing: Normalize common formatting issues # This helps with the KeyError: '\n "negotiation_summary"' problem - text = re.sub(r'\n\s+"(\w+)"\s*:', r'"\1":', text) # Remove newlines before keys + text = re.sub( + r'\n\s+"(\w+)"\s*:', r'"\1":', text + ) # Remove newlines before keys # Fix specific patterns that cause trouble problematic_patterns = [ "negotiation_summary", @@ -150,15 +171,14 @@ class DiplomacyAgent: try: cleaned = self._clean_json_text(json_text) result = json.loads(cleaned) - if isinstance(result, dict): - logger.debug(f"[{self.power_name}] Successfully parsed JSON object with pattern {pattern_idx}, match {match_idx}") - return result - else: - logger.warning( - f"[{self.power_name}] Parsed JSON with pattern {pattern_idx}, match {match_idx}, but got type {type(result)} instead of dict. Content: {str(result)[:200]}" - ) + logger.debug( + f"[{self.power_name}] Successfully parsed JSON with pattern {pattern_idx}, match {match_idx}" + ) + return result except json.JSONDecodeError as e_initial: - logger.debug(f"[{self.power_name}] Standard JSON parse failed: {e_initial}") + logger.debug( + f"[{self.power_name}] Standard JSON parse failed: {e_initial}" + ) # Attempt 1.5: Try surgical cleaning with original patterns if basic cleaning failed try: @@ -167,95 +187,72 @@ class DiplomacyAgent: # Pattern 1: Removes 'Sentence.' when followed by ',', '}', or ']' cleaned_match_candidate = re.sub( - r"\s*([A-Z][\w\s,]*?\.(?:\s+[A-Z][\w\s,]*?\.)*)\s*(?=[,\}\]])", "", cleaned_match_candidate + r"\s*([A-Z][\w\s,]*?\.(?:\s+[A-Z][\w\s,]*?\.)*)\s*(?=[,\}\]])", + "", + cleaned_match_candidate, ) # Pattern 2: Removes 'Sentence.' 
when it's at the very end, before the final '}' of the current scope cleaned_match_candidate = re.sub( - r"\s*([A-Z][\w\s,]*?\.(?:\s+[A-Z][\w\s,]*?\.)*)\s*(?=\s*\}\s*$)", "", cleaned_match_candidate + r"\s*([A-Z][\w\s,]*?\.(?:\s+[A-Z][\w\s,]*?\.)*)\s*(?=\s*\}\s*$)", + "", + cleaned_match_candidate, ) # Pattern 3: Fix for newlines and spaces before JSON keys (common problem with LLMs) - cleaned_match_candidate = re.sub(r'\n\s+"(\w+)"\s*:', r'"\1":', cleaned_match_candidate) + cleaned_match_candidate = re.sub( + r'\n\s+"(\w+)"\s*:', r'"\1":', cleaned_match_candidate + ) # Pattern 4: Fix trailing commas in JSON objects - cleaned_match_candidate = re.sub(r",\s*}", "}", cleaned_match_candidate) + cleaned_match_candidate = re.sub( + r",\s*}", "}", cleaned_match_candidate + ) # Pattern 5: Handle specific known problematic patterns for pattern in problematic_patterns: - cleaned_match_candidate = cleaned_match_candidate.replace(f'\n "{pattern}"', f'"{pattern}"') + cleaned_match_candidate = ( + cleaned_match_candidate.replace( + f'\n "{pattern}"', f'"{pattern}"' + ) + ) # Pattern 6: Fix quotes - replace single quotes with double quotes for keys - cleaned_match_candidate = re.sub(r"'(\w+)'\s*:", r'"\1":', cleaned_match_candidate) + cleaned_match_candidate = re.sub( + r"'(\w+)'\s*:", r'"\1":', cleaned_match_candidate + ) # Only try parsing if cleaning actually changed something if cleaned_match_candidate != json_text: - logger.debug(f"[{self.power_name}] Surgical cleaning applied. Attempting to parse modified JSON.") + logger.debug( + f"[{self.power_name}] Surgical cleaning applied. Attempting to parse modified JSON." 
+ ) return json.loads(cleaned_match_candidate) except json.JSONDecodeError as e_surgical: - logger.debug(f"[{self.power_name}] Surgical cleaning didn't work: {e_surgical}") + logger.debug( + f"[{self.power_name}] Surgical cleaning didn't work: {e_surgical}" + ) # Attempt 2: json5 (more forgiving) try: result = json5.loads(json_text) - if isinstance(result, dict): - logger.debug(f"[{self.power_name}] Successfully parsed JSON object with json5") - return result - else: - logger.warning( - f"[{self.power_name}] Parsed with json5, but got type {type(result)} instead of dict. Content: {str(result)[:200]}" - ) + logger.debug( + f"[{self.power_name}] Successfully parsed with json5" + ) + return result except Exception as e: logger.debug(f"[{self.power_name}] json5 parse failed: {e}") # Attempt 3: json-repair try: result = json_repair.loads(json_text) - if isinstance(result, dict): - logger.debug(f"[{self.power_name}] Successfully parsed JSON object with json-repair") - return result - else: - logger.warning( - f"[{self.power_name}] Parsed with json-repair, but got type {type(result)} instead of dict. Content: {str(result)[:200]}" - ) + logger.debug( + f"[{self.power_name}] Successfully parsed with json-repair" + ) + return result except Exception as e: logger.debug(f"[{self.power_name}] json-repair failed: {e}") - # New Strategy: Parse markdown-like key-value pairs - # Example: **key:** value - # This comes after trying to find fenced JSON blocks but before broad fallbacks. 
- if not matches: # Only try if previous patterns didn't yield a dict from a match - try: - markdown_data = {} - # Regex to find **key:** value, where value can be multi-line until next **key:** or end of string - md_pattern = r"\*\*(?P[^:]+):\*\*\s*(?P[\s\S]*?)(?=(?:\n\s*\*\*|$))" - for match in re.finditer(md_pattern, text, re.DOTALL): - key_name = match.group("key").strip() - value_str = match.group("value").strip() - try: - # Attempt to evaluate the value string as a Python literal - # This handles lists, strings, numbers, booleans, None - actual_value = ast.literal_eval(value_str) - markdown_data[key_name] = actual_value - except (ValueError, SyntaxError) as e_ast: - # If ast.literal_eval fails, it might be a plain string that doesn't look like a literal - # Or it could be genuinely malformed. We'll take it as a string if it's not empty. - if value_str: # Only add if it's a non-empty string - markdown_data[key_name] = value_str # Store as string - logger.debug( - f"[{self.power_name}] ast.literal_eval failed for key '{key_name}', value '{value_str[:50]}...': {e_ast}. Storing as string if non-empty." - ) - - if markdown_data: # If we successfully extracted any key-value pairs this way - # Check if essential keys are present, if needed, or just return if any data found - # For now, if markdown_data is populated, we assume it's the intended structure. - logger.debug(f"[{self.power_name}] Successfully parsed markdown-like key-value format. 
Data: {str(markdown_data)[:200]}") - return markdown_data - else: - logger.debug(f"[{self.power_name}] No markdown-like key-value pairs found or parsed using markdown strategy.") - except Exception as e_md_parse: - logger.error(f"[{self.power_name}] Error during markdown-like key-value parsing: {e_md_parse}", exc_info=True) - # Fallback: Try to find ANY JSON-like structure try: # Find the first { and last } @@ -265,19 +262,26 @@ class DiplomacyAgent: potential_json = text[start:end] # Try all parsers on this extracted text - for parser_name, parser_func in [("json", json.loads), ("json5", json5.loads), ("json_repair", json_repair.loads)]: + for parser_name, parser_func in [ + ("json", json.loads), + ("json5", json5.loads), + ("json_repair", json_repair.loads), + ]: try: - cleaned = self._clean_json_text(potential_json) if parser_name == "json" else potential_json + cleaned = ( + self._clean_json_text(potential_json) + if parser_name == "json" + else potential_json + ) result = parser_func(cleaned) - if isinstance(result, dict): - logger.debug(f"[{self.power_name}] Fallback parse succeeded with {parser_name}, got dict.") - return result - else: - logger.warning( - f"[{self.power_name}] Fallback parse with {parser_name} succeeded, but got type {type(result)} instead of dict. 
Content: {str(result)[:200]}" - ) + logger.debug( + f"[{self.power_name}] Fallback parse succeeded with {parser_name}" + ) + return result except Exception as e: - logger.debug(f"[{self.power_name}] Fallback {parser_name} failed: {e}") + logger.debug( + f"[{self.power_name}] Fallback {parser_name} failed: {e}" + ) # If standard parsers failed, try aggressive cleaning try: @@ -288,13 +292,8 @@ class DiplomacyAgent: text_fixed = re.sub(r": *\'([^\']*)\'", r': "\1"', text_fixed) result = json.loads(text_fixed) - if isinstance(result, dict): - logger.debug(f"[{self.power_name}] Aggressive cleaning worked, got dict.") - return result - else: - logger.warning( - f"[{self.power_name}] Aggressive cleaning worked, but got type {type(result)} instead of dict. Content: {str(result)[:200]}" - ) + logger.debug(f"[{self.power_name}] Aggressive cleaning worked") + return result except json.JSONDecodeError: pass @@ -304,17 +303,12 @@ class DiplomacyAgent: # Last resort: Try json-repair on the entire text try: result = json_repair.loads(text) - if isinstance(result, dict): - logger.warning(f"[{self.power_name}] Last resort json-repair succeeded, got dict.") - return result - else: - logger.warning( - f"[{self.power_name}] Last resort json-repair succeeded, but got type {type(result)} instead of dict. Content: {str(result)[:200]}" - ) - # If even the last resort doesn't give a dict, return empty dict - return {} - except Exception: - logger.error(f"[{self.power_name}] All JSON extraction attempts failed. Original text: {original_text[:500]}...") + logger.warning(f"[{self.power_name}] Last resort json-repair succeeded") + return result + except Exception as e: + logger.error( + f"[{self.power_name}] All JSON extraction attempts failed. Original text: {original_text[:500]}..." 
+ ) return {} def _clean_json_text(self, text: str) -> str: @@ -354,84 +348,220 @@ class DiplomacyAgent: logger.debug(f"[{self.power_name} Journal]: {entry}") def add_diary_entry(self, entry: str, phase: str): - """Adds a formatted entry to both the permanent and context diaries.""" + """Adds a formatted entry string to the agent's private diary.""" if not isinstance(entry, str): entry = str(entry) # Ensure it's a string formatted_entry = f"[{phase}] {entry}" - - # Add to the permanent, unabridged record - self.full_private_diary.append(formatted_entry) - # Also add to the context diary, which will be periodically rebuilt self.private_diary.append(formatted_entry) - + # Keep diary to a manageable size, e.g., last 100 entries + # self.private_diary = self.private_diary[-100:] logger.info( - f"[{self.power_name}] DIARY ENTRY ADDED for {phase}. Total full entries: {len(self.full_private_diary)}. New entry: {entry[:100]}..." + f"[{self.power_name}] DIARY ENTRY ADDED for {phase}. Total entries: {len(self.private_diary)}. New entry: {entry[:100]}..." ) - def format_private_diary_for_prompt(self) -> str: - """ - Formats the context diary for inclusion in a prompt. - It separates the single consolidated history entry from all recent full entries. - """ - logger.info(f"[{self.power_name}] Formatting diary for prompt. Total context entries: {len(self.private_diary)}") - if not self.private_diary: - logger.warning(f"[{self.power_name}] No diary entries found when formatting for prompt") - return "(No diary entries yet)" - - # The context diary (self.private_diary) is already structured correctly by the - # consolidation process. It contains at most one consolidated entry at the start, - # followed by ALL unconsolidated entries. - - consolidated_entry = "" - # Find the single consolidated entry, which should be the first one if it exists. 
- if self.private_diary and self.private_diary[0].startswith("[CONSOLIDATED HISTORY]"): - consolidated_entry = self.private_diary[0] - # Get all other entries, which are the full, unconsolidated ones. - recent_entries = self.private_diary[1:] - else: - # No consolidated entry found, so all entries are "recent". - recent_entries = self.private_diary - - # Combine them into a formatted string - formatted_diary = "" - if consolidated_entry: - # No need for a header, the entry itself is the header. - formatted_diary += consolidated_entry - formatted_diary += "\n\n" - - if recent_entries: - formatted_diary += "--- RECENT FULL DIARY ENTRIES ---\n" - # Use join on the full list of recent entries, not a slice. - formatted_diary += "\n\n".join(recent_entries) - - if not formatted_diary: - return "(No diary entries to show)" - + def format_private_diary_for_prompt(self, max_entries=40) -> str: + """Formats the last N private diary entries for inclusion in a prompt.""" logger.info( - f"[{self.power_name}] Formatted diary with {1 if consolidated_entry else 0} consolidated and {len(recent_entries)} recent entries. Preview: {formatted_diary[:250]}..." + f"[{self.power_name}] Formatting diary for prompt. Total entries: {len(self.private_diary)}" + ) + if not self.private_diary: + logger.warning( + f"[{self.power_name}] No diary entries found when formatting for prompt" + ) + return "(No diary entries yet)" + # Get the most recent entries + recent_entries = self.private_diary[-max_entries:] + formatted_diary = "\n".join(recent_entries) + logger.info( + f"[{self.power_name}] Formatted {len(recent_entries)} diary entries for prompt. Preview: {formatted_diary[:200]}..." ) return formatted_diary - # The consolidate_entire_diary method has been moved to ai_diplomacy/diary_logic.py - # to improve modularity and avoid circular dependencies. - # It is now called as `run_diary_consolidation(agent, game, ...)` from the main game loop. 
+ async def consolidate_year_diary_entries( + self, year: str, game: "Game", log_file_path: str + ): + """ + Consolidates all diary entries from a specific year into a concise summary. + This is called when we're 2+ years past a given year to prevent context bloat. - async def generate_negotiation_diary_entry(self, game: "Game", game_history: GameHistory, log_file_path: str): + Args: + year: The year to consolidate (e.g., "1901") + game: The game object for context + log_file_path: Path for logging LLM responses + """ + logger.info(f"[{self.power_name}] CONSOLIDATION CALLED for year {year}") + logger.info( + f"[{self.power_name}] Current diary has {len(self.private_diary)} total entries" + ) + + # Debug: Log first few diary entries to see their format + if self.private_diary: + logger.info(f"[{self.power_name}] Sample diary entries:") + for i, entry in enumerate(self.private_diary[:3]): + logger.info(f"[{self.power_name}] Entry {i}: {entry[:100]}...") + + # Find all diary entries from the specified year + year_entries = [] + # Update pattern to match phase format: [S1901M], [F1901M], [W1901A] etc. + # We need to check for [S1901, [F1901, [W1901 + patterns_to_check = [f"[S{year}", f"[F{year}", f"[W{year}"] + logger.info( + f"[{self.power_name}] Looking for entries matching patterns: {patterns_to_check}" + ) + + for i, entry in enumerate(self.private_diary): + # Check if entry matches any of our patterns + for pattern in patterns_to_check: + if pattern in entry: + year_entries.append(entry) + logger.info( + f"[{self.power_name}] Found matching entry {i} with pattern '{pattern}': {entry[:50]}..." 
+ ) + break # Don't add the same entry multiple times + + if not year_entries: + logger.info( + f"[{self.power_name}] No diary entries found for year {year} using patterns: {patterns_to_check}" + ) + return + + logger.info( + f"[{self.power_name}] Found {len(year_entries)} entries to consolidate for year {year}" + ) + + # Load consolidation prompt template + prompt_template = _load_prompt_file("diary_consolidation_prompt.txt") + if not prompt_template: + logger.error( + f"[{self.power_name}] Could not load diary_consolidation_prompt.txt" + ) + return + + # Format entries for the prompt + year_diary_text = "\n\n".join(year_entries) + + # Create the consolidation prompt + prompt = prompt_template.format( + power_name=self.power_name, year=year, year_diary_entries=year_diary_text + ) + + raw_response = "" + success_status = "FALSE" + + try: + # Use Gemini 2.5 Flash for consolidation if available + consolidation_client = load_model_client( + "openrouter-google/gemini-2.5-flash-preview" + ) + if not consolidation_client: + consolidation_client = self.client # Fallback to agent's own client + logger.warning( + f"[{self.power_name}] Using agent's own model for consolidation instead of Gemini Flash" + ) + + # Use the enhanced wrapper with retry logic + from .utils import run_llm_and_log + + raw_response = await run_llm_and_log( + client=consolidation_client, + prompt=prompt, + log_file_path=log_file_path, + power_name=self.power_name, + phase=game.current_short_phase, + response_type="diary_consolidation", + ) + + if raw_response and raw_response.strip(): + consolidated_entry = raw_response.strip() + + # Separate entries into consolidated and regular entries + consolidated_entries = [] + regular_entries = [] + + for entry in self.private_diary: + if entry.startswith("[CONSOLIDATED"): + consolidated_entries.append(entry) + else: + # Check if this is an entry we should remove (from the year being consolidated) + should_keep = True + for pattern in patterns_to_check: + if 
pattern in entry: + should_keep = False + break + if should_keep: + regular_entries.append(entry) + + # Create the new consolidated summary + consolidated_summary = f"[CONSOLIDATED {year}] {consolidated_entry}" + + # Sort consolidated entries by year (ascending) to keep historical order + consolidated_entries.append(consolidated_summary) + consolidated_entries.sort( + key=lambda x: x[14:18], reverse=False + ) # Extract year from "[CONSOLIDATED YYYY]" + + # Rebuild diary with consolidated entries at the top + self.private_diary = consolidated_entries + regular_entries + + success_status = "TRUE" + logger.info( + f"[{self.power_name}] Successfully consolidated {len(year_entries)} entries from {year} into 1 summary" + ) + logger.info( + f"[{self.power_name}] New diary structure - Total entries: {len(self.private_diary)}, Consolidated: {len(consolidated_entries)}, Regular: {len(regular_entries)}" + ) + logger.debug(f"[{self.power_name}] Diary order preview:") + for i, entry in enumerate(self.private_diary[:5]): + logger.debug(f"[{self.power_name}] Entry {i}: {entry[:50]}...") + else: + logger.warning( + f"[{self.power_name}] Empty response from consolidation LLM" + ) + success_status = "FALSE: Empty response" + + except Exception as e: + logger.error( + f"[{self.power_name}] Error consolidating diary entries: {e}", + exc_info=True, + ) + success_status = f"FALSE: {type(e).__name__}" + finally: + if log_file_path: + log_llm_response( + log_file_path=log_file_path, + model_name=consolidation_client.model_name + if "consolidation_client" in locals() + else self.client.model_name, + power_name=self.power_name, + phase=game.current_short_phase, + response_type="diary_consolidation", + raw_input_prompt=prompt, + raw_response=raw_response, + success=success_status, + ) + + async def generate_negotiation_diary_entry( + self, game: "Game", game_history: "GameHistory", log_file_path: str + ): """ Generates a diary entry summarizing negotiations and updates relationships. 
This method now includes comprehensive LLM interaction logging. """ - logger.info(f"[{self.power_name}] Generating negotiation diary entry for {game.current_short_phase}...") + logger.info( + f"[{self.power_name}] Generating negotiation diary entry for {game.current_short_phase}..." + ) full_prompt = "" # For logging in finally block raw_response = "" # For logging in finally block success_status = "Failure: Initialized" # Default try: - # Load the prompt template file - prompt_template_content = load_prompt(get_prompt_path("negotiation_diary_prompt.txt"), prompts_dir=self.prompts_dir) + # Load the template file but safely preprocess it first + prompt_template_content = _load_prompt_file("negotiation_diary_prompt.txt") if not prompt_template_content: - logger.error(f"[{self.power_name}] Could not load {get_prompt_path('negotiation_diary_prompt.txt')}. Skipping diary entry.") + logger.error( + f"[{self.power_name}] Could not load negotiation_diary_prompt.txt. Skipping diary entry." + ) success_status = "Failure: Prompt file not loaded" return # Exit early if prompt can't be loaded @@ -439,33 +569,48 @@ class DiplomacyAgent: board_state_dict = game.get_state() board_state_str = f"Units: {board_state_dict.get('units', {})}, Centers: {board_state_dict.get('centers', {})}" - messages_this_round = game_history.get_messages_this_round(power_name=self.power_name, current_phase_name=game.current_short_phase) - if not messages_this_round.strip() or messages_this_round.startswith("\n(No messages"): - messages_this_round = ( - "(No messages involving your power this round that require deep reflection for diary. 
Focus on overall situation.)" - ) + messages_this_round = game_history.get_messages_this_round( + power_name=self.power_name, current_phase_name=game.current_short_phase + ) + if not messages_this_round.strip() or messages_this_round.startswith( + "\n(No messages" + ): + messages_this_round = "(No messages involving your power this round that require deep reflection for diary. Focus on overall situation.)" current_relationships_str = json.dumps(self.relationships) current_goals_str = json.dumps(self.goals) formatted_diary = self.format_private_diary_for_prompt() # Get ignored messages context - ignored_messages = game_history.get_ignored_messages_by_power(self.power_name) + ignored_messages = game_history.get_ignored_messages_by_power( + self.power_name + ) ignored_context = "" if ignored_messages: ignored_context = "\n\nPOWERS NOT RESPONDING TO YOUR MESSAGES:\n" for power, msgs in ignored_messages.items(): ignored_context += f"{power}:\n" for msg in msgs[-2:]: # Show last 2 ignored messages per power - ignored_context += f" - Phase {msg['phase']}: {msg['content'][:100]}...\n" + ignored_context += ( + f" - Phase {msg['phase']}: {msg['content'][:100]}...\n" + ) else: - ignored_context = "\n\nAll powers have been responsive to your messages." + ignored_context = ( + "\n\nAll powers have been responsive to your messages." 
+ ) # Do aggressive preprocessing of the template to fix the problematic patterns # This includes removing any newlines or whitespace before JSON keys that cause issues - for pattern in ["negotiation_summary", "updated_relationships", "relationship_updates", "intent"]: + for pattern in [ + "negotiation_summary", + "updated_relationships", + "relationship_updates", + "intent", + ]: # Fix the "\n "key"" pattern that breaks .format() - prompt_template_content = re.sub(rf'\n\s*"{pattern}"', f'"{pattern}"', prompt_template_content) + prompt_template_content = re.sub( + rf'\n\s*"{pattern}"', f'"{pattern}"', prompt_template_content + ) # Escape all curly braces in JSON examples to prevent format() from interpreting them # First, temporarily replace the actual template variables @@ -479,7 +624,9 @@ class DiplomacyAgent: "ignored_messages_context", ] for var in temp_vars: - prompt_template_content = prompt_template_content.replace(f"{{{var}}}", f"<<{var}>>") + prompt_template_content = prompt_template_content.replace( + f"{{{var}}}", f"<<{var}>>" + ) # Now escape all remaining braces (which should be JSON) prompt_template_content = prompt_template_content.replace("{", "{{") @@ -487,7 +634,9 @@ class DiplomacyAgent: # Restore the template variables for var in temp_vars: - prompt_template_content = prompt_template_content.replace(f"<<{var}>>", f"{{{var}}}") + prompt_template_content = prompt_template_content.replace( + f"<<{var}>>", f"{{{var}}}" + ) # Create a dictionary with safe values for formatting format_vars = { @@ -506,80 +655,90 @@ class DiplomacyAgent: try: # Apply format with our set of variables full_prompt = prompt_template_content.format(**format_vars) - logger.info(f"[{self.power_name}] Successfully formatted prompt template after preprocessing.") + logger.info( + f"[{self.power_name}] Successfully formatted prompt template after preprocessing." 
+ ) success_status = "Using prompt file with preprocessing" except KeyError as e: - logger.error(f"[{self.power_name}] Error formatting negotiation diary prompt template: {e}. Skipping diary entry.") + logger.error( + f"[{self.power_name}] Error formatting negotiation diary prompt template: {e}. Skipping diary entry." + ) success_status = "Failure: Template formatting error" return # Exit early if prompt formatting fails - logger.debug(f"[{self.power_name}] Negotiation diary prompt:\n{full_prompt[:500]}...") + logger.debug( + f"[{self.power_name}] Negotiation diary prompt:\n{full_prompt[:500]}..." + ) - logger.debug(f"[{self.power_name}] Negotiation diary prompt:\n{full_prompt[:500]}...") + logger.debug( + f"[{self.power_name}] Negotiation diary prompt:\n{full_prompt[:500]}..." + ) raw_response = await run_llm_and_log( client=self.client, prompt=full_prompt, + log_file_path=log_file_path, # Pass the main log file path power_name=self.power_name, phase=game.current_short_phase, response_type="negotiation_diary_raw", # For run_llm_and_log context ) - logger.debug(f"[{self.power_name}] Raw negotiation diary response: {raw_response[:300]}...") + logger.debug( + f"[{self.power_name}] Raw negotiation diary response: {raw_response[:300]}..." 
+ ) parsed_data = None try: - # Conditionally format the response based on USE_UNFORMATTED_PROMPTS - if config.USE_UNFORMATTED_PROMPTS: - # Format the natural language response into JSON - formatted_response = await format_with_gemini_flash( - raw_response, - FORMAT_NEGOTIATION_DIARY, - power_name=self.power_name, - phase=game.current_short_phase, - log_file_path=log_file_path, - ) - else: - # Use the raw response directly (already formatted) - formatted_response = raw_response - parsed_data = self._extract_json_from_text(formatted_response) + parsed_data = self._extract_json_from_text(raw_response) logger.debug(f"[{self.power_name}] Parsed diary data: {parsed_data}") success_status = "Success: Parsed diary data" except json.JSONDecodeError as e: - logger.error(f"[{self.power_name}] Failed to parse JSON from diary response: {e}. Response: {raw_response[:300]}...") + logger.error( + f"[{self.power_name}] Failed to parse JSON from diary response: {e}. Response: {raw_response[:300]}..." + ) success_status = "Failure: JSONDecodeError" # Continue without parsed_data, rely on diary_entry_text if available or just log failure - diary_entry_text = "(LLM diary entry generation or parsing failed.)" # Fallback + diary_entry_text = ( + "(LLM diary entry generation or parsing failed.)" # Fallback + ) relationships_updated = False if parsed_data: # Fix 1: Be more robust about extracting the negotiation_summary field diary_text_candidate = None for key in ["negotiation_summary", "summary", "diary_entry"]: - if key in parsed_data and isinstance(parsed_data[key], str) and parsed_data[key].strip(): + if ( + key in parsed_data + and isinstance(parsed_data[key], str) + and parsed_data[key].strip() + ): diary_text_candidate = parsed_data[key].strip() - logger.info(f"[{self.power_name}] Successfully extracted '{key}' for diary.") + logger.info( + f"[{self.power_name}] Successfully extracted '{key}' for diary." 
+ ) break - if "intent" in parsed_data: - if diary_text_candidate == None: - diary_text_candidate = parsed_data["intent"] - else: - diary_text_candidate += "\nIntent: " + parsed_data["intent"] - if diary_text_candidate: diary_entry_text = diary_text_candidate else: - logger.warning(f"[{self.power_name}] Could not find valid summary field in diary response. Using fallback.") + logger.warning( + f"[{self.power_name}] Could not find valid summary field in diary response. Using fallback." + ) # Keep the default fallback text # Fix 2: Be more robust about extracting relationship updates new_relationships = None - for key in ["relationship_updates", "updated_relationships", "relationships"]: + for key in [ + "relationship_updates", + "updated_relationships", + "relationships", + ]: if key in parsed_data and isinstance(parsed_data[key], dict): new_relationships = parsed_data[key] - logger.info(f"[{self.power_name}] Successfully extracted '{key}' for relationship updates.") + logger.info( + f"[{self.power_name}] Successfully extracted '{key}' for relationship updates." + ) break if isinstance(new_relationships, dict): @@ -587,10 +746,18 @@ class DiplomacyAgent: for p, r in new_relationships.items(): p_upper = str(p).upper() r_title = str(r).title() - if p_upper in ALL_POWERS and p_upper != self.power_name and r_title in ALLOWED_RELATIONSHIPS: + if ( + p_upper in ALL_POWERS + and p_upper != self.power_name + and r_title in ALLOWED_RELATIONSHIPS + ): valid_new_rels[p_upper] = r_title - elif p_upper != self.power_name: # Log invalid relationship for a valid power - logger.warning(f"[{self.power_name}] Invalid relationship '{r}' for power '{p}' in diary update. Keeping old.") + elif ( + p_upper != self.power_name + ): # Log invalid relationship for a valid power + logger.warning( + f"[{self.power_name}] Invalid relationship '{r}' for power '{p}' in diary update. Keeping old." 
+ ) if valid_new_rels: # Log changes before applying @@ -602,34 +769,57 @@ class DiplomacyAgent: ) self.relationships.update(valid_new_rels) relationships_updated = True - success_status = "Success: Applied diary data (relationships updated)" + success_status = ( + "Success: Applied diary data (relationships updated)" + ) else: - logger.info(f"[{self.power_name}] No valid relationship updates found in diary response.") - if success_status == "Success: Parsed diary data": # If only parsing was successful before - success_status = "Success: Parsed, no valid relationship updates" + logger.info( + f"[{self.power_name}] No valid relationship updates found in diary response." + ) + if ( + success_status == "Success: Parsed diary data" + ): # If only parsing was successful before + success_status = ( + "Success: Parsed, no valid relationship updates" + ) elif new_relationships is not None: # It was provided but not a dict - logger.warning(f"[{self.power_name}] 'updated_relationships' from diary LLM was not a dictionary: {type(new_relationships)}") + logger.warning( + f"[{self.power_name}] 'updated_relationships' from diary LLM was not a dictionary: {type(new_relationships)}" + ) # Add the generated (or fallback) diary entry self.add_diary_entry(diary_entry_text, game.current_short_phase) if relationships_updated: - self.add_journal_entry(f"[{game.current_short_phase}] Relationships updated after negotiation diary: {self.relationships}") + self.add_journal_entry( + f"[{game.current_short_phase}] Relationships updated after negotiation diary: {self.relationships}" + ) # If success_status is still the default 'Parsed diary data' but no relationships were updated, refine it. 
- if success_status == "Success: Parsed diary data" and not relationships_updated: + if ( + success_status == "Success: Parsed diary data" + and not relationships_updated + ): success_status = "Success: Parsed, only diary text applied" except Exception as e: # Log the full exception details for better debugging - logger.error(f"[{self.power_name}] Caught unexpected error in generate_negotiation_diary_entry: {type(e).__name__}: {e}", exc_info=True) + logger.error( + f"[{self.power_name}] Caught unexpected error in generate_negotiation_diary_entry: {type(e).__name__}: {e}", + exc_info=True, + ) success_status = f"Failure: Exception ({type(e).__name__})" # Add a fallback diary entry in case of general error - self.add_diary_entry(f"(Error generating diary entry: {type(e).__name__})", game.current_short_phase) + self.add_diary_entry( + f"(Error generating diary entry: {type(e).__name__})", + game.current_short_phase, + ) finally: if log_file_path: # Ensure log_file_path is provided log_llm_response( log_file_path=log_file_path, - model_name=self.client.model_name if self.client else "UnknownModel", + model_name=self.client.model_name + if self.client + else "UnknownModel", power_name=self.power_name, phase=game.current_short_phase if game else "UnknownPhase", response_type="negotiation_diary", # Specific type for CSV logging @@ -638,34 +828,55 @@ class DiplomacyAgent: success=success_status, ) - async def generate_order_diary_entry(self, game: "Game", orders: List[str], log_file_path: str): + async def generate_order_diary_entry( + self, game: "Game", orders: List[str], log_file_path: str + ): """ Generates a diary entry reflecting on the decided orders. """ - logger.info(f"[{self.power_name}] Generating order diary entry for {game.current_short_phase}...") + logger.info( + f"[{self.power_name}] Generating order diary entry for {game.current_short_phase}..." 
+ ) - # Load the prompt template - prompt_template = load_prompt(get_prompt_path("order_diary_prompt.txt"), prompts_dir=self.prompts_dir) + # Load the template but we'll use it carefully with string interpolation + prompt_template = _load_prompt_file("order_diary_prompt.txt") if not prompt_template: - logger.error(f"[{self.power_name}] Could not load {get_prompt_path('order_diary_prompt.txt')}. Skipping diary entry.") + logger.error( + f"[{self.power_name}] Could not load order_diary_prompt.txt. Skipping diary entry." + ) return board_state_dict = game.get_state() board_state_str = f"Units: {board_state_dict.get('units', {})}, Centers: {board_state_dict.get('centers', {})}" - orders_list_str = "\n".join([f"- {o}" for o in orders]) if orders else "No orders submitted." + orders_list_str = ( + "\n".join([f"- {o}" for o in orders]) if orders else "No orders submitted." + ) goals_str = "\n".join([f"- {g}" for g in self.goals]) if self.goals else "None" - relationships_str = "\n".join([f"- {p}: {s}" for p, s in self.relationships.items()]) if self.relationships else "None" + relationships_str = ( + "\n".join([f"- {p}: {s}" for p, s in self.relationships.items()]) + if self.relationships + else "None" + ) # Do aggressive preprocessing on the template file # Fix any whitespace or formatting issues that could break .format() for pattern in ["order_summary"]: - prompt_template = re.sub(rf'\n\s*"{pattern}"', f'"{pattern}"', prompt_template) + prompt_template = re.sub( + rf'\n\s*"{pattern}"', f'"{pattern}"', prompt_template + ) # Escape all curly braces in JSON examples to prevent format() from interpreting them # First, temporarily replace the actual template variables - temp_vars = ["power_name", "current_phase", "orders_list_str", "board_state_str", "agent_goals", "agent_relationships"] + temp_vars = [ + "power_name", + "current_phase", + "orders_list_str", + "board_state_str", + "agent_goals", + "agent_relationships", + ] for var in temp_vars: prompt_template = 
prompt_template.replace(f"{{{var}}}", f"<<{var}>>") @@ -690,9 +901,13 @@ class DiplomacyAgent: # Try to use the template with proper formatting try: prompt = prompt_template.format(**format_vars) - logger.info(f"[{self.power_name}] Successfully formatted order diary prompt template.") + logger.info( + f"[{self.power_name}] Successfully formatted order diary prompt template." + ) except KeyError as e: - logger.error(f"[{self.power_name}] Error formatting order diary template: {e}. Skipping diary entry.") + logger.error( + f"[{self.power_name}] Error formatting order diary template: {e}. Skipping diary entry." + ) return # Exit early if prompt formatting fails logger.debug(f"[{self.power_name}] Order diary prompt:\n{prompt[:300]}...") @@ -702,7 +917,8 @@ class DiplomacyAgent: try: raw_response = await run_llm_and_log( client=self.client, - prompt=prompt, + prompt=prompt, + log_file_path=log_file_path, power_name=self.power_name, phase=game.current_short_phase, response_type="order_diary", @@ -714,32 +930,37 @@ class DiplomacyAgent: if raw_response: try: - # Conditionally format the response based on USE_UNFORMATTED_PROMPTS - if config.USE_UNFORMATTED_PROMPTS: - # Format the natural language response into JSON - formatted_response = await format_with_gemini_flash( - raw_response, FORMAT_ORDER_DIARY, power_name=self.power_name, phase=game.current_short_phase, log_file_path=log_file_path - ) - else: - # Use the raw response directly (already formatted) - formatted_response = raw_response - response_data = self._extract_json_from_text(formatted_response) + response_data = self._extract_json_from_text(raw_response) if response_data: # Directly attempt to get 'order_summary' as per the prompt diary_text_candidate = response_data.get("order_summary") - if isinstance(diary_text_candidate, str) and diary_text_candidate.strip(): + if ( + isinstance(diary_text_candidate, str) + and diary_text_candidate.strip() + ): actual_diary_text = diary_text_candidate success_status = 
"TRUE" - logger.info(f"[{self.power_name}] Successfully extracted 'order_summary' for order diary entry.") + logger.info( + f"[{self.power_name}] Successfully extracted 'order_summary' for order diary entry." + ) else: - logger.warning(f"[{self.power_name}] 'order_summary' missing, invalid, or empty. Value was: {diary_text_candidate}") - success_status = "FALSE" # Explicitly set false if not found or invalid + logger.warning( + f"[{self.power_name}] 'order_summary' missing, invalid, or empty. Value was: {diary_text_candidate}" + ) + success_status = ( + "FALSE" # Explicitly set false if not found or invalid + ) else: # response_data is None (JSON parsing failed) - logger.warning(f"[{self.power_name}] Failed to parse JSON from order diary LLM response.") + logger.warning( + f"[{self.power_name}] Failed to parse JSON from order diary LLM response." + ) success_status = "FALSE" except Exception as e: - logger.error(f"[{self.power_name}] Error processing order diary JSON: {e}. Raw response: {raw_response[:200]} ", exc_info=False) + logger.error( + f"[{self.power_name}] Error processing order diary JSON: {e}. Raw response: {raw_response[:200]} ", + exc_info=False, + ) success_status = "FALSE" log_llm_response( @@ -755,23 +976,35 @@ class DiplomacyAgent: if success_status == "TRUE" and actual_diary_text: self.add_diary_entry(actual_diary_text, game.current_short_phase) - logger.info(f"[{self.power_name}] Order diary entry generated and added.") - else: - fallback_diary = ( - f"Submitted orders for {game.current_short_phase}: {', '.join(orders)}. (LLM failed to generate a specific diary entry)" + logger.info( + f"[{self.power_name}] Order diary entry generated and added." ) + else: + fallback_diary = f"Submitted orders for {game.current_short_phase}: {', '.join(orders)}. (LLM failed to generate a specific diary entry)" self.add_diary_entry(fallback_diary, game.current_short_phase) - logger.warning(f"[{self.power_name}] Failed to generate specific order diary entry. 
Added fallback.") + logger.warning( + f"[{self.power_name}] Failed to generate specific order diary entry. Added fallback." + ) except Exception as e: # Ensure prompt is defined or handled if it might not be (it should be in this flow) - current_prompt = prompt if "prompt" in locals() else "[prompt_unavailable_in_exception]" - current_raw_response = raw_response if "raw_response" in locals() and raw_response is not None else f"Error: {e}" + current_prompt = ( + prompt if "prompt" in locals() else "[prompt_unavailable_in_exception]" + ) + current_raw_response = ( + raw_response + if "raw_response" in locals() and raw_response is not None + else f"Error: {e}" + ) log_llm_response( log_file_path=log_file_path, - model_name=self.client.model_name if hasattr(self, "client") else "UnknownModel", + model_name=self.client.model_name + if hasattr(self, "client") + else "UnknownModel", power_name=self.power_name, - phase=game.current_short_phase if "game" in locals() and hasattr(game, "current_short_phase") else "order_phase", + phase=game.current_short_phase + if "game" in locals() and hasattr(game, "current_short_phase") + else "order_phase", response_type="order_diary_exception", raw_input_prompt=current_prompt, # ENSURED (using current_prompt for safety) raw_response=current_raw_response, @@ -779,22 +1012,33 @@ class DiplomacyAgent: ) fallback_diary = f"Submitted orders for {game.current_short_phase}: {', '.join(orders)}. (Critical error in diary generation process)" self.add_diary_entry(fallback_diary, game.current_short_phase) - logger.warning(f"[{self.power_name}] Added fallback order diary entry due to critical error.") + logger.warning( + f"[{self.power_name}] Added fallback order diary entry due to critical error." 
+ ) # Rest of the code remains the same async def generate_phase_result_diary_entry( - self, game: "Game", game_history: "GameHistory", phase_summary: str, all_orders: Dict[str, List[str]], log_file_path: str + self, + game: "Game", + game_history: "GameHistory", + phase_summary: str, + all_orders: Dict[str, List[str]], + log_file_path: str, ): """ Generates a diary entry analyzing the actual phase results, comparing them to negotiations and identifying betrayals/collaborations. """ - logger.info(f"[{self.power_name}] Generating phase result diary entry for {game.current_short_phase}...") + logger.info( + f"[{self.power_name}] Generating phase result diary entry for {game.current_short_phase}..." + ) # Load the template - prompt_template = load_prompt("phase_result_diary_prompt.txt", prompts_dir=self.prompts_dir) + prompt_template = _load_prompt_file("phase_result_diary_prompt.txt") if not prompt_template: - logger.error(f"[{self.power_name}] Could not load phase_result_diary_prompt.txt. Skipping diary entry.") + logger.error( + f"[{self.power_name}] Could not load phase_result_diary_prompt.txt. Skipping diary entry." 
+ ) return # Format all orders for the prompt @@ -808,7 +1052,9 @@ class DiplomacyAgent: your_orders_str = ", ".join(your_orders) if your_orders else "No orders" # Get recent negotiations for this phase - messages_this_phase = game_history.get_messages_by_phase(game.current_short_phase) + messages_this_phase = game_history.get_messages_by_phase( + game.current_short_phase + ) your_negotiations = "" for msg in messages_this_phase: if msg.sender == self.power_name: @@ -820,7 +1066,9 @@ class DiplomacyAgent: your_negotiations = "No negotiations this phase" # Format relationships - relationships_str = "\n".join([f"{p}: {r}" for p, r in self.relationships.items()]) + relationships_str = "\n".join( + [f"{p}: {r}" for p, r in self.relationships.items()] + ) # Format goals goals_str = "\n".join([f"- {g}" for g in self.goals]) if self.goals else "None" @@ -837,7 +1085,9 @@ class DiplomacyAgent: your_actual_orders=your_orders_str, ) - logger.debug(f"[{self.power_name}] Phase result diary prompt:\n{prompt[:500]}...") + logger.debug( + f"[{self.power_name}] Phase result diary prompt:\n{prompt[:500]}..." + ) raw_response = "" success_status = "FALSE" @@ -846,6 +1096,7 @@ class DiplomacyAgent: raw_response = await run_llm_and_log( client=self.client, prompt=prompt, + log_file_path=log_file_path, power_name=self.power_name, phase=game.current_short_phase, response_type="phase_result_diary", @@ -856,17 +1107,22 @@ class DiplomacyAgent: diary_entry = raw_response.strip() self.add_diary_entry(diary_entry, game.current_short_phase) success_status = "TRUE" - logger.info(f"[{self.power_name}] Phase result diary entry generated and added.") - else: - fallback_diary = ( - f"Phase {game.current_short_phase} completed. Orders executed as: {your_orders_str}. (Failed to generate detailed analysis)" + logger.info( + f"[{self.power_name}] Phase result diary entry generated and added." ) + else: + fallback_diary = f"Phase {game.current_short_phase} completed. 
Orders executed as: {your_orders_str}. (Failed to generate detailed analysis)" self.add_diary_entry(fallback_diary, game.current_short_phase) - logger.warning(f"[{self.power_name}] Empty response from LLM. Added fallback phase result diary.") + logger.warning( + f"[{self.power_name}] Empty response from LLM. Added fallback phase result diary." + ) success_status = "FALSE" except Exception as e: - logger.error(f"[{self.power_name}] Error generating phase result diary: {e}", exc_info=True) + logger.error( + f"[{self.power_name}] Error generating phase result diary: {e}", + exc_info=True, + ) fallback_diary = f"Phase {game.current_short_phase} completed. Unable to analyze results due to error." self.add_diary_entry(fallback_diary, game.current_short_phase) success_status = f"FALSE: {type(e).__name__}" @@ -883,40 +1139,60 @@ class DiplomacyAgent: ) def log_state(self, prefix=""): - logger.debug(f"[{self.power_name}] {prefix} State: Goals={self.goals}, Relationships={self.relationships}") + logger.debug( + f"[{self.power_name}] {prefix} State: Goals={self.goals}, Relationships={self.relationships}" + ) # Make this method async async def analyze_phase_and_update_state( - self, game: "Game", board_state: dict, phase_summary: str, game_history: "GameHistory", log_file_path: str + self, + game: "Game", + board_state: dict, + phase_summary: str, + game_history: "GameHistory", + log_file_path: str, ): """Analyzes the outcome of the last phase and updates goals/relationships using the LLM.""" # Use self.power_name internally power_name = self.power_name current_phase = game.get_current_phase() # Get phase for logging - logger.info(f"[{power_name}] Analyzing phase {current_phase} outcome to update state...") + logger.info( + f"[{power_name}] Analyzing phase {current_phase} outcome to update state..." + ) self.log_state(f"Before State Update ({current_phase})") try: - # 1. 
Construct the prompt using the unformatted state update prompt file - prompt_template = load_prompt(get_prompt_path("state_update_prompt.txt"), prompts_dir=self.prompts_dir) + # 1. Construct the prompt using the dedicated state update prompt file + prompt_template = _load_prompt_file("state_update_prompt.txt") if not prompt_template: - logger.error(f"[{power_name}] Could not load {get_prompt_path('state_update_prompt.txt')}. Skipping state update.") + logger.error( + f"[{power_name}] Could not load state_update_prompt.txt. Skipping state update." + ) return # Get previous phase safely from history if not game_history or not game_history.phases: - logger.warning(f"[{power_name}] No game history available to analyze for {game.current_short_phase}. Skipping state update.") + logger.warning( + f"[{power_name}] No game history available to analyze for {game.current_short_phase}. Skipping state update." + ) return last_phase = game_history.phases[-1] - last_phase_name = last_phase.name # Assuming phase object has a 'name' attribute + last_phase_name = ( + last_phase.name + ) # Assuming phase object has a 'name' attribute # Use the provided phase_summary parameter instead of retrieving it last_phase_summary = phase_summary if not last_phase_summary: - logger.warning(f"[{power_name}] No summary available for previous phase {last_phase_name}. Skipping state update.") + logger.warning( + f"[{power_name}] No summary available for previous phase {last_phase_name}. Skipping state update." 
+ ) return + # == Fix: Use board_state parameter == + possible_orders = game.get_all_possible_orders() + # Get formatted diary for context formatted_diary = self.format_private_diary_for_prompt() @@ -924,29 +1200,42 @@ class DiplomacyAgent: game=game, board_state=board_state, # Use provided board_state parameter power_name=power_name, - possible_orders=None, # don't include possible orders in the state update prompt + possible_orders=possible_orders, # Pass possible_orders game_history=game_history, # Pass game_history - agent_goals=[], # pass empty goals to force model to regenerate goals each phase + agent_goals=self.goals, agent_relationships=self.relationships, agent_private_diary=formatted_diary, # Pass formatted diary - prompts_dir=self.prompts_dir, - include_messages=True, - display_phase=last_phase_name ) # Add previous phase summary to the information provided to the LLM other_powers = [p for p in game.powers if p != power_name] + # Create a readable board state string from the board_state dict + board_state_str = f"Board State:\n" + for p_name, power_data in board_state.get("powers", {}).items(): + # Get units and centers from the board state + units = power_data.get("units", []) + centers = power_data.get("centers", []) + board_state_str += f" {p_name}: Units={units}, Centers={centers}\n" + # Extract year from the phase name (e.g., "S1901M" -> "1901") - current_year = last_phase_name[1:5] if len(last_phase_name) >= 5 else "unknown" + current_year = ( + last_phase_name[1:5] if len(last_phase_name) >= 5 else "unknown" + ) prompt = prompt_template.format( power_name=power_name, current_year=current_year, current_phase=last_phase_name, # Analyze the phase that just ended - board_state_str=context, + board_state_str=board_state_str, phase_summary=last_phase_summary, # Use provided phase_summary other_powers=str(other_powers), # Pass as string representation + current_goals="\n".join([f"- {g}" for g in self.goals]) + if self.goals + else "None", + 
current_relationships=str(self.relationships) + if self.relationships + else "None", ) logger.debug(f"[{power_name}] State update prompt:\n{prompt}") @@ -955,62 +1244,74 @@ class DiplomacyAgent: response = await run_llm_and_log( client=self.client, prompt=prompt, + log_file_path=log_file_path, power_name=power_name, phase=current_phase, response_type="state_update", ) - logger.debug(f"[{power_name}] Raw LLM response for state update: {response}") + logger.debug( + f"[{power_name}] Raw LLM response for state update: {response}" + ) log_entry_response_type = "state_update" # Default for log_llm_response log_entry_success = "FALSE" # Default update_data = None # Initialize - if response is not None and response.strip(): # Check if response is not None and not just whitespace + if ( + response is not None and response.strip() + ): # Check if response is not None and not just whitespace try: - # Conditionally format the response based on USE_UNFORMATTED_PROMPTS - if config.USE_UNFORMATTED_PROMPTS: - # Format the natural language response into JSON - formatted_response = await format_with_gemini_flash( - response, FORMAT_STATE_UPDATE, power_name=power_name, phase=current_phase, log_file_path=log_file_path - ) - else: - # Use the raw response directly (already formatted) - formatted_response = response - update_data = self._extract_json_from_text(formatted_response) - logger.debug(f"[{power_name}] Successfully parsed JSON: {update_data}") + update_data = self._extract_json_from_text(response) + logger.debug( + f"[{power_name}] Successfully parsed JSON: {update_data}" + ) # Ensure update_data is a dictionary if not isinstance(update_data, dict): - logger.warning(f"[{power_name}] Extracted data is not a dictionary, type: {type(update_data)}") + logger.warning( + f"[{power_name}] Extracted data is not a dictionary, type: {type(update_data)}" + ) update_data = {} # Check if essential data ('updated_goals' or 'goals') is present AND is a list (for goals) # For relationships, 
check for 'updated_relationships' or 'relationships' AND is a dict. # Consider it TRUE if at least one of the primary data structures (goals or relationships) is present and correctly typed. - goals_present_and_valid = isinstance(update_data.get("updated_goals"), list) or isinstance(update_data.get("goals"), list) - rels_present_and_valid = isinstance(update_data.get("updated_relationships"), dict) or isinstance( - update_data.get("relationships"), dict - ) + goals_present_and_valid = isinstance( + update_data.get("updated_goals"), list + ) or isinstance(update_data.get("goals"), list) + rels_present_and_valid = isinstance( + update_data.get("updated_relationships"), dict + ) or isinstance(update_data.get("relationships"), dict) - if update_data and (goals_present_and_valid or rels_present_and_valid): + if update_data and ( + goals_present_and_valid or rels_present_and_valid + ): log_entry_success = "TRUE" elif update_data: # Parsed, but maybe not all essential data there or not correctly typed log_entry_success = "PARTIAL" log_entry_response_type = "state_update_partial_data" else: # Parsed to None or empty dict/list, or data not in expected format log_entry_success = "FALSE" - log_entry_response_type = "state_update_parsing_empty_or_invalid_data" + log_entry_response_type = ( + "state_update_parsing_empty_or_invalid_data" + ) except json.JSONDecodeError as e: - logger.error(f"[{power_name}] Failed to parse JSON response for state update: {e}. Raw response: {response}") + logger.error( + f"[{power_name}] Failed to parse JSON response for state update: {e}. 
Raw response: {response}" + ) log_entry_response_type = "state_update_json_error" # log_entry_success remains "FALSE" except Exception as e: - logger.error(f"[{power_name}] Unexpected error parsing state update: {e}") + logger.error( + f"[{power_name}] Unexpected error parsing state update: {e}" + ) log_entry_response_type = "state_update_unexpected_error" update_data = {} # log_entry_success remains "FALSE" else: # response was None or empty/whitespace - logger.error(f"[{power_name}] No valid response (None or empty) received from LLM for state update.") + logger.error( + f"[{power_name}] No valid response (None or empty) received from LLM for state update." + ) log_entry_response_type = "state_update_no_response" # log_entry_success remains "FALSE" @@ -1022,7 +1323,9 @@ class DiplomacyAgent: phase=current_phase, response_type=log_entry_response_type, raw_input_prompt=prompt, # ENSURED - raw_response=response if response is not None else "", # Handle if response is None + raw_response=response + if response is not None + else "", # Handle if response is None success=log_entry_success, ) @@ -1040,7 +1343,9 @@ class DiplomacyAgent: "updated_goals": self.goals, "updated_relationships": self.relationships, } - logger.warning(f"[{power_name}] Using existing goals and relationships as fallback: {update_data}") + logger.warning( + f"[{power_name}] Using existing goals and relationships as fallback: {update_data}" + ) # Check for both possible key names (prompt uses "goals"/"relationships", # but code was expecting "updated_goals"/"updated_relationships") @@ -1048,20 +1353,28 @@ class DiplomacyAgent: if updated_goals is None: updated_goals = update_data.get("goals") if updated_goals is not None: - logger.debug(f"[{power_name}] Using 'goals' key instead of 'updated_goals'") + logger.debug( + f"[{power_name}] Using 'goals' key instead of 'updated_goals'" + ) updated_relationships = update_data.get("updated_relationships") if updated_relationships is None: 
updated_relationships = update_data.get("relationships") if updated_relationships is not None: - logger.debug(f"[{power_name}] Using 'relationships' key instead of 'updated_relationships'") + logger.debug( + f"[{power_name}] Using 'relationships' key instead of 'updated_relationships'" + ) if isinstance(updated_goals, list): # Simple overwrite for now, could be more sophisticated (e.g., merging) self.goals = updated_goals - self.add_journal_entry(f"[{game.current_short_phase}] Goals updated based on {last_phase_name}: {self.goals}") + self.add_journal_entry( + f"[{game.current_short_phase}] Goals updated based on {last_phase_name}: {self.goals}" + ) else: - logger.warning(f"[{power_name}] LLM did not provide valid 'updated_goals' list in state update.") + logger.warning( + f"[{power_name}] LLM did not provide valid 'updated_goals' list in state update." + ) # Keep current goals, no update needed if isinstance(updated_relationships, dict): @@ -1074,21 +1387,31 @@ class DiplomacyAgent: p_upper = p.upper() if p_upper in ALL_POWERS and p_upper != power_name: # Check against allowed labels (case-insensitive) - r_title = r.title() if isinstance(r, str) else r # Convert "enemy" to "Enemy" etc. + r_title = ( + r.title() if isinstance(r, str) else r + ) # Convert "enemy" to "Enemy" etc. if r_title in ALLOWED_RELATIONSHIPS: valid_new_relationships[p_upper] = r_title else: invalid_count += 1 if invalid_count <= 2: # Only log first few to reduce noise - logger.warning(f"[{power_name}] Received invalid relationship label '{r}' for '{p}'. Ignoring.") + logger.warning( + f"[{power_name}] Received invalid relationship label '{r}' for '{p}'. Ignoring." + ) else: invalid_count += 1 - if invalid_count <= 2 and not p_upper.startswith(power_name): # Only log first few to reduce noise - logger.warning(f"[{power_name}] Received relationship for invalid/own power '{p}' (normalized: {p_upper}). 
Ignoring.") + if invalid_count <= 2 and not p_upper.startswith( + power_name + ): # Only log first few to reduce noise + logger.warning( + f"[{power_name}] Received relationship for invalid/own power '{p}' (normalized: {p_upper}). Ignoring." + ) # Summarize if there were many invalid entries if invalid_count > 2: - logger.warning(f"[{power_name}] {invalid_count} total invalid relationships were ignored.") + logger.warning( + f"[{power_name}] {invalid_count} total invalid relationships were ignored." + ) # Update relationships if the dictionary is not empty after validation if valid_new_relationships: @@ -1097,16 +1420,25 @@ class DiplomacyAgent: f"[{game.current_short_phase}] Relationships updated based on {last_phase_name}: {valid_new_relationships}" ) elif updated_relationships: # Log if the original dict wasn't empty but validation removed everything - logger.warning(f"[{power_name}] Found relationships in LLM response but none were valid after normalization. Using defaults.") + logger.warning( + f"[{power_name}] Found relationships in LLM response but none were valid after normalization. Using defaults." + ) else: # Log if the original dict was empty - logger.warning(f"[{power_name}] LLM did not provide valid 'updated_relationships' dict in state update.") + logger.warning( + f"[{power_name}] LLM did not provide valid 'updated_relationships' dict in state update." + ) # Keep current relationships, no update needed except FileNotFoundError: - logger.error(f"[{power_name}] state_update_prompt.txt not found. Skipping state update.") + logger.error( + f"[{power_name}] state_update_prompt.txt not found. Skipping state update." 
+ ) except Exception as e: # Catch any other unexpected errors during the update process - logger.error(f"[{power_name}] Error during state analysis/update for phase {game.current_short_phase}: {e}", exc_info=True) + logger.error( + f"[{power_name}] Error during state analysis/update for phase {game.current_short_phase}: {e}", + exc_info=True, + ) self.log_state(f"After State Update ({game.current_short_phase})") @@ -1120,10 +1452,16 @@ class DiplomacyAgent: """Updates the agent's perceived relationship with another power.""" if other_power != self.power_name: self.relationships[other_power] = status - self.add_journal_entry(f"Relationship with {other_power} updated to {status}.") - logger.info(f"[{self.power_name}] Relationship with {other_power} set to {status}.") + self.add_journal_entry( + f"Relationship with {other_power} updated to {status}." + ) + logger.info( + f"[{self.power_name}] Relationship with {other_power} set to {status}." + ) else: - logger.warning(f"[{self.power_name}] Attempted to set relationship with self.") + logger.warning( + f"[{self.power_name}] Attempted to set relationship with self." 
+ ) def get_agent_state_summary(self) -> str: """Returns a string summary of the agent's current state.""" @@ -1136,15 +1474,24 @@ class DiplomacyAgent: # summary += f"\n Last Journal Entry: {self.private_journal[-1]}" return summary - def generate_plan(self, game: Game, board_state: dict, game_history: "GameHistory") -> str: + def generate_plan( + self, game: "Game", board_state: dict, game_history: "GameHistory" + ) -> str: """Generates a strategic plan using the client and logs it.""" logger.info(f"Agent {self.power_name} generating strategic plan...") try: - plan = self.client.get_plan(game, board_state, self.power_name, game_history) - self.add_journal_entry(f"Generated plan for phase {game.current_phase}:\n{plan}") + plan = self.client.get_plan( + game, board_state, self.power_name, game_history + ) + self.add_journal_entry( + f"Generated plan for phase {game.current_phase}:\n{plan}" + ) logger.info(f"Agent {self.power_name} successfully generated plan.") return plan except Exception as e: logger.error(f"Agent {self.power_name} failed to generate plan: {e}") - self.add_journal_entry(f"Failed to generate plan for phase {game.current_phase} due to error: {e}") + self.add_journal_entry( + f"Failed to generate plan for phase {game.current_phase} due to error: {e}" + ) return "Error: Failed to generate plan." 
+ diff --git a/bot_client/config.py b/bot_client/config.py new file mode 100644 index 0000000..10e1cc6 --- /dev/null +++ b/bot_client/config.py @@ -0,0 +1,5 @@ +from pydantic_settings import BaseSettings + + +class Configuration(BaseSettings): + DEBUG: bool = False diff --git a/bot_client/lm_game_websocket.py b/bot_client/lm_game_websocket.py index a0f2fcd..2f12bb1 100644 --- a/bot_client/lm_game_websocket.py +++ b/bot_client/lm_game_websocket.py @@ -14,7 +14,6 @@ import os import json import asyncio from collections import defaultdict -import concurrent.futures # Suppress Gemini/PaLM gRPC warnings os.environ["GRPC_PYTHON_LOG_LEVEL"] = "40" @@ -23,9 +22,15 @@ os.environ["ABSL_MIN_LOG_LEVEL"] = "2" os.environ["GRPC_POLL_STRATEGY"] = "poll" # Import our WebSocket client instead of direct Game import -from websocket_diplomacy_client import WebSocketDiplomacyClient, connect_to_diplomacy_server -from diplomacy.engine.message import GLOBAL, Message -from diplomacy.utils.export import to_saved_game_format +from websocket_diplomacy_client import ( + WebSocketDiplomacyClient, + connect_to_diplomacy_server, +) + +import sys +import os + +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) from ai_diplomacy.clients import load_model_client from ai_diplomacy.utils import ( @@ -37,17 +42,11 @@ from ai_diplomacy.negotiations import conduct_negotiations from ai_diplomacy.planning import planning_phase from ai_diplomacy.game_history import GameHistory from ai_diplomacy.agent import DiplomacyAgent -import ai_diplomacy.narrative from ai_diplomacy.initialization import initialize_agent_state_ext +from loguru import logger dotenv.load_dotenv() -logger = logging.getLogger(__name__) -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s [%(levelname)s] %(name)s - %(message)s", - datefmt="%H:%M:%S", -) # Silence noisy dependencies logging.getLogger("httpx").setLevel(logging.WARNING) logging.getLogger("root").setLevel(logging.WARNING) @@ -127,69 +126,77 @@ def 
parse_arguments(): return parser.parse_args() -async def join_powers_for_testing(client: WebSocketDiplomacyClient, power_model_map: dict): +async def join_powers_for_testing( + client: WebSocketDiplomacyClient, power_model_map: dict +): """ Join multiple powers in the same game for testing purposes. This simulates having multiple AI players in one game. """ power_names = list(power_model_map.keys()) - + # Join additional powers beyond the first one for power_name in power_names[1:]: try: logger.info(f"Attempting to join power {power_name}") await client.channel.join_game( - game_id=client.game_id, - power_name=power_name + game_id=client.game_id, power_name=power_name ) logger.info(f"Successfully joined {power_name}") except Exception as e: logger.warning(f"Could not join {power_name}: {e}") -async def create_or_join_game(client: WebSocketDiplomacyClient, args, power_model_map: dict): +async def create_or_join_game( + client: WebSocketDiplomacyClient, args, power_model_map: dict +): """ Create a new game or join an existing one based on arguments. 
""" if args.game_id: # Join existing game logger.info(f"Joining existing game {args.game_id}") - + # List available games first to see what's available try: games = await client.list_games() - logger.info(f"Available games: {[g.get('game_id', 'unknown') for g in games]}") + logger.info( + f"Available games: {[g.get('game_id', 'unknown') for g in games]}" + ) except Exception as e: logger.warning(f"Could not list games: {e}") - + # For testing, we'll join as the first power in our model map first_power = list(power_model_map.keys())[0] - game = await client.join_game( - game_id=args.game_id, - power_name=first_power - ) - + game = await client.join_game(game_id=args.game_id, power_name=first_power) + if args.create_multi_power_game: await join_powers_for_testing(client, power_model_map) - + else: # Create new game logger.info("Creating new game") - + # Get the first power to control - first_power = list(power_model_map.keys())[0] if not args.create_multi_power_game else None - + first_power = ( + list(power_model_map.keys())[0] + if not args.create_multi_power_game + else None + ) + game = await client.create_game( map_name="standard", rules=["NO_PRESS", "IGNORE_ERRORS", "POWER_CHOICE"], power_name=first_power, - n_controls=7 if not args.create_multi_power_game else 1, # Lower requirement for testing - deadline=None # No time pressure for AI testing + n_controls=7 + if not args.create_multi_power_game + else 1, # Lower requirement for testing + deadline=None, # No time pressure for AI testing ) - + if args.create_multi_power_game: await join_powers_for_testing(client, power_model_map) - + return game @@ -223,26 +230,36 @@ async def main(): # Setup general file logging general_log_file_path = os.path.join(result_folder, "general_game.log") - file_handler = logging.FileHandler(general_log_file_path, mode='a') + file_handler = logging.FileHandler(general_log_file_path, mode="a") file_formatter = logging.Formatter( - "%(asctime)s - %(levelname)s - %(name)s - 
[%(funcName)s:%(lineno)d] - %(message)s", - datefmt="%Y-%m-%d %H:%M:%S" + "%(asctime)s - %(levelname)s - %(name)s - [%(funcName)s:%(lineno)d] - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", ) file_handler.setFormatter(file_formatter) file_handler.setLevel(logging.INFO) logging.getLogger().addHandler(file_handler) - + logging.info(f"General game logs will be appended to: {general_log_file_path}") # File paths manifesto_path = f"{result_folder}/game_manifesto.txt" - game_file_path = args.output if args.output else f"{result_folder}/lmvsgame_websocket.json" + game_file_path = ( + args.output if args.output else f"{result_folder}/lmvsgame_websocket.json" + ) overview_file_path = f"{result_folder}/overview.jsonl" llm_log_file_path = f"{result_folder}/llm_responses.csv" # Handle power model mapping if args.models: - powers_order = ["AUSTRIA", "ENGLAND", "FRANCE", "GERMANY", "ITALY", "RUSSIA", "TURKEY"] + powers_order = [ + "AUSTRIA", + "ENGLAND", + "FRANCE", + "GERMANY", + "ITALY", + "RUSSIA", + "TURKEY", + ] provided_models = [name.strip() for name in args.models.split(",")] if len(provided_models) != len(powers_order): logger.error( @@ -260,16 +277,16 @@ async def main(): hostname=args.hostname, port=args.port, username=args.username, - password=args.password + password=args.password, ) - + # Create or join game game = await create_or_join_game(client, args, power_model_map) logger.info(f"Game ID: {client.game_id}, Role: {client.game_role}") - + # Initialize game history game_history = GameHistory() - + # Add phase_summaries attribute if not present if not hasattr(client.game, "phase_summaries"): client.game.phase_summaries = {} @@ -278,7 +295,7 @@ async def main(): agents = {} initialization_tasks = [] logger.info("Initializing Diplomacy Agents for controlled powers...") - + # Determine which powers we're controlling controlled_powers = [] if client.power_name: @@ -286,7 +303,7 @@ async def main(): elif args.create_multi_power_game: # We're controlling multiple powers 
in test mode controlled_powers = list(power_model_map.keys()) - + for power_name in controlled_powers: model_id = power_model_map.get(power_name) if model_id and not client.get_power(power_name).is_eliminated(): @@ -294,28 +311,46 @@ async def main(): client_obj = load_model_client(model_id) agent = DiplomacyAgent(power_name=power_name, client=client_obj) agents[power_name] = agent - logger.info(f"Preparing initialization task for {power_name} with model {model_id}") + logger.info( + f"Preparing initialization task for {power_name} with model {model_id}" + ) initialization_tasks.append( - initialize_agent_state_ext(agent, client.game, game_history, llm_log_file_path) + initialize_agent_state_ext( + agent, client.game, game_history, llm_log_file_path + ) ) except Exception as e: - logger.error(f"Failed to create agent for {power_name} with model {model_id}: {e}", exc_info=True) + logger.error( + f"Failed to create agent for {power_name} with model {model_id}: {e}", + exc_info=True, + ) else: - logger.info(f"Skipping agent initialization for {power_name} (no model or eliminated)") + logger.info( + f"Skipping agent initialization for {power_name} (no model or eliminated)" + ) # Run initializations concurrently if initialization_tasks: - logger.info(f"Running {len(initialization_tasks)} agent initializations concurrently...") - initialization_results = await asyncio.gather(*initialization_tasks, return_exceptions=True) - + logger.info( + f"Running {len(initialization_tasks)} agent initializations concurrently..." 
+ ) + initialization_results = await asyncio.gather( + *initialization_tasks, return_exceptions=True + ) + initialized_powers = list(agents.keys()) for i, result in enumerate(initialization_results): if i < len(initialized_powers): power_name = initialized_powers[i] if isinstance(result, Exception): - logger.error(f"Failed to initialize agent state for {power_name}: {result}", exc_info=result) + logger.error( + f"Failed to initialize agent state for {power_name}: {result}", + exc_info=result, + ) else: - logger.info(f"Successfully initialized agent state for {power_name}.") + logger.info( + f"Successfully initialized agent state for {power_name}." + ) # Main game loop all_phase_relationships = {} @@ -324,15 +359,17 @@ async def main(): while not client.is_game_done: phase_start = time.time() current_phase = client.get_current_phase() - + # Synchronize with server to get latest state await client.synchronize() # Ensure the current phase is registered in the history game_history.add_phase(current_phase) current_short_phase = client.get_current_short_phase() - - logger.info(f"PHASE: {current_phase} (time so far: {phase_start - start_whole:.2f}s)") + + logger.info( + f"PHASE: {current_phase} (time so far: {phase_start - start_whole:.2f}s)" + ) # Prevent unbounded simulation based on year year_str = current_phase[1:5] @@ -344,7 +381,9 @@ async def main(): # Negotiations for movement phases if client.get_current_short_phase().endswith("M"): if args.num_negotiation_rounds > 0: - logger.info(f"Running {args.num_negotiation_rounds} rounds of negotiations...") + logger.info( + f"Running {args.num_negotiation_rounds} rounds of negotiations..." 
+ ) game_history = await conduct_negotiations( client.game, # Pass the NetworkGame object agents, @@ -354,7 +393,9 @@ async def main(): log_file_path=llm_log_file_path, ) else: - logger.info("Skipping negotiation phase as num_negotiation_rounds=0") + logger.info( + "Skipping negotiation phase as num_negotiation_rounds=0" + ) # Planning phase (if enabled) if args.planning_phase: @@ -368,17 +409,19 @@ async def main(): ) # Generate negotiation diary entries - logger.info(f"Generating negotiation diary entries for phase {current_short_phase}...") - active_powers_for_neg_diary = [p for p in agents.keys() if not client.get_power(p).is_eliminated()] - + logger.info( + f"Generating negotiation diary entries for phase {current_short_phase}..." + ) + active_powers_for_neg_diary = [ + p for p in agents.keys() if not client.get_power(p).is_eliminated() + ] + neg_diary_tasks = [] for power_name, agent in agents.items(): if not client.get_power(power_name).is_eliminated(): neg_diary_tasks.append( agent.generate_negotiation_diary_entry( - client.game, - game_history, - llm_log_file_path + client.game, game_history, llm_log_file_path ) ) if neg_diary_tasks: @@ -386,37 +429,51 @@ async def main(): # AI Decision Making: Get orders for each controlled power logger.info("Getting orders from agents...") - active_powers_for_orders = [p for p in agents.keys() if not client.get_power(p).is_eliminated()] - + active_powers_for_orders = [ + p for p in agents.keys() if not client.get_power(p).is_eliminated() + ] + order_tasks = [] order_power_names = [] board_state = client.get_state() for power_name, agent in agents.items(): if client.get_power(power_name).is_eliminated(): - logger.debug(f"Skipping order generation for eliminated power {power_name}.") + logger.debug( + f"Skipping order generation for eliminated power {power_name}." 
+ ) continue # Diagnostic logging - logger.info(f"--- Diagnostic Log for {power_name} in phase {current_phase} ---") + logger.info( + f"--- Diagnostic Log for {power_name} in phase {current_phase} ---" + ) try: orderable_locs = client.get_orderable_locations(power_name) - logger.info(f"[{power_name}][{current_phase}] Orderable locations: {orderable_locs}") + logger.info( + f"[{power_name}][{current_phase}] Orderable locations: {orderable_locs}" + ) actual_units = client.get_units(power_name) - logger.info(f"[{power_name}][{current_phase}] Actual units: {actual_units}") + logger.info( + f"[{power_name}][{current_phase}] Actual units: {actual_units}" + ) except Exception as e_diag: - logger.error(f"[{power_name}][{current_phase}] Error during diagnostic logging: {e_diag}") + logger.error( + f"[{power_name}][{current_phase}] Error during diagnostic logging: {e_diag}" + ) # Calculate possible orders possible_orders = gather_possible_orders(client.game, power_name) if not possible_orders: - logger.debug(f"No orderable locations for {power_name}; submitting empty orders.") + logger.debug( + f"No orderable locations for {power_name}; submitting empty orders." + ) await client.set_orders(power_name, []) continue order_power_names.append(power_name) diary_preview = agent.format_private_diary_for_prompt() - + order_tasks.append( get_valid_orders( client.game, @@ -436,8 +493,12 @@ async def main(): # Run order generation concurrently if order_tasks: - logger.debug(f"Running {len(order_tasks)} order generation tasks concurrently...") - order_results = await asyncio.gather(*order_tasks, return_exceptions=True) + logger.debug( + f"Running {len(order_tasks)} order generation tasks concurrently..." 
+ ) + order_results = await asyncio.gather( + *order_tasks, return_exceptions=True + ) else: order_results = [] @@ -447,37 +508,45 @@ async def main(): agent = agents[p_name] if isinstance(result, Exception): - logger.error(f"Error during get_valid_orders for {p_name}: {result}", exc_info=result) + logger.error( + f"Error during get_valid_orders for {p_name}: {result}", + exc_info=result, + ) await client.set_orders(p_name, []) elif result is None: - logger.warning(f"get_valid_orders returned None for {p_name}. Setting empty orders.") + logger.warning( + f"get_valid_orders returned None for {p_name}. Setting empty orders." + ) await client.set_orders(p_name, []) else: orders = result logger.debug(f"Validated orders for {p_name}: {orders}") if orders: await client.set_orders(p_name, orders) - logger.debug(f"Set orders for {p_name} in {current_short_phase}: {orders}") - + logger.debug( + f"Set orders for {p_name} in {current_short_phase}: {orders}" + ) + # Generate order diary entry try: await agent.generate_order_diary_entry( - client.game, - orders, - llm_log_file_path + client.game, orders, llm_log_file_path ) except Exception as e_diary: - logger.error(f"Error generating order diary for {p_name}: {e_diary}", exc_info=True) + logger.error( + f"Error generating order diary for {p_name}: {e_diary}", + exc_info=True, + ) else: await client.set_orders(p_name, []) # Process the game phase (if we have admin rights) logger.info(f"Processing orders for {current_phase}...") await simulate_game_processing(client) - + # Wait a moment for the server to process await asyncio.sleep(1) - + # Synchronize again to get results await client.synchronize() @@ -494,15 +563,24 @@ async def main(): # Collect relationships for this phase current_relationships_for_phase = {} for power_name, agent in agents.items(): - if power_name in client.powers and not client.get_power(power_name).is_eliminated(): + if ( + power_name in client.powers + and not 
client.get_power(power_name).is_eliminated() + ): current_relationships_for_phase[power_name] = agent.relationships - all_phase_relationships[current_short_phase] = current_relationships_for_phase + all_phase_relationships[current_short_phase] = ( + current_relationships_for_phase + ) # Generate phase result diary entries - logger.info(f"Generating phase result diary entries for completed phase {current_phase}...") - phase_summary = getattr(client.game, 'phase_summaries', {}).get(current_phase, "(Summary not generated)") + logger.info( + f"Generating phase result diary entries for completed phase {current_phase}..." + ) + phase_summary = getattr(client.game, "phase_summaries", {}).get( + current_phase, "(Summary not generated)" + ) all_orders_this_phase = current_order_history - + phase_result_diary_tasks = [] for power_name, agent in agents.items(): if not client.get_power(power_name).is_eliminated(): @@ -512,20 +590,25 @@ async def main(): game_history, phase_summary, all_orders_this_phase, - llm_log_file_path + llm_log_file_path, ) ) - + if phase_result_diary_tasks: await asyncio.gather(*phase_result_diary_tasks, return_exceptions=True) # State update analysis - logger.info(f"Starting state update analysis for completed phase {current_phase}...") + logger.info( + f"Starting state update analysis for completed phase {current_phase}..." 
+ ) current_board_state = client.get_state() - - active_agent_powers = [(p, power) for p, power in client.powers.items() - if p in agents and not power.is_eliminated()] - + + active_agent_powers = [ + (p, power) + for p, power in client.powers.items() + if p in agents and not power.is_eliminated() + ] + if active_agent_powers: state_update_tasks = [] for power_name, _ in active_agent_powers: @@ -539,7 +622,7 @@ async def main(): llm_log_file_path, ) ) - + if state_update_tasks: await asyncio.gather(*state_update_tasks, return_exceptions=True) @@ -563,29 +646,29 @@ async def main(): # Create a simplified saved game format # Note: The NetworkGame may not have all the same export capabilities as a local Game saved_game = { - 'game_id': client.game_id, - 'map_name': 'standard', - 'rules': ['NO_PRESS', 'IGNORE_ERRORS', 'POWER_CHOICE'], - 'phases': [], - 'powers': {}, - 'messages': {}, - 'phase_summaries': getattr(client.game, 'phase_summaries', {}), - 'final_agent_states': {} + "game_id": client.game_id, + "map_name": "standard", + "rules": ["NO_PRESS", "IGNORE_ERRORS", "POWER_CHOICE"], + "phases": [], + "powers": {}, + "messages": {}, + "phase_summaries": getattr(client.game, "phase_summaries", {}), + "final_agent_states": {}, } # Add final agent states for power_name, agent in agents.items(): - saved_game['final_agent_states'][power_name] = { + saved_game["final_agent_states"][power_name] = { "relationships": agent.relationships, "goals": agent.goals, } # Add power information for power_name, power in client.powers.items(): - saved_game['powers'][power_name] = { - 'centers': list(power.centers), - 'units': list(power.units), - 'is_eliminated': power.is_eliminated() + saved_game["powers"][power_name] = { + "centers": list(power.centers), + "units": list(power.units), + "is_eliminated": power.is_eliminated(), } logger.info(f"Saving game to {output_path}...") @@ -604,10 +687,11 @@ async def main(): logger.error(f"Error during game execution: {e}", exc_info=True) finally: 
# Clean up connection - if 'client' in locals(): + if "client" in locals(): await client.close() logger.info("Done.") if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file + asyncio.run(main()) + diff --git a/bot_client/multi_bot_launcher.py b/bot_client/multi_bot_launcher.py index 6ba18ac..93b7b20 100644 --- a/bot_client/multi_bot_launcher.py +++ b/bot_client/multi_bot_launcher.py @@ -8,41 +8,46 @@ to an existing game. import argparse import asyncio -import logging +from loguru import logger import subprocess import sys import time -import os from typing import List, Dict, Optional -from websocket_diplomacy_client import connect_to_diplomacy_server +# Add parent directory to path for ai_diplomacy imports (runtime only) +import sys +import os -logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +from websocket_diplomacy_client import connect_to_diplomacy_server +from diplomacy.engine.game import Game class MultiBotLauncher: """ Launcher for multiple bot players. - + Can either: 1. Create a new game and launch bots for all powers 2. 
Launch bots to join an existing game """ - - def __init__(self, - hostname: str = "localhost", - port: int = 8432, - base_username: str = "bot", - password: str = "password"): - + + def __init__( + self, + hostname: str = "localhost", + port: int = 8432, + base_username: str = "bot", + password: str = "password", + ): + self.game: Game self.hostname = hostname self.port = port self.base_username = base_username self.password = password self.bot_processes: List[subprocess.Popen] = [] self.game_id: Optional[str] = None - + # Default power to model mapping self.default_models = { "AUSTRIA": "gpt-3.5-turbo", @@ -51,102 +56,111 @@ class MultiBotLauncher: "GERMANY": "gpt-3.5-turbo", "ITALY": "gemini-pro", "RUSSIA": "gpt-4", - "TURKEY": "claude-3-sonnet" + "TURKEY": "claude-3-sonnet", } - + async def create_game(self, creator_power: str = "FRANCE") -> str: """ Create a new game and return the game ID. - + Args: creator_power: Which power should create the game - + Returns: Game ID of the created game """ logger.info("Creating new game...") - + # Connect as the game creator creator_username = f"{self.base_username}_{creator_power.lower()}" client = await connect_to_diplomacy_server( hostname=self.hostname, port=self.port, username=creator_username, - password=self.password + password=self.password, ) - + # Create the game - game = await client.create_game( + self.game = await client.create_game( map_name="standard", rules=["IGNORE_ERRORS", "POWER_CHOICE"], # Allow messages and power choice power_name=creator_power, n_controls=7, # Full 7-player game - deadline=None # No time pressure + deadline=None, # No time pressure ) - + game_id = client.game_id logger.info(f"Created game {game_id}") - + # Leave the game so the bot can join properly await client.game.leave() await client.close() - + assert game_id is not None, "game_id cannot be None, failed to create new game." 
return game_id - - def launch_bot(self, - power: str, - model: str, - game_id: str, - log_level: str = "INFO") -> subprocess.Popen: + + def launch_bot( + self, power: str, model: str, game_id: str, log_level: str = "INFO" + ) -> subprocess.Popen: """ Launch a single bot process. - + Args: power: Power name (e.g., "FRANCE") model: AI model to use game_id: Game ID to join log_level: Logging level - + Returns: subprocess.Popen object for the bot process """ username = f"{self.base_username}_{power.lower()}" - + cmd = [ - sys.executable, "single_bot_player.py", - "--hostname", self.hostname, - "--port", str(self.port), - "--username", username, - "--password", self.password, - "--power", power, - "--model", model, - "--game-id", game_id, - "--log-level", log_level + sys.executable, + "single_bot_player.py", + "--hostname", + self.hostname, + "--port", + str(self.port), + "--username", + username, + "--password", + self.password, + "--power", + power, + "--model", + model, + "--game-id", + game_id, + "--log-level", + log_level, ] - + logger.info(f"Launching bot for {power} with model {model}") logger.debug(f"Command: {' '.join(cmd)}") - + # Launch bot in a new process process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, - bufsize=1 # Line buffered + bufsize=1, # Line buffered ) - + return process - - async def launch_all_bots(self, - game_id: str, - models: Optional[Dict[str, str]] = None, - powers: Optional[List[str]] = None, - log_level: str = "INFO", - stagger_delay: float = 2.0): + + async def launch_all_bots( + self, + game_id: str, + models: Optional[Dict[str, str]] = None, + powers: Optional[List[str]] = None, + log_level: str = "INFO", + stagger_delay: float = 2.0, + ): """ Launch bots for all specified powers. 
- + Args: game_id: Game ID to join models: Mapping of power to model name (uses defaults if None) @@ -156,47 +170,49 @@ class MultiBotLauncher: """ if models is None: models = self.default_models.copy() - + if powers is None: powers = list(self.default_models.keys()) - + logger.info(f"Launching bots for {len(powers)} powers...") - + for i, power in enumerate(powers): model = models.get(power, "gpt-3.5-turbo") - + try: process = self.launch_bot(power, model, game_id, log_level) self.bot_processes.append(process) - - logger.info(f"Launched bot {i+1}/{len(powers)}: {power} (PID: {process.pid})") - + + logger.info( + f"Launched bot {i + 1}/{len(powers)}: {power} (PID: {process.pid})" + ) + # Stagger the launches to avoid overwhelming the server if i < len(powers) - 1: # Don't delay after the last bot await asyncio.sleep(stagger_delay) - + except Exception as e: logger.error(f"Failed to launch bot for {power}: {e}") - + logger.info(f"All {len(self.bot_processes)} bots launched successfully") - + def monitor_bots(self, check_interval: float = 10.0): """ Monitor bot processes and log their output. 
- + Args: check_interval: How often to check bot status (seconds) """ logger.info("Monitoring bot processes...") - + try: while self.bot_processes: active_processes = [] - - for i, process in enumerate(self.bot_processes): + + for _, process in enumerate(self.bot_processes): if process.poll() is None: # Still running active_processes.append(process) - + # Read and log any output (non-blocking) try: while True: @@ -209,55 +225,61 @@ class MultiBotLauncher: else: # Process has ended return_code = process.returncode - logger.info(f"Bot process {process.pid} ended with code {return_code}") - + logger.info( + f"Bot process {process.pid} ended with code {return_code}" + ) + # Read any remaining output try: remaining_output = process.stdout.read() if remaining_output: - print(f"Bot-{process.pid} final output: {remaining_output}") + print( + f"Bot-{process.pid} final output: {remaining_output}" + ) except: pass - + self.bot_processes = active_processes - + if self.bot_processes: logger.debug(f"{len(self.bot_processes)} bots still running") time.sleep(check_interval) else: logger.info("All bots have finished") break - + except KeyboardInterrupt: logger.info("Received interrupt signal, stopping bots...") self.stop_all_bots() - + def stop_all_bots(self): """Stop all bot processes.""" logger.info("Stopping all bot processes...") - + for process in self.bot_processes: if process.poll() is None: # Still running logger.info(f"Terminating bot process {process.pid}") process.terminate() - + # Wait a bit for graceful shutdown try: process.wait(timeout=5) except subprocess.TimeoutExpired: logger.warning(f"Force killing bot process {process.pid}") process.kill() - + self.bot_processes.clear() logger.info("All bots stopped") - - async def run_full_game(self, - models: Optional[Dict[str, str]] = None, - log_level: str = "INFO", - creator_power: str = "FRANCE"): + + async def run_full_game( + self, + models: Optional[Dict[str, str]] = None, + log_level: str = "INFO", + creator_power: str 
= "FRANCE", + ): """ Create a game and launch all bots for a complete game. - + Args: models: Power to model mapping log_level: Logging level for bots @@ -267,29 +289,31 @@ class MultiBotLauncher: # Create the game game_id = await self.create_game(creator_power) self.game_id = game_id - + # Wait a moment for the server to be ready await asyncio.sleep(2) - + # Launch all bots await self.launch_all_bots(game_id, models, log_level=log_level) - + # Monitor the bots self.monitor_bots() - + except Exception as e: logger.error(f"Error running full game: {e}", exc_info=True) finally: self.stop_all_bots() - - async def join_existing_game(self, - game_id: str, - powers: List[str], - models: Optional[Dict[str, str]] = None, - log_level: str = "INFO"): + + async def join_existing_game( + self, + game_id: str, + powers: List[str], + models: Optional[Dict[str, str]] = None, + log_level: str = "INFO", + ): """ Launch bots to join an existing game. - + Args: game_id: Game ID to join powers: List of powers to launch bots for @@ -298,13 +322,13 @@ class MultiBotLauncher: """ try: self.game_id = game_id - + # Launch bots for specified powers await self.launch_all_bots(game_id, models, powers, log_level) - + # Monitor the bots self.monitor_bots() - + except Exception as e: logger.error(f"Error joining existing game: {e}", exc_info=True) finally: @@ -314,41 +338,51 @@ class MultiBotLauncher: def parse_arguments(): """Parse command line arguments.""" parser = argparse.ArgumentParser(description="Launch multiple bot players") - + parser.add_argument("--hostname", default="localhost", help="Server hostname") parser.add_argument("--port", type=int, default=8432, help="Server port") parser.add_argument("--username-base", default="bot", help="Base username for bots") parser.add_argument("--password", default="password", help="Password for all bots") - parser.add_argument("--game-id", help="Game ID to join (creates new if not specified)") - parser.add_argument("--powers", nargs="+", 
help="Powers to launch bots for (default: all)") - parser.add_argument("--models", help="Comma-separated list of models in power order") + parser.add_argument( + "--game-id", help="Game ID to join (creates new if not specified)" + ) + parser.add_argument( + "--powers", nargs="+", help="Powers to launch bots for (default: all)" + ) + parser.add_argument( + "--models", help="Comma-separated list of models in power order" + ) parser.add_argument("--log-level", default="INFO", help="Logging level") - parser.add_argument("--creator-power", default="FRANCE", help="Power that creates the game") - + parser.add_argument( + "--creator-power", default="FRANCE", help="Power that creates the game" + ) + return parser.parse_args() async def main(): """Main entry point.""" args = parse_arguments() - + launcher = MultiBotLauncher( hostname=args.hostname, port=args.port, base_username=args.username_base, - password=args.password + password=args.password, ) - + # Parse models if provided models = None if args.models: model_list = [m.strip() for m in args.models.split(",")] powers = args.powers or list(launcher.default_models.keys()) if len(model_list) != len(powers): - logger.error(f"Number of models ({len(model_list)}) must match number of powers ({len(powers)})") + logger.error( + f"Number of models ({len(model_list)}) must match number of powers ({len(powers)})" + ) return models = dict(zip(powers, model_list)) - + try: if args.game_id: # Join existing game @@ -357,16 +391,16 @@ async def main(): game_id=args.game_id, powers=powers, models=models, - log_level=args.log_level + log_level=args.log_level, ) else: # Create new game and launch all bots await launcher.run_full_game( models=models, log_level=args.log_level, - creator_power=args.creator_power + creator_power=args.creator_power, ) - + except KeyboardInterrupt: logger.info("Interrupted by user") except Exception as e: @@ -374,4 +408,4 @@ async def main(): if __name__ == "__main__": - asyncio.run(main()) \ No newline at end 
of file + asyncio.run(main()) diff --git a/bot_client/pyproject.toml b/bot_client/pyproject.toml index 6900c7d..7140b7d 100644 --- a/bot_client/pyproject.toml +++ b/bot_client/pyproject.toml @@ -5,14 +5,26 @@ description = "Add your description here" readme = "README.md" requires-python = ">=3.13" dependencies = [ + "aiohttp>=3.12.13", "anthropic>=0.54.0", "diplomacy", "dotenv>=0.9.9", + "google-generativeai>=0.8.5", "json-repair>=0.46.2", "json5>=0.12.0", + "loguru>=0.7.3", "openai>=1.87.0", + "pydantic>=2.11.7", + "pydantic-settings>=2.9.1", "tornado>=6.5.1", ] [tool.uv.sources] diplomacy = { path = "../diplomacy", editable = true } + +[tool.pyright] +extraPaths = [".."] +include = [".", ".."] + +[tool.ruff] +src = ["..", "."] diff --git a/bot_client/single_bot_player.py b/bot_client/single_bot_player.py index 8053180..9561507 100644 --- a/bot_client/single_bot_player.py +++ b/bot_client/single_bot_player.py @@ -8,12 +8,11 @@ as a separate process for each bot in a multi-player game. import argparse import asyncio -import logging import os -import time import signal -from typing import Optional, Dict, Any, List +from typing import Optional import dotenv +from loguru import logger # Suppress warnings os.environ["GRPC_PYTHON_LOG_LEVEL"] = "40" @@ -21,41 +20,54 @@ os.environ["GRPC_VERBOSITY"] = "ERROR" os.environ["ABSL_MIN_LOG_LEVEL"] = "2" os.environ["GRPC_POLL_STRATEGY"] = "poll" -from websocket_diplomacy_client import connect_to_diplomacy_server -from diplomacy.communication import notifications -from diplomacy.engine.message import Message, GLOBAL +# Add parent directory to path for ai_diplomacy imports (runtime only) +import sys + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +from websocket_diplomacy_client import ( + connect_to_diplomacy_server, + WebSocketDiplomacyClient, +) +from diplomacy.engine.message import Message from ai_diplomacy.clients import load_model_client from ai_diplomacy.utils import get_valid_orders, 
gather_possible_orders from ai_diplomacy.game_history import GameHistory from ai_diplomacy.agent import DiplomacyAgent from ai_diplomacy.initialization import initialize_agent_state_ext +from config import Configuration dotenv.load_dotenv() -logger = logging.getLogger(__name__) +config = Configuration() + +if config.DEBUG: + import tracemalloc + + tracemalloc.start() class SingleBotPlayer: """ A single bot player that connects to a Diplomacy server and plays as one power. - + The bot waits for game events from the server and responds appropriately: - When it's time to submit orders, generates and submits them - When messages are received, processes them and potentially responds - When the game phase updates, analyzes the new situation """ - - def __init__(self, - hostname: str = "localhost", - port: int = 8432, - username: str = "bot_player", - password: str = "password", - power_name: str = "FRANCE", - model_name: str = "gpt-3.5-turbo", - game_id: Optional[str] = None, - log_level: str = "INFO"): - + + def __init__( + self, + hostname: str = "localhost", + port: int = 8432, + username: str = "bot_player", + password: str = "password", + power_name: str = "FRANCE", + model_name: str = "gpt-3.5-turbo", + game_id: Optional[str] = None, + ): self.hostname = hostname self.port = port self.username = username @@ -63,50 +75,45 @@ class SingleBotPlayer: self.power_name = power_name self.model_name = model_name self.game_id = game_id - - # Setup logging - log_format = f"[{self.username}:{self.power_name}] %(asctime)s - %(levelname)s - %(message)s" - logging.basicConfig(level=getattr(logging, log_level.upper()), format=log_format) - + # Bot state - self.client = None - self.agent = None + self.client: WebSocketDiplomacyClient + self.agent: DiplomacyAgent self.game_history = GameHistory() self.running = True self.current_phase = None self.waiting_for_orders = False self.orders_submitted = False - + # Track error stats self.error_stats = {"conversation_errors": 0, 
"order_decoding_errors": 0} - + # Setup signal handlers for graceful shutdown signal.signal(signal.SIGINT, self._signal_handler) signal.signal(signal.SIGTERM, self._signal_handler) - + def _signal_handler(self, signum, frame): """Handle shutdown signals gracefully.""" logger.info(f"Received signal {signum}, shutting down...") self.running = False - + async def connect_and_initialize(self): """Connect to the server and initialize the bot.""" logger.info(f"Connecting to {self.hostname}:{self.port} as {self.username}") - + # Connect to server self.client = await connect_to_diplomacy_server( hostname=self.hostname, port=self.port, username=self.username, - password=self.password + password=self.password, ) - + # Join or create game if self.game_id: logger.info(f"Joining existing game {self.game_id} as {self.power_name}") game = await self.client.join_game( - game_id=self.game_id, - power_name=self.power_name + game_id=self.game_id, power_name=self.power_name ) else: logger.info(f"Creating new game as {self.power_name}") @@ -115,155 +122,163 @@ class SingleBotPlayer: rules=["IGNORE_ERRORS", "POWER_CHOICE"], # Allow messages power_name=self.power_name, n_controls=7, # Full game - deadline=None + deadline=None, ) logger.info(f"Created game {self.client.game_id}") - + # Initialize AI agent logger.info(f"Initializing AI agent with model {self.model_name}") model_client = load_model_client(self.model_name) self.agent = DiplomacyAgent(power_name=self.power_name, client=model_client) - + # Initialize agent state - await initialize_agent_state_ext(self.agent, self.client.game, self.game_history, None) - + await initialize_agent_state_ext( + self.agent, self.client.game, self.game_history, None + ) + # Setup game event callbacks self._setup_event_callbacks() - + # Get initial game state await self.client.synchronize() self.current_phase = self.client.get_current_phase() self.game_history.add_phase(self.current_phase) - + logger.info(f"Bot initialized. 
Current phase: {self.current_phase}") logger.info(f"Game status: {self.client.game.status}") - + # Check if we need to submit orders immediately await self._check_if_orders_needed() - - def _setup_event_callbacks(self): + + def _setup_event_callbacks(self): """Setup callbacks for game events from the server.""" - + # Game phase updates (new turn) self.client.game.add_on_game_phase_update(self._on_phase_update) - + # Game processing (orders executed) self.client.game.add_on_game_processed(self._on_game_processed) - + # Messages received self.client.game.add_on_game_message_received(self._on_message_received) - + # Game status changes - self.client.game.add_on_game_status_update(self._on_status_update) - + self.client.game.add_on_game_status_update(self._on_status_update) + # Power updates (other players joining/leaving) self.client.game.add_on_powers_controllers(self._on_powers_update) - + logger.debug("Event callbacks setup complete") - + async def _on_phase_update(self, game, notification): """Handle game phase updates.""" logger.info(f"Phase update received: {notification.phase_data}") - + # Update our game state await self.client.synchronize() - + new_phase = self.client.get_current_phase() if new_phase != self.current_phase: logger.info(f"New phase: {new_phase} (was: {self.current_phase})") self.current_phase = new_phase self.game_history.add_phase(new_phase) self.orders_submitted = False - + # Check if we need to submit orders for this new phase await self._check_if_orders_needed() - + async def _on_game_processed(self, game, notification): """Handle game processing (when orders are executed).""" logger.info("Game processed - orders have been executed") - + # Synchronize to get the results await self.client.synchronize() - + # Analyze the results await self._analyze_phase_results() - + self.orders_submitted = False self.waiting_for_orders = False - + async def _on_message_received(self, game, notification): """Handle incoming diplomatic
messages.""" message = notification.message - logger.info(f"Message received from {message.sender} to {message.recipient}: {message.message}") - + logger.info( + f"Message received from {message.sender} to {message.recipient}: {message.message}" + ) + # Add message to game history self.game_history.add_message( phase=message.phase, sender=message.sender, recipient=message.recipient, - content=message.message + content=message.message, ) - + # If it's a private message to us, consider responding if message.recipient == self.power_name and message.sender != self.power_name: await self._consider_message_response(message) - + async def _on_status_update(self, game, notification): """Handle game status changes.""" logger.info(f"Game status updated: {notification.status}") - + if notification.status in ["COMPLETED", "CANCELED"]: logger.info("Game has ended") self.running = False - + async def _on_powers_update(self, game, notification): """Handle power controller updates (players joining/leaving).""" logger.info("Powers controllers updated") # Could implement logic to react to new players joining - + async def _check_if_orders_needed(self): """Check if we need to submit orders for the current phase.""" if self.orders_submitted: return - + # Check if it's a phase where we can submit orders current_short_phase = self.client.get_current_short_phase() - + # We submit orders in Movement and Retreat phases - if current_short_phase.endswith('M') or current_short_phase.endswith('R'): + if current_short_phase.endswith("M") or current_short_phase.endswith("R"): # Check if we have units that can receive orders try: - orderable_locations = self.client.get_orderable_locations(self.power_name) + orderable_locations = self.client.get_orderable_locations( + self.power_name + ) if orderable_locations: logger.info(f"Orders needed for phase {current_short_phase}") self.waiting_for_orders = True await self._submit_orders() else: - logger.info(f"No orderable locations for {self.power_name} 
in {current_short_phase}") + logger.info( + f"No orderable locations for {self.power_name} in {current_short_phase}" + ) except Exception as e: logger.error(f"Error checking orderable locations: {e}") - + async def _submit_orders(self): """Generate and submit orders for the current phase.""" if self.orders_submitted: logger.debug("Orders already submitted for this phase") return - + try: logger.info("Generating orders...") - + # Get current board state board_state = self.client.get_state() - + # Get possible orders possible_orders = gather_possible_orders(self.client.game, self.power_name) - + if not possible_orders: logger.info("No possible orders, submitting empty order set") await self.client.set_orders(self.power_name, []) self.orders_submitted = True return - + # Generate orders using AI orders = await get_valid_orders( game=self.client.game, @@ -272,32 +287,32 @@ class SingleBotPlayer: power_name=self.power_name, possible_orders=possible_orders, game_history=self.game_history, - error_stats=self.error_stats, + model_error_stats=self.error_stats, agent_goals=self.agent.goals, agent_relationships=self.agent.relationships, agent_private_diary_str=self.agent.format_private_diary_for_prompt(), - phase=self.current_phase + phase=self.current_phase, ) - + # Submit orders if orders: logger.info(f"Submitting orders: {orders}") await self.client.set_orders(self.power_name, orders) - + # Generate order diary entry await self.agent.generate_order_diary_entry( self.client.game, orders, - None # No log file path + None, # No log file path ) else: logger.info("No valid orders generated, submitting empty order set") await self.client.set_orders(self.power_name, []) - + self.orders_submitted = True self.waiting_for_orders = False logger.info("Orders submitted successfully") - + except Exception as e: logger.error(f"Error submitting orders: {e}", exc_info=True) # Submit empty orders as fallback @@ -306,81 +321,81 @@ class SingleBotPlayer: self.orders_submitted = True except 
Exception as fallback_error: logger.error(f"Failed to submit fallback orders: {fallback_error}") - + async def _analyze_phase_results(self): """Analyze the results of the previous phase.""" try: logger.info("Analyzing phase results...") - + # Get current board state after processing board_state = self.client.get_state() - + # Generate a simple phase summary phase_summary = f"Phase {self.current_phase} completed." - + # Update agent state based on results await self.agent.analyze_phase_and_update_state( game=self.client.game, board_state=board_state, phase_summary=phase_summary, game_history=self.game_history, - log_file_path=None + log_file_path=None, ) - + logger.info("Phase analysis complete") - + except Exception as e: logger.error(f"Error analyzing phase results: {e}", exc_info=True) - + async def _consider_message_response(self, message: Message): """Consider whether to respond to a diplomatic message.""" try: # Simple logic: if someone greets us, greet back - if any(word in message.message.lower() for word in ['hello', 'hi', 'greetings']): + if any( + word in message.message.lower() for word in ["hello", "hi", "greetings"] + ): response = f"Hello {message.sender}! Good to hear from you." 
await self.client.send_message( - sender=self.power_name, - recipient=message.sender, - message=response + sender=self.power_name, recipient=message.sender, message=response ) logger.info(f"Sent response to {message.sender}: {response}") - + except Exception as e: logger.error(f"Error responding to message: {e}") - + async def run(self): """Main bot loop.""" try: await self.connect_and_initialize() - + logger.info(f"Bot {self.username} ({self.power_name}) is now running...") - + # Main event loop while self.running and not self.client.is_game_done: try: # Synchronize with server periodically await self.client.synchronize() - + # Check if we need to submit orders await self._check_if_orders_needed() - + # Sleep for a bit before next iteration await asyncio.sleep(5) - + except Exception as e: logger.error(f"Error in main loop: {e}", exc_info=True) await asyncio.sleep(10) # Wait longer on error - + if self.client.is_game_done: logger.info("Game has finished") else: logger.info("Bot shutting down") - + except Exception as e: logger.error(f"Fatal error in bot: {e}", exc_info=True) finally: await self.cleanup() - + async def cleanup(self): """Clean up resources.""" try: @@ -396,23 +411,25 @@ class SingleBotPlayer: def parse_arguments(): """Parse command line arguments.""" parser = argparse.ArgumentParser(description="Single bot player for Diplomacy") - + parser.add_argument("--hostname", default="localhost", help="Server hostname") parser.add_argument("--port", type=int, default=8432, help="Server port") parser.add_argument("--username", default="bot_player", help="Bot username") parser.add_argument("--password", default="password", help="Bot password") parser.add_argument("--power", default="FRANCE", help="Power to control") parser.add_argument("--model", default="gpt-3.5-turbo", help="AI model to use") - parser.add_argument("--game-id", help="Game ID to join (creates new if not specified)") + parser.add_argument( + "--game-id", help="Game ID to join (creates new if not 
specified)" + ) parser.add_argument("--log-level", default="INFO", help="Logging level") - + return parser.parse_args() async def main(): """Main entry point.""" args = parse_arguments() - + bot = SingleBotPlayer( hostname=args.hostname, port=args.port, @@ -421,11 +438,10 @@ async def main(): power_name=args.power, model_name=args.model, game_id=args.game_id, - log_level=args.log_level ) - + await bot.run() if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file + asyncio.run(main()) diff --git a/bot_client/uv.lock b/bot_client/uv.lock index 8be8428..733b39b 100644 --- a/bot_client/uv.lock +++ b/bot_client/uv.lock @@ -2,6 +2,61 @@ version = 1 revision = 2 requires-python = ">=3.13" +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + +[[package]] +name = "aiohttp" +version = "3.12.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/6e/ab88e7cb2a4058bed2f7870276454f85a7c56cd6da79349eb314fc7bbcaa/aiohttp-3.12.13.tar.gz", hash = "sha256:47e2da578528264a12e4e3dd8dd72a7289e5f812758fe086473fab037a10fcce", size = 7819160, upload-time 
= "2025-06-14T15:15:41.354Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/0f/db19abdf2d86aa1deec3c1e0e5ea46a587b97c07a16516b6438428b3a3f8/aiohttp-3.12.13-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d4a18e61f271127465bdb0e8ff36e8f02ac4a32a80d8927aa52371e93cd87938", size = 694910, upload-time = "2025-06-14T15:14:30.604Z" }, + { url = "https://files.pythonhosted.org/packages/d5/81/0ab551e1b5d7f1339e2d6eb482456ccbe9025605b28eed2b1c0203aaaade/aiohttp-3.12.13-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:532542cb48691179455fab429cdb0d558b5e5290b033b87478f2aa6af5d20ace", size = 472566, upload-time = "2025-06-14T15:14:32.275Z" }, + { url = "https://files.pythonhosted.org/packages/34/3f/6b7d336663337672d29b1f82d1f252ec1a040fe2d548f709d3f90fa2218a/aiohttp-3.12.13-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d7eea18b52f23c050ae9db5d01f3d264ab08f09e7356d6f68e3f3ac2de9dfabb", size = 464856, upload-time = "2025-06-14T15:14:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/26/7f/32ca0f170496aa2ab9b812630fac0c2372c531b797e1deb3deb4cea904bd/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad7c8e5c25f2a26842a7c239de3f7b6bfb92304593ef997c04ac49fb703ff4d7", size = 1703683, upload-time = "2025-06-14T15:14:36.034Z" }, + { url = "https://files.pythonhosted.org/packages/ec/53/d5513624b33a811c0abea8461e30a732294112318276ce3dbf047dbd9d8b/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6af355b483e3fe9d7336d84539fef460120c2f6e50e06c658fe2907c69262d6b", size = 1684946, upload-time = "2025-06-14T15:14:38Z" }, + { url = "https://files.pythonhosted.org/packages/37/72/4c237dd127827b0247dc138d3ebd49c2ded6114c6991bbe969058575f25f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a95cf9f097498f35c88e3609f55bb47b28a5ef67f6888f4390b3d73e2bac6177", size = 1737017, upload-time = 
"2025-06-14T15:14:39.951Z" }, + { url = "https://files.pythonhosted.org/packages/0d/67/8a7eb3afa01e9d0acc26e1ef847c1a9111f8b42b82955fcd9faeb84edeb4/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8ed8c38a1c584fe99a475a8f60eefc0b682ea413a84c6ce769bb19a7ff1c5ef", size = 1786390, upload-time = "2025-06-14T15:14:42.151Z" }, + { url = "https://files.pythonhosted.org/packages/48/19/0377df97dd0176ad23cd8cad4fd4232cfeadcec6c1b7f036315305c98e3f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0b9170d5d800126b5bc89d3053a2363406d6e327afb6afaeda2d19ee8bb103", size = 1708719, upload-time = "2025-06-14T15:14:44.039Z" }, + { url = "https://files.pythonhosted.org/packages/61/97/ade1982a5c642b45f3622255173e40c3eed289c169f89d00eeac29a89906/aiohttp-3.12.13-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:372feeace612ef8eb41f05ae014a92121a512bd5067db8f25101dd88a8db11da", size = 1622424, upload-time = "2025-06-14T15:14:45.945Z" }, + { url = "https://files.pythonhosted.org/packages/99/ab/00ad3eea004e1d07ccc406e44cfe2b8da5acb72f8c66aeeb11a096798868/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a946d3702f7965d81f7af7ea8fb03bb33fe53d311df48a46eeca17e9e0beed2d", size = 1675447, upload-time = "2025-06-14T15:14:47.911Z" }, + { url = "https://files.pythonhosted.org/packages/3f/fe/74e5ce8b2ccaba445fe0087abc201bfd7259431d92ae608f684fcac5d143/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a0c4725fae86555bbb1d4082129e21de7264f4ab14baf735278c974785cd2041", size = 1707110, upload-time = "2025-06-14T15:14:50.334Z" }, + { url = "https://files.pythonhosted.org/packages/ef/c4/39af17807f694f7a267bd8ab1fbacf16ad66740862192a6c8abac2bff813/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b28ea2f708234f0a5c44eb6c7d9eb63a148ce3252ba0140d050b091b6e842d1", size = 1649706, upload-time = 
"2025-06-14T15:14:52.378Z" }, + { url = "https://files.pythonhosted.org/packages/38/e8/f5a0a5f44f19f171d8477059aa5f28a158d7d57fe1a46c553e231f698435/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d4f5becd2a5791829f79608c6f3dc745388162376f310eb9c142c985f9441cc1", size = 1725839, upload-time = "2025-06-14T15:14:54.617Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ac/81acc594c7f529ef4419d3866913f628cd4fa9cab17f7bf410a5c3c04c53/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:60f2ce6b944e97649051d5f5cc0f439360690b73909230e107fd45a359d3e911", size = 1759311, upload-time = "2025-06-14T15:14:56.597Z" }, + { url = "https://files.pythonhosted.org/packages/38/0d/aabe636bd25c6ab7b18825e5a97d40024da75152bec39aa6ac8b7a677630/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:69fc1909857401b67bf599c793f2183fbc4804717388b0b888f27f9929aa41f3", size = 1708202, upload-time = "2025-06-14T15:14:58.598Z" }, + { url = "https://files.pythonhosted.org/packages/1f/ab/561ef2d8a223261683fb95a6283ad0d36cb66c87503f3a7dde7afe208bb2/aiohttp-3.12.13-cp313-cp313-win32.whl", hash = "sha256:7d7e68787a2046b0e44ba5587aa723ce05d711e3a3665b6b7545328ac8e3c0dd", size = 420794, upload-time = "2025-06-14T15:15:00.939Z" }, + { url = "https://files.pythonhosted.org/packages/9d/47/b11d0089875a23bff0abd3edb5516bcd454db3fefab8604f5e4b07bd6210/aiohttp-3.12.13-cp313-cp313-win_amd64.whl", hash = "sha256:5a178390ca90419bfd41419a809688c368e63c86bd725e1186dd97f6b89c2706", size = 446735, upload-time = "2025-06-14T15:15:02.858Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/b5/6d55e80f6d8a08ce22b982eafa278d823b541c925f11ee774b0b9c43473d/aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54", size = 19424, upload-time = 
"2024-12-13T17:10:40.86Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597, upload-time = "2024-12-13T17:10:38.469Z" }, +] + [[package]] name = "annotated-types" version = "0.7.0" @@ -42,6 +97,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" }, ] +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + [[package]] name = "bcrypt" version = "4.3.0" @@ -97,26 +161,45 @@ name = "bot-client" version = "0.1.0" source = { virtual = "." 
} dependencies = [ + { name = "aiohttp" }, { name = "anthropic" }, { name = "diplomacy" }, { name = "dotenv" }, + { name = "google-generativeai" }, { name = "json-repair" }, { name = "json5" }, + { name = "loguru" }, { name = "openai" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, { name = "tornado" }, ] [package.metadata] requires-dist = [ + { name = "aiohttp", specifier = ">=3.12.13" }, { name = "anthropic", specifier = ">=0.54.0" }, { name = "diplomacy", editable = "../diplomacy" }, { name = "dotenv", specifier = ">=0.9.9" }, + { name = "google-generativeai", specifier = ">=0.8.5" }, { name = "json-repair", specifier = ">=0.46.2" }, { name = "json5", specifier = ">=0.12.0" }, + { name = "loguru", specifier = ">=0.7.3" }, { name = "openai", specifier = ">=1.87.0" }, + { name = "pydantic", specifier = ">=2.11.7" }, + { name = "pydantic-settings", specifier = ">=2.9.1" }, { name = "tornado", specifier = ">=6.5.1" }, ] +[[package]] +name = "cachetools" +version = "5.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/81/3747dad6b14fa2cf53fcf10548cf5aea6913e96fab41a3c198676f8948a5/cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4", size = 28380, upload-time = "2025-02-20T21:01:19.524Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/72/76/20fa66124dbe6be5cafeb312ece67de6b61dd91a0247d1ea13db4ebb33c2/cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a", size = 10080, upload-time = "2025-02-20T21:01:16.647Z" }, +] + [[package]] name = "certifi" version = "2025.6.15" @@ -126,6 +209,28 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = 
"2025-06-15T02:45:49.977Z" }, ] +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, + { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" 
}, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, + { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +] + [[package]] name = "colorama" version = "0.4.6" @@ -192,6 +297,191 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b2/b7/545d2c10c1fc15e48653c91efde329a790f2eecfbbf2bd16003b5db2bab0/dotenv-0.9.9-py2.py3-none-any.whl", hash = "sha256:29cf74a087b31dafdb5a446b6d7e11cbce8ed2741540e2339c69fbef92c94ce9", size = 1892, upload-time = "2025-02-19T22:15:01.647Z" }, ] +[[package]] +name = "frozenlist" +version = "1.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = 
"2025-06-09T23:02:35.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/90/6b2cebdabdbd50367273c20ff6b57a3dfa89bd0762de02c3a1eb42cb6462/frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee", size = 79791, upload-time = "2025-06-09T23:01:09.368Z" }, + { url = "https://files.pythonhosted.org/packages/83/2e/5b70b6a3325363293fe5fc3ae74cdcbc3e996c2a11dde2fd9f1fb0776d19/frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d", size = 47165, upload-time = "2025-06-09T23:01:10.653Z" }, + { url = "https://files.pythonhosted.org/packages/f4/25/a0895c99270ca6966110f4ad98e87e5662eab416a17e7fd53c364bf8b954/frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43", size = 45881, upload-time = "2025-06-09T23:01:12.296Z" }, + { url = "https://files.pythonhosted.org/packages/19/7c/71bb0bbe0832793c601fff68cd0cf6143753d0c667f9aec93d3c323f4b55/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d", size = 232409, upload-time = "2025-06-09T23:01:13.641Z" }, + { url = "https://files.pythonhosted.org/packages/c0/45/ed2798718910fe6eb3ba574082aaceff4528e6323f9a8570be0f7028d8e9/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee", size = 225132, upload-time = "2025-06-09T23:01:15.264Z" }, + { url = "https://files.pythonhosted.org/packages/ba/e2/8417ae0f8eacb1d071d4950f32f229aa6bf68ab69aab797b72a07ea68d4f/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb", size = 237638, upload-time = 
"2025-06-09T23:01:16.752Z" }, + { url = "https://files.pythonhosted.org/packages/f8/b7/2ace5450ce85f2af05a871b8c8719b341294775a0a6c5585d5e6170f2ce7/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f", size = 233539, upload-time = "2025-06-09T23:01:18.202Z" }, + { url = "https://files.pythonhosted.org/packages/46/b9/6989292c5539553dba63f3c83dc4598186ab2888f67c0dc1d917e6887db6/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60", size = 215646, upload-time = "2025-06-09T23:01:19.649Z" }, + { url = "https://files.pythonhosted.org/packages/72/31/bc8c5c99c7818293458fe745dab4fd5730ff49697ccc82b554eb69f16a24/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00", size = 232233, upload-time = "2025-06-09T23:01:21.175Z" }, + { url = "https://files.pythonhosted.org/packages/59/52/460db4d7ba0811b9ccb85af996019f5d70831f2f5f255f7cc61f86199795/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b", size = 227996, upload-time = "2025-06-09T23:01:23.098Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/f4b39e904c03927b7ecf891804fd3b4df3db29b9e487c6418e37988d6e9d/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c", size = 242280, upload-time = "2025-06-09T23:01:24.808Z" }, + { url = "https://files.pythonhosted.org/packages/b8/33/3f8d6ced42f162d743e3517781566b8481322be321b486d9d262adf70bfb/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949", size 
= 217717, upload-time = "2025-06-09T23:01:26.28Z" }, + { url = "https://files.pythonhosted.org/packages/3e/e8/ad683e75da6ccef50d0ab0c2b2324b32f84fc88ceee778ed79b8e2d2fe2e/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca", size = 236644, upload-time = "2025-06-09T23:01:27.887Z" }, + { url = "https://files.pythonhosted.org/packages/b2/14/8d19ccdd3799310722195a72ac94ddc677541fb4bef4091d8e7775752360/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b", size = 238879, upload-time = "2025-06-09T23:01:29.524Z" }, + { url = "https://files.pythonhosted.org/packages/ce/13/c12bf657494c2fd1079a48b2db49fa4196325909249a52d8f09bc9123fd7/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e", size = 232502, upload-time = "2025-06-09T23:01:31.287Z" }, + { url = "https://files.pythonhosted.org/packages/d7/8b/e7f9dfde869825489382bc0d512c15e96d3964180c9499efcec72e85db7e/frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1", size = 39169, upload-time = "2025-06-09T23:01:35.503Z" }, + { url = "https://files.pythonhosted.org/packages/35/89/a487a98d94205d85745080a37860ff5744b9820a2c9acbcdd9440bfddf98/frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba", size = 43219, upload-time = "2025-06-09T23:01:36.784Z" }, + { url = "https://files.pythonhosted.org/packages/56/d5/5c4cf2319a49eddd9dd7145e66c4866bdc6f3dbc67ca3d59685149c11e0d/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d", size = 84345, upload-time = "2025-06-09T23:01:38.295Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/7d/ec2c1e1dc16b85bc9d526009961953df9cec8481b6886debb36ec9107799/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d", size = 48880, upload-time = "2025-06-09T23:01:39.887Z" }, + { url = "https://files.pythonhosted.org/packages/69/86/f9596807b03de126e11e7d42ac91e3d0b19a6599c714a1989a4e85eeefc4/frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b", size = 48498, upload-time = "2025-06-09T23:01:41.318Z" }, + { url = "https://files.pythonhosted.org/packages/5e/cb/df6de220f5036001005f2d726b789b2c0b65f2363b104bbc16f5be8084f8/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146", size = 292296, upload-time = "2025-06-09T23:01:42.685Z" }, + { url = "https://files.pythonhosted.org/packages/83/1f/de84c642f17c8f851a2905cee2dae401e5e0daca9b5ef121e120e19aa825/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74", size = 273103, upload-time = "2025-06-09T23:01:44.166Z" }, + { url = "https://files.pythonhosted.org/packages/88/3c/c840bfa474ba3fa13c772b93070893c6e9d5c0350885760376cbe3b6c1b3/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1", size = 292869, upload-time = "2025-06-09T23:01:45.681Z" }, + { url = "https://files.pythonhosted.org/packages/a6/1c/3efa6e7d5a39a1d5ef0abeb51c48fb657765794a46cf124e5aca2c7a592c/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1", size = 291467, upload-time = "2025-06-09T23:01:47.234Z" }, + { url 
= "https://files.pythonhosted.org/packages/4f/00/d5c5e09d4922c395e2f2f6b79b9a20dab4b67daaf78ab92e7729341f61f6/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384", size = 266028, upload-time = "2025-06-09T23:01:48.819Z" }, + { url = "https://files.pythonhosted.org/packages/4e/27/72765be905619dfde25a7f33813ac0341eb6b076abede17a2e3fbfade0cb/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb", size = 284294, upload-time = "2025-06-09T23:01:50.394Z" }, + { url = "https://files.pythonhosted.org/packages/88/67/c94103a23001b17808eb7dd1200c156bb69fb68e63fcf0693dde4cd6228c/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c", size = 281898, upload-time = "2025-06-09T23:01:52.234Z" }, + { url = "https://files.pythonhosted.org/packages/42/34/a3e2c00c00f9e2a9db5653bca3fec306349e71aff14ae45ecc6d0951dd24/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65", size = 290465, upload-time = "2025-06-09T23:01:53.788Z" }, + { url = "https://files.pythonhosted.org/packages/bb/73/f89b7fbce8b0b0c095d82b008afd0590f71ccb3dee6eee41791cf8cd25fd/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3", size = 266385, upload-time = "2025-06-09T23:01:55.769Z" }, + { url = "https://files.pythonhosted.org/packages/cd/45/e365fdb554159462ca12df54bc59bfa7a9a273ecc21e99e72e597564d1ae/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657", size = 288771, upload-time = "2025-06-09T23:01:57.4Z" }, 
+ { url = "https://files.pythonhosted.org/packages/00/11/47b6117002a0e904f004d70ec5194fe9144f117c33c851e3d51c765962d0/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104", size = 288206, upload-time = "2025-06-09T23:01:58.936Z" }, + { url = "https://files.pythonhosted.org/packages/40/37/5f9f3c3fd7f7746082ec67bcdc204db72dad081f4f83a503d33220a92973/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf", size = 282620, upload-time = "2025-06-09T23:02:00.493Z" }, + { url = "https://files.pythonhosted.org/packages/0b/31/8fbc5af2d183bff20f21aa743b4088eac4445d2bb1cdece449ae80e4e2d1/frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81", size = 43059, upload-time = "2025-06-09T23:02:02.072Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ed/41956f52105b8dbc26e457c5705340c67c8cc2b79f394b79bffc09d0e938/frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e", size = 47516, upload-time = "2025-06-09T23:02:03.779Z" }, + { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" }, +] + +[[package]] +name = "google-ai-generativelanguage" +version = "0.6.15" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core", extra = ["grpc"] }, + { name = "google-auth" }, + { name = "proto-plus" }, + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/11/d1/48fe5d7a43d278e9f6b5ada810b0a3530bbeac7ed7fcbcd366f932f05316/google_ai_generativelanguage-0.6.15.tar.gz", hash = 
"sha256:8f6d9dc4c12b065fe2d0289026171acea5183ebf2d0b11cefe12f3821e159ec3", size = 1375443, upload-time = "2025-01-13T21:50:47.459Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/a3/67b8a6ff5001a1d8864922f2d6488dc2a14367ceb651bc3f09a947f2f306/google_ai_generativelanguage-0.6.15-py3-none-any.whl", hash = "sha256:5a03ef86377aa184ffef3662ca28f19eeee158733e45d7947982eb953c6ebb6c", size = 1327356, upload-time = "2025-01-13T21:50:44.174Z" }, +] + +[[package]] +name = "google-api-core" +version = "2.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "googleapis-common-protos" }, + { name = "proto-plus" }, + { name = "protobuf" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/21/e9d043e88222317afdbdb567165fdbc3b0aad90064c7e0c9eb0ad9955ad8/google_api_core-2.25.1.tar.gz", hash = "sha256:d2aaa0b13c78c61cb3f4282c464c046e45fbd75755683c9c525e6e8f7ed0a5e8", size = 165443, upload-time = "2025-06-12T20:52:20.439Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/4b/ead00905132820b623732b175d66354e9d3e69fcf2a5dcdab780664e7896/google_api_core-2.25.1-py3-none-any.whl", hash = "sha256:8a2a56c1fef82987a524371f99f3bd0143702fecc670c72e600c1cda6bf8dbb7", size = 160807, upload-time = "2025-06-12T20:52:19.334Z" }, +] + +[package.optional-dependencies] +grpc = [ + { name = "grpcio" }, + { name = "grpcio-status" }, +] + +[[package]] +name = "google-api-python-client" +version = "2.172.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core" }, + { name = "google-auth" }, + { name = "google-auth-httplib2" }, + { name = "httplib2" }, + { name = "uritemplate" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/02/69/c0cec6be5878d4de161f64096edb3d4a2d1a838f036b8425ea8358d0dfb3/google_api_python_client-2.172.0.tar.gz", hash = "sha256:dcb3b7e067154b2aa41f1776cf86584a5739c0ac74e6ff46fc665790dca0e6a6", 
size = 13074841, upload-time = "2025-06-10T16:58:41.181Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/fc/8850ccf21c5df43faeaf8bba8c4149ee880b41b8dc7066e3259bcfd921ca/google_api_python_client-2.172.0-py3-none-any.whl", hash = "sha256:9f1b9a268d5dc1228207d246c673d3a09ee211b41a11521d38d9212aeaa43af7", size = 13595800, upload-time = "2025-06-10T16:58:38.143Z" }, +] + +[[package]] +name = "google-auth" +version = "2.40.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/9b/e92ef23b84fa10a64ce4831390b7a4c2e53c0132568d99d4ae61d04c8855/google_auth-2.40.3.tar.gz", hash = "sha256:500c3a29adedeb36ea9cf24b8d10858e152f2412e3ca37829b3fa18e33d63b77", size = 281029, upload-time = "2025-06-04T18:04:57.577Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/63/b19553b658a1692443c62bd07e5868adaa0ad746a0751ba62c59568cd45b/google_auth-2.40.3-py2.py3-none-any.whl", hash = "sha256:1370d4593e86213563547f97a92752fc658456fe4514c809544f330fed45a7ca", size = 216137, upload-time = "2025-06-04T18:04:55.573Z" }, +] + +[[package]] +name = "google-auth-httplib2" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "httplib2" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/56/be/217a598a818567b28e859ff087f347475c807a5649296fb5a817c58dacef/google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05", size = 10842, upload-time = "2023-12-12T17:40:30.722Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/8a/fe34d2f3f9470a27b01c9e76226965863f153d5fbe276f83608562e49c04/google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d", size = 9253, upload-time = 
"2023-12-12T17:40:13.055Z" }, +] + +[[package]] +name = "google-generativeai" +version = "0.8.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-ai-generativelanguage" }, + { name = "google-api-core" }, + { name = "google-api-python-client" }, + { name = "google-auth" }, + { name = "protobuf" }, + { name = "pydantic" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/40/c42ff9ded9f09ec9392879a8e6538a00b2dc185e834a3392917626255419/google_generativeai-0.8.5-py3-none-any.whl", hash = "sha256:22b420817fb263f8ed520b33285f45976d5b21e904da32b80d4fd20c055123a2", size = 155427, upload-time = "2025-04-17T00:40:00.67Z" }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.70.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/24/33db22342cf4a2ea27c9955e6713140fedd51e8b141b5ce5260897020f1a/googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257", size = 145903, upload-time = "2025-04-14T10:17:02.924Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/f1/62a193f0227cf15a920390abe675f386dec35f7ae3ffe6da582d3ade42c7/googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8", size = 294530, upload-time = "2025-04-14T10:17:01.271Z" }, +] + +[[package]] +name = "grpcio" +version = "1.73.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/7b/ca3f561aeecf0c846d15e1b38921a60dffffd5d4113931198fbf455334ee/grpcio-1.73.0.tar.gz", hash = "sha256:3af4c30918a7f0d39de500d11255f8d9da4f30e94a2033e70fe2a720e184bd8e", size = 12786424, upload-time = "2025-06-09T10:08:23.365Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/60/da/6f3f7a78e5455c4cbe87c85063cc6da05d65d25264f9d4aed800ece46294/grpcio-1.73.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:da1d677018ef423202aca6d73a8d3b2cb245699eb7f50eb5f74cae15a8e1f724", size = 5335867, upload-time = "2025-06-09T10:04:03.153Z" }, + { url = "https://files.pythonhosted.org/packages/53/14/7d1f2526b98b9658d7be0bb163fd78d681587de6709d8b0c74b4b481b013/grpcio-1.73.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:36bf93f6a657f37c131d9dd2c391b867abf1426a86727c3575393e9e11dadb0d", size = 10595587, upload-time = "2025-06-09T10:04:05.694Z" }, + { url = "https://files.pythonhosted.org/packages/02/24/a293c398ae44e741da1ed4b29638edbb002258797b07a783f65506165b4c/grpcio-1.73.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:d84000367508ade791d90c2bafbd905574b5ced8056397027a77a215d601ba15", size = 5765793, upload-time = "2025-06-09T10:04:09.235Z" }, + { url = "https://files.pythonhosted.org/packages/e1/24/d84dbd0b5bf36fb44922798d525a85cefa2ffee7b7110e61406e9750ed15/grpcio-1.73.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c98ba1d928a178ce33f3425ff823318040a2b7ef875d30a0073565e5ceb058d9", size = 6415494, upload-time = "2025-06-09T10:04:12.377Z" }, + { url = "https://files.pythonhosted.org/packages/5e/85/c80dc65aed8e9dce3d54688864bac45331d9c7600985541f18bd5cb301d4/grpcio-1.73.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a73c72922dfd30b396a5f25bb3a4590195ee45ecde7ee068acb0892d2900cf07", size = 6007279, upload-time = "2025-06-09T10:04:14.878Z" }, + { url = "https://files.pythonhosted.org/packages/37/fc/207c00a4c6fa303d26e2cbd62fbdb0582facdfd08f55500fd83bf6b0f8db/grpcio-1.73.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:10e8edc035724aba0346a432060fd192b42bd03675d083c01553cab071a28da5", size = 6105505, upload-time = "2025-06-09T10:04:17.39Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/35/8fe69af820667b87ebfcb24214e42a1d53da53cb39edd6b4f84f6b36da86/grpcio-1.73.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f5cdc332b503c33b1643b12ea933582c7b081957c8bc2ea4cc4bc58054a09288", size = 6753792, upload-time = "2025-06-09T10:04:19.989Z" }, + { url = "https://files.pythonhosted.org/packages/e2/d8/738c77c1e821e350da4a048849f695ff88a02b291f8c69db23908867aea6/grpcio-1.73.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:07ad7c57233c2109e4ac999cb9c2710c3b8e3f491a73b058b0ce431f31ed8145", size = 6287593, upload-time = "2025-06-09T10:04:22.878Z" }, + { url = "https://files.pythonhosted.org/packages/09/ec/8498eabc018fa39ae8efe5e47e3f4c1bc9ed6281056713871895dc998807/grpcio-1.73.0-cp313-cp313-win32.whl", hash = "sha256:0eb5df4f41ea10bda99a802b2a292d85be28958ede2a50f2beb8c7fc9a738419", size = 3668637, upload-time = "2025-06-09T10:04:25.787Z" }, + { url = "https://files.pythonhosted.org/packages/d7/35/347db7d2e7674b621afd21b12022e7f48c7b0861b5577134b4e939536141/grpcio-1.73.0-cp313-cp313-win_amd64.whl", hash = "sha256:38cf518cc54cd0c47c9539cefa8888549fcc067db0b0c66a46535ca8032020c4", size = 4335872, upload-time = "2025-06-09T10:04:29.032Z" }, +] + +[[package]] +name = "grpcio-status" +version = "1.71.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "grpcio" }, + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/53/a911467bece076020456401f55a27415d2d70d3bc2c37af06b44ea41fc5c/grpcio_status-1.71.0.tar.gz", hash = "sha256:11405fed67b68f406b3f3c7c5ae5104a79d2d309666d10d61b152e91d28fb968", size = 13669, upload-time = "2025-03-10T19:29:00.901Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ad/d6/31fbc43ff097d8c4c9fc3df741431b8018f67bf8dfbe6553a555f6e5f675/grpcio_status-1.71.0-py3-none-any.whl", hash = "sha256:843934ef8c09e3e858952887467f8256aac3910c55f077a359a65b2b3cde3e68", size = 14424, 
upload-time = "2025-03-10T19:27:04.967Z" }, +] + [[package]] name = "h11" version = "0.16.0" @@ -214,6 +504,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, ] +[[package]] +name = "httplib2" +version = "0.22.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyparsing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/ad/2371116b22d616c194aa25ec410c9c6c37f23599dcd590502b74db197584/httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81", size = 351116, upload-time = "2023-03-21T22:29:37.214Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/6c/d2fbdaaa5959339d53ba38e94c123e4e84b8fbc4b84beb0e70d7c1608486/httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc", size = 96854, upload-time = "2023-03-21T22:29:35.683Z" }, +] + [[package]] name = "httpx" version = "0.28.1" @@ -304,6 +606,62 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/41/9f/3500910d5a98549e3098807493851eeef2b89cdd3032227558a104dfe926/json5-0.12.0-py3-none-any.whl", hash = "sha256:6d37aa6c08b0609f16e1ec5ff94697e2cbbfbad5ac112afa05794da9ab7810db", size = 36079, upload-time = "2025-04-03T16:33:11.927Z" }, ] +[[package]] +name = "loguru" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "win32-setctime", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = 
"sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" }, +] + +[[package]] +name = "multidict" +version = "6.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/2f/a3470242707058fe856fe59241eee5635d79087100b7042a867368863a27/multidict-6.4.4.tar.gz", hash = "sha256:69ee9e6ba214b5245031b76233dd95408a0fd57fdb019ddcc1ead4790932a8e8", size = 90183, upload-time = "2025-05-19T14:16:37.381Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/2a/e166d2ffbf4b10131b2d5b0e458f7cee7d986661caceae0de8753042d4b2/multidict-6.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:82ffabefc8d84c2742ad19c37f02cde5ec2a1ee172d19944d380f920a340e4b9", size = 64123, upload-time = "2025-05-19T14:15:11.044Z" }, + { url = "https://files.pythonhosted.org/packages/8c/96/e200e379ae5b6f95cbae472e0199ea98913f03d8c9a709f42612a432932c/multidict-6.4.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6a2f58a66fe2c22615ad26156354005391e26a2f3721c3621504cd87c1ea87bf", size = 38049, upload-time = "2025-05-19T14:15:12.902Z" }, + { url = "https://files.pythonhosted.org/packages/75/fb/47afd17b83f6a8c7fa863c6d23ac5ba6a0e6145ed8a6bcc8da20b2b2c1d2/multidict-6.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5883d6ee0fd9d8a48e9174df47540b7545909841ac82354c7ae4cbe9952603bd", size = 37078, upload-time = "2025-05-19T14:15:14.282Z" }, + { url = "https://files.pythonhosted.org/packages/fa/70/1af3143000eddfb19fd5ca5e78393985ed988ac493bb859800fe0914041f/multidict-6.4.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9abcf56a9511653fa1d052bfc55fbe53dbee8f34e68bd6a5a038731b0ca42d15", size = 224097, upload-time = "2025-05-19T14:15:15.566Z" }, + { url = "https://files.pythonhosted.org/packages/b1/39/d570c62b53d4fba844e0378ffbcd02ac25ca423d3235047013ba2f6f60f8/multidict-6.4.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6ed5ae5605d4ad5a049fad2a28bb7193400700ce2f4ae484ab702d1e3749c3f9", size = 230768, upload-time = "2025-05-19T14:15:17.308Z" }, + { url = "https://files.pythonhosted.org/packages/fd/f8/ed88f2c4d06f752b015933055eb291d9bc184936903752c66f68fb3c95a7/multidict-6.4.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbfcb60396f9bcfa63e017a180c3105b8c123a63e9d1428a36544e7d37ca9e20", size = 231331, upload-time = "2025-05-19T14:15:18.73Z" }, + { url = "https://files.pythonhosted.org/packages/9c/6f/8e07cffa32f483ab887b0d56bbd8747ac2c1acd00dc0af6fcf265f4a121e/multidict-6.4.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0f1987787f5f1e2076b59692352ab29a955b09ccc433c1f6b8e8e18666f608b", size = 230169, upload-time = "2025-05-19T14:15:20.179Z" }, + { url = "https://files.pythonhosted.org/packages/e6/2b/5dcf173be15e42f330110875a2668ddfc208afc4229097312212dc9c1236/multidict-6.4.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0121ccce8c812047d8d43d691a1ad7641f72c4f730474878a5aeae1b8ead8c", size = 222947, upload-time = "2025-05-19T14:15:21.714Z" }, + { url = "https://files.pythonhosted.org/packages/39/75/4ddcbcebe5ebcd6faa770b629260d15840a5fc07ce8ad295a32e14993726/multidict-6.4.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83ec4967114295b8afd120a8eec579920c882831a3e4c3331d591a8e5bfbbc0f", size = 215761, upload-time = "2025-05-19T14:15:23.242Z" }, + { url = 
"https://files.pythonhosted.org/packages/6a/c9/55e998ae45ff15c5608e384206aa71a11e1b7f48b64d166db400b14a3433/multidict-6.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:995f985e2e268deaf17867801b859a282e0448633f1310e3704b30616d269d69", size = 227605, upload-time = "2025-05-19T14:15:24.763Z" }, + { url = "https://files.pythonhosted.org/packages/04/49/c2404eac74497503c77071bd2e6f88c7e94092b8a07601536b8dbe99be50/multidict-6.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d832c608f94b9f92a0ec8b7e949be7792a642b6e535fcf32f3e28fab69eeb046", size = 226144, upload-time = "2025-05-19T14:15:26.249Z" }, + { url = "https://files.pythonhosted.org/packages/62/c5/0cd0c3c6f18864c40846aa2252cd69d308699cb163e1c0d989ca301684da/multidict-6.4.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d21c1212171cf7da703c5b0b7a0e85be23b720818aef502ad187d627316d5645", size = 221100, upload-time = "2025-05-19T14:15:28.303Z" }, + { url = "https://files.pythonhosted.org/packages/71/7b/f2f3887bea71739a046d601ef10e689528d4f911d84da873b6be9194ffea/multidict-6.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:cbebaa076aaecad3d4bb4c008ecc73b09274c952cf6a1b78ccfd689e51f5a5b0", size = 232731, upload-time = "2025-05-19T14:15:30.263Z" }, + { url = "https://files.pythonhosted.org/packages/e5/b3/d9de808349df97fa75ec1372758701b5800ebad3c46ae377ad63058fbcc6/multidict-6.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:c93a6fb06cc8e5d3628b2b5fda215a5db01e8f08fc15fadd65662d9b857acbe4", size = 229637, upload-time = "2025-05-19T14:15:33.337Z" }, + { url = "https://files.pythonhosted.org/packages/5e/57/13207c16b615eb4f1745b44806a96026ef8e1b694008a58226c2d8f5f0a5/multidict-6.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8cd8f81f1310182362fb0c7898145ea9c9b08a71081c5963b40ee3e3cac589b1", size = 225594, upload-time = "2025-05-19T14:15:34.832Z" }, + { url = 
"https://files.pythonhosted.org/packages/3a/e4/d23bec2f70221604f5565000632c305fc8f25ba953e8ce2d8a18842b9841/multidict-6.4.4-cp313-cp313-win32.whl", hash = "sha256:3e9f1cd61a0ab857154205fb0b1f3d3ace88d27ebd1409ab7af5096e409614cd", size = 35359, upload-time = "2025-05-19T14:15:36.246Z" }, + { url = "https://files.pythonhosted.org/packages/a7/7a/cfe1a47632be861b627f46f642c1d031704cc1c0f5c0efbde2ad44aa34bd/multidict-6.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:8ffb40b74400e4455785c2fa37eba434269149ec525fc8329858c862e4b35373", size = 38903, upload-time = "2025-05-19T14:15:37.507Z" }, + { url = "https://files.pythonhosted.org/packages/68/7b/15c259b0ab49938a0a1c8f3188572802704a779ddb294edc1b2a72252e7c/multidict-6.4.4-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:6a602151dbf177be2450ef38966f4be3467d41a86c6a845070d12e17c858a156", size = 68895, upload-time = "2025-05-19T14:15:38.856Z" }, + { url = "https://files.pythonhosted.org/packages/f1/7d/168b5b822bccd88142e0a3ce985858fea612404edd228698f5af691020c9/multidict-6.4.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0d2b9712211b860d123815a80b859075d86a4d54787e247d7fbee9db6832cf1c", size = 40183, upload-time = "2025-05-19T14:15:40.197Z" }, + { url = "https://files.pythonhosted.org/packages/e0/b7/d4b8d98eb850ef28a4922ba508c31d90715fd9b9da3801a30cea2967130b/multidict-6.4.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d2fa86af59f8fc1972e121ade052145f6da22758f6996a197d69bb52f8204e7e", size = 39592, upload-time = "2025-05-19T14:15:41.508Z" }, + { url = "https://files.pythonhosted.org/packages/18/28/a554678898a19583548e742080cf55d169733baf57efc48c2f0273a08583/multidict-6.4.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50855d03e9e4d66eab6947ba688ffb714616f985838077bc4b490e769e48da51", size = 226071, upload-time = "2025-05-19T14:15:42.877Z" }, + { url = 
"https://files.pythonhosted.org/packages/ee/dc/7ba6c789d05c310e294f85329efac1bf5b450338d2542498db1491a264df/multidict-6.4.4-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5bce06b83be23225be1905dcdb6b789064fae92499fbc458f59a8c0e68718601", size = 222597, upload-time = "2025-05-19T14:15:44.412Z" }, + { url = "https://files.pythonhosted.org/packages/24/4f/34eadbbf401b03768dba439be0fb94b0d187facae9142821a3d5599ccb3b/multidict-6.4.4-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66ed0731f8e5dfd8369a883b6e564aca085fb9289aacabd9decd70568b9a30de", size = 228253, upload-time = "2025-05-19T14:15:46.474Z" }, + { url = "https://files.pythonhosted.org/packages/c0/e6/493225a3cdb0d8d80d43a94503fc313536a07dae54a3f030d279e629a2bc/multidict-6.4.4-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:329ae97fc2f56f44d91bc47fe0972b1f52d21c4b7a2ac97040da02577e2daca2", size = 226146, upload-time = "2025-05-19T14:15:48.003Z" }, + { url = "https://files.pythonhosted.org/packages/2f/70/e411a7254dc3bff6f7e6e004303b1b0591358e9f0b7c08639941e0de8bd6/multidict-6.4.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c27e5dcf520923d6474d98b96749e6805f7677e93aaaf62656005b8643f907ab", size = 220585, upload-time = "2025-05-19T14:15:49.546Z" }, + { url = "https://files.pythonhosted.org/packages/08/8f/beb3ae7406a619100d2b1fb0022c3bb55a8225ab53c5663648ba50dfcd56/multidict-6.4.4-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:058cc59b9e9b143cc56715e59e22941a5d868c322242278d28123a5d09cdf6b0", size = 212080, upload-time = "2025-05-19T14:15:51.151Z" }, + { url = "https://files.pythonhosted.org/packages/9c/ec/355124e9d3d01cf8edb072fd14947220f357e1c5bc79c88dff89297e9342/multidict-6.4.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:69133376bc9a03f8c47343d33f91f74a99c339e8b58cea90433d8e24bb298031", size = 226558, 
upload-time = "2025-05-19T14:15:52.665Z" }, + { url = "https://files.pythonhosted.org/packages/fd/22/d2b95cbebbc2ada3be3812ea9287dcc9712d7f1a012fad041770afddb2ad/multidict-6.4.4-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:d6b15c55721b1b115c5ba178c77104123745b1417527ad9641a4c5e2047450f0", size = 212168, upload-time = "2025-05-19T14:15:55.279Z" }, + { url = "https://files.pythonhosted.org/packages/4d/c5/62bfc0b2f9ce88326dbe7179f9824a939c6c7775b23b95de777267b9725c/multidict-6.4.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a887b77f51d3d41e6e1a63cf3bc7ddf24de5939d9ff69441387dfefa58ac2e26", size = 217970, upload-time = "2025-05-19T14:15:56.806Z" }, + { url = "https://files.pythonhosted.org/packages/79/74/977cea1aadc43ff1c75d23bd5bc4768a8fac98c14e5878d6ee8d6bab743c/multidict-6.4.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:632a3bf8f1787f7ef7d3c2f68a7bde5be2f702906f8b5842ad6da9d974d0aab3", size = 226980, upload-time = "2025-05-19T14:15:58.313Z" }, + { url = "https://files.pythonhosted.org/packages/48/fc/cc4a1a2049df2eb84006607dc428ff237af38e0fcecfdb8a29ca47b1566c/multidict-6.4.4-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:a145c550900deb7540973c5cdb183b0d24bed6b80bf7bddf33ed8f569082535e", size = 220641, upload-time = "2025-05-19T14:15:59.866Z" }, + { url = "https://files.pythonhosted.org/packages/3b/6a/a7444d113ab918701988d4abdde373dbdfd2def7bd647207e2bf645c7eac/multidict-6.4.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cc5d83c6619ca5c9672cb78b39ed8542f1975a803dee2cda114ff73cbb076edd", size = 221728, upload-time = "2025-05-19T14:16:01.535Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b0/fdf4c73ad1c55e0f4dbbf2aa59dd37037334091f9a4961646d2b7ac91a86/multidict-6.4.4-cp313-cp313t-win32.whl", hash = "sha256:3312f63261b9df49be9d57aaa6abf53a6ad96d93b24f9cc16cf979956355ce6e", size = 41913, upload-time = "2025-05-19T14:16:03.199Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/92/27989ecca97e542c0d01d05a98a5ae12198a243a9ee12563a0313291511f/multidict-6.4.4-cp313-cp313t-win_amd64.whl", hash = "sha256:ba852168d814b2c73333073e1c7116d9395bea69575a01b0b3c89d2d5a87c8fb", size = 46112, upload-time = "2025-05-19T14:16:04.909Z" }, + { url = "https://files.pythonhosted.org/packages/84/5d/e17845bb0fa76334477d5de38654d27946d5b5d3695443987a094a71b440/multidict-6.4.4-py3-none-any.whl", hash = "sha256:bd4557071b561a8b3b6075c3ce93cf9bfb6182cb241805c3d66ced3b75eff4ac", size = 10481, upload-time = "2025-05-19T14:16:36.024Z" }, +] + [[package]] name = "openai" version = "1.87.0" @@ -323,6 +681,94 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/36/ac/313ded47ce1d5bc2ec02ed5dd5506bf5718678a4655ac20f337231d9aae3/openai-1.87.0-py3-none-any.whl", hash = "sha256:f9bcae02ac4fff6522276eee85d33047335cfb692b863bd8261353ce4ada5692", size = 734368, upload-time = "2025-06-16T19:04:23.181Z" }, ] +[[package]] +name = "propcache" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/d1/8c747fafa558c603c4ca19d8e20b288aa0c7cda74e9402f50f31eb65267e/propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945", size = 71286, upload-time = "2025-06-09T22:54:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/61/99/d606cb7986b60d89c36de8a85d58764323b3a5ff07770a99d8e993b3fa73/propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252", size = 42425, upload-time = "2025-06-09T22:54:55.642Z" }, + { 
url = "https://files.pythonhosted.org/packages/8c/96/ef98f91bbb42b79e9bb82bdd348b255eb9d65f14dbbe3b1594644c4073f7/propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f", size = 41846, upload-time = "2025-06-09T22:54:57.246Z" }, + { url = "https://files.pythonhosted.org/packages/5b/ad/3f0f9a705fb630d175146cd7b1d2bf5555c9beaed54e94132b21aac098a6/propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33", size = 208871, upload-time = "2025-06-09T22:54:58.975Z" }, + { url = "https://files.pythonhosted.org/packages/3a/38/2085cda93d2c8b6ec3e92af2c89489a36a5886b712a34ab25de9fbca7992/propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e", size = 215720, upload-time = "2025-06-09T22:55:00.471Z" }, + { url = "https://files.pythonhosted.org/packages/61/c1/d72ea2dc83ac7f2c8e182786ab0fc2c7bd123a1ff9b7975bee671866fe5f/propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1", size = 215203, upload-time = "2025-06-09T22:55:01.834Z" }, + { url = "https://files.pythonhosted.org/packages/af/81/b324c44ae60c56ef12007105f1460d5c304b0626ab0cc6b07c8f2a9aa0b8/propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3", size = 206365, upload-time = "2025-06-09T22:55:03.199Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/88549128bb89e66d2aff242488f62869014ae092db63ccea53c1cc75a81d/propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1", size = 196016, upload-time = 
"2025-06-09T22:55:04.518Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3f/3bdd14e737d145114a5eb83cb172903afba7242f67c5877f9909a20d948d/propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6", size = 205596, upload-time = "2025-06-09T22:55:05.942Z" }, + { url = "https://files.pythonhosted.org/packages/0f/ca/2f4aa819c357d3107c3763d7ef42c03980f9ed5c48c82e01e25945d437c1/propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387", size = 200977, upload-time = "2025-06-09T22:55:07.792Z" }, + { url = "https://files.pythonhosted.org/packages/cd/4a/e65276c7477533c59085251ae88505caf6831c0e85ff8b2e31ebcbb949b1/propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4", size = 197220, upload-time = "2025-06-09T22:55:09.173Z" }, + { url = "https://files.pythonhosted.org/packages/7c/54/fc7152e517cf5578278b242396ce4d4b36795423988ef39bb8cd5bf274c8/propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88", size = 210642, upload-time = "2025-06-09T22:55:10.62Z" }, + { url = "https://files.pythonhosted.org/packages/b9/80/abeb4a896d2767bf5f1ea7b92eb7be6a5330645bd7fb844049c0e4045d9d/propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206", size = 212789, upload-time = "2025-06-09T22:55:12.029Z" }, + { url = "https://files.pythonhosted.org/packages/b3/db/ea12a49aa7b2b6d68a5da8293dcf50068d48d088100ac016ad92a6a780e6/propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43", size = 205880, upload-time = "2025-06-09T22:55:13.45Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/e5/9076a0bbbfb65d1198007059c65639dfd56266cf8e477a9707e4b1999ff4/propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02", size = 37220, upload-time = "2025-06-09T22:55:15.284Z" }, + { url = "https://files.pythonhosted.org/packages/d3/f5/b369e026b09a26cd77aa88d8fffd69141d2ae00a2abaaf5380d2603f4b7f/propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05", size = 40678, upload-time = "2025-06-09T22:55:16.445Z" }, + { url = "https://files.pythonhosted.org/packages/a4/3a/6ece377b55544941a08d03581c7bc400a3c8cd3c2865900a68d5de79e21f/propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b", size = 76560, upload-time = "2025-06-09T22:55:17.598Z" }, + { url = "https://files.pythonhosted.org/packages/0c/da/64a2bb16418740fa634b0e9c3d29edff1db07f56d3546ca2d86ddf0305e1/propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0", size = 44676, upload-time = "2025-06-09T22:55:18.922Z" }, + { url = "https://files.pythonhosted.org/packages/36/7b/f025e06ea51cb72c52fb87e9b395cced02786610b60a3ed51da8af017170/propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e", size = 44701, upload-time = "2025-06-09T22:55:20.106Z" }, + { url = "https://files.pythonhosted.org/packages/a4/00/faa1b1b7c3b74fc277f8642f32a4c72ba1d7b2de36d7cdfb676db7f4303e/propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28", size = 276934, upload-time = "2025-06-09T22:55:21.5Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/ab/935beb6f1756e0476a4d5938ff44bf0d13a055fed880caf93859b4f1baf4/propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a", size = 278316, upload-time = "2025-06-09T22:55:22.918Z" }, + { url = "https://files.pythonhosted.org/packages/f8/9d/994a5c1ce4389610838d1caec74bdf0e98b306c70314d46dbe4fcf21a3e2/propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c", size = 282619, upload-time = "2025-06-09T22:55:24.651Z" }, + { url = "https://files.pythonhosted.org/packages/2b/00/a10afce3d1ed0287cef2e09506d3be9822513f2c1e96457ee369adb9a6cd/propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725", size = 265896, upload-time = "2025-06-09T22:55:26.049Z" }, + { url = "https://files.pythonhosted.org/packages/2e/a8/2aa6716ffa566ca57c749edb909ad27884680887d68517e4be41b02299f3/propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892", size = 252111, upload-time = "2025-06-09T22:55:27.381Z" }, + { url = "https://files.pythonhosted.org/packages/36/4f/345ca9183b85ac29c8694b0941f7484bf419c7f0fea2d1e386b4f7893eed/propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44", size = 268334, upload-time = "2025-06-09T22:55:28.747Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ca/fcd54f78b59e3f97b3b9715501e3147f5340167733d27db423aa321e7148/propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe", size = 255026, upload-time = "2025-06-09T22:55:30.184Z" }, 
+ { url = "https://files.pythonhosted.org/packages/8b/95/8e6a6bbbd78ac89c30c225210a5c687790e532ba4088afb8c0445b77ef37/propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81", size = 250724, upload-time = "2025-06-09T22:55:31.646Z" }, + { url = "https://files.pythonhosted.org/packages/ee/b0/0dd03616142baba28e8b2d14ce5df6631b4673850a3d4f9c0f9dd714a404/propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba", size = 268868, upload-time = "2025-06-09T22:55:33.209Z" }, + { url = "https://files.pythonhosted.org/packages/c5/98/2c12407a7e4fbacd94ddd32f3b1e3d5231e77c30ef7162b12a60e2dd5ce3/propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770", size = 271322, upload-time = "2025-06-09T22:55:35.065Z" }, + { url = "https://files.pythonhosted.org/packages/35/91/9cb56efbb428b006bb85db28591e40b7736847b8331d43fe335acf95f6c8/propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330", size = 265778, upload-time = "2025-06-09T22:55:36.45Z" }, + { url = "https://files.pythonhosted.org/packages/9a/4c/b0fe775a2bdd01e176b14b574be679d84fc83958335790f7c9a686c1f468/propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394", size = 41175, upload-time = "2025-06-09T22:55:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ff/47f08595e3d9b5e149c150f88d9714574f1a7cbd89fe2817158a952674bf/propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198", size = 44857, upload-time = "2025-06-09T22:55:39.687Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, +] + +[[package]] +name = "proto-plus" +version = "1.26.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/ac/87285f15f7cce6d4a008f33f1757fb5a13611ea8914eb58c3d0d26243468/proto_plus-1.26.1.tar.gz", hash = "sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012", size = 56142, upload-time = "2025-03-10T15:54:38.843Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/6d/280c4c2ce28b1593a19ad5239c8b826871fc6ec275c21afc8e1820108039/proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66", size = 50163, upload-time = "2025-03-10T15:54:37.335Z" }, +] + +[[package]] +name = "protobuf" +version = "5.29.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/29/d09e70352e4e88c9c7a198d5645d7277811448d76c23b00345670f7c8a38/protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84", size = 425226, upload-time = "2025-05-28T23:51:59.82Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/11/6e40e9fc5bba02988a214c07cf324595789ca7820160bfd1f8be96e48539/protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079", size = 422963, upload-time = "2025-05-28T23:51:41.204Z" }, + { url = "https://files.pythonhosted.org/packages/81/7f/73cefb093e1a2a7c3ffd839e6f9fcafb7a427d300c7f8aef9c64405d8ac6/protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc", size = 
434818, upload-time = "2025-05-28T23:51:44.297Z" }, + { url = "https://files.pythonhosted.org/packages/dd/73/10e1661c21f139f2c6ad9b23040ff36fee624310dc28fba20d33fdae124c/protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671", size = 418091, upload-time = "2025-05-28T23:51:45.907Z" }, + { url = "https://files.pythonhosted.org/packages/6c/04/98f6f8cf5b07ab1294c13f34b4e69b3722bb609c5b701d6c169828f9f8aa/protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015", size = 319824, upload-time = "2025-05-28T23:51:47.545Z" }, + { url = "https://files.pythonhosted.org/packages/85/e4/07c80521879c2d15f321465ac24c70efe2381378c00bf5e56a0f4fbac8cd/protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61", size = 319942, upload-time = "2025-05-28T23:51:49.11Z" }, + { url = "https://files.pythonhosted.org/packages/7e/cc/7e77861000a0691aeea8f4566e5d3aa716f2b1dece4a24439437e41d3d25/protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5", size = 172823, upload-time = "2025-05-28T23:51:58.157Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] 
+name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + [[package]] name = "pydantic" version = "2.11.7" @@ -366,6 +812,29 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, ] +[[package]] +name = "pydantic-settings" +version = "2.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/67/1d/42628a2c33e93f8e9acbde0d5d735fa0850f3e6a2f8cb1eb6c40b9a732ac/pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268", size = 163234, upload-time = "2025-04-18T16:44:48.265Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/5f/d6d641b490fd3ec2c4c13b4244d68deea3a1b970a97be64f34fb5504ff72/pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef", size = 44356, upload-time = "2025-04-18T16:44:46.617Z" }, +] + +[[package]] +name = 
"pyparsing" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/22/f1129e69d94ffff626bdb5c835506b3a5b4f3d070f17ea295e12c2c6f60f/pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be", size = 1088608, upload-time = "2025-03-25T05:01:28.114Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/e7/df2285f3d08fee213f2d041540fa4fc9ca6c2d44cf36d3a035bf2a8d2bcc/pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf", size = 111120, upload-time = "2025-03-25T05:01:24.908Z" }, +] + [[package]] name = "pyreadline3" version = "3.5.4" @@ -405,6 +874,33 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, ] +[[package]] +name = "requests" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies 
= [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + [[package]] name = "six" version = "1.17.0" @@ -492,3 +988,78 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d7/0c/9837fece153051e19c7bade9f88f9b409e026b9525927824cdf16293b43b/ujson-5.10.0-cp313-cp313-win32.whl", hash = "sha256:4c4fc16f11ac1612f05b6f5781b384716719547e142cfd67b65d035bd85af165", size = 38766, upload-time = "2024-05-14T02:01:32.856Z" }, { url = "https://files.pythonhosted.org/packages/d7/72/6cb6728e2738c05bbe9bd522d6fc79f86b9a28402f38663e85a28fddd4a0/ujson-5.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:4573fd1695932d4f619928fd09d5d03d917274381649ade4328091ceca175539", size = 42212, upload-time = "2024-05-14T02:01:33.97Z" }, ] + +[[package]] +name = "uritemplate" +version = "4.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/60/f174043244c5306c9988380d2cb10009f91563fc4b31293d27e17201af56/uritemplate-4.2.0.tar.gz", hash = "sha256:480c2ed180878955863323eea31b0ede668795de182617fef9c6ca09e6ec9d0e", size = 33267, upload-time = "2025-06-02T15:12:06.318Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/99/3ae339466c9183ea5b8ae87b34c0b897eda475d2aec2307cae60e5cd4f29/uritemplate-4.2.0-py3-none-any.whl", hash = "sha256:962201ba1c4edcab02e60f9a0d3821e82dfc5d2d6662a21abd533879bdb8a686", size = 11488, upload-time = "2025-06-02T15:12:03.405Z" }, +] + 
+[[package]] +name = "urllib3" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" }, +] + +[[package]] +name = "win32-setctime" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867, upload-time = "2024-12-07T15:28:28.314Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" }, +] + +[[package]] +name = "yarl" +version = "1.20.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/8a/e1/2411b6d7f769a07687acee88a062af5833cf1966b7266f3d8dfb3d3dc7d3/yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a", size = 131811, upload-time = "2025-06-10T00:44:18.933Z" }, + { url = "https://files.pythonhosted.org/packages/b2/27/584394e1cb76fb771371770eccad35de400e7b434ce3142c2dd27392c968/yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3", size = 90078, upload-time = "2025-06-10T00:44:20.635Z" }, + { url = "https://files.pythonhosted.org/packages/bf/9a/3246ae92d4049099f52d9b0fe3486e3b500e29b7ea872d0f152966fc209d/yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7", size = 88748, upload-time = "2025-06-10T00:44:22.34Z" }, + { url = "https://files.pythonhosted.org/packages/a3/25/35afe384e31115a1a801fbcf84012d7a066d89035befae7c5d4284df1e03/yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691", size = 349595, upload-time = "2025-06-10T00:44:24.314Z" }, + { url = "https://files.pythonhosted.org/packages/28/2d/8aca6cb2cabc8f12efcb82749b9cefecbccfc7b0384e56cd71058ccee433/yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31", size = 342616, upload-time = "2025-06-10T00:44:26.167Z" }, + { url = "https://files.pythonhosted.org/packages/0b/e9/1312633d16b31acf0098d30440ca855e3492d66623dafb8e25b03d00c3da/yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28", size = 361324, upload-time = "2025-06-10T00:44:27.915Z" }, + { url = 
"https://files.pythonhosted.org/packages/bc/a0/688cc99463f12f7669eec7c8acc71ef56a1521b99eab7cd3abb75af887b0/yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653", size = 359676, upload-time = "2025-06-10T00:44:30.041Z" }, + { url = "https://files.pythonhosted.org/packages/af/44/46407d7f7a56e9a85a4c207724c9f2c545c060380718eea9088f222ba697/yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5", size = 352614, upload-time = "2025-06-10T00:44:32.171Z" }, + { url = "https://files.pythonhosted.org/packages/b1/91/31163295e82b8d5485d31d9cf7754d973d41915cadce070491778d9c9825/yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02", size = 336766, upload-time = "2025-06-10T00:44:34.494Z" }, + { url = "https://files.pythonhosted.org/packages/b4/8e/c41a5bc482121f51c083c4c2bcd16b9e01e1cf8729e380273a952513a21f/yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53", size = 364615, upload-time = "2025-06-10T00:44:36.856Z" }, + { url = "https://files.pythonhosted.org/packages/e3/5b/61a3b054238d33d70ea06ebba7e58597891b71c699e247df35cc984ab393/yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc", size = 360982, upload-time = "2025-06-10T00:44:39.141Z" }, + { url = "https://files.pythonhosted.org/packages/df/a3/6a72fb83f8d478cb201d14927bc8040af901811a88e0ff2da7842dd0ed19/yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04", size = 369792, upload-time = "2025-06-10T00:44:40.934Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/af/4cc3c36dfc7c077f8dedb561eb21f69e1e9f2456b91b593882b0b18c19dc/yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4", size = 382049, upload-time = "2025-06-10T00:44:42.854Z" }, + { url = "https://files.pythonhosted.org/packages/19/3a/e54e2c4752160115183a66dc9ee75a153f81f3ab2ba4bf79c3c53b33de34/yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b", size = 384774, upload-time = "2025-06-10T00:44:45.275Z" }, + { url = "https://files.pythonhosted.org/packages/9c/20/200ae86dabfca89060ec6447649f219b4cbd94531e425e50d57e5f5ac330/yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1", size = 374252, upload-time = "2025-06-10T00:44:47.31Z" }, + { url = "https://files.pythonhosted.org/packages/83/75/11ee332f2f516b3d094e89448da73d557687f7d137d5a0f48c40ff211487/yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7", size = 81198, upload-time = "2025-06-10T00:44:49.164Z" }, + { url = "https://files.pythonhosted.org/packages/ba/ba/39b1ecbf51620b40ab402b0fc817f0ff750f6d92712b44689c2c215be89d/yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c", size = 86346, upload-time = "2025-06-10T00:44:51.182Z" }, + { url = "https://files.pythonhosted.org/packages/43/c7/669c52519dca4c95153c8ad96dd123c79f354a376346b198f438e56ffeb4/yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d", size = 138826, upload-time = "2025-06-10T00:44:52.883Z" }, + { url = "https://files.pythonhosted.org/packages/6a/42/fc0053719b44f6ad04a75d7f05e0e9674d45ef62f2d9ad2c1163e5c05827/yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash 
= "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf", size = 93217, upload-time = "2025-06-10T00:44:54.658Z" }, + { url = "https://files.pythonhosted.org/packages/4f/7f/fa59c4c27e2a076bba0d959386e26eba77eb52ea4a0aac48e3515c186b4c/yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3", size = 92700, upload-time = "2025-06-10T00:44:56.784Z" }, + { url = "https://files.pythonhosted.org/packages/2f/d4/062b2f48e7c93481e88eff97a6312dca15ea200e959f23e96d8ab898c5b8/yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d", size = 347644, upload-time = "2025-06-10T00:44:59.071Z" }, + { url = "https://files.pythonhosted.org/packages/89/47/78b7f40d13c8f62b499cc702fdf69e090455518ae544c00a3bf4afc9fc77/yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c", size = 323452, upload-time = "2025-06-10T00:45:01.605Z" }, + { url = "https://files.pythonhosted.org/packages/eb/2b/490d3b2dc66f52987d4ee0d3090a147ea67732ce6b4d61e362c1846d0d32/yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1", size = 346378, upload-time = "2025-06-10T00:45:03.946Z" }, + { url = "https://files.pythonhosted.org/packages/66/ad/775da9c8a94ce925d1537f939a4f17d782efef1f973039d821cbe4bcc211/yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce", size = 353261, upload-time = "2025-06-10T00:45:05.992Z" }, + { url = "https://files.pythonhosted.org/packages/4b/23/0ed0922b47a4f5c6eb9065d5ff1e459747226ddce5c6a4c111e728c9f701/yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3", size = 335987, upload-time = "2025-06-10T00:45:08.227Z" }, + { url = "https://files.pythonhosted.org/packages/3e/49/bc728a7fe7d0e9336e2b78f0958a2d6b288ba89f25a1762407a222bf53c3/yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be", size = 329361, upload-time = "2025-06-10T00:45:10.11Z" }, + { url = "https://files.pythonhosted.org/packages/93/8f/b811b9d1f617c83c907e7082a76e2b92b655400e61730cd61a1f67178393/yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16", size = 346460, upload-time = "2025-06-10T00:45:12.055Z" }, + { url = "https://files.pythonhosted.org/packages/70/fd/af94f04f275f95da2c3b8b5e1d49e3e79f1ed8b6ceb0f1664cbd902773ff/yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513", size = 334486, upload-time = "2025-06-10T00:45:13.995Z" }, + { url = "https://files.pythonhosted.org/packages/84/65/04c62e82704e7dd0a9b3f61dbaa8447f8507655fd16c51da0637b39b2910/yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f", size = 342219, upload-time = "2025-06-10T00:45:16.479Z" }, + { url = "https://files.pythonhosted.org/packages/91/95/459ca62eb958381b342d94ab9a4b6aec1ddec1f7057c487e926f03c06d30/yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390", size = 350693, upload-time = "2025-06-10T00:45:18.399Z" }, + { url = "https://files.pythonhosted.org/packages/a6/00/d393e82dd955ad20617abc546a8f1aee40534d599ff555ea053d0ec9bf03/yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458", size = 
355803, upload-time = "2025-06-10T00:45:20.677Z" }, + { url = "https://files.pythonhosted.org/packages/9e/ed/c5fb04869b99b717985e244fd93029c7a8e8febdfcffa06093e32d7d44e7/yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e", size = 341709, upload-time = "2025-06-10T00:45:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/24/fd/725b8e73ac2a50e78a4534ac43c6addf5c1c2d65380dd48a9169cc6739a9/yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d", size = 86591, upload-time = "2025-06-10T00:45:25.793Z" }, + { url = "https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" }, + { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" }, +] diff --git a/bot_client/websocket_client_example.py b/bot_client/websocket_client_example.py index 860df5f..113f227 100644 --- a/bot_client/websocket_client_example.py +++ b/bot_client/websocket_client_example.py @@ -6,13 +6,9 @@ and interacting with a Diplomacy server via WebSocket. 
""" import asyncio -import logging +from loguru import logger from websocket_diplomacy_client import connect_to_diplomacy_server -# Set up logging -logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') -logger = logging.getLogger(__name__) - async def basic_client_example(): """ @@ -25,7 +21,7 @@ async def basic_client_example(): hostname="localhost", port=8432, username="test_player", - password="test_password" + password="test_password", ) logger.info("Connected successfully!") @@ -34,8 +30,10 @@ async def basic_client_example(): games = await client.list_games() logger.info(f"Found {len(games)} games:") for game in games: - logger.info(f" Game {game.get('game_id', 'unknown')}: {game.get('status', 'unknown')} " - f"({game.get('n_players', 0)}/{game.get('n_controls', 0)} players)") + logger.info( + f" Game {game.get('game_id', 'unknown')}: {game.get('status', 'unknown')} " + f"({game.get('n_players', 0)}/{game.get('n_controls', 0)} players)" + ) # Get available maps logger.info("Getting available maps...") @@ -49,7 +47,7 @@ async def basic_client_example(): rules=["NO_PRESS", "IGNORE_ERRORS", "POWER_CHOICE"], power_name="FRANCE", # Control France n_controls=1, # Only need 1 player to start (for testing) - deadline=None # No time pressure + deadline=None, # No time pressure ) logger.info(f"Created game {client.game_id} as {client.power_name}") @@ -62,21 +60,21 @@ async def basic_client_example(): # Get possible orders logger.info("Getting possible orders for France...") possible_orders = client.get_all_possible_orders() - france_orders = possible_orders.get('FRANCE', []) + france_orders = possible_orders.get("FRANCE", []) logger.info(f"France can make {len(france_orders)} possible orders") if france_orders: logger.info(f"First few orders: {france_orders[:5]}") # Submit some orders (example: hold all units) logger.info("Submitting hold orders for all French units...") - units = client.get_units('FRANCE') + units = 
client.get_units("FRANCE") hold_orders = [] for unit in units: # Format: "A PAR H" means Army in Paris holds hold_orders.append(f"{unit} H") - + if hold_orders: - await client.set_orders('FRANCE', hold_orders) + await client.set_orders("FRANCE", hold_orders) logger.info(f"Submitted orders: {hold_orders}") # Try to process the game (might fail if we don't have admin rights) @@ -84,11 +82,13 @@ async def basic_client_example(): try: await client.process_game() logger.info("Game processed successfully") - + # Synchronize to get updated state await client.synchronize() - logger.info(f"After processing - Current phase: {client.get_current_phase()}") - + logger.info( + f"After processing - Current phase: {client.get_current_phase()}" + ) + except Exception as e: logger.warning(f"Could not process game (normal if not admin): {e}") @@ -101,7 +101,7 @@ async def basic_client_example(): logger.error(f"Error in example: {e}", exc_info=True) finally: # Clean up - if 'client' in locals(): + if "client" in locals(): await client.close() logger.info("Example completed") @@ -109,7 +109,7 @@ async def basic_client_example(): async def join_existing_game_example(game_id: str): """ Example showing how to join an existing game. 
- + Args: game_id: ID of the game to join """ @@ -119,7 +119,7 @@ async def join_existing_game_example(game_id: str): hostname="localhost", port=8432, username="test_player_2", - password="test_password" + password="test_password", ) # Join as an observer first @@ -133,13 +133,15 @@ async def join_existing_game_example(game_id: str): # List powers and their status for power_name, power in client.powers.items(): - logger.info(f"{power_name}: {len(power.centers)} centers, " - f"{len(power.units)} units, eliminated: {power.is_eliminated()}") + logger.info( + f"{power_name}: {len(power.centers)} centers, " + f"{len(power.units)} units, eliminated: {power.is_eliminated()}" + ) except Exception as e: logger.error(f"Error joining game: {e}", exc_info=True) finally: - if 'client' in locals(): + if "client" in locals(): await client.close() @@ -149,28 +151,31 @@ async def message_sending_example(): """ try: client = await connect_to_diplomacy_server() - + # Create a game with PRESS allowed game = await client.create_game( - rules=["IGNORE_ERRORS", "POWER_CHOICE"], # Remove NO_PRESS to allow messages + rules=[ + "IGNORE_ERRORS", + "POWER_CHOICE", + ], # Remove NO_PRESS to allow messages power_name="FRANCE", - n_controls=1 + n_controls=1, ) - + # Send a public message await client.send_message( sender="FRANCE", recipient="GLOBAL", - message="Greetings from France! Let's have a fair game." + message="Greetings from France! Let's have a fair game.", ) logger.info("Sent public message") - + # Send a private message (would need another power to be present) try: await client.send_message( sender="FRANCE", recipient="ENGLAND", - message="Hello England, shall we discuss an alliance?" 
+ message="Hello England, shall we discuss an alliance?", ) logger.info("Sent private message to England") except Exception as e: @@ -179,17 +184,18 @@ async def message_sending_example(): except Exception as e: logger.error(f"Error in messaging example: {e}", exc_info=True) finally: - if 'client' in locals(): + if "client" in locals(): await client.close() if __name__ == "__main__": import sys - + if len(sys.argv) > 1: # Join existing game if game ID provided as argument game_id = sys.argv[1] asyncio.run(join_existing_game_example(game_id)) else: # Run basic example - asyncio.run(basic_client_example()) \ No newline at end of file + asyncio.run(basic_client_example()) + diff --git a/bot_client/websocket_diplomacy_client.py b/bot_client/websocket_diplomacy_client.py index 7f0e288..9dd452e 100644 --- a/bot_client/websocket_diplomacy_client.py +++ b/bot_client/websocket_diplomacy_client.py @@ -5,34 +5,30 @@ A simplified client wrapper for connecting to a Diplomacy server via WebSocket and playing games remotely, designed as a drop-in replacement for direct Game() usage. """ -import asyncio -import logging -import time from typing import Dict, List, Optional, Any -from tornado import gen +from diplomacy.engine.game import Game +from loguru import logger from diplomacy.client.connection import connect -from diplomacy.client.channel import Channel from diplomacy.client.network_game import NetworkGame -from diplomacy.communication import requests from diplomacy.engine.message import Message from diplomacy.utils.exceptions import DiplomacyException -logger = logging.getLogger(__name__) - class WebSocketDiplomacyClient: """ A WebSocket-based client for playing Diplomacy games on a remote server. - + This client provides a simplified interface similar to the local Game class but communicates with a remote server via WebSocket connections. 
""" - - def __init__(self, hostname: str = "localhost", port: int = 8432, use_ssl: bool = False): + + def __init__( + self, hostname: str = "localhost", port: int = 8432, use_ssl: bool = False + ): """ Initialize the WebSocket client. - + Args: hostname: Server hostname (default: localhost) port: Server port (default: 8432) @@ -41,47 +37,49 @@ class WebSocketDiplomacyClient: self.hostname = hostname self.port = port self.use_ssl = use_ssl - + + self.game: NetworkGame self.connection = None self.channel = None - self.game = None self.username = None self.token = None - + # Game state tracking self._game_id = None self._power_name = None self._game_role = None - + async def connect_and_authenticate(self, username: str, password: str) -> None: """ Connect to the server and authenticate. - + Args: username: Username for authentication password: Password for authentication """ logger.info(f"Connecting to {self.hostname}:{self.port}") self.connection = await connect(self.hostname, self.port) - + logger.info(f"Authenticating as {username}") self.channel = await self.connection.authenticate(username, password) self.username = username self.token = self.channel.token - + logger.info("Successfully connected and authenticated") - - async def create_game(self, - map_name: str = "standard", - rules: Optional[List[str]] = None, - game_id: Optional[str] = None, - power_name: Optional[str] = None, - n_controls: int = 7, - deadline: Optional[int] = None, - registration_password: Optional[str] = None) -> NetworkGame: + + async def create_game( + self, + map_name: str = "standard", + rules: Optional[List[str]] = None, + game_id: Optional[str] = None, + power_name: Optional[str] = None, + n_controls: int = 7, + deadline: Optional[int] = None, + registration_password: Optional[str] = None, + ) -> NetworkGame: """ Create a new game on the server. 
- + Args: map_name: Name of the map to use (default: "standard") rules: List of game rules (default: ["NO_PRESS", "IGNORE_ERRORS", "POWER_CHOICE"]) @@ -90,18 +88,18 @@ class WebSocketDiplomacyClient: n_controls: Number of controls required to start the game deadline: Game deadline in seconds registration_password: Password to protect the game - + Returns: NetworkGame object representing the created game """ if not self.channel: raise DiplomacyException("Must connect and authenticate first") - + if rules is None: rules = ["NO_PRESS", "IGNORE_ERRORS", "POWER_CHOICE"] - + logger.info(f"Creating game with map '{map_name}', rules: {rules}") - + self.game = await self.channel.create_game( map_name=map_name, rules=rules, @@ -109,94 +107,100 @@ class WebSocketDiplomacyClient: power_name=power_name, n_controls=n_controls, deadline=deadline, - registration_password=registration_password + registration_password=registration_password, ) - + self._game_id = self.game.game_id self._power_name = power_name self._game_role = power_name if power_name else "OMNISCIENT" - + logger.info(f"Created game {self._game_id} as {self._game_role}") return self.game - - async def join_game(self, - game_id: str, - power_name: Optional[str] = None, - registration_password: Optional[str] = None) -> NetworkGame: + + async def join_game( + self, + game_id: str, + power_name: Optional[str] = None, + registration_password: Optional[str] = None, + ) -> NetworkGame: """ Join an existing game. 
- + Args: game_id: ID of the game to join power_name: Power to control (None for observer) registration_password: Password if the game is protected - + Returns: NetworkGame object representing the joined game """ if not self.channel: raise DiplomacyException("Must connect and authenticate first") - + logger.info(f"Joining game {game_id} as {power_name or 'observer'}") - + self.game = await self.channel.join_game( game_id=game_id, power_name=power_name, - registration_password=registration_password + registration_password=registration_password, ) - + self._game_id = game_id self._power_name = power_name self._game_role = power_name if power_name else "OBSERVER" - + logger.info(f"Joined game {game_id} as {self._game_role}") return self.game - - async def list_games(self, - game_id_filter: Optional[str] = None, - map_name: Optional[str] = None, - status: Optional[str] = None, - include_protected: bool = False) -> List[Dict[str, Any]]: + + async def list_games( + self, + game_id_filter: Optional[str] = None, + map_name: Optional[str] = None, + status: Optional[str] = None, + include_protected: bool = False, + ) -> List[Dict[str, Any]]: """ List available games on the server. - + Args: game_id_filter: Filter by game ID substring map_name: Filter by map name status: Filter by game status include_protected: Include password-protected games - + Returns: List of game information dictionaries """ if not self.channel: raise DiplomacyException("Must connect and authenticate first") - + games = await self.channel.list_games( game_id=game_id_filter, map_name=map_name, status=status, - include_protected=include_protected + include_protected=include_protected, ) - + return games - + async def get_available_maps(self) -> Dict[str, Any]: """ Get available maps from the server. 
- + Returns: Dictionary of available maps and their properties """ if not self.channel: raise DiplomacyException("Must connect and authenticate first") - + return await self.channel.get_available_maps() - - async def set_orders(self, power_name: str, orders: List[str], wait: Optional[bool] = None) -> None: + + async def set_orders( + self, power_name: str, orders: List[str], wait: Optional[bool] = None + ) -> None: """ Set orders for a power. - + Args: power_name: Name of the power orders: List of order strings @@ -204,44 +208,46 @@ class WebSocketDiplomacyClient: """ if not self.game: raise DiplomacyException("Must join a game first") - + logger.debug(f"Setting orders for {power_name}: {orders}") await self.game.set_orders(orders=orders, wait=wait) - + async def clear_orders(self, power_name: str) -> None: """ Clear orders for a power. - + Args: power_name: Name of the power """ if not self.game: raise DiplomacyException("Must join a game first") - + logger.debug(f"Clearing orders for {power_name}") await self.game.clear_orders() - + async def set_wait_flag(self, power_name: str, wait: bool) -> None: """ Set the wait flag for a power. - + Args: power_name: Name of the power wait: Whether to wait for other players """ if not self.game: raise DiplomacyException("Must join a game first") - + logger.debug(f"Setting wait flag for {power_name}: {wait}") if wait: await self.game.wait() else: await self.game.no_wait() - - async def send_message(self, sender: str, recipient: str, message: str, phase: Optional[str] = None) -> None: + + async def send_message( + self, sender: str, recipient: str, message: str, phase: Optional[str] = None + ) -> None: """ Send a diplomatic message. 
- + Args: sender: Sending power name recipient: Receiving power name (or GLOBAL for public messages) @@ -250,167 +256,161 @@ class WebSocketDiplomacyClient: """ if not self.game: raise DiplomacyException("Must join a game first") - + if phase is None: phase = self.game.current_short_phase - - msg = Message( - sender=sender, - recipient=recipient, - message=message, - phase=phase - ) - + + msg = Message(sender=sender, recipient=recipient, message=message, phase=phase) + logger.debug(f"Sending message from {sender} to {recipient}: {message}") await self.game.send_game_message(message=msg) - + async def process_game(self) -> None: """ Force the game to process immediately (admin/moderator only). """ if not self.game: raise DiplomacyException("Must join a game first") - + logger.info("Processing game") await self.game.process() - + async def synchronize(self) -> None: """ Synchronize the local game state with the server. """ if not self.game: raise DiplomacyException("Must join a game first") - + logger.debug("Synchronizing game state") await self.game.synchronize() - - async def get_phase_history(self, - from_phase: Optional[str] = None, - to_phase: Optional[str] = None) -> List[Dict[str, Any]]: + + async def get_phase_history( + self, from_phase: Optional[str] = None, to_phase: Optional[str] = None + ) -> List[Dict[str, Any]]: """ Get phase history for the game. - + Args: from_phase: Starting phase (None for beginning) to_phase: Ending phase (None for current) - + Returns: List of phase data dictionaries """ if not self.game: raise DiplomacyException("Must join a game first") - + return await self.game.get_phase_history( - from_phase=from_phase, - to_phase=to_phase + from_phase=from_phase, to_phase=to_phase ) - + async def vote(self, power_name: str, vote: str) -> None: """ Submit a vote (e.g., for draw). 
- + Args: power_name: Name of the power voting vote: Vote value (e.g., "yes", "no") """ if not self.game: raise DiplomacyException("Must join a game first") - + logger.debug(f"Voting {vote} for {power_name}") await self.game.vote(vote=vote) - + def get_current_phase(self) -> str: """Get the current game phase.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.get_current_phase() - + def get_current_short_phase(self) -> str: """Get the current short phase name.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.current_short_phase - + def get_state(self) -> Dict[str, Any]: """Get the current game state.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.get_state() - + def get_power(self, power_name: str) -> Any: """Get power object by name.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.get_power(power_name) - + def get_orderable_locations(self, power_name: str) -> List[str]: """Get orderable locations for a power.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.get_orderable_locations(power_name) - + def get_all_possible_orders(self) -> Dict[str, List[str]]: """Get all possible orders for all powers.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.get_all_possible_orders() - + def get_units(self, power_name: str) -> List[str]: """Get units for a power.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.get_units(power_name) - + @property def is_game_done(self) -> bool: """Check if the game is done.""" if not self.game: return False return self.game.is_game_done - + @property def powers(self) -> Dict[str, Any]: """Get all powers in the game.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.powers - + @property def order_history(self) -> Dict[str, 
Dict[str, List[str]]]: """Get order history.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.order_history - + @property def result_history(self) -> Dict[str, Dict[str, List[str]]]: """Get result history.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.result_history - + @property def messages(self) -> Dict[int, Message]: """Get game messages.""" if not self.game: raise DiplomacyException("Must join a game first") return self.game.messages - + @property def game_id(self) -> Optional[str]: """Get the current game ID.""" return self._game_id - + @property def power_name(self) -> Optional[str]: """Get the controlled power name.""" return self._power_name - + @property def game_role(self) -> Optional[str]: """Get the current game role.""" return self._game_role - + async def close(self) -> None: """Close the connection to the server.""" if self.game: @@ -418,7 +418,7 @@ class WebSocketDiplomacyClient: await self.game.leave() except Exception as e: logger.warning(f"Error leaving game: {e}") - + if self.connection: try: # The connection doesn't have a direct close method in the API, @@ -426,29 +426,32 @@ class WebSocketDiplomacyClient: self.connection = None except Exception as e: logger.warning(f"Error closing connection: {e}") - + logger.info("Connection closed") # Convenience function for quick setup -async def connect_to_diplomacy_server(hostname: str = "localhost", - port: int = 8432, - username: str = "player", - password: str = "password", - use_ssl: bool = False) -> WebSocketDiplomacyClient: +async def connect_to_diplomacy_server( + hostname: str = "localhost", + port: int = 8432, + username: str = "player", + password: str = "password", + use_ssl: bool = False, +) -> WebSocketDiplomacyClient: """ Convenience function to quickly connect to a Diplomacy server. 
- + Args: hostname: Server hostname port: Server port username: Username for authentication password: Password for authentication use_ssl: Whether to use SSL/TLS - + Returns: Connected and authenticated WebSocketDiplomacyClient """ client = WebSocketDiplomacyClient(hostname, port, use_ssl) await client.connect_and_authenticate(username, password) - return client \ No newline at end of file + return client +