Mirror of https://github.com/GoodStartLabs/AI_Diplomacy.git (synced 2026-04-29 17:35:18 +00:00)
no press + context includes orders and results
This commit is contained in:
parent 9443384c89
commit e3db81f00e
8 changed files with 206 additions and 132 deletions
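The hunks below (from the client module defining BaseModelClient) swap the negotiation-only ConversationHistory, plus the separately rendered phase_summaries block, for a single GameHistory object, so each power's prompt context now carries the orders submitted in past phases and their results. Only the call site game_history.get_game_history(power_name) returning a string is visible in this diff; the sketch below is a guess at the interface it implies. The Phase container and storage layout are invented for illustration, not the repository's actual code:

# Hypothetical sketch of the GameHistory interface assumed by the hunks
# below. Only get_game_history(power_name) -> str is visible in the diff;
# everything else here is illustrative.
from dataclasses import dataclass, field
from typing import Dict, List

@dataclass
class Phase:
    name: str  # e.g. "S1901M"
    orders_by_power: Dict[str, List[str]] = field(default_factory=dict)
    results_by_power: Dict[str, List[str]] = field(default_factory=dict)

@dataclass
class GameHistory:
    phases: List[Phase] = field(default_factory=list)

    def get_game_history(self, power_name: str) -> str:
        # Render one block per past phase: each order with its result.
        blocks = []
        for phase in self.phases:
            orders = phase.orders_by_power.get(power_name, [])
            results = phase.results_by_power.get(power_name, [])
            lines = [f"PHASE {phase.name}:"]
            for order, result in zip(orders, results):
                lines.append(f"  {order} -> {result}")
            blocks.append("\n".join(lines))
        return "\n\n".join(blocks)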
@@ -16,7 +16,7 @@ from openai import OpenAI as DeepSeekOpenAI
 from diplomacy.engine.message import GLOBAL
 
-from .conversation_history import ConversationHistory
+from .game_history import GameHistory
 
 # set logger back to just info
 logger = logging.getLogger("client")
 
@@ -55,8 +55,7 @@ class BaseModelClient:
         board_state,
         power_name: str,
         possible_orders: Dict[str, List[str]],
-        conversation_history: ConversationHistory,
-        phase_summaries: Optional[Dict[str, str]] = None,
+        game_history: GameHistory,
     ) -> str:
         context = load_prompt("context_prompt.txt")
 
@@ -89,17 +88,9 @@ class BaseModelClient:
             if fleet in units_info_set:
                 convoy_paths_possible.append((start_loc, fleets_req, end_loc))
 
-        # 1) Prepare a block of text for the phase_summaries
-        if phase_summaries:
-            historical_summaries = "\nPAST PHASE SUMMARIES:\n"
-            for phase_key, summary_txt in phase_summaries.items():
-                historical_summaries += f"\nPHASE {phase_key}:\n{summary_txt}\n"
-        else:
-            historical_summaries = "\n(No historical summaries yet)\n"
-
-        conversation_text = conversation_history.get_conversation_history(power_name)
+        conversation_text = game_history.get_game_history(power_name)
         if not conversation_text:
-            conversation_text = "\n(No conversation history yet)\n"
+            conversation_text = "\n(No game history yet)\n"
 
         # Load in current context values
         context = context.format(
@@ -110,8 +101,7 @@ class BaseModelClient:
             map_as_adjacency_list=game.map.loc_abut,
             possible_coasts=game.map.loc_coasts,
             game_map_scs=game.map.scs,
-            historical_summaries=historical_summaries,
-            conversation_history=conversation_text,
+            game_history=conversation_text,
             enemy_units=enemy_units,
             enemy_centers=enemy_centers,
             units_info=units_info,
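With this hunk, the single rendered history string fills a game_history placeholder where historical_summaries and conversation_history used to go, so context_prompt.txt must expose a matching {game_history} field; str.format raises KeyError if the template still references a removed placeholder, so the prompt file has to change in the same commit. A toy illustration of the contract (the template text is invented):

# Toy template: the prompt file must now contain {game_history} rather
# than {historical_summaries} or {conversation_history}.
template = "YOUR POWER: {power_name}\nGAME HISTORY:\n{game_history}\n"
print(template.format(
    power_name="FRANCE",
    game_history="PHASE S1901M:\n  A PAR - BUR -> success",
))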
@@ -128,8 +118,7 @@ class BaseModelClient:
         board_state,
         power_name: str,
         possible_orders: Dict[str, List[str]],
-        conversation_history: ConversationHistory,
-        phase_summaries: Optional[Dict[str, str]] = None,
+        game_history: GameHistory,
     ) -> str:
         """
         Unified prompt approach: incorporate conversation and 'PARSABLE OUTPUT' requirements.
@@ -144,8 +133,7 @@ class BaseModelClient:
             board_state,
             power_name,
             possible_orders,
-            conversation_history,
-            phase_summaries,
+            game_history,
         )
 
         return context + "\n\n" + instructions
@@ -157,7 +145,6 @@ class BaseModelClient:
         power_name: str,
         possible_orders: Dict[str, List[str]],
         conversation_text: str,
-        phase_summaries: Optional[Dict[str, str]] = None,
         model_error_stats=None,  # New optional param
     ) -> List[str]:
         """
@@ -171,7 +158,6 @@ class BaseModelClient:
             power_name,
             possible_orders,
             conversation_text,
-            phase_summaries,
         )
 
         raw_response = ""
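Note that get_orders still accepts a plain conversation_text: str rather than the GameHistory object, so the caller is expected to render the history before invoking it. A hypothetical caller-side pairing (the unseen leading parameters such as game and board_state are assumptions, not shown in the diff):

# Hypothetical usage; get_orders' leading parameters are assumed.
conversation_text = game_history.get_game_history(power_name)
orders = client.get_orders(
    game, board_state, power_name, possible_orders, conversation_text
)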
@@ -184,6 +170,8 @@ class BaseModelClient:
 
         # Attempt to parse the final "orders" from the LLM
         move_list = self._extract_moves(raw_response, power_name)
+        print(f"prompt {prompt}")
+        print(f"response {raw_response}")
         if not move_list:
             logger.warning(
                 f"[{self.model_name}] Could not extract moves for {power_name}. Using fallback."
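The two added print calls dump the full prompt and raw response to stdout on every order request. If they are meant as temporary diagnostics, the module's existing "client" logger (see the first hunk) gives the same information behind a level switch; a minimal alternative sketch:

# Sketch: same output via the module logger instead of print,
# silenced unless the "client" logger is set to DEBUG.
logger.debug("prompt %s", prompt)
logger.debug("response %s", raw_response)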
@@ -341,9 +329,8 @@ class BaseModelClient:
         board_state,
         power_name: str,
         possible_orders: Dict[str, List[str]],
-        conversation_history: ConversationHistory,
+        game_history: GameHistory,
         game_phase: str,
-        phase_summaries: Optional[Dict[str, str]] = None,
     ) -> str:
         instructions = load_prompt("conversation_instructions.txt")
 
@@ -352,8 +339,7 @@ class BaseModelClient:
             board_state,
             power_name,
             possible_orders,
-            conversation_history,
-            phase_summaries,
+            game_history,
         )
 
         return context + "\n\n" + instructions
@@ -364,9 +350,8 @@ class BaseModelClient:
         board_state,
         power_name: str,
         possible_orders: Dict[str, List[str]],
-        conversation_history: ConversationHistory,
+        game_history: GameHistory,
         game_phase: str,
-        phase_summaries: Optional[Dict[str, str]] = None,
         active_powers: Optional[List[str]] = None,
     ) -> str:
         prompt = self.build_conversation_prompt(
@@ -374,9 +359,8 @@ class BaseModelClient:
             board_state,
             power_name,
             possible_orders,
-            conversation_history,
+            game_history,
             game_phase,
-            phase_summaries,
         )
 
         raw_response = self.generate_response(prompt)
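Taken together, every entry point now threads one shared GameHistory through in place of ConversationHistory and phase_summaries. A hypothetical caller-side update for the reply path (the unseen leading parameters and variable names are assumptions; active_powers keeps its default and is omitted):

# Hypothetical call in the game loop after this commit; the parameters
# before board_state are assumed, not shown in the diff.
reply = client.get_conversation_reply(
    game,
    board_state,
    "FRANCE",
    possible_orders,
    game_history,              # replaces conversation_history
    game.current_short_phase,  # game_phase
)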