Mirror of https://github.com/NousResearch/atropos.git, synced 2026-04-23 16:54:56 +00:00

clean log

This commit is contained in:
  parent d1b0dee8f7
  commit 600c54f5f8

7 changed files with 15 additions and 206 deletions
@@ -193,14 +193,6 @@ class VLLMServer(APIServer):
         # Prepare request for VLLM native API
         request_data = {"prompt": {"prompt_token_ids": prompt_tokens}, "logprobs": 0}
         request_data.update(kwargs)
-        logger.warning(
-            "vllm_server completion POST start base_url=%s prompt_tokens=%s n=%s max_tokens=%s temperature=%s",
-            self.config.base_url,
-            len(prompt_tokens),
-            request_data.get("n"),
-            request_data.get("max_tokens"),
-            request_data.get("temperature"),
-        )
 
         # Make async request to VLLM /generate endpoint
         async with aiohttp.ClientSession() as session:
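Note: stripped of the deleted trace, the surviving control flow in this hunk is just payload assembly plus the POST. Below is a minimal standalone sketch of that call, assuming a vLLM server exposing the native /generate endpoint; the payload shape is copied from the context lines above, while the helper name and base URL are hypothetical.

```python
import asyncio
from typing import Any, Dict, List

import aiohttp


async def vllm_generate(base_url: str, prompt_tokens: List[int], **kwargs: Any) -> Dict[str, Any]:
    # Payload shape taken from the context lines above; sampling params
    # (n, max_tokens, temperature, ...) ride in through kwargs.
    request_data: Dict[str, Any] = {
        "prompt": {"prompt_token_ids": prompt_tokens},
        "logprobs": 0,
    }
    request_data.update(kwargs)
    async with aiohttp.ClientSession() as session:
        # Native vLLM endpoint, per the comment in the diff.
        async with session.post(f"{base_url}/generate", json=request_data) as response:
            response.raise_for_status()
            return await response.json()


if __name__ == "__main__":
    # Hypothetical local server and token ids, for illustration only.
    out = asyncio.run(vllm_generate("http://localhost:9000", [1, 2, 3], n=1, max_tokens=8))
    print(out.get("finish_reasons"))
```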
@@ -216,11 +208,6 @@ class VLLMServer(APIServer):
             ) as response:
                 response.raise_for_status()
                 results = await response.json()
-                logger.warning(
-                    "vllm_server completion POST done outputs=%s finish_reasons=%s",
-                    len(results.get("logprobs", [])),
-                    len(results.get("finish_reasons", [])),
-                )
                 output_tokens_list = []
                 output_logprobs_list = []
                 finish_reasons_list = []
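The deleted lines here logged routine per-request traffic at WARNING, which is what made every completion call show up in default log output. If this trace is ever wanted again, the conventional pattern is to demote it to DEBUG; a minimal sketch, with the logger name assumed:

```python
import logging
from typing import Any, Dict

logger = logging.getLogger("vllm_server")  # logger name is an assumption


def trace_completion(results: Dict[str, Any]) -> None:
    # Same fields as the deleted lines, demoted to DEBUG: silent under the
    # default level, enabled via logging.basicConfig(level=logging.DEBUG).
    logger.debug(
        "completion POST done outputs=%s finish_reasons=%s",
        len(results.get("logprobs", [])),
        len(results.get("finish_reasons", [])),
    )
```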
@@ -330,13 +317,6 @@ class VLLMServer(APIServer):
         request_data["temperature"] = 0.0
         request_data["top_p"] = 1.0
         request_data.setdefault("max_tokens", 1)
-        logger.warning(
-            "vllm_server get_logprobs POST start base_url=%s prompt_tokens=%s top_k=%s max_tokens=%s",
-            self.config.base_url,
-            len(prompt_tokens),
-            top_k,
-            request_data.get("max_tokens"),
-        )
 
         async with aiohttp.ClientSession() as session:
             async with session.post(
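The pinned sampling parameters in this hunk are what make get_logprobs a scoring call rather than a generation call: temperature 0.0 and top_p 1.0 force greedy decoding, and max_tokens defaults to 1, presumably because the native endpoint still has to run one generation step before prompt logprobs come back. A sketch of the resulting request body; the prompt_logprobs request key is an assumption (only the response key appears in this diff):

```python
from typing import Any, Dict, List


def build_logprob_request(prompt_tokens: List[int], top_k: int) -> Dict[str, Any]:
    # Deterministic, minimal-cost settings from the context lines above:
    # only the prompt needs scoring, so one greedy token is enough.
    request_data: Dict[str, Any] = {
        "prompt": {"prompt_token_ids": prompt_tokens},
        "prompt_logprobs": top_k,  # request key assumed, not shown in the diff
        "temperature": 0.0,
        "top_p": 1.0,
    }
    request_data.setdefault("max_tokens", 1)
    return request_data
```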
@@ -351,10 +331,6 @@ class VLLMServer(APIServer):
             ) as response:
                 response.raise_for_status()
                 results = await response.json()
-                logger.warning(
-                    "vllm_server get_logprobs POST done prompt_logprobs_present=%s",
-                    results.get("prompt_logprobs") is not None,
-                )
 
                 raw_prompt_logprobs = results.get("prompt_logprobs")
                 if raw_prompt_logprobs is None:
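If raw_prompt_logprobs comes back in vLLM's usual shape, it is a per-position list whose first entry is None (nothing precedes the first prompt token) and whose later entries map token ids to logprob records. A hedged flattening sketch under that assumed schema:

```python
from typing import Any, Dict, List, Optional


def flatten_prompt_logprobs(
    raw_prompt_logprobs: List[Optional[Dict[Any, Any]]],
    prompt_tokens: List[int],
) -> List[Optional[float]]:
    # Schema assumed: entry i maps token ids (str or int keys, depending
    # on JSON serialization) to a record carrying a "logprob" field.
    flat: List[Optional[float]] = []
    for pos, entry in enumerate(raw_prompt_logprobs):
        if entry is None:
            flat.append(None)  # position 0: no conditioning context
            continue
        record = entry.get(str(prompt_tokens[pos]), entry.get(prompt_tokens[pos]))
        flat.append(record["logprob"] if isinstance(record, dict) else record)
    return flat
```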
@@ -451,10 +427,6 @@ def resolve_openai_configs(
     elif isinstance(default_server_configs, list):
         server_configs = [final_openai_config]
     else:
-        logger.warning(
-            f"Unexpected type for default_server_configs: {type(default_server_configs)}. "
-            f"Proceeding with single OpenAI server configuration based on merged settings."
-        )
         server_configs = [final_openai_config]
 
     return server_configs
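One thing visible even in this short hunk: both the list branch and the unexpected-type branch end in the same single-element fallback, so the deleted warning only ever changed what was logged, never the returned value. A condensed sketch of that tail (the branches above this hunk are not shown in the diff and are omitted here):

```python
from typing import Any, List


def resolve_configs_tail(default_server_configs: Any, final_openai_config: Any) -> List[Any]:
    # Mirrors the control flow in the hunk above: list input and any
    # unexpected type both collapse to one merged OpenAI config.
    if isinstance(default_server_configs, list):
        return [final_openai_config]
    # Unexpected type: same fallback (the deleted logger.warning used to
    # announce this case before proceeding).
    return [final_openai_config]
```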