diff --git a/example_trainer/vllm_api_server.py b/example_trainer/vllm_api_server.py
index 740db922..1de50c29 100644
--- a/example_trainer/vllm_api_server.py
+++ b/example_trainer/vllm_api_server.py
@@ -626,7 +626,7 @@ async def init_app(args: Namespace, llm_engine: AsyncLLM | None = None) -> FastA
 
 
 def _export_state_dict_info(args: Namespace) -> None:
     """Export basic model info to JSON for trainer (backup if patches don't run)."""
-    log_dir = os.environ.get("LOGDIR", "/tmp/atropos_bridge")
+    log_dir = os.environ.get("LOGDIR", ".")
     Path(log_dir).mkdir(parents=True, exist_ok=True)
     json_path = Path(log_dir) / "vllm_bridge_config.json"
diff --git a/example_trainer/vllm_patching/patched_gpu_runner.py b/example_trainer/vllm_patching/patched_gpu_runner.py
index dc1c4284..3c124598 100644
--- a/example_trainer/vllm_patching/patched_gpu_runner.py
+++ b/example_trainer/vllm_patching/patched_gpu_runner.py
@@ -224,7 +224,7 @@ def _create_patched_runner(BaseRunner: type) -> type:
                 print(f"[vLLM Patch] Note: model.share_memory() not available: {e}")
 
             # Export parameter info to JSON for trainer
-            log_dir = os.environ.get("LOGDIR", "/tmp/atropos_bridge")
+            log_dir = os.environ.get("LOGDIR", ".")
             Path(log_dir).mkdir(parents=True, exist_ok=True)
             json_path = Path(log_dir) / "vllm_bridge_config.json"