Skip to content

vllm.entrypoints.serve.profile.api_router

logger module-attribute

logger = init_logger(__name__)

router module-attribute

router = APIRouter()

attach_router

attach_router(app: FastAPI)
Source code in vllm/entrypoints/serve/profile/api_router.py
def attach_router(app: FastAPI):
    """Mount the profiling endpoints on *app* when a profiler is configured.

    Emits a one-time warning for whichever profiler backend is enabled
    (Torch profiler takes precedence in the warning), and registers the
    router only if at least one of the two profiler env vars is set.
    """
    torch_profiler_enabled = envs.VLLM_TORCH_PROFILER_DIR
    cuda_profiler_enabled = envs.VLLM_TORCH_CUDA_PROFILE

    if torch_profiler_enabled:
        logger.warning_once(
            "Torch Profiler is enabled in the API server. This should ONLY be "
            "used for local development!"
        )
    elif cuda_profiler_enabled:
        logger.warning_once(
            "CUDA Profiler is enabled in the API server. This should ONLY be "
            "used for local development!"
        )

    if torch_profiler_enabled or cuda_profiler_enabled:
        app.include_router(router)

engine_client

engine_client(request: Request) -> EngineClient
Source code in vllm/entrypoints/serve/profile/api_router.py
def engine_client(request: Request) -> EngineClient:
    """Return the engine client stashed on the FastAPI application's state."""
    app_state = request.app.state
    return app_state.engine_client

start_profile async

start_profile(raw_request: Request)
Source code in vllm/entrypoints/serve/profile/api_router.py
@router.post("/start_profile")
async def start_profile(raw_request: Request):
    """Ask the engine to start profiling and reply with a bare 200."""
    client = engine_client(raw_request)
    logger.info("Starting profiler...")
    await client.start_profile()
    logger.info("Profiler started.")
    # No body is needed; callers only check the status code.
    return Response(status_code=200)

stop_profile async

stop_profile(raw_request: Request)
Source code in vllm/entrypoints/serve/profile/api_router.py
@router.post("/stop_profile")
async def stop_profile(raw_request: Request):
    """Ask the engine to stop profiling and reply with a bare 200."""
    client = engine_client(raw_request)
    logger.info("Stopping profiler...")
    await client.stop_profile()
    logger.info("Profiler stopped.")
    # No body is needed; callers only check the status code.
    return Response(status_code=200)