vllm.entrypoints.pooling.embed.protocol

EmbeddingRequest module-attribute

EmbeddingRequest = EmbeddingCompletionRequest | EmbeddingChatRequest

EmbeddingBytesResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingBytesResponse(OpenAIBaseModel):
    content: list[bytes]
    headers: dict[str, str] | None = None
    media_type: str = "application/octet-stream"

content instance-attribute

content: list[bytes]

headers class-attribute instance-attribute

headers: dict[str, str] | None = None

media_type class-attribute instance-attribute

media_type: str = 'application/octet-stream'
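
A minimal usage sketch. The little-endian float32 packing and the custom header are assumptions for illustration; the class itself only carries opaque bytes plus optional headers.

import struct

from vllm.entrypoints.pooling.embed.protocol import EmbeddingBytesResponse

# Pack one 4-dimensional vector as little-endian float32 (assumed layout;
# EmbeddingBytesResponse treats the payload as opaque bytes).
payload = struct.pack("<4f", 0.1, 0.2, 0.3, 0.4)

resp = EmbeddingBytesResponse(
    content=[payload],
    headers={"x-embedding-dim": "4"},  # hypothetical header, for illustration
)
print(resp.media_type)  # application/octet-stream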

EmbeddingChatRequest

Bases: PoolingBasicRequestMixin, ChatRequestMixin, EmbedRequestMixin

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingChatRequest(
    PoolingBasicRequestMixin, ChatRequestMixin, EmbedRequestMixin
):
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )

    def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
        encoder_config = model_config.encoder_config or {}

        (
            max_total_tokens,
            max_output_tokens,
        ) = _get_max_total_output_tokens(model_config)

        return TokenizeParams(
            max_total_tokens=max_total_tokens,
            max_output_tokens=max_output_tokens,
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            do_lower_case=encoder_config.get("do_lower_case", False),
            add_special_tokens=self.add_special_tokens,
            max_total_tokens_param="max_model_len",
            max_output_tokens_param="max_model_len - max_embed_len",
        )

mm_processor_kwargs class-attribute instance-attribute

mm_processor_kwargs: dict[str, Any] | None = Field(
    default=None,
    description="Additional kwargs to pass to the HF processor.",
)

build_tok_params

build_tok_params(
    model_config: ModelConfig,
) -> TokenizeParams
Source code in vllm/entrypoints/pooling/embed/protocol.py
def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
    encoder_config = model_config.encoder_config or {}

    (
        max_total_tokens,
        max_output_tokens,
    ) = _get_max_total_output_tokens(model_config)

    return TokenizeParams(
        max_total_tokens=max_total_tokens,
        max_output_tokens=max_output_tokens,
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        do_lower_case=encoder_config.get("do_lower_case", False),
        add_special_tokens=self.add_special_tokens,
        max_total_tokens_param="max_model_len",
        max_output_tokens_param="max_model_len - max_embed_len",
    )
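
A sketch of constructing a chat-style embedding request. The model and messages fields are assumed to be supplied by PoolingBasicRequestMixin and ChatRequestMixin (OpenAI chat schema); only mm_processor_kwargs is declared on this class directly.

from vllm.entrypoints.pooling.embed.protocol import EmbeddingChatRequest

req = EmbeddingChatRequest.model_validate(
    {
        "model": "some-embedding-model",  # assumed mixin field
        "messages": [{"role": "user", "content": "text to embed"}],  # assumed mixin field
        "mm_processor_kwargs": {"num_crops": 4},
    }
)

# Given a loaded ModelConfig, the server can then derive tokenization limits:
# tok_params = req.build_tok_params(model_config)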

EmbeddingCompletionRequest

Bases: PoolingBasicRequestMixin, CompletionRequestMixin, EmbedRequestMixin

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingCompletionRequest(
    PoolingBasicRequestMixin, CompletionRequestMixin, EmbedRequestMixin
):
    def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
        encoder_config = model_config.encoder_config or {}

        (
            max_total_tokens,
            max_output_tokens,
        ) = _get_max_total_output_tokens(model_config)

        return TokenizeParams(
            max_total_tokens=max_total_tokens,
            max_output_tokens=max_output_tokens,
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            do_lower_case=encoder_config.get("do_lower_case", False),
            add_special_tokens=self.add_special_tokens,
            max_total_tokens_param="max_model_len",
            max_output_tokens_param="max_model_len - max_embed_len",
        )

build_tok_params

build_tok_params(
    model_config: ModelConfig,
) -> TokenizeParams
Source code in vllm/entrypoints/pooling/embed/protocol.py
def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
    encoder_config = model_config.encoder_config or {}

    (
        max_total_tokens,
        max_output_tokens,
    ) = _get_max_total_output_tokens(model_config)

    return TokenizeParams(
        max_total_tokens=max_total_tokens,
        max_output_tokens=max_output_tokens,
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        do_lower_case=encoder_config.get("do_lower_case", False),
        add_special_tokens=self.add_special_tokens,
        max_total_tokens_param="max_model_len",
        max_output_tokens_param="max_model_len - max_embed_len",
    )
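
A sketch of the completion-style variant, assuming the mixins provide the OpenAI-style model and input fields; truncate_prompt_tokens and add_special_tokens are the fields build_tok_params reads off the request above.

from vllm.entrypoints.pooling.embed.protocol import EmbeddingCompletionRequest

req = EmbeddingCompletionRequest.model_validate(
    {
        "model": "some-embedding-model",  # assumed mixin field
        "input": ["first passage", "second passage"],  # assumed mixin field
        "truncate_prompt_tokens": 128,
    }
)

# build_tok_params carries truncate_prompt_tokens into TokenizeParams:
# tok_params = req.build_tok_params(model_config)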

EmbeddingResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"embd-{random_uuid()}")
    object: str = "list"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    data: list[EmbeddingResponseData]
    usage: UsageInfo

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time.time()))

data instance-attribute

data: list[EmbeddingResponseData]

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"embd-{random_uuid()}"
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: str = 'list'

usage instance-attribute

usage: UsageInfo

EmbeddingResponseData

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingResponseData(OpenAIBaseModel):
    index: int
    object: str = "embedding"
    embedding: list[float] | str

embedding instance-attribute

embedding: list[float] | str

index instance-attribute

index: int

object class-attribute instance-attribute

object: str = 'embedding'
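
Putting the two response models together, a minimal sketch. The UsageInfo import path and its field names follow the OpenAI usage schema and are assumptions here.

from vllm.entrypoints.openai.protocol import UsageInfo  # assumed location
from vllm.entrypoints.pooling.embed.protocol import (
    EmbeddingResponse,
    EmbeddingResponseData,
)

resp = EmbeddingResponse(
    model="some-embedding-model",
    data=[
        EmbeddingResponseData(index=0, embedding=[0.1, 0.2, 0.3]),
        # embedding may also be a string, e.g. base64-encoded float32 bytes,
        # hence the list[float] | str annotation:
        EmbeddingResponseData(index=1, embedding="zczMPc3MTD6amZk+"),
    ],
    usage=UsageInfo(prompt_tokens=8, total_tokens=8),  # assumed field names
)
print(resp.object)  # list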

_get_max_total_output_tokens

_get_max_total_output_tokens(
    model_config: ModelConfig,
) -> tuple[int | None, int]
Source code in vllm/entrypoints/pooling/embed/protocol.py
def _get_max_total_output_tokens(
    model_config: ModelConfig,
) -> tuple[int | None, int]:
    max_total_tokens = model_config.max_model_len
    pooler_config = model_config.pooler_config

    if pooler_config is None:
        return max_total_tokens, 0

    if pooler_config.enable_chunked_processing:
        return None, 0

    max_embed_len = pooler_config.max_embed_len or max_total_tokens
    max_output_tokens = max_total_tokens - max_embed_len
    return max_total_tokens, max_output_tokens
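
A worked sketch of the three branches, using duck-typed stand-ins for ModelConfig and PoolerConfig (the Toy* classes below are hypothetical; only the attributes the function actually reads are modeled).

from dataclasses import dataclass

from vllm.entrypoints.pooling.embed.protocol import (
    _get_max_total_output_tokens,
)


@dataclass
class ToyPoolerConfig:  # hypothetical stand-in for PoolerConfig
    enable_chunked_processing: bool = False
    max_embed_len: int | None = None


@dataclass
class ToyModelConfig:  # hypothetical stand-in for ModelConfig
    max_model_len: int = 8192
    pooler_config: ToyPoolerConfig | None = None


# 1) No pooler config: the cap is max_model_len and the output budget is 0.
assert _get_max_total_output_tokens(ToyModelConfig()) == (8192, 0)

# 2) Chunked processing lifts the total-token cap entirely.
assert _get_max_total_output_tokens(
    ToyModelConfig(pooler_config=ToyPoolerConfig(enable_chunked_processing=True))
) == (None, 0)

# 3) Otherwise max_embed_len (defaulting to max_model_len) is subtracted:
#    8192 - 6144 leaves an output budget of 2048 tokens.
assert _get_max_total_output_tokens(
    ToyModelConfig(pooler_config=ToyPoolerConfig(max_embed_len=6144))
) == (8192, 2048)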