Skip to content

vllm.entrypoints.pooling.classify.protocol

ClassificationRequest module-attribute

ClassificationChatRequest

Bases: PoolingBasicRequestMixin, ChatRequestMixin, ClassifyRequestMixin

Source code in vllm/entrypoints/pooling/classify/protocol.py
class ClassificationChatRequest(
    PoolingBasicRequestMixin, ChatRequestMixin, ClassifyRequestMixin
):
    """Chat-style classification request.

    Combines the pooling, chat, and classification request mixins and adds
    an optional pass-through of extra keyword arguments for the HF processor.
    """

    # --8<-- [start:chat-classification-extra-params]
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )

    def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
        """Derive tokenization parameters from *model_config* and this request.

        Output tokens are fixed at 0 since classification produces no text.
        """
        cfg = model_config.encoder_config or {}
        params = TokenizeParams(
            max_output_tokens=0,
            max_total_tokens=model_config.max_model_len,
            max_total_tokens_param="max_model_len",
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            add_special_tokens=self.add_special_tokens,
            do_lower_case=cfg.get("do_lower_case", False),
        )
        return params

mm_processor_kwargs class-attribute instance-attribute

mm_processor_kwargs: dict[str, Any] | None = Field(
    default=None,
    description="Additional kwargs to pass to the HF processor.",
)

build_tok_params

build_tok_params(
    model_config: ModelConfig,
) -> TokenizeParams
Source code in vllm/entrypoints/pooling/classify/protocol.py
def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
    """Build `TokenizeParams` from the model config and request fields.

    `max_output_tokens` is 0 — classification generates no output text.
    """
    cfg = model_config.encoder_config or {}
    params = TokenizeParams(
        max_output_tokens=0,
        max_total_tokens=model_config.max_model_len,
        max_total_tokens_param="max_model_len",
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        add_special_tokens=self.add_special_tokens,
        do_lower_case=cfg.get("do_lower_case", False),
    )
    return params

ClassificationCompletionRequest

Bases: PoolingBasicRequestMixin, CompletionRequestMixin, ClassifyRequestMixin

Source code in vllm/entrypoints/pooling/classify/protocol.py
class ClassificationCompletionRequest(
    PoolingBasicRequestMixin, CompletionRequestMixin, ClassifyRequestMixin
):
    """Completion-style classification request.

    Combines the pooling, completion, and classification request mixins;
    adds no extra fields of its own.
    """

    def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
        """Derive tokenization parameters from *model_config* and this request.

        Output tokens are fixed at 0 since classification produces no text.
        """
        cfg = model_config.encoder_config or {}
        params = TokenizeParams(
            max_output_tokens=0,
            max_total_tokens=model_config.max_model_len,
            max_total_tokens_param="max_model_len",
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            add_special_tokens=self.add_special_tokens,
            do_lower_case=cfg.get("do_lower_case", False),
        )
        return params

build_tok_params

build_tok_params(
    model_config: ModelConfig,
) -> TokenizeParams
Source code in vllm/entrypoints/pooling/classify/protocol.py
def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
    """Build `TokenizeParams` from the model config and request fields.

    `max_output_tokens` is 0 — classification generates no output text.
    """
    cfg = model_config.encoder_config or {}
    params = TokenizeParams(
        max_output_tokens=0,
        max_total_tokens=model_config.max_model_len,
        max_total_tokens_param="max_model_len",
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        add_special_tokens=self.add_special_tokens,
        do_lower_case=cfg.get("do_lower_case", False),
    )
    return params

ClassificationData

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/classify/protocol.py
class ClassificationData(OpenAIBaseModel):
    """One classification result within a batched response."""

    # Position of this result in the request batch.
    index: int
    # Predicted class label, if available (may be None).
    label: str | None
    # Per-class probability values.
    # NOTE(review): presumably len(probs) == num_classes — verify in serving code.
    probs: list[float]
    # Number of classes the model distinguishes.
    num_classes: int

index instance-attribute

index: int

label instance-attribute

label: str | None

num_classes instance-attribute

num_classes: int

probs instance-attribute

probs: list[float]

ClassificationResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/classify/protocol.py
class ClassificationResponse(OpenAIBaseModel):
    """OpenAI-style response envelope for classification results."""

    # Unique response id of the form "classify-<uuid>".
    id: str = Field(default_factory=lambda: f"classify-{random_uuid()}")
    # Object tag, fixed to "list" (batch of results).
    object: str = "list"
    # Unix timestamp (whole seconds) when the response object was created.
    created: int = Field(default_factory=lambda: int(time.time()))
    # Model name echoed back to the client.
    model: str
    # One entry per classified input.
    data: list[ClassificationData]
    # Token accounting for the request.
    usage: UsageInfo

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time.time()))

data instance-attribute

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"classify-{random_uuid()}"
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: str = 'list'

usage instance-attribute

usage: UsageInfo