
vllm.model_executor.models.kimi_k25

Kimi-K2.5 Model Implementation for vLLM.

Kimi-K2.5 extends Kimi-K2 with vision support.

This module defines:

- KimiK25ProcessingInfo / KimiK25MultiModalProcessor: Processing logic
- KimiK25ForConditionalGeneration: Main model class

logger module-attribute

logger = init_logger(__name__)

KimiK25DummyInputsBuilder

Bases: BaseDummyInputsBuilder[KimiK25ProcessingInfo]

Builds dummy inputs for Kimi-K2.5 model profiling.
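
The builder compares a full video chunk (frame_per_chunk frames) against a single maximum-size image and keeps whichever needs more placeholder tokens, so the profiling run reserves memory for the worst case. A minimal sketch of that choice, with count_tokens standing in for info.media_tokens_calculator (hypothetical name):

# Sketch only: count_tokens stands in for info.media_tokens_calculator.
def pick_dummy_items(video_chunk_item, image_item, count_tokens):
    """Keep the single dummy item that consumes more placeholder tokens."""
    if count_tokens(video_chunk_item) >= count_tokens(image_item):
        return [video_chunk_item]
    return [image_item]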

Source code in vllm/model_executor/models/kimi_k25.py
class KimiK25DummyInputsBuilder(BaseDummyInputsBuilder[KimiK25ProcessingInfo]):
    """Builds dummy inputs for Kimi-K2.5 model profiling."""

    def __init__(self, info: KimiK25ProcessingInfo) -> None:
        super().__init__(info)
        self.media_token_id = self.info.media_token_id
        self.frame_per_chunk = self.info.media_processor.num_frames_per_chunk

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        num_media = mm_counts.get("vision_chunk", 0)
        return "<|media_pad|>" * num_media

    def get_dummy_mm_items(self):
        dummy_videos = self._get_dummy_images(
            height=MaxImageTokenMeta.height,
            width=MaxImageTokenMeta.width,
            num_images=self.frame_per_chunk,
        )

        video_chunk_dummy_item = VisionChunkVideo(
            type="video_chunk", video_chunk=dummy_videos
        )
        video_chunk_num_tokens = self.info.media_tokens_calculator(
            video_chunk_dummy_item
        )

        image_dummy_item = VisionChunkImage(
            type="image",
            image=self._get_dummy_images(
                height=MaxImageTokenMeta.height,
                width=MaxImageTokenMeta.width,
                num_images=1,
            )[0],
        )
        image_num_tokens = self.info.media_tokens_calculator(image_dummy_item)
        # return the larger one
        if video_chunk_num_tokens >= image_num_tokens:
            return [video_chunk_dummy_item]
        else:
            return [image_dummy_item]

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions] | None = None,
    ) -> MultiModalDataDict:
        # TODO: Support mm_options for vision_chunk to allow user configuration
        dummy_items = self.get_dummy_mm_items()
        return {"vision_chunk": dummy_items}

frame_per_chunk instance-attribute

frame_per_chunk = num_frames_per_chunk

media_token_id instance-attribute

media_token_id = media_token_id

__init__

__init__(info: KimiK25ProcessingInfo) -> None
Source code in vllm/model_executor/models/kimi_k25.py
def __init__(self, info: KimiK25ProcessingInfo) -> None:
    super().__init__(info)
    self.media_token_id = self.info.media_token_id
    self.frame_per_chunk = self.info.media_processor.num_frames_per_chunk

get_dummy_mm_data

get_dummy_mm_data(
    seq_len: int,
    mm_counts: Mapping[str, int],
    mm_options: Mapping[str, BaseDummyOptions]
    | None = None,
) -> MultiModalDataDict
Source code in vllm/model_executor/models/kimi_k25.py
def get_dummy_mm_data(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
    mm_options: Mapping[str, BaseDummyOptions] | None = None,
) -> MultiModalDataDict:
    # TODO: Support mm_options for vision_chunk to allow user configuration
    dummy_items = self.get_dummy_mm_items()
    return {"vision_chunk": dummy_items}

get_dummy_mm_items

get_dummy_mm_items()
Source code in vllm/model_executor/models/kimi_k25.py
def get_dummy_mm_items(self):
    dummy_videos = self._get_dummy_images(
        height=MaxImageTokenMeta.height,
        width=MaxImageTokenMeta.width,
        num_images=self.frame_per_chunk,
    )

    video_chunk_dummy_item = VisionChunkVideo(
        type="video_chunk", video_chunk=dummy_videos
    )
    video_chunk_num_tokens = self.info.media_tokens_calculator(
        video_chunk_dummy_item
    )

    image_dummy_item = VisionChunkImage(
        type="image",
        image=self._get_dummy_images(
            height=MaxImageTokenMeta.height,
            width=MaxImageTokenMeta.width,
            num_images=1,
        )[0],
    )
    image_num_tokens = self.info.media_tokens_calculator(image_dummy_item)
    # return the larger one
    if video_chunk_num_tokens >= image_num_tokens:
        return [video_chunk_dummy_item]
    else:
        return [image_dummy_item]

get_dummy_text

get_dummy_text(mm_counts: Mapping[str, int]) -> str
Source code in vllm/model_executor/models/kimi_k25.py
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
    num_media = mm_counts.get("vision_chunk", 0)
    return "<|media_pad|>" * num_media

KimiK25ForConditionalGeneration

Bases: Module, SupportsMultiModal, SupportsPP

Kimi-K2.5 model for conditional generation.

Supports both image and video-chunk modalities. Video-chunks are temporal segments (typically 4 frames) that are processed with temporal pooling.
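
The splitting itself is handled by the media processor's split_video_chunks; the sketch below only illustrates the idea of fixed-size temporal chunks, assuming the typical 4 frames per chunk mentioned above:

# Illustrative only: group video frames into fixed-size temporal chunks.
def split_into_chunks(frames: list, frames_per_chunk: int = 4) -> list[list]:
    return [
        frames[i : i + frames_per_chunk]
        for i in range(0, len(frames), frames_per_chunk)
    ]

# 10 frames -> chunk sizes [4, 4, 2]
assert [len(c) for c in split_into_chunks(list(range(10)))] == [4, 4, 2]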

Source code in vllm/model_executor/models/kimi_k25.py
@MULTIMODAL_REGISTRY.register_processor(
    KimiK25MultiModalProcessor,
    info=KimiK25ProcessingInfo,
    dummy_inputs=KimiK25DummyInputsBuilder,
)
class KimiK25ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP):
    """Kimi-K2.5 model for conditional generation.

    Supports both image and video-chunk modalities.
    Video-chunks are temporal segments (typically 4 frames) that are
    processed with temporal pooling.
    """

    supports_encoder_tp_data = True

    weights_mapper = WeightsMapper(
        orig_to_new_prefix={
            "mm_projector.proj.0": "mm_projector.linear_1",
            "mm_projector.proj.2": "mm_projector.linear_2",
        }
    )

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        # Kimi-K2.5 uses video_chunk for all media types
        if modality == "image":
            return "<|media_begin|>image<|media_content|><|media_pad|><|media_end|>"
        elif modality == "video":
            # return a placeholder, to be replaced in the future.
            return "<|kimi_k25_video_placeholder|>"

        raise ValueError(f"Unsupported modality: {modality}")

    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
    ) -> None:
        super().__init__()
        model_config = vllm_config.model_config
        config: KimiK25Config = model_config.hf_config
        self.config = config
        quant_config = vllm_config.quant_config

        # Check for MoonViT config compatibility
        self.use_data_parallel = (
            model_config.multimodal_config.mm_encoder_tp_mode == "data"
        )
        self.hidden_size = config.text_config.hidden_size
        self.device = current_platform.current_device()
        # Build vision tower directly with KimiK25VisionConfig
        with self._mark_tower_model(vllm_config, "vision_chunk"):
            self.vision_tower = MoonViT3dPretrainedModel(
                config.vision_config,
                prefix=maybe_prefix(prefix, "vision_tower"),
            )
            self.vision_tower = self.vision_tower.to(
                device=self.device, dtype=model_config.dtype
            )

            self.mm_projector = KimiK25MultiModalProjector(
                config=config.vision_config,
                use_data_parallel=self.use_data_parallel,
                prefix=maybe_prefix(prefix, "mm_projector"),
            )
            self.mm_projector = self.mm_projector.to(
                device=self.device, dtype=model_config.dtype
            )

        self.quant_config = quant_config
        sub_vllm_config = copy.deepcopy(vllm_config)
        sub_vllm_config.model_config.hf_config = (
            sub_vllm_config.model_config.hf_config.text_config
        )
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.text_config,
                prefix=maybe_prefix(prefix, "language_model"),
                architectures=["DeepseekV2ForCausalLM"],
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )
        self.media_placeholder: int = self.config.media_placeholder_token_id

    def _parse_and_validate_media_input(
        self, **kwargs: object
    ) -> KimiK25MediaPixelInputs | None:
        pixel_values = kwargs.pop("pixel_values", None)
        grid_thws = kwargs.pop("grid_thws", None)
        if pixel_values is None:
            return None

        if isinstance(pixel_values, list):
            pixel_values = torch.cat(pixel_values, dim=0)

        if len(pixel_values.shape) == 5 or len(pixel_values.shape) == 3:
            pixel_values = pixel_values.reshape(
                pixel_values.shape[0] * pixel_values.shape[1], *pixel_values.shape[2:]
            )

        # The batch dimension of pixel_values has been flattened into shape[0]
        target_dtype = next(self.vision_tower.parameters()).dtype
        pixel_values = pixel_values.to(target_dtype)
        assert isinstance(grid_thws, torch.Tensor), (
            f"expect grid_thws to be a tensor, get {type(grid_thws)}"
        )
        # In some cases (e.g. with merger), grid_thws has an extra middle dimension
        grid_thws = grid_thws.reshape(-1, grid_thws.shape[-1])
        assert grid_thws.ndim == 2 and grid_thws.size(1) == 3, (
            f"unexpected shape for grid_thws: {grid_thws.shape}"
        )

        return KimiK25MediaPixelInputs(
            type="pixel_values",
            pixel_values=pixel_values,
            grid_thws=grid_thws,
        )

    def _process_media_input(
        self, media_input: KimiK25MediaPixelInputs
    ) -> list[torch.Tensor]:
        # NOTE(moyan): This forward will automatically batch the forward pass internally
        media_features = vision_tower_forward(
            self.vision_tower,
            media_input["pixel_values"],
            media_input["grid_thws"],
            mm_projector=self.mm_projector,
            use_data_parallel=self.use_data_parallel,
        )
        return media_features

    def embed_multimodal(self, **kwargs: object) -> NestedTensors | None:
        # Validate the multimodal input keyword arguments
        media_input = self._parse_and_validate_media_input(**kwargs)
        if media_input is None:
            return None

        # Run multimodal inputs through encoder and projector
        vision_embeddings = self._process_media_input(media_input)
        return vision_embeddings

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> IntermediateTensors:
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )

        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
        logits = self.language_model.compute_logits(hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.weights_mapper)

config instance-attribute

config = config

device instance-attribute

device = current_device()

hidden_size instance-attribute

hidden_size = hidden_size

language_model instance-attribute

language_model = init_vllm_registered_model(
    vllm_config=vllm_config,
    hf_config=text_config,
    prefix=maybe_prefix(prefix, "language_model"),
    architectures=["DeepseekV2ForCausalLM"],
)

make_empty_intermediate_tensors instance-attribute

make_empty_intermediate_tensors = (
    make_empty_intermediate_tensors
)

media_placeholder instance-attribute

media_placeholder: int = media_placeholder_token_id

mm_projector instance-attribute

mm_projector = to(device=device, dtype=dtype)

quant_config instance-attribute

quant_config = quant_config

supports_encoder_tp_data class-attribute instance-attribute

supports_encoder_tp_data = True

use_data_parallel instance-attribute

use_data_parallel = mm_encoder_tp_mode == 'data'

vision_tower instance-attribute

vision_tower = to(device=device, dtype=dtype)

weights_mapper class-attribute instance-attribute

weights_mapper = WeightsMapper(
    orig_to_new_prefix={
        "mm_projector.proj.0": "mm_projector.linear_1",
        "mm_projector.proj.2": "mm_projector.linear_2",
    }
)
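
This mapping renames checkpoint prefixes during weight loading. A sketch of its effect on key names (not the WeightsMapper implementation itself):

# Sketch of the prefix remapping effect on checkpoint key names.
orig_to_new_prefix = {
    "mm_projector.proj.0": "mm_projector.linear_1",
    "mm_projector.proj.2": "mm_projector.linear_2",
}

def remap(name: str) -> str:
    for old, new in orig_to_new_prefix.items():
        if name.startswith(old):
            return new + name[len(old):]
    return name

assert remap("mm_projector.proj.0.weight") == "mm_projector.linear_1.weight"
assert remap("mm_projector.proj.2.bias") == "mm_projector.linear_2.bias"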

__init__

__init__(vllm_config: VllmConfig, prefix: str = '') -> None
Source code in vllm/model_executor/models/kimi_k25.py
def __init__(
    self,
    vllm_config: VllmConfig,
    prefix: str = "",
) -> None:
    super().__init__()
    model_config = vllm_config.model_config
    config: KimiK25Config = model_config.hf_config
    self.config = config
    quant_config = vllm_config.quant_config

    # Check for MoonViT config compatibility
    self.use_data_parallel = (
        model_config.multimodal_config.mm_encoder_tp_mode == "data"
    )
    self.hidden_size = config.text_config.hidden_size
    self.device = current_platform.current_device()
    # Build vision tower directly with KimiK25VisionConfig
    with self._mark_tower_model(vllm_config, "vision_chunk"):
        self.vision_tower = MoonViT3dPretrainedModel(
            config.vision_config,
            prefix=maybe_prefix(prefix, "vision_tower"),
        )
        self.vision_tower = self.vision_tower.to(
            device=self.device, dtype=model_config.dtype
        )

        self.mm_projector = KimiK25MultiModalProjector(
            config=config.vision_config,
            use_data_parallel=self.use_data_parallel,
            prefix=maybe_prefix(prefix, "mm_projector"),
        )
        self.mm_projector = self.mm_projector.to(
            device=self.device, dtype=model_config.dtype
        )

    self.quant_config = quant_config
    sub_vllm_config = copy.deepcopy(vllm_config)
    sub_vllm_config.model_config.hf_config = (
        sub_vllm_config.model_config.hf_config.text_config
    )
    with self._mark_language_model(vllm_config):
        self.language_model = init_vllm_registered_model(
            vllm_config=vllm_config,
            hf_config=config.text_config,
            prefix=maybe_prefix(prefix, "language_model"),
            architectures=["DeepseekV2ForCausalLM"],
        )
    self.make_empty_intermediate_tensors = (
        self.language_model.make_empty_intermediate_tensors
    )
    self.media_placeholder: int = self.config.media_placeholder_token_id

_parse_and_validate_media_input

_parse_and_validate_media_input(
    **kwargs: object,
) -> KimiK25MediaPixelInputs | None
Source code in vllm/model_executor/models/kimi_k25.py
def _parse_and_validate_media_input(
    self, **kwargs: object
) -> KimiK25MediaPixelInputs | None:
    pixel_values = kwargs.pop("pixel_values", None)
    grid_thws = kwargs.pop("grid_thws", None)
    if pixel_values is None:
        return None

    if isinstance(pixel_values, list):
        pixel_values = torch.cat(pixel_values, dim=0)

    if len(pixel_values.shape) == 5 or len(pixel_values.shape) == 3:
        pixel_values = pixel_values.reshape(
            pixel_values.shape[0] * pixel_values.shape[1], *pixel_values.shape[2:]
        )

    # The batch dimension of pixel_values has been flattened into shape[0]
    target_dtype = next(self.vision_tower.parameters()).dtype
    pixel_values = pixel_values.to(target_dtype)
    assert isinstance(grid_thws, torch.Tensor), (
        f"expect grid_thws to be a tensor, get {type(grid_thws)}"
    )
    # In some cases (e.g. with merger), grid_thws has an extra middle dimension
    grid_thws = grid_thws.reshape(-1, grid_thws.shape[-1])
    assert grid_thws.ndim == 2 and grid_thws.size(1) == 3, (
        f"unexpected shape for grid_thws: {grid_thws.shape}"
    )

    return KimiK25MediaPixelInputs(
        type="pixel_values",
        pixel_values=pixel_values,
        grid_thws=grid_thws,
    )
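
To make the reshaping concrete, here is a shape-only walkthrough with hypothetical sizes (patch size 14, two media items, a batched 5-D pixel_values, and a grid_thws carrying an extra middle dimension):

import torch

# Hypothetical shapes: 2 media items, 8 patches each, patch size 14.
pixel_values = torch.zeros(2, 8, 3, 14, 14)            # 5-D batched input
grid_thws = torch.tensor([[[1, 2, 4]], [[1, 4, 2]]])   # extra middle dimension

# Same reshapes as above:
pixel_values = pixel_values.reshape(
    pixel_values.shape[0] * pixel_values.shape[1], *pixel_values.shape[2:]
)
grid_thws = grid_thws.reshape(-1, grid_thws.shape[-1])

assert pixel_values.shape == (16, 3, 14, 14)  # patches flattened into dim 0
assert grid_thws.shape == (2, 3)              # one [t, h, w] row per item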

_process_media_input

_process_media_input(
    media_input: KimiK25MediaPixelInputs,
) -> list[Tensor]
Source code in vllm/model_executor/models/kimi_k25.py
def _process_media_input(
    self, media_input: KimiK25MediaPixelInputs
) -> list[torch.Tensor]:
    # NOTE(moyan): This forward will automatically batch the forward pass internally
    media_features = vision_tower_forward(
        self.vision_tower,
        media_input["pixel_values"],
        media_input["grid_thws"],
        mm_projector=self.mm_projector,
        use_data_parallel=self.use_data_parallel,
    )
    return media_features

compute_logits

compute_logits(hidden_states: Tensor, **kwargs) -> Tensor
Source code in vllm/model_executor/models/kimi_k25.py
def compute_logits(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
    logits = self.language_model.compute_logits(hidden_states)
    return logits

embed_multimodal

embed_multimodal(**kwargs: object) -> NestedTensors | None
Source code in vllm/model_executor/models/kimi_k25.py
def embed_multimodal(self, **kwargs: object) -> NestedTensors | None:
    # Validate the multimodal input keyword arguments
    media_input = self._parse_and_validate_media_input(**kwargs)
    if media_input is None:
        return None

    # Run multimodal inputs through encoder and projector
    vision_embeddings = self._process_media_input(media_input)
    return vision_embeddings

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: IntermediateTensors | None = None,
    inputs_embeds: Tensor | None = None,
    **kwargs: object,
) -> IntermediateTensors
Source code in vllm/model_executor/models/kimi_k25.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    intermediate_tensors: IntermediateTensors | None = None,
    inputs_embeds: torch.Tensor | None = None,
    **kwargs: object,
) -> IntermediateTensors:
    if intermediate_tensors is not None:
        inputs_embeds = None
    hidden_states = self.language_model(
        input_ids=input_ids,
        positions=positions,
        intermediate_tensors=intermediate_tensors,
        inputs_embeds=inputs_embeds,
    )

    return hidden_states

get_placeholder_str classmethod

get_placeholder_str(modality: str, i: int) -> str | None
Source code in vllm/model_executor/models/kimi_k25.py
@classmethod
def get_placeholder_str(cls, modality: str, i: int) -> str | None:
    # Kimi-K2.5 uses video_chunk for all media types
    if modality == "image":
        return "<|media_begin|>image<|media_content|><|media_pad|><|media_end|>"
    elif modality == "video":
        # return a placeholder, to be replaced in the future.
        return "<|kimi_k25_video_placeholder|>"

    raise ValueError(f"Unsupported modality: {modality}")
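
A usage sketch: the image placeholder can be dropped straight into a prompt string.

placeholder = KimiK25ForConditionalGeneration.get_placeholder_str("image", 0)
prompt = f"Describe this picture: {placeholder}"
# prompt == "Describe this picture: "
#           "<|media_begin|>image<|media_content|><|media_pad|><|media_end|>"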

load_weights

load_weights(weights: Iterable[tuple[str, Tensor]])
Source code in vllm/model_executor/models/kimi_k25.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
    loader = AutoWeightsLoader(self)
    return loader.load_weights(weights, mapper=self.weights_mapper)

KimiK25MediaPixelInputs

Bases: TensorSchema

Media input schema for K2-VL model.

Dimensions
  • np: Number of patches (flattened from all media items)
  • ps: Patch size
  • nm: Number of media items
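
A shape-consistent construction of the schema, with a hypothetical patch size (the real value comes from the vision config):

import torch

ps = 14                                              # hypothetical patch size
grid_thws = torch.tensor([[1, 2, 4], [1, 4, 2]])     # nm = 2 items, rows [t, h, w]
num_patches = int(grid_thws.prod(-1).sum())          # 8 + 8 = 16
pixel_values = torch.zeros(num_patches, 3, ps, ps)   # [np, 3, ps, ps]

inputs = KimiK25MediaPixelInputs(
    type="pixel_values",
    pixel_values=pixel_values,
    grid_thws=grid_thws,
)
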
Source code in vllm/model_executor/models/kimi_k25.py
class KimiK25MediaPixelInputs(TensorSchema):
    """
    Media input schema for K2-VL model.

    Dimensions:
        - np: Number of patches (flattened from all media items)
        - ps: Patch size
        - nm: Number of media items
    """

    type: Literal["pixel_values"] = "pixel_values"

    pixel_values: Annotated[
        torch.Tensor | list[torch.Tensor],
        TensorShape("np", 3, "ps", "ps"),
    ]

    grid_thws: Annotated[torch.Tensor, TensorShape("nm", 3)]

grid_thws instance-attribute

grid_thws: Annotated[Tensor, TensorShape(nm, 3)]

pixel_values instance-attribute

pixel_values: Annotated[
    Tensor | list[Tensor], TensorShape(np, 3, ps, ps)
]

type class-attribute instance-attribute

type: Literal['pixel_values'] = 'pixel_values'

KimiK25MultiModalProcessor

Bases: BaseMultiModalProcessor[KimiK25ProcessingInfo]

Multi-modal processor for Kimi-K2.5.

Handles both image and video-chunk modalities.

Source code in vllm/model_executor/models/kimi_k25.py
class KimiK25MultiModalProcessor(BaseMultiModalProcessor[KimiK25ProcessingInfo]):
    """Multi-modal processor for Kimi-K2.5.

    Handles both image and video-chunk modalities.
    """

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        """Indicates how to slice media input into multiple items.

        pixel_values: [N, 3, patch_size, patch_size], all patches collected from B medias
        grid_thws: [B,3], each item: [N_t, N_h ,N_w], indicates the grid size in time/height/width direction
                    for current item.

        by multiplying [N_t, N_h ,N_w], we get the number of patches for each media item, thus we can slice
        pixel_values by pixel_values[start:start + N_t*N_h*N_w] to get patches of one item.

        """
        grid_thws = hf_inputs.get("grid_thws", torch.empty((0, 3)))
        grid_sizes = grid_thws.prod(-1)

        return dict(
            pixel_values=MultiModalFieldConfig.flat_from_sizes(
                "vision_chunk", grid_sizes
            ),
            grid_thws=MultiModalFieldConfig.batched("vision_chunk"),
        )

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, Any],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        hf_config = self.info.get_hf_config()
        media_token_id = hf_config.media_placeholder_token_id

        def get_replacement(item_idx: int):
            media = mm_items.get_items("vision_chunk", (VisionChunkProcessorItems,))
            num_media_token = self.info.media_tokens_calculator(media[item_idx])
            return [media_token_id] * num_media_token

        return [
            PromptReplacement(
                modality="vision_chunk",
                target=[media_token_id],
                replacement=get_replacement,
            ),
        ]

    def split_video_chunks(self, video):
        return self.info.media_processor.split_video_chunks(video)

_get_mm_fields_config

_get_mm_fields_config(
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]

Indicates how to slice the media input into multiple items.

pixel_values: [N, 3, patch_size, patch_size], all patches collected from B media items.
grid_thws: [B, 3], where each row [N_t, N_h, N_w] gives the grid size along the time/height/width directions for the corresponding item.

Multiplying N_t * N_h * N_w gives the number of patches per media item, so pixel_values[start:start + N_t*N_h*N_w] selects the patches of a single item.
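
A small sketch of that slicing rule, with made-up grid sizes:

import torch

grid_thws = torch.tensor([[1, 2, 4], [1, 4, 2], [4, 2, 2]])  # B = 3 media items
grid_sizes = grid_thws.prod(-1)                 # tensor([ 8,  8, 16])
pixel_values = torch.zeros(int(grid_sizes.sum()), 3, 14, 14)

start = 0
per_item = []
for n in grid_sizes.tolist():
    per_item.append(pixel_values[start : start + n])   # patches of one item
    start += n

assert [p.shape[0] for p in per_item] == [8, 8, 16]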

Source code in vllm/model_executor/models/kimi_k25.py
def _get_mm_fields_config(
    self,
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]:
    """Indicates how to slice media input into multiple items.

    pixel_values: [N, 3, patch_size, patch_size], all patches collected from B medias
    grid_thws: [B,3], each item: [N_t, N_h ,N_w], indicates the grid size in time/height/width direction
                for current item.

    by multiplying [N_t, N_h ,N_w], we get the number of patches for each media item, thus we can slice
    pixel_values by pixel_values[start:start + N_t*N_h*N_w] to get patches of one item.

    """
    grid_thws = hf_inputs.get("grid_thws", torch.empty((0, 3)))
    grid_sizes = grid_thws.prod(-1)

    return dict(
        pixel_values=MultiModalFieldConfig.flat_from_sizes(
            "vision_chunk", grid_sizes
        ),
        grid_thws=MultiModalFieldConfig.batched("vision_chunk"),
    )

_get_prompt_updates

_get_prompt_updates(
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, Any],
    out_mm_kwargs: MultiModalKwargsItems,
) -> Sequence[PromptUpdate]
Source code in vllm/model_executor/models/kimi_k25.py
def _get_prompt_updates(
    self,
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, Any],
    out_mm_kwargs: MultiModalKwargsItems,
) -> Sequence[PromptUpdate]:
    hf_config = self.info.get_hf_config()
    media_token_id = hf_config.media_placeholder_token_id

    def get_replacement(item_idx: int):
        media = mm_items.get_items("vision_chunk", (VisionChunkProcessorItems,))
        num_media_token = self.info.media_tokens_calculator(media[item_idx])
        return [media_token_id] * num_media_token

    return [
        PromptReplacement(
            modality="vision_chunk",
            target=[media_token_id],
            replacement=get_replacement,
        ),
    ]

split_video_chunks

split_video_chunks(video)
Source code in vllm/model_executor/models/kimi_k25.py
def split_video_chunks(self, video):
    return self.info.media_processor.split_video_chunks(video)

KimiK25ProcessingInfo

Bases: BaseProcessingInfo

Processing information for Kimi-K2.5 model.

Provides configuration and utilities for processing both images and video-chunks.

Source code in vllm/model_executor/models/kimi_k25.py
class KimiK25ProcessingInfo(BaseProcessingInfo):
    """Processing information for Kimi-K2.5 model.

    Provides configuration and utilities for processing both
    images and video-chunks.
    """

    def __init__(self, ctx: InputProcessingContext) -> None:
        super().__init__(ctx)
        self.hf_config = self.get_hf_config()
        self.media_token_id = self.hf_config.media_placeholder_token_id
        media_processor = cached_get_image_processor(
            self.ctx.model_config.model, trust_remote_code=True
        )
        self.media_processor = media_processor
        self.hf_processor = MoonshotKimiVAutoProcessor(
            media_processor=self.media_processor,
            tokenizer=self.get_tokenizer(),
            media_token_id=self.media_token_id,
        )
        self.media_tokens_calculator = self.media_processor.media_tokens_calculator

    def get_hf_processor(self):
        return self.hf_processor

    def get_hf_config(self):
        return self.ctx.get_hf_config(KimiK25Config)

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # None means unlimited
        return {"vision_chunk": None}

hf_config instance-attribute

hf_config = get_hf_config()

hf_processor instance-attribute

hf_processor = MoonshotKimiVAutoProcessor(
    media_processor=media_processor,
    tokenizer=get_tokenizer(),
    media_token_id=media_token_id,
)

media_processor instance-attribute

media_processor = media_processor

media_token_id instance-attribute

media_token_id = media_placeholder_token_id

media_tokens_calculator instance-attribute

media_tokens_calculator = media_tokens_calculator

__init__

__init__(ctx: InputProcessingContext) -> None
Source code in vllm/model_executor/models/kimi_k25.py
def __init__(self, ctx: InputProcessingContext) -> None:
    super().__init__(ctx)
    self.hf_config = self.get_hf_config()
    self.media_token_id = self.hf_config.media_placeholder_token_id
    media_processor = cached_get_image_processor(
        self.ctx.model_config.model, trust_remote_code=True
    )
    self.media_processor = media_processor
    self.hf_processor = MoonshotKimiVAutoProcessor(
        media_processor=self.media_processor,
        tokenizer=self.get_tokenizer(),
        media_token_id=self.media_token_id,
    )
    self.media_tokens_calculator = self.media_processor.media_tokens_calculator

get_hf_config

get_hf_config()
Source code in vllm/model_executor/models/kimi_k25.py
def get_hf_config(self):
    return self.ctx.get_hf_config(KimiK25Config)

get_hf_processor

get_hf_processor()
Source code in vllm/model_executor/models/kimi_k25.py
def get_hf_processor(self):
    return self.hf_processor

get_supported_mm_limits

get_supported_mm_limits() -> Mapping[str, int | None]
Source code in vllm/model_executor/models/kimi_k25.py
def get_supported_mm_limits(self) -> Mapping[str, int | None]:
    # None means unlimited
    return {"vision_chunk": None}

MaxImageTokenMeta dataclass

Source code in vllm/model_executor/models/kimi_k25.py
@dataclass
class MaxImageTokenMeta:
    width: int = 3000
    height: int = 3000

height class-attribute instance-attribute

height: int = 3000

width class-attribute instance-attribute

width: int = 3000

__init__

__init__(width: int = 3000, height: int = 3000) -> None

MoonshotKimiVAutoProcessor

Bases: ProcessorMixin

Source code in vllm/model_executor/models/kimi_k25.py
class MoonshotKimiVAutoProcessor(ProcessorMixin):
    attributes = ["tokenizer"]
    tokenizer_class = "AutoTokenizer"

    def __init__(
        self, media_processor=None, tokenizer=None, media_token_id: int | None = None
    ):
        super().__init__(tokenizer)
        self.media_processor = media_processor
        self.media_token_id = media_token_id
        assert self.media_token_id is not None

    # We do not support str input for text here
    def __call__(
        self,
        vision_chunks: list[VisionChunk] | None = None,
        *,
        text: list[int] | str,
        **kwargs,
    ) -> BatchFeature:
        """
        Args:
            vision_chunks: List of VisionChunk items to be processed.
                For image: VisionChunkImage with type='image', image=PIL.Image
                For video_chunk: VisionChunkVideo with type='video_chunk', video_chunk=list[PIL.Image]
            text: The token ids to be fed to a model (required).
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- list of token ids to be fed to a model.
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `vision_chunks` is not `None`.
            - **grid_thws** -- list of image 3D grid in LLM. Returned when `vision_chunks` is not `None`.
        """
        mm_inputs = {}
        input_ids = self.tokenizer.encode(text) if isinstance(text, str) else text
        if vision_chunks is not None:
            assert isinstance(vision_chunks, list)
            mm_inputs = self.media_processor.preprocess(vision_chunks)

            num_tokens_per_chunk = [
                self.media_processor.media_tokens_calculator(chunk)
                for chunk in vision_chunks
            ]

            new_input_ids = []
            for token in input_ids:
                if token == self.media_token_id:
                    new_input_ids.extend(
                        [self.media_token_id] * num_tokens_per_chunk.pop(0)
                    )
                else:
                    new_input_ids.append(token)
            input_ids = new_input_ids

        # XXX: _apply_hf_processor_text_mm will call tolist() on input_ids
        return BatchFeature(
            data={
                "input_ids": torch.tensor([input_ids]),
                **mm_inputs,
            }
        )

attributes class-attribute instance-attribute

attributes = ['tokenizer']

media_processor instance-attribute

media_processor = media_processor

media_token_id instance-attribute

media_token_id = media_token_id

tokenizer_class class-attribute instance-attribute

tokenizer_class = 'AutoTokenizer'

__call__

__call__(
    vision_chunks: list[VisionChunk] | None = None,
    *,
    text: list[int] | str,
    **kwargs,
) -> BatchFeature

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| vision_chunks | list[VisionChunk] \| None | List of VisionChunk items to be processed. For image: VisionChunkImage with type='image', image=PIL.Image. For video_chunk: VisionChunkVideo with type='video_chunk', video_chunk=list[PIL.Image]. | None |
| text | list[int] \| str | The token ids to be fed to a model (required). | required |

Returns: [BatchFeature]: A [BatchFeature] with the following fields:

- **input_ids** -- list of token ids to be fed to a model.
- **pixel_values** -- Pixel values to be fed to a model. Returned when `vision_chunks` is not `None`.
- **grid_thws** -- 3D patch grid sizes ([t, h, w]) of each media item. Returned when `vision_chunks` is not `None`.
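
A sketch of the placeholder expansion described above, with hypothetical ids and counts:

media_token_id = 163606               # hypothetical placeholder id
num_tokens_per_chunk = [196]          # hypothetical media_tokens_calculator output

input_ids = [10, media_token_id, 11]  # prompt containing one <|media_pad|>
new_input_ids = []
for token in input_ids:
    if token == media_token_id:
        new_input_ids.extend([media_token_id] * num_tokens_per_chunk.pop(0))
    else:
        new_input_ids.append(token)

assert len(new_input_ids) == 2 + 196  # the single pad token expanded in place
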
Source code in vllm/model_executor/models/kimi_k25.py
def __call__(
    self,
    vision_chunks: list[VisionChunk] | None = None,
    *,
    text: list[int] | str,
    **kwargs,
) -> BatchFeature:
    """
    Args:
        vision_chunks: List of VisionChunk items to be processed.
            For image: VisionChunkImage with type='image', image=PIL.Image
            For video_chunk: VisionChunkVideo with type='video_chunk', video_chunk=list[PIL.Image]
        text: The token ids to be fed to a model (required).
    Returns:
        [`BatchFeature`]: A [`BatchFeature`] with the following fields:

        - **input_ids** -- list of token ids to be fed to a model.
        - **pixel_values** -- Pixel values to be fed to a model. Returned when `vision_chunks` is not `None`.
        - **grid_thws** -- list of image 3D grid in LLM. Returned when `vision_chunks` is not `None`.
    """
    mm_inputs = {}
    input_ids = self.tokenizer.encode(text) if isinstance(text, str) else text
    if vision_chunks is not None:
        assert isinstance(vision_chunks, list)
        mm_inputs = self.media_processor.preprocess(vision_chunks)

        num_tokens_per_chunk = [
            self.media_processor.media_tokens_calculator(chunk)
            for chunk in vision_chunks
        ]

        new_input_ids = []
        for token in input_ids:
            if token == self.media_token_id:
                new_input_ids.extend(
                    [self.media_token_id] * num_tokens_per_chunk.pop(0)
                )
            else:
                new_input_ids.append(token)
        input_ids = new_input_ids

    # XXX: _apply_hf_processor_text_mm will call tolist() on input_ids
    return BatchFeature(
        data={
            "input_ids": torch.tensor([input_ids]),
            **mm_inputs,
        }
    )

__init__

__init__(
    media_processor=None,
    tokenizer=None,
    media_token_id: int | None = None,
)
Source code in vllm/model_executor/models/kimi_k25.py
def __init__(
    self, media_processor=None, tokenizer=None, media_token_id: int | None = None
):
    super().__init__(tokenizer)
    self.media_processor = media_processor
    self.media_token_id = media_token_id
    assert self.media_token_id is not None