vllm.model_executor.models.glm_ocr_mtp

Inference-only GLM-OCR MTP model compatible with HuggingFace weights.

GlmOcrMTP

Bases: Module, SupportsPP

Source code in vllm/model_executor/models/glm_ocr_mtp.py
class GlmOcrMTP(nn.Module, SupportsPP):
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.config = vllm_config.model_config.hf_config.text_config
        quant_config = vllm_config.quant_config
        self.quant_config = quant_config
        self.model = GlmOcrMultiTokenPredictor(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )

        self.expert_weights = []
        self.num_layers = self.config.num_nextn_predict_layers
        for layer in self.model.layers.values():
            assert isinstance(layer, GlmOcrMultiTokenPredictorLayer)
            layer = layer.mtp_block
            assert isinstance(layer, Glm4DecoderLayer)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        hidden_states = self.model(
            input_ids, positions, hidden_states, inputs_embeds, spec_step_idx
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor | None:
        return self.model.compute_logits(hidden_states, spec_step_idx)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            (".qkv_proj", ".q_proj", "q"),
            (".qkv_proj", ".k_proj", "k"),
            (".qkv_proj", ".v_proj", "v"),
            (".gate_up_proj", ".gate_proj", 0),
            (".gate_up_proj", ".up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if name == "lm_head.weight":
                spec_layer = self.model.mtp_start_layer_idx
                name = f"model.layers.{spec_layer}.shared_head.head.weight"
            elif name == "model.embed_tokens.weight":
                spec_layer = self.model.mtp_start_layer_idx
            else:
                spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
                if spec_layer is None:
                    continue

            name = self._rewrite_spec_layer_name(spec_layer, name)

            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(name)
            ):
                # Loading kv cache quantization scales
                param = params_dict[scale_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                loaded_weight = (
                    loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
                )
                weight_loader(param, loaded_weight)
                loaded_params.add(scale_name)
                continue

            if "scale" in name or "zero_point" in name:
                # Remapping the name of FP8 kv-scale or zero point.
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue

            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue

                if is_pp_missing_parameter(name, self):
                    continue

                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Some checkpoints include weight scale tensors for the
                # LM head even when the quantized head isn't built. Skip
                # them if the model does not expose a matching parameter
                # to avoid KeyError during load.
                if name.endswith(".weight_scale") and name not in params_dict:
                    continue

                # According to the DeepSeek-V3 Technical Report, MTP modules
                # share the embedding layer, so only the first copy of these
                # weights is loaded.
                if (
                    spec_layer != self.model.mtp_start_layer_idx
                    and ".layers" not in name
                ):
                    continue

                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

    def _rewrite_spec_layer_name(self, spec_layer: int, name: str) -> str:
        """
        Rewrite the weight name to match the format of the original model.
        Add .mtp_block for modules in the spec layer's transformer block,
        and rename shared layer weights to be top level.
        """
        name = name.replace("model.language_model.layers", "model.layers")

        spec_layer_weight_names = [
            "embed_tokens",
            "enorm",
            "hnorm",
            "eh_proj",
            "shared_head",
        ]
        shared_weight_names = ["embed_tokens"]
        spec_layer_weight = False
        shared_weight = False
        for weight_name in spec_layer_weight_names:
            if weight_name in name:
                spec_layer_weight = True
                if weight_name in shared_weight_names:
                    shared_weight = True
                break
        if not spec_layer_weight:
            # treat the remaining weights as transformer layer block weights
            name = name.replace(
                f"model.layers.{spec_layer}.", f"model.layers.{spec_layer}.mtp_block."
            )
        elif shared_weight:
            # treat shared weights as top level weights
            name = name.replace(f"model.layers.{spec_layer}.", "model.")
        return name

config instance-attribute

config = text_config

expert_weights instance-attribute

expert_weights = []

model instance-attribute

model = GlmOcrMultiTokenPredictor(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, "model"),
)

num_layers instance-attribute

num_layers = num_nextn_predict_layers

quant_config instance-attribute

quant_config = quant_config

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/glm_ocr_mtp.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    self.config = vllm_config.model_config.hf_config.text_config
    quant_config = vllm_config.quant_config
    self.quant_config = quant_config
    self.model = GlmOcrMultiTokenPredictor(
        vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
    )

    self.expert_weights = []
    self.num_layers = self.config.num_nextn_predict_layers
    for layer in self.model.layers.values():
        assert isinstance(layer, GlmOcrMultiTokenPredictorLayer)
        layer = layer.mtp_block
        assert isinstance(layer, Glm4DecoderLayer)

_rewrite_spec_layer_name

_rewrite_spec_layer_name(spec_layer: int, name: str) -> str

Rewrite the weight name to match the format of the original model. Add .mtp_block for modules in the spec layer's transformer block, and rename shared layer weights to be top level.

Source code in vllm/model_executor/models/glm_ocr_mtp.py
def _rewrite_spec_layer_name(self, spec_layer: int, name: str) -> str:
    """
    Rewrite the weight name to match the format of the original model.
    Add .mtp_block for modules in the spec layer's transformer block,
    and rename shared layer weights to be top level.
    """
    name = name.replace("model.language_model.layers", "model.layers")

    spec_layer_weight_names = [
        "embed_tokens",
        "enorm",
        "hnorm",
        "eh_proj",
        "shared_head",
    ]
    shared_weight_names = ["embed_tokens"]
    spec_layer_weight = False
    shared_weight = False
    for weight_name in spec_layer_weight_names:
        if weight_name in name:
            spec_layer_weight = True
            if weight_name in shared_weight_names:
                shared_weight = True
            break
    if not spec_layer_weight:
        # treat the remaining weights as transformer layer block weights
        name = name.replace(
            f"model.layers.{spec_layer}.", f"model.layers.{spec_layer}.mtp_block."
        )
    elif shared_weight:
        # treat shared weights as top level weights
        name = name.replace(f"model.layers.{spec_layer}.", "model.")
    return name
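
As a concrete illustration of these rules, the sketch below reimplements the rewriting logic as a standalone function. The spec layer index 46 and the weight names are illustrative only, not taken from a real GLM-OCR checkpoint.

# Standalone sketch of the rewriting rules above (illustrative only).
spec_layer = 46

def rewrite(name: str) -> str:
    name = name.replace("model.language_model.layers", "model.layers")
    spec_names = ["embed_tokens", "enorm", "hnorm", "eh_proj", "shared_head"]
    if not any(w in name for w in spec_names):
        # transformer-block weights gain the ".mtp_block" infix
        return name.replace(
            f"model.layers.{spec_layer}.", f"model.layers.{spec_layer}.mtp_block."
        )
    if "embed_tokens" in name:
        # shared weights are hoisted to the top level
        return name.replace(f"model.layers.{spec_layer}.", "model.")
    return name  # spec-layer-only weights (enorm, hnorm, ...) keep their prefix

assert rewrite("model.layers.46.self_attn.q_proj.weight") == (
    "model.layers.46.mtp_block.self_attn.q_proj.weight"
)
assert rewrite("model.layers.46.embed_tokens.weight") == "model.embed_tokens.weight"
assert rewrite("model.layers.46.eh_proj.weight") == "model.layers.46.eh_proj.weight"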

compute_logits

compute_logits(
    hidden_states: Tensor, spec_step_idx: int = 0
) -> Tensor | None
Source code in vllm/model_executor/models/glm_ocr_mtp.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
    spec_step_idx: int = 0,
) -> torch.Tensor | None:
    return self.model.compute_logits(hidden_states, spec_step_idx)

embed_input_ids

embed_input_ids(input_ids: Tensor) -> Tensor
Source code in vllm/model_executor/models/glm_ocr_mtp.py
def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
    return self.model.embed_input_ids(input_ids)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
    intermediate_tensors: IntermediateTensors | None = None,
    inputs_embeds: Tensor | None = None,
    spec_step_idx: int = 0,
) -> Tensor
Source code in vllm/model_executor/models/glm_ocr_mtp.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    intermediate_tensors: IntermediateTensors | None = None,
    inputs_embeds: torch.Tensor | None = None,
    spec_step_idx: int = 0,
) -> torch.Tensor:
    hidden_states = self.model(
        input_ids, positions, hidden_states, inputs_embeds, spec_step_idx
    )
    return hidden_states
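
The shape contract of this delegation, sketched with dummy tensors. Sizes and token ids are illustrative, and the commented calls assume an already constructed GlmOcrMTP instance named mtp_model, which is a placeholder name.

import torch

num_tokens, hidden_size = 3, 4  # illustrative sizes only
input_ids = torch.tensor([101, 102, 103])             # draft token ids, shape [num_tokens]
positions = torch.arange(num_tokens)                  # position ids for those tokens
target_hidden = torch.randn(num_tokens, hidden_size)  # hidden states from the target model

# draft_hidden = mtp_model(input_ids, positions, target_hidden)
# draft_hidden.shape == (num_tokens, hidden_size)
# logits = mtp_model.compute_logits(draft_hidden)     # shape [num_tokens, vocab_size]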

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/glm_ocr_mtp.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
    stacked_params_mapping = [
        # (param_name, shard_name, shard_id)
        (".qkv_proj", ".q_proj", "q"),
        (".qkv_proj", ".k_proj", "k"),
        (".qkv_proj", ".v_proj", "v"),
        (".gate_up_proj", ".gate_proj", 0),
        (".gate_up_proj", ".up_proj", 1),
    ]
    params_dict = dict(self.named_parameters())
    loaded_params: set[str] = set()
    for name, loaded_weight in weights:
        if name == "lm_head.weight":
            spec_layer = self.model.mtp_start_layer_idx
            name = f"model.layers.{spec_layer}.shared_head.head.weight"
        elif name == "model.embed_tokens.weight":
            spec_layer = self.model.mtp_start_layer_idx
        else:
            spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
            if spec_layer is None:
                continue

        name = self._rewrite_spec_layer_name(spec_layer, name)

        if self.quant_config is not None and (
            scale_name := self.quant_config.get_cache_scale(name)
        ):
            # Loading kv cache quantization scales
            param = params_dict[scale_name]
            weight_loader = getattr(param, "weight_loader", default_weight_loader)
            loaded_weight = (
                loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
            )
            weight_loader(param, loaded_weight)
            loaded_params.add(scale_name)
            continue

        if "scale" in name or "zero_point" in name:
            # Remapping the name of FP8 kv-scale or zero point.
            name = maybe_remap_kv_scale_name(name, params_dict)
            if name is None:
                continue

        for param_name, weight_name, shard_id in stacked_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)
            # Skip loading extra bias for GPTQ models.
            if name.endswith(".bias") and name not in params_dict:
                continue

            if is_pp_missing_parameter(name, self):
                continue

            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            # Skip loading extra bias for GPTQ models.
            if name.endswith(".bias") and name not in params_dict:
                continue
            # Some checkpoints include weight scale tensors for the
            # LM head even when the quantized head isn't built. Skip
            # them if the model does not expose a matching parameter
            # to avoid KeyError during load.
            if name.endswith(".weight_scale") and name not in params_dict:
                continue

            # According to the DeepSeek-V3 Technical Report, MTP modules
            # share the embedding layer, so only the first copy of these
            # weights is loaded.
            if (
                spec_layer != self.model.mtp_start_layer_idx
                and ".layers" not in name
            ):
                continue

            if is_pp_missing_parameter(name, self):
                continue
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader", default_weight_loader)
            weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params
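
To make the stacked-parameter handling concrete, the sketch below shows how per-projection checkpoint names fold into the fused vLLM parameters. The layer index and weight names are illustrative only.

# Sketch of how stacked_params_mapping folds per-projection checkpoint
# names into the fused parameters (illustrative names only).
stacked_params_mapping = [
    (".qkv_proj", ".q_proj", "q"),
    (".qkv_proj", ".k_proj", "k"),
    (".qkv_proj", ".v_proj", "v"),
    (".gate_up_proj", ".gate_proj", 0),
    (".gate_up_proj", ".up_proj", 1),
]

def fused_target(name: str) -> tuple[str, object] | None:
    for param_name, weight_name, shard_id in stacked_params_mapping:
        if weight_name in name:
            return name.replace(weight_name, param_name), shard_id
    return None  # loaded directly, without sharding

assert fused_target("model.layers.46.mtp_block.self_attn.k_proj.weight") == (
    "model.layers.46.mtp_block.self_attn.qkv_proj.weight", "k",
)
assert fused_target("model.layers.46.mtp_block.mlp.up_proj.weight") == (
    "model.layers.46.mtp_block.mlp.gate_up_proj.weight", 1,
)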

GlmOcrMultiTokenPredictor

Bases: Glm4MoeLiteMultiTokenPredictor

Source code in vllm/model_executor/models/glm_ocr_mtp.py
class GlmOcrMultiTokenPredictor(Glm4MoeLiteMultiTokenPredictor):
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        nn.Module.__init__(self)
        config = vllm_config.model_config.hf_config.text_config
        self.mtp_start_layer_idx = config.num_hidden_layers
        self.num_mtp_layers = config.num_nextn_predict_layers
        self.layers = torch.nn.ModuleDict(
            {
                str(idx): GlmOcrMultiTokenPredictorLayer(
                    vllm_config=vllm_config,
                    prefix=f"{prefix}.layers.{idx}",
                )
                for idx in range(
                    self.mtp_start_layer_idx,
                    self.mtp_start_layer_idx + self.num_mtp_layers,
                )
            }
        )
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.logits_processor = LogitsProcessor(config.vocab_size)

embed_tokens instance-attribute

embed_tokens = VocabParallelEmbedding(
    vocab_size, hidden_size
)

layers instance-attribute

layers = ModuleDict(
    {
        str(idx): GlmOcrMultiTokenPredictorLayer(
            vllm_config=vllm_config,
            prefix=f"{prefix}.layers.{idx}",
        )
        for idx in range(
            mtp_start_layer_idx,
            mtp_start_layer_idx + num_mtp_layers,
        )
    }
)

logits_processor instance-attribute

logits_processor = LogitsProcessor(vocab_size)

mtp_start_layer_idx instance-attribute

mtp_start_layer_idx = num_hidden_layers

num_mtp_layers instance-attribute

num_mtp_layers = num_nextn_predict_layers

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/glm_ocr_mtp.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    nn.Module.__init__(self)
    config = vllm_config.model_config.hf_config.text_config
    self.mtp_start_layer_idx = config.num_hidden_layers
    self.num_mtp_layers = config.num_nextn_predict_layers
    self.layers = torch.nn.ModuleDict(
        {
            str(idx): GlmOcrMultiTokenPredictorLayer(
                vllm_config=vllm_config,
                prefix=f"{prefix}.layers.{idx}",
            )
            for idx in range(
                self.mtp_start_layer_idx,
                self.mtp_start_layer_idx + self.num_mtp_layers,
            )
        }
    )
    self.embed_tokens = VocabParallelEmbedding(
        config.vocab_size,
        config.hidden_size,
    )
    self.logits_processor = LogitsProcessor(config.vocab_size)
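
The layer-index arithmetic can be illustrated with a small sketch: MTP layers are keyed by absolute layer index, starting right after the last hidden layer of the target model. The values num_hidden_layers=46 and num_nextn_predict_layers=1 are placeholders, not the real GLM-OCR config.

num_hidden_layers = 46            # placeholder for config.num_hidden_layers
num_nextn_predict_layers = 1      # placeholder for config.num_nextn_predict_layers

mtp_start_layer_idx = num_hidden_layers
layer_keys = [
    str(idx)
    for idx in range(
        mtp_start_layer_idx, mtp_start_layer_idx + num_nextn_predict_layers
    )
]
assert layer_keys == ["46"]  # ModuleDict keys are absolute layer indices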

GlmOcrMultiTokenPredictorLayer

Bases: Module

Source code in vllm/model_executor/models/glm_ocr_mtp.py
class GlmOcrMultiTokenPredictorLayer(nn.Module):
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        nn.Module.__init__(self)

        config = vllm_config.speculative_config.draft_model_config.hf_config.text_config
        self.config = config
        quant_config = vllm_config.quant_config

        self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)

        self.device = current_platform.device_type
        self.shared_head = SharedHead(
            config=config, prefix=prefix, quant_config=quant_config
        )
        self.mtp_block = Glm4DecoderLayer(
            vllm_config=vllm_config, prefix=prefix, config=self.config
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_index: int = 0,
    ) -> torch.Tensor:
        assert inputs_embeds is not None
        # mask inputs at position 0, since they are not needed by MTP
        inputs_embeds[positions[0] == 0] = 0

        inputs_embeds = self.enorm(inputs_embeds)
        previous_hidden_states = self.hnorm(previous_hidden_states)

        hidden_states = self.eh_proj(
            torch.cat([inputs_embeds, previous_hidden_states], dim=-1)
        )

        hidden_states, residual = self.mtp_block(
            positions=positions, hidden_states=hidden_states, residual=None
        )
        hidden_states = residual + hidden_states
        return hidden_states

config instance-attribute

config = config

device instance-attribute

device = device_type

eh_proj instance-attribute

eh_proj = Linear(hidden_size * 2, hidden_size, bias=False)

enorm instance-attribute

enorm = RMSNorm(hidden_size, eps=rms_norm_eps)

hnorm instance-attribute

hnorm = RMSNorm(hidden_size, eps=rms_norm_eps)

mtp_block instance-attribute

mtp_block = Glm4DecoderLayer(
    vllm_config=vllm_config, prefix=prefix, config=config
)

shared_head instance-attribute

shared_head = SharedHead(
    config=config, prefix=prefix, quant_config=quant_config
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/glm_ocr_mtp.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    nn.Module.__init__(self)

    config = vllm_config.speculative_config.draft_model_config.hf_config.text_config
    self.config = config
    quant_config = vllm_config.quant_config

    self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)

    self.device = current_platform.device_type
    self.shared_head = SharedHead(
        config=config, prefix=prefix, quant_config=quant_config
    )
    self.mtp_block = Glm4DecoderLayer(
        vllm_config=vllm_config, prefix=prefix, config=self.config
    )

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    previous_hidden_states: Tensor,
    inputs_embeds: Tensor | None = None,
    spec_step_index: int = 0,
) -> Tensor
Source code in vllm/model_executor/models/glm_ocr_mtp.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    previous_hidden_states: torch.Tensor,
    inputs_embeds: torch.Tensor | None = None,
    spec_step_index: int = 0,
) -> torch.Tensor:
    assert inputs_embeds is not None
    # mask inputs at position 0, since they are not needed by MTP
    inputs_embeds[positions[0] == 0] = 0

    inputs_embeds = self.enorm(inputs_embeds)
    previous_hidden_states = self.hnorm(previous_hidden_states)

    hidden_states = self.eh_proj(
        torch.cat([inputs_embeds, previous_hidden_states], dim=-1)
    )

    hidden_states, residual = self.mtp_block(
        positions=positions, hidden_states=hidden_states, residual=None
    )
    hidden_states = residual + hidden_states
    return hidden_states
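
The combine step at the top of this forward pass can be sketched with plain torch. The size hidden_size=8 is illustrative, and the inline rms_norm is a stand-in for vLLM's RMSNorm. Note that the real code masks on positions[0], i.e. the first row of a multi-dimensional positions tensor, while this sketch uses 1-D positions for simplicity.

import torch

num_tokens, hidden_size = 4, 8  # illustrative sizes only

def rms_norm(x: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # stand-in for vLLM's RMSNorm (no learned scale, for brevity)
    return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)

inputs_embeds = torch.randn(num_tokens, hidden_size)           # embedded draft tokens
previous_hidden_states = torch.randn(num_tokens, hidden_size)  # target-model hidden states
positions = torch.arange(num_tokens)

# Zero out embeddings at position 0, mirroring the masking above.
inputs_embeds[positions == 0] = 0

eh_proj = torch.nn.Linear(hidden_size * 2, hidden_size, bias=False)
hidden_states = eh_proj(
    torch.cat([rms_norm(inputs_embeds), rms_norm(previous_hidden_states)], dim=-1)
)
assert hidden_states.shape == (num_tokens, hidden_size)
# The real layer then passes hidden_states through self.mtp_block
# (a Glm4DecoderLayer) and adds the returned residual.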