vllm.transformers_utils.configs.step3p5

Step3p5Config

Bases: PretrainedConfig

Source code in vllm/transformers_utils/configs/step3p5.py
class Step3p5Config(PretrainedConfig):
    model_type = "step3p5"

    def __init__(
        self,
        hidden_size: int = 5120,
        intermediate_size: int = 13312,
        num_attention_heads: int = 40,
        num_attention_groups: int = 8,
        num_hidden_layers: int = 48,
        max_seq_len: int = 4096,
        vocab_size: int = 65536,
        rms_norm_eps: float = 1e-5,
        moe_every_n_layer: int = 2,
        use_moe: bool = False,
        moe_intermediate_size: int = 10240,
        moe_num_experts: int = 16,
        moe_top_k: int = 4,
        moe_layer_offset: int = 0,
        rope_theta: float | list[float] | None = 500000,
        rope_scaling: dict[str, Any] | None = None,
        head_dim: int | None = None,
        share_expert_dim: int | None = None,
        norm_expert_weight: bool = True,
        bos_token_id: list[int] | int | None = None,
        eos_token_id: list[int] | int | None = None,
        moe_router_activation: str = "softmax",
        moe_router_scaling_factor: float = 1.0,
        att_impl_type: str = "GQA",
        use_head_wise_attn_gate: bool = False,
        use_moe_router_bias: bool = True,
        need_fp32_gate: bool = True,
        layer_types: list[str] | None = None,
        use_rope_layers: list[bool] | None = None,
        yarn_only_types: list[str] | None = None,
        attention_other_setting: dict[str, Any] | None = None,
        num_nextn_predict_layers: int = 0,
        swiglu_limits: list[float] | None = None,
        swiglu_limits_shared: list[float] | None = None,
        max_position_embeddings: int | None = None,
        **kwargs,
    ):
        # Core transformer dimensions.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.num_attention_groups = num_attention_groups
        self.num_hidden_layers = num_hidden_layers
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.rms_norm_eps = rms_norm_eps

        # Mixture-of-experts layout. `num_experts_per_tok` duplicates
        # `moe_top_k` under the field name transformers MoE configs
        # conventionally use.
        self.use_moe = use_moe
        self.moe_intermediate_size = moe_intermediate_size
        self.moe_every_n_layer = moe_every_n_layer
        self.moe_num_experts = moe_num_experts
        self.num_experts_per_tok = moe_top_k
        self.moe_top_k = moe_top_k
        self.moe_layer_offset = moe_layer_offset

        # Rotary-embedding and attention-head geometry.
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.head_dim = head_dim

        # The shared-expert width defaults to the combined width of the
        # routed experts selected per token.
        if share_expert_dim is None:
            self.share_expert_dim = self.moe_intermediate_size * self.moe_top_k
        else:
            self.share_expert_dim = share_expert_dim
        self.norm_expert_weight = norm_expert_weight

        self.max_position_embeddings = max_position_embeddings

        # MoE router behaviour.
        self.moe_router_activation = moe_router_activation
        self.moe_router_scaling_factor = moe_router_scaling_factor
        self.use_moe_router_bias = use_moe_router_bias
        self.need_fp32_gate = need_fp32_gate

        # Attention implementation and per-layer layout.
        self.att_impl_type = att_impl_type
        self.use_head_wise_attn_gate = use_head_wise_attn_gate
        self.layer_types = layer_types
        self.use_rope_layers = use_rope_layers
        self.yarn_only_types = yarn_only_types
        self.attention_other_setting = attention_other_setting
        self.num_nextn_predict_layers = num_nextn_predict_layers
        self.swiglu_limits = swiglu_limits
        self.swiglu_limits_shared = swiglu_limits_shared

        # Fall back to BOS id 1 and EOS ids [2, 3] when none are given.
        resolved_bos_token_id = 1 if bos_token_id is None else bos_token_id
        resolved_eos_token_id = [2, 3] if eos_token_id is None else eos_token_id
        self.bos_token_id = resolved_bos_token_id
        self.eos_token_id = resolved_eos_token_id

        super().__init__(
            bos_token_id=resolved_bos_token_id,
            eos_token_id=resolved_eos_token_id,
            **kwargs,
        )
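
A minimal usage sketch (the import path follows the source location shown above; nothing beyond the defaults in the constructor is assumed). Constructing the config with no arguments exercises the derived defaults for share_expert_dim, num_experts_per_tok, and the token ids:

from vllm.transformers_utils.configs.step3p5 import Step3p5Config

# With no explicit share_expert_dim, it falls back to
# moe_intermediate_size * moe_top_k.
cfg = Step3p5Config()
assert cfg.share_expert_dim == 10240 * 4  # 40960
assert cfg.num_experts_per_tok == cfg.moe_top_k == 4
assert cfg.bos_token_id == 1 and cfg.eos_token_id == [2, 3]

# An explicit value suppresses the derived default.
cfg = Step3p5Config(share_expert_dim=8192)
assert cfg.share_expert_dim == 8192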

att_impl_type instance-attribute

att_impl_type = att_impl_type

attention_other_setting instance-attribute

attention_other_setting = attention_other_setting

bos_token_id instance-attribute

bos_token_id = resolved_bos_token_id

eos_token_id instance-attribute

eos_token_id = resolved_eos_token_id

head_dim instance-attribute

head_dim = head_dim

hidden_size instance-attribute

hidden_size = hidden_size

intermediate_size instance-attribute

intermediate_size = intermediate_size

layer_types instance-attribute

layer_types = layer_types

max_position_embeddings instance-attribute

max_position_embeddings = max_position_embeddings

max_seq_len instance-attribute

max_seq_len = max_seq_len

model_type class-attribute instance-attribute

model_type = 'step3p5'

moe_every_n_layer instance-attribute

moe_every_n_layer = moe_every_n_layer

moe_intermediate_size instance-attribute

moe_intermediate_size = moe_intermediate_size

moe_layer_offset instance-attribute

moe_layer_offset = moe_layer_offset

moe_num_experts instance-attribute

moe_num_experts = moe_num_experts

moe_router_activation instance-attribute

moe_router_activation = moe_router_activation

moe_router_scaling_factor instance-attribute

moe_router_scaling_factor = moe_router_scaling_factor

moe_top_k instance-attribute

moe_top_k = moe_top_k

need_fp32_gate instance-attribute

need_fp32_gate = need_fp32_gate

norm_expert_weight instance-attribute

norm_expert_weight = norm_expert_weight

num_attention_groups instance-attribute

num_attention_groups = num_attention_groups

num_attention_heads instance-attribute

num_attention_heads = num_attention_heads

num_experts_per_tok instance-attribute

num_experts_per_tok = moe_top_k

num_hidden_layers instance-attribute

num_hidden_layers = num_hidden_layers

num_nextn_predict_layers instance-attribute

num_nextn_predict_layers = num_nextn_predict_layers

rms_norm_eps instance-attribute

rms_norm_eps = rms_norm_eps

rope_scaling instance-attribute

rope_scaling = rope_scaling

rope_theta instance-attribute

rope_theta = rope_theta

share_expert_dim instance-attribute

share_expert_dim = (
    share_expert_dim
    if share_expert_dim is not None
    else moe_intermediate_size * moe_top_k
)

swiglu_limits instance-attribute

swiglu_limits = swiglu_limits

swiglu_limits_shared instance-attribute

swiglu_limits_shared = swiglu_limits_shared

use_head_wise_attn_gate instance-attribute

use_head_wise_attn_gate = use_head_wise_attn_gate

use_moe instance-attribute

use_moe = use_moe

use_moe_router_bias instance-attribute

use_moe_router_bias = use_moe_router_bias

use_rope_layers instance-attribute

use_rope_layers = use_rope_layers

vocab_size instance-attribute

vocab_size = vocab_size

yarn_only_types instance-attribute

yarn_only_types = yarn_only_types
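
The router attributes above (moe_router_activation, moe_top_k, norm_expert_weight, use_moe_router_bias, need_fp32_gate, moe_router_scaling_factor) describe a top-k gate. The sketch below only illustrates how such knobs conventionally interact; it is not vLLM's actual routing code, and route_tokens with its tensor shapes is hypothetical:

import torch

def route_tokens(hidden, router_weight, router_bias, cfg):
    # Hypothetical gate: [tokens, hidden] -> per-token expert ids and weights.
    x = hidden.float() if cfg.need_fp32_gate else hidden  # fp32 gate for stability
    logits = x @ router_weight.T
    if cfg.use_moe_router_bias:
        logits = logits + router_bias
    if cfg.moe_router_activation == "softmax":
        scores = torch.softmax(logits, dim=-1)
    else:  # e.g. "sigmoid"
        scores = torch.sigmoid(logits)
    weights, expert_ids = torch.topk(scores, cfg.moe_top_k, dim=-1)
    if cfg.norm_expert_weight:
        # Renormalize so the selected experts' weights sum to 1.
        weights = weights / weights.sum(dim=-1, keepdim=True)
    return weights * cfg.moe_router_scaling_factor, expert_ids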

__init__

__init__(
    hidden_size: int = 5120,
    intermediate_size: int = 13312,
    num_attention_heads: int = 40,
    num_attention_groups: int = 8,
    num_hidden_layers: int = 48,
    max_seq_len: int = 4096,
    vocab_size: int = 65536,
    rms_norm_eps: float = 1e-05,
    moe_every_n_layer: int = 2,
    use_moe: bool = False,
    moe_intermediate_size: int = 10240,
    moe_num_experts: int = 16,
    moe_top_k: int = 4,
    moe_layer_offset: int = 0,
    rope_theta: float | list[float] | None = 500000,
    rope_scaling: dict[str, Any] | None = None,
    head_dim: int | None = None,
    share_expert_dim: int | None = None,
    norm_expert_weight: bool = True,
    bos_token_id: list[int] | int | None = None,
    eos_token_id: list[int] | int | None = None,
    moe_router_activation: str = "softmax",
    moe_router_scaling_factor: float = 1.0,
    att_impl_type: str = "GQA",
    use_head_wise_attn_gate: bool = False,
    use_moe_router_bias: bool = True,
    need_fp32_gate: bool = True,
    layer_types: list[str] | None = None,
    use_rope_layers: list[bool] | None = None,
    yarn_only_types: list[str] | None = None,
    attention_other_setting: dict[str, Any] | None = None,
    num_nextn_predict_layers: int = 0,
    swiglu_limits: list[float] | None = None,
    swiglu_limits_shared: list[float] | None = None,
    max_position_embeddings: int | None = None,
    **kwargs,
)
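
Since Step3p5Config subclasses PretrainedConfig, the standard serialization round trip covers the extra fields as well. A short sketch using only stock PretrainedConfig methods:

from vllm.transformers_utils.configs.step3p5 import Step3p5Config

cfg = Step3p5Config(use_moe=True, moe_num_experts=32)
data = cfg.to_dict()  # plain dict, includes model_type and the MoE fields
assert data["model_type"] == "step3p5"

restored = Step3p5Config.from_dict(data)
assert restored.use_moe and restored.moe_num_experts == 32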