vllm.transformers_utils.processors.qwen3_asr

Qwen3ASRProcessor

Bases: ProcessorMixin

Constructs a Qwen3ASR processor. [Qwen3ASRProcessor] offers all the functionalities of [WhisperFeatureExtractor] and [Qwen2TokenizerFast]. See [~Qwen3ASRProcessor.__call__] and [~Qwen3ASRProcessor.decode] for more information.

Parameters:

feature_extractor ([`WhisperFeatureExtractor`], *optional*, defaults to `None`):
    The audio feature extractor.
tokenizer ([`Qwen2TokenizerFast`], *optional*, defaults to `None`):
    The text tokenizer.
chat_template (`Optional[str]`, *optional*, defaults to `None`):
    The Jinja template to use for formatting the conversation. If not provided, the default chat template is used.
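A minimal construction sketch (not taken from the vLLM source): the tokenizer passed in must expose `audio_token`, `audio_bos_token`, and `audio_eos_token`, because `__init__` copies them onto the processor. The checkpoint name and special-token strings below are assumptions for illustration only.

from transformers import Qwen2TokenizerFast, WhisperFeatureExtractor

from vllm.transformers_utils.processors.qwen3_asr import Qwen3ASRProcessor

tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen2.5-7B")  # hypothetical checkpoint
# Assumed special-token strings; the real Qwen3-ASR tokenizer config defines these.
tokenizer.audio_token = "<|AUDIO|>"
tokenizer.audio_bos_token = "<|audio_bos|>"
tokenizer.audio_eos_token = "<|audio_eos|>"

processor = Qwen3ASRProcessor(
    feature_extractor=WhisperFeatureExtractor(),  # default Whisper mel settings
    tokenizer=tokenizer,
)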
Source code in vllm/transformers_utils/processors/qwen3_asr.py
class Qwen3ASRProcessor(ProcessorMixin):
    r"""
    Constructs a Qwen3ASR processor.
    [`Qwen3ASRProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`Qwen2TokenizerFast`]. See
    [`~Qwen3ASRProcessor.__call__`] and [`~Qwen3ASRProcessor.decode`] for more information.

    Args:
        feature_extractor ([`WhisperFeatureExtractor`], *optional*):
            The audio feature extractor.
        tokenizer ([`Qwen2TokenizerFast`], *optional*):
            The text tokenizer.
        chat_template (`Optional[str]`, *optional*):
            The Jinja template to use for formatting the conversation. If not provided, the default chat template is used.
    """

    attributes = ["feature_extractor", "tokenizer"]
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")

    def __init__(self, feature_extractor=None, tokenizer=None, chat_template=None):
        super().__init__(feature_extractor, tokenizer, chat_template=chat_template)
        self.audio_token = self.tokenizer.audio_token
        self.audio_bos_token = self.tokenizer.audio_bos_token
        self.audio_eos_token = self.tokenizer.audio_eos_token

    def __call__(
        self,
        text: TextInput = None,
        audio: AudioInput = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to prepare one or several sequence(s) and audio(s) for the model. This method forwards the `text`
        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the audio(s), this method forwards the `audio` and `kwargs` arguments to
        WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] if `audio` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            audio (`np.ndarray`, `List[np.ndarray]`):
                The audio or batch of audio to be prepared. Each audio can be a NumPy array.
        """

        if text is None:
            raise ValueError("You need to specify a `text` input to process.")

        output_kwargs = self._merge_kwargs(
            Qwen3ASRProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        if audio is not None:
            output_kwargs["audio_kwargs"]["padding"] = True
            output_kwargs["audio_kwargs"]["truncation"] = False
            audio_inputs = self.feature_extractor(
                audio, **output_kwargs["audio_kwargs"]
            )
            audio_inputs["feature_attention_mask"] = audio_inputs.pop(
                "attention_mask"
            )  # rename feature_attention_mask to prevent conflicts later on
            audio_inputs["input_features"] = audio_inputs.pop(
                "input_features"
            )  # rename input_features to prevent conflicts later on
            audio_lengths = iter(
                _get_feat_extract_output_lengths(
                    audio_inputs["feature_attention_mask"].sum(-1)
                )
            )
        else:
            audio_inputs = {}
            audio_lengths = iter([])

        if not isinstance(text, list):
            text = [text]

        text = self.replace_multimodal_special_tokens(
            text,
            audio_lengths,
        )

        texts_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])

        return BatchFeature(
            data={**texts_inputs, **audio_inputs},
            tensor_type=kwargs.get("return_tensors"),
        )

    def replace_multimodal_special_tokens(
        self,
        text,
        audio_lengths,
    ):
        processed_text = []
        for sample in text:
            positions = []
            special_tokens = [re.escape(tok) for tok in [self.audio_token]]
            pattern = "|".join(special_tokens)
            positions = sorted(
                [
                    (match.start(), match.group())
                    for match in re.finditer(pattern, sample)
                ]
            )
            positions.sort(key=lambda x: x[0])

            for _, special_token in positions:
                if special_token == self.audio_token:
                    sample = sample.replace(
                        self.audio_token,
                        "<|audio_placeholder|>" * next(audio_lengths),
                        1,
                    )

            sample = sample.replace("<|audio_placeholder|>", self.audio_token)
            processed_text.append(sample)
        return processed_text

    def get_chunked_index(
        self, token_indices: np.ndarray, tokens_per_chunk: int
    ) -> list[tuple[int, int]]:
        """
        Splits token index list into chunks based on token value ranges.

        Given a list of token indices, returns a list of (start, end) index tuples representing
        slices of the list where the token values fall within successive ranges of `tokens_per_chunk`.

        For example, if `tokens_per_chunk` is 1000, the function will create chunks such that:
        - the first chunk contains token values < 1000,
        - the second chunk contains values >= 1000 and < 2000, and so on.

        Parameters:
            token_indices (`np.ndarray`): A monotonically increasing list of token index values.
            tokens_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold).

        Returns:
            `list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive)
                                and end (exclusive) indices of a chunk in `token_indices`.
        """

        def _iter():
            i, start_idx = 0, 0  # skip bos token
            current_chunk = 1
            while i < len(token_indices):  # skip eos token
                if token_indices[i] >= current_chunk * tokens_per_chunk:
                    yield (start_idx, i)
                    start_idx = i
                    current_chunk += 1
                i += 1
            yield (start_idx, len(token_indices))

        return list(_iter())

    def apply_chat_template(self, conversations, chat_template=None, **kwargs):
        kwargs["return_dict"] = False
        return super().apply_chat_template(conversations, chat_template, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(
            dict.fromkeys(
                tokenizer_input_names
                + feature_extractor_input_names
                + ["feature_attention_mask"]
            )
        )

attributes class-attribute instance-attribute

attributes = ['feature_extractor', 'tokenizer']

audio_bos_token instance-attribute

audio_bos_token = audio_bos_token

audio_eos_token instance-attribute

audio_eos_token = audio_eos_token

audio_token instance-attribute

audio_token = audio_token

feature_extractor_class class-attribute instance-attribute

feature_extractor_class = 'WhisperFeatureExtractor'

model_input_names property

model_input_names

tokenizer_class class-attribute instance-attribute

tokenizer_class = ('Qwen2Tokenizer', 'Qwen2TokenizerFast')

__call__

__call__(
    text: TextInput = None,
    audio: AudioInput = None,
    **kwargs,
) -> BatchFeature

Main method to prepare one or several sequence(s) and audio(s) for the model. This method forwards the text and kwargs arguments to Qwen2TokenizerFast's [~Qwen2TokenizerFast.__call__] if text is not None to encode the text. To prepare the audio(s), this method forwards the audio and kwargs arguments to WhisperFeatureExtractor's [~WhisperFeatureExtractor.__call__] if audio is not None. Please refer to the docstring of the above two methods for more information.

Parameters:

text (`str`, `List[str]`, `List[List[str]]`, defaults to `None`):
    The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as a list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
audio (`np.ndarray`, `List[np.ndarray]`, defaults to `None`):
    The audio or batch of audio to be prepared. Each audio can be a NumPy array.
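A hedged usage sketch, continuing the construction example above: one prompt referencing one audio clip. The 16 kHz rate matches the processor's default `audio_kwargs`; the synthetic waveform is for illustration only.

import numpy as np

audio = np.random.randn(16000).astype(np.float32)     # 1 second of fake 16 kHz mono audio
prompt = f"Transcribe this: {processor.audio_token}"   # one audio token per clip

inputs = processor(text=prompt, audio=audio, return_tensors="pt")
# `inputs` holds the tokenizer outputs (e.g. input_ids, attention_mask) plus
# input_features and feature_attention_mask from the feature extractor.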
Source code in vllm/transformers_utils/processors/qwen3_asr.py
def __call__(
    self,
    text: TextInput = None,
    audio: AudioInput = None,
    **kwargs,
) -> BatchFeature:
    """
    Main method to prepare one or several sequence(s) and audio(s) for the model. This method forwards the `text`
    and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
    the text. To prepare the audio(s), this method forwards the `audio` and `kwargs` arguments to
    WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] if `audio` is not `None`. Please refer to the docstring
    of the above two methods for more information.

    Args:
        text (`str`, `List[str]`, `List[List[str]]`):
            The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
            (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
            `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
        audio (`np.ndarray`, `List[np.ndarray]`):
            The audio or batch of audio to be prepared. Each audio can be a NumPy array.
    """

    if text is None:
        raise ValueError("You need to specify a `text` input to process.")

    output_kwargs = self._merge_kwargs(
        Qwen3ASRProcessorKwargs,
        tokenizer_init_kwargs=self.tokenizer.init_kwargs,
        **kwargs,
    )

    if audio is not None:
        output_kwargs["audio_kwargs"]["padding"] = True
        output_kwargs["audio_kwargs"]["truncation"] = False
        audio_inputs = self.feature_extractor(
            audio, **output_kwargs["audio_kwargs"]
        )
        audio_inputs["feature_attention_mask"] = audio_inputs.pop(
            "attention_mask"
        )  # rename feature_attention_mask to prevent conflicts later on
        audio_inputs["input_features"] = audio_inputs.pop(
            "input_features"
        )  # rename input_features to prevent conflicts later on
        audio_lengths = iter(
            _get_feat_extract_output_lengths(
                audio_inputs["feature_attention_mask"].sum(-1)
            )
        )
    else:
        audio_inputs = {}
        audio_lengths = iter([])

    if not isinstance(text, list):
        text = [text]

    text = self.replace_multimodal_special_tokens(
        text,
        audio_lengths,
    )

    texts_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])

    return BatchFeature(
        data={**texts_inputs, **audio_inputs},
        tensor_type=kwargs.get("return_tensors"),
    )

__init__

__init__(
    feature_extractor=None,
    tokenizer=None,
    chat_template=None,
)
Source code in vllm/transformers_utils/processors/qwen3_asr.py
def __init__(self, feature_extractor=None, tokenizer=None, chat_template=None):
    super().__init__(feature_extractor, tokenizer, chat_template=chat_template)
    self.audio_token = self.tokenizer.audio_token
    self.audio_bos_token = self.tokenizer.audio_bos_token
    self.audio_eos_token = self.tokenizer.audio_eos_token

apply_chat_template

apply_chat_template(
    conversations, chat_template=None, **kwargs
)
Source code in vllm/transformers_utils/processors/qwen3_asr.py
def apply_chat_template(self, conversations, chat_template=None, **kwargs):
    kwargs["return_dict"] = False
    return super().apply_chat_template(conversations, chat_template, **kwargs)
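The override above always returns the rendered prompt rather than a dict (`return_dict` is forced to `False`). A hedged sketch of how it might be called; the message schema mirrors the Qwen2-Audio convention and is an assumption here.

conversation = [
    {
        "role": "user",
        "content": [
            {"type": "audio", "audio_url": "sample.wav"},        # hypothetical local file
            {"type": "text", "text": "Transcribe the audio."},
        ],
    }
]
# Returns the formatted prompt string (never a dict, because return_dict=False).
prompt = processor.apply_chat_template(
    conversation, add_generation_prompt=True, tokenize=False
)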

get_chunked_index

get_chunked_index(
    token_indices: ndarray, tokens_per_chunk: int
) -> list[tuple[int, int]]

Splits token index list into chunks based on token value ranges.

Given a list of token indices, returns a list of (start, end) index tuples representing slices of the list where the token values fall within successive ranges of tokens_per_chunk.

For example, if tokens_per_chunk is 1000, the function will create chunks such that:

- the first chunk contains token values < 1000,
- the second chunk contains values >= 1000 and < 2000, and so on.

Parameters:

token_indices (`np.ndarray`, required):
    A monotonically increasing list of token index values.
tokens_per_chunk (`int`, required):
    Number of tokens per chunk (used as the chunk size threshold).

Returns:

`list[tuple[int, int]]`:
    A list of tuples, each representing the start (inclusive) and end (exclusive) indices of a chunk in `token_indices`.
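A worked example (illustrative): chunk boundaries are determined by the token index values crossing multiples of `tokens_per_chunk`, not by the positions in the array.

import numpy as np

token_indices = np.array([3, 500, 999, 1000, 1500, 2100])
# processor.get_chunked_index(token_indices, tokens_per_chunk=1000)
# -> [(0, 3), (3, 5), (5, 6)]
#    values < 1000         -> positions 0..2
#    1000 <= values < 2000 -> positions 3..4
#    values >= 2000        -> position  5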

Source code in vllm/transformers_utils/processors/qwen3_asr.py
def get_chunked_index(
    self, token_indices: np.ndarray, tokens_per_chunk: int
) -> list[tuple[int, int]]:
    """
    Splits token index list into chunks based on token value ranges.

    Given a list of token indices, returns a list of (start, end) index tuples representing
    slices of the list where the token values fall within successive ranges of `tokens_per_chunk`.

    For example, if `tokens_per_chunk` is 1000, the function will create chunks such that:
    - the first chunk contains token values < 1000,
    - the second chunk contains values >= 1000 and < 2000, and so on.

    Parameters:
        token_indices (`np.ndarray`): A monotonically increasing list of token index values.
        tokens_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold).

    Returns:
        `list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive)
                            and end (exclusive) indices of a chunk in `token_indices`.
    """

    def _iter():
        i, start_idx = 0, 0  # skip bos token
        current_chunk = 1
        while i < len(token_indices):  # skip eos token
            if token_indices[i] >= current_chunk * tokens_per_chunk:
                yield (start_idx, i)
                start_idx = i
                current_chunk += 1
            i += 1
        yield (start_idx, len(token_indices))

    return list(_iter())

replace_multimodal_special_tokens

replace_multimodal_special_tokens(text, audio_lengths)
Source code in vllm/transformers_utils/processors/qwen3_asr.py
def replace_multimodal_special_tokens(
    self,
    text,
    audio_lengths,
):
    processed_text = []
    for sample in text:
        positions = []
        special_tokens = [re.escape(tok) for tok in [self.audio_token]]
        pattern = "|".join(special_tokens)
        positions = sorted(
            [
                (match.start(), match.group())
                for match in re.finditer(pattern, sample)
            ]
        )
        positions.sort(key=lambda x: x[0])

        for _, special_token in positions:
            if special_token == self.audio_token:
                sample = sample.replace(
                    self.audio_token,
                    "<|audio_placeholder|>" * next(audio_lengths),
                    1,
                )

        sample = sample.replace("<|audio_placeholder|>", self.audio_token)
        processed_text.append(sample)
    return processed_text
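An illustrative expansion, continuing the sketch above: each occurrence of the audio token in a sample is replaced by as many copies of itself as the next value drawn from `audio_lengths` (the per-clip encoder output length). The token string `<|AUDIO|>` is an assumption.

expanded = processor.replace_multimodal_special_tokens(
    ["Transcribe: <|AUDIO|>"],   # assumes processor.audio_token == "<|AUDIO|>"
    iter([3]),                   # this clip maps to 3 encoder tokens
)
# -> ["Transcribe: <|AUDIO|><|AUDIO|><|AUDIO|>"]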

Qwen3ASRProcessorKwargs

Bases: ProcessingKwargs

Source code in vllm/transformers_utils/processors/qwen3_asr.py
class Qwen3ASRProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "padding_side": "left",
        },
        "audio_kwargs": {
            "sampling_rate": 16000,
            "padding": True,
            "return_attention_mask": True,
        },
    }

_defaults class-attribute instance-attribute

_defaults = {
    "text_kwargs": {
        "padding": False,
        "padding_side": "left",
    },
    "audio_kwargs": {
        "sampling_rate": 16000,
        "padding": True,
        "return_attention_mask": True,
    },
}
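These defaults are merged with per-call arguments in `__call__` via `_merge_kwargs`, so they can be overridden per invocation. A hedged sketch, continuing the earlier examples; note that `__call__` itself force-sets audio `padding=True` and `truncation=False` whenever audio is provided.

inputs = processor(
    text=prompt,
    audio=audio,
    text_kwargs={"padding": True},   # override the default padding=False for text
    return_tensors="pt",
)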

_get_feat_extract_output_lengths

_get_feat_extract_output_lengths(input_lengths)

Computes the output length of the convolutional layers and the output length of the audio encoder

Source code in vllm/transformers_utils/processors/qwen3_asr.py
def _get_feat_extract_output_lengths(input_lengths):
    """
    Computes the output length of the convolutional layers and the output length of the audio encoder
    """

    input_lengths_leave = input_lengths % 100
    feat_lengths = (input_lengths_leave - 1) // 2 + 1
    output_lengths = (
        ((feat_lengths - 1) // 2 + 1 - 1) // 2 + 1 + (input_lengths // 100) * 13
    )
    return output_lengths
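A worked example (illustrative) of the length mapping: every full block of 100 mel frames contributes 13 encoder tokens, and the remainder (`input_lengths % 100`) is reduced separately by the stride-2 convolutions.

import numpy as np

from vllm.transformers_utils.processors.qwen3_asr import (
    _get_feat_extract_output_lengths,
)

_get_feat_extract_output_lengths(np.array([100, 250]))
# -> array([13, 33])
#    100 frames: 1 * 13                              = 13
#    250 frames: 2 * 13 + 7 (from the 50-frame rest) = 33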