MusicFlamingo model adapter.
MusicFlamingo shares the AudioFlamingo3 architecture, so this module reuses the same model implementation and multimodal processor while accepting the MusicFlamingo config and processor classes when they are available.
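Since the adapter changes nothing at inference time, serving MusicFlamingo through vLLM looks the same as serving AudioFlamingo3. A minimal sketch; the model ID and the audio placeholder token in the prompt are illustrative assumptions, not confirmed names, so check the actual checkpoint card:

```python
import numpy as np
from vllm import LLM, SamplingParams

# Placeholder model ID and prompt template; consult the real checkpoint card.
llm = LLM(model="nvidia/music-flamingo", trust_remote_code=True)

# One second of silence at 16 kHz stands in for a real music clip.
audio = np.zeros(16000, dtype=np.float32)

outputs = llm.generate(
    {
        "prompt": "<sound>\nDescribe this piece of music.",
        "multi_modal_data": {"audio": (audio, 16000)},
    },
    SamplingParams(max_tokens=64),
)
print(outputs[0].outputs[0].text)
```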
MusicFlamingoDummyInputsBuilder

Bases: AudioFlamingo3DummyInputsBuilder

Dummy-input builder for profiling; reuses the AudioFlamingo3 implementation unchanged.
Source code in vllm/model_executor/models/musicflamingo.py
```python
class MusicFlamingoDummyInputsBuilder(AudioFlamingo3DummyInputsBuilder):
    pass
```
MusicFlamingoForConditionalGeneration
Bases: AudioFlamingo3ForConditionalGeneration
MusicFlamingo model for conditional generation.
Source code in vllm/model_executor/models/musicflamingo.py
```python
@MULTIMODAL_REGISTRY.register_processor(
    AudioFlamingo3MultiModalProcessor,
    info=MusicFlamingoProcessingInfo,
    dummy_inputs=MusicFlamingoDummyInputsBuilder,
)
class MusicFlamingoForConditionalGeneration(AudioFlamingo3ForConditionalGeneration):
    """MusicFlamingo model for conditional generation."""
```
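vLLM generally selects a model class by the architecture name listed in the checkpoint's config.json, so beyond the decorator above no extra wiring is needed. A quick way to inspect a checkpoint, assuming it follows the usual HF convention (model ID again a placeholder):

```python
from transformers import AutoConfig

# Placeholder ID; assumes the checkpoint lists its architecture class
# in config.json as HF models conventionally do.
cfg = AutoConfig.from_pretrained("nvidia/music-flamingo", trust_remote_code=True)
print(cfg.architectures)  # expected: ["MusicFlamingoForConditionalGeneration"]
```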
MusicFlamingoProcessingInfo

Bases: BaseProcessingInfo

Processing info that prefers the MusicFlamingo config and processor classes and falls back to their AudioFlamingo3 equivalents when they are unavailable.
Source code in vllm/model_executor/models/musicflamingo.py
```python
class MusicFlamingoProcessingInfo(BaseProcessingInfo):
    def get_hf_config(self):
        if MusicFlamingoConfig is None:
            return self.ctx.get_hf_config(AudioFlamingo3Config)
        return self.ctx.get_hf_config((MusicFlamingoConfig, AudioFlamingo3Config))

    def get_hf_processor(self, **kwargs: object):
        if MusicFlamingoProcessor is None:
            return self.ctx.get_hf_processor(AudioFlamingo3Processor, **kwargs)
        # Tuple triggers AutoProcessor path and accepts either processor class.
        return self.ctx.get_hf_processor(
            (MusicFlamingoProcessor, AudioFlamingo3Processor), **kwargs
        )

    def get_feature_extractor(self, **kwargs: object):
        hf_processor = self.get_hf_processor(**kwargs)
        return hf_processor.feature_extractor

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        return {"audio": None}
```
get_feature_extractor

get_feature_extractor(**kwargs: object)

Return the feature extractor attached to the resolved HF processor.
Source code in vllm/model_executor/models/musicflamingo.py
```python
def get_feature_extractor(self, **kwargs: object):
    hf_processor = self.get_hf_processor(**kwargs)
    return hf_processor.feature_extractor
```
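The returned object is a standard HF feature extractor and can be called directly on raw waveforms. A sketch, assuming a Whisper-style extractor as used by AudioFlamingo3 (model ID a placeholder):

```python
import numpy as np
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "nvidia/music-flamingo", trust_remote_code=True  # placeholder ID
)
fe = processor.feature_extractor

audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
feats = fe(audio, sampling_rate=16000, return_tensors="pt")
# For a Whisper-style extractor the output key is input_features.
print(feats.input_features.shape)
```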
get_hf_config

get_hf_config()

Resolve the HF config, falling back to AudioFlamingo3Config when the MusicFlamingo config class is unavailable.
Source code in vllm/model_executor/models/musicflamingo.py
```python
def get_hf_config(self):
    if MusicFlamingoConfig is None:
        return self.ctx.get_hf_config(AudioFlamingo3Config)
    return self.ctx.get_hf_config((MusicFlamingoConfig, AudioFlamingo3Config))
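The None check works because the module imports the MusicFlamingo classes behind a guard, so older transformers versions degrade gracefully to the AudioFlamingo3 classes. A sketch of the pattern; the exact import location is an assumption:

```python
try:
    from transformers import MusicFlamingoConfig  # may not exist in older versions
except ImportError:
    MusicFlamingoConfig = None  # get_hf_config() then falls back to AudioFlamingo3Config
```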
get_hf_processor

get_hf_processor(**kwargs: object)

Resolve the HF processor, falling back to AudioFlamingo3Processor when the MusicFlamingo processor class is unavailable.
Source code in vllm/model_executor/models/musicflamingo.py
```python
def get_hf_processor(self, **kwargs: object):
    if MusicFlamingoProcessor is None:
        return self.ctx.get_hf_processor(AudioFlamingo3Processor, **kwargs)
    # Tuple triggers AutoProcessor path and accepts either processor class.
    return self.ctx.get_hf_processor(
        (MusicFlamingoProcessor, AudioFlamingo3Processor), **kwargs
    )
```
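Passing a tuple of classes tells the context to load via AutoProcessor and accept whichever of the listed classes the checkpoint resolves to. An illustrative sketch of the equivalent check, not vLLM's actual resolution code (model ID a placeholder):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "nvidia/music-flamingo", trust_remote_code=True  # placeholder ID
)
# The tuple argument amounts to accepting either resolved class; the name
# check avoids importing classes that may not exist in older versions.
assert type(processor).__name__ in {
    "MusicFlamingoProcessor",
    "AudioFlamingo3Processor",
}
```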
get_supported_mm_limits

get_supported_mm_limits() -> Mapping[str, int | None]

Return the supported multimodal limits; None means no fixed cap on audio items per prompt.
Source code in vllm/model_executor/models/musicflamingo.py
```python
def get_supported_mm_limits(self) -> Mapping[str, int | None]:
    return {"audio": None}
```
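Because the model itself imposes no cap, deployments can still bound audio inputs per prompt via vLLM's limit_mm_per_prompt option. A sketch (model ID a placeholder):

```python
from vllm import LLM

# Cap prompts at 4 audio clips even though the model allows any number.
llm = LLM(
    model="nvidia/music-flamingo",  # placeholder ID
    limit_mm_per_prompt={"audio": 4},
)
```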