Rename _get_dummy_mm_inputs
Signed-off-by: DarkLight1337 <[email protected]>
DarkLight1337 committed Jan 4, 2025
1 parent 0899dce commit b978edb
Showing 12 changed files with 32 additions and 20 deletions.
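In short: the per-model hook `_get_dummy_mm_inputs` becomes `_get_dummy_processor_inputs`, and the old name now refers to a new shared helper in `vllm/multimodal/processing.py` that runs the per-model dummy inputs through `apply()`. A minimal before/after sketch of a call site, distilled from the test diff below (`baseline_processor` and `model_config` are names taken from that test, not new API):

# Before this commit (call shape as in the old test):
prompt = baseline_processor._get_dummy_mm_inputs(mm_counts).prompt_text

# After this commit: the renamed hook is called with an explicit
# sequence length alongside the per-modality counts.
prompt = baseline_processor._get_dummy_processor_inputs(
    model_config.max_model_len,
    mm_counts,
).prompt_text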
5 changes: 4 additions & 1 deletion tests/multimodal/test_processing.py
@@ -723,7 +723,10 @@ def _test_processing_cache_correctness(
     }
 
     mm_counts = {k: len(vs) for k, vs in mm_data.items()}
-    prompt = baseline_processor._get_dummy_mm_inputs(mm_counts).prompt_text
+    prompt = baseline_processor._get_dummy_processor_inputs(
+        model_config.max_model_len,
+        mm_counts,
+    ).prompt_text
 
     # Drop unnecessary keys and test single -> multi conversion
     if rng.rand() < simplify_rate:
2 changes: 1 addition & 1 deletion vllm/model_executor/models/aria.py
@@ -488,7 +488,7 @@ def _get_prompt_replacements(
             )
         ]
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
2 changes: 1 addition & 1 deletion vllm/model_executor/models/blip2.py
@@ -457,7 +457,7 @@ def apply(
 
         return result
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
2 changes: 1 addition & 1 deletion vllm/model_executor/models/chameleon.py
@@ -90,7 +90,7 @@ def _get_prompt_replacements(
             )
         ]
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
2 changes: 1 addition & 1 deletion vllm/model_executor/models/fuyu.py
@@ -208,7 +208,7 @@ def apply(
 
         return result
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
2 changes: 1 addition & 1 deletion vllm/model_executor/models/llava.py
@@ -149,7 +149,7 @@ def _get_dummy_image_size(self) -> ImageSize:
     def _get_image_token(self) -> str:
         raise NotImplementedError
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
2 changes: 1 addition & 1 deletion vllm/model_executor/models/llava_next_video.py
@@ -162,7 +162,7 @@ def get_replacement(item_idx: int):
             ),
         ]
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
2 changes: 1 addition & 1 deletion vllm/model_executor/models/phi3v.py
@@ -442,7 +442,7 @@ def _apply_prompt_replacements(
 
         return token_ids, text, placeholders
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
2 changes: 1 addition & 1 deletion vllm/model_executor/models/qwen2_audio.py
@@ -191,7 +191,7 @@ def _always_apply_prompt_replacements(self) -> bool:
         # tokens than the number of audio items)
         return True
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
2 changes: 1 addition & 1 deletion vllm/model_executor/models/qwen2_vl.py
@@ -900,7 +900,7 @@ def _get_mm_fields_config(
             video_grid_thw=MultiModalFieldConfig.batched("video"),
         )
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
2 changes: 1 addition & 1 deletion vllm/model_executor/models/ultravox.py
@@ -173,7 +173,7 @@ def get_replacement_ultravox(item_idx: int):
             )
         ]
 
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
27 changes: 18 additions & 9 deletions vllm/multimodal/processing.py
@@ -799,7 +799,7 @@ def _apply_hf_processor_missing(
 
         # Some HF processors (e.g. Qwen2-VL) expect corresponding
         # multi-modal tokens to be in the prompt text
-        dummy_inputs = self._get_dummy_mm_inputs(
+        dummy_inputs = self._get_dummy_processor_inputs(
             self.ctx.model_config.max_model_len,
             mm_missing_counts,
         )
@@ -1164,7 +1164,7 @@ def _get_dummy_videos(
         return [video] * num_videos
 
     @abstractmethod
-    def _get_dummy_mm_inputs(
+    def _get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
@@ -1194,6 +1194,20 @@ def _get_and_validate_dummy_mm_counts(self) -> Mapping[str, int]:
 
         return mm_limits
 
+    def _get_dummy_mm_inputs(
+        self,
+        seq_len: int,
+        mm_counts: Mapping[str, int],
+        mm_max_tokens_per_item: Mapping[str, int],
+    ) -> MultiModalInputsV2:
+        processor_inputs = self._get_dummy_processor_inputs(seq_len, mm_counts)
+
+        return self.apply(
+            prompt_text=processor_inputs.prompt_text,
+            mm_data=processor_inputs.mm_data,
+            hf_processor_mm_kwargs=processor_inputs.hf_processor_mm_kwargs,
+        )
+
     def get_dummy_data(self, seq_len: int) -> DummyData:
         # Avoid circular import
         from vllm.sequence import SequenceData
@@ -1207,13 +1221,8 @@ def get_dummy_data(self, seq_len: int) -> DummyData:
                 "returned by `get_mm_max_tokens_per_item` "
                 f"({set(mm_max_tokens_per_item.keys())})")
 
-        processor_inputs = self._get_dummy_mm_inputs(seq_len, mm_counts)
-        mm_inputs = self.apply(
-            prompt_text=processor_inputs.prompt_text,
-            mm_data=processor_inputs.mm_data,
-            hf_processor_mm_kwargs=processor_inputs.hf_processor_mm_kwargs,
-        )
-
+        mm_inputs = self._get_dummy_mm_inputs(seq_len, mm_counts,
+                                              mm_max_tokens_per_item)
         prompt_token_ids = mm_inputs["prompt_token_ids"]
         placeholders_by_modality = mm_inputs["mm_placeholders"]
 
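For readers skimming the hunks above, here is a self-contained sketch of the split this commit introduces in the base processor. The `ProcessorInputs` dataclass and the `apply()` stub below are assumptions reconstructed from the fields used in this diff, not the real vLLM definitions:

from abc import ABC, abstractmethod
from collections.abc import Mapping
from dataclasses import dataclass, field
from typing import Any

@dataclass
class ProcessorInputs:
    # Assumed container: only the three fields referenced in the diff.
    prompt_text: str
    mm_data: Mapping[str, Any]
    hf_processor_mm_kwargs: Mapping[str, Any] = field(default_factory=dict)

class BaseMultiModalProcessor(ABC):
    @abstractmethod
    def _get_dummy_processor_inputs(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> ProcessorInputs:
        """Per-model hook (renamed by this commit): build dummy inputs."""

    def apply(self, prompt_text, mm_data, hf_processor_mm_kwargs):
        # Stand-in for the real processing pipeline, which this commit
        # leaves unchanged; the real method returns MultiModalInputsV2.
        raise NotImplementedError

    def _get_dummy_mm_inputs(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_max_tokens_per_item: Mapping[str, int],
    ):
        # The old name survives as a shared helper: it feeds the per-model
        # dummy inputs through the full pipeline, which is how
        # get_dummy_data calls it in the last hunk above.
        processor_inputs = self._get_dummy_processor_inputs(seq_len, mm_counts)
        return self.apply(
            prompt_text=processor_inputs.prompt_text,
            mm_data=processor_inputs.mm_data,
            hf_processor_mm_kwargs=processor_inputs.hf_processor_mm_kwargs,
        )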
