Skip to content

Commit

Permalink
treat do_lower_case in the same way as the sentence-transformers library (vllm-project#11815)
Browse files Browse the repository at this point in the history

Signed-off-by: Max de Bayser <[email protected]>
Signed-off-by: Fred Reiss <[email protected]>
  • Loading branch information
maxdebayser authored and frreiss committed Jan 10, 2025
1 parent 66a9051 commit 8f241e7
Show file tree
Hide file tree
Showing 5 changed files with 13 additions and 5 deletions.
1 change: 1 addition & 0 deletions tests/entrypoints/openai/test_serving_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ class MockModelConfig:
logits_processor_pattern = None
diff_sampling_param: Optional[dict] = None
allowed_local_media_path: str = ""
encoder_config = None

def get_diff_sampling_param(self):
return self.diff_sampling_param or {}
Expand Down
1 change: 1 addition & 0 deletions tests/models/embedding/language/test_embedding.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
# [Encoder-only]
pytest.param("BAAI/bge-base-en-v1.5",
marks=[pytest.mark.core_model, pytest.mark.cpu_model]),
pytest.param("sentence-transformers/all-MiniLM-L12-v2"),
pytest.param("intfloat/multilingual-e5-large"),
# [Encoder-decoder]
pytest.param("intfloat/e5-mistral-7b-instruct",
Expand Down
5 changes: 5 additions & 0 deletions vllm/entrypoints/openai/serving_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,11 @@ def _normalize_prompt_text_to_input(
truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]],
add_special_tokens: bool,
) -> TextTokensPrompt:
if (self.model_config.encoder_config is not None
and self.model_config.encoder_config.get(
"do_lower_case", False)):
prompt = prompt.lower()

if truncate_prompt_tokens is None:
encoded = tokenizer(prompt, add_special_tokens=add_special_tokens)
else:
Expand Down
6 changes: 6 additions & 0 deletions vllm/inputs/preprocess.py
Original file line number Diff line number Diff line change
Expand Up @@ -190,6 +190,12 @@ def _tokenize_prompt(
# on the task and language of their request. Also needed to avoid
# appending an EOS token to the prompt which disrupts generation.
add_special_tokens = False

if (self.model_config.encoder_config is not None
and self.model_config.encoder_config.get(
"do_lower_case", False)):
prompt = prompt.lower()

return tokenizer.encode(request_id=request_id,
prompt=prompt,
lora_request=lora_request,
Expand Down
5 changes: 0 additions & 5 deletions vllm/transformers_utils/tokenizer_group/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,6 @@ def init_tokenizer_from_configs(model_config: ModelConfig,
trust_remote_code=model_config.trust_remote_code,
revision=model_config.tokenizer_revision)

if (model_config.encoder_config is not None
and "do_lower_case" in model_config.encoder_config):
init_kwargs["do_lower_case"] = model_config.encoder_config[
"do_lower_case"]

return get_tokenizer_group(parallel_config.tokenizer_pool_config,
**init_kwargs)

Expand Down

0 comments on commit 8f241e7

Please sign in to comment.