Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

[V1] Avoid sending text prompt to core engine #11963

Merged
merged 4 commits into from
Jan 12, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions vllm/v1/engine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ class EngineCoreRequest:
# due to circular imports and typing we have in data.py

request_id: str
#NOTE(Nick): I don't think we need to pass prompt here since it should
# always be tokenized?
# NOTE(ywang96): original text prompt is needed when a request is added to
# Detokenizer, but set to None when it is added to EngineCoreClient.
prompt: Optional[str]
prompt_token_ids: List[int]
mm_inputs: Optional[List[Optional["MultiModalKwargs"]]]
Expand Down
6 changes: 6 additions & 0 deletions vllm/v1/engine/core_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,9 @@ def _send_input(self, request_type: EngineCoreRequestType,
self.input_socket.send_multipart(msg, copy=False)

def add_request(self, request: EngineCoreRequest) -> None:
# Submit a new request to the core engine over the input socket
# (serialized via send_multipart in _send_input).
# NOTE: text prompt is not needed in the core engine as it has been
# tokenized; prompt_token_ids carries everything the core engine needs,
# so dropping the raw text avoids shipping it across the socket.
# NOTE(review): this mutates the caller's request object in place —
# presumably the text prompt has already been handed to the Detokenizer
# by this point; confirm against the call sites.
request.prompt = None
self._send_input(EngineCoreRequestType.ADD, request)

def abort_requests(self, request_ids: List[str]) -> None:
Expand Down Expand Up @@ -257,6 +260,9 @@ async def _send_input(self, request_type: EngineCoreRequestType,
await self.input_socket.send_multipart(msg, copy=False)

async def add_request_async(self, request: EngineCoreRequest) -> None:
# Async counterpart of add_request: submit a new request to the core
# engine over the input socket (awaited send_multipart in _send_input).
# NOTE: text prompt is not needed in the core engine as it has been
# tokenized; prompt_token_ids carries everything the core engine needs,
# so dropping the raw text avoids shipping it across the socket.
# NOTE(review): mutates the caller's request object in place —
# presumably the text prompt has already been handed to the Detokenizer
# by this point; confirm against the call sites.
request.prompt = None
await self._send_input(EngineCoreRequestType.ADD, request)

async def abort_requests_async(self, request_ids: List[str]) -> None:
Expand Down
Loading