[platform] add ray_device_key (#11948)
Signed-off-by: youkaichao <[email protected]>
youkaichao authored Jan 13, 2025
1 parent c3f05b0 commit 89ce62a
Showing 9 changed files with 38 additions and 8 deletions.
19 changes: 13 additions & 6 deletions vllm/executor/ray_utils.py
@@ -8,6 +8,7 @@
from vllm.config import ParallelConfig
from vllm.executor.msgspec_utils import decode_hook, encode_hook
from vllm.logger import init_logger
from vllm.platforms import current_platform
from vllm.sequence import ExecuteModelRequest, IntermediateTensors
from vllm.utils import get_ip
from vllm.worker.worker_base import WorkerWrapperBase
@@ -47,7 +48,12 @@ def get_node_ip(self) -> str:

def get_node_and_gpu_ids(self) -> Tuple[str, List[int]]:
node_id = ray.get_runtime_context().get_node_id()
gpu_ids = ray.get_gpu_ids()
device_key = current_platform.ray_device_key
if not device_key:
raise RuntimeError(f"current platform {current_platform.device_name} does not support ray.")
gpu_ids = ray.get_runtime_context().get_accelerator_ids(
)[device_key]
return node_id, gpu_ids

def execute_model_spmd(
@@ -249,11 +255,12 @@ def initialize_ray_cluster(
# Placement group is already set.
return

device_str = "GPU"
if current_platform.is_tpu():
device_str = "TPU"
elif current_platform.is_hpu():
device_str = 'HPU'
device_str = current_platform.ray_device_key
if not device_str:
raise ValueError(
f"current platform {current_platform.device_name} does not "
"support ray.")

# Create placement group for worker processes
current_placement_group = ray.util.get_current_placement_group()
if current_placement_group:
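
For context: ray.get_runtime_context().get_accelerator_ids() returns a dict mapping Ray's accelerator resource keys (e.g. "GPU", "TPU", "neuron_cores") to the IDs assigned to the current worker, so indexing it with the platform's ray_device_key generalizes the old GPU-only ray.get_gpu_ids() call. A minimal sketch of that lookup, assuming Ray is installed and the worker is scheduled with one GPU (the task body is illustrative, not code from this commit):

import ray


@ray.remote(num_gpus=1)
def assigned_device_ids(device_key: str = "GPU"):
    # Returns a dict such as {"GPU": ["0"], ...}; only the key declared by
    # the active vLLM platform's ray_device_key is consulted.
    accelerator_ids = ray.get_runtime_context().get_accelerator_ids()
    return accelerator_ids[device_key]


if __name__ == "__main__":
    ray.init()
    print(ray.get(assigned_device_ids.remote()))  # e.g. ['0'] on a CUDA node
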
1 change: 1 addition & 0 deletions vllm/platforms/cuda.py
@@ -77,6 +77,7 @@ class CudaPlatformBase(Platform):
device_name: str = "cuda"
device_type: str = "cuda"
dispatch_key: str = "CUDA"
ray_device_key: str = "GPU"

@classmethod
def get_device_capability(cls,
1 change: 1 addition & 0 deletions vllm/platforms/hpu.py
@@ -19,6 +19,7 @@ class HpuPlatform(Platform):
device_name: str = "hpu"
device_type: str = "hpu"
dispatch_key: str = "HPU"
ray_device_key: str = "HPU"

@classmethod
def get_attn_backend_cls(cls, selected_backend: _Backend, head_size: int,
4 changes: 4 additions & 0 deletions vllm/platforms/interface.py
@@ -82,6 +82,10 @@ class Platform:
# check https://github.com/pytorch/pytorch/blob/313dac6c1ca0fa0cde32477509cce32089f8532a/torchgen/model.py#L134 # noqa
# use "CPU" as a fallback for platforms not registered in PyTorch
dispatch_key: str = "CPU"
# available ray device keys:
# https://github.com/ray-project/ray/blob/10ba5adadcc49c60af2c358a33bb943fb491a171/python/ray/_private/ray_constants.py#L438 # noqa
# empty string means the device does not support ray
ray_device_key: str = ""
# The torch.compile backend for compiling simple and
# standalone functions. The default value is "inductor" to keep
# the same behavior as PyTorch.
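
The new attribute defaults to "" on the base Platform, so Ray support is strictly opt-in: a platform (in-tree or out-of-tree) enables it by overriding the class attribute, exactly as cuda.py, hpu.py and the other files in this commit do. A hypothetical sketch of such an override (the class name, device names and enum member are illustrative assumptions, not part of this commit):

from vllm.platforms.interface import Platform, PlatformEnum


class MyAcceleratorPlatform(Platform):
    """Hypothetical platform that opts into vLLM's Ray executor support."""
    _enum = PlatformEnum.OOT  # assumed: the out-of-tree platform enum member
    device_name: str = "my_accel"
    device_type: str = "my_accel"
    # Must be one of Ray's accelerator resource keys (see the link above);
    # leaving the default "" makes ray_utils raise "does not support ray".
    ray_device_key: str = "GPU"
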
1 change: 1 addition & 0 deletions vllm/platforms/neuron.py
@@ -16,6 +16,7 @@ class NeuronPlatform(Platform):
_enum = PlatformEnum.NEURON
device_name: str = "neuron"
device_type: str = "neuron"
ray_device_key: str = "neuron_cores"
supported_quantization: list[str] = ["neuron_quant"]

@classmethod
2 changes: 2 additions & 0 deletions vllm/platforms/rocm.py
@@ -64,6 +64,8 @@ class RocmPlatform(Platform):
device_name: str = "rocm"
device_type: str = "cuda"
dispatch_key: str = "CUDA"
ray_device_key: str = "GPU"

supported_quantization: list[str] = [
"awq", "gptq", "fp8", "compressed_tensors", "compressed-tensors",
"fbgemm_fp8", "gguf"
2 changes: 2 additions & 0 deletions vllm/platforms/tpu.py
@@ -19,6 +19,8 @@ class TpuPlatform(Platform):
device_name: str = "tpu"
device_type: str = "tpu"
dispatch_key: str = "XLA"
ray_device_key: str = "TPU"

supported_quantization: list[str] = [
"tpu_int8", "compressed-tensors", "compressed_tensors"
]
3 changes: 3 additions & 0 deletions vllm/platforms/xpu.py
@@ -19,6 +19,9 @@ class XPUPlatform(Platform):
device_name: str = "xpu"
device_type: str = "xpu"
dispatch_key: str = "XPU"
# Intel XPU's device key is "GPU" for Ray.
# see https://github.com/ray-project/ray/blob/6a5eb5865eeb9ccf058a79b44f107e327e360673/python/ray/_private/accelerators/intel_gpu.py#L20 # noqa: E501
ray_device_key: str = "GPU"

@classmethod
def get_attn_backend_cls(cls, selected_backend: _Backend, head_size: int,
13 changes: 11 additions & 2 deletions vllm/v1/executor/ray_utils.py
@@ -41,7 +41,12 @@ def get_node_ip(self) -> str:

def get_node_and_gpu_ids(self) -> Tuple[str, List[int]]:
node_id = ray.get_runtime_context().get_node_id()
gpu_ids = ray.get_gpu_ids()
device_key = current_platform.ray_device_key
if not device_key:
raise RuntimeError(f"current platform {current_platform.device_name} does not support ray.")
gpu_ids = ray.get_runtime_context().get_accelerator_ids(
)[device_key]
return node_id, gpu_ids

def setup_device_if_necessary(self):
@@ -211,7 +216,11 @@ def initialize_ray_cluster(
# Placement group is already set.
return

device_str = "GPU" if not current_platform.is_tpu() else "TPU"
device_str = current_platform.ray_device_key
if not device_str:
raise ValueError(
f"current platform {current_platform.device_name} does not "
"support ray.")
# Create placement group for worker processes
current_placement_group = ray.util.get_current_placement_group()
if current_placement_group:
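
Downstream in initialize_ray_cluster, device_str is the resource name requested per worker when the placement group is created, which is why a single code path now covers GPU, TPU, HPU, XPU and Neuron. A rough sketch of that use, assuming the usual one-device-per-worker bundle layout (not the literal code from this file):

import ray


def build_placement_group(device_str: str, world_size: int):
    # One bundle per worker, each reserving a single device of the
    # platform's Ray resource type, e.g. {"GPU": 1.0} or {"TPU": 1.0}.
    bundles = [{device_str: 1.0} for _ in range(world_size)]
    pg = ray.util.placement_group(bundles, strategy="PACK")
    ray.get(pg.ready())  # block until Ray can satisfy all bundles
    return pg

ray.init() must already have been called before creating the placement group; in vLLM that happens earlier in initialize_ray_cluster.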
