From 06f3818dc8f5d7d1c454380da559eea7f7fbd837 Mon Sep 17 00:00:00 2001
From: zhyncs
Date: Sat, 21 Dec 2024 23:44:41 +0800
Subject: [PATCH] fix #2528

---
 python/pyproject.toml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/pyproject.toml b/python/pyproject.toml
index d459c523f10..2d05a044d84 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -13,7 +13,7 @@ classifiers = [
     "Programming Language :: Python :: 3",
     "License :: OSI Approved :: Apache Software License",
 ]
-dependencies = ["requests", "tqdm", "numpy", "IPython", "setproctitle"]
+dependencies = ["requests", "tqdm", "numpy", "IPython", "setproctitle", "torch"]
 
 [project.optional-dependencies]
 runtime_common = ["aiohttp", "decord", "fastapi",
@@ -23,11 +23,11 @@ runtime_common = ["aiohttp", "decord", "fastapi",
     "psutil", "pydantic", "python-multipart",
     "pyzmq>=25.1.2", "torchao>=0.7.0", "gemlite",
     "uvicorn", "uvloop", "xgrammar>=0.1.6"]
-srt = ["sglang[runtime_common]", "torch", "vllm>=0.6.3.post1,<=0.6.4.post1", "cuda-python", "flashinfer==0.1.6"]
+srt = ["sglang[runtime_common]", "vllm>=0.6.3.post1,<=0.6.4.post1", "cuda-python", "flashinfer==0.1.6"]
 
 # HIP (Heterogeneous-computing Interface for Portability) for AMD
 # => base docker rocm/vllm-dev:20241022, not from public vllm whl
-srt_hip = ["sglang[runtime_common]", "torch", "vllm==0.6.3.dev13"]
+srt_hip = ["sglang[runtime_common]", "vllm==0.6.3.dev13"]
 # xpu is not enabled in public vllm and torch whl,
 # need to follow https://docs.vllm.ai/en/latest/getting_started/xpu-installation.htmlinstall vllm
 srt_xpu = ["sglang[runtime_common]"]