diff --git a/docs/source/usage/multimodal_inputs.md b/docs/source/usage/multimodal_inputs.md
index 82a3f3b8909a1..4f45a9f448cf0 100644
--- a/docs/source/usage/multimodal_inputs.md
+++ b/docs/source/usage/multimodal_inputs.md
@@ -294,12 +294,60 @@ $ export VLLM_IMAGE_FETCH_TIMEOUT=
 ### Video
 
-Instead of {code}`image_url`, you can pass a video file via {code}`video_url`.
+Instead of {code}`image_url`, you can pass a video file via {code}`video_url`. Here is a simple example using [LLaVA-OneVision](https://huggingface.co/llava-hf/llava-onevision-qwen2-0.5b-ov-hf).
 
-You can use [these tests](gh-file:entrypoints/openai/test_video.py) as reference.
+First, launch the OpenAI-compatible server:
+
+```bash
+vllm serve llava-hf/llava-onevision-qwen2-0.5b-ov-hf --task generate --max-model-len 8192
+```
+
+Then, you can use the OpenAI client as follows:
+
+```python
+from openai import OpenAI
+
+openai_api_key = "EMPTY"
+openai_api_base = "http://localhost:8000/v1"
+
+client = OpenAI(
+    api_key=openai_api_key,
+    base_url=openai_api_base,
+)
+
+model = "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
+video_url = "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4"
+
+## Use video url in the payload
+chat_completion_from_url = client.chat.completions.create(
+    messages=[{
+        "role":
+        "user",
+        "content": [
+            {
+                "type": "text",
+                "text": "What's in this video?"
+            },
+            {
+                "type": "video_url",
+                "video_url": {
+                    "url": video_url
+                },
+            },
+        ],
+    }],
+    model=model,
+    max_completion_tokens=64,
+)
+
+result = chat_completion_from_url.choices[0].message.content
+print("Chat completion output from video url:", result)
+```
+
+Full example: <gh-file:examples/openai_chat_completion_client_for_multimodal.py>
 
 ````{note}
-By default, the timeout for fetching videos through HTTP URL url is `30` seconds.
+By default, the timeout for fetching videos through HTTP URL is `30` seconds.
 
 You can override this by setting the environment variable:
 
 ```console
diff --git a/examples/openai_chat_completion_client_for_multimodal.py b/examples/openai_chat_completion_client_for_multimodal.py
index 6a160fd70423f..213d075542e81 100644
--- a/examples/openai_chat_completion_client_for_multimodal.py
+++ b/examples/openai_chat_completion_client_for_multimodal.py
@@ -18,7 +18,6 @@
 import requests
 from openai import OpenAI
 
-from vllm.assets.audio import AudioAsset
 from vllm.utils import FlexibleArgumentParser
 
 # Modify OpenAI's API key and API base to use vLLM's API server.
@@ -151,8 +150,66 @@ def run_multi_image() -> None:
     print("Chat completion output:", result)
 
 
+# Video input inference
+def run_video() -> None:
+    video_url = "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4"
+    video_base64 = encode_base64_content_from_url(video_url)
+
+    ## Use video url in the payload
+    chat_completion_from_url = client.chat.completions.create(
+        messages=[{
+            "role":
+            "user",
+            "content": [
+                {
+                    "type": "text",
+                    "text": "What's in this video?"
+                },
+                {
+                    "type": "video_url",
+                    "video_url": {
+                        "url": video_url
+                    },
+                },
+            ],
+        }],
+        model=model,
+        max_completion_tokens=64,
+    )
+
+    result = chat_completion_from_url.choices[0].message.content
+    print("Chat completion output from video url:", result)
+
+    ## Use base64 encoded video in the payload
+    chat_completion_from_base64 = client.chat.completions.create(
+        messages=[{
+            "role":
+            "user",
+            "content": [
+                {
+                    "type": "text",
+                    "text": "What's in this video?"
+                },
+                {
+                    "type": "video_url",
+                    "video_url": {
+                        "url": f"data:video/mp4;base64,{video_base64}"
+                    },
+                },
+            ],
+        }],
+        model=model,
+        max_completion_tokens=64,
+    )
+
+    result = chat_completion_from_base64.choices[0].message.content
+    print("Chat completion output from base64 encoded video:", result)
+
+
 # Audio input inference
 def run_audio() -> None:
+    from vllm.assets.audio import AudioAsset
+
     audio_url = AudioAsset("winning_call").url
     audio_base64 = encode_base64_content_from_url(audio_url)
 
@@ -240,6 +297,7 @@ def run_audio() -> None:
     "text-only": run_text_only,
     "single-image": run_single_image,
     "multi-image": run_multi_image,
+    "video": run_video,
     "audio": run_audio,
 }
 
@@ -253,12 +311,11 @@ def main(args) -> None:
     parser = FlexibleArgumentParser(
         description='Demo on using OpenAI client for online inference with '
         'multimodal language models served with vLLM.')
-    parser.add_argument(
-        '--chat-type',
-        '-c',
-        type=str,
-        default="single-image",
-        choices=["text-only", "single-image", "multi-image", "audio"],
-        help='Conversation type with multimodal data.')
+    parser.add_argument('--chat-type',
+                        '-c',
+                        type=str,
+                        default="single-image",
+                        choices=list(example_function_map.keys()),
+                        help='Conversation type with multimodal data.')
     args = parser.parse_args()
     main(args)
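Note that both `run_video()` and the pre-existing `run_audio()` call `encode_base64_content_from_url()`, a helper defined earlier in the example file and therefore outside the hunks shown above. For readers following the diff without the full file, here is a minimal sketch of what such a helper can look like, assuming only `requests` and the standard library (the actual implementation in the example file may differ in details):

```python
import base64

import requests


def encode_base64_content_from_url(content_url: str) -> str:
    """Fetch content from a remote URL and return it base64-encoded."""
    with requests.get(content_url) as response:
        response.raise_for_status()
        # Encode the raw bytes; the caller adds the "data:...;base64," prefix.
        return base64.b64encode(response.content).decode('utf-8')
```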
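With the server from the docs section running, the new code path can be exercised via `python examples/openai_chat_completion_client_for_multimodal.py --chat-type video`. Deriving the argparse `choices` from `list(example_function_map.keys())` also means that any chat type added to the map (such as `"video"` here) is accepted by the CLI automatically, rather than having to be listed in two places.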