diff --git a/.github/workflows/experiment-runner.yml b/.github/workflows/experiment-runner.yml
index 9cac407df91..5ccb8ad28ff 100644
--- a/.github/workflows/experiment-runner.yml
+++ b/.github/workflows/experiment-runner.yml
@@ -2,6 +2,10 @@ name: Experiment Runner
 
 on:
   workflow_dispatch:
+    inputs:
+      script:
+        description: "Experiment Runner Script"
+        default: "configs/sharegpt_config.yaml"
 
 concurrency:
   group: experiment-runner-${{ github.ref }}
@@ -20,7 +24,7 @@ jobs:
           bash scripts/ci_install_dependency.sh
 
       - name: Test experiment runner
-        timeout-minutes: 10
+        timeout-minutes: 120
         run: |
           cd test/srt
-          python3 experiment_runner.py --config configs/sharegpt_config.yaml
+          python3 experiment_runner.py --config ${{ inputs.script }}
diff --git a/test/srt/configs/random_config.yaml b/test/srt/configs/random_config.yaml
new file mode 100644
index 00000000000..eae8c27f41c
--- /dev/null
+++ b/test/srt/configs/random_config.yaml
@@ -0,0 +1,25 @@
+tasks:
+  - name: sglang-128-4
+    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
+    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 128 --random-output 4 --request-rate 24 --num-prompt 1440
+  - name: vllm-128-4
+    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
+    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 128 --random-output 4 --request-rate 24 --num-prompt 1440
+  - name: sglang-2000-100
+    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
+    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 2000 --random-output 100 --request-rate 2 --num-prompt 120
+  - name: vllm-2000-100
+    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
+    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 2000 --random-output 100 --request-rate 2 --num-prompt 120
+  - name: sglang-4000-200
+    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
+    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 4000 --random-output 200 --request-rate 8 --num-prompt 480
+  - name: vllm-4000-200
+    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
+    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 4000 --random-output 200 --request-rate 8 --num-prompt 480
+  - name: sglang-32000-100
+    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
+    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 32000 --random-output 100 --request-rate 1 --num-prompt 60
+  - name: vllm-32000-100
+    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
+    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 32000 --random-output 100 --request-rate 1 --num-prompt 60
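
Usage note: with the workflow_dispatch input added above, a run can point the experiment runner at an alternative config such as the new random_config.yaml. A minimal dispatch sketch, assuming the GitHub CLI is available and this workflow exists on the target branch (the path is resolved relative to test/srt, since the job does `cd test/srt` before invoking the runner):

    # hypothetical invocation; omit -f to use the default configs/sharegpt_config.yaml
    gh workflow run experiment-runner.yml -f script=configs/random_config.yaml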