Package test #392

Workflow file for this run

name: Package test

on:
  workflow_dispatch:

  # Schedule the workflow to run at 08:00 (UTC) every day.
  schedule:
    # Minute[0,59] Hour[0,23] Day of month[1,31] Month[1,12] Day of week[0,6] (Sunday=0)
    - cron: '0 8 * * *'
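  # Run for pushes and pull requests against main that touch the Python
  # package, the tests, the build configuration, or this workflow file.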
  push:
    paths:
      - "scalellm/**"
      - "tests/**"
      - "setup.py"
      - "requirements.txt"
      - "requirements-test.txt"
      - ".github/workflows/package_test.yml"
    branches:
      - main

  pull_request:
    paths:
      - "scalellm/**"
      - "tests/**"
      - "setup.py"
      - "requirements.txt"
      - "requirements-test.txt"
      - ".github/workflows/package_test.yml"
    branches:
      - main
env:
  # Where to store build caches on the self-hosted runner.
  CI_CACHE_DIR: ${{ github.workspace }}/../../ci_cache

# Cancel all previous runs if a new one is triggered.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
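# Build the ScaleLLM wheel inside a manylinux container, then install it and
# run pytest on a GPU-enabled runner.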
jobs:
  build-and-test-wheel:
    strategy:
      fail-fast: false
      matrix:
        python: ["3.12"]
        cuda: ["12.4"]
        torch: ["2.5.1"]
    runs-on: [self-hosted, linux, build]
    env:
      PYTHON_VERSION: ${{ matrix.python }}
      CUDA_VERSION: ${{ matrix.cuda }}
      TORCH_VERSION: ${{ matrix.torch }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Create cache directory
        run: |
          mkdir -p $CI_CACHE_DIR/.vcpkg/bincache
          mkdir -p $CI_CACHE_DIR/.ccache
          mkdir -p $CI_CACHE_DIR/.pip
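      # Build the wheel inside the manylinux image. The vcpkg, ccache, and pip
      # caches are mounted from the host so they persist across runs, and the
      # container runs as the host user so cached files stay owned by the runner.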
      - name: Build wheel
        timeout-minutes: 60
        run: |
          docker pull vectorchai/scalellm_manylinux:cuda${CUDA_VERSION}
          docker run --rm -t \
            -v "$CI_CACHE_DIR":/ci_cache \
            -v "$GITHUB_WORKSPACE":/ScaleLLM \
            -e PYTHON_VERSION=${PYTHON_VERSION} \
            -e CUDA_VERSION=${CUDA_VERSION} \
            -e TORCH_VERSION=${TORCH_VERSION} \
            -e VCPKG_DEFAULT_BINARY_CACHE=/ci_cache/.vcpkg/bincache \
            -e CCACHE_DIR=/ci_cache/.ccache \
            -e PIP_CACHE_DIR=/ci_cache/.pip \
            -u $(id -u):$(id -g) \
            vectorchai/scalellm_manylinux:cuda${CUDA_VERSION} \
            bash /ScaleLLM/scripts/build_wheel.sh
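      # The build script leaves the wheel under dist/ in the checked-out workspace.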
      - name: Show whl package size
        run: du -h dist/*
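      # Install the just-built package and run pytest on a GPU (--gpus=all),
      # using the PyTorch manylinux builder image.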
      - name: Install the package and run pytest
        timeout-minutes: 10
        run: |
          docker pull pytorch/manylinux-builder:cuda${CUDA_VERSION}
          docker run --rm -t --gpus=all \
            -v "$CI_CACHE_DIR":/ci_cache \
            -v "$GITHUB_WORKSPACE":/ScaleLLM \
            -e PYTHON_VERSION=${PYTHON_VERSION} \
            -e CUDA_VERSION=${CUDA_VERSION} \
            -e TORCH_VERSION=${TORCH_VERSION} \
            -e PIP_CACHE_DIR=/ci_cache/.pip \
            -u $(id -u):$(id -g) \
            pytorch/manylinux-builder:cuda${CUDA_VERSION} \
            bash /ScaleLLM/scripts/run_pytest.sh