Skip to content

Commit

Permalink
Merge aeb970b into 80fd3b3
Browse files Browse the repository at this point in the history
  • Loading branch information
PaliC authored Jul 11, 2022
2 parents 80fd3b3 + aeb970b commit 49b629a
Show file tree
Hide file tree
Showing 5 changed files with 49 additions and 20 deletions.
28 changes: 23 additions & 5 deletions .github/workflows/runtime_nightly.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,10 @@ name: Multipy runtime nightly release
on:
schedule:
- cron: '0 2 * * *' # run at 2 AM UTC
pull_request:
push:
branches:
- main

jobs:
unittest:
Expand All @@ -11,6 +15,7 @@ jobs:
python-version: [3.8]
platform: [linux.2xlarge]
abi: [0,1]
cuda: [0,1]
fail-fast: false
runs-on: ${{ matrix.platform }}
steps:
Expand All @@ -27,6 +32,7 @@ jobs:
- name: setup Path
run: |
echo /usr/local/cuda-11.3/bin >> $GITHUB_PATH
echo "/home/ec2-user/miniconda/bin" >> $GITHUB_PATH
echo "CONDA=/home/ec2-user/miniconda" >> $GITHUB_PATH
Expand Down Expand Up @@ -75,7 +81,7 @@ jobs:
conda run -n example_env python -m pip install torch torchvision torchaudio pathlib
conda run -n example_env python generate_examples.py
- name: Build pytorch with ABI=${{ matrix.abi }}
- name: Build pytorch with ABI=${{ matrix.abi }} and USE_CUDA=${{ matrix.cuda }}
shell: bash -l {0}
env:
PYTHON_VERSION: ${{ matrix.python-version }}
Expand All @@ -85,9 +91,10 @@ jobs:
export TORCH_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=${{ matrix.abi }}"
cd multipy/runtime/third-party/pytorch
export USE_DEPLOY=1
export USE_CUDA=${{ matrix.cuda }}
conda run -n multipy_runtime_env python setup.py develop
- name: Build multipy runtime with ABI=${{ matrix.abi }}
- name: Build multipy runtime
shell: bash -l {0}
env:
PYTHON_VERSION: ${{ matrix.python-version }}
Expand All @@ -98,22 +105,33 @@ jobs:
conda run -n multipy_runtime_env cmake -DABI_EQUALS_1=${{ matrix.abi }} ..
conda run -n multipy_runtime_env cmake --build . --config Release
- name: install files
- name: Install files
shell: bash -l {0}
env:
PYTHON_VERSION: ${{ matrix.python-version }}
run: |
cd multipy/runtime/build
conda run -n multipy_runtime_env cmake --install . --prefix "."
- name: Run unit tests with ABI=${{ matrix.abi }}
- name: Run unit tests with ABI=${{ matrix.abi }} with cuda=${{ matrix.cuda }}
shell: bash -l {0}
env:
PYTHON_VERSION: ${{ matrix.python-version }}
run: |
cd multipy/runtime/build
./test_deploy
- name: Run unit tests with ABI=${{ matrix.abi }} with gpu
shell: bash -l {0}
env:
PYTHON_VERSION: ${{ matrix.python-version }}
run: |
if [[ ${{ matrix.cuda }} -eq 1 ]]
then
cd multipy/runtime/build
./test_deploy_gpu
fi
- name: create tarball [click me to get a list of files for the nightly release]
shell: bash -l {0}
env:
Expand All @@ -125,7 +143,7 @@ jobs:
- name: Update nightly release
uses: pyTooling/Actions/releaser@main
with:
tag: nightly-runtime-abi-${{ matrix.abi }}
tag: nightly-runtime-abi-${{ matrix.abi }}-cuda-${{matrix.cuda}}
rm: true
token: ${{ secrets.GITHUB_TOKEN }}
files: |
Expand Down
18 changes: 15 additions & 3 deletions .github/workflows/runtime_tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ jobs:
python-version: [3.7, 3.8, 3.9]
platform: [linux.2xlarge]
abi: [0,1]
cuda: [0,1]
fail-fast: false
runs-on: ${{ matrix.platform }}
steps:
Expand All @@ -30,6 +31,7 @@ jobs:
- name: setup Path
run: |
echo /usr/local/cuda-11.3/bin >> $GITHUB_PATH
echo "/home/ec2-user/miniconda/bin" >> $GITHUB_PATH
echo "CONDA=/home/ec2-user/miniconda" >> $GITHUB_PATH
Expand Down Expand Up @@ -78,7 +80,7 @@ jobs:
conda run -n example_env python -m pip install torch torchvision torchaudio pathlib
conda run -n example_env python generate_examples.py
- name: Build pytorch with ABI=${{ matrix.abi }}
- name: Build pytorch with ABI=${{ matrix.abi }} and USE_CUDA=${{ matrix.cuda }}
shell: bash -l {0}
env:
PYTHON_VERSION: ${{ matrix.python-version }}
Expand All @@ -88,6 +90,7 @@ jobs:
export TORCH_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=${{ matrix.abi }}"
cd multipy/runtime/third-party/pytorch
export USE_DEPLOY=1
export USE_CUDA=${{ matrix.cuda }}
conda run -n multipy_runtime_env python setup.py develop
- name: Build multipy runtime
Expand All @@ -109,11 +112,20 @@ jobs:
cd multipy/runtime/build
conda run -n multipy_runtime_env cmake --install . --prefix "."
- name: Run unit tests with ABI=${{ matrix.abi }}
if: ${{ matrix.abi }} == 1
- name: Run unit tests with ABI=${{ matrix.abi }} with cuda=${{ matrix.cuda }}
shell: bash -l {0}
env:
PYTHON_VERSION: ${{ matrix.python-version }}
run: |
cd multipy/runtime/build
./test_deploy
- name: Run unit tests with ABI=${{ matrix.abi }} with gpu
shell: bash -l {0}
env:
PYTHON_VERSION: ${{ matrix.python-version }}
run: |
if [[ ${{ matrix.cuda }} -eq 1 ]]
then
cd multipy/runtime/build
./test_deploy_gpu
fi
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,9 @@ internally, please see the related [arXiv paper](https://arxiv.org/pdf/2104.0025

### Installing `multipy::runtime` **(recommended)**

The C++ binaries (`libtorch_interpreter.so`,`libtorch_deploy.a`, `utils.cmake`), and the header files of `multipy::runtime` can be installed from our [nightly release](https://github.com/pytorch/multipy/releases/tag/nightly-runtime-abi-0). The ABI for the nightly release is 0. You can find a version of the release with ABI=1 [here](https://github.com/pytorch/multipy/releases/tag/nightly-runtime-abi-1).
The C++ binaries (`libtorch_interpreter.so`,`libtorch_deploy.a`, `utils.cmake`), and the header files of `multipy::runtime` can be installed from our [nightly release](https://github.com/pytorch/multipy/releases/tag/nightly-runtime-abi-0-cuda-0) (uses the pre-cxx11 ABI). You can find a version of the release with the [cxx11 ABI](https://github.com/pytorch/multipy/releases/tag/nightly-runtime-abi-1-cuda-0).

C++ binaries with cuda (11.3) support can also be found for the [pre-cxx11 ABI](https://github.com/pytorch/multipy/releases/tag/nightly-runtime-abi-0-cuda-1) and the [cxx11 ABI](https://github.com/pytorch/multipy/releases/tag/nightly-runtime-abi-1-cuda-1).

```
wget https://github.com/pytorch/multipy/releases/download/nightly-runtime-abi-0-cuda-0/multipy_runtime.tar.gz
Expand Down
16 changes: 7 additions & 9 deletions multipy/runtime/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -86,14 +86,12 @@ target_link_libraries(test_deploy
)
target_include_directories(test_deploy PRIVATE ${CMAKE_SOURCE_DIR}/../..)

# LINK_DIRECTORIES("${PYTORCH_ROOT}/torch/lib")
# add_executable(test_deploy_gpu ${INTERPRETER_TEST_SOURCES_GPU})
# target_compile_definitions(test_deploy_gpu PUBLIC TEST_CUSTOM_LIBRARY)
# target_include_directories(test_deploy_gpu PRIVATE ${PYTORCH_ROOT}/torch)
# target_include_directories(test_deploy_gpu PRIVATE ${CMAKE_SOURCE_DIR}/../..)
# target_link_libraries(test_deploy_gpu
# PUBLIC "-Wl,--no-as-needed -rdynamic" gtest dl torch_deploy_interface c10 torch_cpu
# )
LINK_DIRECTORIES("${PYTORCH_ROOT}/torch/lib")
add_executable(test_deploy_gpu ${INTERPRETER_TEST_SOURCES_GPU})
target_compile_definitions(test_deploy_gpu PUBLIC TEST_CUSTOM_LIBRARY)
target_include_directories(test_deploy_gpu PRIVATE ${PYTORCH_ROOT}/torch)
target_include_directories(test_deploy_gpu PRIVATE ${CMAKE_SOURCE_DIR}/../..)
target_link_libraries(test_deploy_gpu PUBLIC "-Wl,--no-as-needed -rdynamic" gtest dl torch_deploy_interface c10 torch_cpu)

LINK_DIRECTORIES("${PYTORCH_ROOT}/torch/lib")
add_library(test_deploy_lib SHARED test_deploy_lib.cpp)
Expand All @@ -119,7 +117,7 @@ target_link_libraries(interactive_embedded_interpreter
)

install(TARGETS test_deploy DESTINATION tests/bin)
# install(TARGETS test_deploy_gpu DESTINATION tests/bin)
install(TARGETS test_deploy_gpu DESTINATION tests/bin)

install(TARGETS test_deploy DESTINATION tests/bin)

Expand Down
3 changes: 1 addition & 2 deletions multipy/runtime/test_deploy_gpu.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,7 @@ TEST(TorchDeployGPUTest, SimpleModel) {

TEST(TorchDeployGPUTest, UsesDistributed) {
const auto model_filename = path(
"USES_DISTRIBUTED",
"torch/csrc/deploy/example/generated/uses_distributed");
"USES_DISTRIBUTED", "multipy/runtime/example/generated/uses_distributed");
torch::deploy::InterpreterManager m(1);
torch::deploy::Package p = m.loadPackage(model_filename);
{
Expand Down

0 comments on commit 49b629a

Please sign in to comment.