From d542df42d10c0b0fc396d2a16b9941984242e84a Mon Sep 17 00:00:00 2001
From: YangXiuyu
Date: Mon, 5 Dec 2022 14:59:02 +0800
Subject: [PATCH] fix: ci

---
 .github/workflows/ci.yml                   | 2 ++
 server/clip_server/executors/clip_onnx.py  | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 6cea783a0..5566f98d8 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -168,6 +168,8 @@ jobs:
         run: |
           pytest --suppress-no-test-exit-code --cov=clip_client --cov=clip_server --cov-report=xml \
             -v -s -m "gpu" ./tests/test_tensorrt.py
+          pytest --suppress-no-test-exit-code --cov=clip_client --cov=clip_server --cov-report=xml \
+            -v -s -m "gpu" ./tests/test_simple.py
           echo "::set-output name=codecov_flag::cas"
         timeout-minutes: 30
         env:
diff --git a/server/clip_server/executors/clip_onnx.py b/server/clip_server/executors/clip_onnx.py
index ea0bd9031..039c2469b 100644
--- a/server/clip_server/executors/clip_onnx.py
+++ b/server/clip_server/executors/clip_onnx.py
@@ -42,7 +42,7 @@ def __init__(
         :param model_path: The path to the model to be used. If not specified, the model will be
             downloaded or loaded from the local cache.
             Visit https://clip-as-service.jina.ai/user-guides/server/#use-custom-model-for-onnx
             to learn how to finetune custom models.
-        :param dtype: inference data type, if None defaults to None.
+        :param dtype: inference data type, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
         """
         super().__init__(**kwargs)
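
Note: the corrected docstring says the default dtype is 'fp32' on CPU and 'fp16' otherwise. A minimal sketch of that default-resolution rule follows; resolve_dtype is a hypothetical helper written only to illustrate the docstring, not clip_server's actual implementation.

from typing import Optional


def resolve_dtype(dtype: Optional[str] = None, device: str = 'cpu') -> str:
    # Hypothetical helper: mirrors the default described in the patched
    # docstring. fp32 keeps full precision on CPU; fp16 halves memory and
    # speeds up inference on GPU, where half precision is well supported.
    if dtype is None:
        dtype = 'fp32' if device == 'cpu' else 'fp16'
    return dtype


# Expected behavior under the docstring's rule:
assert resolve_dtype(device='cpu') == 'fp32'
assert resolve_dtype(device='cuda') == 'fp16'
assert resolve_dtype(dtype='fp32', device='cuda') == 'fp32'  # explicit dtype wins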