diff --git a/python-package/README.md b/python-package/README.md
index 69c199b56..063adf903 100644
--- a/python-package/README.md
+++ b/python-package/README.md
@@ -14,7 +14,7 @@
 For ``insightface<=0.1.5``, we use MXNet as inference backend.
 
 Starting from insightface>=0.2, we use onnxruntime as inference backend.
 
-You have to install ``onnxruntime-gpu`` manually to enable GPU inference, or install ``onnxruntime`` to use CPU only inference.
+You have to install ``onnxruntime-gpu`` manually to enable GPU inference, install ``onnxruntime-cann`` manually to enable Ascend NPU inference, or install ``onnxruntime`` for CPU-only inference.
 
 ## Change Log
diff --git a/python-package/insightface/model_zoo/model_zoo.py b/python-package/insightface/model_zoo/model_zoo.py
index fc6283114..11938ec9f 100644
--- a/python-package/insightface/model_zoo/model_zoo.py
+++ b/python-package/insightface/model_zoo/model_zoo.py
@@ -8,6 +8,7 @@
 import os.path as osp
 import glob
 import onnxruntime
+import importlib.util
 from .arcface_onnx import *
 from .retinaface import *
 #from .scrfd import *
@@ -68,7 +69,12 @@
     return paths[-1]
 
 def get_default_providers():
-    return ['CUDAExecutionProvider', 'CPUExecutionProvider']
+    # `acl` is a base module on Ascend NPU hosts; if it is importable, the code is running on an Ascend device.
+    if importlib.util.find_spec('acl') is not None:
+        providers = ['CANNExecutionProvider', 'CPUExecutionProvider']
+    else:
+        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+    return providers
 
 def get_default_provider_options():
     return None
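
For the README change: a quick way to confirm which backend package is active is to list the execution providers the installed onnxruntime build exposes. `onnxruntime.get_available_providers()` is the standard API for this; a minimal sketch (the printed names depend on whether `onnxruntime`, `onnxruntime-gpu`, or `onnxruntime-cann` is installed):

```python
import onnxruntime

# Lists the providers compiled into the installed onnxruntime package.
# 'CUDAExecutionProvider' appears with onnxruntime-gpu, 'CANNExecutionProvider'
# with onnxruntime-cann; 'CPUExecutionProvider' is always present.
print(onnxruntime.get_available_providers())
```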
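
For the `model_zoo.py` change: the provider list returned by `get_default_providers()` is ultimately passed to `onnxruntime.InferenceSession`, which tries providers left to right. A standalone sketch of the detection logic plus session creation, under two stated assumptions: the `model.onnx` path is hypothetical, and the requested providers are filtered against the available ones first, since recent onnxruntime versions reject provider names that are not in the installed build:

```python
import importlib.util
import onnxruntime

def get_default_providers():
    # Mirrors the patch: prefer CANN on Ascend hosts (detected via the
    # `acl` base module), otherwise prefer CUDA; keep the CPU fallback last.
    if importlib.util.find_spec('acl') is not None:
        return ['CANNExecutionProvider', 'CPUExecutionProvider']
    return ['CUDAExecutionProvider', 'CPUExecutionProvider']

# Hypothetical model path, for illustration only.
model_path = 'model.onnx'

# Keep only providers the installed onnxruntime build actually exposes.
available = set(onnxruntime.get_available_providers())
providers = [p for p in get_default_providers() if p in available]

session = onnxruntime.InferenceSession(model_path, providers=providers)
print('Using providers:', session.get_providers())
```

Keeping `CPUExecutionProvider` last in both branches preserves the existing fallback behavior: if neither the CANN nor the CUDA provider can be initialized, inference still runs on CPU.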