From a3488ed13d80bc529d7c50679038393073bd8474 Mon Sep 17 00:00:00 2001 From: huchengyi Date: Thu, 12 Feb 2026 10:03:05 +0800 Subject: [PATCH] Support Huawei NPU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # 华为NPU适配方法 ## 测试环境 ### NPU相关软件版本 1. NPU驱动 npu-smi 25.0.rc1.1 Version: 25.0.rc1.1 2. CANN相关 - ./Ascend-cann-toolkit_8.5.0_linux-aarch64.run --install - ./Ascend-cann-{npu-version}-ops_8.5.0_linux-aarch64.run --install # 请根据不同型号下载不同安装程序 ### 依赖包安装 1. pip install torch==2.8.0 pytorch-wpe pytorch-wavelets rotary-embedding-torch torch-complex torchaudio 2. pip install torch_npu==2.8.0 3. pip install funasr==1.3.1 ## 代码修改 ### 模型适配代码 修改 ~/funasr/auto/auto_model.py ``` try: import torch_npu npu_is_available = torch_npu.npu.is_available() except ImportError: npu_is_available = False def build_model(**kwargs): ... or (device == "npu" and not npu_is_available) ... ``` ### 测试验证代码 ``` from funasr import AutoModel model = AutoModel( model="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch", disable_update=True, device="npu" ) res = model.generate( input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", hotword="达摩院 魔搭", ) print(res) ``` --- funasr/auto/auto_model.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py index 9bf1ad57e..81ecaa879 100644 --- a/funasr/auto/auto_model.py +++ b/funasr/auto/auto_model.py @@ -29,7 +29,11 @@ from funasr.train_utils.load_pretrained_model import load_pretrained_model from funasr.utils import export_utils from funasr.utils import misc - +try: + import torch_npu + npu_is_available = torch_npu.npu.is_available() +except ImportError: + npu_is_available = False def _resolve_ncpu(config, fallback=4): """Return a positive integer representing CPU threads from config.""" @@ -199,6 +203,7 @@ def build_model(**kwargs): if ((device =="cuda" and not
torch.cuda.is_available()) or (device == "xpu" and not torch.xpu.is_available()) or (device == "mps" and not torch.backends.mps.is_available()) + or (device == "npu" and not npu_is_available) or kwargs.get("ngpu", 1) == 0): device = "cpu" kwargs["batch_size"] = 1