当前位置: 首页 > news >正文

采用sherpa-onnx 实现 ios语音唤起的调研

背景

项目中需要实现一个语音唤起的功能,选择sherpa-onnx进行调研,要求各端都要验证没有问题。个人负责iOS这部分的调研。查询官方文档发现没有直接针对iOS语音唤起的说明,主流技术社区也没有相关实现可以借鉴。经过调研之后补充了一个iOS端的实现。

一、下载 sherpa-onnx

https://github.com/k2-fsa/sherpa-onnx

二、构建 iOS 版 Sherpa-onnx

根据官方说明文档,先把项目构建编译起来。地址:Build sherpa-onnx for iOS 。根据文档要求安装环境,大约需要两个小时。最终效果是代码可以编译起来并且可以正常运行demo。
demo构建起来只是实现了语音转文字功能,还没有语音唤起能力。

三、补充语音唤起能力

目前官方只有安卓语音唤起的说明文档,但是总文档中说支持iOS系统,那么原理应该是相通的。在iOS的demo中搜索关键词 Keyword 发现,已经实现了相关api。那么只要找到语音唤起模型并实现调用即可。
以下是构建成功之后的demo源码地址:
1、sherpa-onnx-ios-demo 。构建后的 ios-swift demo。构建后的全部内容有好几个G,因此只提交了部分内容。该demo需要在第二步运行成功之后,把该demo与源目录替换即可编译运行。
2、ios的语音模型、自定义关键词模型(用于语音唤起能力)

四、demo介绍

1、目录结构

在这里插入图片描述

2、sdk调用

Model.swift

// 在Model.swift中添加
// MARK: - Keyword spotting (KWS) additions to Model.swift

/// Online transducer model configuration for the keyword spotter.
///
/// NOTE(review): the resource names below must match the KWS model files
/// bundled with the app — confirm they exist in the app bundle.
func getKwsConfig() -> SherpaOnnxOnlineModelConfig {
    // An earlier attempt that reused the epoch-99-avg-1 ASR model, kept for reference:
    //   let encoder = getResource("encoder-epoch-99-avg-1", "onnx")
    //   let decoder = getResource("decoder-epoch-99-avg-1", "onnx")
    //   let joiner = getResource("joiner-epoch-99-avg-1", "onnx")
    //   let tokens = getResource("kws_tokens", "txt")
    let encoderPath = getResource("encoder-epoch-12-avg-2-chunk-16-left-64", "onnx")
    let decoderPath = getResource("decoder-epoch-12-avg-2-chunk-16-left-64", "onnx")
    let joinerPath = getResource("joiner-epoch-12-avg-2-chunk-16-left-64", "onnx")
    let tokensPath = getResource("kws_tokens", "txt")

    let transducer = sherpaOnnxOnlineTransducerModelConfig(
        encoder: encoderPath,
        decoder: decoderPath,
        joiner: joinerPath)

    return sherpaOnnxOnlineModelConfig(
        tokens: tokensPath,
        transducer: transducer,
        numThreads: 2,
        modelType: "zipformer2")
}

/// Path of the keywords file consumed by the spotter.
func getKeywordsFilePath() -> String {
    return getResource("keywords", "txt")  // file content: 小苗小苗
}

/// Please refer to
/// https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
/// to download pre-trained models.

/// sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20 (Bilingual, Chinese + English)
/// https://k2-fsa.github.io/sherpa/onnx/pretrained_models/zipformer-transducer-models.html
func getBilingualStreamZhEnZipformer20230220() -> SherpaOnnxOnlineModelConfig {
    let encoderPath = getResource("encoder-epoch-99-avg-1", "onnx")
    let decoderPath = getResource("decoder-epoch-99-avg-1", "onnx")
    let joinerPath = getResource("joiner-epoch-99-avg-1", "onnx")
    let tokensPath = getResource("tokens", "txt")

    let transducer = sherpaOnnxOnlineTransducerModelConfig(
        encoder: encoderPath,
        decoder: decoderPath,
        joiner: joinerPath)

    return sherpaOnnxOnlineModelConfig(
        tokens: tokensPath,
        transducer: transducer,
        numThreads: 1,
        modelType: "zipformer")
}

ViewController.swift

//
//  ViewController.swift
//  SherpaOnnx
//
//  Created by fangjun on 2023/1/28.
//

import AVFoundation
import UIKit

// MARK: - Float-sample helpers

extension AudioBuffer {
    /// Reinterprets this buffer's raw memory as an array of Float samples.
    /// NOTE(review): assumes the buffer actually holds Float32 PCM data —
    /// true for the converter output format used in this file.
    func array() -> [Float] {
        return Array(UnsafeBufferPointer(self))
    }
}

extension AVAudioPCMBuffer {
    /// Flattens this PCM buffer's first channel (`mBuffers`) into `[Float]`;
    /// sufficient here because the converter output is mono.
    func array() -> [Float] {
        return self.audioBufferList.pointee.mBuffers.array()
    }
}
/// Demo controller that runs keyword spotting (wake word) and streaming ASR
/// side by side on the microphone input.
///
/// Pipeline: AVAudioEngine tap → AVAudioConverter (→ 16 kHz mono Float32)
/// → `processKeywordDetection` always, `processASR` only when awakened or
/// explicitly recording.
class ViewController: UIViewController {

    // MARK: - Outlets

    @IBOutlet weak var resultLabel: UILabel!
    @IBOutlet weak var recordBtn: UIButton!

    // MARK: - Engines and recognizers

    var audioEngine: AVAudioEngine? = nil
    var recognizer: SherpaOnnxRecognizer! = nil
    // Keyword-wakeup related state.
    var kwsSpotter: SherpaOnnxKeywordSpotterWrapper!
    var isAwakened = false

    /// It saves the decoded results so far.
    var sentences: [String] = [] {
        didSet {
            updateLabel()
        }
    }
    var lastSentence: String = ""
    let maxSentence: Int = 20

    /// Renders at most `maxSentence` finished sentences plus the in-progress
    /// one, each prefixed with its index, lowercased, one per line.
    var results: String {
        if sentences.isEmpty && lastSentence.isEmpty {
            return ""
        }
        if sentences.isEmpty {
            return "0: \(lastSentence.lowercased())"
        }

        let start = max(sentences.count - maxSentence, 0)
        let numbered = sentences.enumerated().map { (index, s) in
            "\(index): \(s.lowercased())"
        }

        if lastSentence.isEmpty {
            return numbered[start...].joined(separator: "\n")
        }
        return numbered[start...].joined(separator: "\n")
            + "\n\(sentences.count): \(lastSentence.lowercased())"
    }

    func updateLabel() {
        DispatchQueue.main.async {
            self.resultLabel.text = self.results
        }
    }

    // MARK: - Lifecycle

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
        resultLabel.text = "ASR with Next-gen Kaldi\n\nSee https://github.com/k2-fsa/sherpa-onnx\n\nPress the Start button to run!"
        recordBtn.setTitle("Start", for: .normal)
        initRecognizer()
        initKeywordSpotter()
        initRecorder()
        // The engine starts immediately so the wake word is detected even
        // before the user presses Start.
        startRecorder()
    }

    // MARK: - Keyword spotter setup

    func initKeywordSpotter() {
        let modelConfig = getKwsConfig()
        let featConfig = sherpaOnnxFeatureConfig()
        var config = sherpaOnnxKeywordSpotterConfig(
            featConfig: featConfig,
            modelConfig: modelConfig,
            keywordsFile: getKeywordsFilePath(),
            maxActivePaths: 4,
            numTrailingBlanks: 2,
            keywordsScore: 1.5,
            keywordsThreshold: 0.25)
        kwsSpotter = SherpaOnnxKeywordSpotterWrapper(config: &config)
    }

    // Tracks whether the user explicitly started recording via the button.
    private var isRecording = false

    @IBAction func onRecordBtnClick(_ sender: UIButton) {
        if recordBtn.currentTitle == "Start" {
            isAwakened = true  // force awakened state
            startRecorder()
            recordBtn.setTitle("Stop", for: .normal)
        } else {
            isAwakened = false
            stopRecorder()
            recordBtn.setTitle("Start", for: .normal)
        }
    }

    // MARK: - ASR setup

    func initRecognizer() {
        // Please select one model that is best suitable for you.
        //
        // You can also modify Model.swift to add new pre-trained models from
        // https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
        let modelConfig = getBilingualStreamZhEnZipformer20230220()
        // let modelConfig = getZhZipformer20230615()
        // let modelConfig = getEnZipformer20230626()
        //        let modelConfig = getBilingualStreamingZhEnParaformer()

        let featConfig = sherpaOnnxFeatureConfig(
            sampleRate: 16000,
            featureDim: 80)
        var config = sherpaOnnxOnlineRecognizerConfig(
            featConfig: featConfig,
            modelConfig: modelConfig,
            enableEndpoint: true,
            rule1MinTrailingSilence: 2.4,
            rule2MinTrailingSilence: 0.8,
            rule3MinUtteranceLength: 30,
            decodingMethod: "greedy_search",
            maxActivePaths: 4)
        recognizer = SherpaOnnxRecognizer(config: &config)
    }

    // MARK: - Audio capture

    func initRecorder() {
        print("init recorder")
        audioEngine = AVAudioEngine()
        let inputNode = self.audioEngine?.inputNode
        let bus = 0
        let inputFormat = inputNode?.outputFormat(forBus: bus)
        // Both models expect 16 kHz mono Float32, so convert the mic format.
        let outputFormat = AVAudioFormat(
            commonFormat: .pcmFormatFloat32,
            sampleRate: 16000,
            channels: 1,
            interleaved: false)!
        let converter = AVAudioConverter(from: inputFormat!, to: outputFormat)!

        inputNode!.installTap(onBus: bus, bufferSize: 1024, format: inputFormat) {
            (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
            // Hand the tapped buffer to the converter exactly once per tap.
            var newBufferAvailable = true
            let inputCallback: AVAudioConverterInputBlock = { inNumPackets, outStatus in
                if newBufferAvailable {
                    outStatus.pointee = .haveData
                    newBufferAvailable = false
                    return buffer
                } else {
                    outStatus.pointee = .noDataNow
                    return nil
                }
            }

            // Capacity scaled by the sample-rate ratio so the converted
            // audio fits in one buffer.
            let convertedBuffer = AVAudioPCMBuffer(
                pcmFormat: outputFormat,
                frameCapacity: AVAudioFrameCount(outputFormat.sampleRate)
                    * buffer.frameLength
                    / AVAudioFrameCount(buffer.format.sampleRate))!

            var error: NSError?
            let _ = converter.convert(
                to: convertedBuffer,
                error: &error,
                withInputFrom: inputCallback)
            // TODO(fangjun): Handle status != haveData

            let array = convertedBuffer.array()
            // Feed keyword detection and ASR in parallel.
            self.processKeywordDetection(array)
            if self.isAwakened || self.isRecording {
                self.processASR(array)
            }
        }
    }

    // MARK: - Keyword detection

    // Simple debounce: require `requiredConfidence` consecutive-ish hits
    // before declaring a wakeup.
    var wakeupConfidence = 0
    let requiredConfidence = 2

    private func processKeywordDetection(_ samples: [Float]) {
        kwsSpotter.acceptWaveform(samples: samples)
        while kwsSpotter.isReady() {
            kwsSpotter.decode()
            let result = kwsSpotter.getResult()
            let detected = result.keyword.lowercased().contains("小苗小苗")
            print("demo----> result.keyword:\(result.keyword)    detected:\(detected)")

            wakeupConfidence = detected
                ? min(wakeupConfidence + 1, requiredConfidence)
                : max(wakeupConfidence - 1, 0)

            if wakeupConfidence >= requiredConfidence && !isAwakened {
                handleWakeupEvent()
                wakeupConfidence = 0
            }
        }
    }

    // MARK: - ASR

    private func processASR(_ samples: [Float]) {
        if !samples.isEmpty {
            recognizer.acceptWaveform(samples: samples)
            while (recognizer.isReady()) {
                recognizer.decode()
            }

            let isEndpoint = recognizer.isEndpoint()
            let text = recognizer.getResult().text

            if !text.isEmpty && lastSentence != text {
                lastSentence = text
                updateLabel()
                print(text)
            }

            if isEndpoint {
                if !text.isEmpty {
                    let tmp = self.lastSentence
                    lastSentence = ""
                    sentences.append(tmp)
                }
                recognizer.reset()
            }
        }
    }

    private func handleWakeupEvent() {
        isAwakened = true
        DispatchQueue.main.async {
            //            self.resultLabel.text = "已唤醒,请开始说话..."
            // Optionally auto-start the recording flow.
            if self.recordBtn.currentTitle == "Start" {
                self.startRecorder()
                self.recordBtn.setTitle("Stop", for: .normal)
            }
        }
        // Reset ASR state.
        recognizer.reset()
    }

    // MARK: - Recorder control

    func startRecorder() {
        lastSentence = ""
        sentences = []
        do {
            try self.audioEngine?.start()
            isRecording = true
        } catch let error as NSError {
            print("Got an error starting audioEngine: \(error.domain), \(error)")
        }
        print("started")
    }

    func stopRecorder() {
        audioEngine?.stop()
        kwsSpotter.reset()
        recognizer.reset()
        isRecording = false
        print("stopped")
    }
}
http://www.xdnf.cn/news/469621.html

相关文章:

  • 每周靶点:NY-ESO-1、GPC3、IL27分享
  • Linux操作
  • Oracle APEX IR报表列宽调整
  • [ctfshow web入门] web75
  • 运维实施30-FTP服务
  • 欧拉计划 Project Euler 73(分数有范围计数)题解
  • ABP User Interface-Angular UI中文详解
  • Loki的部署搭建
  • JS手写代码篇---手写 Object.create
  • 哈夫曼树完全解析:从原理到应用
  • 接口测试知识详解
  • 亚马逊运营中评论体系构建与高效索评策略解析!
  • 4寸工业三防手持机PDA,助力仓储高效管理
  • 【在qiankun模式下el-dropdown点击,浏览器报Failed to execute ‘getComputedStyle‘ on ‘Window‘: parameter 1 is not o
  • 亚马逊,temu测评采购低成本养号策略:如何用一台设备安全批量管理买家账号
  • 英语学习笔记
  • 移动端网络调试全流程:从常见抓包工具到Sniffmaster 的实战体验
  • Web》》url 参数 # 、 ? 、@
  • manuskript开源程序是面向作家的开源工具
  • Cursor vs VS Code vs Zed
  • deepseek讲解如何快速解决内存泄露,内存溢出问题
  • 拉取sset docker镜像
  • 经典卷积神经网络
  • 【Java ee初阶】http(1)
  • 【电子通识】热敏纸定义、分类与内在质量
  • 无人机避障——深蓝学院浙大Fast-planner学习部分(前端部分)
  • Java JSON 数据绑定对象的注意事项
  • 【FMC216】基于 VITA57.1 的 2 路 TLK2711 发送、2 路 TLK2711 接收 FMC 子卡模块
  • iOS性能调优实践:我常用的工具与流程(含克魔 KeyMob 使用体验)
  • 养生:健康生活的核心策略