diff --git a/.github/scripts/test-dart.sh b/.github/scripts/test-dart.sh
index f392d204a..9da908f48 100755
--- a/.github/scripts/test-dart.sh
+++ b/.github/scripts/test-dart.sh
@@ -4,6 +4,33 @@ set -ex
 
 cd dart-api-examples
 
+pushd vad-with-non-streaming-asr
+echo '----------TeleSpeech CTC----------'
+./run-telespeech-ctc.sh
+rm -rf sherpa-onnx-*
+
+echo "----zipformer transducer----"
+./run-zipformer-transducer.sh
+rm -rf sherpa-onnx-*
+
+echo "----whisper----"
+./run-whisper.sh
+rm -rf sherpa-onnx-*
+
+echo "----paraformer----"
+./run-paraformer.sh
+rm -rf sherpa-onnx-*
+
+echo "----SenseVoice zh----"
+./run-sense-voice-zh.sh
+rm -rf sherpa-onnx-*
+
+echo "----SenseVoice en----"
+./run-sense-voice-en.sh
+rm -rf sherpa-onnx-*
+
+popd
+
 pushd keyword-spotter
 ./run-zh.sh
 popd
diff --git a/.github/workflows/test-dart.yaml b/.github/workflows/test-dart.yaml
index 387cdebc2..eb5c0b2b0 100644
--- a/.github/workflows/test-dart.yaml
+++ b/.github/workflows/test-dart.yaml
@@ -109,6 +109,8 @@ jobs:
           cp scripts/dart/streaming-asr-pubspec.yaml dart-api-examples/streaming-asr/pubspec.yaml
           cp scripts/dart/tts-pubspec.yaml dart-api-examples/tts/pubspec.yaml
           cp scripts/dart/kws-pubspec.yaml dart-api-examples/keyword-spotter/pubspec.yaml
+          cp scripts/dart/vad-non-streaming-asr-pubspec.yaml dart-api-examples/vad-with-non-streaming-asr/pubspec.yaml
+          cp scripts/dart/sherpa-onnx-pubspec.yaml flutter/sherpa_onnx/pubspec.yaml
 
           .github/scripts/test-dart.sh
diff --git a/dart-api-examples/README.md b/dart-api-examples/README.md
index 930037160..855691e5b 100644
--- a/dart-api-examples/README.md
+++ b/dart-api-examples/README.md
@@ -5,6 +5,17 @@ This directory contains examples for Dart API.
 You can find the package at
 https://pub.dev/packages/sherpa_onnx
 
+## Description
+
+| Directory | Description |
+|-----------|-------------|
+| [./keyword-spotter](./keyword-spotter)| Example for keyword spotting|
+| [./non-streaming-asr](./non-streaming-asr)| Example for non-streaming speech recognition|
+| [./streaming-asr](./streaming-asr)| Example for streaming speech recognition|
+| [./tts](./tts)| Example for text to speech|
+| [./vad](./vad)| Example for voice activity detection|
+| [./vad-with-non-streaming-asr](./vad-with-non-streaming-asr)| Example for voice activity detection with non-streaming speech recognition. You can use it to generate subtitles.|
+
 ## How to create an example in this folder
 
 ```bash
diff --git a/dart-api-examples/non-streaming-asr/bin/sense-voice.dart b/dart-api-examples/non-streaming-asr/bin/sense-voice.dart
index 055cc9a6c..d02fcaf6d 100644
--- a/dart-api-examples/non-streaming-asr/bin/sense-voice.dart
+++ b/dart-api-examples/non-streaming-asr/bin/sense-voice.dart
@@ -11,7 +11,7 @@ void main(List<String> arguments) async {
   await initSherpaOnnx();
 
   final parser = ArgParser()
-    ..addOption('model', help: 'Path to the paraformer model')
+    ..addOption('model', help: 'Path to the SenseVoice model')
     ..addOption('tokens', help: 'Path to tokens.txt')
     ..addOption('language',
         help: 'auto, zh, en, ja, ko, yue, or leave it empty to use auto',
diff --git a/dart-api-examples/vad-with-non-streaming-asr/.gitignore b/dart-api-examples/vad-with-non-streaming-asr/.gitignore
new file mode 100644
index 000000000..3a8579040
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/.gitignore
@@ -0,0 +1,3 @@
+# https://dart.dev/guides/libraries/private-files
+# Created by `dart pub`
+.dart_tool/
diff --git a/dart-api-examples/vad-with-non-streaming-asr/README.md b/dart-api-examples/vad-with-non-streaming-asr/README.md
new file mode 100644
index 000000000..e82c206eb
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/README.md
@@ -0,0 +1,13 @@
+# Introduction
+
+This folder contains examples for non-streaming ASR + voice activity detection
+with the Dart API.
+
+| File | Description|
+|------|------------|
+|[./bin/paraformer.dart](./bin/paraformer.dart)| Use a Paraformer model for speech recognition. See [./run-paraformer.sh](./run-paraformer.sh)|
+|[./bin/sense-voice.dart](./bin/sense-voice.dart)| Use a SenseVoice CTC model for speech recognition. See [./run-sense-voice-zh.sh](./run-sense-voice-zh.sh) and [./run-sense-voice-en.sh](./run-sense-voice-en.sh)|
+|[./bin/telespeech-ctc.dart](./bin/telespeech-ctc.dart)| Use a TeleSpeech CTC model for speech recognition. See [./run-telespeech-ctc.sh](./run-telespeech-ctc.sh)|
+|[./bin/whisper.dart](./bin/whisper.dart)| Use a Whisper model for speech recognition. See [./run-whisper.sh](./run-whisper.sh)|
+|[./bin/zipformer-transducer.dart](./bin/zipformer-transducer.dart)| Use a Zipformer transducer model for speech recognition. See [./run-zipformer-transducer.sh](./run-zipformer-transducer.sh)|
+
diff --git a/dart-api-examples/vad-with-non-streaming-asr/analysis_options.yaml b/dart-api-examples/vad-with-non-streaming-asr/analysis_options.yaml
new file mode 100644
index 000000000..dee8927aa
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/analysis_options.yaml
@@ -0,0 +1,30 @@
+# This file configures the static analysis results for your project (errors,
+# warnings, and lints).
+#
+# This enables the 'recommended' set of lints from `package:lints`.
+# This set helps identify many issues that may lead to problems when running
+# or consuming Dart code, and enforces writing Dart using a single, idiomatic
+# style and format.
+#
+# If you want a smaller set of lints you can change this to specify
+# 'package:lints/core.yaml'. These are just the most critical lints
+# (the recommended set includes the core lints).
+# The core lints are also what is used by pub.dev for scoring packages.
+
+include: package:lints/recommended.yaml
+
+# Uncomment the following section to specify additional rules.
+
+# linter:
+#   rules:
+#     - camel_case_types
+
+# analyzer:
+#   exclude:
+#     - path/to/excluded/files/**
+
+# For more information about the core and recommended set of lints, see
+# https://dart.dev/go/core-lints
+
+# For additional information about configuring this file, see
+# https://dart.dev/guides/language/analysis-options
diff --git a/dart-api-examples/vad-with-non-streaming-asr/bin/init.dart b/dart-api-examples/vad-with-non-streaming-asr/bin/init.dart
new file mode 120000
index 000000000..48508cfd3
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/bin/init.dart
@@ -0,0 +1 @@
+../../vad/bin/init.dart
\ No newline at end of file
diff --git a/dart-api-examples/vad-with-non-streaming-asr/bin/paraformer.dart b/dart-api-examples/vad-with-non-streaming-asr/bin/paraformer.dart
new file mode 100644
index 000000000..744de34e7
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/bin/paraformer.dart
@@ -0,0 +1,123 @@
+// Copyright (c) 2024 Xiaomi Corporation
+import 'dart:io';
+import 'dart:typed_data';
+
+import 'package:args/args.dart';
+import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
+
+import './init.dart';
+
+void main(List<String> arguments) async {
+  await initSherpaOnnx();
+
+  final parser = ArgParser()
+    ..addOption('silero-vad', help: 'Path to silero_vad.onnx')
+    ..addOption('model', help: 'Path to the paraformer model')
+    ..addOption('tokens', help: 'Path to tokens.txt')
+    ..addOption('input-wav', help: 'Path to input.wav to transcribe');
+
+  final res = parser.parse(arguments);
+  if (res['silero-vad'] == null ||
+      res['model'] == null ||
+      res['tokens'] == null ||
+      res['input-wav'] == null) {
+    print(parser.usage);
+    exit(1);
+  }
+
+  // create VAD
+  final sileroVad = res['silero-vad'] as String;
+
+  final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
+    model: sileroVad,
+    minSilenceDuration: 0.25,
+    minSpeechDuration: 0.5,
+  );
+
+  final vadConfig = sherpa_onnx.VadModelConfig(
+    sileroVad: sileroVadConfig,
+    numThreads: 1,
+    debug: true,
+  );
+
+  final vad = sherpa_onnx.VoiceActivityDetector(
+      config: vadConfig, bufferSizeInSeconds: 10);
+
+  // create paraformer recognizer
+  final model = res['model'] as String;
+  final tokens = res['tokens'] as String;
+  final inputWav = res['input-wav'] as String;
+
+  final paraformer = sherpa_onnx.OfflineParaformerModelConfig(
+    model: model,
+  );
+
+  final modelConfig = sherpa_onnx.OfflineModelConfig(
+    paraformer: paraformer,
+    tokens: tokens,
+    debug: true,
+    numThreads: 1,
+    modelType: 'paraformer',
+  );
+  final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
+  final recognizer = sherpa_onnx.OfflineRecognizer(config);
+
+  final waveData = sherpa_onnx.readWave(inputWav);
+  if (waveData.sampleRate != 16000) {
+    print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
+    exit(1);
+  }
+
+  int numSamples = waveData.samples.length;
+  int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
+
+  for (int i = 0; i != numIter; ++i) {
+    int start = i * vadConfig.sileroVad.windowSize;
+    vad.acceptWaveform(Float32List.sublistView(
+        waveData.samples, start, start + vadConfig.sileroVad.windowSize));
+
+    if (vad.isDetected()) {
+      while (!vad.isEmpty()) {
+        final samples = vad.front().samples;
+        final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+        final endTime =
+            startTime + samples.length.toDouble() / waveData.sampleRate;
+
+        final stream = recognizer.createStream();
+        stream.acceptWaveform(
+            samples: samples, sampleRate: waveData.sampleRate);
+        recognizer.decode(stream);
+
+        final result = recognizer.getResult(stream);
+        stream.free();
+        print(
+            '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+        vad.pop();
+      }
+    }
+  }
+
+  vad.flush();
+
+  while (!vad.isEmpty()) {
+    final samples = vad.front().samples;
+    final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+    final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
+
+    final stream = recognizer.createStream();
+    stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
+    recognizer.decode(stream);
+
+    final result = recognizer.getResult(stream);
+    stream.free();
+    print(
+        '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+    vad.pop();
+  }
+
+  vad.free();
+
+  recognizer.free();
+}
diff --git a/dart-api-examples/vad-with-non-streaming-asr/bin/sense-voice.dart b/dart-api-examples/vad-with-non-streaming-asr/bin/sense-voice.dart
new file mode 100644
index 000000000..ef8c00681
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/bin/sense-voice.dart
@@ -0,0 +1,128 @@
+// Copyright (c) 2024 Xiaomi Corporation
+import 'dart:io';
+import 'dart:typed_data';
+
+import 'package:args/args.dart';
+import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
+
+import './init.dart';
+
+void main(List<String> arguments) async {
+  await initSherpaOnnx();
+
+  final parser = ArgParser()
+    ..addOption('silero-vad', help: 'Path to silero_vad.onnx')
+    ..addOption('model', help: 'Path to the SenseVoice model')
+    ..addOption('tokens', help: 'Path to tokens.txt')
+    ..addOption('language',
+        help: 'auto, zh, en, ja, ko, yue, or leave it empty to use auto',
+        defaultsTo: '')
+    ..addOption('use-itn',
+        help: 'true to use inverse text normalization', defaultsTo: 'false')
+    ..addOption('input-wav', help: 'Path to input.wav to transcribe');
+
+  final res = parser.parse(arguments);
+  if (res['silero-vad'] == null ||
+      res['model'] == null ||
+      res['tokens'] == null ||
+      res['input-wav'] == null) {
+    print(parser.usage);
+    exit(1);
+  }
+
+  // create VAD
+  final sileroVad = res['silero-vad'] as String;
+
+  final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
+    model: sileroVad,
+    minSilenceDuration: 0.25,
+    minSpeechDuration: 0.5,
+  );
+
+  final vadConfig = sherpa_onnx.VadModelConfig(
+    sileroVad: sileroVadConfig,
+    numThreads: 1,
+    debug: true,
+  );
+
+  final vad = sherpa_onnx.VoiceActivityDetector(
+      config: vadConfig, bufferSizeInSeconds: 10);
+
+  // create SenseVoice
+  final model = res['model'] as String;
+  final tokens = res['tokens'] as String;
+  final inputWav = res['input-wav'] as String;
+  final language = res['language'] as String;
+  final useItn = (res['use-itn'] as String).toLowerCase() == 'true';
+
+  final senseVoice =
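+      // The SenseVoice-specific options gathered from the command line:
+      // `language` is one of auto, zh, en, ja, ko, yue (an empty string
+      // means auto-detect, per the --language help above), and
+      // `useInverseTextNormalization` asks the model for written forms,
+      // e.g. digits instead of spelled-out numbers.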
+      sherpa_onnx.OfflineSenseVoiceModelConfig(
+          model: model, language: language, useInverseTextNormalization: useItn);
+
+  final modelConfig = sherpa_onnx.OfflineModelConfig(
+    senseVoice: senseVoice,
+    tokens: tokens,
+    debug: true,
+    numThreads: 1,
+  );
+  final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
+  final recognizer = sherpa_onnx.OfflineRecognizer(config);
+
+  final waveData = sherpa_onnx.readWave(inputWav);
+  if (waveData.sampleRate != 16000) {
+    print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
+    exit(1);
+  }
+
+  int numSamples = waveData.samples.length;
+  int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
+
+  for (int i = 0; i != numIter; ++i) {
+    int start = i * vadConfig.sileroVad.windowSize;
+    vad.acceptWaveform(Float32List.sublistView(
+        waveData.samples, start, start + vadConfig.sileroVad.windowSize));
+
+    if (vad.isDetected()) {
+      while (!vad.isEmpty()) {
+        final samples = vad.front().samples;
+        final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+        final endTime =
+            startTime + samples.length.toDouble() / waveData.sampleRate;
+
+        final stream = recognizer.createStream();
+        stream.acceptWaveform(
+            samples: samples, sampleRate: waveData.sampleRate);
+        recognizer.decode(stream);
+
+        final result = recognizer.getResult(stream);
+        stream.free();
+        print(
+            '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+        vad.pop();
+      }
+    }
+  }
+
+  vad.flush();
+
+  while (!vad.isEmpty()) {
+    final samples = vad.front().samples;
+    final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+    final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
+
+    final stream = recognizer.createStream();
+    stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
+    recognizer.decode(stream);
+
+    final result = recognizer.getResult(stream);
+    stream.free();
+    print(
+        '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+    vad.pop();
+  }
+
+  vad.free();
+
+  recognizer.free();
+}
diff --git a/dart-api-examples/vad-with-non-streaming-asr/bin/telespeech-ctc.dart b/dart-api-examples/vad-with-non-streaming-asr/bin/telespeech-ctc.dart
new file mode 100644
index 000000000..161644deb
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/bin/telespeech-ctc.dart
@@ -0,0 +1,120 @@
+// Copyright (c) 2024 Xiaomi Corporation
+import 'dart:io';
+import 'dart:typed_data';
+
+import 'package:args/args.dart';
+import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
+
+import './init.dart';
+
+void main(List<String> arguments) async {
+  await initSherpaOnnx();
+
+  final parser = ArgParser()
+    ..addOption('silero-vad', help: 'Path to silero_vad.onnx')
+    ..addOption('model', help: 'Path to the TeleSpeech CTC model')
+    ..addOption('tokens', help: 'Path to tokens.txt')
+    ..addOption('input-wav', help: 'Path to input.wav to transcribe');
+
+  final res = parser.parse(arguments);
+
+  if (res['silero-vad'] == null ||
+      res['model'] == null ||
+      res['tokens'] == null ||
+      res['input-wav'] == null) {
+    print(parser.usage);
+    exit(1);
+  }
+
+  // create VAD
+  final sileroVad = res['silero-vad'] as String;
+
+  final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
+    model: sileroVad,
+    minSilenceDuration: 0.25,
+    minSpeechDuration: 0.5,
+  );
+
+  final vadConfig = sherpa_onnx.VadModelConfig(
+    sileroVad: sileroVadConfig,
+    numThreads: 1,
+    debug: true,
+  );
+
+  final vad = sherpa_onnx.VoiceActivityDetector(
+      config: vadConfig,
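+      // Assumption: bufferSizeInSeconds is the length of the detector's
+      // internal audio buffer; detected speech segments wait there until
+      // the front()/pop() loop below drains them.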
+      bufferSizeInSeconds: 10);
+
+  // create TeleSpeech CTC recognizer
+  final model = res['model'] as String;
+  final tokens = res['tokens'] as String;
+  final inputWav = res['input-wav'] as String;
+
+  final modelConfig = sherpa_onnx.OfflineModelConfig(
+    telespeechCtc: model,
+    tokens: tokens,
+    debug: true,
+    numThreads: 1,
+    modelType: 'telespeech_ctc',
+  );
+  final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
+  final recognizer = sherpa_onnx.OfflineRecognizer(config);
+
+  final waveData = sherpa_onnx.readWave(inputWav);
+  if (waveData.sampleRate != 16000) {
+    print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
+    exit(1);
+  }
+
+  int numSamples = waveData.samples.length;
+  int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
+
+  for (int i = 0; i != numIter; ++i) {
+    int start = i * vadConfig.sileroVad.windowSize;
+    vad.acceptWaveform(Float32List.sublistView(
+        waveData.samples, start, start + vadConfig.sileroVad.windowSize));
+
+    if (vad.isDetected()) {
+      while (!vad.isEmpty()) {
+        final samples = vad.front().samples;
+        final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+        final endTime =
+            startTime + samples.length.toDouble() / waveData.sampleRate;
+
+        final stream = recognizer.createStream();
+        stream.acceptWaveform(
+            samples: samples, sampleRate: waveData.sampleRate);
+        recognizer.decode(stream);
+
+        final result = recognizer.getResult(stream);
+        stream.free();
+        print(
+            '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+        vad.pop();
+      }
+    }
+  }
+
+  vad.flush();
+
+  while (!vad.isEmpty()) {
+    final samples = vad.front().samples;
+    final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+    final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
+
+    final stream = recognizer.createStream();
+    stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
+    recognizer.decode(stream);
+
+    final result = recognizer.getResult(stream);
+    stream.free();
+    print(
+        '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+    vad.pop();
+  }
+
+  vad.free();
+
+  recognizer.free();
+}
diff --git a/dart-api-examples/vad-with-non-streaming-asr/bin/whisper.dart b/dart-api-examples/vad-with-non-streaming-asr/bin/whisper.dart
new file mode 100644
index 000000000..ce24fb87a
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/bin/whisper.dart
@@ -0,0 +1,127 @@
+// Copyright (c) 2024 Xiaomi Corporation
+import 'dart:io';
+import 'dart:typed_data';
+
+import 'package:args/args.dart';
+import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
+
+import './init.dart';
+
+void main(List<String> arguments) async {
+  await initSherpaOnnx();
+
+  final parser = ArgParser()
+    ..addOption('silero-vad', help: 'Path to silero_vad.onnx')
+    ..addOption('encoder', help: 'Path to the whisper encoder model')
+    ..addOption('decoder', help: 'Path to the whisper decoder model')
+    ..addOption('tokens', help: 'Path to tokens.txt')
+    ..addOption('input-wav', help: 'Path to input.wav to transcribe');
+
+  final res = parser.parse(arguments);
+  if (res['silero-vad'] == null ||
+      res['encoder'] == null ||
+      res['decoder'] == null ||
+      res['tokens'] == null ||
+      res['input-wav'] == null) {
+    print(parser.usage);
+    exit(1);
+  }
+
+  // create VAD
+  final sileroVad = res['silero-vad'] as String;
+
+  final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
+    model: sileroVad,
+    minSilenceDuration: 0.25,
+    minSpeechDuration: 0.5,
+  );
+
+  final
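+      // The VAD config pairs the Silero model settings above with runtime
+      // options (numThreads, debug). Its sileroVad.windowSize (the number
+      // of samples fed per acceptWaveform() call) drives the chunking loop
+      // further below.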
+      vadConfig = sherpa_onnx.VadModelConfig(
+    sileroVad: sileroVadConfig,
+    numThreads: 1,
+    debug: true,
+  );
+
+  final vad = sherpa_onnx.VoiceActivityDetector(
+      config: vadConfig, bufferSizeInSeconds: 10);
+
+  // create whisper recognizer
+  final encoder = res['encoder'] as String;
+  final decoder = res['decoder'] as String;
+  final tokens = res['tokens'] as String;
+  final inputWav = res['input-wav'] as String;
+
+  final whisper = sherpa_onnx.OfflineWhisperModelConfig(
+    encoder: encoder,
+    decoder: decoder,
+  );
+
+  final modelConfig = sherpa_onnx.OfflineModelConfig(
+    whisper: whisper,
+    tokens: tokens,
+    modelType: 'whisper',
+    debug: false,
+    numThreads: 1,
+  );
+  final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
+  final recognizer = sherpa_onnx.OfflineRecognizer(config);
+
+  final waveData = sherpa_onnx.readWave(inputWav);
+  if (waveData.sampleRate != 16000) {
+    print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
+    exit(1);
+  }
+
+  int numSamples = waveData.samples.length;
+  int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
+
+  for (int i = 0; i != numIter; ++i) {
+    int start = i * vadConfig.sileroVad.windowSize;
+    vad.acceptWaveform(Float32List.sublistView(
+        waveData.samples, start, start + vadConfig.sileroVad.windowSize));
+
+    if (vad.isDetected()) {
+      while (!vad.isEmpty()) {
+        final samples = vad.front().samples;
+        final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+        final endTime =
+            startTime + samples.length.toDouble() / waveData.sampleRate;
+
+        final stream = recognizer.createStream();
+        stream.acceptWaveform(
+            samples: samples, sampleRate: waveData.sampleRate);
+        recognizer.decode(stream);
+
+        final result = recognizer.getResult(stream);
+        stream.free();
+        print(
+            '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+        vad.pop();
+      }
+    }
+  }
+
+  vad.flush();
+
+  while (!vad.isEmpty()) {
+    final samples = vad.front().samples;
+    final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+    final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
+
+    final stream = recognizer.createStream();
+    stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
+    recognizer.decode(stream);
+
+    final result = recognizer.getResult(stream);
+    stream.free();
+    print(
+        '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+    vad.pop();
+  }
+
+  vad.free();
+
+  recognizer.free();
+}
diff --git a/dart-api-examples/vad-with-non-streaming-asr/bin/zipformer-transducer.dart b/dart-api-examples/vad-with-non-streaming-asr/bin/zipformer-transducer.dart
new file mode 100644
index 000000000..378837acc
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/bin/zipformer-transducer.dart
@@ -0,0 +1,131 @@
+// Copyright (c) 2024 Xiaomi Corporation
+import 'dart:io';
+import 'dart:typed_data';
+
+import 'package:args/args.dart';
+import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
+
+import './init.dart';
+
+void main(List<String> arguments) async {
+  await initSherpaOnnx();
+
+  final parser = ArgParser()
+    ..addOption('silero-vad', help: 'Path to silero_vad.onnx')
+    ..addOption('encoder', help: 'Path to the encoder model')
+    ..addOption('decoder', help: 'Path to the decoder model')
+    ..addOption('joiner', help: 'Path to the joiner model')
+    ..addOption('tokens', help: 'Path to tokens.txt')
+    ..addOption('input-wav', help: 'Path to input.wav to transcribe');
+
+  final res = parser.parse(arguments);
+
+  if (res['silero-vad'] == null ||
+      res['encoder'] == null ||
+      res['decoder'] == null ||
+      res['joiner'] == null ||
+      res['tokens'] == null ||
+      res['input-wav'] == null) {
+    print(parser.usage);
+    exit(1);
+  }
+
+  // create VAD
+  final sileroVad = res['silero-vad'] as String;
+
+  final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
+    model: sileroVad,
+    minSilenceDuration: 0.25,
+    minSpeechDuration: 0.5,
+  );
+
+  final vadConfig = sherpa_onnx.VadModelConfig(
+    sileroVad: sileroVadConfig,
+    numThreads: 1,
+    debug: true,
+  );
+
+  final vad = sherpa_onnx.VoiceActivityDetector(
+      config: vadConfig, bufferSizeInSeconds: 10);
+
+  // create zipformer transducer recognizer
+  final encoder = res['encoder'] as String;
+  final decoder = res['decoder'] as String;
+  final joiner = res['joiner'] as String;
+  final tokens = res['tokens'] as String;
+  final inputWav = res['input-wav'] as String;
+
+  final transducer = sherpa_onnx.OfflineTransducerModelConfig(
+    encoder: encoder,
+    decoder: decoder,
+    joiner: joiner,
+  );
+
+  final modelConfig = sherpa_onnx.OfflineModelConfig(
+    transducer: transducer,
+    tokens: tokens,
+    debug: true,
+    numThreads: 1,
+  );
+  final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
+  final recognizer = sherpa_onnx.OfflineRecognizer(config);
+
+  final waveData = sherpa_onnx.readWave(inputWav);
+  if (waveData.sampleRate != 16000) {
+    print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
+    exit(1);
+  }
+
+  int numSamples = waveData.samples.length;
+  int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
+
+  for (int i = 0; i != numIter; ++i) {
+    int start = i * vadConfig.sileroVad.windowSize;
+    vad.acceptWaveform(Float32List.sublistView(
+        waveData.samples, start, start + vadConfig.sileroVad.windowSize));
+
+    if (vad.isDetected()) {
+      while (!vad.isEmpty()) {
+        final samples = vad.front().samples;
+        final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+        final endTime =
+            startTime + samples.length.toDouble() / waveData.sampleRate;
+
+        final stream = recognizer.createStream();
+        stream.acceptWaveform(
+            samples: samples, sampleRate: waveData.sampleRate);
+        recognizer.decode(stream);
+
+        final result = recognizer.getResult(stream);
+        stream.free();
+        print(
+            '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+        vad.pop();
+      }
+    }
+  }
+
+  vad.flush();
+
+  while (!vad.isEmpty()) {
+    final samples = vad.front().samples;
+    final startTime = vad.front().start.toDouble() / waveData.sampleRate;
+    final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
+
+    final stream = recognizer.createStream();
+    stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
+    recognizer.decode(stream);
+
+    final result = recognizer.getResult(stream);
+    stream.free();
+    print(
+        '${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
+
+    vad.pop();
+  }
+
+  vad.free();
+
+  recognizer.free();
+}
diff --git a/dart-api-examples/vad-with-non-streaming-asr/pubspec.yaml b/dart-api-examples/vad-with-non-streaming-asr/pubspec.yaml
new file mode 100644
index 000000000..955796b6a
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/pubspec.yaml
@@ -0,0 +1,18 @@
+name: vad_with_non_streaming_asr
+
+description: >
+  This example demonstrates how to use the Dart API for VAD (voice activity detection)
+  with non-streaming speech recognition.
+
+version: 1.0.0
+
+environment:
+  sdk: ^3.4.0
+
+dependencies:
+  sherpa_onnx: ^1.10.19
+  path: ^1.9.0
+  args: ^2.5.0
+
+dev_dependencies:
+  lints: ^3.0.0
diff --git a/dart-api-examples/vad-with-non-streaming-asr/run-paraformer.sh b/dart-api-examples/vad-with-non-streaming-asr/run-paraformer.sh
new file mode 100755
index 000000000..7631eefe1
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/run-paraformer.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -ex
+
+dart pub get
+
+if [ ! -f ./sherpa-onnx-paraformer-zh-2023-09-14/tokens.txt ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2
+
+  tar xvf sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2
+  rm sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2
+fi
+
+if [ ! -f ./lei-jun-test.wav ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/lei-jun-test.wav
+fi
+
+if [[ ! -f ./silero_vad.onnx ]]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+fi
+
+dart run \
+  ./bin/paraformer.dart \
+  --silero-vad ./silero_vad.onnx \
+  --model ./sherpa-onnx-paraformer-zh-2023-09-14/model.int8.onnx \
+  --tokens ./sherpa-onnx-paraformer-zh-2023-09-14/tokens.txt \
+  --input-wav ./lei-jun-test.wav
diff --git a/dart-api-examples/vad-with-non-streaming-asr/run-sense-voice-en.sh b/dart-api-examples/vad-with-non-streaming-asr/run-sense-voice-en.sh
new file mode 100755
index 000000000..a8766d1b3
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/run-sense-voice-en.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+set -ex
+
+dart pub get
+
+if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
+  tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
+  rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
+fi
+
+if [ ! -f ./Obama.wav ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
+fi
+
+if [[ ! -f ./silero_vad.onnx ]]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+fi
+
+dart run \
+  ./bin/sense-voice.dart \
+  --silero-vad ./silero_vad.onnx \
+  --model ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.onnx \
+  --tokens ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt \
+  --use-itn true \
+  --input-wav ./Obama.wav
+
diff --git a/dart-api-examples/vad-with-non-streaming-asr/run-sense-voice-zh.sh b/dart-api-examples/vad-with-non-streaming-asr/run-sense-voice-zh.sh
new file mode 100755
index 000000000..3b713b218
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/run-sense-voice-zh.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+set -ex
+
+dart pub get
+
+if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
+  tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
+  rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
+fi
+
+if [ ! -f ./lei-jun-test.wav ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/lei-jun-test.wav
+fi
+
+if [[ ! -f ./silero_vad.onnx ]]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+fi
+
+dart run \
+  ./bin/sense-voice.dart \
+  --silero-vad ./silero_vad.onnx \
+  --model ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.onnx \
+  --tokens ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt \
+  --use-itn true \
+  --input-wav ./lei-jun-test.wav
+
diff --git a/dart-api-examples/vad-with-non-streaming-asr/run-telespeech-ctc.sh b/dart-api-examples/vad-with-non-streaming-asr/run-telespeech-ctc.sh
new file mode 100755
index 000000000..2aa0c63a6
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/run-telespeech-ctc.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -ex
+
+dart pub get
+
+if [ ! -f ./sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04/tokens.txt ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2
+
+  tar xvf sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2
+  rm sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2
+fi
+
+if [ ! -f ./lei-jun-test.wav ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/lei-jun-test.wav
+fi
+
+if [[ ! -f ./silero_vad.onnx ]]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+fi
+
+dart run \
+  ./bin/telespeech-ctc.dart \
+  --silero-vad ./silero_vad.onnx \
+  --model ./sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04/model.int8.onnx \
+  --tokens ./sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04/tokens.txt \
+  --input-wav ./lei-jun-test.wav
diff --git a/dart-api-examples/vad-with-non-streaming-asr/run-whisper.sh b/dart-api-examples/vad-with-non-streaming-asr/run-whisper.sh
new file mode 100755
index 000000000..c6eb73414
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/run-whisper.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+set -ex
+
+dart pub get
+
+if [ ! -f ./sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
+
+  tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
+  rm sherpa-onnx-whisper-tiny.en.tar.bz2
+fi
+
+
+
+if [ ! -f ./Obama.wav ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
+fi
+
+if [[ ! -f ./silero_vad.onnx ]]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+fi
+
+dart run \
+  ./bin/whisper.dart \
+  --silero-vad ./silero_vad.onnx \
+  --encoder ./sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx \
+  --decoder ./sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx \
+  --tokens ./sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt \
+  --input-wav ./Obama.wav
diff --git a/dart-api-examples/vad-with-non-streaming-asr/run-zipformer-transducer.sh b/dart-api-examples/vad-with-non-streaming-asr/run-zipformer-transducer.sh
new file mode 100755
index 000000000..9b8763274
--- /dev/null
+++ b/dart-api-examples/vad-with-non-streaming-asr/run-zipformer-transducer.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+set -ex
+
+dart pub get
+
+if [ ! -f ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/tokens.txt ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-gigaspeech-2023-12-12.tar.bz2
+
+  tar xvf sherpa-onnx-zipformer-gigaspeech-2023-12-12.tar.bz2
+  rm sherpa-onnx-zipformer-gigaspeech-2023-12-12.tar.bz2
+fi
+
+if [ ! -f ./Obama.wav ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
+fi
+
+if [[ ! -f ./silero_vad.onnx ]]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+fi
+
+dart run \
+  ./bin/zipformer-transducer.dart \
+  --silero-vad ./silero_vad.onnx \
+  --encoder ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/encoder-epoch-30-avg-1.int8.onnx \
+  --decoder ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/decoder-epoch-30-avg-1.onnx \
+  --joiner ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/joiner-epoch-30-avg-1.int8.onnx \
+  --tokens ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/tokens.txt \
+  --input-wav ./Obama.wav
+
diff --git a/scripts/dart/vad-non-streaming-asr-pubspec.yaml b/scripts/dart/vad-non-streaming-asr-pubspec.yaml
new file mode 100644
index 000000000..7c6804deb
--- /dev/null
+++ b/scripts/dart/vad-non-streaming-asr-pubspec.yaml
@@ -0,0 +1,19 @@
+name: vad_with_non_streaming_asr
+
+description: >
+  This example demonstrates how to use the Dart API for VAD (voice activity detection)
+  with non-streaming speech recognition.
+
+version: 1.0.0
+
+environment:
+  sdk: ^3.4.0
+
+dependencies:
+  sherpa_onnx:
+    path: ../../flutter/sherpa_onnx
+  path: ^1.9.0
+  args: ^2.5.0
+
+dev_dependencies:
+  lints: ^3.0.0
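
Each example above prints one `start -- end : text` line per speech segment detected by the VAD. To turn those segments into subtitles, as the README suggests, the printed triples can be formatted as SRT cues. The following is a minimal sketch; `formatSrtTime` and `toSrtCue` are hypothetical helpers invented here for illustration and are not part of this patch or of the sherpa_onnx API:

```dart
// Sketch: format VAD + ASR segments as SRT subtitle cues.
// Hypothetical helpers, not part of the patch above.

/// Formats seconds as an SRT timestamp: HH:MM:SS,mmm.
String formatSrtTime(double seconds) {
  final h = seconds ~/ 3600;
  final m = (seconds % 3600) ~/ 60;
  final s = (seconds % 60).floor();
  final ms = (((seconds % 60) - s) * 1000).round().clamp(0, 999);
  String two(int v) => v.toString().padLeft(2, '0');
  return '${two(h)}:${two(m)}:${two(s)},${ms.toString().padLeft(3, '0')}';
}

/// One SRT cue from a (start, end, text) triple, i.e. the values the
/// examples above compute just before each print().
String toSrtCue(int index, double start, double end, String text) {
  return '$index\n${formatSrtTime(start)} --> ${formatSrtTime(end)}\n$text\n';
}

void main() {
  // Two segments, as the VAD + recognizer loop might produce them.
  final cues = [
    toSrtCue(1, 0.0, 2.5, 'the first segment'),
    toSrtCue(2, 3.1, 5.0, 'the second segment'),
  ];
  print(cues.join('\n'));
}
```

Inside the examples themselves, the same triple is available right where `result.text` is printed, so the conversion could be done in place of the `print()` call.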