// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)

const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createVad() {
  // Please download silero_vad.onnx from
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  const config = {
    sileroVad: {
      model: './silero_vad.onnx',
      threshold: 0.5,           // speech probability threshold in [0, 1]
      minSpeechDuration: 0.25,  // discard speech segments shorter than this (seconds)
      minSilenceDuration: 0.5,  // silence this long (seconds) ends a speech segment
      windowSize: 512,          // samples per VAD frame at 16 kHz
    },
    sampleRate: 16000,
    debug: true,
    numThreads: 1,
  };

  // Size, in seconds, of the buffer the VAD keeps internally for a speech segment.
  const bufferSizeInSeconds = 60;

  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}


const vad = createVad();

// Circular buffer holding up to 30 seconds of microphone audio waiting to be
// consumed by the VAD in fixed-size windows.
const bufferSizeInSeconds = 30;
const buffer =
    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);

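// Audio flow: naudiodon2 streams microphone samples into the circular buffer;
// the 'data' handler below drains it in windowSize chunks, feeds each chunk to
// the VAD, and writes every detected speech segment to a .wav file.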
const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, just log the error
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: vad.config.sampleRate,
  }
});

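// The 'data' callback receives raw PCM from the microphone as a Node.js
// Buffer. Since sampleFormat is SampleFormatFloat32, viewing it as a
// Float32Array yields one 32-bit float sample per element.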
let printed = false;
let index = 0;
ai.on('data', data => {
  const windowSize = vad.config.sileroVad.windowSize;
  buffer.push(new Float32Array(data.buffer));

  // Process the buffered audio one windowSize chunk at a time.
  while (buffer.size() > windowSize) {
    const samples = buffer.get(buffer.head(), windowSize);
    buffer.pop(windowSize);
    vad.acceptWaveform(samples);

    // Announce the start of an utterance only once.
    if (vad.isDetected() && !printed) {
      console.log(`${index}: Detected speech`);
      printed = true;
    }

    if (!vad.isDetected()) {
      printed = false;
    }

    // Drain finished speech segments and save each one to a wave file.
    while (!vad.isEmpty()) {
      const segment = vad.front();
      vad.pop();
      const filename = `${index}-${
          new Date()
              .toLocaleTimeString('en-US', {hour12: false})
              .split(' ')[0]}.wav`;
      sherpa_onnx.writeWave(
          filename,
          {samples: segment.samples, sampleRate: vad.config.sampleRate});
      const duration = segment.samples.length / vad.config.sampleRate;
      console.log(`${index} End of speech. Duration: ${duration} seconds`);
      console.log(`Saved to ${filename}`);
      index += 1;
    }
  }
});

ai.on('close', () => {
  console.log('Free resources');
});

ai.start();
console.log('Started! Please speak');
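
// Optional: stop the stream cleanly on Ctrl+C. This sketch assumes naudiodon2
// keeps naudiodon's AudioIO.quit() method; if your version differs, calling
// ai.abort() or simply exiting the process also works.
process.on('SIGINT', () => {
  console.log('Stopping');
  ai.quit(() => process.exit(0));
});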