Skip to content

Commit 1d950a8

Browse files
authored
Initialize the audio session for iOS ASR example (#1786)
Fixes #1784
1 parent 59ff854 commit 1d950a8

File tree

1 file changed

+29
-10
lines changed

1 file changed

+29
-10
lines changed

ios-swiftui/SherpaOnnx/SherpaOnnx/SherpaOnnxViewModel.swift

+29-10
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,8 @@
55
// Created by knight on 2023/4/5.
66
//
77

8-
import Foundation
98
import AVFoundation
9+
import Foundation
1010

1111
enum Status {
1212
case stop
@@ -22,6 +22,7 @@ class SherpaOnnxViewModel: ObservableObject {
2222

2323
var audioEngine: AVAudioEngine? = nil
2424
var recognizer: SherpaOnnxRecognizer! = nil
25+
private var audioSession: AVAudioSession!
2526

2627
var lastSentence: String = ""
2728
let maxSentence: Int = 20
@@ -36,20 +37,37 @@ class SherpaOnnxViewModel: ObservableObject {
3637

3738
let start = max(sentences.count - maxSentence, 0)
3839
if lastSentence.isEmpty {
39-
return sentences.enumerated().map { (index, s) in "\(index): \(s.lowercased())" }[start...]
40-
.joined(separator: "\n")
40+
return sentences.enumerated().map { (index, s) in
41+
"\(index): \(s.lowercased())"
42+
}[start...]
43+
.joined(separator: "\n")
4144
} else {
42-
return sentences.enumerated().map { (index, s) in "\(index): \(s.lowercased())" }[start...]
43-
.joined(separator: "\n") + "\n\(sentences.count): \(lastSentence.lowercased())"
45+
return sentences.enumerated().map { (index, s) in
46+
"\(index): \(s.lowercased())"
47+
}[start...]
48+
.joined(separator: "\n")
49+
+ "\n\(sentences.count): \(lastSentence.lowercased())"
4450
}
4551
}
4652

4753
func updateLabel() {
4854
self.subtitles = self.results
4955
}
5056

57+
func setupAudioSession() {
58+
audioSession = AVAudioSession.sharedInstance()
59+
do {
60+
try audioSession.setCategory(
61+
.playAndRecord, mode: .default, options: [.defaultToSpeaker])
62+
try audioSession.setActive(true)
63+
} catch {
64+
print("Failed to set up audio session: \(error)")
65+
}
66+
}
67+
5168
init() {
5269
initRecognizer()
70+
setupAudioSession()
5371
initRecorder()
5472
}
5573

@@ -116,8 +134,8 @@ class SherpaOnnxViewModel: ObservableObject {
116134
pcmFormat: outputFormat,
117135
frameCapacity:
118136
AVAudioFrameCount(outputFormat.sampleRate)
119-
* buffer.frameLength
120-
/ AVAudioFrameCount(buffer.format.sampleRate))!
137+
* buffer.frameLength
138+
/ AVAudioFrameCount(buffer.format.sampleRate))!
121139

122140
var error: NSError?
123141
let _ = converter.convert(
@@ -129,7 +147,7 @@ class SherpaOnnxViewModel: ObservableObject {
129147
let array = convertedBuffer.array()
130148
if !array.isEmpty {
131149
self.recognizer.acceptWaveform(samples: array)
132-
while (self.recognizer.isReady()){
150+
while self.recognizer.isReady() {
133151
self.recognizer.decode()
134152
}
135153
let isEndpoint = self.recognizer.isEndpoint()
@@ -141,7 +159,7 @@ class SherpaOnnxViewModel: ObservableObject {
141159
print(text)
142160
}
143161

144-
if isEndpoint{
162+
if isEndpoint {
145163
if !text.isEmpty {
146164
let tmp = self.lastSentence
147165
self.lastSentence = ""
@@ -170,7 +188,8 @@ class SherpaOnnxViewModel: ObservableObject {
170188
do {
171189
try self.audioEngine?.start()
172190
} catch let error as NSError {
173-
print("Got an error starting audioEngine: \(error.domain), \(error)")
191+
print(
192+
"Got an error starting audioEngine: \(error.domain), \(error)")
174193
}
175194
print("started")
176195
}

0 commit comments

Comments (0)