5
5
// Created by knight on 2023/4/5.
6
6
//
7
7
8
- import Foundation
9
8
import AVFoundation
9
+ import Foundation
10
10
11
11
enum Status {
12
12
case stop
@@ -22,6 +22,7 @@ class SherpaOnnxViewModel: ObservableObject {
22
22
23
23
var audioEngine : AVAudioEngine ? = nil
24
24
var recognizer : SherpaOnnxRecognizer ! = nil
25
+ private var audioSession : AVAudioSession !
25
26
26
27
var lastSentence : String = " "
27
28
let maxSentence : Int = 20
@@ -36,20 +37,37 @@ class SherpaOnnxViewModel: ObservableObject {
36
37
37
38
let start = max ( sentences. count - maxSentence, 0 )
38
39
if lastSentence. isEmpty {
39
- return sentences. enumerated ( ) . map { ( index, s) in " \( index) : \( s. lowercased ( ) ) " } [ start... ]
40
- . joined ( separator: " \n " )
40
+ return sentences. enumerated ( ) . map { ( index, s) in
41
+ " \( index) : \( s. lowercased ( ) ) "
42
+ } [ start... ]
43
+ . joined ( separator: " \n " )
41
44
} else {
42
- return sentences. enumerated ( ) . map { ( index, s) in " \( index) : \( s. lowercased ( ) ) " } [ start... ]
43
- . joined ( separator: " \n " ) + " \n \( sentences. count) : \( lastSentence. lowercased ( ) ) "
45
+ return sentences. enumerated ( ) . map { ( index, s) in
46
+ " \( index) : \( s. lowercased ( ) ) "
47
+ } [ start... ]
48
+ . joined ( separator: " \n " )
49
+ + " \n \( sentences. count) : \( lastSentence. lowercased ( ) ) "
44
50
}
45
51
}
46
52
47
53
func updateLabel() {
    // Push the accumulated recognition text to the published property
    // so SwiftUI re-renders the subtitle view.
    subtitles = results
}
50
56
57
/// Configures the app-wide `AVAudioSession` for simultaneous playback and
/// recording, defaulting output to the built-in speaker, and activates it.
/// Configuration errors are logged and otherwise ignored (best effort).
func setupAudioSession() {
    let session = AVAudioSession.sharedInstance()
    audioSession = session
    do {
        try session.setCategory(
            .playAndRecord,
            mode: .default,
            options: [.defaultToSpeaker]
        )
        try session.setActive(true)
    } catch {
        print("Failed to set up audio session: \(error)")
    }
}
67
+
51
68
// Sets up the pipeline in dependency order: recognizer first, then the
// audio session, then the recorder.
// NOTE(review): the recorder presumably taps the engine and feeds the
// recognizer, so it is initialized last — confirm against initRecorder().
init() {
    initRecognizer()
    setupAudioSession()
    initRecorder()
}
55
73
@@ -116,8 +134,8 @@ class SherpaOnnxViewModel: ObservableObject {
116
134
pcmFormat: outputFormat,
117
135
frameCapacity:
118
136
AVAudioFrameCount ( outputFormat. sampleRate)
119
- * buffer. frameLength
120
- / AVAudioFrameCount( buffer. format. sampleRate) ) !
137
+ * buffer. frameLength
138
+ / AVAudioFrameCount( buffer. format. sampleRate) ) !
121
139
122
140
var error : NSError ?
123
141
let _ = converter. convert (
@@ -129,7 +147,7 @@ class SherpaOnnxViewModel: ObservableObject {
129
147
let array = convertedBuffer. array ( )
130
148
if !array. isEmpty {
131
149
self . recognizer. acceptWaveform ( samples: array)
132
- while ( self . recognizer. isReady ( ) ) {
150
+ while self . recognizer. isReady ( ) {
133
151
self . recognizer. decode ( )
134
152
}
135
153
let isEndpoint = self . recognizer. isEndpoint ( )
@@ -141,7 +159,7 @@ class SherpaOnnxViewModel: ObservableObject {
141
159
print ( text)
142
160
}
143
161
144
- if isEndpoint{
162
+ if isEndpoint {
145
163
if !text. isEmpty {
146
164
let tmp = self . lastSentence
147
165
self . lastSentence = " "
@@ -170,7 +188,8 @@ class SherpaOnnxViewModel: ObservableObject {
170
188
do {
171
189
try self . audioEngine? . start ( )
172
190
} catch let error as NSError {
173
- print ( " Got an error starting audioEngine: \( error. domain) , \( error) " )
191
+ print (
192
+ " Got an error starting audioEngine: \( error. domain) , \( error) " )
174
193
}
175
194
print ( " started " )
176
195
}
0 commit comments