Skip to content

Commit

Permalink
Handled the errors that occurred during startRecording
Browse files Browse the repository at this point in the history
by passing them to resultSubject
  • Loading branch information
Cay-Zhang committed Aug 26, 2020
1 parent 4c1f01f commit ead9075
Show file tree
Hide file tree
Showing 4 changed files with 57 additions and 52 deletions.
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,8 @@ In `handleResult`, the first closure parameter is a `SwiftSpeech.Session`, which

The second is a [`SFSpeechRecognitionResult`](https://developer.apple.com/documentation/speech/sfspeechrecognitionresult), which contains rich information about the recognition. Not only the recognized text (`result.bestTranscription.formattedString`), but also interesting stuff like **speaking rate** and **pitch**!

In `handleError`, you will handle the errors produced during the recognition process as well as those that occur during the initialization of the recording session (such as a microphone activation failure).

```swift
// 2
.onStartRecording(appendAction: (SwiftSpeech.Session) -> Void)
Expand Down
4 changes: 2 additions & 2 deletions Sources/SwiftSpeech/Session.swift
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,9 @@ extension SwiftSpeech {
2. An AVAudioSession error occurred
3. The driver failed to start the hardware
*/
public func startRecording() throws {
public func startRecording() {
guard let recognizer = SpeechRecognizer.recognizer(withID: id) else { return }
try recognizer.startRecording()
recognizer.startRecording()
}

public func stopRecording() {
Expand Down
99 changes: 51 additions & 48 deletions Sources/SwiftSpeech/SpeechRecognizer.swift
Original file line number Diff line number Diff line change
Expand Up @@ -42,60 +42,63 @@ public class SpeechRecognizer {
.eraseToAnyPublisher()
}

public func startRecording() throws {

// Cancel the previous task if it's running.
recognitionTask?.cancel()
self.recognitionTask = nil

// Configure the audio session for the app if it's on iOS/Mac Catalyst.
#if canImport(UIKit)
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
#endif

let inputNode = audioEngine.inputNode
public func startRecording() {
do {
// Cancel the previous task if it's running.
recognitionTask?.cancel()
self.recognitionTask = nil
// Configure the audio session for the app if it's on iOS/Mac Catalyst.
#if canImport(UIKit)
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
#endif
let inputNode = audioEngine.inputNode

// Create and configure the speech recognition request.
recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
guard let recognitionRequest = recognitionRequest else { fatalError("Unable to create a SFSpeechAudioBufferRecognitionRequest object") }

// Use `sessionConfiguration` to configure the recognition request
recognitionRequest.shouldReportPartialResults = sessionConfiguration.shouldReportPartialResults
recognitionRequest.requiresOnDeviceRecognition = sessionConfiguration.requiresOnDeviceRecognition
recognitionRequest.taskHint = sessionConfiguration.taskHint
recognitionRequest.contextualStrings = sessionConfiguration.contextualStrings
recognitionRequest.interactionIdentifier = sessionConfiguration.interactionIdentifier

// Create a recognition task for the speech recognition session.
// Keep a reference to the task so that it can be cancelled.
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { [weak self] result, error in
guard let self = self else { return }
if let result = result {
self.resultSubject.send(result)
if result.isFinal {
self.resultSubject.send(completion: .finished)
// Create and configure the speech recognition request.
recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
guard let recognitionRequest = recognitionRequest else { fatalError("Unable to create a SFSpeechAudioBufferRecognitionRequest object") }

// Use `sessionConfiguration` to configure the recognition request
recognitionRequest.shouldReportPartialResults = sessionConfiguration.shouldReportPartialResults
recognitionRequest.requiresOnDeviceRecognition = sessionConfiguration.requiresOnDeviceRecognition
recognitionRequest.taskHint = sessionConfiguration.taskHint
recognitionRequest.contextualStrings = sessionConfiguration.contextualStrings
recognitionRequest.interactionIdentifier = sessionConfiguration.interactionIdentifier

// Create a recognition task for the speech recognition session.
// Keep a reference to the task so that it can be cancelled.
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { [weak self] result, error in
guard let self = self else { return }
if let result = result {
self.resultSubject.send(result)
if result.isFinal {
self.resultSubject.send(completion: .finished)
SpeechRecognizer.remove(id: self.id)
}
} else if let error = error {
self.stopRecording()
self.resultSubject.send(completion: .failure(error))
SpeechRecognizer.remove(id: self.id)
} else {
fatalError("No result and no error")
}
} else if let error = error {
self.stopRecording()
self.resultSubject.send(completion: .failure(error))
SpeechRecognizer.remove(id: self.id)
} else {
fatalError("No result and no error")
}
}

// Configure the microphone input.
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
self.recognitionRequest?.append(buffer)
// Configure the microphone input.
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
self.recognitionRequest?.append(buffer)
}

audioEngine.prepare()
try audioEngine.start()
} catch {
resultSubject.send(completion: .failure(error))
SpeechRecognizer.remove(id: self.id)
}

audioEngine.prepare()
try audioEngine.start()

}

public func stopRecording() {
Expand Down
4 changes: 2 additions & 2 deletions Sources/SwiftSpeech/ViewModifiers.swift
Original file line number Diff line number Diff line change
Expand Up @@ -106,8 +106,8 @@ public extension SwiftSpeech.ViewModifiers {
// View update
self.viewComponentState = .recording
self.recordingSession = session
try! session.startRecording()
delegate.onStartRecording(session: session)
session.startRecording()
}

fileprivate func cancelRecording() {
Expand Down Expand Up @@ -173,8 +173,8 @@ public extension SwiftSpeech.ViewModifiers {
// View update
self.viewComponentState = .recording
self.recordingSession = session
try! session.startRecording()
delegate.onStartRecording(session: session)
session.startRecording()
}

fileprivate func endRecording() {
Expand Down

0 comments on commit ead9075

Please sign in to comment.