//
// AudioRecordService.swift
// AutoCatCore
//
// Created by Selim Mustafaev on 11.03.2025.
// Copyright © 2025 Selim Mustafaev. All rights reserved.
//
import AVFoundation
import Speech

public actor AudioRecordService {

    /// Settings for the recorded audio file: AAC, 44.1 kHz, stereo.
    let audioFileSettings: [String: Any] = [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVSampleRateKey: 44100,
        AVNumberOfChannelsKey: 2,
    ]

    var recorder: AVAudioRecorder?

    public init() {}
}
extension AudioRecordService: AudioRecordServiceProtocol {

    public func requestRecordPermissions() async -> Bool {
        await AVAudioApplication.requestRecordPermission()
    }

    @discardableResult
    public func requestRecognitionAuthorization() async -> SFSpeechRecognizerAuthorizationStatus {
        let status = SFSpeechRecognizer.authorizationStatus()
        // Only prompt the user if authorization has not been determined yet.
        guard status == .notDetermined else {
            return status
        }
        return await withCheckedContinuation { continuation in
            SFSpeechRecognizer.requestAuthorization { status in
                continuation.resume(returning: status)
            }
        }
    }
    public func startRecording(to url: URL) async throws {
        // Ensure microphone permission before touching the audio session.
        switch AVAudioApplication.shared.recordPermission {
        case .denied:
            throw AudioRecordError.permissionDenied
        case .undetermined:
            guard await AVAudioApplication.requestRecordPermission() else {
                throw AudioRecordError.permissionDenied
            }
        case .granted:
            break
        @unknown default:
            throw AudioRecordError.unknown
        }

        try AVAudioSession.sharedInstance().setCategory(.playAndRecord)
        try AVAudioSession.sharedInstance().setActive(true)

        recorder = try AVAudioRecorder(url: url, settings: audioFileSettings)
        recorder?.record()
    }
    public func stopRecording() async {
        if recorder?.isRecording == true {
            recorder?.stop()
        }
        recorder = nil
    }

    public func cancelRecording() async {
        if recorder?.isRecording == true {
            recorder?.stop()
            recorder?.deleteRecording()
        }
        recorder = nil
    }
    public func recognizeText(from url: URL) async -> String? {
        guard let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "ru-RU")),
              recognizer.isAvailable else {
            return nil
        }

        let request = SFSpeechURLRecognitionRequest(url: url)
        request.shouldReportPartialResults = false

        return await withCheckedContinuation { continuation in
            recognizer.recognitionTask(with: request) { result, error in
                // Resume only once: either on the final transcription or on failure.
                guard result?.isFinal == true || error != nil else { return }
                continuation.resume(returning: result?.bestTranscription.formattedString)
            }
        }
    }
    public func getDuration(from url: URL) async throws -> TimeInterval {
        let asset = AVURLAsset(url: url)
        let duration = try await asset.load(.duration)
        return CMTimeGetSeconds(duration)
    }
}
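
#if DEBUG
/// Usage sketch: drives the service end-to-end through its public API.
/// The temporary file URL and the surrounding flow are illustrative assumptions,
/// not taken from the AutoCat app code.
func audioRecordServiceUsageExample() async throws {
    let service = AudioRecordService()

    // Ask for microphone and speech-recognition permissions up front.
    guard await service.requestRecordPermissions() else { return }
    await service.requestRecognitionAuthorization()

    // Record into a temporary .m4a file (hypothetical location).
    let fileURL = FileManager.default.temporaryDirectory
        .appendingPathComponent("example-recording.m4a")
    try await service.startRecording(to: fileURL)

    // ... the user speaks; stop when finished.
    await service.stopRecording()

    // Transcribe the recording and read back its duration.
    let text = await service.recognizeText(from: fileURL)
    let duration = try await service.getDuration(from: fileURL)
    print(text ?? "<no transcription>", duration)
}
#endif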