feat(All): Add support for Swift 4.2
Anthony Oliveri committed Nov 12, 2018
1 parent fa212ec commit 4bbf42b
Showing 6 changed files with 109 additions and 78 deletions.
4 changes: 4 additions & 0 deletions Source/SpeechToTextV1/SpeechToText+Recognize.swift
@@ -177,7 +177,11 @@ extension SpeechToText {
 // make sure the AVAudioSession shared instance is properly configured
 do {
     let audioSession = AVAudioSession.sharedInstance()
+    #if swift(>=4.2)
+    try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: .default, options: [.defaultToSpeaker, .mixWithOthers])
+    #else
     try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, with: [.defaultToSpeaker, .mixWithOthers])
+    #endif
     try audioSession.setActive(true)
 } catch {
     let failureReason = "Failed to setup the AVAudioSession sharedInstance properly."
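This hunk shows the pattern the commit applies throughout: gate the renamed API on the compiler version so the SDK still builds on Swift 4.0/4.1. Below is a minimal sketch, not taken from the repository, of the same session setup in isolation. Swift 4.2 nests the old AVAudioSessionCategoryPlayAndRecord string constant as AVAudioSession.Category.playAndRecord and adds a mode: parameter to setCategory.

import AVFoundation

// Sketch only: configure the shared session for simultaneous playback
// and recording, under either Swift 4.1 or 4.2.
func configureSharedAudioSession() throws {
    let audioSession = AVAudioSession.sharedInstance()
    #if swift(>=4.2)
    try audioSession.setCategory(.playAndRecord, mode: .default, options: [.defaultToSpeaker, .mixWithOthers])
    #else
    try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, with: [.defaultToSpeaker, .mixWithOthers])
    #endif
    try audioSession.setActive(true)
}

Because #if swift(...) is resolved at compile time, the untaken branch only has to parse, not type-check, so the Swift 4.2-only overload can live in a file that still compiles under 4.1.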
4 changes: 2 additions & 2 deletions Source/SpeechToTextV1/WebSockets/SpeechToTextEncoder.swift
@@ -123,7 +123,7 @@ internal class SpeechToTextEncoder {
 }

 // deallocate header data buffer
-packetData.deallocate(capacity: headerData.count)
+packetData.deallocate()

 // assemble pages and add to ogg cache
 assemblePages(flush: true)
@@ -153,7 +153,7 @@ internal class SpeechToTextEncoder {
 }

 // deallocate header data buffer
-packetData.deallocate(capacity: headerData.count)
+packetData.deallocate()

 // assemble pages and add to ogg cache
 assemblePages(flush: true)
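Both hunks in this file make the same change. Swift 4.1 deprecated UnsafeMutablePointer.deallocate(capacity:); the parameterless deallocate() always frees the pointer's entire allocation, so the call site no longer repeats the capacity. A minimal sketch of the lifecycle, with assumed sizes and names:

// Sketch only: allocate, use, and release a raw byte buffer.
let capacity = 256
let packetData = UnsafeMutablePointer<UInt8>.allocate(capacity: capacity)
packetData.initialize(repeating: 0, count: capacity)
// ... fill packetData with header bytes ...
packetData.deinitialize(count: capacity)
packetData.deallocate()  // replaces deallocate(capacity: capacity)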
4 changes: 4 additions & 0 deletions Source/SpeechToTextV1/WebSockets/SpeechToTextRecorder.swift
@@ -127,7 +127,11 @@ internal class SpeechToTextRecorder {
         userInfo: nil,
         repeats: true
     )
+    #if swift(>=4.2)
+    RunLoop.current.add(powerTimer!, forMode: RunLoop.Mode.common)
+    #else
     RunLoop.current.add(powerTimer!, forMode: RunLoopMode.commonModes)
+    #endif
 }

 internal func startRecording() throws {
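Swift 4.2 moved the global RunLoopMode.commonModes constant into the nested RunLoop.Mode type. A minimal sketch using a hypothetical block-based timer (the recorder itself builds a target/selector timer, as the context lines show):

import Foundation

// Sketch only: schedule a repeating timer on the common run-loop modes.
let powerTimer = Timer(timeInterval: 0.025, repeats: true) { _ in
    // e.g. poll average input power here
}
#if swift(>=4.2)
RunLoop.current.add(powerTimer, forMode: .common)
#else
RunLoop.current.add(powerTimer, forMode: RunLoopMode.commonModes)
#endif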
6 changes: 3 additions & 3 deletions Source/TextToSpeechV1/TextToSpeechDecoder.swift
@@ -86,7 +86,7 @@ internal class TextToSpeechDecoder {
 ogg_sync_wrote(&syncState, bufferSize)

 // attempt to get a page from the data that we wrote
-while (ogg_sync_pageout(&syncState, &page) == 1) {
+while ogg_sync_pageout(&syncState, &page) == 1 {
     if beginStream {
         // assign stream's number with the page.
         ogg_stream_init(&streamState, ogg_page_serialno(&page))
@@ -176,7 +176,7 @@ internal class TextToSpeechDecoder {

 // deallocate pcmDataBuffer when the function ends, regardless if the function ended normally or with an error.
 defer {
-    pcmDataBuffer.deallocate(capacity: MemoryLayout<Float>.stride * Int(MAX_FRAME_SIZE) * Int(numChannels))
+    pcmDataBuffer.deallocate()
 }
 } else if packetCount == 1 {
     hasTagsPacket = true
@@ -196,7 +196,7 @@ internal class TextToSpeechDecoder {
 numberOfSamplesDecoded = opus_multistream_decode_float(decoder, packet.packet, Int32(packet.bytes), pcmDataBuffer, MAX_FRAME_SIZE, 0)

 if numberOfSamplesDecoded < 0 {
-    NSLog("Decoding error: \(opus_strerror(numberOfSamplesDecoded))")
+    NSLog("Decoding error: \(String(describing: opus_strerror(numberOfSamplesDecoded)))")
     throw OpusError.internalError
 }

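The last hunk silences Swift 4.2's warning about interpolating an Optional: opus_strerror returns an optional C-string pointer, and String(describing:) makes the debug description explicit. A minimal, self-contained sketch using the C library's strerror, which has the same shape as opus_strerror; note that String(describing:) logs a pointer description, whereas String(cString:) would recover the message text itself:

import Foundation

// Sketch only: two ways to log an optional C-string pointer.
let code: Int32 = 1
NSLog("Decoding error: \(String(describing: strerror(code)))")  // prints an Optional pointer description
if let cMessage = strerror(code) {
    NSLog("Decoding error: \(String(cString: cMessage))")       // prints the readable message
}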
14 changes: 14 additions & 0 deletions Source/VisualRecognitionV3/VisualRecognition+UIImage.swift
@@ -154,13 +154,23 @@ extension VisualRecognition {
     success: @escaping (ClassifiedImages) -> Void)
 {
     // convert UIImage to Data
+    #if swift(>=4.2)
+    guard let imageData = image.pngData() else {
+        let description = "Failed to convert image from UIImage to Data."
+        let userInfo = [NSLocalizedDescriptionKey: description]
+        let error = NSError(domain: self.domain, code: 0, userInfo: userInfo)
+        failure?(error)
+        return
+    }
+    #else
     guard let imageData = UIImagePNGRepresentation(image) else {
         let description = "Failed to convert image from UIImage to Data."
         let userInfo = [NSLocalizedDescriptionKey: description]
         let error = NSError(domain: self.domain, code: 0, userInfo: userInfo)
         failure?(error)
         return
     }
+    #endif

     self.classifyWithLocalModel(imageData: imageData, classifierIDs: classifierIDs, threshold: threshold,
                                 failure: failure, success: success)
@@ -174,7 +184,11 @@ extension VisualRecognition {
     let filename = UUID().uuidString + ".jpg"
     let directory = NSURL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true)
     guard let file = directory.appendingPathComponent(filename) else { throw RestError.encodingError }
+    #if swift(>=4.2)
+    guard let data = image.jpegData(compressionQuality: 0.75) else { throw RestError.encodingError }
+    #else
     guard let data = UIImageJPEGRepresentation(image, 0.75) else { throw RestError.encodingError }
+    #endif
     try data.write(to: file)
     return file
 }
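Swift 4.2 replaced the free functions UIImagePNGRepresentation(_:) and UIImageJPEGRepresentation(_:_:) with the UIImage methods pngData() and jpegData(compressionQuality:). A minimal sketch wrapping both renames behind hypothetical helpers, not part of the commit:

import UIKit

// Sketch only: version-gated PNG/JPEG encoding of a UIImage.
func pngData(of image: UIImage) -> Data? {
    #if swift(>=4.2)
    return image.pngData()
    #else
    return UIImagePNGRepresentation(image)
    #endif
}

func jpegData(of image: UIImage, quality: CGFloat = 0.75) -> Data? {
    #if swift(>=4.2)
    return image.jpegData(compressionQuality: quality)
    #else
    return UIImageJPEGRepresentation(image, quality)
    #endif
}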
