Updated result from CoreML to be a struct and not a tuple
dokun1 committed Feb 14, 2018
1 parent 85d1651 commit b09b33a
Showing 7 changed files with 31 additions and 21 deletions.
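In short, the streaming-recognition API now hands back a named LuminaRecognitionResult struct instead of a ([LuminaPrediction]?, Any.Type) tuple, so delegate code reads .predictions and .type rather than .0 and .1. The following is a rough migration sketch for an app-side delegate method, not part of the commit itself; it mirrors the ViewController.swift change further down and uses only names introduced in this diff.

// Migration sketch (illustrative). Before this commit, predictions arrived as tuples:
//
//   func streamed(videoFrame: UIImage, with predictions: [([LuminaPrediction]?, Any.Type)]?, from controller: LuminaViewController) {
//       guard let first = predictions?.first, let best = first.0?.first else { return }
//       print("\(String(describing: first.1)): \(best.name)")
//   }
//
// After this commit, the same callback receives named struct fields:

func streamed(videoFrame: UIImage, with predictions: [LuminaRecognitionResult]?, from controller: LuminaViewController) {
    guard let first = predictions?.first, let best = first.predictions?.first else { return }
    print("\(String(describing: first.type)): \(best.name), confidence \(best.confidence)")
}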
2 changes: 1 addition & 1 deletion Lumina/Lumina/Camera/LuminaCamera.swift
@@ -14,7 +14,7 @@ protocol LuminaCameraDelegate: class {
func stillImageCaptured(camera: LuminaCamera, image: UIImage, livePhotoURL: URL?, depthData: Any?)
func videoFrameCaptured(camera: LuminaCamera, frame: UIImage)
@available (iOS 11.0, *)
func videoFrameCaptured(camera: LuminaCamera, frame: UIImage, predictedObjects: [([LuminaPrediction]?, Any.Type)]?)
func videoFrameCaptured(camera: LuminaCamera, frame: UIImage, predictedObjects: [LuminaRecognitionResult]?)
func depthDataCaptured(camera: LuminaCamera, depthData: Any)
func videoRecordingCaptured(camera: LuminaCamera, videoURL: URL)
func finishedFocus(camera: LuminaCamera)
28 changes: 19 additions & 9 deletions Lumina/Lumina/Camera/LuminaObjectRecognizer.swift
@@ -16,6 +16,16 @@ public struct LuminaPrediction {
public var name: String
/// The numeric value of the confidence of the prediction, out of 1.0
public var confidence: Float
/// The unique identifier associated with this prediction, as determined by the Vision framework
public var UUID: UUID
}

/// An object that represents a collection of predictions that Lumina detects, along with their associated types
public struct LuminaRecognitionResult {
/// The collection of predictions in a given result, as predicted by Lumina
public var predictions: [LuminaPrediction]?
/// The type of MLModel that made the predictions, best resolved as a String
public var type: Any.Type
}

@available(iOS 11.0, *)
@@ -27,8 +37,12 @@ final class LuminaObjectRecognizer: NSObject {
self.modelPairs = modelPairs
}

func recognize(from image: UIImage, completion: @escaping ([([LuminaPrediction]?, Any.Type)]) -> Void) {
var recognitionResults = [([LuminaPrediction]?, Any.Type)]()
func recognize(from image: UIImage, completion: @escaping ([LuminaRecognitionResult]?) -> Void) {
guard let coreImage = image.cgImage else {
completion(nil)
return
}
var recognitionResults = [LuminaRecognitionResult]()
let recognitionGroup = DispatchGroup()
for modelPair in modelPairs {
recognitionGroup.enter()
@@ -38,18 +52,14 @@ final class LuminaObjectRecognizer: NSObject {
}
let request = VNCoreMLRequest(model: visionModel) { request, error in
if error != nil || request.results == nil {
recognitionResults.append((nil, modelPair.1))
recognitionResults.append(LuminaRecognitionResult(predictions: nil, type: modelPair.1))
recognitionGroup.leave()
} else if let results = request.results {
let mappedResults = self.mapResults(results)
recognitionResults.append((mappedResults, modelPair.1))
recognitionResults.append(LuminaRecognitionResult(predictions: mappedResults, type: modelPair.1))
recognitionGroup.leave()
}
}
guard let coreImage = image.cgImage else {
recognitionGroup.leave()
continue
}
let handler = VNImageRequestHandler(cgImage: coreImage)
do {
try handler.perform([request])
@@ -67,7 +77,7 @@ final class LuminaObjectRecognizer: NSObject {
var results = [LuminaPrediction]()
for object in objects {
if let object = object as? VNClassificationObservation {
results.append(LuminaPrediction(name: object.identifier, confidence: object.confidence))
results.append(LuminaPrediction(name: object.identifier, confidence: object.confidence, UUID: object.uuid))
}
}
return results.sorted(by: {
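For reference, here is a minimal sketch (not from the repository) of what the recognizer now assembles for each model pair: one LuminaRecognitionResult whose predictions are sorted by descending confidence and tagged with the model's type. MockClassifier and the literal predictions are invented for illustration, and the memberwise initializers used here are only visible inside the framework (or a @testable test target).

import Foundation
// @testable import Lumina   // if run from the framework's test target

final class MockClassifier {}   // stand-in for an MLModel-backed type the host app would supply

let predictions = [
    LuminaPrediction(name: "banana", confidence: 0.92, UUID: UUID()),
    LuminaPrediction(name: "lemon", confidence: 0.05, UUID: UUID())
]
let result = LuminaRecognitionResult(predictions: predictions, type: MockClassifier.self)

if let best = result.predictions?.first {
    // Mirrors how LuminaObjectRecognizer reports results: highest-confidence prediction first.
    print("\(String(describing: result.type)) -> \(best.name) (\(best.confidence))")
}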
@@ -10,6 +10,10 @@ import Foundation
import CoreML

extension LuminaViewController: LuminaCameraDelegate {
func videoFrameCaptured(camera: LuminaCamera, frame: UIImage, predictedObjects: [LuminaRecognitionResult]?) {
delegate?.streamed(videoFrame: frame, with: predictedObjects, from: self)
}

func videoRecordingCaptured(camera: LuminaCamera, videoURL: URL) {
delegate?.captured(videoAt: videoURL, from: self)
}
@@ -29,10 +33,6 @@ extension LuminaViewController: LuminaCameraDelegate {
delegate?.streamed(videoFrame: frame, from: self)
}

func videoFrameCaptured(camera: LuminaCamera, frame: UIImage, predictedObjects: [([LuminaPrediction]?, Any.Type)]?) {
delegate?.streamed(videoFrame: frame, with: predictedObjects, from: self)
}

func detected(camera: LuminaCamera, metadata: [Any]) {
delegate?.detected(metadata: metadata, from: self)
}
4 changes: 2 additions & 2 deletions Lumina/Lumina/UI/Extensions/Delegates/LuminaDelegate.swift
@@ -42,9 +42,9 @@ public protocol LuminaDelegate: class {
/// - Warning: The other method for passing video frames back via a delegate will not be triggered in the presence of a CoreML model
/// - Parameters:
/// - videoFrame: the frame captured by Lumina
/// - predictions: an array of tuples, each containing the predictions made by a model used with Lumina, and its type, for matching against when parsing results.
/// - predictions: an array of LuminaRecognitionResult values, each containing the predictions made by a model used with Lumina and that model's type, for matching against when parsing results.
/// - controller: the instance of Lumina that is streaming the frames
func streamed(videoFrame: UIImage, with predictions: [([LuminaPrediction]?, Any.Type)]?, from controller: LuminaViewController)
func streamed(videoFrame: UIImage, with predictions: [LuminaRecognitionResult]?, from controller: LuminaViewController)

/// Triggered whenever streamDepthData is set to true on Lumina, and streams depth data detected in the form of AVDepthData
///
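A conforming view controller therefore switches its delegate method to the new signature and reads named fields instead of tuple positions. The sketch below is illustrative rather than taken from the repository: MyViewController and the 0.5 confidence cut-off are arbitrary, and it assumes the remaining LuminaDelegate requirements are satisfied elsewhere.

extension MyViewController: LuminaDelegate {
    func streamed(videoFrame: UIImage, with predictions: [LuminaRecognitionResult]?, from controller: LuminaViewController) {
        // Keep only models whose best prediction clears an arbitrary confidence threshold.
        let labels = (predictions ?? []).compactMap { result -> String? in
            guard let best = result.predictions?.first, best.confidence > 0.5 else { return nil }
            return "\(String(describing: result.type)): \(best.name)"
        }
        controller.textPrompt = labels.joined(separator: "\n")
    }
}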
2 changes: 1 addition & 1 deletion Lumina/Lumina/Util/Info.plist
@@ -15,7 +15,7 @@
<key>CFBundlePackageType</key>
<string>FMWK</string>
<key>CFBundleShortVersionString</key>
<string>1.0.1</string>
<string>1.1.0</string>
<key>CFBundleVersion</key>
<string>$(CURRENT_PROJECT_VERSION)</string>
<key>NSPrincipalClass</key>
2 changes: 1 addition & 1 deletion LuminaSample/LuminaSample/Info.plist
@@ -15,7 +15,7 @@
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>1.0.1</string>
<string>1.1.0</string>
<key>CFBundleVersion</key>
<string>1</string>
<key>LSRequiresIPhoneOS</key>
6 changes: 3 additions & 3 deletions LuminaSample/LuminaSample/ViewController.swift
@@ -98,20 +98,20 @@ extension ViewController { //MARK: IBActions
}

extension ViewController: LuminaDelegate {
func streamed(videoFrame: UIImage, with predictions: [([LuminaPrediction]?, Any.Type)]?, from controller: LuminaViewController) {
func streamed(videoFrame: UIImage, with predictions: [LuminaRecognitionResult]?, from controller: LuminaViewController) {
if #available(iOS 11.0, *) {
guard let predicted = predictions else {
return
}
var resultString = String()
for prediction in predicted {
guard let values = prediction.0 else {
guard let values = prediction.predictions else {
continue
}
guard let bestPrediction = values.first else {
continue
}
resultString.append("\(String(describing: prediction.1)): \(bestPrediction.name)" + "\r\n")
resultString.append("\(String(describing: prediction.type)): \(bestPrediction.name)" + "\r\n")
}
controller.textPrompt = resultString
} else {