-
Notifications
You must be signed in to change notification settings - Fork 5
/
CameraVideoCapturer.swift
118 lines (90 loc) · 4.05 KB
/
CameraVideoCapturer.swift
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import Foundation
import WebRTC
import AVFoundation
import GPUImage
/// Bridges a GPUImage camera pipeline into WebRTC: frames are pulled from a
/// `Camera`, optionally routed through a `BasicOperation` filter, and delivered
/// to the `RTCVideoCapturer` delegate as `RTCVideoFrame`s.
class CameraVideoCapturer: RTCVideoCapturer {
    /// Nanoseconds per second, used to convert `CMTime` seconds into the
    /// nanosecond timestamps WebRTC expects.
    let kNanosecondsPerSecond = 1000000000
    var camera: Camera?
    var filter: BasicOperation?
    var capturing = false
    fileprivate var gpuImageConsumer: GPUImageConsumer!

    override init() {
        super.init()
        gpuImageConsumer = GPUImageConsumer(capturer: self)
    }

    /// Installs the given filter (or clears it when `nil`). If capture is
    /// currently running, the GPUImage pipeline is torn down and re-wired
    /// immediately; otherwise the filter is simply stored and applied on the
    /// next `startCapture()`.
    func applyFilter(_ filter: BasicOperation?) {
        self.filter = filter
        guard let activeCamera = camera, capturing else { return }
        // Detach the old chain before building the new one.
        activeCamera.removeAllTargets()
        gpuImageConsumer.removeSourceAtIndex(0)
        if let activeFilter = self.filter {
            activeCamera --> activeFilter --> gpuImageConsumer
        } else {
            activeCamera --> gpuImageConsumer
        }
    }

    /// Lazily creates the camera (640x480, portrait, BGRA), wires the current
    /// filter into the pipeline, and begins capturing frames.
    func startCapture() {
        if camera == nil {
            do {
                camera = try Camera(sessionPreset: .vga640x480, orientation: .portrait, captureAsYUV: false)
            } catch {
                // Camera setup failure is unrecoverable for this capturer.
                fatalError("Could not initialize rendering pipeline: \(error)")
            }
        }
        capturing = true
        // Re-wire the pipeline now that `capturing` is true.
        applyFilter(filter)
        camera?.startCapture()
    }

    /// Stops the underlying camera and marks the capturer idle.
    func stopCapture() {
        camera?.stopCapture()
        capturing = false
    }

    /// Wraps a rendered pixel buffer in an `RTCVideoFrame` (rotated 90° for
    /// portrait) and forwards it to the WebRTC delegate.
    func captureOutput(_ pixelBuffer: CVPixelBuffer, time: CMTime) {
        let rtcBuffer = RTCCVPixelBuffer(pixelBuffer: pixelBuffer)
        let timestampNs = Int64(CMTimeGetSeconds(time) * Float64(kNanosecondsPerSecond))
        let frame = RTCVideoFrame(buffer: rtcBuffer,
                                  rotation: ._90,
                                  timeStampNs: timestampNs)
        delegate?.capturer(self, didCapture: frame)
    }
}
/// Terminal node of the GPUImage pipeline: receives rendered textures, copies
/// them into CPU-accessible pixel buffers, and hands them back to the owning
/// `CameraVideoCapturer` for delivery to WebRTC.
fileprivate class GPUImageConsumer : ImageConsumer {
    public let sources = SourceContainer()
    public let maximumInputs: UInt = 1
    // Strong back-reference to the capturer that owns this consumer.
    // NOTE(review): capturer also holds this consumer strongly — looks like a
    // retain cycle; confirm intended lifetime.
    var capturer: CameraVideoCapturer
    /// Timestamp of the last frame forwarded; used to drop duplicate timestamps.
    private var previousFrameTime = CMTime.negativeInfinity

    public init(capturer: CameraVideoCapturer) {
        self.capturer = capturer
    }

    public func newTextureAvailable(_ texture: Texture, fromSourceIndex: UInt) {
        // Ignore still images and other non-video updates — they carry no timestamp.
        guard let frameTime = texture.timingStyle.timestamp?.asCMTime else { return }
        // Two consecutive frames with identical timestamps abort downstream
        // recording, so drop duplicates.
        guard frameTime != previousFrameTime else {
            return
        }
        // BUG FIX: previousFrameTime was never updated, so the duplicate guard
        // above could never fire. Record the timestamp of each accepted frame.
        previousFrameTime = frameTime
        var pixelBufferFromPool: CVPixelBuffer? = nil
        let pixelBufferStatus = CVPixelBufferCreate(kCFAllocatorDefault, texture.texture.width, texture.texture.height, kCVPixelFormatType_32BGRA, nil, &pixelBufferFromPool)
        // Bail silently if allocation failed; the frame is simply dropped.
        guard let pixelBuffer = pixelBufferFromPool, (pixelBufferStatus == kCVReturnSuccess) else {
            return
        }
        // The base address must be locked around both the GPU readback and the
        // capturer's use of the buffer.
        CVPixelBufferLockBaseAddress(pixelBuffer, [])
        renderIntoPixelBuffer(pixelBuffer, texture: texture)
        capturer.captureOutput(pixelBuffer, time: frameTime)
        CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
    }

    /// Copies the Metal texture's contents into `pixelBuffer` (BGRA, row-padded).
    /// Caller must have locked the buffer's base address.
    func renderIntoPixelBuffer(_ pixelBuffer: CVPixelBuffer, texture: Texture) {
        guard let pixelBufferBytes = CVPixelBufferGetBaseAddress(pixelBuffer) else {
            print("Could not get buffer bytes")
            return
        }
        let mtlTexture = texture.texture
        // Commit an empty command buffer and wait — presumably this flushes any
        // previously queued GPU work so the texture is fully rendered before the
        // CPU readback below. TODO(review): confirm this is the intent.
        guard let commandBuffer = sharedMetalRenderingDevice.commandQueue.makeCommandBuffer() else { fatalError("Could not create command buffer on image rendering.")}
        commandBuffer.commit()
        commandBuffer.waitUntilCompleted()
        let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
        let region = MTLRegionMake2D(0, 0, mtlTexture.width, mtlTexture.height)
        mtlTexture.getBytes(pixelBufferBytes, bytesPerRow: bytesPerRow, from: region, mipmapLevel: 0)
    }
}