open-sourced the ISF editor, misc project updates

Commit 6e8ec574d12a0be814e8e1449cd7a0d13c762f11, committed by @mrRay on Aug 18, 2016 (1 parent: 4aa1ffc)
Showing 327 changed files with 60,392 additions and 42 deletions.
@@ -9,7 +9,7 @@
<key>CFBundleIconFile</key>
<string></string>
<key>CFBundleIdentifier</key>
- <string>com.Vidvox.${PRODUCT_NAME:rfc1034identifier}</string>
+ <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
@@ -7,7 +7,7 @@
<key>CFBundleExecutable</key>
<string>${EXECUTABLE_NAME}</string>
<key>CFBundleIdentifier</key>
- <string>com.yourcompany.${PRODUCT_NAME:identifier}</string>
+ <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundlePackageType</key>
@@ -9,7 +9,7 @@
<key>CFBundleIconFile</key>
<string></string>
<key>CFBundleIdentifier</key>
- <string>com.Vidvox.${PRODUCT_NAME:rfc1034identifier}</string>
+ <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
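
For context: the Info.plist hunks above all make the same change, swapping a hard-coded bundle identifier (com.Vidvox.${PRODUCT_NAME:rfc1034identifier} or com.yourcompany.${PRODUCT_NAME:identifier}) for $(PRODUCT_BUNDLE_IDENTIFIER), which Xcode 7 and later resolve from the target's PRODUCT_BUNDLE_IDENTIFIER build setting instead of deriving the identifier from the product name.
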
@@ -0,0 +1,22 @@
+#import <Foundation/Foundation.h>
+#import "VideoSource.h"
+#import <AVFoundation/AVFoundation.h>
+
+
+
+
+@interface AVCaptureVideoSource : VideoSource <AVCaptureVideoDataOutputSampleBufferDelegate> {
+ AVCaptureDeviceInput *propDeviceInput;
+ AVCaptureSession *propSession;
+ AVCaptureVideoDataOutput *propOutput;
+ dispatch_queue_t propQueue;
+ CVOpenGLTextureCacheRef propTextureCache;
+ VVBuffer *propLastBuffer;
+
+ //OSSpinLock lastBufferLock;
+ //VVBuffer *lastBuffer;
+}
+
+- (void) loadDeviceWithUniqueID:(NSString *)n;
+
+@end
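
For orientation, here is a minimal sketch of how a host app might drive the class declared above. It assumes manual reference counting (as the file itself uses) plus a hypothetical NSPopUpButton outlet and a videoSource ivar; none of these host-app names are part of this commit.

#import "AVCaptureVideoSource.h"

// hypothetical controller methods, not part of this commit
- (void) awakeFromNib {
	videoSource = [[AVCaptureVideoSource alloc] init];
	// each menu item's representedObject is the corresponding AVCaptureDevice uniqueID
	for (NSMenuItem *itemPtr in [videoSource arrayOfSourceMenuItems])
		[[devicePopUpButton menu] addItem:itemPtr];
}
- (IBAction) deviceMenuUsed:(id)sender {
	// stops any capture already in progress, then starts the chosen device
	[videoSource loadDeviceWithUniqueID:[[devicePopUpButton selectedItem] representedObject]];
}
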
@@ -0,0 +1,199 @@
+#import "AVCaptureVideoSource.h"
+
+
+
+
+@implementation AVCaptureVideoSource
+
+
+/*===================================================================================*/
+#pragma mark --------------------- init/dealloc
+/*------------------------------------*/
+
+
+- (id) init {
+ //NSLog(@"%s",__func__);
+ if (self = [super init]) {
+ propDeviceInput = nil;
+ propSession = nil;
+ propOutput = nil;
+ propQueue = NULL;
+ CVReturn err = kCVReturnSuccess;
+ //NSLog(@"\t\tshared context used for tex cache is %@",[_globalVVBufferPool sharedContext]);
+ err = CVOpenGLTextureCacheCreate(NULL,NULL,[[_globalVVBufferPool sharedContext] CGLContextObj],[[GLScene defaultPixelFormat] CGLPixelFormatObj],NULL,&propTextureCache);
+ if (err != kCVReturnSuccess) {
+ NSLog(@"\t\terr %d at CVOpenGLTextureCacheCreate, %s",err,__func__);
+ }
+ propLastBuffer = nil;
+ return self;
+ }
+ [self release];
+ return nil;
+}
+- (void) prepareToBeDeleted {
+ [super prepareToBeDeleted];
+}
+- (void) dealloc {
+ //NSLog(@"%s",__func__);
+ if (!deleted)
+ [self prepareToBeDeleted];
+
+ OSSpinLockLock(&propLock);
+ CVOpenGLTextureCacheRelease(propTextureCache);
+ VVRELEASE(propLastBuffer);
+ OSSpinLockUnlock(&propLock);
+
+ [super dealloc];
+}
+
+
+/*===================================================================================*/
+#pragma mark --------------------- superclass overrides
+/*------------------------------------*/
+
+
+- (NSArray *) arrayOfSourceMenuItems {
+ NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
+ if (devices==nil || [devices count]<1)
+ return nil;
+ NSMutableArray *returnMe = MUTARRAY;
+ for (AVCaptureDevice *devicePtr in devices) {
+ NSMenuItem *newItem = [[NSMenuItem alloc] initWithTitle:[devicePtr localizedName] action:nil keyEquivalent:@""];
+ NSString *uniqueID = [devicePtr uniqueID];
+ [newItem setRepresentedObject:uniqueID];
+ [returnMe addObject:newItem];
+ [newItem release];
+ }
+ return returnMe;
+}
+- (void) _stop {
+ //NSLog(@"%s",__func__);
+ if (propSession != nil) {
+ [propSession stopRunning];
+ if (propDeviceInput != nil)
+ [propSession removeInput:propDeviceInput];
+ if (propOutput != nil)
+ [propSession removeOutput:propOutput];
+
+ if (propQueue != NULL) dispatch_release(propQueue);
+ propQueue = NULL;
+
+ [propDeviceInput release];
+ propDeviceInput = nil;
+ [propOutput release];
+ propOutput = nil;
+ [propSession release];
+ propSession = nil;
+ }
+ VVRELEASE(propLastBuffer);
+}
+- (VVBuffer *) allocBuffer {
+ VVBuffer *returnMe = nil;
+ OSSpinLockLock(&propLock);
+ returnMe = (propLastBuffer==nil) ? nil : [propLastBuffer retain];
+ OSSpinLockUnlock(&propLock);
+ return returnMe;
+}
+
+
+/*===================================================================================*/
+#pragma mark --------------------- misc
+/*------------------------------------*/
+
+
+- (void) loadDeviceWithUniqueID:(NSString *)n {
+ if ([self propRunning])
+ [self stop];
+ if (n==nil)
+ return;
+ BOOL bail = NO;
+ NSError *err = nil;
+ OSSpinLockLock(&propLock);
+ AVCaptureDevice *propDevice = [AVCaptureDevice deviceWithUniqueID:n];
+ propDeviceInput = (propDevice==nil) ? nil : [[AVCaptureDeviceInput alloc] initWithDevice:propDevice error:&err];
+ if (propDeviceInput != nil) {
+ propSession = [[AVCaptureSession alloc] init];
+ propOutput = [[AVCaptureVideoDataOutput alloc] init];
+
+ if (![propSession canAddInput:propDeviceInput]) {
+ NSLog(@"\t\terr: problem adding propDeviceInput in %s",__func__);
+ bail = YES;
+ }
+ if (![propSession canAddOutput:propOutput]) {
+ NSLog(@"\t\terr: problem adding propOutput in %s",__func__);
+ bail = YES;
+ }
+
+ if (!bail) {
+ propQueue = dispatch_queue_create([[[NSBundle mainBundle] bundleIdentifier] UTF8String], NULL);
+ [propOutput setSampleBufferDelegate:self queue:propQueue];
+
+ [propSession addInput:propDeviceInput];
+ [propSession addOutput:propOutput];
+ [propSession startRunning];
+ }
+ }
+ else
+ bail = YES;
+ OSSpinLockUnlock(&propLock);
+
+ if (bail)
+ [self stop];
+ else
+ [self start];
+}
+
+
+/*===================================================================================*/
+#pragma mark --------------------- AVCaptureVideoDataOutputSampleBufferDelegate protocol (and AVCaptureFileOutputDelegate, too; some protocols share these methods)
+/*------------------------------------*/
+
+
+- (void)captureOutput:(AVCaptureOutput *)o didDropSampleBuffer:(CMSampleBufferRef)b fromConnection:(AVCaptureConnection *)c {
+ NSLog(@"%s",__func__);
+}
+- (void)captureOutput:(AVCaptureOutput *)o didOutputSampleBuffer:(CMSampleBufferRef)b fromConnection:(AVCaptureConnection *)c {
+ //NSLog(@"%s",__func__);
+ /*
+ CMFormatDescriptionRef portFormatDesc = CMSampleBufferGetFormatDescription(b);
+ NSLog(@"\t\t\tCMMediaType is %ld, video is %ld",CMFormatDescriptionGetMediaType(portFormatDesc),kCMMediaType_Video);
+ NSLog(@"\t\t\tthe FourCharCode for the media subtype is %ld",CMFormatDescriptionGetMediaSubType(portFormatDesc));
+ CMVideoDimensions vidDims = CMVideoFormatDescriptionGetDimensions(portFormatDesc);
+ NSLog(@"\t\t\tport size is %d x %d",vidDims.width,vidDims.height);
+ */
+
+ OSSpinLockLock(&propLock);
+ // wrap the new frame's image buffer in a GL texture and retain it as the most recent buffer
+ VVBuffer *newBuffer = nil;
+ //CMBlockBufferRef blockBufferRef = CMSampleBufferGetDataBuffer(b)
+ CVImageBufferRef imgBufferRef = CMSampleBufferGetImageBuffer(b);
+ if (imgBufferRef != NULL) {
+ //CGSize imgBufferSize = CVImageBufferGetDisplaySize(imgBufferRef);
+ //NSSizeLog(@"\t\timg buffer size is",imgBufferSize);
+ CVOpenGLTextureRef cvTexRef = NULL;
+ CVReturn err = kCVReturnSuccess;
+
+
+ err = CVOpenGLTextureCacheCreateTextureFromImage(NULL,propTextureCache,imgBufferRef,NULL,&cvTexRef);
+ if (err != kCVReturnSuccess) {
+ NSLog(@"\t\terr %d at CVOpenGLTextureCacheCreateTextureFromImage() in %s",err,__func__);
+ }
+ else {
+ newBuffer = [_globalVVBufferPool allocBufferForCVGLTex:cvTexRef];
+ if (newBuffer != nil) {
+ VVRELEASE(propLastBuffer);
+ propLastBuffer = [newBuffer retain];
+
+ [newBuffer release];
+ newBuffer = nil;
+ }
+ CVOpenGLTextureRelease(cvTexRef);
+ }
+ }
+ CVOpenGLTextureCacheFlush(propTextureCache,0);
+ OSSpinLockUnlock(&propLock);
+
+}
+
+
+@end
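
To summarize the flow of the implementation above: each delegate callback turns the incoming CMSampleBuffer into a GL texture via the CVOpenGLTextureCache, wraps it in a VVBuffer, and stores it as propLastBuffer under the spin-lock; a render loop then pulls the latest frame with allocBuffer, which returns a retained buffer. A minimal sketch of that consuming side follows, with assumed names that are not part of this commit.

// hypothetical per-frame render code, not part of this commit
VVBuffer *frameBuffer = [videoSource allocBuffer];	// retained; nil until the first frame arrives
if (frameBuffer != nil) {
	[renderScene drawBuffer:frameBuffer];	// 'renderScene' and 'drawBuffer:' are placeholder names
	[frameBuffer release];	// balance the retain from allocBuffer
}
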
@@ -0,0 +1,90 @@
+/*{
+ "DESCRIPTION": "draws the passed image over a checkerboard such that the alpha channel in the image is visible. the passed image is automatically scaled to always fit within the GL context.",
+ "CREDIT": "by zoidberg WOOP WOOP WOOP WOOP WOOP",
+ "CATEGORIES": [
+ ],
+ "INPUTS": [
+ {
+ "NAME": "inputImage",
+ "TYPE": "image"
+ },
+ {
+ "NAME": "viewAlpha",
+ "TYPE": "bool",
+ "DEFAULT": true
+ }
+ ]
+
+}*/
+
+// rect that fits 'a' in 'b' using sizing mode 'fit'
+vec4 RectThatFitsRectInRect(vec4 a, vec4 b) {
+ float bAspect = b.z/b.w;
+ float aAspect = a.z/a.w;
+ if (aAspect==bAspect) {
+ return b;
+ }
+ vec4 returnMe = vec4(0.0);
+ // fit
+
+ // if the rect i'm trying to fit stuff *into* is wider than the rect i'm resizing
+ if (bAspect > aAspect) {
+ returnMe.w = b.w;
+ returnMe.z = returnMe.w * aAspect;
+ }
+ // else if the rect i'm resizing is wider than the rect it's going into
+ else if (bAspect < aAspect) {
+ returnMe.z = b.z;
+ returnMe.w = returnMe.z / aAspect;
+ }
+ else {
+ returnMe.z = b.z;
+ returnMe.w = b.w;
+ }
+ returnMe.x = (b.z-returnMe.z)/2.0+b.x;
+ returnMe.y = (b.w-returnMe.w)/2.0+b.y;
+ return returnMe;
+}
+
+#define checkerboardWidth 25.0
+
+void main() {
+ // first calculate the "bottom" pixel color (a checkerboard)
+ vec4 bottomPixel = vec4(1,0,0,1);
+
+ float sizeOfTwoCheckers = floor(checkerboardWidth)*2.0;
+ vec2 normPosInTwoByTwoGrid = mod(gl_FragCoord.xy, sizeOfTwoCheckers)/vec2(sizeOfTwoCheckers);
+ bool drawWhite = false;
+ if (normPosInTwoByTwoGrid.x>0.5)
+ drawWhite = !drawWhite;
+ if (normPosInTwoByTwoGrid.y>0.5)
+ drawWhite = !drawWhite;
+ bottomPixel = (drawWhite==true) ? vec4(0.80, 0.80, 0.80, 1) : vec4(0.70, 0.70, 0.70, 1);
+
+
+ // get the rect of the mask image after it's been resized according to the passed sizing mode. this is in pixel coords relative to the rendering space!
+ vec4 rectOfResizedInputImage = RectThatFitsRectInRect(vec4(0.0, 0.0, _inputImage_imgSize.x, _inputImage_imgSize.y), vec4(0,0,RENDERSIZE.x,RENDERSIZE.y));
+ // i know the pixel coords of this frag in the render space- convert this to NORMALIZED texture coords for the resized mask image
+ vec2 normMaskSrcCoord;
+ normMaskSrcCoord.x = (gl_FragCoord.x-rectOfResizedInputImage.x)/rectOfResizedInputImage.z;
+ normMaskSrcCoord.y = (gl_FragCoord.y-rectOfResizedInputImage.y)/rectOfResizedInputImage.w;
+ // get the color of the pixel from the input image for these normalized coords (the color is transparent black if there should be no image here as a result of the rect resize)
+ vec4 inputImagePixel = (normMaskSrcCoord.x>=0.0 && normMaskSrcCoord.x<=1.0 && normMaskSrcCoord.y>=0.0 && normMaskSrcCoord.y<=1.0) ? IMG_NORM_PIXEL(inputImage, normMaskSrcCoord) : vec4(0,0,0,0);
+
+ // now we do the "source atop" composition that will show the checkerboard backing
+
+ // if the top pixel is transparent, something may be visible "through" it
+ float TTO = (viewAlpha==true) ? inputImagePixel.a : 1.0;
+ // the less opaque the top, the more the bottom should "show through"- unless the bottom is transparent!
+ float TBO = bottomPixel.a;
+
+ // ...so use TBO to calculate the "real bottom color"...
+ vec4 realBottomColor = mix(bottomPixel,inputImagePixel,(1.0-TBO));
+ // ...then use TTO to calculate how much this shows through the top color...
+ vec4 realTop = mix(realBottomColor, inputImagePixel, TTO);
+
+ vec4 outColor = realTop;
+ outColor.a = (TTO) + (bottomPixel.a * (1.0-TTO));
+ gl_FragColor = outColor;
+
+}
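
To make the final blend concrete (a worked example of the math above, not part of the shader file): with viewAlpha on, take a fragment where the input image has alpha 0.25 and lands on a light checker square (bottomPixel = vec4(0.80, 0.80, 0.80, 1.0)). TBO is 1.0, so realBottomColor is just the checker gray; realTop = mix(checker gray, inputImagePixel, 0.25) is three parts checker to one part image color; and outColor.a = 0.25 + 1.0 * (1.0 - 0.25) = 1.0, so the output stays fully opaque while the checkerboard shows through wherever the source image is transparent.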