Permalink
Browse files

Ready for OpenCV

  • Loading branch information...
1 parent 30275de commit 28fd62de5c8129ec34ffd4c35d07f7cbcb0a7152 @gabriel committed May 3, 2010
Showing with 3,437 additions and 2,738 deletions.
  1. +11 −13 Classes/FFProcessing/FFConverter.h
  2. +33 −32 Classes/FFProcessing/FFConverter.m
  3. +5 −5 Classes/FFProcessing/FFDecoder.m
  4. +5 −8 Classes/FFProcessing/FFDecoderOptions.h
  5. +8 −8 Classes/FFProcessing/FFDecoderOptions.m
  6. +9 −4 Classes/FFProcessing/FFEncoder.m
  7. +4 −8 Classes/FFProcessing/FFEncoderOptions.h
  8. +9 −13 Classes/FFProcessing/FFEncoderOptions.m
  9. +5 −5 Classes/FFProcessing/FFGLDrawable.m
  10. +1 −0 Classes/FFProcessing/FFProcessing.h
  11. +9 −4 Classes/FFProcessing/FFProcessing.m
  12. +2 −2 Classes/FFProcessing/FFReadThread.h
  13. +10 −10 Classes/FFProcessing/FFReadThread.m
  14. +2 −4 Classes/FFProcessing/FFReader.h
  15. +11 −14 Classes/FFProcessing/FFReader.m
  16. +49 −0 Classes/FFProcessing/FFTypes.h
  17. +13 −0 Classes/FFProcessing/FFTypes.m
  18. +24 −3 Classes/FFProcessing/FFUtils.h
  19. +39 −18 Classes/FFProcessing/FFUtils.m
  20. +2 −1 Classes/FFProcessing/Filters/FFEdgeFilter.h
  21. +18 −16 Classes/FFProcessing/Filters/FFEdgeFilter.m
  22. +1 −1 Classes/FFProcessing/Filters/FFFilter.h
  23. +17 −0 Classes/FFProcessing/Filters/FFFilters.h
  24. +34 −0 Classes/FFProcessing/Filters/FFFilters.m
  25. +3 −3 Classes/FFProcessing/Processors/FFDataMoshProcessor.m
  26. +1 −1 Classes/FFProcessing/Processors/FFEncodeProcessor.h
  27. +9 −13 Classes/FFProcessing/Processors/FFEncodeProcessor.m
  28. +1 −1 Classes/FFProcessing/Processors/FFProcessor.h
  29. +8 −3 Classes/PB/PBProcessing.m
  30. +135 −85 FFProcessing.xcodeproj/gabe.mode1v3
  31. +2,916 −2,451 FFProcessing.xcodeproj/gabe.pbxuser
  32. +41 −12 FFProcessing.xcodeproj/project.pbxproj
  33. +2 −0 IDEAS
  34. 0 Resources/{PixelBleeder-Info.plist → FFProcessing-Info.plist}
@@ -10,22 +10,20 @@
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
-#import "FFDecoderOptions.h"
-#import "FFEncoderOptions.h"
+#import "FFTypes.h"
+#import "FFFilter.h"
-@interface FFConverter : NSObject {
- AVFrame *_picture;
-
- FFDecoderOptions *_decoderOptions;
- FFEncoderOptions *_encoderOptions;
+@interface FFConverter : NSObject <FFFilter> {
+ FFPictureFormat _pictureFormat;
+ FFPictureFrame _pictureFrame;
}
-@property (readonly, nonatomic) FFDecoderOptions *decoderOptions;
-@property (readonly, nonatomic) FFEncoderOptions *encoderOptions;
-
-- (id)initWithDecoderOptions:(FFDecoderOptions *)decoderOptions encoderOptions:(FFEncoderOptions *)encoderOptions;
-
+/*!
+ Converter with picture format (output).
+ @param pictureFormat Picture format; if width, height, or pixelFormat is set to 0, the source format will be used for that parameter
+ */
+- (id)initWithPictureFormat:(FFPictureFormat)pictureFormat;
-- (AVFrame *)scalePicture:(AVFrame *)picture error:(NSError **)error;
+- (FFPictureFrame)scalePicture:(FFPictureFrame)pictureFrame error:(NSError **)error;
@end
@@ -8,66 +8,67 @@
#import "FFConverter.h"
#import "FFUtils.h"
+#import "FFFilter.h"
@implementation FFConverter
-@synthesize decoderOptions=_decoderOptions, encoderOptions=_encoderOptions;
-
-- (id)initWithDecoderOptions:(FFDecoderOptions *)decoderOptions encoderOptions:(FFEncoderOptions *)encoderOptions {
-
+- (id)initWithPictureFormat:(FFPictureFormat)pictureFormat {
if ((self = [self init])) {
- _decoderOptions = [decoderOptions retain];
- _encoderOptions = [encoderOptions retain];
- _picture = NULL;
+ _pictureFormat = pictureFormat;
}
return self;
}
- (void)dealloc {
- FFPictureRelease(_picture);
- [_decoderOptions release];
- [_encoderOptions release];
+ FFPictureFrameRelease(_pictureFrame);
[super dealloc];
}
-- (AVFrame *)scalePicture:(AVFrame *)picture error:(NSError **)error {
+- (FFPictureFrame)scalePicture:(FFPictureFrame)pictureFrame error:(NSError **)error {
struct SwsContext *scaleContext = NULL;
- if (_picture == NULL) {
- _picture = FFPictureCreate(_encoderOptions.pixelFormat, _encoderOptions.width, _encoderOptions.height);
- if (_picture == NULL) {
- FFSetError(error, FFErrorCodeAllocateFrame, -1, @"Couldn't allocate frame");
- return NULL;
+ FFPictureFormat pictureFormat = _pictureFormat;
+ if (pictureFormat.width == 0) pictureFormat.width = pictureFrame.pictureFormat.width;
+ if (pictureFormat.height == 0) pictureFormat.height = pictureFrame.pictureFormat.height;
+ if (pictureFormat.pixelFormat == PIX_FMT_NONE) pictureFormat.pixelFormat = pictureFrame.pictureFormat.pixelFormat;
+
+ NSAssert(pictureFormat.width != 0 && pictureFormat.height != 0 && pictureFormat.pixelFormat != PIX_FMT_NONE, @"Invalid picture format");
+
+ if (_pictureFrame.frame == NULL) {
+ _pictureFrame = FFPictureFrameCreate(pictureFormat);
+ if (_pictureFrame.frame == NULL) {
+ FFSetError(error, FFErrorCodeAllocateFrame, -1, @"Couldn't allocate frame in converter");
+ return _pictureFrame;
}
}
-
+
scaleContext = sws_getCachedContext(scaleContext,
- _decoderOptions.width, _decoderOptions.height, _decoderOptions.pixelFormat,
- _encoderOptions.width, _encoderOptions.height, _encoderOptions.pixelFormat,
+ pictureFrame.pictureFormat.width, pictureFrame.pictureFormat.height, pictureFrame.pictureFormat.pixelFormat,
+ pictureFormat.width, pictureFormat.height, pictureFormat.pixelFormat,
SWS_BICUBIC, NULL, NULL, NULL);
if (scaleContext == NULL) {
FFSetError(error, FFErrorCodeScaleContext, -1, @"No scale context");
- return NULL;
+ return FFPictureFrameNone;
}
- /*!
- int sws_scale(struct SwsContext *context, const uint8_t* const srcSlice[], const int srcStride[],
- int srcSliceY, int srcSliceH, uint8_t* const dst[], const int dstStride[]);
- */
-
sws_scale(scaleContext,
- (const uint8_t* const *)picture->data,
- (const int *)picture->linesize,
+ (const uint8_t* const *)pictureFrame.frame->data,
+ (const int *)pictureFrame.frame->linesize,
0,
- _decoderOptions.height,
- _picture->data,
- _picture->linesize);
+ pictureFormat.height,
+ _pictureFrame.frame->data,
+ _pictureFrame.frame->linesize);
- _picture->pts = picture->pts;
+ _pictureFrame.frame->pts = pictureFrame.frame->pts;
- return _picture;
+ return _pictureFrame;
}
+#pragma mark FFFilter
+
+- (FFPictureFrame)filterPictureFrame:(FFPictureFrame)pictureFrame error:(NSError **)error {
+ return [self scalePicture:pictureFrame error:error];
+}
@end
@@ -109,11 +109,11 @@ - (BOOL)openWithURL:(NSURL *)URL format:(NSString *)format error:(NSError **)err
*/
// Set options
- _options = [[FFDecoderOptions alloc] initWithWidth:_videoStream->codec->coded_width
- height:_videoStream->codec->coded_height
- pixelFormat:_videoStream->codec->pix_fmt
- videoFrameRate:_videoStream->r_frame_rate
- videoTimeBase:_videoStream->time_base];
+ _options = [[FFDecoderOptions alloc] initWithPictureFormat:FFPictureFormatMake(_videoStream->codec->coded_width,
+ _videoStream->codec->coded_height,
+ _videoStream->codec->pix_fmt)
+ videoFrameRate:_videoStream->r_frame_rate
+ videoTimeBase:_videoStream->time_base];
FFDebug(@"Decoder options: %@", _options);
@@ -9,26 +9,23 @@
#import "libavcodec/avcodec.h"
#import "FFPresets.h"
+#import "FFTypes.h"
@interface FFDecoderOptions : NSObject {
- int _width;
- int _height;
- enum PixelFormat _pixelFormat;
+ FFPictureFormat _pictureFormat;
AVRational _videoFrameRate;
AVRational _videoTimeBase;
AVRational _sampleAspectRatio;
int64_t _duration;
}
-@property (readonly, nonatomic) int width;
-@property (readonly, nonatomic) int height;
-@property (readonly, nonatomic) enum PixelFormat pixelFormat;
+@property (readonly, nonatomic) FFPictureFormat pictureFormat;
@property (readonly, nonatomic) AVRational videoFrameRate;
@property (readonly, nonatomic) AVRational videoTimeBase;
@property (readonly, nonatomic) AVRational sampleAspectRatio;
-- (id)initWithWidth:(int)width height:(int)height pixelFormat:(enum PixelFormat)pixelFormat videoFrameRate:(AVRational)videoFrameRate
- videoTimeBase:(AVRational)videoTimeBase;
+- (id)initWithPictureFormat:(FFPictureFormat)pictureFormat videoFrameRate:(AVRational)videoFrameRate
+ videoTimeBase:(AVRational)videoTimeBase;
@end
@@ -11,25 +11,25 @@
@implementation FFDecoderOptions
-@synthesize width=_width, height=_height, pixelFormat=_pixelFormat, videoFrameRate=_videoFrameRate, videoTimeBase=_videoTimeBase,
+@synthesize pictureFormat=_pictureFormat, videoFrameRate=_videoFrameRate, videoTimeBase=_videoTimeBase,
sampleAspectRatio=_sampleAspectRatio;
-- (id)initWithWidth:(int)width height:(int)height pixelFormat:(enum PixelFormat)pixelFormat videoFrameRate:(AVRational)videoFrameRate
- videoTimeBase:(AVRational)videoTimeBase {
+- (id)initWithPictureFormat:(FFPictureFormat)pictureFormat videoFrameRate:(AVRational)videoFrameRate
+ videoTimeBase:(AVRational)videoTimeBase {
+
if ((self = [super init])) {
- _width = width;
- _height = height;
- _pixelFormat = pixelFormat;
+ _pictureFormat = pictureFormat;
_videoFrameRate = videoFrameRate;
_videoTimeBase = videoTimeBase;
- _sampleAspectRatio = FFFindRationalApproximation((float)_width/(float)_height, 255);
+ _sampleAspectRatio = FFFindRationalApproximation((float)_pictureFormat.width/(float)_pictureFormat.height, 255);
}
return self;
}
- (NSString *)description {
return [NSString stringWithFormat:@"width=%d, height=%d, pixelFormat=%d, videoFrameRate=%d/%d, videoTimeBase=%d/%d, sampleAspectRatio=%d/%d",
- _width, _height, _pixelFormat, _videoFrameRate.num, _videoFrameRate.den, _videoTimeBase.num, _videoTimeBase.den,
+ _pictureFormat.width, _pictureFormat.height, _pictureFormat.pixelFormat,
+ _videoFrameRate.num, _videoFrameRate.den, _videoTimeBase.num, _videoTimeBase.den,
_sampleAspectRatio.num, _sampleAspectRatio.den];
}
@@ -175,12 +175,17 @@ - (BOOL)writeHeader:(NSError **)error {
}
- (BOOL)writeTrailer:(NSError **)error {
- int averror = av_write_trailer(_formatContext);
- if (averror != 0) {
- FFSetError(error, FFErrorCodeWriteTrailer, averror, @"Couldn't write trailer");
+ if (_formatContext) {
+ int averror = av_write_trailer(_formatContext);
+ if (averror != 0) {
+ FFSetError(error, FFErrorCodeWriteTrailer, averror, @"Couldn't write trailer");
+ return NO;
+ }
+ FFDebug(@"Wrote trailer");
+ } else {
+ FFSetError(error, FFErrorCodeWriteTrailer, 0, @"Couldn't write trailer: no format context");
return NO;
}
- FFDebug(@"Wrote trailer");
return YES;
}
@@ -7,16 +7,15 @@
//
#import "FFPresets.h"
+#import "FFTypes.h"
@interface FFEncoderOptions : NSObject {
FFPresets *_presets;
NSString *_path;
NSString *_format;
NSString *_codecName;
- int _width;
- int _height;
- enum PixelFormat _pixelFormat;
+ FFPictureFormat _pictureFormat;
AVRational _videoTimeBase;
AVRational _sampleAspectRatio;
}
@@ -25,16 +24,13 @@
@property (readonly, nonatomic) NSString *path;
@property (readonly, nonatomic) NSString *format;
@property (readonly, nonatomic) NSString *codecName;
-@property (readonly, nonatomic) int width;
-@property (readonly, nonatomic) int height;
-@property (readonly, nonatomic) enum PixelFormat pixelFormat;
+@property (readonly, nonatomic) FFPictureFormat pictureFormat;
@property (readonly, nonatomic) AVRational videoTimeBase;
@property (readonly, nonatomic) AVRational sampleAspectRatio;
- (id)initWithPath:(NSString *)path format:(NSString *)format codecName:(NSString *)codecName
- width:(int)width height:(int)height pixelFormat:(enum PixelFormat)pixelFormat
- videoTimeBase:(AVRational)videoTimeBase;
+ pictureFormat:(FFPictureFormat)pictureFormat videoTimeBase:(AVRational)videoTimeBase;
- (void)apply:(AVCodecContext *)codecContext;
@@ -11,26 +11,22 @@
@implementation FFEncoderOptions
-@synthesize path=_path, format=_format, codecName=_codecName, presets=_presets,
-width=_width, height=_height, pixelFormat=_pixelFormat, videoTimeBase=_videoTimeBase,
-sampleAspectRatio=_sampleAspectRatio;
+@synthesize path=_path, format=_format, codecName=_codecName, presets=_presets, pictureFormat=_pictureFormat,
+videoTimeBase=_videoTimeBase, sampleAspectRatio=_sampleAspectRatio;
- (id)initWithPath:(NSString *)path format:(NSString *)format codecName:(NSString *)codecName
- width:(int)width height:(int)height pixelFormat:(enum PixelFormat)pixelFormat
- videoTimeBase:(AVRational)videoTimeBase {
+ pictureFormat:(FFPictureFormat)pictureFormat videoTimeBase:(AVRational)videoTimeBase {
if ((self = [self init])) {
_path = [path retain];
_format = [format retain];
_codecName = [codecName retain];
if (_codecName)
_presets = [[FFPresets alloc] initWithCodeName:_codecName];
- _width = width;
- _height = height;
- _pixelFormat = pixelFormat;
+ _pictureFormat = pictureFormat;
_videoTimeBase = videoTimeBase;
- if (_height > 0)
- _sampleAspectRatio = FFFindRationalApproximation((float)_width/(float)_height, 255);
+ if (_pictureFormat.height > 0)
+ _sampleAspectRatio = FFFindRationalApproximation((float)_pictureFormat.width/(float)_pictureFormat.height, 255);
}
return self;
}
@@ -45,9 +41,9 @@ - (void)dealloc {
- (void)apply:(AVCodecContext *)codecContext {
[_presets apply:codecContext];
- codecContext->width = _width;
- codecContext->height = _height;
- codecContext->pix_fmt = _pixelFormat;
+ codecContext->width = _pictureFormat.width;
+ codecContext->height = _pictureFormat.height;
+ codecContext->pix_fmt = _pictureFormat.pixelFormat;
codecContext->time_base = _videoTimeBase;
codecContext->sample_aspect_ratio = _sampleAspectRatio;
}
@@ -104,10 +104,10 @@ - (BOOL)drawView:(CGRect)frame inView:(GHGLView *)view {
return NO;
}
*/
- AVFrame *avFrame = [_reader nextFrame:nil];
- if (avFrame == NULL) return NO;
+ FFPictureFrame pictureFrame = [_reader nextFrame:nil];
+ if (pictureFrame.frame == NULL) return NO;
- uint8_t *nextData = avFrame->data[0];
+ uint8_t *nextData = pictureFrame.frame->data[0];
if (nextData == NULL) return NO;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
@@ -120,9 +120,9 @@ - (BOOL)drawView:(CGRect)frame inView:(GHGLView *)view {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
if (!_textureLoaded) {
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, _reader.converter.encoderOptions.width, _reader.converter.encoderOptions.height, 0, GL_RGB, GL_UNSIGNED_BYTE, nextData);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pictureFrame.pictureFormat.width, pictureFrame.pictureFormat.height, 0, GL_RGB, GL_UNSIGNED_BYTE, nextData);
} else {
- glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, _reader.converter.encoderOptions.width, _reader.converter.encoderOptions.height,
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pictureFrame.pictureFormat.width, pictureFrame.pictureFormat.height,
GL_RGB, GL_UNSIGNED_BYTE, nextData);
}
@@ -28,6 +28,7 @@
FFDecoder *_decoder;
AVFrame *_decoderFrame;
+ FFPictureFrame _decodedFrame;
id<FFProcessingDelegate> _delegate; // Weak
@@ -100,19 +100,24 @@ - (BOOL)_processAtIndex:(NSInteger)index count:(NSInteger)count error:(NSError *
//FFDebug(@"Decoded frame, pict_type=%@", NSStringFromAVFramePictType(_decoderFrame->pict_type));
_decoderFrame->pts += _previousEndPTS;
+ _decodedFrame = FFPictureFrameMake(_decoderFrame, _decoder.options.pictureFormat);
+
[_delegate processing:self didReadFramePTS:[_decoder readVideoPTS] duration:[_decoder videoDuration]
index:index count:count];
// Apply filter
- if (_filter)
- _decoderFrame = [_filter filterFrame:_decoderFrame decoder:_decoder];
+ if (_filter) {
+ _decodedFrame = [_filter filterPictureFrame:_decodedFrame error:error];
+ if (!_decodedFrame.frame) break;
+ }
// Run processor
- if (![_processor processFrame:_decoderFrame decoder:_decoder index:index error:error])
+ if (![_processor processPictureFrame:_decodedFrame decoder:_decoder index:index error:error])
break;
}
- _previousEndPTS = _decoderFrame->pts + 1; // TODO(gabe): Fix me
+ if (_decoderFrame)
+ _previousEndPTS = _decoderFrame->pts + 1; // TODO(gabe): Fix me
// Last file
if (index == (count - 1)) {
Oops, something went wrong.

0 comments on commit 28fd62d

Please sign in to comment.