Skip to content

Commit

Permalink
addressed #6 and wrote full implementation handling frame rate disparities
Browse files Browse the repository at this point in the history
  • Loading branch information
Olive Team committed Feb 2, 2018
1 parent ea89f21 commit aa780b1
Show file tree
Hide file tree
Showing 7 changed files with 160 additions and 96 deletions.
18 changes: 16 additions & 2 deletions include/olive-compositor.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
#define OLIVE_COMPOSITOR_H

#include <stdint.h>
#include <stdio.h>
#include <vector>
class Sequence;
class SDL_Texture;
class AVFrame;
Expand All @@ -12,8 +14,20 @@ class Compositor {
void compose(Sequence* sequence, SDL_Texture* canvas, unsigned long frame);
private:
void retrieveNextFrame(Clip* c, AVFrame* f);
uint8_t* data;
int pitch;
//uint8_t* data;
//int pitch;
uint8_t* yPlane;
uint8_t* uPlane;
uint8_t* vPlane;
int uvPitch;
size_t yPlaneSz;
size_t uvPlaneSz;

bool cached;
int cachedSequenceWidth = 0;
int cachedSequenceHeight = 0;

std::vector<Clip*> currentClips;
};

#endif
1 change: 1 addition & 0 deletions include/olive-timeline.h
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,7 @@ class Timeline : public Panel {
double time;
void getNextFrameTime();
unsigned long playhead;
bool startup;
};

#endif
3 changes: 2 additions & 1 deletion include/sources-media.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@

class Stream;
class AVFormatContext;
class AVCodecParameters;
class AVCodecContext;
class AVCodec;
class AVStream;
class AVFrame;

class Media {
public:
Expand All @@ -31,6 +31,7 @@ class Stream {
AVCodec* codec;
AVCodecContext* codecCtx;
unsigned long lastFrame;
AVFrame* cacheFrame;
};

#endif
213 changes: 130 additions & 83 deletions src/olive-compositor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,103 +13,150 @@ extern "C" {
}

void Compositor::compose(Sequence* sequence, SDL_Texture* canvas, unsigned long frame) {
// NOTE(review): this span is a rendered diff with no +/- markers — lines from the
// removed RGB24/SDL_LockTexture implementation appear interleaved with the added
// YUV420P plane-buffer implementation, so the text below is NOT compilable as-is.
// Confirm against the repository at commit aa780b1 before treating it as source.
// prepare time test - determine how long it takes to grab this frame
Uint32 start = SDL_GetTicks();
// Lazily (re)allocate the Y/U/V plane buffers whenever the sequence dimensions
// change or nothing has been cached yet.
if (sequence->width != cachedSequenceWidth || sequence->height != cachedSequenceHeight || !cached) {
// free old buffers
if (cached) {
free(yPlane);
free(uPlane);
free(vPlane);
}

// realloc buffers
// YUV 4:2:0 layout: full-resolution luma plane, quarter-size chroma planes.
yPlaneSz = sequence->width * sequence->height;
uvPlaneSz = yPlaneSz / 4;
yPlane = (uint8_t*)malloc(yPlaneSz);
uPlane = (uint8_t*)malloc(uvPlaneSz);
vPlane = (uint8_t*)malloc(uvPlaneSz);
if (!yPlane || !uPlane || !vPlane) {
printf("[ERROR] Could not allocate pixel buffers.");
}

// NOTE(review): the SDL_LockTexture/`data`/`pitch` lines below belong to the
// removed RGB24 path (the header diff comments those members out) — diff residue.
// Update SDL canvas with retrieved image
SDL_LockTexture(canvas, NULL, (void**) &data, &pitch);
uvPitch = sequence->width / 2;

// blank canvas
unsigned int datalength = sequence->width * sequence->height * 3;
memset(data, 0, datalength);
cachedSequenceWidth = sequence->width;
cachedSequenceHeight = sequence->height;
cached = true;
}

// clear frame for composition
// Y=0 with U=V=128 (neutral chroma) — presumably a black frame; TODO confirm.
memset(yPlane, 0, yPlaneSz);
memset(uPlane, 128, uvPlaneSz);
memset(vPlane, 128, uvPlaneSz);

// First pass: collect clips active at `frame`, insertion-sorting by track
// into currentClips (see insertion loop further down).
// sorts clips in order of track
for (unsigned int i=0;i<sequence->getClipCount();i++) {
Clip* c = sequence->getClip(i);
if (frame >= c->timelineIn && frame < c->timelineOut) {
if (c->stream->avstream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { // disables audio stream reading for now
// NOTE(review): from here down to the do/while, this is the REMOVED decode
// logic (per-call av_frame_alloc + RGB24 sws context); the added version of
// the same logic reappears below using c->stream->cacheFrame.
// allocate frame
AVFrame* pFrame = av_frame_alloc();

//int dstW = sequence->width;
//int dstH = sequence->height;

// initialize SWS context for software scaling
struct SwsContext* sws_ctx = sws_getContext(
c->stream->avstream->codecpar->width,
c->stream->avstream->codecpar->height,
static_cast<AVPixelFormat>(c->stream->avstream->codecpar->format),
c->stream->avstream->codecpar->width,
c->stream->avstream->codecpar->height,
AV_PIX_FMT_RGB24,
SWS_BILINEAR,
NULL,
NULL,
NULL
);

// get values for seek
// Map the sequence-relative frame number to media time (handles the
// frame-rate disparity this commit addresses).
double timecodeSecs = ((frame - c->timelineIn + c->clipIn) / sequence->frameRate);
unsigned long targetFrame = floor(timecodeSecs * av_q2d(c->stream->avstream->avg_frame_rate));
double timebase = av_q2d(c->stream->avstream->time_base);
double targetTime = timecodeSecs / timebase;

// Small forward gaps are decoded through; anything else seeks to a keyframe.
int frameDiff = targetFrame - c->stream->lastFrame;
if (frameDiff == 0) {
// TODO insert code if the frame is the same frame because we don't need to decode anything (will need to cache the last frame somewhere tho)

} else if (frameDiff > 0 && frameDiff < 4) {
// skipping a few frames is faster than seeking to them,
// so this will speed up playback
for (int i=0;i<frameDiff;i++) {
retrieveNextFrame(c, pFrame);
}
} else {
printf("[INFO] Seeked clip\n");

// Seeks to nearest keyframe
avcodec_flush_buffers(c->stream->codecCtx);
av_seek_frame(c->stream->media->formatContext, c->stream->fileIndex, targetTime, AVSEEK_FLAG_BACKWARD);
//avformat_seek_file(c->stream->media->formatContext, c->stream->fileIndex, 0, targetTime, targetTime, 0);
//avformat_flush(c->stream->media->formatContext);
retrieveNextFrame(c, pFrame);

// play up to the frame we actually want
unsigned long retrievedFrame = 0;
do {
retrieveNextFrame(c, pFrame);
if (retrievedFrame == 0) {
if (targetFrame != 0) retrievedFrame = floor(av_frame_get_best_effort_timestamp(pFrame) * timebase * av_q2d(c->stream->avstream->avg_frame_rate));
} else {
retrievedFrame++;
}
//printf("- TARGET: %ld - RETRIEVED: %ld\n", targetFrame, retrievedFrame);
} while (retrievedFrame < targetFrame);
//printf("FINISHED\n");
// ADDED code resumes here: ordered insert of the active clip into
// currentClips keyed on c->track.
bool found = false;
for (unsigned int j=0;j<currentClips.size();j++) {
if (currentClips[j]->track < c->track) {
currentClips.insert(currentClips.begin()+j, c);
j = currentClips.size();
found = true;
}
}
if (!found) {
currentClips.push_back(c);
}
}
}

// Second pass (added code): decode/seek each gathered clip and composite it
// into the shared Y/U/V plane buffers.
// compose clips retrieved from above
for (unsigned int i=0;i<currentClips.size();i++) {
Clip* c = currentClips[i];
if (c->stream->avstream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { // disables audio stream reading for now
// prepare time test - determine how long it takes to grab this frame
Uint32 start = SDL_GetTicks();

// allocate frame
// The decode frame is cached per-stream now, so it is allocated once.
if (c->stream->cacheFrame == NULL) {
c->stream->cacheFrame = av_frame_alloc();
}

// initialize SWS context for software scaling
struct SwsContext* sws_ctx = sws_getContext(
c->stream->avstream->codecpar->width,
c->stream->avstream->codecpar->height,
static_cast<AVPixelFormat>(c->stream->avstream->codecpar->format),
c->stream->avstream->codecpar->width,
c->stream->avstream->codecpar->height,
AV_PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL,
NULL,
NULL
);

// get values for seek
double timecodeSecs = ((frame - c->timelineIn + c->clipIn) / sequence->frameRate);
unsigned long targetFrame = floor(timecodeSecs * av_q2d(c->stream->avstream->avg_frame_rate));
double timebase = av_q2d(c->stream->avstream->time_base);
double targetTime = timecodeSecs / timebase;

int frameDiff = targetFrame - c->stream->lastFrame;
if (frameDiff > 0 && frameDiff < 4) {
// skipping a few frames is faster than seeking to them,
// so this will speed up playback
for (int i=0;i<frameDiff;i++) {
retrieveNextFrame(c, c->stream->cacheFrame);
}
} else if (frameDiff != 0) {
printf("[INFO] Seeked clip\n");

// Seeks to nearest keyframe
avcodec_flush_buffers(c->stream->codecCtx);
av_seek_frame(c->stream->media->formatContext, c->stream->fileIndex, targetTime, AVSEEK_FLAG_BACKWARD);
//avformat_seek_file(c->stream->media->formatContext, c->stream->fileIndex, 0, targetTime, targetTime, 0);
//avformat_flush(c->stream->media->formatContext);
retrieveNextFrame(c, c->stream->cacheFrame);

// play up to the frame we actually want
unsigned long retrievedFrame = 0;
do {
retrieveNextFrame(c, c->stream->cacheFrame);
if (retrievedFrame == 0) {
// First decoded frame: derive its index from the best-effort PTS.
if (targetFrame != 0) retrievedFrame = floor(av_frame_get_best_effort_timestamp(c->stream->cacheFrame) * timebase * av_q2d(c->stream->avstream->avg_frame_rate));
} else {
retrievedFrame++;
}
//printf("- TARGET: %ld - RETRIEVED: %ld\n", targetFrame, retrievedFrame);
} while (retrievedFrame < targetFrame);
//printf("FINISHED\n");
}

// assuming we got the targetFrame, record it
c->stream->lastFrame = targetFrame;

// Show c->stream->cacheFrame
// Plane pointers/linesizes so sws_scale writes straight into the shared buffers.
uint8_t* data[AV_NUM_DATA_POINTERS];
data[0] = yPlane;
data[1] = uPlane;
data[2] = vPlane;
int linesize[AV_NUM_DATA_POINTERS];
linesize[0] = sequence->width;
linesize[1] = linesize[2] = uvPitch;

// assuming we got the targetFrame, record it
c->stream->lastFrame = targetFrame;
// Convert the image into YUV format that SDL uses
sws_scale(sws_ctx, (uint8_t const * const *) c->stream->cacheFrame->data, c->stream->cacheFrame->linesize, 0, c->stream->avstream->codecpar->height, data, linesize);

// Convert the image into YUV format that SDL uses
sws_scale(sws_ctx, (uint8_t const * const *) pFrame->data, pFrame->linesize, 0, c->stream->avstream->codecpar->height, &data, &pitch);
av_frame_unref(pFrame);
sws_freeContext(sws_ctx);
} else {
printf("[WARNING] Skipped audio stream because Olive has no audio support yet.\n");
//av_frame_unref(c->stream->cacheFrame);
sws_freeContext(sws_ctx);

// Warn when decoding+scaling exceeded this frame's real-time budget.
double exp = (1000 / sequence->frameRate);
double ret = SDL_GetTicks() - start;
if (ret > exp) {
printf("[WARNING] Frame %lu took too long to render (expected %f, took %f)\n", frame, exp, ret);
}
} else {
printf("[WARNING] Skipped audio stream because Olive has no audio support yet.\n");
}
}

// NOTE(review): SDL_UnlockTexture and the trailing timing block below are
// removed-path residue; the added path uploads via SDL_UpdateYUVTexture.
// apply changes to canvas texture
SDL_UnlockTexture(canvas);

// evaluate time taken to render frame
double exp = (1000 / sequence->frameRate);
double ret = SDL_GetTicks() - start;
if (ret > exp) {
printf("[WARNING] Frame %lu took too long to render (expected %f, took %f)\n", frame, exp, ret);
if (SDL_UpdateYUVTexture(canvas, NULL, yPlane, sequence->width, uPlane, uvPitch, vPlane, uvPitch) < 0) {
printf("[ERROR] Could not update texture. %s\n", SDL_GetError());
}

currentClips.clear();
}

void Compositor::retrieveNextFrame(Clip* c, AVFrame* f) {
Expand Down
2 changes: 1 addition & 1 deletion src/olive-core.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -102,10 +102,10 @@ bool Core::launch() {
break;
}

timeline->handleEvent(&e);
sources->handleEvent(&e);
viewer->handleEvent(&e);
fx->handleEvent(&e);
timeline->handleEvent(&e);
// if (e.type == SDL_WINDOWEVENT) {
// if (e.window.event == SDL_WINDOWEVENT_RESIZED) {
// update(window);
Expand Down
10 changes: 7 additions & 3 deletions src/olive-timeline.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@ Timeline::Timeline(Core* c) : Panel(c) {
trackHeight = 40;
setTool(TOOL_EDIT);
mousedown = dragStartedWithin = dragging = isShiftDown = playing = false;
startup = true;

toolbarCount = 5;
timelineToolbar = new UIButton* [toolbarCount];
Expand All @@ -91,12 +92,15 @@ Timeline::Timeline(Core* c) : Panel(c) {
// Wires the panels this timeline talks to. NOTE(review): the playhead lines
// below are removed-diff residue — this commit moved playhead initialization
// into Timeline::render() behind the new `startup` flag; confirm upstream.
void Timeline::setDependencies(Sources* s, Viewer* v) {
sources = s;
viewer = v;

//setPlayhead(0);
playhead = 0;
}

void Timeline::render(SDL_Renderer* renderer) {
// initialize canvas texture
if (startup) {
setPlayhead(0);
startup = false;
}

// TODO hefty calculations 60 times per second?
viewportActual.x = viewport.x + toolbarButtonSize + 1;
viewportActual.y = viewport.y;
Expand Down
9 changes: 3 additions & 6 deletions src/olive-viewer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,14 +99,10 @@ void Viewer::render(SDL_Renderer* renderer) {
viewerControls[i]->render(renderer);
}

/*SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
SDL_Rect canvasRect = {viewport.x, viewport.y, viewport.w, controlY-uiPadding};
SDL_RenderFillRect(renderer, &canvasRect);*/

if (!canvasInit) {
canvas = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGB24, SDL_TEXTUREACCESS_STREAMING, timeline->sequence.width, timeline->sequence.height);
canvas = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, timeline->sequence.width, timeline->sequence.height);
if (canvas == NULL) {
printf("[ERROR] Couldn't create viewer texture. %s\n", SDL_GetError());
printf("[ERROR] Couldn't create canvas texture. %s\n", SDL_GetError());
} else {
canvasInit = true;
}
Expand All @@ -118,6 +114,7 @@ void Viewer::render(SDL_Renderer* renderer) {
int maxH = viewport.h-viewerControls[0]->h-uiPadding-uiPadding;
double panelar = (double) maxW / (double) maxH;
int dstW, dstH;

// TODO messy but works
if (ar > 1) { // video is landscape
if (panelar > ar) {
Expand Down

0 comments on commit aa780b1

Please sign in to comment.