Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

Improve and fix the upload of video data to textures.

The new version copies only the required data, nothing more. It also avoids
reading beyond buffer boundaries for the even-odd-row input mode.
  • Loading branch information...
commit 795897c6131f802afe8c1f793324e31d06161e59 1 parent a7956ce
@marlam marlam authored
View
13 src/decoder.cpp
@@ -42,6 +42,19 @@ std::string decoder::video_frame_format_name(enum video_frame_format f)
}
}
+int decoder::video_frame_format_planes(enum video_frame_format f)
+{
+ switch (f)
+ {
+ case frame_format_yuv420p:
+ return 3;
+ break;
+ case frame_format_bgra32:
+ return 1;
+ break;
+ }
+}
+
std::string decoder::audio_sample_format_name(enum audio_sample_format f)
{
switch (f)
View
1  src/decoder.h
@@ -42,6 +42,7 @@ class decoder
};
static std::string video_frame_format_name(enum video_frame_format f);
+ static int video_frame_format_planes(enum video_frame_format f);
/* The audio sample format */
View
147 src/input.cpp
@@ -22,6 +22,7 @@
#include "config.h"
#include <cctype>
+#include <cstdlib>
#include "debug.h"
#include "exc.h"
@@ -472,101 +473,99 @@ int64_t input::read_video_frame()
return t;
}
-void input::get_video_frame(enum decoder::video_frame_format fmt,
- uint8_t *l_data[3], size_t l_line_size[3],
- uint8_t *r_data[3], size_t r_line_size[3])
+void input::prepare_video_frame()
{
- uint8_t *data[3], *data1[3];
- size_t line_size[3], line_size1[3];
+ _decoders.at(_video_decoders[0])->get_video_frame(_video_streams[0], video_frame_format(),
+ _video_data[0], _video_data_line_size[0]);
+ if (_mode == separate)
+ {
+ _decoders.at(_video_decoders[1])->get_video_frame(_video_streams[1], video_frame_format(),
+ _video_data[1], _video_data_line_size[1]);
+ }
+}
+
/* Round x up to the next multiple of 4 (returns x unchanged if it is
 * already a multiple of 4). Used to pad row sizes to 4-byte boundaries. */
static int next_multiple_of_4(int x)
{
    const int remainder = x % 4;
    if (remainder == 0)
    {
        return x;
    }
    return x + (4 - remainder);
}
+
+void input::get_video_frame(int view, int plane, void *buf)
+{
+ if (_swap_eyes)
+ {
+ view = (view == 0 ? 1 : 0);
+ }
+
+ uint8_t *dst = reinterpret_cast<uint8_t *>(buf);
+ uint8_t *src;
+ size_t src_offset;
+ size_t src_row_size;
+ size_t dst_row_width;
+ size_t dst_row_size;
+ size_t height;
+
+ if (video_frame_format() == decoder::frame_format_yuv420p)
+ {
+ if (plane == 0)
+ {
+ dst_row_width = video_width();
+ dst_row_size = next_multiple_of_4(dst_row_width);
+ height = video_height();
+ }
+ else
+ {
+ dst_row_width = video_width() / 2;
+ dst_row_size = next_multiple_of_4(dst_row_width);
+ height = video_height() / 2;
+ }
+ }
+ else
+ {
+ dst_row_width = video_width() * 4;
+ dst_row_size = dst_row_width;
+ height = video_height();
+ }
- _decoders.at(_video_decoders[0])->get_video_frame(_video_streams[0], fmt, data, line_size);
switch (_mode)
{
case separate:
- _decoders.at(_video_decoders[1])->get_video_frame(_video_streams[1], fmt, data1, line_size1);
- l_data[0] = data[0];
- l_data[1] = data[1];
- l_data[2] = data[2];
- l_line_size[0] = line_size[0];
- l_line_size[1] = line_size[1];
- l_line_size[2] = line_size[2];
- r_data[0] = data1[0];
- r_data[1] = data1[1];
- r_data[2] = data1[2];
- r_line_size[0] = line_size1[0];
- r_line_size[1] = line_size1[1];
- r_line_size[2] = line_size1[2];
+ src = _video_data[view][plane];
+ src_row_size = _video_data_line_size[view][plane];
+ src_offset = 0;
break;
case top_bottom:
case top_bottom_half:
- l_data[0] = data[0];
- l_data[1] = data[1];
- l_data[2] = data[2];
- l_line_size[0] = line_size[0];
- l_line_size[1] = line_size[1];
- l_line_size[2] = line_size[2];
- r_data[0] = l_data[0] + video_height() * line_size[0];
- r_data[1] = l_data[1] + video_height() / 2 * line_size[1];
- r_data[2] = l_data[2] + video_height() / 2 * line_size[2];
- r_line_size[0] = l_line_size[0];
- r_line_size[1] = l_line_size[1];
- r_line_size[2] = l_line_size[2];
+ src = _video_data[0][plane];
+ src_row_size = _video_data_line_size[0][plane];
+ src_offset = view * height * src_row_size;
break;
case left_right:
case left_right_half:
- l_data[0] = data[0];
- l_data[1] = data[1];
- l_data[2] = data[2];
- l_line_size[0] = line_size[0];
- l_line_size[1] = line_size[1];
- l_line_size[2] = line_size[2];
- r_data[0] = data[0] + video_width() * (fmt == decoder::frame_format_yuv420p ? 1 : 4);
- r_data[1] = data[1] + video_width() / 2; // irrelevant for bgra32
- r_data[2] = data[2] + video_width() / 2; // irrelevant for bgra32
- r_line_size[0] = line_size[0];
- r_line_size[1] = line_size[1];
- r_line_size[2] = line_size[2];
+ src = _video_data[0][plane];
+ src_row_size = _video_data_line_size[0][plane];
+ src_offset = view * dst_row_width;
break;
case even_odd_rows:
- l_data[0] = data[0];
- l_data[1] = data[1];
- l_data[2] = data[2];
- l_line_size[0] = 2 * line_size[0];
- l_line_size[1] = 2 * line_size[1];
- l_line_size[2] = 2 * line_size[2];
- r_data[0] = data[0] + line_size[0];
- r_data[1] = data[1] + line_size[1];
- r_data[2] = data[2] + line_size[2];
- r_line_size[0] = 2 * line_size[0];
- r_line_size[1] = 2 * line_size[1];
- r_line_size[2] = 2 * line_size[2];
+ src = _video_data[0][plane];
+ src_row_size = 2 * _video_data_line_size[0][plane];
+ src_offset = view * _video_data_line_size[0][plane];
break;
case mono:
- l_data[0] = data[0];
- l_data[1] = data[1];
- l_data[2] = data[2];
- l_line_size[0] = line_size[0];
- l_line_size[1] = line_size[1];
- l_line_size[2] = line_size[2];
- r_data[0] = data[0];
- r_data[1] = data[1];
- r_data[2] = data[2];
- r_line_size[0] = line_size[0];
- r_line_size[1] = line_size[1];
- r_line_size[2] = line_size[2];
+ src = _video_data[0][plane];
+ src_row_size = _video_data_line_size[0][plane];
+ src_offset = 0;
break;
case automatic:
/* cannot happen */
break;
}
- if (_swap_eyes)
+
+ size_t dst_offset = 0;
+ for (size_t y = 0; y < height; y++)
{
- std::swap(l_data[0], r_data[0]);
- std::swap(l_data[1], r_data[1]);
- std::swap(l_data[2], r_data[2]);
- std::swap(l_line_size[0], r_line_size[0]);
- std::swap(l_line_size[1], r_line_size[1]);
- std::swap(l_line_size[2], r_line_size[2]);
+ std::memcpy(dst + dst_offset, src + src_offset, dst_row_width);
+ dst_offset += dst_row_size;
+ src_offset += src_row_size;
}
}
View
13 src/input.h
@@ -65,6 +65,9 @@ class input
int _audio_channels;
enum decoder::audio_sample_format _audio_sample_format;
int64_t _duration;
+
+ uint8_t *_video_data[2][3];
+ size_t _video_data_line_size[2][3];
blob _audio_buffer;
public:
@@ -158,10 +161,12 @@ class input
/* Read the next video frame into an internal buffer. Return its time stamp in microseconds,
* or a negative value on end-of-file. */
int64_t read_video_frame();
- /* Get the video frame that is currently in the internal buffer, in the given format. */
- void get_video_frame(enum decoder::video_frame_format fmt,
- uint8_t *l_data[3], size_t l_line_size[3],
- uint8_t *r_data[3], size_t r_line_size[3]);
+ /* Prepare the video frame that is currently in the internal buffer. Must be called before get_video_frame(). */
+ void prepare_video_frame();
+ /* Get the video frame data from the internal buffer, for the given view (0=left, 1=right) and the given plane
+ * (depending on the video_frame_format()), and copy it to the given buffer with guaranteed 4-byte alignment for
+ * each row. */
+ void get_video_frame(int view, int plane, void *buf);
/* Release the video frame from the internal buffer */
void release_video_frame();
View
16 src/player.cpp
@@ -434,14 +434,22 @@ void player::run_step(bool *more_steps, int64_t *seek_to, bool *prep_frame, bool
}
}
-void player::get_video_frame(enum decoder::video_frame_format fmt)
+void player::get_video_frame()
{
- _input->get_video_frame(fmt, _l_data, _l_line_size, _r_data, _r_line_size);
+ _input->prepare_video_frame();
}
void player::prepare_video_frame(video_output *vo)
{
- vo->prepare(_l_data, _l_line_size, _r_data, _r_line_size);
+ for (int i = 0; i < (_input->video_is_mono() ? 1 : 2); i++)
+ {
+ for (int j = 0; j < decoder::video_frame_format_planes(_input->video_frame_format()); j++)
+ {
+ void *buf = vo->prepare_start(i, j);
+ _input->get_video_frame(i, j, buf);
+ vo->prepare_finish(i, j);
+ }
+ }
}
void player::release_video_frame()
@@ -518,7 +526,7 @@ void player::run()
}
if (prep_frame)
{
- get_video_frame(_input->video_frame_format());
+ get_video_frame();
prepare_video_frame(_video_output);
release_video_frame();
}
View
4 src/player.h
@@ -77,8 +77,6 @@ class player
bool _pause_request;
int64_t _seek_request;
- uint8_t *_l_data[3], *_r_data[3];
- size_t _l_line_size[3], _r_line_size[3];
void *_audio_data;
size_t _required_audio_data_size;
int64_t _pause_start;
@@ -114,7 +112,7 @@ class player
void make_master();
void run_step(bool *more_steps, int64_t *seek_to, bool *prep_frame, bool *drop_frame, bool *display_frame);
void seek(int64_t seek_to);
- void get_video_frame(enum decoder::video_frame_format fmt);
+ void get_video_frame();
void prepare_video_frame(video_output *vo);
void release_video_frame();
input *get_input() { return _input; }
View
2  src/player_equalizer.cpp
@@ -136,7 +136,7 @@ class player_eq_node : public player
void eq_get_frame()
{
- get_video_frame(get_input()->video_frame_format());
+ get_video_frame();
}
void eq_release_frame()
View
2  src/player_qt.cpp
@@ -102,7 +102,7 @@ bool player_qt_internal::playloop_step()
}
if (prep_frame)
{
- get_video_frame(get_input()->video_frame_format());
+ get_video_frame();
prepare_video_frame(get_video_output());
release_video_frame();
}
View
12 src/video_output.h
@@ -100,10 +100,14 @@ class video_output : public controller
/* Get current state */
virtual const video_output_state &state() const = 0;
- /* Prepare a left/right view pair for display */
- virtual void prepare(
- uint8_t *l_data[3], size_t l_line_size[3],
- uint8_t *r_data[3], size_t r_line_size[3]) = 0;
+ /* Prepare a left/right view pair for display.
+ * The video data is organized in planes, depending on the frame format.
+ * First, call prepare_start() to get a buffer. Then copy the plane data
+ * to this buffer, with a 4-byte alignment of line lengths. Then, call
+ * prepare_finish() with the same view and plane parameters. Repeat for
+ * all planes in both views. */
+ virtual void *prepare_start(int view, int plane) = 0;
+ virtual void prepare_finish(int view, int plane) = 0;
/* Display the prepared left/right view pair */
virtual void activate() = 0;
/* Process window system events */
View
138 src/video_output_opengl.cpp
@@ -32,6 +32,7 @@
#include "msg.h"
#include "str.h"
#include "timer.h"
+#include "debug.h"
#include "video_output_opengl.h"
#include "video_output_opengl_color.fs.glsl.h"
@@ -622,89 +623,96 @@ void video_output_opengl::reshape(int w, int h)
}
}
-static void upload_texture(
- GLuint tex, GLuint pbo,
- GLsizei w, GLsizei h, int bytes_per_pixel, int line_size,
- GLenum fmt, GLenum type, const GLvoid *data)
+/* Step 1: Input of video data:
+ * prepare_start() and prepare_finish() for each data plane and each view
+ * (for mono: only view 0). */
+
+static int next_multiple_of_4(int x)
{
- uintptr_t p = reinterpret_cast<uintptr_t>(data);
- int row_alignment = 1;
- if (p % 8 == 0 && line_size % 8 == 0)
+ return (x / 4 + (x % 4 == 0 ? 0 : 1)) * 4;
+}
+
+void *video_output_opengl::prepare_start(int /* view */, int plane)
+{
+ int w, h;
+ int bytes_per_pixel;
+ if (_src_format == decoder::frame_format_yuv420p)
{
- row_alignment = 8;
+ if (plane == 0)
+ {
+ w = next_multiple_of_4(_src_width);
+ h = next_multiple_of_4(_src_height);
+ }
+ else
+ {
+ w = next_multiple_of_4(_src_width / 2);
+ h = next_multiple_of_4(_src_height / 2);
+ }
+ bytes_per_pixel = 1;
}
- else if (p % 4 == 0 && line_size % 4 == 0)
+ else
{
- row_alignment = 4;
+ w = _src_width;
+ h = _src_height;
+ bytes_per_pixel = 4;
}
- else if (p % 2 == 0 && line_size % 2 == 0)
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, _pbo);
+ glBufferData(GL_PIXEL_UNPACK_BUFFER, w * h * bytes_per_pixel, NULL, GL_STREAM_DRAW);
+ void *pboptr = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
+ if (!pboptr)
{
- row_alignment = 2;
+ debug::oom_abort();
}
-
- glPixelStorei(GL_UNPACK_ROW_LENGTH, line_size / bytes_per_pixel);
- glPixelStorei(GL_UNPACK_ALIGNMENT, row_alignment);
- glBindTexture(GL_TEXTURE_2D, tex);
-
- glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
- glBufferData(GL_PIXEL_UNPACK_BUFFER, h * line_size, NULL, GL_STREAM_DRAW);
- void *pboptr = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
- std::memcpy(pboptr, data, h * line_size);
- glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
- glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, fmt, type, NULL);
- glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ if (reinterpret_cast<uintptr_t>(pboptr) % 4 != 0)
+ {
+ // We can assume that the buffer is always at least aligned at a 4-byte boundary.
+ // This is just a sanity check; this error should never be triggered.
+ msg::err("pixel buffer alignment is less than 4!");
+ debug::crash();
+ }
+ return pboptr;
}
-void video_output_opengl::prepare(
- uint8_t *l_data[3], size_t l_line_size[3],
- uint8_t *r_data[3], size_t r_line_size[3])
+void video_output_opengl::prepare_finish(int view, int plane)
{
int tex_set = (_active_tex_set == 0 ? 1 : 0);
- if (!l_data[0])
- {
- _have_valid_data[tex_set] = false;
- return;
- }
-
- /* Step 1: input of video data */
-
- glActiveTexture(GL_TEXTURE0);
+ int w, h;
+ GLenum format;
+ GLenum type;
+ GLuint tex;
if (_src_format == decoder::frame_format_yuv420p)
{
- upload_texture(_yuv420p_y_tex[tex_set][0], _pbo,
- _src_width, _src_height, 1, l_line_size[0],
- GL_LUMINANCE, GL_UNSIGNED_BYTE, l_data[0]);
- upload_texture(_yuv420p_u_tex[tex_set][0], _pbo,
- _src_width / 2, _src_height / 2, 1, l_line_size[1],
- GL_LUMINANCE, GL_UNSIGNED_BYTE, l_data[1]);
- upload_texture(_yuv420p_v_tex[tex_set][0], _pbo,
- _src_width / 2, _src_height / 2, 1, l_line_size[2],
- GL_LUMINANCE, GL_UNSIGNED_BYTE, l_data[2]);
- if (!_src_is_mono)
+ if (plane == 0)
{
- upload_texture(_yuv420p_y_tex[tex_set][1], _pbo,
- _src_width, _src_height, 1, r_line_size[0],
- GL_LUMINANCE, GL_UNSIGNED_BYTE, r_data[0]);
- upload_texture(_yuv420p_u_tex[tex_set][1], _pbo,
- _src_width / 2, _src_height / 2, 1, r_line_size[1],
- GL_LUMINANCE, GL_UNSIGNED_BYTE, r_data[1]);
- upload_texture(_yuv420p_v_tex[tex_set][1], _pbo,
- _src_width / 2, _src_height / 2, 1, r_line_size[2],
- GL_LUMINANCE, GL_UNSIGNED_BYTE, r_data[2]);
+ w = _src_width;
+ h = _src_height;
+ tex = _yuv420p_y_tex[tex_set][view];
}
- }
- else if (_src_format == decoder::frame_format_bgra32)
- {
- upload_texture(_bgra32_tex[tex_set][0], _pbo,
- _src_width, _src_height, 4, l_line_size[0],
- GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, l_data[0]);
- if (!_src_is_mono)
+ else
{
- upload_texture(_bgra32_tex[tex_set][1], _pbo,
- _src_width, _src_height, 4, r_line_size[0],
- GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, r_data[0]);
+ w = _src_width / 2;
+ h = _src_height / 2;
+ tex = (plane == 1 ? _yuv420p_u_tex[tex_set][view] : _yuv420p_v_tex[tex_set][view]);
}
+ format = GL_LUMINANCE;
+ type = GL_UNSIGNED_BYTE;
}
+ else
+ {
+ w = _src_width;
+ h = _src_height;
+ format = GL_BGRA;
+ type = GL_UNSIGNED_INT_8_8_8_8_REV;
+ tex = _bgra32_tex[tex_set][view];
+ }
+
+ glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, w);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
+ glBindTexture(GL_TEXTURE_2D, tex);
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, format, type, NULL);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+
_have_valid_data[tex_set] = true;
}
View
6 src/video_output_opengl.h
@@ -149,9 +149,9 @@ class video_output_opengl : public video_output
return _state;
}
- virtual void prepare(
- uint8_t *l_data[3], size_t l_line_size[3],
- uint8_t *r_data[3], size_t r_line_size[3]);
+ virtual void *prepare_start(int view, int plane);
+ virtual void prepare_finish(int view, int plane);
+
virtual void activate() = 0;
virtual void process_events() = 0;
Please sign in to comment.
Something went wrong with that request. Please try again.