Improve frame format handling.

The frame format is now determined by the input alone. The video output has to
handle that format and can no longer choose a different one. Likewise, the input
determines whether a video is mono or not; this cannot change either.
1 parent d9ce810 · commit 73b2c7aa700d90c520bc37b280d672e89449b5b0 · marlam committed Dec 17, 2010
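The practical effect is easiest to see in the player code: the video output is opened with the format and mono flag taken straight from the input, and frames are then requested in that same format (video_output::frame_format() is removed). A minimal sketch of the new call pattern, condensed from the src/player.cpp hunks in this commit (surrounding player code omitted):

    // Sketch only, condensed from src/player.cpp below.
    // The input alone determines the frame format and whether the video is mono;
    // the video output is opened with exactly those source properties.
    _video_output->open(
        _input->video_frame_format(), _input->video_is_mono(),
        _input->video_width(), _input->video_height(), _input->video_aspect_ratio(),
        video_mode, _video_state, video_flags, -1, -1);

    // Each frame is fetched in the input's own format; the video output can no
    // longer request a different one, since video_output::frame_format() is gone.
    get_video_frame(_input->video_frame_format());
    prepare_video_frame(_video_output);
    release_video_frame();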
2 src/decoder.h
@@ -84,7 +84,7 @@ class decoder
virtual int video_frame_rate_numerator(int video_stream) const throw () = 0; // frames per second
virtual int video_frame_rate_denominator(int video_stream) const throw () = 0; // frames per second
virtual int64_t video_duration(int video_stream) const throw () = 0; // microseconds
- virtual enum video_frame_format video_preferred_frame_format(int video_stream) const throw () = 0;
+ virtual enum video_frame_format video_frame_format(int video_stream) const throw () = 0;
/* Get information about audio streams. */
virtual int audio_rate(int audio_stream) const throw () = 0; // samples per second
6 src/decoder_ffmpeg.cpp
@@ -448,7 +448,7 @@ int64_t decoder_ffmpeg::video_duration(int index) const throw ()
return duration * 1000000 * time_base.num / time_base.den;
}
-enum decoder::video_frame_format decoder_ffmpeg::video_preferred_frame_format(int index) const throw ()
+enum decoder::video_frame_format decoder_ffmpeg::video_frame_format(int index) const throw ()
{
return (_stuff->video_codec_ctxs.at(index)->pix_fmt == PIX_FMT_YUV420P
? decoder::frame_format_yuv420p
@@ -576,7 +576,7 @@ void decoder_ffmpeg::release_video_frame(int video_stream)
av_free_packet(&(_stuff->packets[video_stream]));
}
-void decoder_ffmpeg::get_video_frame(int video_stream, video_frame_format fmt,
+void decoder_ffmpeg::get_video_frame(int video_stream, enum video_frame_format fmt,
uint8_t *data[3], size_t line_size[3])
{
data[0] = NULL;
@@ -587,7 +587,7 @@ void decoder_ffmpeg::get_video_frame(int video_stream, video_frame_format fmt,
line_size[2] = 0;
if (fmt == decoder::frame_format_yuv420p)
{
- if (video_preferred_frame_format(video_stream) == decoder::frame_format_yuv420p)
+ if (video_frame_format(video_stream) == decoder::frame_format_yuv420p)
{
data[0] = _stuff->frames[video_stream]->data[0];
data[1] = _stuff->frames[video_stream]->data[1];
4 src/decoder_ffmpeg.h
@@ -61,15 +61,15 @@ class decoder_ffmpeg : public decoder
virtual int video_frame_rate_numerator(int video_stream) const throw ();
virtual int video_frame_rate_denominator(int video_stream) const throw ();
virtual int64_t video_duration(int video_stream) const throw ();
- virtual video_frame_format video_preferred_frame_format(int video_stream) const throw ();
+ virtual enum video_frame_format video_frame_format(int video_stream) const throw ();
virtual int audio_rate(int audio_stream) const throw ();
virtual int audio_channels(int audio_stream) const throw ();
virtual enum audio_sample_format audio_sample_format(int audio_stream) const throw ();
virtual int64_t audio_duration(int video_stream) const throw ();
virtual int64_t read_video_frame(int video_stream);
- virtual void get_video_frame(int video_stream, video_frame_format fmt,
+ virtual void get_video_frame(int video_stream, enum video_frame_format fmt,
uint8_t *data[3], size_t line_size[3]);
virtual void release_video_frame(int video_stream);
10 src/input.cpp
@@ -181,10 +181,10 @@ void input::open(std::vector<decoder *> decoders,
{
throw exc("video streams have different frame rates");
}
- if (decoders.at(video1_decoder)->video_preferred_frame_format(video1_stream)
- != decoders.at(video0_decoder)->video_preferred_frame_format(video0_stream))
+ if (decoders.at(video1_decoder)->video_frame_format(video1_stream)
+ != decoders.at(video0_decoder)->video_frame_format(video0_stream))
{
- throw exc("video streams have different preferred frame formats");
+ throw exc("video streams have different frame formats");
}
}
_mode = mode;
@@ -388,7 +388,7 @@ void input::open(std::vector<decoder *> decoders,
}
_video_frame_rate_num = _decoders.at(_video_decoders[0])->video_frame_rate_numerator(_video_streams[0]);
_video_frame_rate_den = _decoders.at(_video_decoders[0])->video_frame_rate_denominator(_video_streams[0]);
- _video_preferred_frame_format = _decoders.at(_video_decoders[0])->video_preferred_frame_format(_video_streams[0]);
+ _video_frame_format = _decoders.at(_video_decoders[0])->video_frame_format(_video_streams[0]);
if (audio_stream != -1)
{
@@ -431,7 +431,7 @@ void input::open(std::vector<decoder *> decoders,
msg::inf("input:");
msg::inf(" video: %dx%d, format %s,",
video_width(), video_height(),
- decoder::video_frame_format_name(video_preferred_frame_format()).c_str());
+ decoder::video_frame_format_name(video_frame_format()).c_str());
msg::inf(" aspect ratio %g:1, %g fps, %g seconds,",
video_aspect_ratio(),
static_cast<float>(video_frame_rate_numerator()) / static_cast<float>(video_frame_rate_denominator()),
12 src/input.h
@@ -59,7 +59,8 @@ class input
float _video_aspect_ratio;
int _video_frame_rate_num;
int _video_frame_rate_den;
- enum decoder::video_frame_format _video_preferred_frame_format;
+ enum decoder::video_frame_format _video_frame_format;
+ bool _video_is_mono;
int _audio_rate;
int _audio_channels;
enum decoder::audio_sample_format _audio_sample_format;
@@ -117,9 +118,14 @@ class input
return static_cast<int64_t>(_video_frame_rate_den) * 1000000 / _video_frame_rate_num;
}
- enum decoder::video_frame_format video_preferred_frame_format() const throw ()
+ enum decoder::video_frame_format video_frame_format() const throw ()
{
- return _video_preferred_frame_format;
+ return _video_frame_format;
+ }
+
+ bool video_is_mono() const throw ()
+ {
+ return (_mode == mono);
}
bool has_audio() const throw ()
12 src/player.cpp
@@ -154,14 +154,6 @@ void player::create_input(enum input::mode input_mode)
input_mode);
}
-void player::get_input_info(int *w, int *h, float *ar, enum decoder::video_frame_format *fmt)
-{
- *w = _input->video_width();
- *h = _input->video_height();
- *ar = _input->video_aspect_ratio();
- *fmt = _input->video_preferred_frame_format();
-}
-
void player::create_audio_output()
{
if (_input->has_audio() && !_benchmark)
@@ -194,7 +186,7 @@ void player::open_video_output(enum video_output::mode video_mode, unsigned int
}
}
_video_output->open(
- _input->video_preferred_frame_format(),
+ _input->video_frame_format(), _input->video_is_mono(),
_input->video_width(), _input->video_height(), _input->video_aspect_ratio(),
video_mode, _video_state, video_flags, -1, -1);
_video_output->process_events();
@@ -526,7 +518,7 @@ void player::run()
}
if (prep_frame)
{
- get_video_frame(_video_output->frame_format());
+ get_video_frame(_input->video_frame_format());
prepare_video_frame(_video_output);
release_video_frame();
}
1 src/player.h
@@ -103,7 +103,6 @@ class player
void reset_playstate();
void create_decoders(const std::vector<std::string> &filenames);
void create_input(enum input::mode input_mode);
- void get_input_info(int *w, int *h, float *ar, enum decoder::video_frame_format *fmt);
void create_audio_output();
void create_video_output();
void set_video_output(video_output *vo)
49 src/player_equalizer.cpp
@@ -65,7 +65,6 @@ class player_eq_node : public player
{
private:
bool _is_master;
- enum decoder::video_frame_format _fmt;
public:
player_eq_node() : player(player::slave), _is_master(false)
@@ -80,16 +79,19 @@ class player_eq_node : public player
bool eq_init(const player_init_data &init_data,
int *src_width, int *src_height, float *src_aspect_ratio,
- enum decoder::video_frame_format *src_preferred_frame_format)
+ enum decoder::video_frame_format *src_frame_format, bool *src_is_mono)
{
try
{
set_benchmark(init_data.benchmark);
reset_playstate();
create_decoders(init_data.filenames);
create_input(init_data.input_mode);
- get_input_info(src_width, src_height, src_aspect_ratio, src_preferred_frame_format);
- _fmt = *src_preferred_frame_format;
+ *src_width = get_input()->video_width();
+ *src_height = get_input()->video_height();
+ *src_aspect_ratio = get_input()->video_aspect_ratio();
+ *src_frame_format = get_input()->video_frame_format();
+ *src_is_mono = get_input()->video_is_mono();
if (_is_master)
{
create_audio_output();
@@ -134,7 +136,7 @@ class player_eq_node : public player
void eq_get_frame()
{
- get_video_frame(_fmt);
+ get_video_frame(get_input()->video_frame_format());
}
void eq_release_frame()
@@ -162,10 +164,10 @@ class video_output_opengl_eq_window : public video_output_opengl
}
void eq_initialize(int src_width, int src_height, float src_aspect_ratio,
- enum decoder::video_frame_format src_preferred_frame_format)
+ enum decoder::video_frame_format src_frame_format, bool src_is_mono)
{
set_mode(mono_left); // to display the right view, we can toggle the swap_eyes flag
- set_source_info(src_width, src_height, src_aspect_ratio, src_preferred_frame_format);
+ set_source_info(src_width, src_height, src_aspect_ratio, src_frame_format, src_is_mono);
initialize();
}
@@ -189,7 +191,8 @@ class video_output_opengl_eq_window : public video_output_opengl
virtual int screen_pos_y() { return 0; }
virtual void receive_notification(const notification &) {}
virtual bool supports_stereo() { return false; }
- virtual void open(enum decoder::video_frame_format, int, int, float, int, const video_output_state&, unsigned int, int, int) {}
+ virtual void open(enum decoder::video_frame_format, bool, int, int, float, int,
+ const video_output_state&, unsigned int, int, int) {}
virtual void activate() {}
virtual void process_events() {}
virtual void close() {}
@@ -359,14 +362,15 @@ class eq_config : public eq::Config
// Source video properties:
int src_width, src_height;
float src_aspect_ratio;
- enum decoder::video_frame_format src_preferred_frame_format;
+ enum decoder::video_frame_format src_frame_format;
+ bool src_is_mono;
public:
eq_config(eq::ServerPtr parent)
: eq::Config(parent), _is_master_config(false), _eq_init_data(), _eq_frame_data(),
_player(), _controller(false),
src_width(-1), src_height(-1), src_aspect_ratio(0.0f),
- src_preferred_frame_format(decoder::frame_format_yuv420p)
+ src_frame_format(decoder::frame_format_yuv420p), src_is_mono(true)
{
}
@@ -387,7 +391,8 @@ class eq_config : public eq::Config
_eq_frame_data.video_state = _eq_init_data.init_data.video_state;
// Initialize master player
_player.eq_make_master();
- if (!_player.eq_init(init_data, &src_width, &src_height, &src_aspect_ratio, &src_preferred_frame_format))
+ if (!_player.eq_init(init_data, &src_width, &src_height, &src_aspect_ratio,
+ &src_frame_format, &src_is_mono))
{
return false;
}
@@ -636,13 +641,14 @@ class eq_node : public eq::Node
eq_frame_data frame_data;
int src_width, src_height;
float src_aspect_ratio;
- enum decoder::video_frame_format src_preferred_frame_format;
+ enum decoder::video_frame_format src_frame_format;
+ bool src_is_mono;
eq_node(eq::Config *parent)
: eq::Node(parent), _is_app_node(false),
_player(), init_data(), frame_data(),
src_width(-1), src_height(-1), src_aspect_ratio(-1.0f),
- src_preferred_frame_format(decoder::frame_format_yuv420p)
+ src_frame_format(decoder::frame_format_yuv420p), src_is_mono(true)
{
}
@@ -675,7 +681,8 @@ class eq_node : public eq::Node
// Create decoders and input
if (!_is_app_node)
{
- if (!_player.eq_init(init_data.init_data, &src_width, &src_height, &src_aspect_ratio, &src_preferred_frame_format))
+ if (!_player.eq_init(init_data.init_data, &src_width, &src_height, &src_aspect_ratio,
+ &src_frame_format, &src_is_mono))
{
return false;
}
@@ -685,7 +692,8 @@ class eq_node : public eq::Node
src_width = config->src_width;
src_height = config->src_height;
src_aspect_ratio = config->src_aspect_ratio;
- src_preferred_frame_format = config->src_preferred_frame_format;
+ src_frame_format = config->src_frame_format;
+ src_is_mono = config->src_is_mono;
}
msg::dbg(HERE);
return true;
@@ -764,13 +772,8 @@ class eq_node : public eq::Node
}
public:
- void prep_frame(video_output *vo, enum decoder::video_frame_format fmt)
+ void prep_frame(video_output *vo)
{
- if (fmt != src_preferred_frame_format)
- {
- msg::err("cannot provide video in requested frame format");
- abort();
- }
if (_is_app_node)
{
eq_config *config = static_cast<eq_config *>(getConfig());
@@ -834,7 +837,7 @@ class eq_window : public eq::Window
eq_node *node = static_cast<eq_node *>(getNode());
_video_output.eq_initialize(node->src_width, node->src_height,
- node->src_aspect_ratio, node->src_preferred_frame_format);
+ node->src_aspect_ratio, node->src_frame_format, node->src_is_mono);
msg::dbg(HERE);
return true;
@@ -857,7 +860,7 @@ class eq_window : public eq::Window
if (node->frame_data.prep_frame)
{
makeCurrent(); // XXX Is this necessary?
- node->prep_frame(&_video_output, _video_output.frame_format());
+ node->prep_frame(&_video_output);
}
if (node->frame_data.display_frame)
{
2 src/player_qt.cpp
@@ -102,7 +102,7 @@ bool player_qt_internal::playloop_step()
}
if (prep_frame)
{
- get_video_frame(get_video_output()->frame_format());
+ get_video_frame(get_input()->video_frame_format());
prepare_video_frame(get_video_output());
release_video_frame();
}
5 src/video_output.h
@@ -89,17 +89,14 @@ class video_output : public controller
/* Initialize */
virtual void open(
- enum decoder::video_frame_format preferred_format,
+ enum decoder::video_frame_format format, bool mono,
int src_width, int src_height, float src_aspect_ratio,
int mode, const video_output_state &state, unsigned int flags,
int win_width, int win_height) = 0;
/* Get the video mode */
virtual enum mode mode() const = 0;
- /* Get the required video frame format. This can differ from the preferred format! */
- virtual enum decoder::video_frame_format frame_format() const = 0;
-
/* Get current state */
virtual const video_output_state &state() const = 0;
53 src/video_output_opengl.cpp
@@ -95,12 +95,13 @@ video_output_opengl::~video_output_opengl()
}
void video_output_opengl::set_source_info(int width, int height, float aspect_ratio,
- enum decoder::video_frame_format preferred_frame_format)
+ enum decoder::video_frame_format format, bool mono)
{
+ _src_format = format;
+ _src_is_mono = mono;
_src_width = width;
_src_height = height;
_src_aspect_ratio = aspect_ratio;
- _src_preferred_frame_format = preferred_frame_format;
}
void video_output_opengl::set_screen_info(int width, int height, float pixel_aspect_ratio)
@@ -186,15 +187,14 @@ void video_output_opengl::initialize()
// Step 1: input of video data
_active_tex_set = 0;
- _input_is_mono = false;
_have_valid_data[0] = false;
_have_valid_data[1] = false;
glGenBuffers(1, &_pbo);
- if (frame_format() == decoder::frame_format_yuv420p)
+ if (_src_format == decoder::frame_format_yuv420p)
{
for (int i = 0; i < 2; i++)
{
- for (int j = 0; j < 2; j++)
+ for (int j = 0; j < (_src_is_mono ? 1 : 2); j++)
{
glGenTextures(1, &(_yuv420p_y_tex[i][j]));
glBindTexture(GL_TEXTURE_2D, _yuv420p_y_tex[i][j]);
@@ -224,7 +224,7 @@ void video_output_opengl::initialize()
{
for (int i = 0; i < 2; i++)
{
- for (int j = 0; j < 2; j++)
+ for (int j = 0; j < (_src_is_mono ? 1 : 2); j++)
{
glGenTextures(1, &(_bgra32_tex[i][j]));
glBindTexture(GL_TEXTURE_2D, _bgra32_tex[i][j]);
@@ -238,15 +238,15 @@ void video_output_opengl::initialize()
}
// Step 2: color-correction
- std::string input_str = (frame_format() == decoder::frame_format_yuv420p
+ std::string input_str = (_src_format == decoder::frame_format_yuv420p
? "input_yuv420p" : "input_bgra32");
std::string color_fs_src = xgl::ShaderSourcePrep(
VIDEO_OUTPUT_OPENGL_COLOR_FS_GLSL_STR,
std::string("$input=") + input_str);
_color_prg = xgl::CreateProgram("video_output_color", "", "", color_fs_src);
xgl::LinkProgram("video_output_color", _color_prg);
glGenFramebuffersEXT(1, &_color_fbo);
- for (int j = 0; j < 2; j++)
+ for (int j = 0; j < (_src_is_mono ? 1 : 2); j++)
{
glGenTextures(1, &(_srgb_tex[j]));
glBindTexture(GL_TEXTURE_2D, _srgb_tex[j]);
@@ -311,19 +311,22 @@ void video_output_opengl::deinitialize()
_have_valid_data[0] = false;
_have_valid_data[1] = false;
glDeleteBuffers(1, &_pbo);
- if (frame_format() == decoder::frame_format_yuv420p)
+ if (_src_format == decoder::frame_format_yuv420p)
{
- glDeleteTextures(2 * 2, reinterpret_cast<GLuint *>(_yuv420p_y_tex));
- glDeleteTextures(2 * 2, reinterpret_cast<GLuint *>(_yuv420p_u_tex));
- glDeleteTextures(2 * 2, reinterpret_cast<GLuint *>(_yuv420p_v_tex));
+ glDeleteTextures(_src_is_mono ? 1 : 2, _yuv420p_y_tex[0]);
+ glDeleteTextures(_src_is_mono ? 1 : 2, _yuv420p_y_tex[1]);
+ glDeleteTextures(_src_is_mono ? 1 : 2, _yuv420p_u_tex[0]);
+ glDeleteTextures(_src_is_mono ? 1 : 2, _yuv420p_u_tex[1]);
+ glDeleteTextures(_src_is_mono ? 1 : 2, _yuv420p_v_tex[0]);
+ glDeleteTextures(_src_is_mono ? 1 : 2, _yuv420p_v_tex[1]);
}
else
{
- glDeleteTextures(2 * 2, reinterpret_cast<GLuint *>(_bgra32_tex));
+ glDeleteTextures(_src_is_mono ? 1 : 2, _bgra32_tex[0]);
}
xgl::DeleteProgram(_color_prg);
glDeleteFramebuffersEXT(1, &_color_fbo);
- glDeleteTextures(2, reinterpret_cast<GLuint *>(_srgb_tex));
+ glDeleteTextures(_src_is_mono ? 1 : 2, reinterpret_cast<GLuint *>(_srgb_tex));
xgl::DeleteProgram(_render_prg);
if (_mode == even_odd_rows || _mode == even_odd_columns || _mode == checkerboard)
{
@@ -332,11 +335,6 @@ void video_output_opengl::deinitialize()
_initialized = false;
}
-enum decoder::video_frame_format video_output_opengl::frame_format() const
-{
- return _src_preferred_frame_format;
-}
-
void video_output_opengl::clear()
{
if (_mode == stereo)
@@ -377,7 +375,7 @@ void video_output_opengl::display(bool toggle_swap_eyes, float x, float y, float
/* Use correct left and right view indices */
int left = 0;
- int right = (_input_is_mono ? 0 : 1);
+ int right = (_src_is_mono ? 0 : 1);
if (_state.swap_eyes)
{
std::swap(left, right);
@@ -414,7 +412,7 @@ void video_output_opengl::display(bool toggle_swap_eyes, float x, float y, float
glLoadIdentity();
glViewport(0, 0, _src_width, _src_height);
glUseProgram(_color_prg);
- if (frame_format() == decoder::frame_format_yuv420p)
+ if (_src_format == decoder::frame_format_yuv420p)
{
glUniform1i(glGetUniformLocation(_color_prg, "y_tex"), 0);
glUniform1i(glGetUniformLocation(_color_prg, "u_tex"), 1);
@@ -431,7 +429,7 @@ void video_output_opengl::display(bool toggle_swap_eyes, float x, float y, float
glUniform1f(glGetUniformLocation(_color_prg, "sin_hue"), std::sin(_state.hue * M_PI));
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, _color_fbo);
// left view: render into _srgb_tex[0]
- if (frame_format() == decoder::frame_format_yuv420p)
+ if (_src_format == decoder::frame_format_yuv420p)
{
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _yuv420p_y_tex[_active_tex_set][left]);
@@ -451,7 +449,7 @@ void video_output_opengl::display(bool toggle_swap_eyes, float x, float y, float
// right view: render into _srgb_tex[1]
if (left != right)
{
- if (frame_format() == decoder::frame_format_yuv420p)
+ if (_src_format == decoder::frame_format_yuv420p)
{
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _yuv420p_y_tex[_active_tex_set][right]);
@@ -666,12 +664,11 @@ void video_output_opengl::prepare(
_have_valid_data[tex_set] = false;
return;
}
- _input_is_mono = (l_data[0] == r_data[0] && l_data[1] == r_data[1] && l_data[2] == r_data[2]);
/* Step 1: input of video data */
glActiveTexture(GL_TEXTURE0);
- if (frame_format() == decoder::frame_format_yuv420p)
+ if (_src_format == decoder::frame_format_yuv420p)
{
upload_texture(_yuv420p_y_tex[tex_set][0], _pbo,
_src_width, _src_height, 1, l_line_size[0],
@@ -682,7 +679,7 @@ void video_output_opengl::prepare(
upload_texture(_yuv420p_v_tex[tex_set][0], _pbo,
_src_width / 2, _src_height / 2, 1, l_line_size[2],
GL_LUMINANCE, GL_UNSIGNED_BYTE, l_data[2]);
- if (!_input_is_mono)
+ if (!_src_is_mono)
{
upload_texture(_yuv420p_y_tex[tex_set][1], _pbo,
_src_width, _src_height, 1, r_line_size[0],
@@ -695,12 +692,12 @@ void video_output_opengl::prepare(
GL_LUMINANCE, GL_UNSIGNED_BYTE, r_data[2]);
}
}
- else if (frame_format() == decoder::frame_format_bgra32)
+ else if (_src_format == decoder::frame_format_bgra32)
{
upload_texture(_bgra32_tex[tex_set][0], _pbo,
_src_width, _src_height, 4, l_line_size[0],
GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, l_data[0]);
- if (!_input_is_mono)
+ if (!_src_is_mono)
{
upload_texture(_bgra32_tex[tex_set][1], _pbo,
_src_width, _src_height, 4, r_line_size[0],
10 src/video_output_opengl.h
@@ -34,10 +34,11 @@ class video_output_opengl : public video_output
{
private:
// Video properties (fixed during playback)
+ enum decoder::video_frame_format _src_format;
+ bool _src_is_mono;
int _src_width;
int _src_height;
float _src_aspect_ratio;
- enum decoder::video_frame_format _src_preferred_frame_format;
// Screen properties (fixed during playback)
int _screen_width;
int _screen_height;
@@ -53,7 +54,6 @@ class video_output_opengl : public video_output
* for output, and one for preparing the next video frame. Each texture set
* has textures for the left and right view. */
int _active_tex_set; // 0 or 1
- bool _input_is_mono; // left view == right view
bool _have_valid_data[2]; // do we have valid data in the given texture set?
// Step 1: input of video data
GLuint _pbo; // pixel-buffer object for texture uploading
@@ -78,7 +78,7 @@ class video_output_opengl : public video_output
* initialization functions in the order in which they appear here.
* You must make sure that the OpenGL context provides GL 2.1 + FBOs. */
void set_mode(enum video_output::mode mode);
- void set_source_info(int width, int height, float aspect_ratio, enum decoder::video_frame_format preferred_frame_format);
+ void set_source_info(int width, int height, float aspect_ratio, enum decoder::video_frame_format format, bool mono);
void set_screen_info(int width, int height, float pixel_aspect_ratio);
void compute_win_size(int width = -1, int height = -1);
void set_state(const video_output_state &_state);
@@ -132,7 +132,7 @@ class video_output_opengl : public video_output
virtual bool supports_stereo() = 0;
virtual void open(
- enum decoder::video_frame_format preferred_format,
+ enum decoder::video_frame_format format, bool mono,
int src_width, int src_height, float src_aspect_ratio,
int mode, const video_output_state &state, unsigned int flags,
int win_width, int win_height) = 0;
@@ -142,8 +142,6 @@ class video_output_opengl : public video_output
return _mode;
}
- virtual enum decoder::video_frame_format frame_format() const;
-
virtual const video_output_state &state() const
{
return _state;
4 src/video_output_opengl_qt.cpp
@@ -289,7 +289,7 @@ bool video_output_opengl_qt::supports_stereo()
}
void video_output_opengl_qt::open(
- enum decoder::video_frame_format preferred_frame_format,
+ enum decoder::video_frame_format src_format, bool src_mono,
int src_width, int src_height, float src_aspect_ratio,
int mode, const video_output_state &state, unsigned int flags,
int win_width, int win_height)
@@ -300,7 +300,7 @@ void video_output_opengl_qt::open(
}
set_mode(static_cast<enum video_output::mode>(mode));
- set_source_info(src_width, src_height, src_aspect_ratio, preferred_frame_format);
+ set_source_info(src_width, src_height, src_aspect_ratio, src_format, src_mono);
int screen_width = QApplication::desktop()->screenGeometry().width();
int screen_height = QApplication::desktop()->screenGeometry().height();
2 src/video_output_opengl_qt.h
@@ -111,7 +111,7 @@ class video_output_opengl_qt : public video_output_opengl
virtual bool supports_stereo();
virtual void open(
- enum decoder::video_frame_format preferred_format,
+ enum decoder::video_frame_format src_format, bool src_mono,
int src_width, int src_height, float src_aspect_ratio,
int mode, const video_output_state &state, unsigned int flags,
int win_width, int win_height);
