Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Expire entries in the shared texture cache before allocating another layer #3293

Merged
merged 6 commits into from Nov 9, 2018
Next

Consolidate driver limits and work around a mac driver bug.

This restores non-mac platforms to the old maximum of 64 layers.

Differential Revision: https://phabricator.services.mozilla.com/D11416
  • Loading branch information
bholley committed Nov 9, 2018
commit 2df20cd363983f9b85c53d767a4247c2f332f0b9
@@ -818,6 +818,7 @@ pub struct Device {
resource_override_path: Option<PathBuf>,

max_texture_size: u32,
max_texture_layers: u32,
renderer_name: String,
cached_programs: Option<Rc<ProgramCache>>,

@@ -903,10 +904,13 @@ impl Device {
cached_programs: Option<Rc<ProgramCache>>,
) -> Device {
let mut max_texture_size = [0];
let mut max_texture_layers = [0];
unsafe {
gl.get_integer_v(gl::MAX_TEXTURE_SIZE, &mut max_texture_size);
gl.get_integer_v(gl::MAX_ARRAY_TEXTURE_LAYERS, &mut max_texture_layers);
}
let max_texture_size = max_texture_size[0] as u32;
let max_texture_layers = max_texture_layers[0] as u32;
let renderer_name = gl.get_string(gl::RENDERER);

let mut extension_count = [0];
@@ -1022,6 +1026,7 @@ impl Device {
depth_available: true,

max_texture_size,
max_texture_layers,
renderer_name,
cached_programs,
frame_id: GpuFrameId(0),
@@ -1046,10 +1051,23 @@ impl Device {
self.cached_programs = Some(cached_programs);
}

/// Caps the maximum texture size at `size`.
///
/// Has no effect when `size` is at or above the limit already reported
/// by the driver — the stricter of the two values always wins.
pub fn clamp_max_texture_size(&mut self, size: u32) {
    if size < self.max_texture_size {
        self.max_texture_size = size;
    }
}

/// Returns the limit on texture dimensions (width or height), in texels.
///
/// This is the driver-reported maximum, possibly lowered by a prior call
/// to `clamp_max_texture_size`.
pub fn max_texture_size(&self) -> u32 {
    self.max_texture_size
}

/// Returns the limit on texture array layers.
///
/// The value is queried from the driver (`GL_MAX_ARRAY_TEXTURE_LAYERS`)
/// at device creation, and widened here from `u32` to `usize` for
/// convenient use as a layer count.
pub fn max_texture_layers(&self) -> usize {
    self.max_texture_layers as usize
}

#[cfg(feature = "debug_renderer")]
pub fn get_capabilities(&self) -> &Capabilities {
&self.capabilities
@@ -732,7 +732,7 @@ mod test_glyph_rasterizer {
let mut glyph_rasterizer = GlyphRasterizer::new(workers).unwrap();
let mut glyph_cache = GlyphCache::new();
let mut gpu_cache = GpuCache::new();
let mut texture_cache = TextureCache::new(2048);
let mut texture_cache = TextureCache::new(2048, 1024);
let mut render_task_cache = RenderTaskCache::new();
let mut render_task_tree = RenderTaskTree::new(FrameId::invalid());
let mut special_render_passes = SpecialRenderPasses::new(&DeviceIntSize::new(1366, 768));
@@ -786,7 +786,7 @@ mod test_glyph_rasterizer {

glyph_rasterizer.resolve_glyphs(
&mut glyph_cache,
&mut TextureCache::new(4096),
&mut TextureCache::new(4096, 1024),
&mut gpu_cache,
&mut render_task_cache,
&mut render_task_tree,
@@ -1525,7 +1525,6 @@ pub struct Renderer {

pub gpu_glyph_renderer: GpuGlyphRenderer,

max_texture_size: u32,
max_recorded_profiles: usize,

clear_color: Option<ColorF>,
@@ -1669,25 +1668,23 @@ impl Renderer {
device.supports_extension("GL_ARB_blend_func_extended") &&
device.supports_extension("GL_ARB_explicit_attrib_location");

let device_max_size = device.max_texture_size();
// 512 is the minimum that the texture cache can work with.
// Broken GL contexts can return a max texture size of zero (See #1260). Better to
// gracefully fail now than panic as soon as a texture is allocated.
let min_texture_size = 512;
if device_max_size < min_texture_size {
const MIN_TEXTURE_SIZE: u32 = 512;
if let Some(user_limit) = options.max_texture_size {
assert!(user_limit >= MIN_TEXTURE_SIZE);
device.clamp_max_texture_size(user_limit);
}
if device.max_texture_size() < MIN_TEXTURE_SIZE {
// Broken GL contexts can return a max texture size of zero (See #1260).
// Better to gracefully fail now than panic as soon as a texture is allocated.
error!(
"Device reporting insufficient max texture size ({})",
device_max_size
device.max_texture_size()
);
return Err(RendererError::MaxTextureSize);
}
let max_device_size = cmp::max(
cmp::min(
device_max_size,
options.max_texture_size.unwrap_or(device_max_size),
),
min_texture_size,
);
let max_texture_size = device.max_texture_size();
let max_texture_layers = device.max_texture_layers();

register_thread_with_profiler("Compositor".to_owned());

@@ -1936,7 +1933,11 @@ impl Renderer {
thread_listener.thread_started(&rb_thread_name);
}

let texture_cache = TextureCache::new(max_device_size);
let texture_cache = TextureCache::new(
max_texture_size,
max_texture_layers,
);

let resource_cache = ResourceCache::new(
texture_cache,
glyph_rasterizer,
@@ -1992,7 +1993,6 @@ impl Renderer {
new_scene_indicator: ChangeIndicator::new(),
#[cfg(feature = "debug_renderer")]
slow_frame_indicator: ChangeIndicator::new(),
max_texture_size: max_device_size,
max_recorded_profiles: options.max_recorded_profiles,
clear_color: options.clear_color,
enable_clear_scissor: options.enable_clear_scissor,
@@ -2041,7 +2041,7 @@ impl Renderer {
}

pub fn get_max_texture_size(&self) -> u32 {
self.max_texture_size
self.device.max_texture_size()
}

pub fn get_graphics_api_info(&self) -> GraphicsApiInfo {
@@ -2751,7 +2751,7 @@ impl Renderer {
(count + list.blocks.len(), cmp::max(height, list.height))
});

if max_requested_height > self.max_texture_size && !self.gpu_cache_overflow {
if max_requested_height > self.get_max_texture_size() && !self.gpu_cache_overflow {
self.gpu_cache_overflow = true;
self.renderer_errors.push(RendererError::MaxTextureSize);
}
@@ -2037,8 +2037,10 @@ impl ResourceCache {
self.cached_glyph_dimensions.clear();
self.cached_images.clear();
self.cached_render_tasks.clear();
let max_texture_size = self.texture_cache.max_texture_size();
self.texture_cache = TextureCache::new(max_texture_size);
self.texture_cache = TextureCache::new(
self.texture_cache.max_texture_size(),
self.texture_cache.max_texture_layers(),
);
}
}

@@ -202,28 +202,28 @@ struct SharedTextures {

impl SharedTextures {
/// Mints a new set of shared textures.
fn new() -> Self {
fn new(max_texture_layers: usize) -> Self {
Self {
// Used primarily for cached shadow masks. There can be lots of
// these on some pages like francine, but most pages don't use it
// much.
array_a8_linear: TextureArray::new(
ImageFormat::R8,
TextureFilter::Linear,
4,
max_texture_layers.min(4),
),
// Used for experimental hdr yuv texture support, but not used in
// production Firefox.
array_a16_linear: TextureArray::new(
ImageFormat::R16,
TextureFilter::Linear,
4,
max_texture_layers.min(4),
),
// The primary cache for images, glyphs, etc.
array_rgba8_linear: TextureArray::new(
ImageFormat::BGRA8,
TextureFilter::Linear,
32, /* More than 32 layers breaks on mac intel drivers for some reason */
max_texture_layers.min(64),
),
// Used for image-rendering: crisp. This is mostly favicons, which
// are small. Some other images use it too, but those tend to be
@@ -235,7 +235,7 @@ impl SharedTextures {
array_rgba8_nearest: TextureArray::new(
ImageFormat::BGRA8,
TextureFilter::Nearest,
4,
max_texture_layers.min(4),
),
}
}
@@ -286,6 +286,9 @@ pub struct TextureCache {
// Maximum texture size supported by hardware.
max_texture_size: u32,

// Maximum number of texture layers supported by hardware.
max_texture_layers: usize,

// The next unused virtual texture ID. Monotonically increasing.
next_id: CacheTextureId,

@@ -314,10 +317,36 @@ pub struct TextureCache {
}

impl TextureCache {
pub fn new(max_texture_size: u32) -> Self {
pub fn new(max_texture_size: u32, mut max_texture_layers: usize) -> Self {
if cfg!(target_os = "macos") {
// On MBP integrated Intel GPUs, texture arrays appear to be
// implemented as a single texture of stacked layers, and that
// texture appears to be subject to the texture size limit. As such,
// allocating more than 32 512x512 regions results in a dimension
// longer than 16k (the max texture size), causing incorrect behavior.
//
// So we clamp the number of layers on mac. This results in maximum
// texture array size of 32MB, which isn't ideal but isn't terrible
// either. OpenGL on mac is not long for this earth, so this may be
// good enough until we have WebRender on gfx-rs (on Metal).
//
// Note that we could also define this more generally in terms of
// |max_texture_size / TEXTURE_REGION_DIMENSION|, except:
// * max_texture_size is actually clamped beyond the device limit
// by Gecko to 8192, so we'd need to thread the raw device value
// here, and:
// * The bug we're working around is likely specific to a single
// driver family, and those drivers are also likely to share
// the same max texture size of 16k. If we do encounter a driver
// with the same bug but a lower max texture size, we might need
// to rethink our strategy anyway, since a limit below 32MB might
// start to introduce performance issues.
max_texture_layers = max_texture_layers.min(32);
}
TextureCache {
shared_textures: SharedTextures::new(),
shared_textures: SharedTextures::new(max_texture_layers),
max_texture_size,
max_texture_layers,
next_id: CacheTextureId(1),
pending_updates: TextureUpdateList::new(),
frame_id: FrameId::invalid(),
@@ -404,6 +433,11 @@ impl TextureCache {
self.max_texture_size
}

// Returns the maximum number of texture array layers this cache may use,
// as stored at construction time (possibly clamped below the hardware
// limit for driver workarounds).
#[allow(dead_code)]
pub fn max_texture_layers(&self) -> usize {
    self.max_texture_layers
}

/// Drains the accumulated texture updates, leaving a fresh, empty list
/// in their place, and hands the drained list to the caller.
pub fn pending_updates(&mut self) -> TextureUpdateList {
    let fresh = TextureUpdateList::new();
    mem::replace(&mut self.pending_updates, fresh)
}
ProTip! Use n and p to navigate between commits in a pull request.
You can’t perform that action at this time.