From 3574e3bd60a3e4b37a8bb0e03160c0482322a01f Mon Sep 17 00:00:00 2001
From: WR Updater Bot
Date: Sun, 2 Dec 2018 00:35:50 +0000
Subject: [PATCH] Bug 1511409 - Update webrender to commit
 dbaa10971f08f964120ba339f5b0ab3e7ace77d6 (WR PR #3374). r=kats

https://github.com/servo/webrender/pull/3374

Differential Revision: https://phabricator.services.mozilla.com/D13626
---
 gfx/webrender_bindings/revision.txt       |   2 +-
 gfx/wr/Cargo.lock                         |   1 +
 gfx/wr/appveyor.yml                       |   4 +-
 gfx/wr/webrender/Cargo.toml               |   1 +
 gfx/wr/webrender/src/batch.rs             |   3 +-
 gfx/wr/webrender/src/lib.rs               |   2 +
 gfx/wr/webrender/src/render_task.rs       |   8 +-
 gfx/wr/webrender/src/texture_allocator.rs | 327 +++++++++++-----------
 gfx/wr/webrender/src/tiling.rs            | 210 +++++---------
 9 files changed, 261 insertions(+), 297 deletions(-)

diff --git a/gfx/webrender_bindings/revision.txt b/gfx/webrender_bindings/revision.txt
index 808229b649ab9..e67fee83e0851 100644
--- a/gfx/webrender_bindings/revision.txt
+++ b/gfx/webrender_bindings/revision.txt
@@ -1 +1 @@
-5b26863178f8533eeba2de28c6bdc019ba9ed3e8
+dbaa10971f08f964120ba339f5b0ab3e7ace77d6
diff --git a/gfx/wr/Cargo.lock b/gfx/wr/Cargo.lock
index 8a41d5153726c..e1fd8437c7a59 100644
--- a/gfx/wr/Cargo.lock
+++ b/gfx/wr/Cargo.lock
@@ -1556,6 +1556,7 @@ dependencies = [
  "pathfinder_path_utils 0.2.0 (git+https://github.com/pcwalton/pathfinder?branch=webrender)",
  "plane-split 0.13.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "png 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "ron 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
diff --git a/gfx/wr/appveyor.yml b/gfx/wr/appveyor.yml
index bd367cf9da982..7049611c2b735 100644
--- a/gfx/wr/appveyor.yml
+++ b/gfx/wr/appveyor.yml
@@ -8,8 +8,8 @@ environment:
     TARGET: x86_64-pc-windows-msvc
 
 install:
-  - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-1.27.0-${env:TARGET}.msi"
-  - msiexec /passive /i "rust-1.27.0-%TARGET%.msi" ADDLOCAL=Rustc,Cargo,Std INSTALLDIR=C:\Rust
+  - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-1.30.0-${env:TARGET}.msi"
+  - msiexec /passive /i "rust-1.30.0-%TARGET%.msi" ADDLOCAL=Rustc,Cargo,Std INSTALLDIR=C:\Rust
   - rustc -V
   - cargo -V
diff --git a/gfx/wr/webrender/Cargo.toml b/gfx/wr/webrender/Cargo.toml
index d2d7020a32462..0c313f719b435 100644
--- a/gfx/wr/webrender/Cargo.toml
+++ b/gfx/wr/webrender/Cargo.toml
@@ -68,6 +68,7 @@ optional = true
 
 [dev-dependencies]
 mozangle = "0.1"
+rand = "0.4"
 
 [target.'cfg(any(target_os = "android", all(unix, not(target_os = "macos"))))'.dependencies]
 freetype = { version = "0.4", default-features = false }
diff --git a/gfx/wr/webrender/src/batch.rs b/gfx/wr/webrender/src/batch.rs
index c6923ca880976..c201914296dba 100644
--- a/gfx/wr/webrender/src/batch.rs
+++ b/gfx/wr/webrender/src/batch.rs
@@ -151,7 +151,8 @@ impl AlphaBatchList {
     ) -> &mut Vec<PrimitiveInstanceData> {
         if z_id != self.current_z_id ||
            self.current_batch_index == usize::MAX ||
-           !self.batches[self.current_batch_index].key.is_compatible_with(&key) {
+           !self.batches[self.current_batch_index].key.is_compatible_with(&key)
+        {
             let mut selected_batch_index = None;
 
             match key.blend_mode {
diff --git a/gfx/wr/webrender/src/lib.rs b/gfx/wr/webrender/src/lib.rs
index 9bd30b1df2d30..9ff5b6a7d16a7 100644
--- a/gfx/wr/webrender/src/lib.rs
+++ b/gfx/wr/webrender/src/lib.rs
@@ -190,6 +190,8 @@ extern crate image as image_loader;
 extern crate base64;
 #[cfg(all(feature = "capture", feature = "png"))]
 extern crate png;
+#[cfg(test)]
+extern crate rand;
 
 pub extern crate webrender_api;
diff --git a/gfx/wr/webrender/src/render_task.rs b/gfx/wr/webrender/src/render_task.rs
index 5898702f77de8..28a8eaca9092d 100644
--- a/gfx/wr/webrender/src/render_task.rs
+++ b/gfx/wr/webrender/src/render_task.rs
@@ -144,8 +144,12 @@ impl RenderTaskTree {
             pass_index
         };
 
-        let pass = &mut passes[pass_index];
-        pass.add_render_task(id, task.get_dynamic_size(), task.target_kind(), &task.location);
+        passes[pass_index].add_render_task(
+            id,
+            task.get_dynamic_size(),
+            task.target_kind(),
+            &task.location,
+        );
     }
 
     pub fn prepare_for_render(&mut self) {
diff --git a/gfx/wr/webrender/src/texture_allocator.rs b/gfx/wr/webrender/src/texture_allocator.rs
index c2a5f631e1e01..1d662f2bbe815 100644
--- a/gfx/wr/webrender/src/texture_allocator.rs
+++ b/gfx/wr/webrender/src/texture_allocator.rs
@@ -3,16 +3,49 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use api::{DeviceIntPoint, DeviceIntRect, DeviceIntSize};
-use std::slice::Iter;
 use util;
 
-/// The minimum number of pixels on each side that we require for rects to be classified as
-/// "medium" within the free list.
-const MINIMUM_MEDIUM_RECT_SIZE: i32 = 16;
+//TODO: gather real-world statistics on the bin usage in order to assist the decision
+// on where to place the size thresholds.
+
+/// This is an optimization tweak to enable looking through all the free rectangles in a bin
+/// and choosing the smallest, as opposed to picking the first match.
+const FIND_SMALLEST_AREA: bool = false;
 
+const NUM_BINS: usize = 3;
 /// The minimum number of pixels on each side that we require for rects to be classified as
-/// "large" within the free list.
-const MINIMUM_LARGE_RECT_SIZE: i32 = 32;
+/// particular bin of freelists.
+const MIN_RECT_AXIS_SIZES: [i32; NUM_BINS] = [1, 16, 32];
+
+#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
+struct FreeListBin(u8);
+
+#[derive(Debug, Clone, Copy)]
+struct FreeListIndex(usize);
+
+impl FreeListBin {
+    fn for_size(size: &DeviceIntSize) -> Self {
+        MIN_RECT_AXIS_SIZES
+            .iter()
+            .enumerate()
+            .rev()
+            .find(|(_, &min_size)| min_size <= size.width && min_size <= size.height)
+            .map(|(id, _)| FreeListBin(id as u8))
+            .expect("Unable to find a bin!")
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct FreeRectSlice(pub u32);
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct FreeRect {
+    slice: FreeRectSlice,
+    rect: DeviceIntRect,
+}
 
 /// A texture allocator using the guillotine algorithm with the rectangle merge improvement. See
 /// sections 2.2 and 2.2.5 in "A Thousand Ways to Pack the Bin - A Practical Approach to Two-
@@ -22,44 +55,32 @@ const MINIMUM_LARGE_RECT_SIZE: i32 = 32;
 ///
 /// This approach was chosen because of its simplicity, good performance, and easy support for
 /// dynamic texture deallocation.
+///
+/// Note: the allocations are spread across multiple textures, and also are binned
+/// orthogonally in order to speed up the search.
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
-pub struct GuillotineAllocator {
-    texture_size: DeviceIntSize,
-    free_list: FreeRectList,
-    allocations: u32,
-    dirty: bool,
+pub struct ArrayAllocationTracker {
+    bins: [Vec<FreeRect>; NUM_BINS],
 }
 
-impl GuillotineAllocator {
-    pub fn new(texture_size: DeviceIntSize) -> GuillotineAllocator {
-        let mut page = GuillotineAllocator {
-            texture_size,
-            free_list: FreeRectList::new(),
-            allocations: 0,
-            dirty: false,
-        };
-        page.clear();
-        page
-    }
-
-    fn find_index_of_best_rect_in_bin(
-        &self,
-        bin: FreeListBin,
-        requested_dimensions: &DeviceIntSize,
-    ) -> Option<FreeListIndex> {
-        let mut smallest_index_and_area = None;
-        for (candidate_index, candidate_rect) in self.free_list.iter(bin).enumerate() {
-            if !requested_dimensions.fits_inside(&candidate_rect.size) {
-                continue;
-            }
-
-            let candidate_area = candidate_rect.size.width * candidate_rect.size.height;
-            smallest_index_and_area = Some((candidate_index, candidate_area));
-            break;
+impl ArrayAllocationTracker {
+    pub fn new() -> Self {
+        ArrayAllocationTracker {
+            bins: [
+                Vec::new(),
+                Vec::new(),
+                Vec::new(),
+            ],
         }
+    }
 
-        smallest_index_and_area.map(|(index, _)| FreeListIndex(bin, index))
+    fn push(&mut self, slice: FreeRectSlice, rect: DeviceIntRect) {
+        let id = FreeListBin::for_size(&rect.size).0 as usize;
+        self.bins[id].push(FreeRect {
+            slice,
+            rect,
+        })
     }
 
     /// Find a suitable rect in the free list. We choose the smallest such rect
@@ -67,66 +88,70 @@ impl GuillotineAllocator {
     fn find_index_of_best_rect(
         &self,
         requested_dimensions: &DeviceIntSize,
-    ) -> Option<FreeListIndex> {
-        let bin = FreeListBin::for_size(requested_dimensions);
-        for &target_bin in &[FreeListBin::Small, FreeListBin::Medium, FreeListBin::Large] {
-            if bin <= target_bin {
-                if let Some(index) =
-                    self.find_index_of_best_rect_in_bin(target_bin, requested_dimensions)
-                {
-                    return Some(index);
+    ) -> Option<(FreeListBin, FreeListIndex)> {
+        let start_bin = FreeListBin::for_size(requested_dimensions);
+        (start_bin.0 .. NUM_BINS as u8)
+            .find_map(|id| if FIND_SMALLEST_AREA {
+                let mut smallest_index_and_area = None;
+                for (candidate_index, candidate) in self.bins[id as usize].iter().enumerate() {
+                    if requested_dimensions.width > candidate.rect.size.width ||
+                        requested_dimensions.height > candidate.rect.size.height
+                    {
+                        continue;
+                    }
+
+                    let candidate_area = candidate.rect.size.area();
+                    match smallest_index_and_area {
+                        Some((_, area)) if candidate_area >= area => continue,
+                        _ => smallest_index_and_area = Some((candidate_index, candidate_area)),
+                    }
                 }
-            }
-        }
-
-        None
+
+                smallest_index_and_area
+                    .map(|(index, _)| (FreeListBin(id), FreeListIndex(index)))
+            } else {
+                self.bins[id as usize]
+                    .iter()
+                    .position(|candidate| {
+                        requested_dimensions.width <= candidate.rect.size.width &&
+                        requested_dimensions.height <= candidate.rect.size.height
+                    })
+                    .map(|index| (FreeListBin(id), FreeListIndex(index)))
+            })
     }
 
-    pub fn allocate(&mut self, requested_dimensions: &DeviceIntSize) -> Option<DeviceIntPoint> {
-        if requested_dimensions.width == 0 || requested_dimensions.height == 0 {
-            return Some(DeviceIntPoint::new(0, 0));
-        }
-        let index = match self.find_index_of_best_rect(requested_dimensions) {
-            None => return None,
-            Some(index) => index,
-        };
-
-        // Remove the rect from the free list and decide how to guillotine it. We choose the split
-        // that results in the single largest area (Min Area Split Rule, MINAS).
-        let chosen_rect = self.free_list.remove(index);
+    // Split that results in the single largest area (Min Area Split Rule, MINAS).
+    fn split_guillotine(&mut self, chosen: &FreeRect, requested_dimensions: &DeviceIntSize) {
         let candidate_free_rect_to_right = DeviceIntRect::new(
             DeviceIntPoint::new(
-                chosen_rect.origin.x + requested_dimensions.width,
-                chosen_rect.origin.y,
+                chosen.rect.origin.x + requested_dimensions.width,
+                chosen.rect.origin.y,
             ),
             DeviceIntSize::new(
-                chosen_rect.size.width - requested_dimensions.width,
+                chosen.rect.size.width - requested_dimensions.width,
                 requested_dimensions.height,
             ),
         );
         let candidate_free_rect_to_bottom = DeviceIntRect::new(
             DeviceIntPoint::new(
-                chosen_rect.origin.x,
-                chosen_rect.origin.y + requested_dimensions.height,
+                chosen.rect.origin.x,
+                chosen.rect.origin.y + requested_dimensions.height,
             ),
             DeviceIntSize::new(
                 requested_dimensions.width,
-                chosen_rect.size.height - requested_dimensions.height,
+                chosen.rect.size.height - requested_dimensions.height,
             ),
         );
-        let candidate_free_rect_to_right_area =
-            candidate_free_rect_to_right.size.width * candidate_free_rect_to_right.size.height;
-        let candidate_free_rect_to_bottom_area =
-            candidate_free_rect_to_bottom.size.width * candidate_free_rect_to_bottom.size.height;
 
         // Guillotine the rectangle.
         let new_free_rect_to_right;
         let new_free_rect_to_bottom;
-        if candidate_free_rect_to_right_area > candidate_free_rect_to_bottom_area {
+        if candidate_free_rect_to_right.size.area() > candidate_free_rect_to_bottom.size.area() {
            new_free_rect_to_right = DeviceIntRect::new(
                 candidate_free_rect_to_right.origin,
                 DeviceIntSize::new(
                     candidate_free_rect_to_right.size.width,
-                    chosen_rect.size.height,
+                    chosen.rect.size.height,
                 ),
             );
             new_free_rect_to_bottom = candidate_free_rect_to_bottom
@@ -135,115 +160,105 @@ impl GuillotineAllocator {
             new_free_rect_to_bottom = DeviceIntRect::new(
                 candidate_free_rect_to_bottom.origin,
                 DeviceIntSize::new(
-                    chosen_rect.size.width,
+                    chosen.rect.size.width,
                     candidate_free_rect_to_bottom.size.height,
                 ),
             )
         }
 
-        // Add the guillotined rects back to the free list. If any changes were made, we're now
-        // dirty since coalescing might be able to defragment.
+        // Add the guillotined rects back to the free list.
         if !util::rect_is_empty(&new_free_rect_to_right) {
-            self.free_list.push(&new_free_rect_to_right);
-            self.dirty = true
+            self.push(chosen.slice, new_free_rect_to_right);
         }
         if !util::rect_is_empty(&new_free_rect_to_bottom) {
-            self.free_list.push(&new_free_rect_to_bottom);
-            self.dirty = true
+            self.push(chosen.slice, new_free_rect_to_bottom);
+        }
+    }
+
+    pub fn allocate(
+        &mut self, requested_dimensions: &DeviceIntSize
+    ) -> Option<(FreeRectSlice, DeviceIntPoint)> {
+        if requested_dimensions.width == 0 || requested_dimensions.height == 0 {
+            return Some((FreeRectSlice(0), DeviceIntPoint::new(0, 0)));
         }
+        let (bin, index) = self.find_index_of_best_rect(requested_dimensions)?;
 
-        // Bump the allocation counter.
-        self.allocations += 1;
+        // Remove the rect from the free list and decide how to guillotine it.
+        let chosen = self.bins[bin.0 as usize].swap_remove(index.0);
+        self.split_guillotine(&chosen, requested_dimensions);
 
         // Return the result.
-        Some(chosen_rect.origin)
+        Some((chosen.slice, chosen.rect.origin))
     }
 
-    fn clear(&mut self) {
-        self.free_list = FreeRectList::new();
-        self.free_list.push(&DeviceIntRect::new(
-            DeviceIntPoint::zero(),
-            self.texture_size,
-        ));
-        self.allocations = 0;
-        self.dirty = false;
+    /// Add a new slice to the allocator, and immediately allocate a rect from it.
+    pub fn extend(
+        &mut self,
+        slice: FreeRectSlice,
+        total_size: DeviceIntSize,
+        requested_dimensions: DeviceIntSize,
+    ) {
+        self.split_guillotine(
+            &FreeRect { slice, rect: total_size.into() },
+            &requested_dimensions
+        );
     }
 }
 
-/// A binning free list. Binning is important to avoid sifting through lots of small strips when
-/// allocating many texture items.
-#[cfg_attr(feature = "capture", derive(Serialize))]
-#[cfg_attr(feature = "replay", derive(Deserialize))]
-struct FreeRectList {
-    small: Vec<DeviceIntRect>,
-    medium: Vec<DeviceIntRect>,
-    large: Vec<DeviceIntRect>,
-}
+#[cfg(test)]
+fn random_fill(count: usize, texture_size: i32) -> f32 {
+    use rand::{thread_rng, Rng};
 
-impl FreeRectList {
-    fn new() -> Self {
-        FreeRectList {
-            small: vec![],
-            medium: vec![],
-            large: vec![],
-        }
-    }
-
-    fn push(&mut self, rect: &DeviceIntRect) {
-        match FreeListBin::for_size(&rect.size) {
-            FreeListBin::Small => self.small.push(*rect),
-            FreeListBin::Medium => self.medium.push(*rect),
-            FreeListBin::Large => self.large.push(*rect),
-        }
-    }
+    let total_rect = DeviceIntRect::new(
+        DeviceIntPoint::zero(),
+        DeviceIntSize::new(texture_size, texture_size),
+    );
+    let mut rng = thread_rng();
+    let mut allocator = ArrayAllocationTracker::new();
+    let mut slices: Vec<Vec<DeviceIntRect>> = Vec::new();
+    let mut requested_area = 0f32;
+    // fill up the allocator
+    for _ in 0 .. count {
+        let size = DeviceIntSize::new(
+            rng.gen_range(1, texture_size),
+            rng.gen_range(1, texture_size),
+        );
+        requested_area += size.area() as f32;
 
-    fn remove(&mut self, index: FreeListIndex) -> DeviceIntRect {
-        match index.0 {
-            FreeListBin::Small => self.small.swap_remove(index.1),
-            FreeListBin::Medium => self.medium.swap_remove(index.1),
-            FreeListBin::Large => self.large.swap_remove(index.1),
+        match allocator.allocate(&size) {
+            Some((slice, origin)) => {
+                let rect = DeviceIntRect::new(origin, size);
+                assert_eq!(None, slices[slice.0 as usize].iter().find(|r| r.intersects(&rect)));
+                assert!(total_rect.contains_rect(&rect));
+                slices[slice.0 as usize].push(rect);
+            }
+            None => {
+                allocator.extend(FreeRectSlice(slices.len() as u32), total_rect.size, size);
+                let rect = DeviceIntRect::new(DeviceIntPoint::zero(), size);
+                slices.push(vec![rect]);
+            }
         }
     }
-
-    fn iter(&self, bin: FreeListBin) -> Iter<DeviceIntRect> {
-        match bin {
-            FreeListBin::Small => self.small.iter(),
-            FreeListBin::Medium => self.medium.iter(),
-            FreeListBin::Large => self.large.iter(),
+    // validate the free rects
+    for (i, free_vecs) in allocator.bins.iter().enumerate() {
+        for fr in free_vecs {
+            assert_eq!(FreeListBin(i as u8), FreeListBin::for_size(&fr.rect.size));
+            assert_eq!(None, slices[fr.slice.0 as usize].iter().find(|r| r.intersects(&fr.rect)));
+            assert!(total_rect.contains_rect(&fr.rect));
+            slices[fr.slice.0 as usize].push(fr.rect);
         }
     }
-}
-
-#[derive(Debug, Clone, Copy)]
-struct FreeListIndex(FreeListBin, usize);
 
-#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
-enum FreeListBin {
-    Small,
-    Medium,
-    Large,
+    let allocated_area = slices.len() as f32 * (texture_size * texture_size) as f32;
+    requested_area / allocated_area
 }
 
-impl FreeListBin {
-    fn for_size(size: &DeviceIntSize) -> FreeListBin {
-        if size.width >= MINIMUM_LARGE_RECT_SIZE && size.height >= MINIMUM_LARGE_RECT_SIZE {
-            FreeListBin::Large
-        } else if size.width >= MINIMUM_MEDIUM_RECT_SIZE && size.height >= MINIMUM_MEDIUM_RECT_SIZE
-        {
-            FreeListBin::Medium
-        } else {
-            debug_assert!(size.width > 0 && size.height > 0);
-            FreeListBin::Small
-        }
-    }
+#[test]
+fn test_small() {
+    random_fill(100, 100);
 }
 
-trait FitsInside {
-    fn fits_inside(&self, other: &Self) -> bool;
-}
-
-impl FitsInside for DeviceIntSize {
-    fn fits_inside(&self, other: &DeviceIntSize) -> bool {
-        self.width <= other.width && self.height <= other.height
-    }
+#[test]
+fn test_large() {
+    random_fill(1000, 10000);
 }
diff --git a/gfx/wr/webrender/src/tiling.rs b/gfx/wr/webrender/src/tiling.rs
index f34f86b9c38a7..b480f4724fbe2 100644
--- a/gfx/wr/webrender/src/tiling.rs
+++ b/gfx/wr/webrender/src/tiling.rs
@@ -25,7 +25,7 @@ use render_task::{BlitSource, RenderTaskAddress, RenderTaskId, RenderTaskKind, T
 use render_task::{BlurTask, ClearMode, GlyphTask, RenderTaskLocation, RenderTaskTree, ScalingTask};
 use resource_cache::ResourceCache;
 use std::{cmp, usize, f32, i32, mem};
-use texture_allocator::GuillotineAllocator;
+use texture_allocator::{ArrayAllocationTracker, FreeRectSlice};
 #[cfg(feature = "pathfinder")]
 use webrender_api::{DevicePixel, FontRenderMode};
@@ -37,6 +37,9 @@ const STYLE_MASK: i32 = 0x00FF_FF00;
 /// we try to avoid it. This can go away when proper tiling support lands,
 /// since we can then split large primitives across multiple textures.
 const IDEAL_MAX_TEXTURE_DIMENSION: i32 = 2048;
+/// If we ever need a larger texture than the ideal, we better round it up to a
+/// reasonable number in order to have a bit of leeway in placing things inside.
+const TEXTURE_DIMENSION_MASK: i32 = 0xFF;
 
 /// Identifies a given `RenderTarget` in a `RenderTargetList`.
 #[derive(Debug, Copy, Clone)]
@@ -55,46 +58,6 @@ pub struct RenderTargetContext<'a, 'rc> {
     pub scratch: &'a PrimitiveScratchBuffer,
 }
 
-#[cfg_attr(feature = "capture", derive(Serialize))]
-#[cfg_attr(feature = "replay", derive(Deserialize))]
-struct TextureAllocator {
-    // TODO(gw): Replace this with a simpler allocator for
-    //           render target allocation - this use case doesn't need
-    //           to deal with coalescing etc that the general texture
-    //           cache allocator requires.
-    allocator: GuillotineAllocator,
-
-    // Track the used rect of the render target, so that
-    // we can set a scissor rect and only clear to the
-    // used portion of the target as an optimization.
-    used_rect: DeviceIntRect,
-}
-
-impl TextureAllocator {
-    fn new(size: DeviceIntSize) -> Self {
-        TextureAllocator {
-            allocator: GuillotineAllocator::new(size),
-            used_rect: DeviceIntRect::zero(),
-        }
-    }
-
-    fn allocate(&mut self, size: &DeviceIntSize) -> Option<DeviceIntPoint> {
-        let origin = self.allocator.allocate(size);
-
-        if let Some(origin) = origin {
-            // TODO(gw): We need to make all the device rects
-            //           be consistent in the use of the
-            //           DeviceIntRect and DeviceIntRect types!
-            let origin = DeviceIntPoint::new(origin.x as i32, origin.y as i32);
-            let size = DeviceIntSize::new(size.width as i32, size.height as i32);
-            let rect = DeviceIntRect::new(origin, size);
-            self.used_rect = rect.union(&self.used_rect);
-        }
-
-        origin
-    }
-}
-
 /// Represents a number of rendering operations on a surface.
 ///
 /// In graphics parlance, a "render target" usually means "a surface (texture or
@@ -110,17 +73,7 @@ impl TextureAllocator {
 /// and sometimes on its parameters. See `RenderTask::target_kind`.
 pub trait RenderTarget {
     /// Creates a new RenderTarget of the given type.
-    fn new(
-        size: Option<DeviceIntSize>,
-        screen_size: DeviceIntSize,
-    ) -> Self;
-
-    /// Allocates a region of the given size in this target, and returns either
-    /// the offset of that region or `None` if it won't fit.
-    ///
-    /// If a non-`None` result is returned, that value is generally stored in
-    /// a task which is then added to this target via `add_task()`.
-    fn allocate(&mut self, size: DeviceIntSize) -> Option<DeviceIntPoint>;
+    fn new(screen_size: DeviceIntSize) -> Self;
 
     /// Optional hook to provide additional processing for the target at the
     /// end of the build phase.
@@ -155,8 +108,11 @@ pub trait RenderTarget {
         transforms: &mut TransformPalette,
         deferred_resolves: &mut Vec<DeferredResolve>,
     );
-    fn used_rect(&self) -> DeviceIntRect;
     fn needs_depth(&self) -> bool;
+
+    fn used_rect(&self) -> DeviceIntRect;
+    fn add_used(&mut self, rect: DeviceIntRect);
 }
 
 /// A tag used to identify the output format of a `RenderTarget`.
@@ -208,6 +164,7 @@ pub struct RenderTargetList<T> {
     pub max_dynamic_size: DeviceIntSize,
     pub targets: Vec<T>,
     pub saved_index: Option<SavedTargetIndex>,
+    pub alloc_tracker: ArrayAllocationTracker,
 }
 
 impl<T: RenderTarget> RenderTargetList<T> {
@@ -221,6 +178,7 @@ impl<T: RenderTarget> RenderTargetList<T> {
             max_dynamic_size: DeviceIntSize::new(0, 0),
             targets: Vec::new(),
             saved_index: None,
+            alloc_tracker: ArrayAllocationTracker::new(),
         }
     }
@@ -251,56 +209,44 @@ impl<T: RenderTarget> RenderTargetList<T> {
         }
     }
 
-    fn add_task(
-        &mut self,
-        task_id: RenderTaskId,
-        ctx: &RenderTargetContext,
-        gpu_cache: &mut GpuCache,
-        render_tasks: &mut RenderTaskTree,
-        clip_store: &ClipStore,
-        transforms: &mut TransformPalette,
-        deferred_resolves: &mut Vec<DeferredResolve>,
-    ) {
-        self.targets.last_mut().unwrap().add_task(
-            task_id,
-            ctx,
-            gpu_cache,
-            render_tasks,
-            clip_store,
-            transforms,
-            deferred_resolves,
-        );
-    }
-
     fn allocate(
         &mut self,
         alloc_size: DeviceIntSize,
-    ) -> (DeviceIntPoint, RenderTargetIndex) {
-        let existing_origin = self.targets
-            .last_mut()
-            .and_then(|target| target.allocate(alloc_size));
-
-        let origin = match existing_origin {
-            Some(origin) => origin,
+    ) -> (RenderTargetIndex, DeviceIntPoint) {
+        let (free_rect_slice, origin) = match self.alloc_tracker.allocate(&alloc_size) {
+            Some(allocation) => allocation,
             None => {
                 // Have the allocator restrict slice sizes to our max ideal
                 // dimensions, unless we've already gone bigger on a previous
                 // slice.
+                let rounded_dimensions = DeviceIntSize::new(
+                    (self.max_dynamic_size.width + TEXTURE_DIMENSION_MASK) & !TEXTURE_DIMENSION_MASK,
+                    (self.max_dynamic_size.height + TEXTURE_DIMENSION_MASK) & !TEXTURE_DIMENSION_MASK,
+                );
                 let allocator_dimensions = DeviceIntSize::new(
-                    cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, self.max_dynamic_size.width),
-                    cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, self.max_dynamic_size.height),
+                    cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, rounded_dimensions.width),
+                    cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, rounded_dimensions.height),
                 );
-                let mut new_target = T::new(Some(allocator_dimensions), self.screen_size);
-                let origin = new_target.allocate(alloc_size).expect(&format!(
-                    "Each render task must allocate <= size of one target! ({})",
({})", - alloc_size - )); - self.targets.push(new_target); - origin + + assert!(alloc_size.width <= allocator_dimensions.width && + alloc_size.height <= allocator_dimensions.height); + let slice = FreeRectSlice(self.targets.len() as u32); + self.targets.push(T::new(self.screen_size)); + + self.alloc_tracker.extend( + slice, + allocator_dimensions, + alloc_size, + ); + + (slice, DeviceIntPoint::zero()) } }; - (origin, RenderTargetIndex(self.targets.len() - 1)) + self.targets[free_rect_slice.0 as usize] + .add_used(DeviceIntRect::new(origin, alloc_size)); + + (RenderTargetIndex(free_rect_slice.0 as usize), origin) } pub fn needs_depth(&self) -> bool { @@ -389,23 +335,16 @@ pub struct ColorRenderTarget { pub outputs: Vec, pub tile_blits: Vec, pub color_clears: Vec, - allocator: Option, alpha_tasks: Vec, screen_size: DeviceIntSize, + // Track the used rect of the render target, so that + // we can set a scissor rect and only clear to the + // used portion of the target as an optimization. + pub used_rect: DeviceIntRect, } impl RenderTarget for ColorRenderTarget { - fn allocate(&mut self, size: DeviceIntSize) -> Option { - self.allocator - .as_mut() - .expect("bug: calling allocate on framebuffer") - .allocate(&size) - } - - fn new( - size: Option, - screen_size: DeviceIntSize, - ) -> Self { + fn new(screen_size: DeviceIntSize) -> Self { ColorRenderTarget { alpha_batch_containers: Vec::new(), vertical_blurs: Vec::new(), @@ -413,12 +352,12 @@ impl RenderTarget for ColorRenderTarget { readbacks: Vec::new(), scalings: Vec::new(), blits: Vec::new(), - allocator: size.map(TextureAllocator::new), outputs: Vec::new(), alpha_tasks: Vec::new(), color_clears: Vec::new(), tile_blits: Vec::new(), screen_size, + used_rect: DeviceIntRect::zero(), } } @@ -600,18 +539,19 @@ impl RenderTarget for ColorRenderTarget { } } - fn used_rect(&self) -> DeviceIntRect { - self.allocator - .as_ref() - .expect("bug: used_rect called on framebuffer") - .used_rect - } - fn needs_depth(&self) -> bool { self.alpha_batch_containers.iter().any(|ab| { !ab.opaque_batches.is_empty() }) } + + fn used_rect(&self) -> DeviceIntRect { + self.used_rect + } + + fn add_used(&mut self, rect: DeviceIntRect) { + self.used_rect = self.used_rect.union(&rect); + } } /// Contains the work (in the form of instance arrays) needed to fill an alpha @@ -627,25 +567,21 @@ pub struct AlphaRenderTarget { pub horizontal_blurs: Vec, pub scalings: Vec, pub zero_clears: Vec, - allocator: TextureAllocator, + // Track the used rect of the render target, so that + // we can set a scissor rect and only clear to the + // used portion of the target as an optimization. 
+    pub used_rect: DeviceIntRect,
 }
 
 impl RenderTarget for AlphaRenderTarget {
-    fn allocate(&mut self, size: DeviceIntSize) -> Option<DeviceIntPoint> {
-        self.allocator.allocate(&size)
-    }
-
-    fn new(
-        size: Option<DeviceIntSize>,
-        _: DeviceIntSize,
-    ) -> Self {
+    fn new(_screen_size: DeviceIntSize) -> Self {
         AlphaRenderTarget {
             clip_batcher: ClipBatcher::new(),
             vertical_blurs: Vec::new(),
             horizontal_blurs: Vec::new(),
             scalings: Vec::new(),
             zero_clears: Vec::new(),
-            allocator: TextureAllocator::new(size.expect("bug: alpha targets need size")),
+            used_rect: DeviceIntRect::zero(),
         }
     }
@@ -728,12 +664,16 @@ impl RenderTarget for AlphaRenderTarget {
         }
     }
 
+    fn needs_depth(&self) -> bool {
+        false
+    }
+
     fn used_rect(&self) -> DeviceIntRect {
-        self.allocator.used_rect
+        self.used_rect
     }
 
-    fn needs_depth(&self) -> bool {
-        false
+    fn add_used(&mut self, rect: DeviceIntRect) {
+        self.used_rect = self.used_rect.union(&rect);
     }
 }
@@ -895,7 +835,7 @@ impl RenderPass {
     /// Creates a pass for the main framebuffer. There is only one of these, and
     /// it is always the last pass.
     pub fn new_main_framebuffer(screen_size: DeviceIntSize) -> Self {
-        let target = ColorRenderTarget::new(None, screen_size);
+        let target = ColorRenderTarget::new(screen_size);
         RenderPass {
             kind: RenderPassKind::MainFramebuffer(target),
             tasks: vec![],
@@ -1002,26 +942,26 @@ impl RenderPass {
 
         // Step through each task, adding to batches as appropriate.
         for &task_id in &self.tasks {
-            let (target_kind, texture_target) = {
+            let (target_kind, texture_target, layer) = {
                 let task = &mut render_tasks[task_id];
                 let target_kind = task.target_kind();
 
                 // Find a target to assign this task to, or create a new
                 // one if required.
-                let texture_target = match task.location {
+                let (texture_target, layer) = match task.location {
                     RenderTaskLocation::TextureCache { texture, layer, .. } => {
-                        Some((texture, layer))
+                        (Some(texture), layer)
                     }
                     RenderTaskLocation::Fixed(..) => {
-                        None
+                        (None, 0)
                     }
                     RenderTaskLocation::Dynamic(ref mut origin, size) => {
-                        let (alloc_origin, target_index) = match target_kind {
+                        let (target_index, alloc_origin) = match target_kind {
                             RenderTargetKind::Color => color.allocate(size),
                            RenderTargetKind::Alpha => alpha.allocate(size),
                         };
                         *origin = Some((alloc_origin, target_index));
-                        None
+                        (None, target_index.0)
                     }
                 };
 
@@ -1038,13 +978,13 @@ impl RenderPass {
                 // information to the GPU cache, if appropriate.
                 task.write_gpu_blocks(gpu_cache);
 
-                (target_kind, texture_target)
+                (target_kind, texture_target, layer)
             };
 
             match texture_target {
                 Some(texture_target) => {
                     let texture = texture_cache
-                        .entry(texture_target)
+                        .entry((texture_target, layer))
                        .or_insert(
                             TextureCacheRenderTarget::new(target_kind)
                         );
@@ -1052,7 +992,7 @@ impl RenderPass {
                 }
                 None => {
                     match target_kind {
-                        RenderTargetKind::Color => color.add_task(
+                        RenderTargetKind::Color => color.targets[layer].add_task(
                             task_id,
                             ctx,
                             gpu_cache,
@@ -1061,7 +1001,7 @@ impl RenderPass {
                             transforms,
                             deferred_resolves,
                         ),
-                        RenderTargetKind::Alpha => alpha.add_task(
+                        RenderTargetKind::Alpha => alpha.targets[layer].add_task(
                             task_id,
                             ctx,
                             gpu_cache,
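Note on the bin structure introduced above: the patch replaces the named Small/Medium/Large free lists with an array of bins indexed through `MIN_RECT_AXIS_SIZES`. A rect belongs to the highest-threshold bin that both of its sides meet, and a request is served from its own bin or any larger one. A minimal standalone sketch of that selection logic, with plain `i32` sides standing in for WebRender's `DeviceIntSize` (names here are illustrative, not the patch's API):

const NUM_BINS: usize = 3;
const MIN_RECT_AXIS_SIZES: [i32; NUM_BINS] = [1, 16, 32];

/// Bin for a `width x height` rect: the highest threshold that both sides
/// reach, so the smaller axis decides (16x40 -> bin 1, 32x32 -> bin 2).
fn bin_for_size(width: i32, height: i32) -> usize {
    MIN_RECT_AXIS_SIZES
        .iter()
        .enumerate()
        .rev()
        .find(|&(_, &min_size)| min_size <= width && min_size <= height)
        .map(|(id, _)| id)
        .expect("Unable to find a bin!")
}

fn main() {
    assert_eq!(bin_for_size(1, 1), 0);
    assert_eq!(bin_for_size(16, 40), 1);
    assert_eq!(bin_for_size(32, 32), 2);
    // A request binned at 1 may still be served from bin 2: every free
    // rect there is at least 32 on each axis, so it can only be bigger.
    for bin in bin_for_size(20, 20) .. NUM_BINS {
        println!("searching bin {}", bin);
    }
}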
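The Min Area Split Rule in `split_guillotine` is easiest to follow with concrete numbers. Cutting a 10x20 task out of a 100x100 free rect leaves an L-shape that can be split two ways; the rule extends whichever leftover candidate is larger to the full extent of the free rect. A hedged sketch using `(x, y, w, h)` tuples rather than the real `DeviceIntRect`:

type Rect = (i32, i32, i32, i32); // (x, y, width, height)

/// Cut `(req_w, req_h)` out of the top-left corner of `free`, returning the
/// two leftover rects. The larger candidate keeps the full length of the
/// free rect (Min Area Split Rule, MINAS).
fn split_guillotine(free: Rect, req_w: i32, req_h: i32) -> (Rect, Rect) {
    let (x, y, w, h) = free;
    // Leftover candidates if we only cut right next to the request:
    let right = (x + req_w, y, w - req_w, req_h);
    let bottom = (x, y + req_h, req_w, h - req_h);
    let area = |r: Rect| r.2 * r.3;
    if area(right) > area(bottom) {
        // Right strip grows to the full height; bottom stays small.
        ((x + req_w, y, w - req_w, h), bottom)
    } else {
        // Bottom strip grows to the full width; right stays small.
        (right, (x, y + req_h, w, h - req_h))
    }
}

fn main() {
    // 10x20 out of 100x100: right candidate is 90*20 = 1800, bottom is
    // 10*80 = 800, so the right strip goes full-height.
    let (a, b) = split_guillotine((0, 0, 100, 100), 10, 20);
    assert_eq!(a, (10, 0, 90, 100));
    assert_eq!(b, (0, 20, 10, 80));
}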
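Finally, the new `TEXTURE_DIMENSION_MASK` in `RenderTargetList::allocate` rounds an over-ideal target dimension up to the next multiple of 256 before a new slice is created, leaving some slack for later allocations. The bit trick in isolation (constant name kept from the patch):

const TEXTURE_DIMENSION_MASK: i32 = 0xFF;

/// Round `size` up to the next multiple of 256 (mask + 1).
fn round_up(size: i32) -> i32 {
    (size + TEXTURE_DIMENSION_MASK) & !TEXTURE_DIMENSION_MASK
}

fn main() {
    assert_eq!(round_up(2048), 2048); // already aligned
    assert_eq!(round_up(2049), 2304); // 9 * 256
    assert_eq!(round_up(1), 256);
}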