From d06351dd65821d75746779bc7ae5131a1d614a37 Mon Sep 17 00:00:00 2001 From: Doug Thayer Date: Wed, 28 Nov 2018 10:56:18 -0800 Subject: [PATCH 1/3] Make TextureCache document-aware This change makes the TextureCache segment its cached data by document, so that documents' data are not evicted out from underneath them. Differential Revision: https://phabricator.services.mozilla.com/D13343 --- webrender/src/frame_builder.rs | 2 +- webrender/src/render_backend.rs | 25 +++++- webrender/src/resource_cache.rs | 4 +- webrender/src/texture_cache.rs | 139 ++++++++++++++++++++++---------- webrender_api/src/api.rs | 4 + 5 files changed, 125 insertions(+), 49 deletions(-) diff --git a/webrender/src/frame_builder.rs b/webrender/src/frame_builder.rs index 3350e13541..718bf8a2ec 100644 --- a/webrender/src/frame_builder.rs +++ b/webrender/src/frame_builder.rs @@ -496,7 +496,7 @@ impl FrameBuilder { render_tasks.write_task_data(device_pixel_scale); - resource_cache.end_frame(); + resource_cache.end_frame(texture_cache_profile); Frame { window_size: self.window_size, diff --git a/webrender/src/render_backend.rs b/webrender/src/render_backend.rs index 73b34d0289..25fece7fc2 100644 --- a/webrender/src/render_backend.rs +++ b/webrender/src/render_backend.rs @@ -148,12 +148,15 @@ impl ::std::ops::Sub for FrameId { pub struct FrameStamp { id: FrameId, time: SystemTime, + document_id: DocumentId, } impl Eq for FrameStamp {} impl PartialEq for FrameStamp { fn eq(&self, other: &Self) -> bool { + // We should not be checking equality unless the documents are the same + debug_assert!(self.document_id == other.document_id); self.id == other.id } } @@ -175,11 +178,24 @@ impl FrameStamp { self.time } + /// Gets the DocumentId in this stamp. + pub fn document_id(&self) -> DocumentId { + self.document_id + } + + pub fn is_valid(&self) -> bool { + // If any fields are their default values, the whole struct should equal INVALID + debug_assert!((self.time != UNIX_EPOCH && self.id != FrameId(0) && self.document_id != DocumentId::INVALID) || + *self == Self::INVALID); + self.document_id != DocumentId::INVALID + } + /// Returns a FrameStamp corresponding to the first frame. 
- pub fn first() -> Self { + pub fn first(document_id: DocumentId) -> Self { FrameStamp { id: FrameId::first(), time: SystemTime::now(), + document_id: document_id, } } @@ -193,6 +209,7 @@ impl FrameStamp { pub const INVALID: FrameStamp = FrameStamp { id: FrameId(0), time: UNIX_EPOCH, + document_id: DocumentId::INVALID, }; } @@ -332,6 +349,7 @@ struct Document { impl Document { pub fn new( + id: DocumentId, window_size: DeviceIntSize, layer: DocumentLayer, default_device_pixel_ratio: f32, @@ -349,7 +367,7 @@ impl Document { device_pixel_ratio: default_device_pixel_ratio, }, clip_scroll_tree: ClipScrollTree::new(), - stamp: FrameStamp::first(), + stamp: FrameStamp::first(id), frame_builder: None, output_pipelines: FastHashSet::default(), hit_tester: None, @@ -980,6 +998,7 @@ impl RenderBackend { } ApiMsg::AddDocument(document_id, initial_size, layer) => { let document = Document::new( + document_id, initial_size, layer, self.default_device_pixel_ratio, @@ -1743,7 +1762,7 @@ impl RenderBackend { removed_pipelines: Vec::new(), view: view.clone(), clip_scroll_tree: ClipScrollTree::new(), - stamp: FrameStamp::first(), + stamp: FrameStamp::first(id), frame_builder: Some(FrameBuilder::empty()), output_pipelines: FastHashSet::default(), dynamic_properties: SceneProperties::new(), diff --git a/webrender/src/resource_cache.rs b/webrender/src/resource_cache.rs index 010b2d7246..3cb0db61f9 100644 --- a/webrender/src/resource_cache.rs +++ b/webrender/src/resource_cache.rs @@ -1610,7 +1610,6 @@ impl ResourceCache { &mut self.texture_cache, render_tasks, ); - self.texture_cache.end_frame(texture_cache_profile); } fn rasterize_missing_blob_images(&mut self) { @@ -1767,9 +1766,10 @@ impl ResourceCache { } } - pub fn end_frame(&mut self) { + pub fn end_frame(&mut self, texture_cache_profile: &mut TextureCacheProfileCounters) { debug_assert_eq!(self.state, State::QueryResources); self.state = State::Idle; + self.texture_cache.end_frame(texture_cache_profile); } pub fn set_debug_flags(&mut self, flags: DebugFlags) { diff --git a/webrender/src/texture_cache.rs b/webrender/src/texture_cache.rs index 32d094fb7c..c49ace6307 100644 --- a/webrender/src/texture_cache.rs +++ b/webrender/src/texture_cache.rs @@ -2,14 +2,14 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -use api::{DebugFlags, DeviceIntPoint, DeviceIntRect, DeviceIntSize, DirtyRect, ImageDirtyRect}; -use api::{ExternalImageType, ImageFormat}; -use api::ImageDescriptor; +use api::{DebugFlags, DeviceIntPoint, DeviceIntRect, DeviceIntSize}; +use api::{DirtyRect, ImageDirtyRect, DocumentId, ExternalImageType, ImageFormat}; +use api::{IdNamespace, ImageDescriptor}; use device::{TextureFilter, total_gpu_bytes_allocated}; use freelist::{FreeList, FreeListHandle, UpsertResult, WeakFreeListHandle}; use gpu_cache::{GpuCache, GpuCacheHandle}; use gpu_types::{ImageSource, UvRectKind}; -use internal_types::{CacheTextureId, LayerIndex, TextureUpdateList, TextureUpdateSource}; +use internal_types::{CacheTextureId, FastHashMap, LayerIndex, TextureUpdateList, TextureUpdateSource}; use internal_types::{TextureSource, TextureCacheAllocInfo, TextureCacheUpdate}; use profiler::{ResourceProfileCounter, TextureCacheProfileCounters}; use render_backend::{FrameId, FrameStamp}; @@ -282,7 +282,7 @@ impl SharedTextures { /// Lists of strong handles owned by the texture cache. There is only one strong /// handle for each entry, but unlimited weak handles. 
Consumers receive the weak /// handles, and `TextureCache` owns the strong handles internally. -#[derive(Default)] +#[derive(Default, Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] struct EntryHandles { @@ -410,6 +410,27 @@ impl EvictionThresholdBuilder { } } +#[cfg_attr(feature = "capture", derive(Serialize))] +#[cfg_attr(feature = "replay", derive(Deserialize))] +pub struct PerDocumentData { + /// The last `FrameStamp` in which we expired the shared cache for + /// this document. + last_shared_cache_expiration: FrameStamp, + + /// Strong handles for all entries that this document has allocated + /// from the shared FreeList. + handles: EntryHandles, +} + +impl PerDocumentData { + pub fn new() -> Self { + PerDocumentData { + last_shared_cache_expiration: FrameStamp::INVALID, + handles: EntryHandles::default(), + } + } +} + /// General-purpose manager for images in GPU memory. This includes images, /// rasterized glyphs, rasterized blobs, cached render tasks, etc. /// @@ -453,9 +474,6 @@ pub struct TextureCache { /// The current `FrameStamp`. Used for cache eviction policies. now: FrameStamp, - /// The last `FrameStamp` in which we expired the shared cache. - last_shared_cache_expiration: FrameStamp, - /// The time at which we first reached the byte threshold for reclaiming /// cache memory. `None if we haven't reached the threshold. reached_reclaim_threshold: Option, @@ -463,8 +481,16 @@ pub struct TextureCache { /// Maintains the list of all current items in the texture cache. entries: FreeList, - /// Strong handles for all entries allocated from the above `FreeList`. - handles: EntryHandles, + /// Holds items that need to be maintained on a per-document basis. If we + /// modify this data for a document without also building a frame for that + /// document, then we might end up erroneously evicting items out from + /// under that document. + per_doc_data: FastHashMap, + + /// The current document's data. This is moved out of per_doc_data in + /// begin_frame and moved back in end_frame to solve borrow checker issues. + /// We should try removing this when we require a rustc with NLL. + doc_data: PerDocumentData, } impl TextureCache { @@ -497,16 +523,16 @@ impl TextureCache { TextureCache { shared_textures: SharedTextures::new(), + reached_reclaim_threshold: None, + entries: FreeList::new(), max_texture_size, max_texture_layers, debug_flags: DebugFlags::empty(), next_id: CacheTextureId(1), pending_updates: TextureUpdateList::new(), now: FrameStamp::INVALID, - last_shared_cache_expiration: FrameStamp::INVALID, - reached_reclaim_threshold: None, - entries: FreeList::new(), - handles: EntryHandles::default(), + per_doc_data: FastHashMap::default(), + doc_data: PerDocumentData::new(), } } @@ -516,7 +542,7 @@ impl TextureCache { #[allow(dead_code)] pub fn new_for_testing(max_texture_size: i32, max_texture_layers: usize) -> Self { let mut cache = Self::new(max_texture_size, max_texture_layers); - let mut now = FrameStamp::first(); + let mut now = FrameStamp::first(DocumentId(IdNamespace(1), 1)); now.advance(); cache.begin_frame(now); cache @@ -528,32 +554,40 @@ impl TextureCache { /// Clear all standalone textures in the cache. 
pub fn clear_standalone(&mut self) { - let standalone_entry_handles = mem::replace( - &mut self.handles.standalone, - Vec::new(), - ); + let mut per_doc_data = mem::replace(&mut self.per_doc_data, FastHashMap::default()); + for (&_, doc_data) in per_doc_data.iter_mut() { + let standalone_entry_handles = mem::replace( + &mut doc_data.handles.standalone, + Vec::new(), + ); - for handle in standalone_entry_handles { - let entry = self.entries.free(handle); - entry.evict(); - self.free(entry); + for handle in standalone_entry_handles { + let entry = self.entries.free(handle); + entry.evict(); + self.free(entry); + } } + self.per_doc_data = per_doc_data; } /// Clear all shared textures in the cache. pub fn clear_shared(&mut self) { - let shared_entry_handles = mem::replace( - &mut self.handles.shared, - Vec::new(), - ); + let mut per_doc_data = mem::replace(&mut self.per_doc_data, FastHashMap::default()); + for (&_, doc_data) in per_doc_data.iter_mut() { + let shared_entry_handles = mem::replace( + &mut doc_data.handles.shared, + Vec::new(), + ); - for handle in shared_entry_handles { - let entry = self.entries.free(handle); - entry.evict(); - self.free(entry); + for handle in shared_entry_handles { + let entry = self.entries.free(handle); + entry.evict(); + self.free(entry); + } } self.shared_textures.clear(&mut self.pending_updates); + self.per_doc_data = per_doc_data; } /// Clear all entries in the texture cache. This is a fairly drastic @@ -565,13 +599,19 @@ impl TextureCache { /// Called at the beginning of each frame. pub fn begin_frame(&mut self, stamp: FrameStamp) { + debug_assert!(!self.now.is_valid()); self.now = stamp; + let document_id = self.now.document_id(); + self.doc_data = self.per_doc_data + .remove(&document_id) + .unwrap_or_else(|| PerDocumentData::new()); self.maybe_reclaim_shared_cache_memory(); } /// Called at the beginning of each frame to periodically GC and reclaim /// storage if the cache has grown too large. fn maybe_reclaim_shared_cache_memory(&mut self) { + debug_assert!(self.now.is_valid()); // The minimum number of bytes that we must be able to reclaim in order // to justify clearing the entire shared cache in order to shrink it. const RECLAIM_THRESHOLD_BYTES: usize = 5 * 1024 * 1024; @@ -581,7 +621,7 @@ impl TextureCache { // we recover unused memory in bounded time, rather than having it // depend on allocation patterns of subsequent content. let time_since_last_gc = self.now.time() - .duration_since(self.last_shared_cache_expiration.time()) + .duration_since(self.doc_data.last_shared_cache_expiration.time()) .unwrap_or(Duration::default()); let do_periodic_gc = time_since_last_gc >= Duration::from_secs(5) && self.shared_textures.size_in_bytes() >= RECLAIM_THRESHOLD_BYTES * 2; @@ -598,6 +638,10 @@ impl TextureCache { // // We could do this more intelligently with a resize+blit, but that would // add complexity for a rare case. + // + // This block of code is broken with multiple documents, and should be + // moved out into a section that runs before building any frames in a + // group of documents. if self.shared_textures.empty_region_bytes() >= RECLAIM_THRESHOLD_BYTES { self.reached_reclaim_threshold.get_or_insert(self.now.time()); } else { @@ -610,10 +654,10 @@ impl TextureCache { self.reached_reclaim_threshold = None; } } - } pub fn end_frame(&mut self, texture_cache_profile: &mut TextureCacheProfileCounters) { + debug_assert!(self.now.is_valid()); // Expire standalone entries. 
// // Most of the time, standalone cache entries correspond to images whose @@ -633,6 +677,10 @@ impl TextureCache { .update_profile(&mut texture_cache_profile.pages_rgba8_linear); self.shared_textures.array_rgba8_nearest .update_profile(&mut texture_cache_profile.pages_rgba8_nearest); + + self.per_doc_data.insert(self.now.document_id(), + mem::replace(&mut self.doc_data, PerDocumentData::new())); + self.now = FrameStamp::INVALID; } // Request an item in the texture cache. All images that will @@ -690,6 +738,8 @@ impl TextureCache { uv_rect_kind: UvRectKind, eviction: Eviction, ) { + debug_assert!(self.now.is_valid()); + // Determine if we need to allocate texture cache memory // for this item. We need to reallocate if any of the following // is true: @@ -853,12 +903,13 @@ impl TextureCache { /// /// See `EvictionThreshold` for more details on policy. fn expire_old_entries(&mut self, kind: EntryKind, threshold: EvictionThreshold) { + debug_assert!(self.now.is_valid()); // Iterate over the entries in reverse order, evicting the ones older than // the frame age threshold. Reverse order avoids iterator invalidation when // removing entries. - for i in (0..self.handles.select(kind).len()).rev() { + for i in (0..self.doc_data.handles.select(kind).len()).rev() { let evict = { - let entry = self.entries.get(&self.handles.select(kind)[i]); + let entry = self.entries.get(&self.doc_data.handles.select(kind)[i]); match entry.eviction { Eviction::Manual => false, Eviction::Auto => threshold.should_evict(entry.last_access), @@ -880,7 +931,7 @@ impl TextureCache { } }; if evict { - let handle = self.handles.select(kind).swap_remove(i); + let handle = self.doc_data.handles.select(kind).swap_remove(i); let entry = self.entries.free(handle); entry.evict(); self.free(entry); @@ -892,12 +943,13 @@ impl TextureCache { /// /// Returns true if any entries were expired. fn maybe_expire_old_shared_entries(&mut self, threshold: EvictionThreshold) -> bool { - let old_len = self.handles.shared.len(); - if self.last_shared_cache_expiration.frame_id() < self.now.frame_id() { + debug_assert!(self.now.is_valid()); + let old_len = self.doc_data.handles.shared.len(); + if self.doc_data.last_shared_cache_expiration.frame_id() < self.now.frame_id() { self.expire_old_entries(EntryKind::Shared, threshold); - self.last_shared_cache_expiration = self.now; + self.doc_data.last_shared_cache_expiration = self.now; } - self.handles.shared.len() != old_len + self.doc_data.handles.shared.len() != old_len } // Free a cache entry from the standalone list or shared cache. @@ -1106,6 +1158,7 @@ impl TextureCache { /// Allocates a cache entry for the given parameters, and updates the /// provided handle to point to the new entry. fn allocate(&mut self, params: &CacheAllocParams, handle: &mut TextureCacheHandle) { + debug_assert!(self.now.is_valid()); let new_cache_entry = self.allocate_cache_entry(params); let new_kind = new_cache_entry.details.kind(); @@ -1125,9 +1178,9 @@ impl TextureCache { // shared to standalone or vice versa. This involves a linear // search, but should be rare enough not to matter. 
let (from, to) = if new_kind == EntryKind::Standalone { - (&mut self.handles.shared, &mut self.handles.standalone) + (&mut self.doc_data.handles.shared, &mut self.doc_data.handles.standalone) } else { - (&mut self.handles.standalone, &mut self.handles.shared) + (&mut self.doc_data.handles.standalone, &mut self.doc_data.handles.shared) }; let idx = from.iter().position(|h| h.weak() == *handle).unwrap(); to.push(from.remove(idx)); @@ -1136,7 +1189,7 @@ impl TextureCache { } UpsertResult::Inserted(new_handle) => { *handle = new_handle.weak(); - self.handles.select(new_kind).push(new_handle); + self.doc_data.handles.select(new_kind).push(new_handle); } } } diff --git a/webrender_api/src/api.rs b/webrender_api/src/api.rs index f2329435de..5fff883b8b 100644 --- a/webrender_api/src/api.rs +++ b/webrender_api/src/api.rs @@ -794,6 +794,10 @@ pub struct IdNamespace(pub u32); #[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub struct DocumentId(pub IdNamespace, pub u32); +impl DocumentId { + pub const INVALID: DocumentId = DocumentId(IdNamespace(0), 0); +} + /// This type carries no valuable semantics for WR. However, it reflects the fact that /// clients (Servo) may generate pipelines by different semi-independent sources. /// These pipelines still belong to the same `IdNamespace` and the same `DocumentId`. From 7435380a635dba8b9a7e380c93c764cf528644cf Mon Sep 17 00:00:00 2001 From: Doug Thayer Date: Wed, 5 Dec 2018 13:09:34 -0500 Subject: [PATCH 2/3] Make GpuCache document-aware GpuCache can currently evict things out from underneath docs which are not updating this frame. This makes its roots document-specific, so that we only evict items for currently updating documents. Differential Revision: https://phabricator.services.mozilla.com/D13840 --- webrender/src/frame_builder.rs | 4 +- webrender/src/glyph_rasterizer/mod.rs | 2 +- webrender/src/gpu_cache.rs | 83 +++++++++++++++++---------- 3 files changed, 56 insertions(+), 33 deletions(-) diff --git a/webrender/src/frame_builder.rs b/webrender/src/frame_builder.rs index 718bf8a2ec..b0ba4f3195 100644 --- a/webrender/src/frame_builder.rs +++ b/webrender/src/frame_builder.rs @@ -387,7 +387,7 @@ impl FrameBuilder { .set(self.prim_store.prim_count()); resource_cache.begin_frame(stamp); - gpu_cache.begin_frame(stamp.frame_id()); + gpu_cache.begin_frame(stamp); let mut transform_palette = TransformPalette::new(); clip_scroll_tree.update_tree( @@ -492,7 +492,7 @@ impl FrameBuilder { } } - let gpu_cache_frame_id = gpu_cache.end_frame(gpu_cache_profile); + let gpu_cache_frame_id = gpu_cache.end_frame(gpu_cache_profile).frame_id(); render_tasks.write_task_data(device_pixel_scale); diff --git a/webrender/src/glyph_rasterizer/mod.rs b/webrender/src/glyph_rasterizer/mod.rs index a0d9665145..78b0265908 100644 --- a/webrender/src/glyph_rasterizer/mod.rs +++ b/webrender/src/glyph_rasterizer/mod.rs @@ -731,7 +731,7 @@ mod test_glyph_rasterizer { let workers = Arc::new(worker.unwrap()); let mut glyph_rasterizer = GlyphRasterizer::new(workers).unwrap(); let mut glyph_cache = GlyphCache::new(); - let mut gpu_cache = GpuCache::new(); + let mut gpu_cache = GpuCache::new_for_testing(); let mut texture_cache = TextureCache::new_for_testing(2048, 1024); let mut render_task_cache = RenderTaskCache::new(); let mut render_task_tree = RenderTaskTree::new(FrameId::INVALID); diff --git a/webrender/src/gpu_cache.rs b/webrender/src/gpu_cache.rs index fc03bbce78..49e9f7dfc6 100644 --- a/webrender/src/gpu_cache.rs +++ b/webrender/src/gpu_cache.rs @@ -24,11 
+24,12 @@ //! address in the GPU cache of a given resource slot //! for this frame. -use api::{DebugFlags, PremultipliedColorF, TexelRect}; +use api::{DebugFlags, DocumentId, PremultipliedColorF, IdNamespace, TexelRect}; use api::{VoidPtrToSizeFn}; use euclid::TypedRect; +use internal_types::{FastHashMap}; use profiler::GpuCacheProfileCounters; -use render_backend::FrameId; +use render_backend::{FrameStamp, FrameId}; use renderer::MAX_VERTEX_TEXTURE_WIDTH; use std::{mem, u16, u32}; use std::num::NonZeroU32; @@ -411,7 +412,7 @@ struct Texture { // Linked list of currently occupied blocks. This // makes it faster to iterate blocks looking for // candidates to be evicted from the cache. - occupied_list_head: Option, + occupied_list_heads: FastHashMap, // Pending blocks that have been written this frame // and will need to be sent to the GPU. pending_blocks: Vec, @@ -447,7 +448,7 @@ impl Texture { free_lists: FreeBlockLists::new(), pending_blocks: Vec::new(), updates: Vec::new(), - occupied_list_head: None, + occupied_list_heads: FastHashMap::default(), allocated_block_count: 0, reached_reclaim_threshold: None, debug_commands: Vec::new(), @@ -474,8 +475,9 @@ impl Texture { &mut self, pending_block_index: Option, block_count: usize, - frame_id: FrameId, + frame_stamp: FrameStamp ) -> CacheLocation { + debug_assert!(frame_stamp.is_valid()); // Find the appropriate free list to use based on the block size. let (alloc_size, free_list) = self.free_lists .get_actual_block_count_and_free_list(block_count); @@ -498,7 +500,7 @@ impl Texture { for i in 0 .. items_per_row { let address = GpuCacheAddress::new(i * alloc_size, row_index); let block_index = BlockIndex::new(self.blocks.len()); - let block = Block::new(address, prev_block_index, frame_id, self.base_epoch); + let block = Block::new(address, prev_block_index, frame_stamp.frame_id(), self.base_epoch); self.blocks.push(block); prev_block_index = Some(block_index); } @@ -514,9 +516,9 @@ impl Texture { *free_list = block.next; // Add the block to the occupied linked list. - block.next = self.occupied_list_head; - block.last_access_time = frame_id; - self.occupied_list_head = Some(free_block_index); + block.next = self.occupied_list_heads.get(&frame_stamp.document_id()).cloned(); + block.last_access_time = frame_stamp.frame_id(); + self.occupied_list_heads.insert(frame_stamp.document_id(), free_block_index); self.allocated_block_count += alloc_size; if let Some(pending_block_index) = pending_block_index { @@ -549,11 +551,12 @@ impl Texture { // Run through the list of occupied cache blocks and evict // any old blocks that haven't been referenced for a while. - fn evict_old_blocks(&mut self, frame_id: FrameId) { + fn evict_old_blocks(&mut self, frame_stamp: FrameStamp) { + debug_assert!(frame_stamp.is_valid()); // Prune any old items from the list to make room. // Traverse the occupied linked list and see // which items have not been used for a long time. - let mut current_block = self.occupied_list_head; + let mut current_block = self.occupied_list_heads.get(&frame_stamp.document_id()).map(|x| *x); let mut prev_block: Option = None; while let Some(index) = current_block { @@ -566,7 +569,7 @@ impl Texture { // If this resource has not been used in the last // few frames, free it from the texture and mark // as empty. - if block.last_access_time + FRAMES_BEFORE_EVICTION < frame_id { + if block.last_access_time + FRAMES_BEFORE_EVICTION < frame_stamp.frame_id() { should_unlink = true; // Get the row metadata from the address. 
@@ -600,7 +603,14 @@ impl Texture { self.blocks[prev_block.get()].next = next_block; } None => { - self.occupied_list_head = next_block; + match next_block { + Some(next_block) => { + self.occupied_list_heads.insert(frame_stamp.document_id(), next_block); + } + None => { + self.occupied_list_heads.remove(&frame_stamp.document_id()); + } + } } } } else { @@ -627,7 +637,7 @@ impl Texture { #[must_use] pub struct GpuDataRequest<'a> { handle: &'a mut GpuCacheHandle, - frame_id: FrameId, + frame_stamp: FrameStamp, start_index: usize, max_block_count: usize, texture: &'a mut Texture, @@ -653,7 +663,7 @@ impl<'a> Drop for GpuDataRequest<'a> { debug_assert!(block_count <= self.max_block_count); let location = self.texture - .push_data(Some(self.start_index), block_count, self.frame_id); + .push_data(Some(self.start_index), block_count, self.frame_stamp); self.handle.location = Some(location); } } @@ -663,8 +673,8 @@ impl<'a> Drop for GpuDataRequest<'a> { #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct GpuCache { - /// Current frame ID. - frame_id: FrameId, + /// Current FrameId. + now: FrameStamp, /// CPU-side texture allocator. texture: Texture, /// Number of blocks requested this frame that don't @@ -681,7 +691,7 @@ impl GpuCache { pub fn new() -> Self { let debug_flags = DebugFlags::empty(); GpuCache { - frame_id: FrameId::INVALID, + now: FrameStamp::INVALID, texture: Texture::new(Epoch(0), debug_flags), saved_block_count: 0, debug_flags, @@ -689,6 +699,18 @@ impl GpuCache { } } + /// Creates a GpuCache and sets it up with a valid `FrameStamp`, which + /// is useful for avoiding panics when instantiating the `GpuCache` + /// directly from unit test code. + #[allow(dead_code)] + pub fn new_for_testing() -> Self { + let mut cache = Self::new(); + let mut now = FrameStamp::first(DocumentId(IdNamespace(1), 1)); + now.advance(); + cache.begin_frame(now); + cache + } + /// Drops everything in the GPU cache. Must not be called once gpu cache entries /// for the next frame have already been requested. pub fn clear(&mut self) { @@ -701,10 +723,10 @@ impl GpuCache { } /// Begin a new frame. - pub fn begin_frame(&mut self, frame_id: FrameId) { + pub fn begin_frame(&mut self, stamp: FrameStamp) { debug_assert!(self.texture.pending_blocks.is_empty()); - self.frame_id = frame_id; - self.texture.evict_old_blocks(self.frame_id); + self.now = stamp; + self.texture.evict_old_blocks(self.now); self.saved_block_count = 0; } @@ -731,9 +753,9 @@ impl GpuCache { if let Some(block) = self.texture.blocks.get_mut(location.block_index.get()) { if block.epoch == location.epoch { max_block_count = self.texture.rows[block.address.v as usize].block_count_per_item; - if block.last_access_time != self.frame_id { + if block.last_access_time != self.now.frame_id() { // Mark last access time to avoid evicting this block. 
- block.last_access_time = self.frame_id; + block.last_access_time = self.now.frame_id(); self.saved_block_count += max_block_count; } return None; @@ -741,9 +763,10 @@ impl GpuCache { } } + debug_assert!(self.now.is_valid()); Some(GpuDataRequest { handle, - frame_id: self.frame_id, + frame_stamp: self.now, start_index: self.texture.pending_blocks.len(), texture: &mut self.texture, max_block_count, @@ -760,7 +783,7 @@ impl GpuCache { let start_index = self.texture.pending_blocks.len(); self.texture.pending_blocks.extend_from_slice(blocks); let location = self.texture - .push_data(Some(start_index), blocks.len(), self.frame_id); + .push_data(Some(start_index), blocks.len(), self.now); GpuCacheHandle { location: Some(location), } @@ -770,7 +793,7 @@ impl GpuCache { // will be resolved by the render thread via the // external image callback. pub fn push_deferred_per_frame_blocks(&mut self, block_count: usize) -> GpuCacheHandle { - let location = self.texture.push_data(None, block_count, self.frame_id); + let location = self.texture.push_data(None, block_count, self.now); GpuCacheHandle { location: Some(location), } @@ -781,7 +804,7 @@ impl GpuCache { pub fn end_frame( &mut self, profile_counters: &mut GpuCacheProfileCounters, - ) -> FrameId { + ) -> FrameStamp { profile_counters .allocated_rows .set(self.texture.rows.len()); @@ -801,7 +824,7 @@ impl GpuCache { self.texture.reached_reclaim_threshold = None; } - self.frame_id + self.now } /// Returns true if utilization has been low enough for long enough that we @@ -816,7 +839,7 @@ impl GpuCache { let clear = self.pending_clear; self.pending_clear = false; GpuCacheUpdateList { - frame_id: self.frame_id, + frame_id: self.now.frame_id(), clear, height: self.texture.height, debug_commands: mem::replace(&mut self.texture.debug_commands, Vec::new()), @@ -839,7 +862,7 @@ impl GpuCache { let location = id.location.expect("handle not requested or allocated!"); let block = &self.texture.blocks[location.block_index.get()]; debug_assert_eq!(block.epoch, location.epoch); - debug_assert_eq!(block.last_access_time, self.frame_id); + debug_assert_eq!(block.last_access_time, self.now.frame_id()); block.address } From f7cb7cecdeb485be03768888244228b872f5f14b Mon Sep 17 00:00:00 2001 From: Doug Thayer Date: Tue, 4 Dec 2018 16:21:09 -0500 Subject: [PATCH 3/3] Add option to disable texture cache clear To facilitate testing of document splitting before it is preffed on, I'm adding an option to disable clearing the texture cache, since this will currently crash the browser with doc splitting on. Differential Revision: https://phabricator.services.mozilla.com/D13841 --- webrender/src/texture_cache.rs | 14 ++++++++++++++ webrender_api/src/api.rs | 1 + 2 files changed, 15 insertions(+) diff --git a/webrender/src/texture_cache.rs b/webrender/src/texture_cache.rs index c49ace6307..202d0a8364 100644 --- a/webrender/src/texture_cache.rs +++ b/webrender/src/texture_cache.rs @@ -554,6 +554,13 @@ impl TextureCache { /// Clear all standalone textures in the cache. pub fn clear_standalone(&mut self) { + // This pref just helps us avoid crashes when we begin using multiple documents. + // What we need to do for clear to work correctly with multiple documents is + // to ensure that we generate frames for all documents whenever we do this. 
+ if self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG_DISABLE_SHRINK) { + return; + } + let mut per_doc_data = mem::replace(&mut self.per_doc_data, FastHashMap::default()); for (&_, doc_data) in per_doc_data.iter_mut() { let standalone_entry_handles = mem::replace( @@ -572,6 +579,13 @@ impl TextureCache { /// Clear all shared textures in the cache. pub fn clear_shared(&mut self) { + // This pref just helps us avoid crashes when we begin using multiple documents. + // What we need to do for clear to work correctly with multiple documents is + // to ensure that we generate frames for all documents whenever we do this. + if self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG_DISABLE_SHRINK) { + return; + } + let mut per_doc_data = mem::replace(&mut self.per_doc_data, FastHashMap::default()); for (&_, doc_data) in per_doc_data.iter_mut() { let shared_entry_handles = mem::replace( diff --git a/webrender_api/src/api.rs b/webrender_api/src/api.rs index 5fff883b8b..bc231b39fb 100644 --- a/webrender_api/src/api.rs +++ b/webrender_api/src/api.rs @@ -988,6 +988,7 @@ bitflags! { const GPU_CACHE_DBG = 1 << 12; const SLOW_FRAME_INDICATOR = 1 << 13; const TEXTURE_CACHE_DBG_CLEAR_EVICTED = 1 << 14; + const TEXTURE_CACHE_DBG_DISABLE_SHRINK = 1 << 15; } }
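
The bookkeeping pattern that patch 1 introduces in texture_cache.rs can be illustrated in isolation. The following is a minimal standalone sketch, not WebRender's actual types: it assumes a toy Cache with usize entry ids and an arbitrary placeholder eviction policy, and only shows the shape of the change, per-document handle lists kept in a HashMap keyed by DocumentId, with the active document's data moved out in begin_frame and back in at end_frame so eviction only ever sees entries owned by the document currently building a frame.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DocumentId(u32);

#[derive(Default, Debug)]
struct PerDocumentData {
    // Entry ids owned by one document (stands in for EntryHandles).
    handles: Vec<usize>,
}

#[derive(Default)]
struct Cache {
    // Data for documents that are not currently building a frame.
    per_doc_data: HashMap<DocumentId, PerDocumentData>,
    // The currently updating document's data, swapped in by begin_frame.
    doc_data: PerDocumentData,
    active_doc: Option<DocumentId>,
}

impl Cache {
    fn begin_frame(&mut self, doc: DocumentId) {
        assert!(self.active_doc.is_none());
        self.doc_data = self.per_doc_data.remove(&doc).unwrap_or_default();
        self.active_doc = Some(doc);
    }

    fn insert(&mut self, entry: usize) {
        // Allocation during a frame only touches the active document's handles.
        self.doc_data.handles.push(entry);
    }

    fn evict_old(&mut self) {
        // Eviction also only sees the active document, so other documents'
        // entries can never be freed out from under them.
        self.doc_data.handles.retain(|&e| e % 2 == 0); // placeholder policy
    }

    fn end_frame(&mut self) {
        let doc = self.active_doc.take().expect("end_frame without begin_frame");
        let data = std::mem::take(&mut self.doc_data);
        self.per_doc_data.insert(doc, data);
    }
}

fn main() {
    let mut cache = Cache::default();
    cache.begin_frame(DocumentId(1));
    cache.insert(2);
    cache.insert(3);
    cache.evict_old();
    cache.end_frame();

    // A frame built for a second document cannot evict document 1's entries.
    cache.begin_frame(DocumentId(2));
    cache.evict_old();
    cache.end_frame();

    assert_eq!(cache.per_doc_data[&DocumentId(1)].handles, vec![2]);
    println!("{:?}", cache.per_doc_data);
}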
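
Patch 2 applies the same idea to GpuCache's intrusive occupied list. The sketch below is again a simplified stand-in rather than the real Texture from gpu_cache.rs: blocks live in a plain Vec, block indices are usize, and the frame is just a u64 counter. It shows the single occupied_list_head becoming one list head per DocumentId, so evict_old_blocks only walks and unlinks blocks owned by the document being updated.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DocumentId(u32);

struct Block {
    last_access_frame: u64,
    next: Option<usize>, // next block in this document's occupied list
    free: bool,
}

#[derive(Default)]
struct Texture {
    blocks: Vec<Block>,
    occupied_list_heads: HashMap<DocumentId, usize>,
}

const FRAMES_BEFORE_EVICTION: u64 = 10;

impl Texture {
    fn push_block(&mut self, doc: DocumentId, frame: u64) -> usize {
        let index = self.blocks.len();
        self.blocks.push(Block {
            last_access_frame: frame,
            // Link the new block at the head of this document's list.
            next: self.occupied_list_heads.get(&doc).copied(),
            free: false,
        });
        self.occupied_list_heads.insert(doc, index);
        index
    }

    fn evict_old_blocks(&mut self, doc: DocumentId, frame: u64) {
        let mut current = self.occupied_list_heads.get(&doc).copied();
        let mut prev: Option<usize> = None;
        while let Some(index) = current {
            let next = self.blocks[index].next;
            if self.blocks[index].last_access_frame + FRAMES_BEFORE_EVICTION < frame {
                // Unlink and mark free; other documents' lists are untouched.
                self.blocks[index].free = true;
                self.blocks[index].next = None;
                match prev {
                    Some(p) => self.blocks[p].next = next,
                    None => match next {
                        Some(n) => { self.occupied_list_heads.insert(doc, n); }
                        None => { self.occupied_list_heads.remove(&doc); }
                    },
                }
            } else {
                prev = Some(index);
            }
            current = next;
        }
    }
}

fn main() {
    let mut tex = Texture::default();
    let a = tex.push_block(DocumentId(1), 1); // stale by frame 20
    let b = tex.push_block(DocumentId(2), 1); // belongs to another document
    tex.evict_old_blocks(DocumentId(1), 20);
    assert!(tex.blocks[a].free);
    assert!(!tex.blocks[b].free); // document 2 was not touched
    println!("heads: {:?}", tex.occupied_list_heads);
}

Keeping the list intrusive (a next index stored in each block, as in the real cache) preserves O(1) insertion; the only structural change is that the head pointer now lives in a per-document map, which is what stops one document's eviction pass from freeing blocks that a non-updating document still references.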