Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Sync changes from mozilla-central #3813

Merged
merged 8 commits into from Dec 13, 2019
@@ -564,43 +564,74 @@ impl ClipChainInstance {
/// Maintains a (flattened) list of clips for a given level in the surface level stack.
#[derive(Debug)]
pub struct ClipChainLevel {
clips: Vec<ClipChainId>,
clip_counts: Vec<usize>,
/// These clips will be handled when compositing this surface into the parent,
/// and can thus be ignored on the primitives that are drawn as part of this surface.
shared_clips: Vec<ClipDataHandle>,
}

impl ClipChainLevel {
/// Construct a new level in the active clip chain stack. The viewport
/// is used to filter out irrelevant clips.
fn new(
shared_clips: Vec<ClipDataHandle>,
) -> Self {
ClipChainLevel {
clips: Vec::new(),
clip_counts: Vec::new(),
shared_clips,
}
}
/// Index of the first element in ClipChainStack::clip that belongs to this level.
first_clip_index: usize,
/// Used to sanity check push/pop balance.
initial_clip_counts_len: usize,
}

/// Maintains a stack of clip chain ids that are currently active,
/// when a clip exists on a picture that has no surface, and is passed
/// on down to the child primitive(s).
///
/// In order to avoid many small vector allocations, all clip chain ids are
/// stored in a single vector instead of per-level.
/// Since we only work with the top-most level of the stack, we only need to
/// know the first index in the clips vector that belongs to each level. The
/// last index for the top-most level is always the end of the clips array.
///
/// Likewise, we push several clip chain ids to the clips array at each
/// push_clip, and the number of clip chain ids removed during pop_clip
/// must match. This is done by having a separate stack of clip counts
/// in the clip-stack rather than per-level to avoid vector allocations.
///
/// ```ascii
///              +----+----+---
///      levels: |    |    | ...
///              +----+----+---
///               |first   \
///               |         \
///               |          \
///              +--+--+--+--+--+--+--
///       clips: |  |  |  |  |  |  | ...
///              +--+--+--+--+--+--+--
///               |     /     /
///               |    /    /
///               |   /   /
///              +--+--+--+--
/// clip_counts: | 1| 2| 2| ...
///              +--+--+--+--
/// ```
pub struct ClipChainStack {
    // TODO(gw): Consider using SmallVec, or recycling the clip stacks here.
    /// A stack of clip chain levels. Each time a new surface is pushed,
    /// a new level is added. Each time a new picture without surface is
    /// pushed, it adds the picture clip chain to the clips vector in the
    /// range belonging to the level (always the top-most level, so always
    /// at the end of the clips array).
    levels: Vec<ClipChainLevel>,
    /// The actual stack of clip ids.
    clips: Vec<ClipChainId>,
    /// How many clip ids to pop from the vector each time we call pop_clip.
    clip_counts: Vec<usize>,
}

impl ClipChainStack {
pub fn new() -> Self {
ClipChainStack {
stack: vec![ClipChainLevel::new(Vec::new())],
levels: vec![
ClipChainLevel {
shared_clips: Vec::new(),
first_clip_index: 0,
initial_clip_counts_len: 0,
}
],
clips: Vec::new(),
clip_counts: Vec::new(),
}
}

@@ -622,7 +653,7 @@ impl ClipChainStack {
// TODO(gw): We could consider making this a HashSet if it ever shows up in
// profiles, but the typical array length is 2-3 elements.
let mut valid_clip = true;
for level in &self.stack {
for level in &self.levels {
if level.shared_clips.iter().any(|handle| {
handle.uid() == clip_uid
}) {
@@ -632,22 +663,21 @@ impl ClipChainStack {
}

if valid_clip {
self.stack.last_mut().unwrap().clips.push(current_clip_chain_id);
self.clips.push(current_clip_chain_id);
clip_count += 1;
}

current_clip_chain_id = clip_chain_node.parent_clip_chain_id;
}

self.stack.last_mut().unwrap().clip_counts.push(clip_count);
self.clip_counts.push(clip_count);
}

/// Pop a clip chain root from the currently active list.
pub fn pop_clip(&mut self) {
let level = self.stack.last_mut().unwrap();
let count = level.clip_counts.pop().unwrap();
let count = self.clip_counts.pop().unwrap();
for _ in 0 .. count {
level.clips.pop().unwrap();
self.clips.pop().unwrap();
}
}

@@ -657,19 +687,26 @@ impl ClipChainStack {
&mut self,
shared_clips: &[ClipDataHandle],
) {
let level = ClipChainLevel::new(shared_clips.to_vec());
self.stack.push(level);
let level = ClipChainLevel {
shared_clips: shared_clips.to_vec(),
first_clip_index: self.clips.len(),
initial_clip_counts_len: self.clip_counts.len(),
};

self.levels.push(level);
}

/// Pop a surface from the clip chain stack
pub fn pop_surface(&mut self) {
let level = self.stack.pop().unwrap();
assert!(level.clip_counts.is_empty() && level.clips.is_empty());
let level = self.levels.pop().unwrap();
assert!(self.clip_counts.len() == level.initial_clip_counts_len);
assert!(self.clips.len() == level.first_clip_index);
}

/// Get the list of currently active clip chains
pub fn current_clips_array(&self) -> &[ClipChainId] {
&self.stack.last().unwrap().clips
let first = self.levels.last().unwrap().first_clip_index;
&self.clips[first..]
}
}

@@ -2697,67 +2697,29 @@ impl Device {
pbo.reserved_size = 0
}

/// Returns the size in bytes required to upload an area of pixels of the specified
/// size, with the specified stride, to a texture of the specified format.
pub fn required_upload_size(size: DeviceIntSize, stride: Option<i32>, format: ImageFormat, optimal_pbo_stride: NonZeroUsize) -> usize {
assert!(size.width >= 0);
assert!(size.height >= 0);
if let Some(stride) = stride {
assert!(stride >= 0);
}

let bytes_pp = format.bytes_per_pixel() as usize;
let width_bytes = size.width as usize * bytes_pp;
let src_stride = stride.map_or(width_bytes, |stride| {
assert!(stride >= 0);
stride as usize
});

let dst_stride = round_up_to_multiple(src_stride, optimal_pbo_stride);

// The size of the chunk should only need to be (height - 1) * dst_stride + width_bytes,
// however, the android emulator will error unless it is height * dst_stride.
// See bug 1587047 for details.
// Using the full final row also ensures that the offset of the next chunk is
// optimally aligned.
dst_stride * size.height as usize
}

/// Returns a `TextureUploader` which can be used to upload texture data to `texture`.
/// The total size in bytes is specified by `upload_size`, and must be greater than zero
/// and at least as large as the sum of the `required_upload_size()` for each subsequent
/// call to `TextureUploader.upload()`.
pub fn upload_texture<'a, T>(
&'a mut self,
texture: &'a Texture,
pbo: &PBO,
upload_size: usize,
upload_count: usize,
) -> TextureUploader<'a, T> {
debug_assert!(self.inside_frame);
assert_ne!(upload_size, 0, "Must specify valid upload size");

self.bind_texture(DEFAULT_TEXTURE, texture, Swizzle::default());

let buffer = match self.upload_method {
UploadMethod::Immediate => None,
UploadMethod::PixelBuffer(hint) => {
let upload_size = upload_count * mem::size_of::<T>();
self.gl.bind_buffer(gl::PIXEL_UNPACK_BUFFER, pbo.id);
self.gl.buffer_data_untyped(
gl::PIXEL_UNPACK_BUFFER,
upload_size as _,
ptr::null(),
hint.to_gl(),
);
let ptr = self.gl.map_buffer_range(
gl::PIXEL_UNPACK_BUFFER,
0,
upload_size as _,
gl::MAP_WRITE_BIT | gl::MAP_INVALIDATE_BUFFER_BIT,
);
let mapping = unsafe {
slice::from_raw_parts_mut(ptr as *mut _, upload_size)
};
Some(PixelBuffer::new(upload_size, mapping))
if upload_size != 0 {
self.gl.buffer_data_untyped(
gl::PIXEL_UNPACK_BUFFER,
upload_size as _,
ptr::null(),
hint.to_gl(),
);
}
Some(PixelBuffer::new(hint.to_gl(), upload_size))
},
};

@@ -3523,24 +3485,24 @@ struct UploadChunk {
format_override: Option<ImageFormat>,
}

struct PixelBuffer<'a> {
struct PixelBuffer {
usage: gl::GLenum,
size_allocated: usize,
size_used: usize,
// small vector avoids heap allocation for a single chunk
chunks: SmallVec<[UploadChunk; 1]>,
mapping: &'a mut [mem::MaybeUninit<u8>],
}

impl<'a> PixelBuffer<'a> {
impl PixelBuffer {
fn new(
usage: gl::GLenum,
size_allocated: usize,
mapping: &'a mut [mem::MaybeUninit<u8>],
) -> Self {
PixelBuffer {
usage,
size_allocated,
size_used: 0,
chunks: SmallVec::new(),
mapping,
}
}
}
@@ -3554,14 +3516,13 @@ struct UploadTarget<'a> {

pub struct TextureUploader<'a, T> {
target: UploadTarget<'a>,
buffer: Option<PixelBuffer<'a>>,
buffer: Option<PixelBuffer>,
marker: PhantomData<T>,
}

impl<'a, T> Drop for TextureUploader<'a, T> {
fn drop(&mut self) {
if let Some(buffer) = self.buffer.take() {
self.target.gl.unmap_buffer(gl::PIXEL_UNPACK_BUFFER);
for chunk in buffer.chunks {
self.target.update_impl(chunk);
}
@@ -3603,42 +3564,70 @@ impl<'a, T> TextureUploader<'a, T> {
let src_size = (rect.size.height as usize - 1) * src_stride + width_bytes;
assert!(src_size <= len * mem::size_of::<T>());

// for optimal PBO texture uploads the offset and stride of the data in
// for optimal PBO texture uploads the stride of the data in
// the buffer may have to be a multiple of a certain value.
let dst_stride = round_up_to_multiple(src_stride, self.target.optimal_pbo_stride);
let dst_size = Device::required_upload_size(
rect.size,
stride,
self.target.texture.format,
self.target.optimal_pbo_stride
);
// The size of the PBO should only need to be (height - 1) * dst_stride + width_bytes,
// however, the android emulator will error unless it is height * dst_stride.
// See bug 1587047 for details.
let dst_size = rect.size.height as usize * dst_stride;

match self.buffer {
Some(ref mut buffer) => {
assert!(buffer.size_used + dst_size <= buffer.size_allocated, "UploadBuffer is too small");
if buffer.size_used + dst_size > buffer.size_allocated {
// flush
for chunk in buffer.chunks.drain() {
self.target.update_impl(chunk);
}
buffer.size_used = 0;
}

if dst_size > buffer.size_allocated {
// allocate a buffer large enough
self.target.gl.buffer_data_untyped(
gl::PIXEL_UNPACK_BUFFER,
dst_size as _,
ptr::null(),
buffer.usage,
);
buffer.size_allocated = dst_size;
}

if src_stride == dst_stride {
// the stride is already optimal, so simply copy
// the data as-is in to the buffer
assert_eq!(src_size % mem::size_of::<T>(), 0);
self.target.gl.buffer_sub_data_untyped(
gl::PIXEL_UNPACK_BUFFER,
buffer.size_used as isize,
src_size as isize,
data as *const _,
);
} else {
// copy the data line-by-line in to the buffer so
// that it has an optimal stride
let ptr = self.target.gl.map_buffer_range(
gl::PIXEL_UNPACK_BUFFER,
buffer.size_used as _,
dst_size as _,
gl::MAP_WRITE_BIT | gl::MAP_INVALIDATE_RANGE_BIT,
);

unsafe {
let src: &[mem::MaybeUninit<u8>] = slice::from_raw_parts(data as *const _, src_size);
let dst: &mut [mem::MaybeUninit<u8>] = slice::from_raw_parts_mut(ptr as *mut _, dst_size);

unsafe {
let src: &[mem::MaybeUninit<u8>] = slice::from_raw_parts(data as *const _, src_size);

if src_stride == dst_stride {
// the stride is already optimal, so simply copy
// the data as-is in to the buffer
let dst_start = buffer.size_used;
let dst_end = dst_start + src_size;

buffer.mapping[dst_start..dst_end].copy_from_slice(src);
} else {
// copy the data line-by-line in to the buffer so
// that it has an optimal stride
for y in 0..rect.size.height as usize {
let src_start = y * src_stride;
let src_end = src_start + width_bytes;
let dst_start = buffer.size_used + y * dst_stride;
let dst_start = y * dst_stride;
let dst_end = dst_start + width_bytes;

buffer.mapping[dst_start..dst_end].copy_from_slice(&src[src_start..src_end])
dst[dst_start..dst_end].copy_from_slice(&src[src_start..src_end])
}
}

self.target.gl.unmap_buffer(gl::PIXEL_UNPACK_BUFFER);
}

buffer.chunks.push(UploadChunk {
ProTip! Use n and p to navigate between commits in a pull request.
You can’t perform that action at this time.