author | 2024-04-03 22:13:00 +0200
committer | 2024-04-03 22:13:00 +0200
commit | 88b72de282441367092b07f8075eb931eff495ad (patch)
tree | a4dfafd7ac4bd4ca5481a6bf3337511c652fe00b /wgpu
parent | 09af6773bdfe3039f6bf1720da945ae874496b81 (diff)
Implement preliminary cache grouping for mesh primitives
Due to antialiasing (AA), it is very expensive to render every cached layer independently.
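
The grouping works by scanning forward from the current layer index to the next Layer::Live and treating everything in between as one batch, so the expensive antialiased pass runs once per group of cached layers instead of once per layer. Below is a minimal, self-contained sketch of that scan; the Layer variants and println! calls are stand-ins for the real iced_wgpu types and pipeline calls shown in the diff further down, and only the grouping logic mirrors the patch.

// Simplified stand-ins; the real iced_wgpu `Layer` carries quads, meshes,
// text, and images, and rendering goes through the engine pipelines.
#[derive(Debug)]
enum Layer {
    Live(&'static str),
    Cached(&'static str),
}

// Walk the layers the way the patched loop in wgpu/src/lib.rs does: live
// layers are handled one at a time, while consecutive cached layers are
// collected into one group so a single (expensive, antialiased) pass can
// cover all of their meshes.
fn group_layers(layers: &[Layer]) {
    let mut i = 0;

    while i < layers.len() {
        match &layers[i] {
            Layer::Live(name) => {
                println!("render live layer {name}");
                i += 1;
            }
            Layer::Cached(_) => {
                // Length of the run of cached layers starting at `i`;
                // `position` is relative to the sub-slice, hence `- i`.
                let group_len = layers[i..]
                    .iter()
                    .position(|layer| matches!(layer, Layer::Live(_)))
                    .unwrap_or(layers.len() - i);

                let group = &layers[i..i + group_len];
                println!("render {} cached layers in one pass: {group:?}", group.len());

                i += group_len;
            }
        }
    }
}

fn main() {
    group_layers(&[
        Layer::Cached("a"),
        Layer::Cached("b"),
        Layer::Live("c"),
        Layer::Cached("d"),
    ]);
}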
Diffstat (limited to 'wgpu')
-rw-r--r-- | wgpu/src/lib.rs | 95
-rw-r--r-- | wgpu/src/triangle.rs | 63
2 files changed, 106 insertions, 52 deletions
diff --git a/wgpu/src/lib.rs b/wgpu/src/lib.rs
index 0e173e0a..b9869583 100644
--- a/wgpu/src/lib.rs
+++ b/wgpu/src/lib.rs
@@ -315,9 +315,10 @@ impl Renderer {

         // TODO: Can we avoid collecting here?
         let layers: Vec<_> = self.layers.iter().collect();
+        let mut i = 0;

-        for layer in &layers {
-            match layer {
+        while i < layers.len() {
+            match layers[i] {
                 Layer::Live(live) => {
                     let bounds = live
                         .bounds
@@ -393,32 +394,54 @@ impl Renderer {

                         image_layer += 1;
                     }
+
+                    i += 1;
                 }
-                Layer::Cached(cached) => {
-                    let bounds = cached
-                        .bounds
-                        .map(|bounds| bounds * scale_factor)
-                        .map(Rectangle::snap)
-                        .unwrap_or(Rectangle::with_size(target_size));
+                Layer::Cached(_) => {
+                    let group_len = layers[i..]
+                        .iter()
+                        .position(|layer| matches!(layer, Layer::Live(_)))
+                        .unwrap_or(layers.len());

-                    if !cached.quads.is_empty() {
-                        engine.quad_pipeline.render_cache(
-                            &cached.quads,
-                            bounds,
-                            &mut render_pass,
-                        );
+                    let group = layers[i..i + group_len].iter().map(|layer| {
+                        let Layer::Cached(cached) = layer else {
+                            unreachable!()
+                        };
+
+                        let bounds = cached
+                            .bounds
+                            .map(|bounds| bounds * scale_factor)
+                            .map(Rectangle::snap)
+                            .unwrap_or(Rectangle::with_size(target_size));
+
+                        (cached, bounds)
+                    });
+
+                    for (cached, bounds) in group.clone() {
+                        if !cached.quads.is_empty() {
+                            engine.quad_pipeline.render_cache(
+                                &cached.quads,
+                                bounds,
+                                &mut render_pass,
+                            );
+                        }
                     }

-                    if !cached.meshes.is_empty() {
+                    let group_has_meshes = group
+                        .clone()
+                        .any(|(cached, _)| !cached.meshes.is_empty());
+
+                    if group_has_meshes {
                         let _ = ManuallyDrop::into_inner(render_pass);

-                        engine.triangle_pipeline.render_cache(
+                        engine.triangle_pipeline.render_cache_group(
                             device,
                             encoder,
                             frame,
                             target_size,
-                            &cached.meshes,
-                            bounds,
+                            group.clone().map(|(cached, bounds)| {
+                                (&cached.meshes, bounds)
+                            }),
                             scale_factor,
                         );

@@ -443,24 +466,28 @@ impl Renderer {
                         ));
                     }

-                    if !cached.text.is_empty() {
-                        engine.text_pipeline.render_cache(
-                            &cached.text,
-                            bounds,
-                            &mut render_pass,
-                        );
+                    for (cached, bounds) in group {
+                        if !cached.text.is_empty() {
+                            engine.text_pipeline.render_cache(
+                                &cached.text,
+                                bounds,
+                                &mut render_pass,
+                            );
+                        }
+
+                        #[cfg(any(feature = "svg", feature = "image"))]
+                        if !cached.images.is_empty() {
+                            engine.image_pipeline.render(
+                                image_layer,
+                                bounds,
+                                &mut render_pass,
+                            );
+
+                            image_layer += 1;
+                        }
                     }

-                    #[cfg(any(feature = "svg", feature = "image"))]
-                    if !cached.images.is_empty() {
-                        engine.image_pipeline.render(
-                            image_layer,
-                            bounds,
-                            &mut render_pass,
-                        );
-
-                        image_layer += 1;
-                    }
+                    i += group_len;
                 }
             }
         }
diff --git a/wgpu/src/triangle.rs b/wgpu/src/triangle.rs
index 6df97a7b..9cd02f72 100644
--- a/wgpu/src/triangle.rs
+++ b/wgpu/src/triangle.rs
@@ -136,14 +136,13 @@ impl Pipeline {
             self.blit.as_mut(),
             &self.solid,
             &self.gradient,
-            &self.layers[layer],
             target_size,
-            meshes,
-            bounds,
+            std::iter::once((&self.layers[layer], meshes, bounds)),
             scale_factor,
         );
     }

+    #[allow(dead_code)]
     pub fn render_cache(
         &mut self,
         device: &wgpu::Device,
@@ -165,25 +164,51 @@ impl Pipeline {
             self.blit.as_mut(),
             &self.solid,
             &self.gradient,
-            layer,
             target_size,
-            batch,
-            bounds,
+            std::iter::once((layer, batch, bounds)),
             scale_factor,
         );
     }

-    fn render(
+    pub fn render_cache_group<'a>(
+        &mut self,
+        device: &wgpu::Device,
+        encoder: &mut wgpu::CommandEncoder,
+        target: &wgpu::TextureView,
+        target_size: Size<u32>,
+        group: impl Iterator<Item = (&'a Cache, Rectangle<u32>)>,
+        scale_factor: f32,
+    ) {
+        let group = group.filter_map(|(cache, bounds)| {
+            if let Cache::Uploaded { batch, layer, .. } = cache {
+                Some((layer, batch, bounds))
+            } else {
+                None
+            }
+        });
+
+        Self::render(
+            device,
+            encoder,
+            target,
+            self.blit.as_mut(),
+            &self.solid,
+            &self.gradient,
+            target_size,
+            group,
+            scale_factor,
+        );
+    }
+
+    fn render<'a>(
         device: &wgpu::Device,
         encoder: &mut wgpu::CommandEncoder,
         target: &wgpu::TextureView,
         mut blit: Option<&mut msaa::Blit>,
         solid: &solid::Pipeline,
         gradient: &gradient::Pipeline,
-        layer: &Layer,
         target_size: Size<u32>,
-        meshes: &Batch,
-        bounds: Rectangle<u32>,
+        group: impl Iterator<Item = (&'a Layer, &'a Batch, Rectangle<u32>)>,
         scale_factor: f32,
     ) {
         {
@@ -220,14 +245,16 @@ impl Pipeline {
                 occlusion_query_set: None,
             });

-            layer.render(
-                solid,
-                gradient,
-                meshes,
-                bounds,
-                scale_factor,
-                &mut render_pass,
-            );
+            for (layer, meshes, bounds) in group {
+                layer.render(
+                    solid,
+                    gradient,
+                    meshes,
+                    bounds,
+                    scale_factor,
+                    &mut render_pass,
+                );
+            }
         }

         if let Some(blit) = blit {
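
The triangle.rs side of the change follows a simple pattern: the private render function now takes an iterator of work items and records them all inside one render pass, while the single-item entry points wrap their arguments in std::iter::once. A rough sketch of that shape, with placeholder types standing in for the real wgpu state rather than the actual pipeline:

// Placeholder types; `MsaaPass` stands in for the MSAA render-pass setup and
// resolve that make per-item passes expensive in the real pipeline.
struct Item(u32);
struct MsaaPass;

impl MsaaPass {
    fn draw(&mut self, item: &Item) {
        println!("drawing item {}", item.0);
    }
}

// One pass is set up per call, not per item, so grouping items amortizes it.
fn render<'a>(items: impl Iterator<Item = &'a Item>) {
    let mut pass = MsaaPass;

    for item in items {
        pass.draw(item);
    }
}

// The single-item path keeps its old signature by wrapping the item in `once`.
fn render_one(item: &Item) {
    render(std::iter::once(item));
}

// The grouped path simply forwards the iterator.
fn render_group<'a>(items: impl Iterator<Item = &'a Item>) {
    render(items);
}

fn main() {
    let items = [Item(1), Item(2), Item(3)];

    render_one(&items[0]);
    render_group(items.iter());
}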