path: root/wgpu/src/lib.rs
author      Héctor Ramón Jiménez <hector@hecrj.dev>    2024-04-03 22:13:00 +0200
committer   Héctor Ramón Jiménez <hector@hecrj.dev>    2024-04-03 22:13:00 +0200
commit      88b72de282441367092b07f8075eb931eff495ad (patch)
tree        a4dfafd7ac4bd4ca5481a6bf3337511c652fe00b /wgpu/src/lib.rs
parent      09af6773bdfe3039f6bf1720da945ae874496b81 (diff)
Implement preliminary cache grouping for mesh primitives
Due to anti-aliasing, it is very expensive to render every cached layer independently.
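
The grouping idea, sketched below with a simplified stand-in for the renderer's `Layer` type (the real variants carry quads, meshes, text and images rather than plain strings, and the `main` demo here is purely illustrative): consecutive `Layer::Cached` entries are collected into one run so that their meshes can share a single anti-aliased triangle pass, while `Layer::Live` entries are still handled one at a time.

// Illustrative sketch only: `Layer` is a simplified stand-in for the
// renderer's layer type; the real variants carry quads, meshes, text, etc.
enum Layer {
    Live(&'static str),
    Cached(&'static str),
}

fn main() {
    let layers = vec![
        Layer::Live("a"),
        Layer::Cached("b"),
        Layer::Cached("c"),
        Layer::Live("d"),
    ];

    let mut i = 0;

    while i < layers.len() {
        match &layers[i] {
            Layer::Live(name) => {
                // Live layers are still rendered one at a time.
                println!("live layer {name}");
                i += 1;
            }
            Layer::Cached(_) => {
                // Length of the run of consecutive cached layers starting at `i`.
                let group_len = layers[i..]
                    .iter()
                    .position(|layer| matches!(layer, Layer::Live(_)))
                    .unwrap_or(layers[i..].len());

                // The whole run is handed off at once, so the anti-aliasing
                // cost is paid per group instead of per cached layer.
                let group: Vec<_> = layers[i..i + group_len]
                    .iter()
                    .map(|layer| match layer {
                        Layer::Cached(name) => *name,
                        Layer::Live(_) => unreachable!(),
                    })
                    .collect();

                println!("cached group of {group_len}: {group:?}");

                i += group_len;
            }
        }
    }
}

In the actual diff below, each element of the run also carries its snapped bounds, and the quad, text and image pipelines still walk the run layer by layer; only the mesh pass (`render_cache_group`) consumes the whole group in one call.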
Diffstat (limited to 'wgpu/src/lib.rs')
-rw-r--r--    wgpu/src/lib.rs    95
1 file changed, 61 insertions(+), 34 deletions(-)
diff --git a/wgpu/src/lib.rs b/wgpu/src/lib.rs
index 0e173e0a..b9869583 100644
--- a/wgpu/src/lib.rs
+++ b/wgpu/src/lib.rs
@@ -315,9 +315,10 @@ impl Renderer {
// TODO: Can we avoid collecting here?
let layers: Vec<_> = self.layers.iter().collect();
+ let mut i = 0;
- for layer in &layers {
- match layer {
+ while i < layers.len() {
+ match layers[i] {
Layer::Live(live) => {
let bounds = live
.bounds
@@ -393,32 +394,54 @@ impl Renderer {
image_layer += 1;
}
+
+ i += 1;
}
- Layer::Cached(cached) => {
- let bounds = cached
- .bounds
- .map(|bounds| bounds * scale_factor)
- .map(Rectangle::snap)
- .unwrap_or(Rectangle::with_size(target_size));
+ Layer::Cached(_) => {
+ let group_len = layers[i..]
+ .iter()
+ .position(|layer| matches!(layer, Layer::Live(_)))
+ .unwrap_or(layers.len());
- if !cached.quads.is_empty() {
- engine.quad_pipeline.render_cache(
- &cached.quads,
- bounds,
- &mut render_pass,
- );
+ let group = layers[i..i + group_len].iter().map(|layer| {
+ let Layer::Cached(cached) = layer else {
+ unreachable!()
+ };
+
+ let bounds = cached
+ .bounds
+ .map(|bounds| bounds * scale_factor)
+ .map(Rectangle::snap)
+ .unwrap_or(Rectangle::with_size(target_size));
+
+ (cached, bounds)
+ });
+
+ for (cached, bounds) in group.clone() {
+ if !cached.quads.is_empty() {
+ engine.quad_pipeline.render_cache(
+ &cached.quads,
+ bounds,
+ &mut render_pass,
+ );
+ }
}
- if !cached.meshes.is_empty() {
+ let group_has_meshes = group
+ .clone()
+ .any(|(cached, _)| !cached.meshes.is_empty());
+
+ if group_has_meshes {
let _ = ManuallyDrop::into_inner(render_pass);
- engine.triangle_pipeline.render_cache(
+ engine.triangle_pipeline.render_cache_group(
device,
encoder,
frame,
target_size,
- &cached.meshes,
- bounds,
+ group.clone().map(|(cached, bounds)| {
+ (&cached.meshes, bounds)
+ }),
scale_factor,
);
@@ -443,24 +466,28 @@ impl Renderer {
));
}
- if !cached.text.is_empty() {
- engine.text_pipeline.render_cache(
- &cached.text,
- bounds,
- &mut render_pass,
- );
+ for (cached, bounds) in group {
+ if !cached.text.is_empty() {
+ engine.text_pipeline.render_cache(
+ &cached.text,
+ bounds,
+ &mut render_pass,
+ );
+ }
+
+ #[cfg(any(feature = "svg", feature = "image"))]
+ if !cached.images.is_empty() {
+ engine.image_pipeline.render(
+ image_layer,
+ bounds,
+ &mut render_pass,
+ );
+
+ image_layer += 1;
+ }
}
- #[cfg(any(feature = "svg", feature = "image"))]
- if !cached.images.is_empty() {
- engine.image_pipeline.render(
- image_layer,
- bounds,
- &mut render_pass,
- );
-
- image_layer += 1;
- }
+ i += group_len;
}
}
}