Diffstat (limited to 'wgpu/src/triangle.rs')
-rw-r--r--  wgpu/src/triangle.rs  233
1 file changed, 189 insertions(+), 44 deletions(-)
diff --git a/wgpu/src/triangle.rs b/wgpu/src/triangle.rs
index 3cc1d3fb..9159b0a2 100644
--- a/wgpu/src/triangle.rs
+++ b/wgpu/src/triangle.rs
@@ -1,32 +1,82 @@
//! Draw meshes of triangles.
-use crate::Transformation;
+use crate::{settings, Transformation};
use iced_native::{Point, Rectangle};
use std::{mem, sync::Arc};
+mod msaa;
+
+const UNIFORM_BUFFER_SIZE: usize = 100;
+const VERTEX_BUFFER_SIZE: usize = 100_000;
+const INDEX_BUFFER_SIZE: usize = 100_000;
+
#[derive(Debug)]
pub(crate) struct Pipeline {
pipeline: wgpu::RenderPipeline,
+ blit: Option<msaa::Blit>,
constants: wgpu::BindGroup,
- constants_buffer: wgpu::Buffer,
+ uniforms_buffer: Buffer<Uniforms>,
+ vertex_buffer: Buffer<Vertex2D>,
+ index_buffer: Buffer<u32>,
+}
+
+#[derive(Debug)]
+struct Buffer<T> {
+ raw: wgpu::Buffer,
+ size: usize,
+ usage: wgpu::BufferUsage,
+ _type: std::marker::PhantomData<T>,
+}
+
+impl<T> Buffer<T> {
+ pub fn new(
+ device: &wgpu::Device,
+ size: usize,
+ usage: wgpu::BufferUsage,
+ ) -> Self {
+ let raw = device.create_buffer(&wgpu::BufferDescriptor {
+ size: (std::mem::size_of::<T>() * size) as u64,
+ usage,
+ });
+
+ Buffer {
+ raw,
+ size,
+ usage,
+ _type: std::marker::PhantomData,
+ }
+ }
+
+ pub fn ensure_capacity(&mut self, device: &wgpu::Device, size: usize) {
+ if self.size < size {
+ self.raw = device.create_buffer(&wgpu::BufferDescriptor {
+ size: (std::mem::size_of::<T>() * size) as u64,
+ usage: self.usage,
+ });
+
+ self.size = size;
+ }
+ }
}
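+
+// A rough usage sketch of the `Buffer<T>` wrapper above (hypothetical,
+// following the wgpu 0.3-era API used throughout this diff):
+//
+//     let mut vertices: Buffer<Vertex2D> = Buffer::new(
+//         &device,
+//         VERTEX_BUFFER_SIZE,
+//         wgpu::BufferUsage::VERTEX | wgpu::BufferUsage::COPY_DST,
+//     );
+//
+//     // Only reallocates when the requested size exceeds the current one;
+//     // note that reallocation discards the old contents.
+//     vertices.ensure_capacity(&device, 250_000);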
impl Pipeline {
- pub fn new(device: &mut wgpu::Device) -> Pipeline {
+ pub fn new(
+ device: &mut wgpu::Device,
+ antialiasing: Option<settings::MSAA>,
+ ) -> Pipeline {
let constant_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
- ty: wgpu::BindingType::UniformBuffer { dynamic: false },
+ ty: wgpu::BindingType::UniformBuffer { dynamic: true },
}],
});
- let constants_buffer = device
- .create_buffer_mapped(
- 1,
- wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
- )
- .fill_from_slice(&[Uniforms::default()]);
+ let constants_buffer = Buffer::new(
+ device,
+ UNIFORM_BUFFER_SIZE,
+ wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
+ );
let constant_bind_group =
device.create_bind_group(&wgpu::BindGroupDescriptor {
@@ -34,7 +84,7 @@ impl Pipeline {
bindings: &[wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer {
- buffer: &constants_buffer,
+ buffer: &constants_buffer.raw,
range: 0..std::mem::size_of::<Uniforms>() as u64,
},
}],
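Since the uniform buffer is now bound with `dynamic: true`, the bind group
above covers only the first `size_of::<Uniforms>()` bytes of a single shared
buffer, and each mesh is drawn by rebinding the same group at a byte offset.
A minimal sketch of the per-draw rebind, mirroring the loop later in this
diff (`i` is the mesh index):

    render_pass.set_bind_group(
        0,
        &self.constants,
        &[(std::mem::size_of::<Uniforms>() * i) as u64],
    );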
@@ -110,15 +160,28 @@ impl Pipeline {
},
],
}],
- sample_count: 1,
+ sample_count: antialiasing
+ .map(|a| a.sample_count())
+ .unwrap_or(1),
sample_mask: !0,
alpha_to_coverage_enabled: false,
});
Pipeline {
pipeline,
+ blit: antialiasing.map(|a| msaa::Blit::new(device, a)),
constants: constant_bind_group,
- constants_buffer,
+ uniforms_buffer: constants_buffer,
+ vertex_buffer: Buffer::new(
+ device,
+ VERTEX_BUFFER_SIZE,
+ wgpu::BufferUsage::VERTEX | wgpu::BufferUsage::COPY_DST,
+ ),
+ index_buffer: Buffer::new(
+ device,
+ INDEX_BUFFER_SIZE,
+ wgpu::BufferUsage::INDEX | wgpu::BufferUsage::COPY_DST,
+ ),
}
}
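With antialiasing threaded through, a caller could construct the pipeline
like this (hypothetical usage; the exact `settings::MSAA` variants are not
shown in this diff):

    // Multisampled rendering; pass `None` to keep a single sample per pixel.
    let pipeline = Pipeline::new(&mut device, Some(settings::MSAA::X4));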
@@ -127,50 +190,116 @@ impl Pipeline {
device: &mut wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
target: &wgpu::TextureView,
+ target_width: u32,
+ target_height: u32,
transformation: Transformation,
meshes: &Vec<(Point, Arc<Mesh2D>)>,
bounds: Rectangle<u32>,
) {
+ // Count the total amount of vertices and indices we will need to upload,
+ // so we can size the buffers upfront.
+ // TODO: Improve readability
+ let (total_vertices, total_indices) = meshes
+ .iter()
+ .map(|(_, mesh)| (mesh.vertices.len(), mesh.indices.len()))
+ .fold((0, 0), |(total_v, total_i), (v, i)| {
+ (total_v + v, total_i + i)
+ });
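+
+ // An equivalent, more direct formulation for the TODO above (a sketch;
+ // same result, at the cost of two passes over `meshes`):
+ //
+ //     let total_vertices: usize =
+ //         meshes.iter().map(|(_, m)| m.vertices.len()).sum();
+ //     let total_indices: usize =
+ //         meshes.iter().map(|(_, m)| m.indices.len()).sum();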
+
+ // Then we ensure the current buffers are big enough, resizing if
+ // necessary
+ self.uniforms_buffer.ensure_capacity(device, meshes.len());
+ self.vertex_buffer.ensure_capacity(device, total_vertices);
+ self.index_buffer.ensure_capacity(device, total_indices);
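+
+ // Caveat: if `ensure_capacity` reallocates `uniforms_buffer`, the bind
+ // group in `self.constants` still references the old buffer and would
+ // need to be recreated; as written, this assumes we never exceed
+ // UNIFORM_BUFFER_SIZE meshes.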
+
+ let mut uniforms: Vec<Uniforms> = Vec::with_capacity(meshes.len());
+ let mut offsets: Vec<(
+ wgpu::BufferAddress,
+ wgpu::BufferAddress,
+ usize,
+ )> = Vec::with_capacity(meshes.len());
+ let mut last_vertex = 0;
+ let mut last_index = 0;
+
+ // We upload everything upfront
for (origin, mesh) in meshes {
- let uniforms = Uniforms {
+ let transform = Uniforms {
transform: (transformation
* Transformation::translate(origin.x, origin.y))
.into(),
};
- let constants_buffer = device
- .create_buffer_mapped(1, wgpu::BufferUsage::COPY_SRC)
- .fill_from_slice(&[uniforms]);
-
- encoder.copy_buffer_to_buffer(
- &constants_buffer,
- 0,
- &self.constants_buffer,
- 0,
- std::mem::size_of::<Uniforms>() as u64,
- );
-
- let vertices_buffer = device
+ let vertex_buffer = device
.create_buffer_mapped(
mesh.vertices.len(),
- wgpu::BufferUsage::VERTEX,
+ wgpu::BufferUsage::COPY_SRC,
)
.fill_from_slice(&mesh.vertices);
- let indices_buffer = device
+ let index_buffer = device
.create_buffer_mapped(
mesh.indices.len(),
- wgpu::BufferUsage::INDEX,
+ wgpu::BufferUsage::COPY_SRC,
)
.fill_from_slice(&mesh.indices);
+ encoder.copy_buffer_to_buffer(
+ &vertex_buffer,
+ 0,
+ &self.vertex_buffer.raw,
+ (std::mem::size_of::<Vertex2D>() * last_vertex) as u64,
+ (std::mem::size_of::<Vertex2D>() * mesh.vertices.len()) as u64,
+ );
+
+ encoder.copy_buffer_to_buffer(
+ &index_buffer,
+ 0,
+ &self.index_buffer.raw,
+ (std::mem::size_of::<u32>() * last_index) as u64,
+ (std::mem::size_of::<u32>() * mesh.indices.len()) as u64,
+ );
+
+ uniforms.push(transform);
+ offsets.push((
+ (std::mem::size_of::<Vertex2D>() * last_vertex) as u64,
+ (std::mem::size_of::<u32>() * last_index) as u64,
+ mesh.indices.len(),
+ ));
+
+ last_vertex += mesh.vertices.len();
+ last_index += mesh.indices.len();
+ }
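+
+ // Note: each `create_buffer_mapped(..).fill_from_slice(..)` above creates
+ // a short-lived staging buffer; the data reaches the big pre-allocated
+ // vertex/index buffers through `copy_buffer_to_buffer`, so the render
+ // pass below only ever binds those two buffers.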
+
+ let uniforms_buffer = device
+ .create_buffer_mapped(uniforms.len(), wgpu::BufferUsage::COPY_SRC)
+ .fill_from_slice(&uniforms);
+
+ encoder.copy_buffer_to_buffer(
+ &uniforms_buffer,
+ 0,
+ &self.uniforms_buffer.raw,
+ 0,
+ (std::mem::size_of::<Uniforms>() * uniforms.len()) as u64,
+ );
+
+ {
+ let (attachment, resolve_target, load_op) =
+ if let Some(blit) = &mut self.blit {
+ let (attachment, resolve_target) =
+ blit.targets(device, target_width, target_height);
+
+ (attachment, Some(resolve_target), wgpu::LoadOp::Clear)
+ } else {
+ (target, None, wgpu::LoadOp::Load)
+ };
+
let mut render_pass =
encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[
wgpu::RenderPassColorAttachmentDescriptor {
- attachment: target,
- resolve_target: None,
- load_op: wgpu::LoadOp::Load,
+ attachment,
+ resolve_target,
+ load_op,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.0,
@@ -183,18 +312,34 @@ impl Pipeline {
depth_stencil_attachment: None,
});
- render_pass.set_pipeline(&self.pipeline);
- render_pass.set_bind_group(0, &self.constants, &[]);
- render_pass.set_index_buffer(&indices_buffer, 0);
- render_pass.set_vertex_buffers(0, &[(&vertices_buffer, 0)]);
- render_pass.set_scissor_rect(
- bounds.x,
- bounds.y,
- bounds.width,
- bounds.height,
- );
+ for (i, (vertex_offset, index_offset, indices)) in
+ offsets.drain(..).enumerate()
+ {
+ render_pass.set_pipeline(&self.pipeline);
+ render_pass.set_bind_group(
+ 0,
+ &self.constants,
+ &[(std::mem::size_of::<Uniforms>() * i) as u64],
+ );
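+
+ // Note: backends generally require dynamic offsets to be aligned to
+ // the device's uniform buffer alignment (commonly 256 bytes), so
+ // `Uniforms` may need padding for a tightly packed array like this.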
+ render_pass
+ .set_index_buffer(&self.index_buffer.raw, index_offset);
+ render_pass.set_vertex_buffers(
+ 0,
+ &[(&self.vertex_buffer.raw, vertex_offset)],
+ );
+ render_pass.set_scissor_rect(
+ bounds.x,
+ bounds.y,
+ bounds.width,
+ bounds.height,
+ );
+
+ render_pass.draw_indexed(0..indices as u32, 0, 0..1);
+ }
+ }
- render_pass.draw_indexed(0..mesh.indices.len() as u32, 0, 0..1);
+ if let Some(blit) = &mut self.blit {
+ blit.draw(encoder, target);
}
}
}
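Taken together, `draw` now uploads every mesh upfront, issues a single
render pass that rebinds buffers per mesh via offsets, and, when MSAA is
enabled, renders into an offscreen multisampled attachment that is resolved
and blitted onto the real target. A hypothetical per-frame call site (names
assumed, not part of this diff):

    triangle_pipeline.draw(
        &mut device,
        &mut encoder,
        &frame.view, // the final, non-multisampled target
        viewport_width,
        viewport_height,
        transformation,
        &meshes,
        bounds,
    );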