Diffstat
-rw-r--r--  wgpu/src/image.rs                                                    | 283
-rw-r--r--  wgpu/src/image/atlas.rs                                              | 215
-rw-r--r--  wgpu/src/image/atlas/allocation.rs                                   |   3
-rw-r--r--  wgpu/src/image/atlas/allocator.rs                                    |   4
-rw-r--r--  wgpu/src/image/atlas/entry.rs                                        |   9
-rw-r--r--  wgpu/src/image/raster.rs                                             | 121
-rw-r--r--  wgpu/src/image/vector.rs (renamed from graphics/src/image/vector.rs) |  50
7 files changed, 403 insertions(+), 282 deletions(-)
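Note on the diff that follows: the wgpu image pipeline moves from a single draw call, which wrote uniforms and instances through a staging belt and opened its own render pass, to a prepare/render/end_frame lifecycle with one Layer of buffers per prepared batch. Below is a minimal caller sketch, assuming it lives inside iced_wgpu so Pipeline, layer::Image, Transformation, and Rectangle resolve; the render-pass setup is illustrative, and only the three Pipeline methods come from this diff.

use crate::core::Rectangle;
use crate::graphics::Transformation;
use crate::image::Pipeline;
use crate::layer;

fn draw_images(
    pipeline: &mut Pipeline,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    encoder: &mut wgpu::CommandEncoder,
    target: &wgpu::TextureView,
    images: &[layer::Image],
    transformation: Transformation,
    bounds: Rectangle<u32>,
) {
    // Uploads uniforms and instances for one layer; atlas uploads are
    // recorded on `queue`/`encoder`, no render pass is needed yet.
    pipeline.prepare(device, queue, encoder, images, transformation, 1.0);

    {
        let mut render_pass =
            encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("image render pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Load,
                        store: true,
                    },
                })],
                depth_stencil_attachment: None,
            });

        // Draws whatever was prepared for layer 0 this frame.
        pipeline.render(0, bounds, &mut render_pass);
    }

    // Resets the layer counter and trims cache entries that went unused.
    pipeline.end_frame();
}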
diff --git a/wgpu/src/image.rs b/wgpu/src/image.rs index 9f56c188..263bcfa2 100644 --- a/wgpu/src/image.rs +++ b/wgpu/src/image.rs @@ -1,16 +1,17 @@ mod atlas; #[cfg(feature = "image")] -use iced_graphics::image::raster; +mod raster; #[cfg(feature = "svg")] -use iced_graphics::image::vector; +mod vector; -use crate::Transformation; use atlas::Atlas; -use iced_graphics::layer; -use iced_native::{Rectangle, Size}; +use crate::core::{Rectangle, Size}; +use crate::graphics::Transformation; +use crate::layer; +use crate::Buffer; use std::cell::RefCell; use std::mem; @@ -18,10 +19,10 @@ use std::mem; use bytemuck::{Pod, Zeroable}; #[cfg(feature = "image")] -use iced_native::image; +use crate::core::image; #[cfg(feature = "svg")] -use iced_native::svg; +use crate::core::svg; #[cfg(feature = "tracing")] use tracing::info_span; @@ -29,20 +30,112 @@ use tracing::info_span; #[derive(Debug)] pub struct Pipeline { #[cfg(feature = "image")] - raster_cache: RefCell<raster::Cache<Atlas>>, + raster_cache: RefCell<raster::Cache>, #[cfg(feature = "svg")] - vector_cache: RefCell<vector::Cache<Atlas>>, + vector_cache: RefCell<vector::Cache>, pipeline: wgpu::RenderPipeline, - uniforms: wgpu::Buffer, vertices: wgpu::Buffer, indices: wgpu::Buffer, - instances: wgpu::Buffer, - constants: wgpu::BindGroup, + sampler: wgpu::Sampler, texture: wgpu::BindGroup, texture_version: usize, - texture_layout: wgpu::BindGroupLayout, texture_atlas: Atlas, + texture_layout: wgpu::BindGroupLayout, + constant_layout: wgpu::BindGroupLayout, + + layers: Vec<Layer>, + prepare_layer: usize, +} + +#[derive(Debug)] +struct Layer { + uniforms: wgpu::Buffer, + constants: wgpu::BindGroup, + instances: Buffer<Instance>, + instance_count: usize, +} + +impl Layer { + fn new( + device: &wgpu::Device, + constant_layout: &wgpu::BindGroupLayout, + sampler: &wgpu::Sampler, + ) -> Self { + let uniforms = device.create_buffer(&wgpu::BufferDescriptor { + label: Some("iced_wgpu::image uniforms buffer"), + size: mem::size_of::<Uniforms>() as u64, + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + mapped_at_creation: false, + }); + + let constants = device.create_bind_group(&wgpu::BindGroupDescriptor { + label: Some("iced_wgpu::image constants bind group"), + layout: constant_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::Buffer( + wgpu::BufferBinding { + buffer: &uniforms, + offset: 0, + size: None, + }, + ), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::Sampler(sampler), + }, + ], + }); + + let instances = Buffer::new( + device, + "iced_wgpu::image instance buffer", + Instance::INITIAL, + wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST, + ); + + Self { + uniforms, + constants, + instances, + instance_count: 0, + } + } + + fn prepare( + &mut self, + device: &wgpu::Device, + queue: &wgpu::Queue, + instances: &[Instance], + transformation: Transformation, + ) { + queue.write_buffer( + &self.uniforms, + 0, + bytemuck::bytes_of(&Uniforms { + transform: transformation.into(), + }), + ); + + let _ = self.instances.resize(device, instances.len()); + self.instances.write(queue, 0, instances); + + self.instance_count = instances.len(); + } + + fn render<'a>(&'a self, render_pass: &mut wgpu::RenderPass<'a>) { + render_pass.set_bind_group(0, &self.constants, &[]); + render_pass.set_vertex_buffer(1, self.instances.slice(..)); + + render_pass.draw_indexed( + 0..QUAD_INDICES.len() as u32, + 0, + 0..self.instance_count as u32, + ); + } } impl Pipeline { @@ 
-86,35 +179,6 @@ impl Pipeline { ], }); - let uniforms_buffer = device.create_buffer(&wgpu::BufferDescriptor { - label: Some("iced_wgpu::image uniforms buffer"), - size: mem::size_of::<Uniforms>() as u64, - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - mapped_at_creation: false, - }); - - let constant_bind_group = - device.create_bind_group(&wgpu::BindGroupDescriptor { - label: Some("iced_wgpu::image constants bind group"), - layout: &constant_layout, - entries: &[ - wgpu::BindGroupEntry { - binding: 0, - resource: wgpu::BindingResource::Buffer( - wgpu::BufferBinding { - buffer: &uniforms_buffer, - offset: 0, - size: None, - }, - ), - }, - wgpu::BindGroupEntry { - binding: 1, - resource: wgpu::BindingResource::Sampler(&sampler), - }, - ], - }); - let texture_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { label: Some("iced_wgpu::image texture atlas layout"), @@ -225,13 +289,6 @@ impl Pipeline { usage: wgpu::BufferUsages::INDEX, }); - let instances = device.create_buffer(&wgpu::BufferDescriptor { - label: Some("iced_wgpu::image instance buffer"), - size: mem::size_of::<Instance>() as u64 * Instance::MAX as u64, - usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST, - mapped_at_creation: false, - }); - let texture_atlas = Atlas::new(device); let texture = device.create_bind_group(&wgpu::BindGroupDescriptor { @@ -253,15 +310,17 @@ impl Pipeline { vector_cache: RefCell::new(vector::Cache::default()), pipeline, - uniforms: uniforms_buffer, vertices, indices, - instances, - constants: constant_bind_group, + sampler, texture, texture_version: texture_atlas.layer_count(), - texture_layout, texture_atlas, + texture_layout, + constant_layout, + + layers: Vec::new(), + prepare_layer: 0, } } @@ -281,18 +340,19 @@ impl Pipeline { svg.viewport_dimensions() } - pub fn draw( + pub fn prepare( &mut self, device: &wgpu::Device, - staging_belt: &mut wgpu::util::StagingBelt, + queue: &wgpu::Queue, encoder: &mut wgpu::CommandEncoder, images: &[layer::Image], transformation: Transformation, - bounds: Rectangle<u32>, - target: &wgpu::TextureView, _scale: f32, ) { #[cfg(feature = "tracing")] + let _ = info_span!("Wgpu::Image", "PREPARE").entered(); + + #[cfg(feature = "tracing")] let _ = info_span!("Wgpu::Image", "DRAW").entered(); let instances: &mut Vec<Instance> = &mut Vec::new(); @@ -308,8 +368,10 @@ impl Pipeline { #[cfg(feature = "image")] layer::Image::Raster { handle, bounds } => { if let Some(atlas_entry) = raster_cache.upload( + device, + queue, + encoder, handle, - &mut (device, encoder), &mut self.texture_atlas, ) { add_instances( @@ -332,11 +394,13 @@ impl Pipeline { let size = [bounds.width, bounds.height]; if let Some(atlas_entry) = vector_cache.upload( + device, + queue, + encoder, handle, *color, size, _scale, - &mut (device, encoder), &mut self.texture_atlas, ) { add_instances( @@ -376,68 +440,28 @@ impl Pipeline { self.texture_version = texture_version; } - { - let mut uniforms_buffer = staging_belt.write_buffer( - encoder, - &self.uniforms, - 0, - wgpu::BufferSize::new(mem::size_of::<Uniforms>() as u64) - .unwrap(), + if self.layers.len() <= self.prepare_layer { + self.layers.push(Layer::new( device, - ); - - uniforms_buffer.copy_from_slice(bytemuck::bytes_of(&Uniforms { - transform: transformation.into(), - })); + &self.constant_layout, + &self.sampler, + )); } - let mut i = 0; - let total = instances.len(); - - while i < total { - let end = (i + Instance::MAX).min(total); - let amount = end - i; - - let mut instances_buffer = 
staging_belt.write_buffer( - encoder, - &self.instances, - 0, - wgpu::BufferSize::new( - (amount * std::mem::size_of::<Instance>()) as u64, - ) - .unwrap(), - device, - ); + let layer = &mut self.layers[self.prepare_layer]; + layer.prepare(device, queue, instances, transformation); - instances_buffer.copy_from_slice(bytemuck::cast_slice( - &instances[i..i + amount], - )); - - let mut render_pass = - encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("iced_wgpu::image render pass"), - color_attachments: &[Some( - wgpu::RenderPassColorAttachment { - view: target, - resolve_target: None, - ops: wgpu::Operations { - load: wgpu::LoadOp::Load, - store: true, - }, - }, - )], - depth_stencil_attachment: None, - }); + self.prepare_layer += 1; + } + pub fn render<'a>( + &'a self, + layer: usize, + bounds: Rectangle<u32>, + render_pass: &mut wgpu::RenderPass<'a>, + ) { + if let Some(layer) = self.layers.get(layer) { render_pass.set_pipeline(&self.pipeline); - render_pass.set_bind_group(0, &self.constants, &[]); - render_pass.set_bind_group(1, &self.texture, &[]); - render_pass.set_index_buffer( - self.indices.slice(..), - wgpu::IndexFormat::Uint16, - ); - render_pass.set_vertex_buffer(0, self.vertices.slice(..)); - render_pass.set_vertex_buffer(1, self.instances.slice(..)); render_pass.set_scissor_rect( bounds.x, @@ -446,30 +470,25 @@ impl Pipeline { bounds.height, ); - render_pass.draw_indexed( - 0..QUAD_INDICES.len() as u32, - 0, - 0..amount as u32, + render_pass.set_bind_group(1, &self.texture, &[]); + render_pass.set_index_buffer( + self.indices.slice(..), + wgpu::IndexFormat::Uint16, ); + render_pass.set_vertex_buffer(0, self.vertices.slice(..)); - i += Instance::MAX; + layer.render(render_pass); } } - pub fn trim_cache( - &mut self, - device: &wgpu::Device, - encoder: &mut wgpu::CommandEncoder, - ) { + pub fn end_frame(&mut self) { #[cfg(feature = "image")] - self.raster_cache - .borrow_mut() - .trim(&mut self.texture_atlas, &mut (device, encoder)); + self.raster_cache.borrow_mut().trim(&mut self.texture_atlas); #[cfg(feature = "svg")] - self.vector_cache - .borrow_mut() - .trim(&mut self.texture_atlas, &mut (device, encoder)); + self.vector_cache.borrow_mut().trim(&mut self.texture_atlas); + + self.prepare_layer = 0; } } @@ -507,7 +526,7 @@ struct Instance { } impl Instance { - pub const MAX: usize = 1_000; + pub const INITIAL: usize = 1_000; } #[repr(C)] diff --git a/wgpu/src/image/atlas.rs b/wgpu/src/image/atlas.rs index a0fdf146..366fe623 100644 --- a/wgpu/src/image/atlas.rs +++ b/wgpu/src/image/atlas.rs @@ -12,8 +12,7 @@ use allocator::Allocator; pub const SIZE: u32 = 2048; -use iced_graphics::image; -use iced_graphics::Size; +use crate::core::Size; #[derive(Debug)] pub struct Atlas { @@ -37,10 +36,10 @@ impl Atlas { sample_count: 1, dimension: wgpu::TextureDimension::D2, format: wgpu::TextureFormat::Rgba8UnormSrgb, - view_formats: &[], usage: wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::COPY_SRC | wgpu::TextureUsages::TEXTURE_BINDING, + view_formats: &[], }); let texture_view = texture.create_view(&wgpu::TextureViewDescriptor { @@ -63,6 +62,97 @@ impl Atlas { self.layers.len() } + pub fn upload( + &mut self, + device: &wgpu::Device, + queue: &wgpu::Queue, + encoder: &mut wgpu::CommandEncoder, + width: u32, + height: u32, + data: &[u8], + ) -> Option<Entry> { + let entry = { + let current_size = self.layers.len(); + let entry = self.allocate(width, height)?; + + // We grow the internal texture after allocating if necessary + let new_layers = self.layers.len() - 
current_size; + self.grow(new_layers, device, encoder); + + entry + }; + + log::info!("Allocated atlas entry: {:?}", entry); + + // It is a webgpu requirement that: + // BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0 + // So we calculate padded_width by rounding width up to the next + // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT. + let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT; + let padding = (align - (4 * width) % align) % align; + let padded_width = (4 * width + padding) as usize; + let padded_data_size = padded_width * height as usize; + + let mut padded_data = vec![0; padded_data_size]; + + for row in 0..height as usize { + let offset = row * padded_width; + + padded_data[offset..offset + 4 * width as usize].copy_from_slice( + &data[row * 4 * width as usize..(row + 1) * 4 * width as usize], + ) + } + + match &entry { + Entry::Contiguous(allocation) => { + self.upload_allocation( + &padded_data, + width, + height, + padding, + 0, + allocation, + queue, + ); + } + Entry::Fragmented { fragments, .. } => { + for fragment in fragments { + let (x, y) = fragment.position; + let offset = (y * padded_width as u32 + 4 * x) as usize; + + self.upload_allocation( + &padded_data, + width, + height, + padding, + offset, + &fragment.allocation, + queue, + ); + } + } + } + + log::info!("Current atlas: {:?}", self); + + Some(entry) + } + + pub fn remove(&mut self, entry: &Entry) { + log::info!("Removing atlas entry: {:?}", entry); + + match entry { + Entry::Contiguous(allocation) => { + self.deallocate(allocation); + } + Entry::Fragmented { fragments, .. } => { + for fragment in fragments { + self.deallocate(&fragment.allocation); + } + } + } + } + fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> { // Allocate one layer if texture fits perfectly if width == SIZE && height == SIZE { @@ -184,13 +274,13 @@ impl Atlas { fn upload_allocation( &mut self, - buffer: &wgpu::Buffer, + data: &[u8], image_width: u32, image_height: u32, padding: u32, offset: usize, allocation: &Allocation, - encoder: &mut wgpu::CommandEncoder, + queue: &wgpu::Queue, ) { let (x, y) = allocation.position(); let Size { width, height } = allocation.size(); @@ -202,15 +292,7 @@ impl Atlas { depth_or_array_layers: 1, }; - encoder.copy_buffer_to_texture( - wgpu::ImageCopyBuffer { - buffer, - layout: wgpu::ImageDataLayout { - offset: offset as u64, - bytes_per_row: Some(4 * image_width + padding), - rows_per_image: Some(image_height), - }, - }, + queue.write_texture( wgpu::ImageCopyTexture { texture: &self.texture, mip_level: 0, @@ -221,6 +303,12 @@ impl Atlas { }, aspect: wgpu::TextureAspect::default(), }, + data, + wgpu::ImageDataLayout { + offset: offset as u64, + bytes_per_row: Some(4 * image_width + padding), + rows_per_image: Some(image_height), + }, extent, ); } @@ -246,10 +334,10 @@ impl Atlas { sample_count: 1, dimension: wgpu::TextureDimension::D2, format: wgpu::TextureFormat::Rgba8UnormSrgb, - view_formats: &[], usage: wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::COPY_SRC | wgpu::TextureUsages::TEXTURE_BINDING, + view_formats: &[], }); let amount_to_copy = self.layers.len() - amount; @@ -298,100 +386,3 @@ impl Atlas { }); } } - -impl image::Storage for Atlas { - type Entry = Entry; - type State<'a> = (&'a wgpu::Device, &'a mut wgpu::CommandEncoder); - - fn upload( - &mut self, - width: u32, - height: u32, - data: &[u8], - (device, encoder): &mut Self::State<'_>, - ) -> Option<Self::Entry> { - use wgpu::util::DeviceExt; - - let entry = { - let current_size = 
self.layers.len(); - let entry = self.allocate(width, height)?; - - // We grow the internal texture after allocating if necessary - let new_layers = self.layers.len() - current_size; - self.grow(new_layers, device, encoder); - - entry - }; - - log::info!("Allocated atlas entry: {:?}", entry); - - // It is a webgpu requirement that: - // BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0 - // So we calculate padded_width by rounding width up to the next - // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT. - let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT; - let padding = (align - (4 * width) % align) % align; - let padded_width = (4 * width + padding) as usize; - let padded_data_size = padded_width * height as usize; - - let mut padded_data = vec![0; padded_data_size]; - - for row in 0..height as usize { - let offset = row * padded_width; - - padded_data[offset..offset + 4 * width as usize].copy_from_slice( - &data[row * 4 * width as usize..(row + 1) * 4 * width as usize], - ) - } - - let buffer = - device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("iced_wgpu::image staging buffer"), - contents: &padded_data, - usage: wgpu::BufferUsages::COPY_SRC, - }); - - match &entry { - Entry::Contiguous(allocation) => { - self.upload_allocation( - &buffer, width, height, padding, 0, allocation, encoder, - ); - } - Entry::Fragmented { fragments, .. } => { - for fragment in fragments { - let (x, y) = fragment.position; - let offset = (y * padded_width as u32 + 4 * x) as usize; - - self.upload_allocation( - &buffer, - width, - height, - padding, - offset, - &fragment.allocation, - encoder, - ); - } - } - } - - log::info!("Current atlas: {:?}", self); - - Some(entry) - } - - fn remove(&mut self, entry: &Entry, _: &mut Self::State<'_>) { - log::info!("Removing atlas entry: {:?}", entry); - - match entry { - Entry::Contiguous(allocation) => { - self.deallocate(allocation); - } - Entry::Fragmented { fragments, .. 
} => { - for fragment in fragments { - self.deallocate(&fragment.allocation); - } - } - } - } -} diff --git a/wgpu/src/image/atlas/allocation.rs b/wgpu/src/image/atlas/allocation.rs index 43aba875..11289771 100644 --- a/wgpu/src/image/atlas/allocation.rs +++ b/wgpu/src/image/atlas/allocation.rs @@ -1,7 +1,6 @@ +use crate::core::Size; use crate::image::atlas::{self, allocator}; -use iced_graphics::Size; - #[derive(Debug)] pub enum Allocation { Partial { diff --git a/wgpu/src/image/atlas/allocator.rs b/wgpu/src/image/atlas/allocator.rs index 03effdcb..204a5c26 100644 --- a/wgpu/src/image/atlas/allocator.rs +++ b/wgpu/src/image/atlas/allocator.rs @@ -46,10 +46,10 @@ impl Region { (rectangle.min.x as u32, rectangle.min.y as u32) } - pub fn size(&self) -> iced_graphics::Size<u32> { + pub fn size(&self) -> crate::core::Size<u32> { let size = self.allocation.rectangle.size(); - iced_graphics::Size::new(size.width as u32, size.height as u32) + crate::core::Size::new(size.width as u32, size.height as u32) } } diff --git a/wgpu/src/image/atlas/entry.rs b/wgpu/src/image/atlas/entry.rs index 69c05a50..7e4c92a2 100644 --- a/wgpu/src/image/atlas/entry.rs +++ b/wgpu/src/image/atlas/entry.rs @@ -1,8 +1,6 @@ +use crate::core::Size; use crate::image::atlas; -use iced_graphics::image; -use iced_graphics::Size; - #[derive(Debug)] pub enum Entry { Contiguous(atlas::Allocation), @@ -12,8 +10,9 @@ pub enum Entry { }, } -impl image::storage::Entry for Entry { - fn size(&self) -> Size<u32> { +impl Entry { + #[cfg(feature = "image")] + pub fn size(&self) -> Size<u32> { match self { Entry::Contiguous(allocation) => allocation.size(), Entry::Fragmented { size, .. } => *size, diff --git a/wgpu/src/image/raster.rs b/wgpu/src/image/raster.rs new file mode 100644 index 00000000..9b38dce4 --- /dev/null +++ b/wgpu/src/image/raster.rs @@ -0,0 +1,121 @@ +use crate::core::image; +use crate::core::Size; +use crate::graphics; +use crate::graphics::image::image_rs; +use crate::image::atlas::{self, Atlas}; + +use std::collections::{HashMap, HashSet}; + +/// Entry in cache corresponding to an image handle +#[derive(Debug)] +pub enum Memory { + /// Image data on host + Host(image_rs::ImageBuffer<image_rs::Rgba<u8>, Vec<u8>>), + /// Storage entry + Device(atlas::Entry), + /// Image not found + NotFound, + /// Invalid image data + Invalid, +} + +impl Memory { + /// Width and height of image + pub fn dimensions(&self) -> Size<u32> { + match self { + Memory::Host(image) => { + let (width, height) = image.dimensions(); + + Size::new(width, height) + } + Memory::Device(entry) => entry.size(), + Memory::NotFound => Size::new(1, 1), + Memory::Invalid => Size::new(1, 1), + } + } +} + +/// Caches image raster data +#[derive(Debug, Default)] +pub struct Cache { + map: HashMap<u64, Memory>, + hits: HashSet<u64>, +} + +impl Cache { + /// Load image + pub fn load(&mut self, handle: &image::Handle) -> &mut Memory { + if self.contains(handle) { + return self.get(handle).unwrap(); + } + + let memory = match graphics::image::load(handle) { + Ok(image) => Memory::Host(image.to_rgba8()), + Err(image_rs::error::ImageError::IoError(_)) => Memory::NotFound, + Err(_) => Memory::Invalid, + }; + + self.insert(handle, memory); + self.get(handle).unwrap() + } + + /// Load image and upload raster data + pub fn upload( + &mut self, + device: &wgpu::Device, + queue: &wgpu::Queue, + encoder: &mut wgpu::CommandEncoder, + handle: &image::Handle, + atlas: &mut Atlas, + ) -> Option<&atlas::Entry> { + let memory = self.load(handle); + + if let Memory::Host(image) = 
memory { + let (width, height) = image.dimensions(); + + let entry = + atlas.upload(device, queue, encoder, width, height, image)?; + + *memory = Memory::Device(entry); + } + + if let Memory::Device(allocation) = memory { + Some(allocation) + } else { + None + } + } + + /// Trim cache misses from cache + pub fn trim(&mut self, atlas: &mut Atlas) { + let hits = &self.hits; + + self.map.retain(|k, memory| { + let retain = hits.contains(k); + + if !retain { + if let Memory::Device(entry) = memory { + atlas.remove(entry); + } + } + + retain + }); + + self.hits.clear(); + } + + fn get(&mut self, handle: &image::Handle) -> Option<&mut Memory> { + let _ = self.hits.insert(handle.id()); + + self.map.get_mut(&handle.id()) + } + + fn insert(&mut self, handle: &image::Handle, memory: Memory) { + let _ = self.map.insert(handle.id(), memory); + } + + fn contains(&self, handle: &image::Handle) -> bool { + self.map.contains_key(&handle.id()) + } +} diff --git a/graphics/src/image/vector.rs b/wgpu/src/image/vector.rs index c950ccd6..58bdf64a 100644 --- a/graphics/src/image/vector.rs +++ b/wgpu/src/image/vector.rs @@ -1,9 +1,6 @@ -//! Vector image loading and caching -use crate::image::Storage; -use crate::Color; - -use iced_native::svg; -use iced_native::Size; +use crate::core::svg; +use crate::core::{Color, Size}; +use crate::image::atlas::{self, Atlas}; use resvg::tiny_skia; use resvg::usvg; @@ -33,19 +30,21 @@ impl Svg { } /// Caches svg vector and raster data -#[derive(Debug)] -pub struct Cache<T: Storage> { +#[derive(Debug, Default)] +pub struct Cache { svgs: HashMap<u64, Svg>, - rasterized: HashMap<(u64, u32, u32, ColorFilter), T::Entry>, + rasterized: HashMap<(u64, u32, u32, ColorFilter), atlas::Entry>, svg_hits: HashSet<u64>, rasterized_hits: HashSet<(u64, u32, u32, ColorFilter)>, } type ColorFilter = Option<[u8; 4]>; -impl<T: Storage> Cache<T> { +impl Cache { /// Load svg pub fn load(&mut self, handle: &svg::Handle) -> &Svg { + use usvg::TreeParsing; + if self.svgs.contains_key(&handle.id()) { return self.svgs.get(&handle.id()).unwrap(); } @@ -74,13 +73,15 @@ impl<T: Storage> Cache<T> { /// Load svg and upload raster data pub fn upload( &mut self, + device: &wgpu::Device, + queue: &wgpu::Queue, + encoder: &mut wgpu::CommandEncoder, handle: &svg::Handle, color: Option<Color>, [width, height]: [f32; 2], scale: f32, - state: &mut T::State<'_>, - storage: &mut T, - ) -> Option<&T::Entry> { + atlas: &mut Atlas, + ) -> Option<&atlas::Entry> { let id = handle.id(); let (width, height) = ( @@ -117,9 +118,9 @@ impl<T: Storage> Cache<T> { resvg::render( tree, if width > height { - usvg::FitTo::Width(width) + resvg::FitTo::Width(width) } else { - usvg::FitTo::Height(height) + resvg::FitTo::Height(height) }, tiny_skia::Transform::default(), img.as_mut(), @@ -137,7 +138,9 @@ impl<T: Storage> Cache<T> { }); } - let allocation = storage.upload(width, height, &rgba, state)?; + let allocation = atlas + .upload(device, queue, encoder, width, height, &rgba)?; + log::debug!("allocating {} {}x{}", id, width, height); let _ = self.svg_hits.insert(id); @@ -151,7 +154,7 @@ impl<T: Storage> Cache<T> { } /// Load svg and upload raster data - pub fn trim(&mut self, storage: &mut T, state: &mut T::State<'_>) { + pub fn trim(&mut self, atlas: &mut Atlas) { let svg_hits = &self.svg_hits; let rasterized_hits = &self.rasterized_hits; @@ -160,7 +163,7 @@ impl<T: Storage> Cache<T> { let retain = rasterized_hits.contains(k); if !retain { - storage.remove(entry, state); + atlas.remove(entry); } retain @@ -170,17 +173,6 @@ impl<T: 
Storage> Cache<T> { } } -impl<T: Storage> Default for Cache<T> { - fn default() -> Self { - Self { - svgs: HashMap::new(), - rasterized: HashMap::new(), - svg_hits: HashSet::new(), - rasterized_hits: HashSet::new(), - } - } -} - impl std::fmt::Debug for Svg { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { |
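The old draw method looped over instances in chunks of Instance::MAX and re-recorded a render pass for each chunk; the new Layer::prepare instead resizes a single crate::Buffer<Instance> and writes all instances at once. A rough, hypothetical stand-in for that resize-on-demand idea (this is not the crate's actual Buffer type):

struct GrowableBuffer {
    label: &'static str,
    raw: wgpu::Buffer,
    size: u64,
    usage: wgpu::BufferUsages,
}

impl GrowableBuffer {
    // Recreates the underlying buffer when `new_size` exceeds the current
    // capacity; returns whether a reallocation happened.
    fn resize(&mut self, device: &wgpu::Device, new_size: u64) -> bool {
        if new_size <= self.size {
            return false;
        }

        self.raw = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some(self.label),
            size: new_size,
            usage: self.usage,
            mapped_at_creation: false,
        });
        self.size = new_size;

        true
    }

    fn write(&self, queue: &wgpu::Queue, offset: u64, bytes: &[u8]) {
        queue.write_buffer(&self.raw, offset, bytes);
    }
}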
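Atlas::upload pads every RGBA row so that bytes_per_row is a multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT before handing the data to queue.write_texture. A standalone sketch of just that padding step; the helper name and return shape are illustrative, not part of the diff:

fn pad_rgba_rows(width: u32, height: u32, data: &[u8]) -> (Vec<u8>, u32) {
    let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT; // 256 bytes
    let padding = (align - (4 * width) % align) % align;
    let padded_width = (4 * width + padding) as usize;

    // Copy every 4-byte-per-pixel row into a buffer whose rows are
    // `padded_width` bytes, leaving the padding at the end of each row zeroed.
    let mut padded = vec![0; padded_width * height as usize];

    for row in 0..height as usize {
        let offset = row * padded_width;

        padded[offset..offset + 4 * width as usize].copy_from_slice(
            &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
        );
    }

    // The second value is the `bytes_per_row` to report in ImageDataLayout.
    (padded, 4 * width + padding)
}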
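Both caches introduced here, raster::Cache and vector::Cache, evict with the same hit-tracking scheme: every lookup records the key, and trim retains only what was hit since the previous trim, removing everything else from the atlas. A generic sketch of that pattern with the atlas types abstracted away (all names here are illustrative):

use std::collections::{HashMap, HashSet};

struct HitCache<V> {
    map: HashMap<u64, V>,
    hits: HashSet<u64>,
}

impl<V> HitCache<V> {
    fn get(&mut self, key: u64) -> Option<&mut V> {
        // Every lookup counts as a hit for the current frame.
        let _ = self.hits.insert(key);

        self.map.get_mut(&key)
    }

    fn trim(&mut self, mut on_evict: impl FnMut(&mut V)) {
        let hits = &self.hits;

        self.map.retain(|key, value| {
            let retain = hits.contains(key);

            if !retain {
                // The real caches call `atlas.remove(entry)` here.
                on_evict(value);
            }

            retain
        });

        self.hits.clear();
    }
}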