author | 2023-07-12 12:23:18 -0700
---|---
committer | 2023-07-12 12:23:18 -0700
commit | 633f405f3f78bc7f82d2b2061491b0e011137451 (patch)
tree | 5ebfc1f45d216a5c14a90492563599e6969eab4d /wgpu/src/image
parent | 41836dd80d0534608e7aedfbf2319c540a23de1a (diff)
parent | 21bd51426d900e271206f314e0c915dd41065521 (diff)
Merge remote-tracking branch 'origin/master' into feat/multi-window-support
# Conflicts:
# Cargo.toml
# core/src/window/icon.rs
# core/src/window/id.rs
# core/src/window/position.rs
# core/src/window/settings.rs
# examples/integration/src/main.rs
# examples/integration_opengl/src/main.rs
# glutin/src/application.rs
# native/src/subscription.rs
# native/src/window.rs
# runtime/src/window/action.rs
# src/lib.rs
# src/window.rs
# winit/Cargo.toml
# winit/src/application.rs
# winit/src/icon.rs
# winit/src/settings.rs
# winit/src/window.rs
Diffstat (limited to 'wgpu/src/image')
-rw-r--r-- | wgpu/src/image/atlas.rs | 227
-rw-r--r-- | wgpu/src/image/atlas/allocation.rs | 3
-rw-r--r-- | wgpu/src/image/atlas/allocator.rs | 4
-rw-r--r-- | wgpu/src/image/atlas/entry.rs | 9
-rw-r--r-- | wgpu/src/image/raster.rs | 119
-rw-r--r-- | wgpu/src/image/vector.rs | 194
6 files changed, 440 insertions, 116 deletions
diff --git a/wgpu/src/image/atlas.rs b/wgpu/src/image/atlas.rs
index eafe2f96..e3de1290 100644
--- a/wgpu/src/image/atlas.rs
+++ b/wgpu/src/image/atlas.rs
@@ -12,10 +12,8 @@ use allocator::Allocator;
 
 pub const SIZE: u32 = 2048;
 
-use iced_graphics::image;
-use iced_graphics::Size;
-
-use std::num::NonZeroU32;
+use crate::core::Size;
+use crate::graphics::color;
 
 #[derive(Debug)]
 pub struct Atlas {
@@ -38,10 +36,15 @@ impl Atlas {
             mip_level_count: 1,
             sample_count: 1,
             dimension: wgpu::TextureDimension::D2,
-            format: wgpu::TextureFormat::Rgba8UnormSrgb,
+            format: if color::GAMMA_CORRECTION {
+                wgpu::TextureFormat::Rgba8UnormSrgb
+            } else {
+                wgpu::TextureFormat::Rgba8Unorm
+            },
             usage: wgpu::TextureUsages::COPY_DST
                 | wgpu::TextureUsages::COPY_SRC
                 | wgpu::TextureUsages::TEXTURE_BINDING,
+            view_formats: &[],
         });
 
         let texture_view = texture.create_view(&wgpu::TextureViewDescriptor {
@@ -64,6 +67,98 @@ impl Atlas {
         self.layers.len()
     }
 
+    pub fn upload(
+        &mut self,
+        device: &wgpu::Device,
+        encoder: &mut wgpu::CommandEncoder,
+        width: u32,
+        height: u32,
+        data: &[u8],
+    ) -> Option<Entry> {
+        let entry = {
+            let current_size = self.layers.len();
+            let entry = self.allocate(width, height)?;
+
+            // We grow the internal texture after allocating if necessary
+            let new_layers = self.layers.len() - current_size;
+            self.grow(new_layers, device, encoder);
+
+            entry
+        };
+
+        log::info!("Allocated atlas entry: {:?}", entry);
+
+        // It is a webgpu requirement that:
+        //   BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
+        // So we calculate padded_width by rounding width up to the next
+        // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
+        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
+        let padding = (align - (4 * width) % align) % align;
+        let padded_width = (4 * width + padding) as usize;
+        let padded_data_size = padded_width * height as usize;
+
+        let mut padded_data = vec![0; padded_data_size];
+
+        for row in 0..height as usize {
+            let offset = row * padded_width;
+
+            padded_data[offset..offset + 4 * width as usize].copy_from_slice(
+                &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
+            )
+        }
+
+        match &entry {
+            Entry::Contiguous(allocation) => {
+                self.upload_allocation(
+                    &padded_data,
+                    width,
+                    height,
+                    padding,
+                    0,
+                    allocation,
+                    device,
+                    encoder,
+                );
+            }
+            Entry::Fragmented { fragments, .. } => {
+                for fragment in fragments {
+                    let (x, y) = fragment.position;
+                    let offset = (y * padded_width as u32 + 4 * x) as usize;
+
+                    self.upload_allocation(
+                        &padded_data,
+                        width,
+                        height,
+                        padding,
+                        offset,
+                        &fragment.allocation,
+                        device,
+                        encoder,
+                    );
+                }
+            }
+        }
+
+        log::info!("Current atlas: {:?}", self);
+
+        Some(entry)
+    }
+
+    pub fn remove(&mut self, entry: &Entry) {
+        log::info!("Removing atlas entry: {:?}", entry);
+
+        match entry {
+            Entry::Contiguous(allocation) => {
+                self.deallocate(allocation);
+            }
+            Entry::Fragmented { fragments, .. } => {
+                for fragment in fragments {
+                    self.deallocate(&fragment.allocation);
+                }
+            }
+        }
+    }
+
     fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
         // Allocate one layer if texture fits perfectly
         if width == SIZE && height == SIZE {
@@ -185,14 +280,17 @@ impl Atlas {
     fn upload_allocation(
         &mut self,
-        buffer: &wgpu::Buffer,
+        data: &[u8],
         image_width: u32,
         image_height: u32,
         padding: u32,
         offset: usize,
         allocation: &Allocation,
+        device: &wgpu::Device,
         encoder: &mut wgpu::CommandEncoder,
     ) {
+        use wgpu::util::DeviceExt;
+
         let (x, y) = allocation.position();
         let Size { width, height } = allocation.size();
         let layer = allocation.layer();
@@ -203,13 +301,20 @@ impl Atlas {
             depth_or_array_layers: 1,
         };
 
+        let buffer =
+            device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+                label: Some("image upload buffer"),
+                contents: data,
+                usage: wgpu::BufferUsages::COPY_SRC,
+            });
+
         encoder.copy_buffer_to_texture(
             wgpu::ImageCopyBuffer {
-                buffer,
+                buffer: &buffer,
                 layout: wgpu::ImageDataLayout {
                     offset: offset as u64,
-                    bytes_per_row: NonZeroU32::new(4 * image_width + padding),
-                    rows_per_image: NonZeroU32::new(image_height),
+                    bytes_per_row: Some(4 * image_width + padding),
+                    rows_per_image: Some(image_height),
                 },
             },
             wgpu::ImageCopyTexture {
@@ -246,10 +351,15 @@ impl Atlas {
             mip_level_count: 1,
             sample_count: 1,
             dimension: wgpu::TextureDimension::D2,
-            format: wgpu::TextureFormat::Rgba8UnormSrgb,
+            format: if color::GAMMA_CORRECTION {
+                wgpu::TextureFormat::Rgba8UnormSrgb
+            } else {
+                wgpu::TextureFormat::Rgba8Unorm
+            },
             usage: wgpu::TextureUsages::COPY_DST
                 | wgpu::TextureUsages::COPY_SRC
                 | wgpu::TextureUsages::TEXTURE_BINDING,
+            view_formats: &[],
        });
 
         let amount_to_copy = self.layers.len() - amount;
@@ -298,100 +408,3 @@ impl Atlas {
         });
     }
 }
-
-impl image::Storage for Atlas {
-    type Entry = Entry;
-    type State<'a> = (&'a wgpu::Device, &'a mut wgpu::CommandEncoder);
-
-    fn upload(
-        &mut self,
-        width: u32,
-        height: u32,
-        data: &[u8],
-        (device, encoder): &mut Self::State<'_>,
-    ) -> Option<Self::Entry> {
-        use wgpu::util::DeviceExt;
-
-        let entry = {
-            let current_size = self.layers.len();
-            let entry = self.allocate(width, height)?;
-
-            // We grow the internal texture after allocating if necessary
-            let new_layers = self.layers.len() - current_size;
-            self.grow(new_layers, device, encoder);
-
-            entry
-        };
-
-        log::info!("Allocated atlas entry: {:?}", entry);
-
-        // It is a webgpu requirement that:
-        //   BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
-        // So we calculate padded_width by rounding width up to the next
-        // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
-        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
-        let padding = (align - (4 * width) % align) % align;
-        let padded_width = (4 * width + padding) as usize;
-        let padded_data_size = padded_width * height as usize;
-
-        let mut padded_data = vec![0; padded_data_size];
-
-        for row in 0..height as usize {
-            let offset = row * padded_width;
-
-            padded_data[offset..offset + 4 * width as usize].copy_from_slice(
-                &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
-            )
-        }
-
-        let buffer =
-            device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                label: Some("iced_wgpu::image staging buffer"),
-                contents: &padded_data,
-                usage: wgpu::BufferUsages::COPY_SRC,
-            });
-
-        match &entry {
-            Entry::Contiguous(allocation) => {
-                self.upload_allocation(
-                    &buffer, width, height, padding, 0, allocation, encoder,
-                );
-            }
-            Entry::Fragmented { fragments, .. } => {
-                for fragment in fragments {
-                    let (x, y) = fragment.position;
-                    let offset = (y * padded_width as u32 + 4 * x) as usize;
-
-                    self.upload_allocation(
-                        &buffer,
-                        width,
-                        height,
-                        padding,
-                        offset,
-                        &fragment.allocation,
-                        encoder,
-                    );
-                }
-            }
-        }
-
-        log::info!("Current atlas: {:?}", self);
-
-        Some(entry)
-    }
-
-    fn remove(&mut self, entry: &Entry, _: &mut Self::State<'_>) {
-        log::info!("Removing atlas entry: {:?}", entry);
-
-        match entry {
-            Entry::Contiguous(allocation) => {
-                self.deallocate(allocation);
-            }
-            Entry::Fragmented { fragments, .. } => {
-                for fragment in fragments {
-                    self.deallocate(&fragment.allocation);
-                }
-            }
-        }
-    }
-}
diff --git a/wgpu/src/image/atlas/allocation.rs b/wgpu/src/image/atlas/allocation.rs
index 43aba875..11289771 100644
--- a/wgpu/src/image/atlas/allocation.rs
+++ b/wgpu/src/image/atlas/allocation.rs
@@ -1,7 +1,6 @@
+use crate::core::Size;
 use crate::image::atlas::{self, allocator};
 
-use iced_graphics::Size;
-
 #[derive(Debug)]
 pub enum Allocation {
     Partial {
diff --git a/wgpu/src/image/atlas/allocator.rs b/wgpu/src/image/atlas/allocator.rs
index 03effdcb..204a5c26 100644
--- a/wgpu/src/image/atlas/allocator.rs
+++ b/wgpu/src/image/atlas/allocator.rs
@@ -46,10 +46,10 @@ impl Region {
         (rectangle.min.x as u32, rectangle.min.y as u32)
     }
 
-    pub fn size(&self) -> iced_graphics::Size<u32> {
+    pub fn size(&self) -> crate::core::Size<u32> {
         let size = self.allocation.rectangle.size();
 
-        iced_graphics::Size::new(size.width as u32, size.height as u32)
+        crate::core::Size::new(size.width as u32, size.height as u32)
     }
 }
diff --git a/wgpu/src/image/atlas/entry.rs b/wgpu/src/image/atlas/entry.rs
index 69c05a50..7e4c92a2 100644
--- a/wgpu/src/image/atlas/entry.rs
+++ b/wgpu/src/image/atlas/entry.rs
@@ -1,8 +1,6 @@
+use crate::core::Size;
 use crate::image::atlas;
 
-use iced_graphics::image;
-use iced_graphics::Size;
-
 #[derive(Debug)]
 pub enum Entry {
     Contiguous(atlas::Allocation),
@@ -12,8 +10,9 @@ pub enum Entry {
     },
 }
 
-impl image::storage::Entry for Entry {
-    fn size(&self) -> Size<u32> {
+impl Entry {
+    #[cfg(feature = "image")]
+    pub fn size(&self) -> Size<u32> {
         match self {
             Entry::Contiguous(allocation) => allocation.size(),
             Entry::Fragmented { size, .. } => *size,
diff --git a/wgpu/src/image/raster.rs b/wgpu/src/image/raster.rs
new file mode 100644
index 00000000..a6cba76a
--- /dev/null
+++ b/wgpu/src/image/raster.rs
@@ -0,0 +1,119 @@
+use crate::core::image;
+use crate::core::Size;
+use crate::graphics;
+use crate::graphics::image::image_rs;
+use crate::image::atlas::{self, Atlas};
+
+use std::collections::{HashMap, HashSet};
+
+/// Entry in cache corresponding to an image handle
+#[derive(Debug)]
+pub enum Memory {
+    /// Image data on host
+    Host(image_rs::ImageBuffer<image_rs::Rgba<u8>, Vec<u8>>),
+    /// Storage entry
+    Device(atlas::Entry),
+    /// Image not found
+    NotFound,
+    /// Invalid image data
+    Invalid,
+}
+
+impl Memory {
+    /// Width and height of image
+    pub fn dimensions(&self) -> Size<u32> {
+        match self {
+            Memory::Host(image) => {
+                let (width, height) = image.dimensions();
+
+                Size::new(width, height)
+            }
+            Memory::Device(entry) => entry.size(),
+            Memory::NotFound => Size::new(1, 1),
+            Memory::Invalid => Size::new(1, 1),
+        }
+    }
+}
+
+/// Caches image raster data
+#[derive(Debug, Default)]
+pub struct Cache {
+    map: HashMap<u64, Memory>,
+    hits: HashSet<u64>,
+}
+
+impl Cache {
+    /// Load image
+    pub fn load(&mut self, handle: &image::Handle) -> &mut Memory {
+        if self.contains(handle) {
+            return self.get(handle).unwrap();
+        }
+
+        let memory = match graphics::image::load(handle) {
+            Ok(image) => Memory::Host(image.to_rgba8()),
+            Err(image_rs::error::ImageError::IoError(_)) => Memory::NotFound,
+            Err(_) => Memory::Invalid,
+        };
+
+        self.insert(handle, memory);
+        self.get(handle).unwrap()
+    }
+
+    /// Load image and upload raster data
+    pub fn upload(
+        &mut self,
+        device: &wgpu::Device,
+        encoder: &mut wgpu::CommandEncoder,
+        handle: &image::Handle,
+        atlas: &mut Atlas,
+    ) -> Option<&atlas::Entry> {
+        let memory = self.load(handle);
+
+        if let Memory::Host(image) = memory {
+            let (width, height) = image.dimensions();
+
+            let entry = atlas.upload(device, encoder, width, height, image)?;
+
+            *memory = Memory::Device(entry);
+        }
+
+        if let Memory::Device(allocation) = memory {
+            Some(allocation)
+        } else {
+            None
+        }
+    }
+
+    /// Trim cache misses from cache
+    pub fn trim(&mut self, atlas: &mut Atlas) {
+        let hits = &self.hits;
+
+        self.map.retain(|k, memory| {
+            let retain = hits.contains(k);
+
+            if !retain {
+                if let Memory::Device(entry) = memory {
+                    atlas.remove(entry);
+                }
+            }
+
+            retain
+        });
+
+        self.hits.clear();
+    }
+
+    fn get(&mut self, handle: &image::Handle) -> Option<&mut Memory> {
+        let _ = self.hits.insert(handle.id());
+
+        self.map.get_mut(&handle.id())
+    }
+
+    fn insert(&mut self, handle: &image::Handle, memory: Memory) {
+        let _ = self.map.insert(handle.id(), memory);
+    }
+
+    fn contains(&self, handle: &image::Handle) -> bool {
+        self.map.contains_key(&handle.id())
+    }
+}
diff --git a/wgpu/src/image/vector.rs b/wgpu/src/image/vector.rs
new file mode 100644
index 00000000..2c03d36b
--- /dev/null
+++ b/wgpu/src/image/vector.rs
@@ -0,0 +1,194 @@
+use crate::core::svg;
+use crate::core::{Color, Size};
+use crate::image::atlas::{self, Atlas};
+
+use resvg::tiny_skia;
+use resvg::usvg;
+use std::collections::{HashMap, HashSet};
+use std::fs;
+
+/// Entry in cache corresponding to an svg handle
+pub enum Svg {
+    /// Parsed svg
+    Loaded(usvg::Tree),
+    /// Svg not found or failed to parse
+    NotFound,
+}
+
+impl Svg {
+    /// Viewport width and height
+    pub fn viewport_dimensions(&self) -> Size<u32> {
+        match self {
+            Svg::Loaded(tree) => {
+                let size = tree.size;
+
+                Size::new(size.width() as u32, size.height() as u32)
+            }
+            Svg::NotFound => Size::new(1, 1),
+        }
+    }
+}
+
+/// Caches svg vector and raster data
+#[derive(Debug, Default)]
+pub struct Cache {
+    svgs: HashMap<u64, Svg>,
+    rasterized: HashMap<(u64, u32, u32, ColorFilter), atlas::Entry>,
+    svg_hits: HashSet<u64>,
+    rasterized_hits: HashSet<(u64, u32, u32, ColorFilter)>,
+}
+
+type ColorFilter = Option<[u8; 4]>;
+
+impl Cache {
+    /// Load svg
+    pub fn load(&mut self, handle: &svg::Handle) -> &Svg {
+        use usvg::TreeParsing;
+
+        if self.svgs.contains_key(&handle.id()) {
+            return self.svgs.get(&handle.id()).unwrap();
+        }
+
+        let svg = match handle.data() {
+            svg::Data::Path(path) => {
+                let tree = fs::read_to_string(path).ok().and_then(|contents| {
+                    usvg::Tree::from_str(&contents, &usvg::Options::default())
+                        .ok()
+                });
+
+                tree.map(Svg::Loaded).unwrap_or(Svg::NotFound)
+            }
+            svg::Data::Bytes(bytes) => {
+                match usvg::Tree::from_data(bytes, &usvg::Options::default()) {
+                    Ok(tree) => Svg::Loaded(tree),
+                    Err(_) => Svg::NotFound,
+                }
+            }
+        };
+
+        let _ = self.svgs.insert(handle.id(), svg);
+        self.svgs.get(&handle.id()).unwrap()
+    }
+
+    /// Load svg and upload raster data
+    pub fn upload(
+        &mut self,
+        device: &wgpu::Device,
+        encoder: &mut wgpu::CommandEncoder,
+        handle: &svg::Handle,
+        color: Option<Color>,
+        [width, height]: [f32; 2],
+        scale: f32,
+        atlas: &mut Atlas,
+    ) -> Option<&atlas::Entry> {
+        let id = handle.id();
+
+        let (width, height) = (
+            (scale * width).ceil() as u32,
+            (scale * height).ceil() as u32,
+        );
+
+        let color = color.map(Color::into_rgba8);
+        let key = (id, width, height, color);
+
+        // TODO: Optimize!
+        // We currently rerasterize the SVG when its size changes. This is slow
+        // as heck. A GPU rasterizer like `pathfinder` may perform better.
+        // It would be cool to be able to smooth resize the `svg` example.
+        if self.rasterized.contains_key(&key) {
+            let _ = self.svg_hits.insert(id);
+            let _ = self.rasterized_hits.insert(key);
+
+            return self.rasterized.get(&key);
+        }
+
+        match self.load(handle) {
+            Svg::Loaded(tree) => {
+                if width == 0 || height == 0 {
+                    return None;
+                }
+
+                // TODO: Optimize!
+                // We currently rerasterize the SVG when its size changes. This is slow
+                // as heck. A GPU rasterizer like `pathfinder` may perform better.
+                // It would be cool to be able to smooth resize the `svg` example.
+                let mut img = tiny_skia::Pixmap::new(width, height)?;
+
+                let tree_size = tree.size.to_int_size();
+
+                let target_size = if width > height {
+                    tree_size.scale_to_width(width)
+                } else {
+                    tree_size.scale_to_height(height)
+                };
+
+                let transform = if let Some(target_size) = target_size {
+                    let tree_size = tree_size.to_size();
+                    let target_size = target_size.to_size();
+
+                    tiny_skia::Transform::from_scale(
+                        target_size.width() / tree_size.width(),
+                        target_size.height() / tree_size.height(),
+                    )
+                } else {
+                    tiny_skia::Transform::default()
+                };
+
+                resvg::Tree::from_usvg(tree)
+                    .render(transform, &mut img.as_mut());
+
+                let mut rgba = img.take();
+
+                if let Some(color) = color {
+                    rgba.chunks_exact_mut(4).for_each(|rgba| {
+                        if rgba[3] > 0 {
+                            rgba[0] = color[0];
+                            rgba[1] = color[1];
+                            rgba[2] = color[2];
+                        }
+                    });
+                }
+
+                let allocation =
+                    atlas.upload(device, encoder, width, height, &rgba)?;
+
+                log::debug!("allocating {} {}x{}", id, width, height);
+
+                let _ = self.svg_hits.insert(id);
+                let _ = self.rasterized_hits.insert(key);
+                let _ = self.rasterized.insert(key, allocation);
+
+                self.rasterized.get(&key)
+            }
+            Svg::NotFound => None,
+        }
+    }
+
+    /// Load svg and upload raster data
+    pub fn trim(&mut self, atlas: &mut Atlas) {
+        let svg_hits = &self.svg_hits;
+        let rasterized_hits = &self.rasterized_hits;
+
+        self.svgs.retain(|k, _| svg_hits.contains(k));
+        self.rasterized.retain(|k, entry| {
+            let retain = rasterized_hits.contains(k);
+
+            if !retain {
+                atlas.remove(entry);
+            }
+
+            retain
+        });
+        self.svg_hits.clear();
+        self.rasterized_hits.clear();
+    }
+}
+
+impl std::fmt::Debug for Svg {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Svg::Loaded(_) => write!(f, "Svg::Loaded"),
+            Svg::NotFound => write!(f, "Svg::NotFound"),
+        }
+    }
+}
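For context on the row-padding comment inside the new `Atlas::upload` above: WebGPU requires `bytes_per_row` of a buffer-to-texture copy to be a multiple of `wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`, so each RGBA row is copied into a wider, padded row before upload. A minimal standalone sketch of that arithmetic follows; it assumes the usual alignment of 256 bytes, and the helper name `pad_rgba_rows` is illustrative, not part of the diff.

```rust
// Standalone sketch of the row padding performed by `Atlas::upload` above.
// `ALIGN` mirrors wgpu::COPY_BYTES_PER_ROW_ALIGNMENT (assumed to be 256);
// `pad_rgba_rows` is a hypothetical helper, not part of the iced codebase.
const ALIGN: u32 = 256;

/// Copies each `4 * width`-byte RGBA row into a buffer whose rows are padded
/// to a multiple of `ALIGN`, returning the padded data and the per-row padding.
fn pad_rgba_rows(data: &[u8], width: u32, height: u32) -> (Vec<u8>, u32) {
    let padding = (ALIGN - (4 * width) % ALIGN) % ALIGN;
    let padded_width = (4 * width + padding) as usize;

    let mut padded = vec![0u8; padded_width * height as usize];

    for row in 0..height as usize {
        let src = row * 4 * width as usize;
        let dst = row * padded_width;

        padded[dst..dst + 4 * width as usize]
            .copy_from_slice(&data[src..src + 4 * width as usize]);
    }

    (padded, padding)
}

fn main() {
    // A 300x2 RGBA image: each row is 1200 bytes, so 80 bytes of padding
    // bring it to 1280, the next multiple of 256.
    let image = vec![0xFFu8; 4 * 300 * 2];
    let (padded, padding) = pad_rgba_rows(&image, 300, 2);

    assert_eq!(padding, 80);
    assert_eq!(padded.len(), 1280 * 2);
}
```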
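The new `raster::Cache` and `vector::Cache` also share one eviction scheme: every `load`/`upload` records the handle id in a hit set, and `trim` drops whatever was not touched since the last trim, freeing its atlas entry. A minimal sketch of that pattern, with a simplified `Entry` type and a `free` callback standing in for the real `Atlas::remove`:

```rust
use std::collections::{HashMap, HashSet};

/// Simplified stand-in for the per-handle data the real caches store
/// (`Memory` in raster.rs, `atlas::Entry` in vector.rs).
struct Entry;

#[derive(Default)]
struct Cache {
    map: HashMap<u64, Entry>,
    hits: HashSet<u64>,
}

impl Cache {
    /// Fetching an entry marks it as "used since the last trim".
    fn get(&mut self, id: u64) -> Option<&Entry> {
        let _ = self.hits.insert(id);
        self.map.get(&id)
    }

    /// Drop everything that was not fetched since the previous `trim`,
    /// running `free` for each evicted entry (the real code deallocates
    /// the atlas region here), then reset the hit set.
    fn trim(&mut self, mut free: impl FnMut(&Entry)) {
        let hits = &self.hits;

        self.map.retain(|id, entry| {
            let retain = hits.contains(id);

            if !retain {
                free(entry);
            }

            retain
        });

        self.hits.clear();
    }
}

fn main() {
    let mut cache = Cache::default();
    cache.map.insert(1, Entry);
    cache.map.insert(2, Entry);

    let _ = cache.get(1); // only id 1 is hit this round
    cache.trim(|_entry| {}); // id 2 is evicted and "freed"

    assert!(cache.map.contains_key(&1));
    assert!(!cache.map.contains_key(&2));
}
```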