| author    | 2022-10-31 13:37:56 -0700 |
|-----------|---------------------------|
| committer | 2022-11-05 03:19:37 +0100 |
| commit    | 2c7c42ee93a61f39562590f6a75eb2dd8b220fb8 (patch) |
| tree      | 83279bcbd9ef0ca5ea8c8b763d38259555640216 /wgpu/src/image |
| parent    | 7b129917281baaa6688158c303922f94341ab69f (diff) |
| download  | iced-2c7c42ee93a61f39562590f6a75eb2dd8b220fb8.tar.gz, iced-2c7c42ee93a61f39562590f6a75eb2dd8b220fb8.tar.bz2, iced-2c7c42ee93a61f39562590f6a75eb2dd8b220fb8.zip |
Move image/svg handling into `iced_graphics`
The `TextureStore` trait is implemented by the atlas, and can also be
implemented in the glow renderer or in a software renderer.
The API here may be improved in the future, but API stability is
presumably not a major concern, since these types will only be used
by renderer backends.
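
For reference, here is a minimal sketch of what the `TextureStore` and `TextureStoreEntry` traits look like, inferred from the `impl TextureStore for Atlas` and `impl TextureStoreEntry for Entry` blocks in the diff below. The actual definitions live in `iced_graphics::image` and may differ in bounds, visibility, and documentation.

```rust
// Sketch only: reconstructed from how the wgpu atlas implements these
// traits in this commit; the real definitions are in `iced_graphics::image`.
pub trait TextureStoreEntry {
    /// Reports the size of the stored texture in pixels.
    fn size(&self) -> (u32, u32);
}

pub trait TextureStore {
    /// Handle to an uploaded texture (for the wgpu backend, the atlas `Entry`).
    type Entry: TextureStoreEntry;

    /// Backend-specific upload context, e.g.
    /// `(&wgpu::Device, &mut wgpu::CommandEncoder)` for the wgpu backend.
    type State<'a>;

    /// Uploads pixel data and returns an entry on success.
    fn upload(
        &mut self,
        width: u32,
        height: u32,
        data: &[u8],
        state: &mut Self::State<'_>,
    ) -> Option<Self::Entry>;

    /// Frees the space used by a previously uploaded entry.
    fn remove(&mut self, entry: &Self::Entry, state: &mut Self::State<'_>);
}
```

A glow or software backend would plug in its own `Entry` and `State` types, which is what lets the image/svg caches move into `iced_graphics` and be shared across renderers.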
Diffstat (limited to 'wgpu/src/image')
| -rw-r--r-- | wgpu/src/image/atlas.rs       | 191 |
| -rw-r--r-- | wgpu/src/image/atlas/entry.rs |   6 |
| -rw-r--r-- | wgpu/src/image/raster.rs      | 222 |
| -rw-r--r-- | wgpu/src/image/vector.rs      | 173 |
4 files changed, 101 insertions, 491 deletions
diff --git a/wgpu/src/image/atlas.rs b/wgpu/src/image/atlas.rs
index 953dd4e2..d3e0c753 100644
--- a/wgpu/src/image/atlas.rs
+++ b/wgpu/src/image/atlas.rs
@@ -4,6 +4,7 @@
 mod allocation;
 mod allocator;
 mod layer;
 
+use iced_graphics::image::TextureStore;
 use std::num::NonZeroU32;
 
 pub use allocation::Allocation;
@@ -61,99 +62,6 @@ impl Atlas {
         self.layers.len()
     }
 
-    pub fn upload(
-        &mut self,
-        width: u32,
-        height: u32,
-        data: &[u8],
-        device: &wgpu::Device,
-        encoder: &mut wgpu::CommandEncoder,
-    ) -> Option<Entry> {
-        use wgpu::util::DeviceExt;
-
-        let entry = {
-            let current_size = self.layers.len();
-            let entry = self.allocate(width, height)?;
-
-            // We grow the internal texture after allocating if necessary
-            let new_layers = self.layers.len() - current_size;
-            self.grow(new_layers, device, encoder);
-
-            entry
-        };
-
-        log::info!("Allocated atlas entry: {:?}", entry);
-
-        // It is a webgpu requirement that:
-        // BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
-        // So we calculate padded_width by rounding width up to the next
-        // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
-        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
-        let padding = (align - (4 * width) % align) % align;
-        let padded_width = (4 * width + padding) as usize;
-        let padded_data_size = padded_width * height as usize;
-
-        let mut padded_data = vec![0; padded_data_size];
-
-        for row in 0..height as usize {
-            let offset = row * padded_width;
-
-            padded_data[offset..offset + 4 * width as usize].copy_from_slice(
-                &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
-            )
-        }
-
-        let buffer =
-            device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                label: Some("iced_wgpu::image staging buffer"),
-                contents: &padded_data,
-                usage: wgpu::BufferUsages::COPY_SRC,
-            });
-
-        match &entry {
-            Entry::Contiguous(allocation) => {
-                self.upload_allocation(
-                    &buffer, width, height, padding, 0, allocation, encoder,
-                );
-            }
-            Entry::Fragmented { fragments, .. } => {
-                for fragment in fragments {
-                    let (x, y) = fragment.position;
-                    let offset = (y * padded_width as u32 + 4 * x) as usize;
-
-                    self.upload_allocation(
-                        &buffer,
-                        width,
-                        height,
-                        padding,
-                        offset,
-                        &fragment.allocation,
-                        encoder,
-                    );
-                }
-            }
-        }
-
-        log::info!("Current atlas: {:?}", self);
-
-        Some(entry)
-    }
-
-    pub fn remove(&mut self, entry: &Entry) {
-        log::info!("Removing atlas entry: {:?}", entry);
-
-        match entry {
-            Entry::Contiguous(allocation) => {
-                self.deallocate(allocation);
-            }
-            Entry::Fragmented { fragments, .. } => {
-                for fragment in fragments {
-                    self.deallocate(&fragment.allocation);
-                }
-            }
-        }
-    }
-
     fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
         // Allocate one layer if texture fits perfectly
         if width == SIZE && height == SIZE {
@@ -388,3 +296,100 @@ impl Atlas {
         });
     }
 }
+
+impl TextureStore for Atlas {
+    type Entry = Entry;
+    type State<'a> = (&'a wgpu::Device, &'a mut wgpu::CommandEncoder);
+
+    fn upload(
+        &mut self,
+        width: u32,
+        height: u32,
+        data: &[u8],
+        (device, encoder): &mut Self::State<'_>,
+    ) -> Option<Self::Entry> {
+        use wgpu::util::DeviceExt;
+
+        let entry = {
+            let current_size = self.layers.len();
+            let entry = self.allocate(width, height)?;
+
+            // We grow the internal texture after allocating if necessary
+            let new_layers = self.layers.len() - current_size;
+            self.grow(new_layers, device, encoder);
+
+            entry
+        };
+
+        log::info!("Allocated atlas entry: {:?}", entry);
+
+        // It is a webgpu requirement that:
+        // BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
+        // So we calculate padded_width by rounding width up to the next
+        // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
+        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
+        let padding = (align - (4 * width) % align) % align;
+        let padded_width = (4 * width + padding) as usize;
+        let padded_data_size = padded_width * height as usize;
+
+        let mut padded_data = vec![0; padded_data_size];
+
+        for row in 0..height as usize {
+            let offset = row * padded_width;
+
+            padded_data[offset..offset + 4 * width as usize].copy_from_slice(
+                &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
+            )
+        }
+
+        let buffer =
+            device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+                label: Some("iced_wgpu::image staging buffer"),
+                contents: &padded_data,
+                usage: wgpu::BufferUsages::COPY_SRC,
+            });
+
+        match &entry {
+            Entry::Contiguous(allocation) => {
+                self.upload_allocation(
+                    &buffer, width, height, padding, 0, allocation, encoder,
+                );
+            }
+            Entry::Fragmented { fragments, .. } => {
+                for fragment in fragments {
+                    let (x, y) = fragment.position;
+                    let offset = (y * padded_width as u32 + 4 * x) as usize;
+
+                    self.upload_allocation(
+                        &buffer,
+                        width,
+                        height,
+                        padding,
+                        offset,
+                        &fragment.allocation,
+                        encoder,
+                    );
+                }
+            }
+        }
+
+        log::info!("Current atlas: {:?}", self);
+
+        Some(entry)
+    }
+
+    fn remove(&mut self, entry: &Entry, _: &mut Self::State<'_>) {
+        log::info!("Removing atlas entry: {:?}", entry);
+
+        match entry {
+            Entry::Contiguous(allocation) => {
+                self.deallocate(allocation);
+            }
+            Entry::Fragmented { fragments, .. } => {
+                for fragment in fragments {
+                    self.deallocate(&fragment.allocation);
+                }
+            }
+        }
+    }
+}
diff --git a/wgpu/src/image/atlas/entry.rs b/wgpu/src/image/atlas/entry.rs
index 9b3f16df..0c2f67fc 100644
--- a/wgpu/src/image/atlas/entry.rs
+++ b/wgpu/src/image/atlas/entry.rs
@@ -1,4 +1,5 @@
 use crate::image::atlas;
+use iced_graphics::image::TextureStoreEntry;
 
 #[derive(Debug)]
 pub enum Entry {
@@ -9,9 +10,8 @@ pub enum Entry {
     },
 }
 
-impl Entry {
-    #[cfg(feature = "image_rs")]
-    pub fn size(&self) -> (u32, u32) {
+impl TextureStoreEntry for Entry {
+    fn size(&self) -> (u32, u32) {
         match self {
             Entry::Contiguous(allocation) => allocation.size(),
             Entry::Fragmented { size, .. } => *size,
diff --git a/wgpu/src/image/raster.rs b/wgpu/src/image/raster.rs
deleted file mode 100644
index 2b4d4af3..00000000
--- a/wgpu/src/image/raster.rs
+++ /dev/null
@@ -1,222 +0,0 @@
-use crate::image::atlas::{self, Atlas};
-use iced_native::image;
-use std::collections::{HashMap, HashSet};
-
-use bitflags::bitflags;
-
-#[derive(Debug)]
-pub enum Memory {
-    Host(::image_rs::ImageBuffer<::image_rs::Bgra<u8>, Vec<u8>>),
-    Device(atlas::Entry),
-    NotFound,
-    Invalid,
-}
-
-impl Memory {
-    pub fn dimensions(&self) -> (u32, u32) {
-        match self {
-            Memory::Host(image) => image.dimensions(),
-            Memory::Device(entry) => entry.size(),
-            Memory::NotFound => (1, 1),
-            Memory::Invalid => (1, 1),
-        }
-    }
-}
-
-#[derive(Debug)]
-pub struct Cache {
-    map: HashMap<u64, Memory>,
-    hits: HashSet<u64>,
-}
-
-impl Cache {
-    pub fn new() -> Self {
-        Self {
-            map: HashMap::new(),
-            hits: HashSet::new(),
-        }
-    }
-
-    pub fn load(&mut self, handle: &image::Handle) -> &mut Memory {
-        if self.contains(handle) {
-            return self.get(handle).unwrap();
-        }
-
-        let memory = match handle.data() {
-            image::Data::Path(path) => {
-                if let Ok(image) = image_rs::open(path) {
-                    let operation = std::fs::File::open(path)
-                        .ok()
-                        .map(std::io::BufReader::new)
-                        .and_then(|mut reader| {
-                            Operation::from_exif(&mut reader).ok()
-                        })
-                        .unwrap_or_else(Operation::empty);
-
-                    Memory::Host(operation.perform(image.to_bgra8()))
-                } else {
-                    Memory::NotFound
-                }
-            }
-            image::Data::Bytes(bytes) => {
-                if let Ok(image) = image_rs::load_from_memory(bytes) {
-                    let operation =
-                        Operation::from_exif(&mut std::io::Cursor::new(bytes))
-                            .ok()
-                            .unwrap_or_else(Operation::empty);
-
-                    Memory::Host(operation.perform(image.to_bgra8()))
-                } else {
-                    Memory::Invalid
-                }
-            }
-            image::Data::Pixels {
-                width,
-                height,
-                pixels,
-            } => {
-                if let Some(image) = image_rs::ImageBuffer::from_vec(
-                    *width,
-                    *height,
-                    pixels.to_vec(),
-                ) {
-                    Memory::Host(image)
-                } else {
-                    Memory::Invalid
-                }
-            }
-        };
-
-        self.insert(handle, memory);
-        self.get(handle).unwrap()
-    }
-
-    pub fn upload(
-        &mut self,
-        handle: &image::Handle,
-        device: &wgpu::Device,
-        encoder: &mut wgpu::CommandEncoder,
-        atlas: &mut Atlas,
-    ) -> Option<&atlas::Entry> {
-        let memory = self.load(handle);
-
-        if let Memory::Host(image) = memory {
-            let (width, height) = image.dimensions();
-
-            let entry = atlas.upload(width, height, image, device, encoder)?;
-
-            *memory = Memory::Device(entry);
-        }
-
-        if let Memory::Device(allocation) = memory {
-            Some(allocation)
-        } else {
-            None
-        }
-    }
-
-    pub fn trim(&mut self, atlas: &mut Atlas) {
-        let hits = &self.hits;
-
-        self.map.retain(|k, memory| {
-            let retain = hits.contains(k);
-
-            if !retain {
-                if let Memory::Device(entry) = memory {
-                    atlas.remove(entry);
-                }
-            }
-
-            retain
-        });
-
-        self.hits.clear();
-    }
-
-    fn get(&mut self, handle: &image::Handle) -> Option<&mut Memory> {
-        let _ = self.hits.insert(handle.id());
-
-        self.map.get_mut(&handle.id())
-    }
-
-    fn insert(&mut self, handle: &image::Handle, memory: Memory) {
-        let _ = self.map.insert(handle.id(), memory);
-    }
-
-    fn contains(&self, handle: &image::Handle) -> bool {
-        self.map.contains_key(&handle.id())
-    }
-}
-
-bitflags! {
-    struct Operation: u8 {
-        const FLIP_HORIZONTALLY = 0b001;
-        const ROTATE_180 = 0b010;
-        const FLIP_DIAGONALLY = 0b100;
-    }
-}
-
-impl Operation {
-    // Meaning of the returned value is described e.g. at:
-    // https://magnushoff.com/articles/jpeg-orientation/
-    fn from_exif<R>(reader: &mut R) -> Result<Self, exif::Error>
-    where
-        R: std::io::BufRead + std::io::Seek,
-    {
-        let exif = exif::Reader::new().read_from_container(reader)?;
-
-        Ok(exif
-            .get_field(exif::Tag::Orientation, exif::In::PRIMARY)
-            .and_then(|field| field.value.get_uint(0))
-            .and_then(|value| u8::try_from(value).ok())
-            .and_then(|value| Self::from_bits(value.saturating_sub(1)))
-            .unwrap_or_else(Self::empty))
-    }
-
-    fn perform<P>(
-        self,
-        image: image_rs::ImageBuffer<P, Vec<P::Subpixel>>,
-    ) -> image_rs::ImageBuffer<P, Vec<P::Subpixel>>
-    where
-        P: image_rs::Pixel + 'static,
-    {
-        use image_rs::imageops;
-
-        let mut image = if self.contains(Self::FLIP_DIAGONALLY) {
-            flip_diagonally(image)
-        } else {
-            image
-        };
-
-        if self.contains(Self::ROTATE_180) {
-            imageops::rotate180_in_place(&mut image);
-        }
-
-        if self.contains(Self::FLIP_HORIZONTALLY) {
-            imageops::flip_horizontal_in_place(&mut image);
-        }
-
-        image
-    }
-}
-
-fn flip_diagonally<I>(
-    image: I,
-) -> image_rs::ImageBuffer<I::Pixel, Vec<<I::Pixel as image_rs::Pixel>::Subpixel>>
-where
-    I: image_rs::GenericImage,
-    I::Pixel: 'static,
-{
-    let (width, height) = image.dimensions();
-    let mut out = image_rs::ImageBuffer::new(height, width);
-
-    for x in 0..width {
-        for y in 0..height {
-            let p = image.get_pixel(x, y);
-
-            out.put_pixel(y, x, p);
-        }
-    }
-
-    out
-}
diff --git a/wgpu/src/image/vector.rs b/wgpu/src/image/vector.rs
deleted file mode 100644
index b08a0aa2..00000000
--- a/wgpu/src/image/vector.rs
+++ /dev/null
@@ -1,173 +0,0 @@
-use crate::image::atlas::{self, Atlas};
-
-use iced_native::svg;
-
-use std::collections::{HashMap, HashSet};
-use std::fs;
-
-pub enum Svg {
-    Loaded(usvg::Tree),
-    NotFound,
-}
-
-impl Svg {
-    pub fn viewport_dimensions(&self) -> (u32, u32) {
-        match self {
-            Svg::Loaded(tree) => {
-                let size = tree.svg_node().size;
-
-                (size.width() as u32, size.height() as u32)
-            }
-            Svg::NotFound => (1, 1),
-        }
-    }
-}
-
-#[derive(Debug)]
-pub struct Cache {
-    svgs: HashMap<u64, Svg>,
-    rasterized: HashMap<(u64, u32, u32), atlas::Entry>,
-    svg_hits: HashSet<u64>,
-    rasterized_hits: HashSet<(u64, u32, u32)>,
-}
-
-impl Cache {
-    pub fn new() -> Self {
-        Self {
-            svgs: HashMap::new(),
-            rasterized: HashMap::new(),
-            svg_hits: HashSet::new(),
-            rasterized_hits: HashSet::new(),
-        }
-    }
-
-    pub fn load(&mut self, handle: &svg::Handle) -> &Svg {
-        if self.svgs.contains_key(&handle.id()) {
-            return self.svgs.get(&handle.id()).unwrap();
-        }
-
-        let svg = match handle.data() {
-            svg::Data::Path(path) => {
-                let tree = fs::read_to_string(path).ok().and_then(|contents| {
-                    usvg::Tree::from_str(
-                        &contents,
-                        &usvg::Options::default().to_ref(),
-                    )
-                    .ok()
-                });
-
-                tree.map(Svg::Loaded).unwrap_or(Svg::NotFound)
-            }
-            svg::Data::Bytes(bytes) => {
-                match usvg::Tree::from_data(
-                    bytes,
-                    &usvg::Options::default().to_ref(),
-                ) {
-                    Ok(tree) => Svg::Loaded(tree),
-                    Err(_) => Svg::NotFound,
-                }
-            }
-        };
-
-        let _ = self.svgs.insert(handle.id(), svg);
-        self.svgs.get(&handle.id()).unwrap()
-    }
-
-    pub fn upload(
-        &mut self,
-        handle: &svg::Handle,
-        [width, height]: [f32; 2],
-        scale: f32,
-        device: &wgpu::Device,
-        encoder: &mut wgpu::CommandEncoder,
-        texture_atlas: &mut Atlas,
-    ) -> Option<&atlas::Entry> {
-        let id = handle.id();
-
-        let (width, height) = (
-            (scale * width).ceil() as u32,
-            (scale * height).ceil() as u32,
-        );
-
-        // TODO: Optimize!
-        // We currently rerasterize the SVG when its size changes. This is slow
-        // as heck. A GPU rasterizer like `pathfinder` may perform better.
-        // It would be cool to be able to smooth resize the `svg` example.
-        if self.rasterized.contains_key(&(id, width, height)) {
-            let _ = self.svg_hits.insert(id);
-            let _ = self.rasterized_hits.insert((id, width, height));
-
-            return self.rasterized.get(&(id, width, height));
-        }
-
-        match self.load(handle) {
-            Svg::Loaded(tree) => {
-                if width == 0 || height == 0 {
-                    return None;
-                }
-
-                // TODO: Optimize!
-                // We currently rerasterize the SVG when its size changes. This is slow
-                // as heck. A GPU rasterizer like `pathfinder` may perform better.
-                // It would be cool to be able to smooth resize the `svg` example.
-                let mut img = tiny_skia::Pixmap::new(width, height)?;
-
-                resvg::render(
-                    tree,
-                    if width > height {
-                        usvg::FitTo::Width(width)
-                    } else {
-                        usvg::FitTo::Height(height)
-                    },
-                    img.as_mut(),
-                )?;
-
-                let mut rgba = img.take();
-                rgba.chunks_exact_mut(4).for_each(|rgba| rgba.swap(0, 2));
-
-                let allocation = texture_atlas.upload(
-                    width,
-                    height,
-                    bytemuck::cast_slice(rgba.as_slice()),
-                    device,
-                    encoder,
-                )?;
-                log::debug!("allocating {} {}x{}", id, width, height);
-
-                let _ = self.svg_hits.insert(id);
-                let _ = self.rasterized_hits.insert((id, width, height));
-                let _ = self.rasterized.insert((id, width, height), allocation);
-
-                self.rasterized.get(&(id, width, height))
-            }
-            Svg::NotFound => None,
-        }
-    }
-
-    pub fn trim(&mut self, atlas: &mut Atlas) {
-        let svg_hits = &self.svg_hits;
-        let rasterized_hits = &self.rasterized_hits;
-
-        self.svgs.retain(|k, _| svg_hits.contains(k));
-        self.rasterized.retain(|k, entry| {
-            let retain = rasterized_hits.contains(k);
-
-            if !retain {
-                atlas.remove(entry);
-            }
-
-            retain
-        });
-        self.svg_hits.clear();
-        self.rasterized_hits.clear();
-    }
-}
-
-impl std::fmt::Debug for Svg {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            Svg::Loaded(_) => write!(f, "Svg::Loaded"),
-            Svg::NotFound => write!(f, "Svg::NotFound"),
-        }
-    }
-}
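
As an aside, the staging-buffer code kept in `upload` above pads each row because wgpu requires the `bytes_per_row` of a buffer-to-texture copy to be a multiple of `wgpu::COPY_BYTES_PER_ROW_ALIGNMENT` (256 bytes). A standalone sketch of that calculation; the helper name is illustrative and not part of this diff:

```rust
// Illustrative helper mirroring the padding math in the atlas `upload`
// above; `padded_bytes_per_row` is not a function in the iced codebase.
fn padded_bytes_per_row(width: u32) -> usize {
    let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT; // 256 bytes
    let unpadded = 4 * width; // 4 bytes per BGRA pixel
    let padding = (align - unpadded % align) % align;

    (unpadded + padding) as usize
}
```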