author    Héctor Ramón Jiménez <hector0193@gmail.com>  2023-03-07 03:47:49 +0100
committer Héctor Ramón Jiménez <hector0193@gmail.com>  2023-03-07 03:47:49 +0100
commit    3a26baa564524b0f25c5cb180b592c8b004b68a9
tree      c6a51e9116e7f29552130778fe071efa9b1d1262 /wgpu/src/image
parent    9b4bcd287a7f4822314e158990d1dc023d5aab51
Remove `image` abstractions in `iced_graphics`
Diffstat (limited to 'wgpu/src/image')

 -rw-r--r--  wgpu/src/image/atlas.rs        | 190
 -rw-r--r--  wgpu/src/image/atlas/entry.rs  |   6
 -rw-r--r--  wgpu/src/image/raster.rs       | 121
 -rw-r--r--  wgpu/src/image/vector.rs       | 181
 4 files changed, 396 insertions(+), 102 deletions(-)
diff --git a/wgpu/src/image/atlas.rs b/wgpu/src/image/atlas.rs
index 0a17ca33..c00b8cef 100644
--- a/wgpu/src/image/atlas.rs
+++ b/wgpu/src/image/atlas.rs
@@ -13,7 +13,6 @@ use allocator::Allocator;
 
 pub const SIZE: u32 = 2048;
 
 use crate::core::Size;
-use crate::graphics::image;
 use std::num::NonZeroU32;
 
@@ -64,6 +63,97 @@ impl Atlas {
         self.layers.len()
     }
 
+    pub fn upload(
+        &mut self,
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        encoder: &mut wgpu::CommandEncoder,
+        width: u32,
+        height: u32,
+        data: &[u8],
+    ) -> Option<Entry> {
+        let entry = {
+            let current_size = self.layers.len();
+            let entry = self.allocate(width, height)?;
+
+            // We grow the internal texture after allocating if necessary
+            let new_layers = self.layers.len() - current_size;
+            self.grow(new_layers, device, encoder);
+
+            entry
+        };
+
+        log::info!("Allocated atlas entry: {:?}", entry);
+
+        // It is a webgpu requirement that:
+        // BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
+        // So we calculate padded_width by rounding width up to the next
+        // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
+        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
+        let padding = (align - (4 * width) % align) % align;
+        let padded_width = (4 * width + padding) as usize;
+        let padded_data_size = padded_width * height as usize;
+
+        let mut padded_data = vec![0; padded_data_size];
+
+        for row in 0..height as usize {
+            let offset = row * padded_width;
+
+            padded_data[offset..offset + 4 * width as usize].copy_from_slice(
+                &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
+            )
+        }
+
+        match &entry {
+            Entry::Contiguous(allocation) => {
+                self.upload_allocation(
+                    &padded_data,
+                    width,
+                    height,
+                    padding,
+                    0,
+                    allocation,
+                    queue,
+                );
+            }
+            Entry::Fragmented { fragments, .. } => {
+                for fragment in fragments {
+                    let (x, y) = fragment.position;
+                    let offset = (y * padded_width as u32 + 4 * x) as usize;
+
+                    self.upload_allocation(
+                        &padded_data,
+                        width,
+                        height,
+                        padding,
+                        offset,
+                        &fragment.allocation,
+                        queue,
+                    );
+                }
+            }
+        }
+
+        log::info!("Current atlas: {:?}", self);
+
+        Some(entry)
+    }
+
+    pub fn remove(&mut self, entry: &Entry) {
+        log::info!("Removing atlas entry: {:?}", entry);
+
+        match entry {
+            Entry::Contiguous(allocation) => {
+                self.deallocate(allocation);
+            }
+            Entry::Fragmented { fragments, .. } => {
+                for fragment in fragments {
+                    self.deallocate(&fragment.allocation);
+                }
+            }
+        }
+    }
+
     fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
         // Allocate one layer if texture fits perfectly
         if width == SIZE && height == SIZE {
@@ -296,101 +386,3 @@ impl Atlas {
         });
     }
 }
-
-impl image::Storage for Atlas {
-    type Entry = Entry;
-    type State<'a> = (
-        &'a wgpu::Device,
-        &'a wgpu::Queue,
-        &'a mut wgpu::CommandEncoder,
-    );
-
-    fn upload(
-        &mut self,
-        width: u32,
-        height: u32,
-        data: &[u8],
-        (device, queue, encoder): &mut Self::State<'_>,
-    ) -> Option<Self::Entry> {
-        let entry = {
-            let current_size = self.layers.len();
-            let entry = self.allocate(width, height)?;
-
-            // We grow the internal texture after allocating if necessary
-            let new_layers = self.layers.len() - current_size;
-            self.grow(new_layers, device, encoder);
-
-            entry
-        };
-
-        log::info!("Allocated atlas entry: {:?}", entry);
-
-        // It is a webgpu requirement that:
-        // BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
-        // So we calculate padded_width by rounding width up to the next
-        // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
-        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
-        let padding = (align - (4 * width) % align) % align;
-        let padded_width = (4 * width + padding) as usize;
-        let padded_data_size = padded_width * height as usize;
-
-        let mut padded_data = vec![0; padded_data_size];
-
-        for row in 0..height as usize {
-            let offset = row * padded_width;
-
-            padded_data[offset..offset + 4 * width as usize].copy_from_slice(
-                &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
-            )
-        }
-
-        match &entry {
-            Entry::Contiguous(allocation) => {
-                self.upload_allocation(
-                    &padded_data,
-                    width,
-                    height,
-                    padding,
-                    0,
-                    allocation,
-                    queue,
-                );
-            }
-            Entry::Fragmented { fragments, .. } => {
-                for fragment in fragments {
-                    let (x, y) = fragment.position;
-                    let offset = (y * padded_width as u32 + 4 * x) as usize;
-
-                    self.upload_allocation(
-                        &padded_data,
-                        width,
-                        height,
-                        padding,
-                        offset,
-                        &fragment.allocation,
-                        queue,
-                    );
-                }
-            }
-        }
-
-        log::info!("Current atlas: {:?}", self);
-
-        Some(entry)
-    }
-
-    fn remove(&mut self, entry: &Entry, _: &mut Self::State<'_>) {
-        log::info!("Removing atlas entry: {:?}", entry);
-
-        match entry {
-            Entry::Contiguous(allocation) => {
-                self.deallocate(allocation);
-            }
-            Entry::Fragmented { fragments, .. } => {
-                for fragment in fragments {
-                    self.deallocate(&fragment.allocation);
-                }
-            }
-        }
-    }
-}
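
The row padding in the new `Atlas::upload` exists because WebGPU requires the `bytes_per_row` of a texture copy to be a multiple of `wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`, which is 256. A standalone sketch of the same arithmetic (the `ALIGN` constant and `padded_row_bytes` helper are illustrative stand-ins, not code from this commit):

    // Sketch of the row-padding arithmetic used by `Atlas::upload`.
    // `ALIGN` mirrors `wgpu::COPY_BYTES_PER_ROW_ALIGNMENT` (256 bytes).
    const ALIGN: u32 = 256;

    /// Padded length in bytes of one RGBA8 row of `width` pixels.
    fn padded_row_bytes(width: u32) -> u32 {
        let unpadded = 4 * width; // 4 bytes per RGBA8 pixel
        let padding = (ALIGN - unpadded % ALIGN) % ALIGN;

        unpadded + padding
    }

    fn main() {
        // 300 px -> 1200 bytes; the next multiple of 256 is 1280.
        assert_eq!(padded_row_bytes(300), 1280);

        // 64 px -> exactly 256 bytes; already aligned, so no padding.
        assert_eq!(padded_row_bytes(64), 256);
    }

Each source row is copied into this padded staging buffer before `upload_allocation` issues the copy, which is why fragmented entries locate their sub-rectangle in `padded_data` with `y * padded_width + 4 * x`.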
diff --git a/wgpu/src/image/atlas/entry.rs b/wgpu/src/image/atlas/entry.rs
index 4b06bd95..7e4c92a2 100644
--- a/wgpu/src/image/atlas/entry.rs
+++ b/wgpu/src/image/atlas/entry.rs
@@ -1,5 +1,4 @@
 use crate::core::Size;
-use crate::graphics::image;
 use crate::image::atlas;
 
 #[derive(Debug)]
@@ -11,8 +10,9 @@ pub enum Entry {
     },
 }
 
-impl image::storage::Entry for Entry {
-    fn size(&self) -> Size<u32> {
+impl Entry {
+    #[cfg(feature = "image")]
+    pub fn size(&self) -> Size<u32> {
         match self {
             Entry::Contiguous(allocation) => allocation.size(),
             Entry::Fragmented { size, .. } => *size,
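
With the trait gone, `Entry::size` becomes an inherent method gated behind the `image` feature, presumably because its only caller visible in this commit (`Memory::dimensions` in raster.rs below) belongs to the raster pipeline. A minimal sketch of that cfg-gating pattern (the `Size` and `Entry` types here are simplified stand-ins for the iced ones):

    // Sketch of feature-gating a method, as done for `Entry::size`.
    #[derive(Debug, Clone, Copy)]
    struct Size {
        width: u32,
        height: u32,
    }

    struct Entry {
        size: Size,
    }

    impl Entry {
        // Compiled only when the crate is built with `--features image`.
        #[cfg(feature = "image")]
        fn size(&self) -> Size {
            self.size
        }
    }

    fn main() {
        let entry = Entry {
            size: Size { width: 32, height: 32 },
        };

        #[cfg(feature = "image")]
        println!("entry size: {:?}", entry.size());

        #[cfg(not(feature = "image"))]
        println!("entry size: {}x{}", entry.size.width, entry.size.height);
    }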
diff --git a/wgpu/src/image/raster.rs b/wgpu/src/image/raster.rs
new file mode 100644
index 00000000..9b38dce4
--- /dev/null
+++ b/wgpu/src/image/raster.rs
@@ -0,0 +1,121 @@
+use crate::core::image;
+use crate::core::Size;
+use crate::graphics;
+use crate::graphics::image::image_rs;
+use crate::image::atlas::{self, Atlas};
+
+use std::collections::{HashMap, HashSet};
+
+/// Entry in cache corresponding to an image handle
+#[derive(Debug)]
+pub enum Memory {
+    /// Image data on host
+    Host(image_rs::ImageBuffer<image_rs::Rgba<u8>, Vec<u8>>),
+    /// Storage entry
+    Device(atlas::Entry),
+    /// Image not found
+    NotFound,
+    /// Invalid image data
+    Invalid,
+}
+
+impl Memory {
+    /// Width and height of image
+    pub fn dimensions(&self) -> Size<u32> {
+        match self {
+            Memory::Host(image) => {
+                let (width, height) = image.dimensions();
+
+                Size::new(width, height)
+            }
+            Memory::Device(entry) => entry.size(),
+            Memory::NotFound => Size::new(1, 1),
+            Memory::Invalid => Size::new(1, 1),
+        }
+    }
+}
+
+/// Caches image raster data
+#[derive(Debug, Default)]
+pub struct Cache {
+    map: HashMap<u64, Memory>,
+    hits: HashSet<u64>,
+}
+
+impl Cache {
+    /// Load image
+    pub fn load(&mut self, handle: &image::Handle) -> &mut Memory {
+        if self.contains(handle) {
+            return self.get(handle).unwrap();
+        }
+
+        let memory = match graphics::image::load(handle) {
+            Ok(image) => Memory::Host(image.to_rgba8()),
+            Err(image_rs::error::ImageError::IoError(_)) => Memory::NotFound,
+            Err(_) => Memory::Invalid,
+        };
+
+        self.insert(handle, memory);
+        self.get(handle).unwrap()
+    }
+
+    /// Load image and upload raster data
+    pub fn upload(
+        &mut self,
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        encoder: &mut wgpu::CommandEncoder,
+        handle: &image::Handle,
+        atlas: &mut Atlas,
+    ) -> Option<&atlas::Entry> {
+        let memory = self.load(handle);
+
+        if let Memory::Host(image) = memory {
+            let (width, height) = image.dimensions();
+
+            let entry =
+                atlas.upload(device, queue, encoder, width, height, image)?;
+
+            *memory = Memory::Device(entry);
+        }
+
+        if let Memory::Device(allocation) = memory {
+            Some(allocation)
+        } else {
+            None
+        }
+    }
+
+    /// Trim cache misses from cache
+    pub fn trim(&mut self, atlas: &mut Atlas) {
+        let hits = &self.hits;
+
+        self.map.retain(|k, memory| {
+            let retain = hits.contains(k);
+
+            if !retain {
+                if let Memory::Device(entry) = memory {
+                    atlas.remove(entry);
+                }
+            }
+
+            retain
+        });
+
+        self.hits.clear();
+    }
+
+    fn get(&mut self, handle: &image::Handle) -> Option<&mut Memory> {
+        let _ = self.hits.insert(handle.id());
+
+        self.map.get_mut(&handle.id())
+    }
+
+    fn insert(&mut self, handle: &image::Handle, memory: Memory) {
+        let _ = self.map.insert(handle.id(), memory);
+    }
+
+    fn contains(&self, handle: &image::Handle) -> bool {
+        self.map.contains_key(&handle.id())
+    }
+}
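
`Cache::trim` above implements frame-scoped eviction: `get` records every requested handle in `hits`, and anything not requested since the previous `trim` is dropped from the map and deallocated from the atlas. A minimal, dependency-free sketch of that hit-tracking pattern (the `MiniCache` type is illustrative, not part of the iced API):

    use std::collections::{HashMap, HashSet};

    // Miniature of the hit-tracking eviction used by `Cache::trim`.
    struct MiniCache<V> {
        map: HashMap<u64, V>,
        hits: HashSet<u64>,
    }

    impl<V> MiniCache<V> {
        fn get(&mut self, key: u64) -> Option<&V> {
            // Every lookup marks the key as live for the current frame.
            let _ = self.hits.insert(key);

            self.map.get(&key)
        }

        // Evict whatever was not looked up since the last `trim`, then
        // reset the hit set for the next frame.
        fn trim(&mut self) {
            let hits = &self.hits;

            self.map.retain(|key, _| hits.contains(key));
            self.hits.clear();
        }
    }

    fn main() {
        let mut cache = MiniCache {
            map: HashMap::from([(1, "a"), (2, "b")]),
            hits: HashSet::new(),
        };

        let _ = cache.get(1); // only key 1 is used this frame
        cache.trim();

        assert!(cache.map.contains_key(&1));
        assert!(!cache.map.contains_key(&2)); // key 2 was evicted
    }

The real `trim` additionally calls `Atlas::remove` for evicted `Memory::Device` entries, so their atlas regions are deallocated rather than merely forgotten.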
diff --git a/wgpu/src/image/vector.rs b/wgpu/src/image/vector.rs
new file mode 100644
index 00000000..3624e46b
--- /dev/null
+++ b/wgpu/src/image/vector.rs
@@ -0,0 +1,181 @@
+use crate::core::svg;
+use crate::core::{Color, Size};
+use crate::image::atlas::{self, Atlas};
+
+use resvg::tiny_skia;
+use resvg::usvg;
+use std::collections::{HashMap, HashSet};
+use std::fs;
+
+/// Entry in cache corresponding to an svg handle
+pub enum Svg {
+    /// Parsed svg
+    Loaded(usvg::Tree),
+    /// Svg not found or failed to parse
+    NotFound,
+}
+
+impl Svg {
+    /// Viewport width and height
+    pub fn viewport_dimensions(&self) -> Size<u32> {
+        match self {
+            Svg::Loaded(tree) => {
+                let size = tree.size;
+
+                Size::new(size.width() as u32, size.height() as u32)
+            }
+            Svg::NotFound => Size::new(1, 1),
+        }
+    }
+}
+
+/// Caches svg vector and raster data
+#[derive(Debug, Default)]
+pub struct Cache {
+    svgs: HashMap<u64, Svg>,
+    rasterized: HashMap<(u64, u32, u32, ColorFilter), atlas::Entry>,
+    svg_hits: HashSet<u64>,
+    rasterized_hits: HashSet<(u64, u32, u32, ColorFilter)>,
+}
+
+type ColorFilter = Option<[u8; 4]>;
+
+impl Cache {
+    /// Load svg
+    pub fn load(&mut self, handle: &svg::Handle) -> &Svg {
+        if self.svgs.contains_key(&handle.id()) {
+            return self.svgs.get(&handle.id()).unwrap();
+        }
+
+        let svg = match handle.data() {
+            svg::Data::Path(path) => {
+                let tree = fs::read_to_string(path).ok().and_then(|contents| {
+                    usvg::Tree::from_str(&contents, &usvg::Options::default())
+                        .ok()
+                });
+
+                tree.map(Svg::Loaded).unwrap_or(Svg::NotFound)
+            }
+            svg::Data::Bytes(bytes) => {
+                match usvg::Tree::from_data(bytes, &usvg::Options::default()) {
+                    Ok(tree) => Svg::Loaded(tree),
+                    Err(_) => Svg::NotFound,
+                }
+            }
+        };
+
+        let _ = self.svgs.insert(handle.id(), svg);
+        self.svgs.get(&handle.id()).unwrap()
+    }
+
+    /// Load svg and upload raster data
+    pub fn upload(
+        &mut self,
+        device: &wgpu::Device,
+        queue: &wgpu::Queue,
+        encoder: &mut wgpu::CommandEncoder,
+        handle: &svg::Handle,
+        color: Option<Color>,
+        [width, height]: [f32; 2],
+        scale: f32,
+        atlas: &mut Atlas,
+    ) -> Option<&atlas::Entry> {
+        let id = handle.id();
+
+        let (width, height) = (
+            (scale * width).ceil() as u32,
+            (scale * height).ceil() as u32,
+        );
+
+        let color = color.map(Color::into_rgba8);
+        let key = (id, width, height, color);
+
+        // TODO: Optimize!
+        // We currently rerasterize the SVG when its size changes. This is slow
+        // as heck. A GPU rasterizer like `pathfinder` may perform better.
+        // It would be cool to be able to smooth resize the `svg` example.
+        if self.rasterized.contains_key(&key) {
+            let _ = self.svg_hits.insert(id);
+            let _ = self.rasterized_hits.insert(key);
+
+            return self.rasterized.get(&key);
+        }
+
+        match self.load(handle) {
+            Svg::Loaded(tree) => {
+                if width == 0 || height == 0 {
+                    return None;
+                }
+
+                // TODO: Optimize!
+                // We currently rerasterize the SVG when its size changes. This is slow
+                // as heck. A GPU rasterizer like `pathfinder` may perform better.
+                // It would be cool to be able to smooth resize the `svg` example.
+                let mut img = tiny_skia::Pixmap::new(width, height)?;
+
+                resvg::render(
+                    tree,
+                    if width > height {
+                        usvg::FitTo::Width(width)
+                    } else {
+                        usvg::FitTo::Height(height)
+                    },
+                    tiny_skia::Transform::default(),
+                    img.as_mut(),
+                )?;
+
+                let mut rgba = img.take();
+
+                if let Some(color) = color {
+                    rgba.chunks_exact_mut(4).for_each(|rgba| {
+                        if rgba[3] > 0 {
+                            rgba[0] = color[0];
+                            rgba[1] = color[1];
+                            rgba[2] = color[2];
+                        }
+                    });
+                }
+
+                let allocation = atlas
+                    .upload(device, queue, encoder, width, height, &rgba)?;
+
+                log::debug!("allocating {} {}x{}", id, width, height);
+
+                let _ = self.svg_hits.insert(id);
+                let _ = self.rasterized_hits.insert(key);
+                let _ = self.rasterized.insert(key, allocation);
+
+                self.rasterized.get(&key)
+            }
+            Svg::NotFound => None,
+        }
+    }
+
+    /// Trim cache misses from cache
+    pub fn trim(&mut self, atlas: &mut Atlas) {
+        let svg_hits = &self.svg_hits;
+        let rasterized_hits = &self.rasterized_hits;
+
+        self.svgs.retain(|k, _| svg_hits.contains(k));
+        self.rasterized.retain(|k, entry| {
+            let retain = rasterized_hits.contains(k);
+
+            if !retain {
+                atlas.remove(entry);
+            }
+
+            retain
+        });
+        self.svg_hits.clear();
+        self.rasterized_hits.clear();
+    }
+}
+
+impl std::fmt::Debug for Svg {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Svg::Loaded(_) => write!(f, "Svg::Loaded"),
+            Svg::NotFound => write!(f, "Svg::NotFound"),
+        }
+    }
+}
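
The rasterization cache in `upload` above is keyed by `(id, width, height, color)`: the physical pixel size after applying the scale factor, plus the optional tint. One SVG can therefore own several atlas entries at once, one per size and tint it has been drawn at. A standalone sketch of how that key is derived (`Rgba` and `rasterized_key` are illustrative stand-ins; `Rgba` plays the role of the `[u8; 4]` produced by `Color::into_rgba8`):

    // Sketch of the memoization key used by the vector `Cache::upload`.
    type Rgba = [u8; 4];
    type Key = (u64, u32, u32, Option<Rgba>);

    fn rasterized_key(
        id: u64,
        [width, height]: [f32; 2],
        scale: f32,
        color: Option<Rgba>,
    ) -> Key {
        // Logical size times the scale factor, rounded up to whole pixels.
        (
            id,
            (scale * width).ceil() as u32,
            (scale * height).ceil() as u32,
            color,
        )
    }

    fn main() {
        // The same SVG at 1x and 2x scale yields distinct keys, so each
        // physical size is rasterized and cached separately.
        let at_1x = rasterized_key(42, [24.0, 24.0], 1.0, None);
        let at_2x = rasterized_key(42, [24.0, 24.0], 2.0, None);
        assert_ne!(at_1x, at_2x);

        // A tinted copy also gets its own key, and thus its own atlas entry.
        let red = rasterized_key(42, [24.0, 24.0], 1.0, Some([255, 0, 0, 255]));
        assert_ne!(at_1x, red);
    }

Note that the tint loop in `upload` only overwrites the RGB channels of pixels whose alpha is non-zero, so the rasterized coverage, including antialiased edges, is preserved.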