From c2b1135066bc7942d4b046464399656b12c44587 Mon Sep 17 00:00:00 2001 From: awxkee Date: Tue, 29 Oct 2024 12:20:58 +0000 Subject: [PATCH 01/21] Added YUV decoder, drop dcv, added high bit depth support --- Cargo.toml | 3 +- src/codecs/avif/decoder.rs | 460 +++++++++++++++---- src/codecs/avif/mod.rs | 2 + src/codecs/avif/yuv.rs | 891 +++++++++++++++++++++++++++++++++++++ 4 files changed, 1270 insertions(+), 86 deletions(-) create mode 100644 src/codecs/avif/yuv.rs diff --git a/Cargo.toml b/Cargo.toml index 4a6108a26c..000e85b90f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,7 +42,6 @@ num-traits = { version = "0.2.0" } # Optional dependencies color_quant = { version = "1.1", optional = true } dav1d = { version = "0.10.3", optional = true } -dcv-color-primitives = { version = "0.6.1", optional = true } exr = { version = "1.5.0", optional = true } gif = { version = "0.13", optional = true } image-webp = { version = "0.2.0", optional = true } @@ -88,7 +87,7 @@ webp = ["dep:image-webp"] rayon = ["dep:rayon", "ravif?/threading"] # Enables multi-threading nasm = ["ravif?/asm"] # Enables use of nasm by rav1e (requires nasm to be installed) color_quant = ["dep:color_quant"] # Enables color quantization -avif-native = ["dep:mp4parse", "dep:dcv-color-primitives", "dep:dav1d"] # Enable native dependency libdav1d +avif-native = ["dep:mp4parse", "dep:dav1d"] # Enable native dependency libdav1d benchmarks = [] # Build some inline benchmarks. Useful only during development (requires nightly Rust) [[bench]] diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index 15b2244b09..5a28e55f8d 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -10,8 +10,8 @@ use std::marker::PhantomData; use crate::error::{DecodingError, ImageFormatHint, UnsupportedError, UnsupportedErrorKind}; use crate::{ColorType, ImageDecoder, ImageError, ImageFormat, ImageResult}; +use crate::codecs::avif::yuv::*; use dav1d::{PixelLayout, PlanarImageComponent}; -use dcv_color_primitives as dcp; use mp4parse::{read_avif, ParseStrictness}; fn error_map>>(err: E) -> ImageError { @@ -56,17 +56,7 @@ impl AvifDecoder { match picture.bit_depth() { 8 => (), - 10 | 12 => { - return ImageResult::Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormatHint::Exact(ImageFormat::Avif), - UnsupportedErrorKind::GenericFeature(format!( - "Only 8 bit depth is supported but was {}", - picture.bit_depth() - )), - ), - )) - } + 10 | 12 => (), _ => { return ImageResult::Err(ImageError::Decoding(DecodingError::new( ImageFormatHint::Exact(ImageFormat::Avif), @@ -86,13 +76,30 @@ impl AvifDecoder { } } +fn reshape_plane(source: &[u8], stride: usize, width: usize, height: usize) -> Vec { + let mut target_plane = vec![0u16; width * height]; + for (shaped_row, src_row) in target_plane + .chunks_exact_mut(width) + .zip(source.chunks_exact(stride)) + { + for (dst, src) in shaped_row.iter_mut().zip(src_row.chunks_exact(2)) { + *dst = u16::from_le_bytes([src[0], src[1]]); + } + } + target_plane +} + impl ImageDecoder for AvifDecoder { fn dimensions(&self) -> (u32, u32) { (self.picture.width(), self.picture.height()) } fn color_type(&self) -> ColorType { - ColorType::Rgba8 + if self.picture.bit_depth() == 8 { + ColorType::Rgba8 + } else { + ColorType::Rgba16 + } } fn icc_profile(&mut self) -> ImageResult>> { @@ -102,84 +109,369 @@ impl ImageDecoder for AvifDecoder { fn read_image(self, buf: &mut [u8]) -> ImageResult<()> { assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); - if 
self.picture.pixel_layout() != PixelLayout::I400 { - let pixel_format = match self.picture.pixel_layout() { - PixelLayout::I400 => todo!(), - PixelLayout::I420 => dcp::PixelFormat::I420, - PixelLayout::I422 => dcp::PixelFormat::I422, - PixelLayout::I444 => dcp::PixelFormat::I444, - }; - let src_color_space = match (self.picture.color_primaries(), self.picture.color_range()) - { - (dav1d::pixel::ColorPrimaries::BT709, dav1d::pixel::YUVRange::Full) => { - dcp::ColorSpace::Bt709FR - } - (dav1d::pixel::ColorPrimaries::BT709, dav1d::pixel::YUVRange::Limited) => { - dcp::ColorSpace::Bt709 - } - (_, dav1d::pixel::YUVRange::Full) => dcp::ColorSpace::Bt601FR, - (_, dav1d::pixel::YUVRange::Limited) => dcp::ColorSpace::Bt601, - }; - let src_format = dcp::ImageFormat { - pixel_format, - color_space: src_color_space, - num_planes: 3, - }; - let dst_format = dcp::ImageFormat { - pixel_format: dcp::PixelFormat::Rgba, - color_space: dcp::ColorSpace::Rgb, - num_planes: 1, - }; - let (width, height) = self.dimensions(); - let planes = &[ - self.picture.plane(PlanarImageComponent::Y), - self.picture.plane(PlanarImageComponent::U), - self.picture.plane(PlanarImageComponent::V), - ]; - let src_buffers = planes.iter().map(AsRef::as_ref).collect::>(); - let strides = &[ - self.picture.stride(PlanarImageComponent::Y) as usize, - self.picture.stride(PlanarImageComponent::U) as usize, - self.picture.stride(PlanarImageComponent::V) as usize, - ]; - let dst_buffers = &mut [&mut buf[..]]; - dcp::convert_image( - width, - height, - &src_format, - Some(strides), - &src_buffers, - &dst_format, - None, - dst_buffers, - ) - .map_err(error_map)?; - } else { - let plane = self.picture.plane(PlanarImageComponent::Y); - buf.copy_from_slice(plane.as_ref()); - } + let (width, height) = self.dimensions(); - if let Some(picture) = self.alpha_picture { - if picture.pixel_layout() != PixelLayout::I400 { + let yuv_range = match self.picture.color_range() { + dav1d::pixel::YUVRange::Limited => YuvIntensityRange::Tv, + dav1d::pixel::YUVRange::Full => YuvIntensityRange::Pc, + }; + let color_matrix = match self.picture.color_primaries() { + dav1d::pixel::ColorPrimaries::Reserved0 | dav1d::pixel::ColorPrimaries::Reserved => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "Using 'Reserved' color matrix is not supported".to_string(), + ), + ), + )); + } + dav1d::pixel::ColorPrimaries::BT709 => YuvStandardMatrix::Bt709, + // This is arguable, some applications prefer to go with Bt.709 as default some applications as Bt.601 + // For ex. 
chrome always prefer Bt.709 even for SD content + // However, nowadays standard should be Bt.709 for HD+ size otherwise Bt.601 + dav1d::pixel::ColorPrimaries::Unspecified => YuvStandardMatrix::Bt709, + dav1d::pixel::ColorPrimaries::BT470M => YuvStandardMatrix::Bt470_6, + dav1d::pixel::ColorPrimaries::BT470BG => YuvStandardMatrix::Bt601, + dav1d::pixel::ColorPrimaries::ST170M => YuvStandardMatrix::Smpte240, + dav1d::pixel::ColorPrimaries::ST240M => YuvStandardMatrix::Smpte240, + dav1d::pixel::ColorPrimaries::Film => YuvStandardMatrix::Bt2020, + dav1d::pixel::ColorPrimaries::BT2020 => YuvStandardMatrix::Bt2020, + dav1d::pixel::ColorPrimaries::ST428 => YuvStandardMatrix::Bt709, + dav1d::pixel::ColorPrimaries::P3DCI => YuvStandardMatrix::Bt709, + dav1d::pixel::ColorPrimaries::P3Display => YuvStandardMatrix::Bt709, + dav1d::pixel::ColorPrimaries::Tech3213 => { return Err(ImageError::Unsupported( UnsupportedError::from_format_and_kind( ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(format!( - "Alpha must be PixelLayout::I400 but was: {:?}", - picture.pixel_layout() // PixelLayout does not implement display - )), + UnsupportedErrorKind::GenericFeature("Unknown color matrix".to_string()), ), )); } - let stride = picture.stride(PlanarImageComponent::Y) as usize; - let plane = picture.plane(PlanarImageComponent::Y); - let width = picture.width(); - for (buf, slice) in Iterator::zip( - buf.chunks_exact_mut(width as usize * 4), - plane.as_ref().chunks_exact(stride), - ) { - for i in 0..width as usize { - buf[3 + i * 4] = slice[i]; + }; + + if self.picture.bit_depth() == 8 { + if self.picture.pixel_layout() != PixelLayout::I400 { + let worker = match self.picture.pixel_layout() { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 => yuv420_to_rgba, + PixelLayout::I422 => yuv422_to_rgba, + PixelLayout::I444 => yuv444_to_rgba, + }; + + let ref_y = self.picture.plane(PlanarImageComponent::Y); + let ref_u = self.picture.plane(PlanarImageComponent::U); + let ref_v = self.picture.plane(PlanarImageComponent::V); + let image = YuvPlanarImage::new( + ref_y.as_ref(), + self.picture.stride(PlanarImageComponent::Y) as usize, + ref_u.as_ref(), + self.picture.stride(PlanarImageComponent::U) as usize, + ref_v.as_ref(), + self.picture.stride(PlanarImageComponent::V) as usize, + width as usize, + height as usize, + ); + + let res = worker(image, buf, 8, yuv_range, color_matrix); + + if let Err(err) = res { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(err), + ), + )); + } + } else { + let plane = self.picture.plane(PlanarImageComponent::Y); + + let gray_image = YuvGrayImage::new( + plane.as_ref(), + self.picture.stride(PlanarImageComponent::Y) as usize, + width as usize, + height as usize, + ); + + let cr = yuv400_to_rgba(gray_image, buf, 8, yuv_range, color_matrix); + if let Err(err) = cr { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(err), + ), + )); + } + } + + if let Some(picture) = self.alpha_picture { + if picture.pixel_layout() != PixelLayout::I400 { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Alpha must be PixelLayout::I400 but was: {:?}", + picture.pixel_layout() // PixelLayout does not implement display + )), + ), + )); + } + let stride = 
picture.stride(PlanarImageComponent::Y) as usize; + let plane = picture.plane(PlanarImageComponent::Y); + let width = picture.width(); + for (buf, slice) in Iterator::zip( + buf.chunks_exact_mut(width as usize * 4), + plane.as_ref().chunks_exact(stride), + ) { + for (rgba, a_src) in buf.chunks_exact_mut(4).zip(slice) { + rgba[3] = *a_src; + } + } + } + } else { + // 8+ bit-depth case + let rgba16_buf: &mut [u16] = match bytemuck::try_cast_slice_mut(buf) { + Ok(slice) => slice, + Err(_) => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "Incorrectly determined image type".to_string(), + ), + ), + )); + } + }; + + // dav1d may return not aligned and not correctly constrained data, + // or at least I can't find guarantees on that + // so if it is happened, instead casting we'll need to reshape it into a target slice + // required criteria: bytemuck allows this data align, and stride must be dividable by 2 + + let mut y_plane_stride = self.picture.stride(PlanarImageComponent::Y) >> 1; + + let ref_y = self.picture.plane(PlanarImageComponent::Y); + let mut _bind_y = vec![]; + + let mut shape_y_plane = || { + y_plane_stride = width; + _bind_y = reshape_plane( + ref_y.as_ref(), + self.picture.stride(PlanarImageComponent::Y) as usize, + width as usize, + height as usize, + ); + }; + + let y_plane: &[u16] = if self.picture.stride(PlanarImageComponent::Y) as usize & 1 == 0 + { + match bytemuck::try_cast_slice(ref_y.as_ref()) { + Ok(slice) => slice, + Err(_) => { + shape_y_plane(); + _bind_y.as_slice() + } + } + } else { + shape_y_plane(); + _bind_y.as_slice() + }; + + if self.picture.pixel_layout() != PixelLayout::I400 { + let mut u_plane_stride = self.picture.stride(PlanarImageComponent::U) >> 1; + + let ref_u = self.picture.plane(PlanarImageComponent::U); + let mut _bind_u = vec![]; + let ref_v = self.picture.plane(PlanarImageComponent::V); + let mut _bind_v = vec![]; + + let mut shape_u_plane = || { + u_plane_stride = match self.picture.pixel_layout() { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 | PixelLayout::I422 => (width + 1) / 2, + PixelLayout::I444 => width, + }; + let u_plane_height = match self.picture.pixel_layout() { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 => (height + 1) / 2, + PixelLayout::I422 | PixelLayout::I444 => height, + }; + _bind_u = reshape_plane( + ref_u.as_ref(), + self.picture.stride(PlanarImageComponent::U) as usize, + u_plane_stride as usize, + u_plane_height as usize, + ); + }; + + let u_plane: &[u16] = + if self.picture.stride(PlanarImageComponent::U) as usize & 1 == 0 { + match bytemuck::try_cast_slice(ref_u.as_ref()) { + Ok(slice) => slice, + Err(_) => { + shape_u_plane(); + _bind_u.as_slice() + } + } + } else { + shape_u_plane(); + _bind_u.as_slice() + }; + + let mut v_plane_stride = self.picture.stride(PlanarImageComponent::V) >> 1; + + let mut shape_v_plane = || { + v_plane_stride = match self.picture.pixel_layout() { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 | PixelLayout::I422 => (width + 1) / 2, + PixelLayout::I444 => width, + }; + let v_plane_height = match self.picture.pixel_layout() { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 => (height + 1) / 2, + PixelLayout::I422 | PixelLayout::I444 => height, + }; + _bind_v = reshape_plane( + ref_v.as_ref(), + self.picture.stride(PlanarImageComponent::V) as usize, + v_plane_stride as usize, + v_plane_height as usize, + ); + }; + + let v_plane: &[u16] = 
+ if self.picture.stride(PlanarImageComponent::V) as usize & 1 == 0 { + match bytemuck::try_cast_slice(ref_v.as_ref()) { + Ok(slice) => slice, + Err(_) => { + shape_v_plane(); + _bind_v.as_slice() + } + } + } else { + shape_v_plane(); + _bind_v.as_slice() + }; + + let worker = match self.picture.pixel_layout() { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 => yuv420_to_rgba, + PixelLayout::I422 => yuv422_to_rgba, + PixelLayout::I444 => yuv444_to_rgba, + }; + + let image = YuvPlanarImage::new( + y_plane, + y_plane_stride as usize, + u_plane, + u_plane_stride as usize, + v_plane, + v_plane_stride as usize, + width as usize, + height as usize, + ); + + let res = worker( + image, + rgba16_buf, + self.picture.bit_depth() as u32, + yuv_range, + color_matrix, + ); + + if let Err(err) = res { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(err), + ), + )); + } + } else { + let gray_image = YuvGrayImage::new( + y_plane, + y_plane_stride as usize, + width as usize, + height as usize, + ); + let cr = yuv400_to_rgba( + gray_image, + rgba16_buf, + self.picture.bit_depth() as u32, + yuv_range, + color_matrix, + ); + if let Err(err) = cr { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(err), + ), + )); + } + } + + // Squashing alpha plane into a picture + if let Some(picture) = self.alpha_picture { + if picture.pixel_layout() != PixelLayout::I400 { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Alpha must be PixelLayout::I400 but was: {:?}", + picture.pixel_layout() // PixelLayout does not implement display + )), + ), + )); + } + let ref_a = self.picture.plane(PlanarImageComponent::Y); + let mut _bind_a = vec![]; + + let mut a_plane_stride = self.picture.stride(PlanarImageComponent::Y) >> 1; + + let mut shape_a_plane = || { + a_plane_stride = width; + _bind_a = reshape_plane( + ref_a.as_ref(), + picture.stride(PlanarImageComponent::Y) as usize, + width as usize, + height as usize, + ); + }; + + let a_plane: &[u16] = if picture.stride(PlanarImageComponent::Y) as usize & 1 == 0 { + match bytemuck::try_cast_slice(ref_y.as_ref()) { + Ok(slice) => slice, + Err(_) => { + shape_a_plane(); + _bind_a.as_slice() + } + } + } else { + shape_a_plane(); + _bind_a.as_slice() + }; + + let width = picture.width(); + for (buf, slice) in Iterator::zip( + rgba16_buf.chunks_exact_mut(width as usize * 4), + a_plane.as_ref().chunks_exact(a_plane_stride as usize), + ) { + for (rgba, a_src) in buf.chunks_exact_mut(4).zip(slice) { + rgba[3] = *a_src; + } + } + } + + // Expand current bit depth to target 16 + let target_expand_bits = 16u32.saturating_sub(self.picture.bit_depth() as u32); + if target_expand_bits > 0 { + for rgba in rgba16_buf.chunks_exact_mut(4) { + rgba[0] = rgba[0] << target_expand_bits; + rgba[1] = rgba[1] << target_expand_bits; + rgba[2] = rgba[2] << target_expand_bits; + rgba[3] = rgba[3] << target_expand_bits; } } } diff --git a/src/codecs/avif/mod.rs b/src/codecs/avif/mod.rs index 89edfc2c97..7c18cc5f30 100644 --- a/src/codecs/avif/mod.rs +++ b/src/codecs/avif/mod.rs @@ -12,3 +12,5 @@ pub use self::encoder::{AvifEncoder, ColorSpace}; mod decoder; #[cfg(feature = "avif")] mod encoder; +#[cfg(feature = "avif-native")] +mod yuv; diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs new 
file mode 100644 index 0000000000..41f566ba97 --- /dev/null +++ b/src/codecs/avif/yuv.rs @@ -0,0 +1,891 @@ +use num_traits::AsPrimitive; + +#[derive(Debug, Copy, Clone)] +struct CbCrInverseTransform { + pub y_coef: T, + pub cr_coef: T, + pub cb_coef: T, + pub g_coeff_1: T, + pub g_coeff_2: T, +} + +impl CbCrInverseTransform { + fn new( + y_coef: T, + cr_coef: T, + cb_coef: T, + g_coeff_1: T, + g_coeff_2: T, + ) -> CbCrInverseTransform { + CbCrInverseTransform { + y_coef, + cr_coef, + cb_coef, + g_coeff_1, + g_coeff_2, + } + } +} + +impl CbCrInverseTransform { + fn to_integers(self, precision: u32) -> CbCrInverseTransform { + let precision_scale: i32 = 1i32 << (precision as i32); + let cr_coef = (self.cr_coef * precision_scale as f32) as i32; + let cb_coef = (self.cb_coef * precision_scale as f32) as i32; + let y_coef = (self.y_coef * precision_scale as f32) as i32; + let g_coef_1 = (self.g_coeff_1 * precision_scale as f32) as i32; + let g_coef_2 = (self.g_coeff_2 * precision_scale as f32) as i32; + CbCrInverseTransform:: { + y_coef, + cr_coef, + cb_coef, + g_coeff_1: g_coef_1, + g_coeff_2: g_coef_2, + } + } +} + +/// Transformation RGB to YUV with coefficients as specified in [ITU-R](https://www.itu.int/rec/T-REC-H.273/en) +fn get_inverse_transform( + range_bgra: u32, + range_y: u32, + range_uv: u32, + kr: f32, + kb: f32, + precision: u32, +) -> Result, String> { + let range_uv = range_bgra as f32 / range_uv as f32; + let y_coef = range_bgra as f32 / range_y as f32; + let cr_coeff = (2f32 * (1f32 - kr)) * range_uv; + let cb_coeff = (2f32 * (1f32 - kb)) * range_uv; + let kg = 1.0f32 - kr - kb; + if kg == 0f32 { + return Err("1.0f - kr - kg must not be 0".parse().unwrap()); + } + let g_coeff_1 = (2f32 * ((1f32 - kr) * kr / kg)) * range_uv; + let g_coeff_2 = (2f32 * ((1f32 - kb) * kb / kg)) * range_uv; + let exact_transform = + CbCrInverseTransform::new(y_coef, cr_coeff, cb_coeff, g_coeff_1, g_coeff_2); + Ok(exact_transform.to_integers(precision)) +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialOrd, PartialEq)] +/// Declares YUV range TV (limited) or Full, +/// more info [ITU-R](https://www.itu.int/rec/T-REC-H.273/en) +pub(crate) enum YuvIntensityRange { + /// Limited range Y ∈ [16 << (depth - 8), 16 << (depth - 8) + 224 << (depth - 8)], + /// UV ∈ [-1 << (depth - 1), -1 << (depth - 1) + 1 << (depth - 1)] + Tv, + /// Full range Y ∈ [0, 2^bit_depth - 1], + /// UV ∈ [-1 << (depth - 1), -1 << (depth - 1) + 2^bit_depth - 1] + Pc, +} + +#[derive(Debug, Copy, Clone, PartialOrd, PartialEq)] +struct YuvChromaRange { + pub bias_y: u32, + pub bias_uv: u32, + pub range_y: u32, + pub range_uv: u32, + pub range: YuvIntensityRange, +} + +const fn get_yuv_range(depth: u32, range: YuvIntensityRange) -> YuvChromaRange { + match range { + YuvIntensityRange::Tv => YuvChromaRange { + bias_y: 16 << (depth - 8), + bias_uv: 1 << (depth - 1), + range_y: 219 << (depth - 8), + range_uv: 224 << (depth - 8), + range, + }, + YuvIntensityRange::Pc => YuvChromaRange { + bias_y: 0, + bias_uv: 1 << (depth - 1), + range_uv: (1 << depth) - 1, + range_y: (1 << depth) - 1, + range, + }, + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialOrd, PartialEq)] +/// Declares standard prebuilt YUV conversion matrices, +/// check [ITU-R](https://www.itu.int/rec/T-REC-H.273/en) information for more info +pub(crate) enum YuvStandardMatrix { + Bt601, + Bt709, + Bt2020, + Smpte240, + Bt470_6, +} + +#[derive(Debug, Copy, Clone, PartialOrd, PartialEq)] +struct YuvBias { + pub kr: f32, + pub kb: f32, +} + +const fn get_kr_kb(matrix: 
YuvStandardMatrix) -> YuvBias { + match matrix { + YuvStandardMatrix::Bt601 => YuvBias { + kr: 0.299f32, + kb: 0.114f32, + }, + YuvStandardMatrix::Bt709 => YuvBias { + kr: 0.2126f32, + kb: 0.0722f32, + }, + YuvStandardMatrix::Bt2020 => YuvBias { + kr: 0.2627f32, + kb: 0.0593f32, + }, + YuvStandardMatrix::Smpte240 => YuvBias { + kr: 0.087f32, + kb: 0.212f32, + }, + YuvStandardMatrix::Bt470_6 => YuvBias { + kr: 0.2220f32, + kb: 0.0713f32, + }, + } +} + +pub(crate) struct YuvPlanarImage<'a, T> { + y_plane: &'a [T], + y_stride: usize, + u_plane: &'a [T], + u_stride: usize, + v_plane: &'a [T], + v_stride: usize, + width: usize, + height: usize, +} + +impl<'a, T> YuvPlanarImage<'a, T> { + #[allow(clippy::too_many_arguments)] + pub(crate) fn new( + y_plane: &'a [T], + y_stride: usize, + u_plane: &'a [T], + u_stride: usize, + v_plane: &'a [T], + v_stride: usize, + width: usize, + height: usize, + ) -> Self { + YuvPlanarImage { + y_plane, + y_stride, + u_plane, + u_stride, + v_plane, + v_stride, + width, + height, + } + } +} + +pub(crate) struct YuvGrayImage<'a, T> { + y_plane: &'a [T], + y_stride: usize, + width: usize, + height: usize, +} + +impl<'a, T> YuvGrayImage<'a, T> { + pub(crate) fn new(y_plane: &'a [T], y_stride: usize, width: usize, height: usize) -> Self { + YuvGrayImage { + y_plane, + y_stride, + width, + height, + } + } +} + +/// Converts Yuv 400 planar format to Rgba +/// +/// This support not tightly packed data and crop image using stride in place. +/// +/// # Arguments +/// +/// * `y_plane`: Luma plane +/// * `y_stride`: Luma stride +/// * `u_plane`: U chroma plane +/// * `u_stride`: U chroma stride, even odd images is supported this always must match `u_stride * height` +/// * `v_plane`: V chroma plane +/// * `v_stride`: V chroma stride, even odd images is supported this always must match `v_stride * height` +/// * `rgba`: RGBA image layout +/// * `width`: Image width +/// * `height`: Image height +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv400_to_rgba + 'static>( + image: YuvGrayImage, + rgba: &mut [V], + bit_depth: u32, + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), String> +where + i32: AsPrimitive, +{ + let y_plane = image.y_plane; + let y_stride = image.y_stride; + let height = image.height; + let width = image.width; + if y_plane.len() != y_stride * height { + return Err(format!( + "Luma plane expected {} bytes, got {}", + y_stride * height, + y_plane.len() + )); + } + const CHANNELS: usize = 4; + let rgba_stride = width * CHANNELS; + + // If luma plane is in full range it can be just redistributed across the image + if range == YuvIntensityRange::Pc { + let y_iter = y_plane.chunks_exact(y_stride); + let rgb_iter = rgba.chunks_exact_mut(rgba_stride); + + for (y_src, rgb) in y_iter.zip(rgb_iter) { + let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); + + for (y_src, rgb_dst) in y_src.iter().zip(rgb_chunks) { + let r = *y_src; + rgb_dst[0] = r; + rgb_dst[1] = r; + rgb_dst[2] = r; + rgb_dst[3] = r; + } + } + return Ok(()); + } + + let range = get_yuv_range(bit_depth, range); + let kr_kb = get_kr_kb(matrix); + const PRECISION: i32 = 11; + const ROUNDING: i32 = 1 << (PRECISION - 1); + let inverse_transform = get_inverse_transform( + (1 << bit_depth) - 1, + range.range_y, + range.range_uv, + kr_kb.kr, + kr_kb.kb, + PRECISION as u32, + )?; + let y_coef = inverse_transform.y_coef; + + let bias_y = range.bias_y as i32; + + if rgba.len() != width * height * CHANNELS { + return Err(format!( + 
"RGB image layout expected {} bytes, got {}", + width * height * CHANNELS, + rgba.len() + )); + } + + let max_value = (1 << bit_depth) - 1; + + let y_iter = y_plane.chunks_exact(y_stride); + let rgb_iter = rgba.chunks_exact_mut(rgba_stride); + + for (y_src, rgb) in y_iter.zip(rgb_iter) { + let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); + + for (y_src, rgb_dst) in y_src.iter().zip(rgb_chunks) { + let y_value = (y_src.as_() - bias_y) * y_coef; + + let r = ((y_value + ROUNDING) >> PRECISION).clamp(0, max_value); + rgb_dst[0] = r.as_(); + rgb_dst[1] = r.as_(); + rgb_dst[2] = r.as_(); + rgb_dst[3] = max_value.as_(); + } + } + + Ok(()) +} + +/// Converts YUV420 to Rgb +/// +/// This support not tightly packed data and crop image using stride in place. +/// Stride here is not supports u16 as it can be in passed from FFI. +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv420_to_rgba + 'static>( + image: YuvPlanarImage, + rgb: &mut [V], + bit_depth: u32, + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), String> +where + i32: AsPrimitive, +{ + let y_plane = image.y_plane; + let u_plane = image.u_plane; + let v_plane = image.v_plane; + let y_stride = image.y_stride; + let u_stride = image.u_stride; + let v_stride = image.v_stride; + let chroma_height = (image.height + 1) / 2; + if y_plane.len() != y_stride * image.height { + return Err(format!( + "Luma plane expected {} bytes, got {}", + y_stride * image.height, + y_plane.len() + )); + } + + if u_plane.len() != u_stride * chroma_height { + return Err(format!( + "U plane expected {} bytes, got {}", + u_stride * chroma_height, + u_plane.len() + )); + } + + if v_plane.len() != v_stride * chroma_height { + return Err(format!( + "V plane expected {} bytes, got {}", + v_stride * chroma_height, + v_plane.len() + )); + } + + let max_value = (1 << bit_depth) - 1; + + const PRECISION: i32 = 11; + const ROUNDING: i32 = 1 << (PRECISION - 1); + + let range = get_yuv_range(bit_depth, range); + let kr_kb = get_kr_kb(matrix); + let inverse_transform = get_inverse_transform( + (1 << bit_depth) - 1, + range.range_y, + range.range_uv, + kr_kb.kr, + kr_kb.kb, + PRECISION as u32, + )?; + let cr_coef = inverse_transform.cr_coef; + let cb_coef = inverse_transform.cb_coef; + let y_coef = inverse_transform.y_coef; + let g_coef_1 = inverse_transform.g_coeff_1; + let g_coef_2 = inverse_transform.g_coeff_2; + + let bias_y = range.bias_y as i32; + let bias_uv = range.bias_uv as i32; + + const CHANNELS: usize = 4; + + if rgb.len() != image.width * image.height * CHANNELS { + return Err(format!( + "RGB image layout expected {} bytes, got {}", + image.width * image.height * CHANNELS, + rgb.len() + )); + } + + let rgb_stride = image.width * CHANNELS; + + let y_iter = y_plane.chunks_exact(y_stride * 2); + let rgb_iter = rgb.chunks_exact_mut(rgb_stride * 2); + let u_iter = u_plane.chunks_exact(u_stride); + let v_iter = v_plane.chunks_exact(v_stride); + + /* + Sample 4x4 YUV420 planar image + start_y + 0: Y00 Y01 Y02 Y03 + start_y + 4: Y04 Y05 Y06 Y07 + start_y + 8: Y08 Y09 Y10 Y11 + start_y + 12: Y12 Y13 Y14 Y15 + start_cb + 0: Cb00 Cb01 + start_cb + 2: Cb02 Cb03 + start_cr + 0: Cr00 Cr01 + start_cr + 2: Cr02 Cr03 + + For 4 luma components (2x2 on rows and cols) there are 1 chroma Cb/Cr components. + Luma channel must have always exact size as RGB target layout, but chroma is not. 
+ + We're sectioning an image by pair of rows, then for each pair of luma and RGB row, + there is one chroma row. + + As chroma is shrunk by factor of 2 then we're processing by pairs of RGB and luma, + for each RGB and luma pair there is one chroma component. + + If image have odd width then luma channel must be exact, and we're replicating last + chroma component. + + If image have odd height then luma channel is exact, and we're replicating last chroma rows. + */ + + for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { + // Since we're processing two rows in one loop we need to re-slice once more + let y_iter = y_src.chunks_exact(y_stride); + let rgb_iter = rgb.chunks_exact_mut(rgb_stride); + for (y_src, rgb) in y_iter.zip(rgb_iter) { + let y_iter = y_src.chunks_exact(2); + let rgb_chunks = rgb.chunks_exact_mut(CHANNELS * 2); + for (((y_src, &u_src), &v_src), rgb_dst) in y_iter.zip(u_src).zip(v_src).zip(rgb_chunks) + { + let y_value: i32 = (y_src[0].as_() - bias_y) * y_coef; + let cb_value: i32 = u_src.as_() - bias_uv; + let cr_value: i32 = v_src.as_() - bias_uv; + + let r = + ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = + ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) + >> PRECISION) + .clamp(0, max_value); + + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + + let y_value = (y_src[1].as_() - bias_y) * y_coef; + + let r = + ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = + ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) + >> PRECISION) + .clamp(0, max_value); + + rgb_dst[4] = r.as_(); + rgb_dst[5] = g.as_(); + rgb_dst[6] = b.as_(); + rgb_dst[7] = max_value.as_(); + } + + // Process remainder if width is odd. 
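+            // For an odd width the last (unpaired) luma column reuses the last
+            // chroma sample of this row.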
+ if image.width & 1 != 0 { + let y_left = y_src.chunks_exact(2).remainder(); + let rgb_chunks = rgb + .chunks_exact_mut(CHANNELS * 2) + .into_remainder() + .chunks_exact_mut(CHANNELS); + let u_iter = u_src.iter().rev(); + let v_iter = v_src.iter().rev(); + + for (((y_src, u_src), v_src), rgb_dst) in + y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks) + { + let y_value = (y_src.as_() - bias_y) * y_coef; + let cb_value = u_src.as_() - bias_uv; + let cr_value = v_src.as_() - bias_uv; + + let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION) + .clamp(0, max_value); + let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION) + .clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) + >> PRECISION) + .clamp(0, max_value); + + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } + } + } + } + + // Process remainder if height is odd + + let y_iter = y_plane + .chunks_exact(y_stride * 2) + .remainder() + .chunks_exact(y_stride); + let rgb_iter = rgb.chunks_exact_mut(rgb_stride).rev(); + let u_iter = u_plane.chunks_exact(u_stride).rev(); + let v_iter = v_plane.chunks_exact(v_stride).rev(); + + for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { + let y_iter = y_src.chunks_exact(2); + let rgb_chunks = rgb.chunks_exact_mut(CHANNELS * 2); + for (((y_src, u_src), v_src), rgb_dst) in y_iter.zip(u_src).zip(v_src).zip(rgb_chunks) { + let y_value = (y_src[0].as_() - bias_y) * y_coef; + let cb_value = u_src.as_() - bias_uv; + let cr_value = v_src.as_() - bias_uv; + + let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) + .clamp(0, max_value); + + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + + let y_value = (y_src[1].as_() - bias_y) * y_coef; + + let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) + .clamp(0, max_value); + + rgb_dst[4] = r.as_(); + rgb_dst[5] = g.as_(); + rgb_dst[6] = b.as_(); + rgb_dst[7] = max_value.as_(); + } + + let y_left = y_src.chunks_exact(2).remainder(); + let rgb_chunks = rgb + .chunks_exact_mut(CHANNELS * 2) + .into_remainder() + .chunks_exact_mut(CHANNELS); + let u_iter = u_plane.iter().rev(); + let v_iter = v_plane.iter().rev(); + + // Process remainder if width is odd. + + for (((y_src, u_src), v_src), rgb_dst) in + y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks) + { + let y_value = (y_src.as_() - bias_y) * y_coef; + let cb_value = u_src.as_() - bias_uv; + let cr_value = v_src.as_() - bias_uv; + + let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) + .clamp(0, max_value); + + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } + } + + Ok(()) +} + +/// Converts Yuv 422 planar format to Rgba +/// +/// This support not tightly packed data and crop image using stride in place. 
+/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv422_to_rgba + 'static>( + image: YuvPlanarImage, + rgb: &mut [V], + bit_depth: u32, + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), String> +where + i32: AsPrimitive, +{ + let y_plane = image.y_plane; + let u_plane = image.u_plane; + let v_plane = image.v_plane; + let y_stride = image.y_stride; + let u_stride = image.u_stride; + let v_stride = image.v_stride; + let height = image.height; + let width = image.width; + if y_plane.len() != y_stride * height { + return Err(format!( + "Luma plane expected {} bytes, got {}", + y_stride * height, + y_plane.len() + )); + } + + if u_plane.len() != u_stride * height { + return Err(format!( + "U plane expected {} bytes, got {}", + u_stride * height, + u_plane.len() + )); + } + + if v_plane.len() != v_stride * height { + return Err(format!( + "V plane expected {} bytes, got {}", + v_stride * height, + v_plane.len() + )); + } + + let max_value = (1 << bit_depth) - 1; + + let range = get_yuv_range(bit_depth, range); + let kr_kb = get_kr_kb(matrix); + const PRECISION: i32 = 11; + const ROUNDING: i32 = 1 << (PRECISION - 1); + let inverse_transform = get_inverse_transform( + (1 << bit_depth) - 1, + range.range_y, + range.range_uv, + kr_kb.kr, + kr_kb.kb, + PRECISION as u32, + )?; + let cr_coef = inverse_transform.cr_coef; + let cb_coef = inverse_transform.cb_coef; + let y_coef = inverse_transform.y_coef; + let g_coef_1 = inverse_transform.g_coeff_1; + let g_coef_2 = inverse_transform.g_coeff_2; + + let bias_y = range.bias_y as i32; + let bias_uv = range.bias_uv as i32; + + const CHANNELS: usize = 4; + + if rgb.len() != width * height * CHANNELS { + return Err(format!( + "RGB image layout expected {} bytes, got {}", + width * height * CHANNELS, + rgb.len() + )); + } + + /* + Sample 4x4 YUV422 planar image + start_y + 0: Y00 Y01 Y02 Y03 + start_y + 4: Y04 Y05 Y06 Y07 + start_y + 8: Y08 Y09 Y10 Y11 + start_y + 12: Y12 Y13 Y14 Y15 + start_cb + 0: Cb00 Cb01 + start_cb + 2: Cb02 Cb03 + start_cb + 4: Cb04 Cb05 + start_cb + 6: Cb06 Cb07 + start_cr + 0: Cr00 Cr01 + start_cr + 2: Cr02 Cr03 + start_cr + 4: Cr04 Cr05 + start_cr + 6: Cr06 Cr07 + + For 2 luma components there are 1 chroma Cb/Cr components. + Luma channel must have always exact size as RGB target layout, but chroma is not. + + As chroma is shrunk by factor of 2 then we're processing by pairs of RGB and luma, + for each RGB and luma pair there is one chroma component. + + If image have odd width then luma channel must be exact, and we're replicating last + chroma component. 
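+
+    For example, in the sample above Y00/Y01 share Cb00/Cr00 and Y02/Y03 share
+    Cb01/Cr01; subsampling is horizontal only, so every luma row has its own
+    chroma row.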
+ */ + + let rgb_stride = width * CHANNELS; + + let y_iter = y_plane.chunks_exact(y_stride); + let rgb_iter = rgb.chunks_exact_mut(rgb_stride); + let u_iter = u_plane.chunks_exact(u_stride); + let v_iter = v_plane.chunks_exact(v_stride); + + for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { + let y_iter = y_src.chunks_exact(2); + let rgb_chunks = rgb.chunks_exact_mut(CHANNELS * 2); + + for (((y_src, u_src), v_src), rgb_dst) in y_iter.zip(u_src).zip(v_src).zip(rgb_chunks) { + let y_value = (y_src[0].as_() - bias_y) * y_coef; + let cb_value = u_src.as_() - bias_uv; + let cr_value = v_src.as_() - bias_uv; + + let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) + .clamp(0, max_value); + + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + + let y_value = (y_src[1].as_() - bias_y) * y_coef; + + let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) + .clamp(0, max_value); + + rgb_dst[4] = r.as_(); + rgb_dst[5] = g.as_(); + rgb_dst[6] = b.as_(); + rgb_dst[7] = max_value.as_(); + } + + // Process left pixels for odd images, this should work since luma must be always exact + if width & 1 != 0 { + let y_left = y_src.chunks_exact(2).remainder(); + let rgb_chunks = rgb + .chunks_exact_mut(CHANNELS * 2) + .into_remainder() + .chunks_exact_mut(CHANNELS); + let u_iter = u_src.iter().rev(); + let v_iter = v_src.iter().rev(); + + for (((y_src, u_src), v_src), rgb_dst) in + y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks) + { + let y_value = (y_src.as_() - bias_y) * y_coef; + let cb_value = u_src.as_() - bias_uv; + let cr_value = v_src.as_() - bias_uv; + + let r = + ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = + ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) + >> PRECISION) + .clamp(0, max_value); + + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } + } + } + + Ok(()) +} + +/// Converts YUV444 to Rgb +/// +/// This support not tightly packed data and crop image using stride in place. 
+/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv444_to_rgba + 'static>( + image: YuvPlanarImage, + rgb: &mut [V], + bit_depth: u32, + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), String> +where + i32: AsPrimitive, +{ + let y_plane = image.y_plane; + let u_plane = image.u_plane; + let v_plane = image.v_plane; + let y_stride = image.y_stride; + let u_stride = image.u_stride; + let v_stride = image.v_stride; + let height = image.height; + let width = image.width; + if y_plane.len() != y_stride * height { + return Err(format!( + "Luma plane expected {} bytes, got {}", + y_stride * height, + y_plane.len() + )); + } + + if u_plane.len() != u_stride * height { + return Err(format!( + "U plane expected {} bytes, got {}", + u_stride * height, + u_plane.len() + )); + } + + if v_plane.len() != v_stride * height { + return Err(format!( + "V plane expected {} bytes, got {}", + v_stride * height, + v_plane.len() + )); + } + + let range = get_yuv_range(bit_depth, range); + let kr_kb = get_kr_kb(matrix); + const PRECISION: i32 = 11; + const ROUNDING: i32 = 1 << (PRECISION - 1); + let inverse_transform = get_inverse_transform( + (1 << bit_depth) - 1, + range.range_y, + range.range_uv, + kr_kb.kr, + kr_kb.kb, + PRECISION as u32, + )?; + let cr_coef = inverse_transform.cr_coef; + let cb_coef = inverse_transform.cb_coef; + let y_coef = inverse_transform.y_coef; + let g_coef_1 = inverse_transform.g_coeff_1; + let g_coef_2 = inverse_transform.g_coeff_2; + + let bias_y = range.bias_y as i32; + let bias_uv = range.bias_uv as i32; + + const CHANNELS: usize = 4; + + if rgb.len() != width * height * CHANNELS { + return Err(format!( + "RGB image layout expected {} bytes, got {}", + width * height * CHANNELS, + rgb.len() + )); + } + + let max_value = (1 << bit_depth) - 1; + + let rgb_stride = width * CHANNELS; + + let y_iter = y_plane.chunks_exact(y_stride); + let rgb_iter = rgb.chunks_exact_mut(rgb_stride); + let u_iter = u_plane.chunks_exact(u_stride); + let v_iter = v_plane.chunks_exact(v_stride); + + for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { + let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); + + for (((y_src, u_src), v_src), rgb_dst) in y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks) + { + let y_value = (y_src.as_() - bias_y) * y_coef; + let cb_value = u_src.as_() - bias_uv; + let cr_value = v_src.as_() - bias_uv; + + let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) + .clamp(0, max_value); + + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } + } + + Ok(()) +} From bfff09183cbbd96775fdb430ac754e0567119db5 Mon Sep 17 00:00:00 2001 From: awxkee Date: Tue, 29 Oct 2024 12:26:34 +0000 Subject: [PATCH 02/21] Added YUV decoder, drop dcv, added high bit depth support --- src/codecs/avif/yuv.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index 41f566ba97..963baee512 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -213,12 +213,7 @@ impl<'a, T> YuvGrayImage<'a, T> { /// /// # Arguments /// -/// * `y_plane`: Luma plane -/// * `y_stride`: 
Luma stride -/// * `u_plane`: U chroma plane -/// * `u_stride`: U chroma stride, even odd images is supported this always must match `u_stride * height` -/// * `v_plane`: V chroma plane -/// * `v_stride`: V chroma stride, even odd images is supported this always must match `v_stride * height` +/// * `image`: see [YuvGrayImage] /// * `rgba`: RGBA image layout /// * `width`: Image width /// * `height`: Image height From c6b462f553ae2d61f73e869e8dd1499226f36351 Mon Sep 17 00:00:00 2001 From: awxkee Date: Tue, 29 Oct 2024 12:32:58 +0000 Subject: [PATCH 03/21] Fix clippy --- src/codecs/avif/decoder.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index 5a28e55f8d..1c9ab4d9de 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -468,10 +468,10 @@ impl ImageDecoder for AvifDecoder { let target_expand_bits = 16u32.saturating_sub(self.picture.bit_depth() as u32); if target_expand_bits > 0 { for rgba in rgba16_buf.chunks_exact_mut(4) { - rgba[0] = rgba[0] << target_expand_bits; - rgba[1] = rgba[1] << target_expand_bits; - rgba[2] = rgba[2] << target_expand_bits; - rgba[3] = rgba[3] << target_expand_bits; + rgba[0] <<= target_expand_bits; + rgba[1] <<= target_expand_bits; + rgba[2] <<= target_expand_bits; + rgba[3] <<= target_expand_bits; } } } From 52facf83f8e7b6d960dc47c6e5b27599e1c5619f Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Tue, 29 Oct 2024 16:52:37 +0000 Subject: [PATCH 04/21] Bug fixes, change color matrix determining --- src/codecs/avif/decoder.rs | 201 ++++++++++++++++++++++++++----------- src/codecs/avif/yuv.rs | 190 +++++++++++++++++++++++++++++------ 2 files changed, 304 insertions(+), 87 deletions(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index 1c9ab4d9de..c4320eb2d0 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -115,8 +115,19 @@ impl ImageDecoder for AvifDecoder { dav1d::pixel::YUVRange::Limited => YuvIntensityRange::Tv, dav1d::pixel::YUVRange::Full => YuvIntensityRange::Pc, }; - let color_matrix = match self.picture.color_primaries() { - dav1d::pixel::ColorPrimaries::Reserved0 | dav1d::pixel::ColorPrimaries::Reserved => { + + let is_identity = + self.picture.matrix_coefficients() == dav1d::pixel::MatrixCoefficients::Identity; + + let color_matrix = match self.picture.matrix_coefficients() { + // Identity just a stub here, we'll handle it in different way + dav1d::pixel::MatrixCoefficients::Identity => YuvStandardMatrix::Bt709, + dav1d::pixel::MatrixCoefficients::BT709 => YuvStandardMatrix::Bt709, + // This is arguable, some applications prefer to go with Bt.709 as default some applications as Bt.601 + // For ex. `Chrome` always prefer Bt.709 even for SD content + // However, nowadays standard should be Bt.709 for HD+ size otherwise Bt.601 + dav1d::pixel::MatrixCoefficients::Unspecified => YuvStandardMatrix::Bt709, + dav1d::pixel::MatrixCoefficients::Reserved => { return Err(ImageError::Unsupported( UnsupportedError::from_format_and_kind( ImageFormat::Avif.into(), @@ -126,39 +137,82 @@ impl ImageDecoder for AvifDecoder { ), )); } - dav1d::pixel::ColorPrimaries::BT709 => YuvStandardMatrix::Bt709, - // This is arguable, some applications prefer to go with Bt.709 as default some applications as Bt.601 - // For ex. 
chrome always prefer Bt.709 even for SD content - // However, nowadays standard should be Bt.709 for HD+ size otherwise Bt.601 - dav1d::pixel::ColorPrimaries::Unspecified => YuvStandardMatrix::Bt709, - dav1d::pixel::ColorPrimaries::BT470M => YuvStandardMatrix::Bt470_6, - dav1d::pixel::ColorPrimaries::BT470BG => YuvStandardMatrix::Bt601, - dav1d::pixel::ColorPrimaries::ST170M => YuvStandardMatrix::Smpte240, - dav1d::pixel::ColorPrimaries::ST240M => YuvStandardMatrix::Smpte240, - dav1d::pixel::ColorPrimaries::Film => YuvStandardMatrix::Bt2020, - dav1d::pixel::ColorPrimaries::BT2020 => YuvStandardMatrix::Bt2020, - dav1d::pixel::ColorPrimaries::ST428 => YuvStandardMatrix::Bt709, - dav1d::pixel::ColorPrimaries::P3DCI => YuvStandardMatrix::Bt709, - dav1d::pixel::ColorPrimaries::P3Display => YuvStandardMatrix::Bt709, - dav1d::pixel::ColorPrimaries::Tech3213 => { + dav1d::pixel::MatrixCoefficients::BT470M => YuvStandardMatrix::Bt470_6, + dav1d::pixel::MatrixCoefficients::BT470BG => YuvStandardMatrix::Bt601, + dav1d::pixel::MatrixCoefficients::ST170M => YuvStandardMatrix::Smpte240, + dav1d::pixel::MatrixCoefficients::ST240M => YuvStandardMatrix::Smpte240, + dav1d::pixel::MatrixCoefficients::YCgCo => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "YCgCo matrix is not supported".to_string(), + ), + ), + )); + } + dav1d::pixel::MatrixCoefficients::BT2020NonConstantLuminance => { + YuvStandardMatrix::Bt2020 + } + dav1d::pixel::MatrixCoefficients::BT2020ConstantLuminance => { + // This matrix significantly differs from others because linearize values is required + // to compute Y instead of Y'. + // Actually it is almost everywhere is not implemented. + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "BT2020ConstantLuminance matrix is not supported".to_string(), + ), + ), + )); + } + dav1d::pixel::MatrixCoefficients::ST2085 => { return Err(ImageError::Unsupported( UnsupportedError::from_format_and_kind( ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature("Unknown color matrix".to_string()), + UnsupportedErrorKind::GenericFeature( + "ST2085 matrix is not supported".to_string(), + ), + ), + )); + } + dav1d::pixel::MatrixCoefficients::ChromaticityDerivedConstantLuminance + | dav1d::pixel::MatrixCoefficients::ChromaticityDerivedNonConstantLuminance => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "Chromaticity Derived Luminance matrix is not supported".to_string(), + ), + ), + )); + } + dav1d::pixel::MatrixCoefficients::ICtCp => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "ICtCp Derived Luminance matrix is not supported".to_string(), + ), ), )); } }; + if is_identity && self.picture.pixel_layout() != PixelLayout::I444 { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "Impossible YUV layout for Identity matrix".to_string(), + ), + ), + )); + } + if self.picture.bit_depth() == 8 { if self.picture.pixel_layout() != PixelLayout::I400 { - let worker = match self.picture.pixel_layout() { - PixelLayout::I400 => unreachable!(), - PixelLayout::I420 => yuv420_to_rgba, - PixelLayout::I422 => 
yuv422_to_rgba, - PixelLayout::I444 => yuv444_to_rgba, - }; - let ref_y = self.picture.plane(PlanarImageComponent::Y); let ref_u = self.picture.plane(PlanarImageComponent::U); let ref_v = self.picture.plane(PlanarImageComponent::V); @@ -173,15 +227,35 @@ impl ImageDecoder for AvifDecoder { height as usize, ); - let res = worker(image, buf, 8, yuv_range, color_matrix); + if !is_identity { + let worker = match self.picture.pixel_layout() { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 => yuv420_to_rgba, + PixelLayout::I422 => yuv422_to_rgba, + PixelLayout::I444 => yuv444_to_rgba, + }; - if let Err(err) = res { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(err), - ), - )); + let res = worker(image, buf, 8, yuv_range, color_matrix); + + if let Err(err) = res { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(err), + ), + )); + } + } else { + let res = gbr_to_rgba(image, buf, 8); + + if let Err(err) = res { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(err), + ), + )); + } } } else { let plane = self.picture.plane(PlanarImageComponent::Y); @@ -354,13 +428,6 @@ impl ImageDecoder for AvifDecoder { _bind_v.as_slice() }; - let worker = match self.picture.pixel_layout() { - PixelLayout::I400 => unreachable!(), - PixelLayout::I420 => yuv420_to_rgba, - PixelLayout::I422 => yuv422_to_rgba, - PixelLayout::I444 => yuv444_to_rgba, - }; - let image = YuvPlanarImage::new( y_plane, y_plane_stride as usize, @@ -372,21 +439,41 @@ impl ImageDecoder for AvifDecoder { height as usize, ); - let res = worker( - image, - rgba16_buf, - self.picture.bit_depth() as u32, - yuv_range, - color_matrix, - ); + if !is_identity { + let worker = match self.picture.pixel_layout() { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 => yuv420_to_rgba, + PixelLayout::I422 => yuv422_to_rgba, + PixelLayout::I444 => yuv444_to_rgba, + }; - if let Err(err) = res { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(err), - ), - )); + let res = worker( + image, + rgba16_buf, + self.picture.bit_depth() as u32, + yuv_range, + color_matrix, + ); + + if let Err(err) = res { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(err), + ), + )); + } + } else { + let res = gbr_to_rgba(image, rgba16_buf, self.picture.bit_depth() as u32); + + if let Err(err) = res { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature(err), + ), + )); + } } } else { let gray_image = YuvGrayImage::new( @@ -425,10 +512,10 @@ impl ImageDecoder for AvifDecoder { ), )); } - let ref_a = self.picture.plane(PlanarImageComponent::Y); + let ref_a = picture.plane(PlanarImageComponent::Y); let mut _bind_a = vec![]; - let mut a_plane_stride = self.picture.stride(PlanarImageComponent::Y) >> 1; + let mut a_plane_stride = picture.stride(PlanarImageComponent::Y) >> 1; let mut shape_a_plane = || { a_plane_stride = width; @@ -456,7 +543,7 @@ impl ImageDecoder for AvifDecoder { let width = picture.width(); for (buf, slice) in Iterator::zip( rgba16_buf.chunks_exact_mut(width as usize * 4), 
- a_plane.as_ref().chunks_exact(a_plane_stride as usize), + a_plane.chunks_exact(a_plane_stride as usize), ) { for (rgba, a_src) in buf.chunks_exact_mut(4).zip(slice) { rgba[3] = *a_src; diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index 963baee512..f024acf480 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -209,7 +209,7 @@ impl<'a, T> YuvGrayImage<'a, T> { /// Converts Yuv 400 planar format to Rgba /// -/// This support not tightly packed data and crop image using stride in place. +/// Stride here is not supports u16 as it can be in passed from FFI. /// /// # Arguments /// @@ -235,6 +235,7 @@ where let y_stride = image.y_stride; let height = image.height; let width = image.width; + if y_plane.len() != y_stride * height { return Err(format!( "Luma plane expected {} bytes, got {}", @@ -242,9 +243,18 @@ where y_plane.len() )); } + + if !(8..=16).contains(&bit_depth) { + return Err(format!( + "Unexpected bit depth value {}, only 8...16 is supported", + bit_depth + )); + } const CHANNELS: usize = 4; let rgba_stride = width * CHANNELS; + let max_value = (1 << bit_depth) - 1; + // If luma plane is in full range it can be just redistributed across the image if range == YuvIntensityRange::Pc { let y_iter = y_plane.chunks_exact(y_stride); @@ -258,7 +268,7 @@ where rgb_dst[0] = r; rgb_dst[1] = r; rgb_dst[2] = r; - rgb_dst[3] = r; + rgb_dst[3] = max_value.as_(); } } return Ok(()); @@ -288,8 +298,6 @@ where )); } - let max_value = (1 << bit_depth) - 1; - let y_iter = y_plane.chunks_exact(y_stride); let rgb_iter = rgba.chunks_exact_mut(rgba_stride); @@ -312,7 +320,6 @@ where /// Converts YUV420 to Rgb /// -/// This support not tightly packed data and crop image using stride in place. /// Stride here is not supports u16 as it can be in passed from FFI. /// /// # Arguments @@ -340,6 +347,7 @@ where let u_stride = image.u_stride; let v_stride = image.v_stride; let chroma_height = (image.height + 1) / 2; + if y_plane.len() != y_stride * image.height { return Err(format!( "Luma plane expected {} bytes, got {}", @@ -364,6 +372,13 @@ where )); } + if !(8..=16).contains(&bit_depth) { + return Err(format!( + "Unexpected bit depth value {}, only 8...16 is supported", + bit_depth + )); + } + let max_value = (1 << bit_depth) - 1; const PRECISION: i32 = 11; @@ -548,32 +563,37 @@ where rgb_dst[7] = max_value.as_(); } - let y_left = y_src.chunks_exact(2).remainder(); - let rgb_chunks = rgb - .chunks_exact_mut(CHANNELS * 2) - .into_remainder() - .chunks_exact_mut(CHANNELS); - let u_iter = u_plane.iter().rev(); - let v_iter = v_plane.iter().rev(); - // Process remainder if width is odd. 
- for (((y_src, u_src), v_src), rgb_dst) in - y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks) - { - let y_value = (y_src.as_() - bias_y) * y_coef; - let cb_value = u_src.as_() - bias_uv; - let cr_value = v_src.as_() - bias_uv; + if image.width & 1 != 0 { + let y_left = y_src.chunks_exact(2).remainder(); + let rgb_chunks = rgb + .chunks_exact_mut(CHANNELS * 2) + .into_remainder() + .chunks_exact_mut(CHANNELS); + let u_iter = u_plane.iter().rev(); + let v_iter = v_plane.iter().rev(); - let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) - .clamp(0, max_value); + for (((y_src, u_src), v_src), rgb_dst) in + y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks) + { + let y_value = (y_src.as_() - bias_y) * y_coef; + let cb_value = u_src.as_() - bias_uv; + let cr_value = v_src.as_() - bias_uv; - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); + let r = + ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let b = + ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) + >> PRECISION) + .clamp(0, max_value); + + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } } } @@ -582,7 +602,7 @@ where /// Converts Yuv 422 planar format to Rgba /// -/// This support not tightly packed data and crop image using stride in place. +/// Stride here is not supports u16 as it can be in passed from FFI. /// /// # Arguments /// @@ -610,6 +630,7 @@ where let v_stride = image.v_stride; let height = image.height; let width = image.width; + if y_plane.len() != y_stride * height { return Err(format!( "Luma plane expected {} bytes, got {}", @@ -634,6 +655,13 @@ where )); } + if !(8..=16).contains(&bit_depth) { + return Err(format!( + "Unexpected bit depth value {}, only 8...16 is supported", + bit_depth + )); + } + let max_value = (1 << bit_depth) - 1; let range = get_yuv_range(bit_depth, range); @@ -767,9 +795,9 @@ where Ok(()) } -/// Converts YUV444 to Rgb +/// Converts Yuv 444 planar format to Rgba /// -/// This support not tightly packed data and crop image using stride in place. +/// Stride here is not supports u16 as it can be in passed from FFI. /// /// # Arguments /// @@ -797,6 +825,7 @@ where let v_stride = image.v_stride; let height = image.height; let width = image.width; + if y_plane.len() != y_stride * height { return Err(format!( "Luma plane expected {} bytes, got {}", @@ -821,6 +850,13 @@ where )); } + if !(8..=16).contains(&bit_depth) { + return Err(format!( + "Unexpected bit depth value {}, only 8...16 is supported", + bit_depth + )); + } + let range = get_yuv_range(bit_depth, range); let kr_kb = get_kr_kb(matrix); const PRECISION: i32 = 11; @@ -884,3 +920,97 @@ where Ok(()) } + +/// Converts Gbr planar format to Rgba +/// +/// Stride here is not supports u16 as it can be in passed from FFI. 
+/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn gbr_to_rgba + 'static>( + image: YuvPlanarImage, + rgb: &mut [V], + bit_depth: u32, +) -> Result<(), String> +where + i32: AsPrimitive, +{ + let y_plane = image.y_plane; + let u_plane = image.u_plane; + let v_plane = image.v_plane; + let y_stride = image.y_stride; + let u_stride = image.u_stride; + let v_stride = image.v_stride; + let height = image.height; + let width = image.width; + + if y_plane.len() != y_stride * height { + return Err(format!( + "Luma plane expected {} bytes, got {}", + y_stride * height, + y_plane.len() + )); + } + + if u_plane.len() != u_stride * height { + return Err(format!( + "U plane expected {} bytes, got {}", + u_stride * height, + u_plane.len() + )); + } + + if v_plane.len() != v_stride * height { + return Err(format!( + "V plane expected {} bytes, got {}", + v_stride * height, + v_plane.len() + )); + } + + if !(8..=16).contains(&bit_depth) { + return Err(format!( + "Unexpected bit depth value {}, only 8...16 is supported", + bit_depth + )); + } + + const CHANNELS: usize = 4; + + if rgb.len() != width * height * CHANNELS { + return Err(format!( + "RGB image layout expected {} bytes, got {}", + width * height * CHANNELS, + rgb.len() + )); + } + + let max_value = (1 << bit_depth) - 1; + + let rgb_stride = width * CHANNELS; + + let y_iter = y_plane.chunks_exact(y_stride); + let rgb_iter = rgb.chunks_exact_mut(rgb_stride); + let u_iter = u_plane.chunks_exact(u_stride); + let v_iter = v_plane.chunks_exact(v_stride); + + for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { + let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); + + for (((y_src, u_src), v_src), rgb_dst) in y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks) + { + rgb_dst[0] = v_src.as_(); + rgb_dst[1] = y_src.as_(); + rgb_dst[2] = u_src.as_(); + rgb_dst[3] = max_value.as_(); + } + } + + Ok(()) +} From 8bdf5a8e4c5219ca9442fc3521a81fc7ea4026b2 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Tue, 29 Oct 2024 17:07:12 +0000 Subject: [PATCH 05/21] Fix GBR decoding --- src/codecs/avif/yuv.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index f024acf480..ee58730288 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -1003,11 +1003,11 @@ where for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); - for (((y_src, u_src), v_src), rgb_dst) in y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks) + for (((&y_src, &u_src), &v_src), rgb_dst) in y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks) { - rgb_dst[0] = v_src.as_(); - rgb_dst[1] = y_src.as_(); - rgb_dst[2] = u_src.as_(); + rgb_dst[0] = v_src; + rgb_dst[1] = y_src; + rgb_dst[2] = u_src; rgb_dst[3] = max_value.as_(); } } From 2e4ec87554032d880136afb98a105572b544c9fe Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Tue, 29 Oct 2024 17:08:16 +0000 Subject: [PATCH 06/21] Rustfmt --- src/codecs/avif/yuv.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index ee58730288..e584eb361c 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -1003,7 +1003,8 @@ where for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { let rgb_chunks = 
rgb.chunks_exact_mut(CHANNELS); - for (((&y_src, &u_src), &v_src), rgb_dst) in y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks) + for (((&y_src, &u_src), &v_src), rgb_dst) in + y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks) { rgb_dst[0] = v_src; rgb_dst[1] = y_src; From f400ce853d91921b535e4f3a3f46a30fac062797 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Tue, 29 Oct 2024 17:47:03 +0000 Subject: [PATCH 07/21] Small refactor --- src/codecs/avif/decoder.rs | 160 ++++++++++++++++++------------------- src/codecs/avif/yuv.rs | 3 +- 2 files changed, 80 insertions(+), 83 deletions(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index c4320eb2d0..b43696472e 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -76,6 +76,7 @@ impl AvifDecoder { } } +/// Reshaping incorrectly aligned or sized FFI data into Rust constraints fn reshape_plane(source: &[u8], stride: usize, width: usize, height: usize) -> Vec { let mut target_plane = vec![0u16; width * height]; for (shaped_row, src_row) in target_plane @@ -89,6 +90,80 @@ fn reshape_plane(source: &[u8], stride: usize, width: usize, height: usize) -> V target_plane } +/// Getting one of prebuilt matrix of fails +fn get_matrix( + david_matrix: dav1d::pixel::MatrixCoefficients, +) -> Result { + match david_matrix { + // Identity just a stub here, we'll handle it in different way + dav1d::pixel::MatrixCoefficients::Identity => Ok(YuvStandardMatrix::Bt709), + dav1d::pixel::MatrixCoefficients::BT709 => Ok(YuvStandardMatrix::Bt709), + // This is arguable, some applications prefer to go with Bt.709 as default, + // and some applications prefer Bt.601 as default. + // For ex. `Chrome` always prefer Bt.709 even for SD content + // However, nowadays standard should be Bt.709 for HD+ size otherwise Bt.601 + dav1d::pixel::MatrixCoefficients::Unspecified => Ok(YuvStandardMatrix::Bt709), + dav1d::pixel::MatrixCoefficients::Reserved => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "Using 'Reserved' color matrix is not supported".to_string(), + ), + ), + )), + dav1d::pixel::MatrixCoefficients::BT470M => Ok(YuvStandardMatrix::Bt470_6), + dav1d::pixel::MatrixCoefficients::BT470BG => Ok(YuvStandardMatrix::Bt601), + dav1d::pixel::MatrixCoefficients::ST170M => Ok(YuvStandardMatrix::Smpte240), + dav1d::pixel::MatrixCoefficients::ST240M => Ok(YuvStandardMatrix::Smpte240), + // This is an experimental matrix in libavif yet. + dav1d::pixel::MatrixCoefficients::YCgCo => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature("YCgCo matrix is not supported".to_string()), + ), + )), + dav1d::pixel::MatrixCoefficients::BT2020NonConstantLuminance => { + Ok(YuvStandardMatrix::Bt2020) + } + dav1d::pixel::MatrixCoefficients::BT2020ConstantLuminance => { + // This matrix significantly differs from others because linearize values is required + // to compute Y instead of Y'. + // Actually it is almost everywhere is not implemented. 
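// Illustrative aside (hypothetical, not what this patch does): the comment above
// mentions that some decoders pick the matrix for `Unspecified` from the frame size,
// falling back to BT.601 for SD-sized content, while this patch always chooses Bt709.
// A resolution-based variant would look roughly like this:
fn guess_matrix_for_unspecified(width: u32, height: u32) -> YuvStandardMatrix {
    if width <= 720 && height <= 576 {
        YuvStandardMatrix::Bt601
    } else {
        YuvStandardMatrix::Bt709
    }
}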
+ Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "BT2020ConstantLuminance matrix is not supported".to_string(), + ), + ), + )) + } + dav1d::pixel::MatrixCoefficients::ST2085 => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature("ST2085 matrix is not supported".to_string()), + ), + )), + dav1d::pixel::MatrixCoefficients::ChromaticityDerivedConstantLuminance + | dav1d::pixel::MatrixCoefficients::ChromaticityDerivedNonConstantLuminance => Err( + ImageError::Unsupported(UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "Chromaticity Derived Luminance matrix is not supported".to_string(), + ), + )), + ), + dav1d::pixel::MatrixCoefficients::ICtCp => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature( + "ICtCp Derived Luminance matrix is not supported".to_string(), + ), + ), + )), + } +} + impl ImageDecoder for AvifDecoder { fn dimensions(&self) -> (u32, u32) { (self.picture.width(), self.picture.height()) @@ -119,87 +194,9 @@ impl ImageDecoder for AvifDecoder { let is_identity = self.picture.matrix_coefficients() == dav1d::pixel::MatrixCoefficients::Identity; - let color_matrix = match self.picture.matrix_coefficients() { - // Identity just a stub here, we'll handle it in different way - dav1d::pixel::MatrixCoefficients::Identity => YuvStandardMatrix::Bt709, - dav1d::pixel::MatrixCoefficients::BT709 => YuvStandardMatrix::Bt709, - // This is arguable, some applications prefer to go with Bt.709 as default some applications as Bt.601 - // For ex. `Chrome` always prefer Bt.709 even for SD content - // However, nowadays standard should be Bt.709 for HD+ size otherwise Bt.601 - dav1d::pixel::MatrixCoefficients::Unspecified => YuvStandardMatrix::Bt709, - dav1d::pixel::MatrixCoefficients::Reserved => { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature( - "Using 'Reserved' color matrix is not supported".to_string(), - ), - ), - )); - } - dav1d::pixel::MatrixCoefficients::BT470M => YuvStandardMatrix::Bt470_6, - dav1d::pixel::MatrixCoefficients::BT470BG => YuvStandardMatrix::Bt601, - dav1d::pixel::MatrixCoefficients::ST170M => YuvStandardMatrix::Smpte240, - dav1d::pixel::MatrixCoefficients::ST240M => YuvStandardMatrix::Smpte240, - dav1d::pixel::MatrixCoefficients::YCgCo => { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature( - "YCgCo matrix is not supported".to_string(), - ), - ), - )); - } - dav1d::pixel::MatrixCoefficients::BT2020NonConstantLuminance => { - YuvStandardMatrix::Bt2020 - } - dav1d::pixel::MatrixCoefficients::BT2020ConstantLuminance => { - // This matrix significantly differs from others because linearize values is required - // to compute Y instead of Y'. - // Actually it is almost everywhere is not implemented. 
- return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature( - "BT2020ConstantLuminance matrix is not supported".to_string(), - ), - ), - )); - } - dav1d::pixel::MatrixCoefficients::ST2085 => { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature( - "ST2085 matrix is not supported".to_string(), - ), - ), - )); - } - dav1d::pixel::MatrixCoefficients::ChromaticityDerivedConstantLuminance - | dav1d::pixel::MatrixCoefficients::ChromaticityDerivedNonConstantLuminance => { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature( - "Chromaticity Derived Luminance matrix is not supported".to_string(), - ), - ), - )); - } - dav1d::pixel::MatrixCoefficients::ICtCp => { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature( - "ICtCp Derived Luminance matrix is not supported".to_string(), - ), - ), - )); - } - }; + let color_matrix = get_matrix(self.picture.matrix_coefficients())?; + // Identity matrix should be possible only on 444 if is_identity && self.picture.pixel_layout() != PixelLayout::I444 { return Err(ImageError::Unsupported( UnsupportedError::from_format_and_kind( @@ -216,6 +213,7 @@ impl ImageDecoder for AvifDecoder { let ref_y = self.picture.plane(PlanarImageComponent::Y); let ref_u = self.picture.plane(PlanarImageComponent::U); let ref_v = self.picture.plane(PlanarImageComponent::V); + let image = YuvPlanarImage::new( ref_y.as_ref(), self.picture.stride(PlanarImageComponent::Y) as usize, @@ -321,7 +319,7 @@ impl ImageDecoder for AvifDecoder { // dav1d may return not aligned and not correctly constrained data, // or at least I can't find guarantees on that // so if it is happened, instead casting we'll need to reshape it into a target slice - // required criteria: bytemuck allows this data align, and stride must be dividable by 2 + // required criteria: bytemuck allows this align of this data, and stride must be dividable by 2 let mut y_plane_stride = self.picture.stride(PlanarImageComponent::Y) >> 1; diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index e584eb361c..9d20b5f130 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -1,6 +1,7 @@ use num_traits::AsPrimitive; #[derive(Debug, Copy, Clone)] +/// Representation of inversion matrix struct CbCrInverseTransform { pub y_coef: T, pub cr_coef: T, @@ -215,8 +216,6 @@ impl<'a, T> YuvGrayImage<'a, T> { /// /// * `image`: see [YuvGrayImage] /// * `rgba`: RGBA image layout -/// * `width`: Image width -/// * `height`: Image height /// * `range`: see [YuvIntensityRange] /// * `matrix`: see [YuvStandardMatrix] /// From d305c0d49d80874e532285d39afc279a82347c32 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Tue, 29 Oct 2024 17:51:34 +0000 Subject: [PATCH 08/21] Some clarification --- src/codecs/avif/decoder.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index b43696472e..138431d829 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -129,6 +129,8 @@ fn get_matrix( // This matrix significantly differs from others because linearize values is required // to compute Y instead of Y'. // Actually it is almost everywhere is not implemented. 
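// Illustrative aside (hypothetical helper, not part of this patch): with
// MatrixCoefficients::Identity the three planes carry G, B and R directly instead of
// luma plus subsampled chroma, which is why this series restricts Identity to 4:4:4
// and routes it through gbr_to_rgba rather than a YUV matrix. Per pixel that mapping
// is simply:
fn gbr_pixel_to_rgba8(g: u8, b: u8, r: u8) -> [u8; 4] {
    // dav1d exposes the planes as Y = G, U = B, V = R
    [r, g, b, u8::MAX]
}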
+ // Libavif + libheif missing this also so actually AVIF images + // with CL BT.2020 might be made only by mistake Err(ImageError::Unsupported( UnsupportedError::from_format_and_kind( ImageFormat::Avif.into(), From fecff101d75ae0c85eb250d747fc7ae5120140e8 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Tue, 29 Oct 2024 17:55:41 +0000 Subject: [PATCH 09/21] Some clarification --- src/codecs/avif/decoder.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index 138431d829..bafdb99a05 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -188,6 +188,16 @@ impl ImageDecoder for AvifDecoder { let (width, height) = self.dimensions(); + // This is suspicious if this happens, better fail early + if width == 0 || height == 0 { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::GenericFeature("Invalid image dimensions".to_string()), + ), + )); + } + let yuv_range = match self.picture.color_range() { dav1d::pixel::YUVRange::Limited => YuvIntensityRange::Tv, dav1d::pixel::YUVRange::Full => YuvIntensityRange::Pc, From 102675418faca740adb07deef00335ad20e24123 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Wed, 30 Oct 2024 17:15:42 +0000 Subject: [PATCH 10/21] Refactor, performed some cleaning, input check improvements, changed methods visibility --- src/codecs/avif/decoder.rs | 620 +++++++++++--------- src/codecs/avif/yuv.rs | 1123 ++++++++++++++++++++++++------------ 2 files changed, 1078 insertions(+), 665 deletions(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index bafdb99a05..1e23dcebfa 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -4,10 +4,14 @@ /// /// [AVIF]: https://aomediacodec.github.io/av1-avif/ use std::error::Error; +use std::fmt::{Display, Formatter}; use std::io::Read; use std::marker::PhantomData; -use crate::error::{DecodingError, ImageFormatHint, UnsupportedError, UnsupportedErrorKind}; +use crate::error::{ + DecodingError, ImageFormatHint, LimitError, LimitErrorKind, UnsupportedError, + UnsupportedErrorKind, +}; use crate::{ColorType, ImageDecoder, ImageError, ImageFormat, ImageResult}; use crate::codecs::avif::yuv::*; @@ -28,6 +32,43 @@ pub struct AvifDecoder { icc_profile: Option>, } +#[derive(Debug, Clone, PartialEq, Eq)] +enum AvifDecoderError { + AlphaPlaneFormat(PixelLayout), + MemoryLayout, + YuvLayoutOnIdentityMatrix(PixelLayout), +} + +impl Display for AvifDecoderError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + AvifDecoderError::AlphaPlaneFormat(pixel_layout) => match pixel_layout { + PixelLayout::I400 => unreachable!("This option must be handled correctly"), + PixelLayout::I420 => f.write_str("Alpha layout must be 4:0:0 but it was 4:2:0"), + PixelLayout::I422 => f.write_str("Alpha layout must be 4:0:0 but it was 4:2:2"), + PixelLayout::I444 => f.write_str("Alpha layout must be 4:0:0 but it was 4:4:4"), + }, + AvifDecoderError::MemoryLayout => { + f.write_str("Unexpected data size for current RGBx layout") + } + AvifDecoderError::YuvLayoutOnIdentityMatrix(pixel_layout) => match pixel_layout { + PixelLayout::I400 => { + f.write_str("YUV layout on 'Identity' matrix must be 4:4:4 but it was 4:0:0") + } + PixelLayout::I420 => { + f.write_str("YUV layout on 'Identity' matrix must be 4:4:4 but it was 4:2:0") + } + PixelLayout::I422 => { + f.write_str("YUV layout on 'Identity' matrix must be 4:4:4 but it 
was 4:2:2") + } + PixelLayout::I444 => unreachable!("This option must be handled correctly"), + }, + } + } +} + +impl Error for AvifDecoderError {} + impl AvifDecoder { /// Create a new decoder that reads its input from `r`. pub fn new(mut r: R) -> ImageResult { @@ -90,6 +131,100 @@ fn reshape_plane(source: &[u8], stride: usize, width: usize, height: usize) -> V target_plane } +struct Plane16View<'a> { + data: std::borrow::Cow<'a, [u16]>, + stride: usize, +} + +/// This is correct to transmute FFI data for Y plane and Alpha plane +fn transmute_y_plane16( + plane: &dav1d::Plane, + stride: usize, + width: usize, + height: usize, +) -> Plane16View { + let mut y_plane_stride = stride >> 1; + + let mut bind_y = vec![]; + let plane_ref = plane.as_ref(); + + let mut shape_y_plane = || { + y_plane_stride = width; + bind_y = reshape_plane(plane_ref, stride, width, height); + }; + + if stride & 1 == 0 { + match bytemuck::try_cast_slice(plane_ref) { + Ok(slice) => Plane16View { + data: std::borrow::Cow::Borrowed(slice), + stride: y_plane_stride, + }, + Err(_) => { + shape_y_plane(); + Plane16View { + data: std::borrow::Cow::Owned(bind_y), + stride: y_plane_stride, + } + } + } + } else { + shape_y_plane(); + Plane16View { + data: std::borrow::Cow::Owned(bind_y), + stride: y_plane_stride, + } + } +} + +/// This is correct to transmute FFI data for Y plane and Alpha plane +fn transmute_chroma_plane16( + plane: &dav1d::Plane, + pixel_layout: PixelLayout, + stride: usize, + width: usize, + height: usize, +) -> Plane16View { + let plane_ref = plane.as_ref(); + let mut chroma_plane_stride = stride >> 1; + let mut bind_chroma = vec![]; + + let mut shape_chroma_plane = || { + chroma_plane_stride = match pixel_layout { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 | PixelLayout::I422 => (width + 1) / 2, + PixelLayout::I444 => width, + }; + let u_plane_height = match pixel_layout { + PixelLayout::I400 => unreachable!(), + PixelLayout::I420 => (height + 1) / 2, + PixelLayout::I422 | PixelLayout::I444 => height, + }; + bind_chroma = reshape_plane(plane_ref, stride, chroma_plane_stride, u_plane_height); + }; + + if stride & 1 == 0 { + match bytemuck::try_cast_slice(plane_ref) { + Ok(slice) => Plane16View { + data: std::borrow::Cow::Borrowed(slice), + stride: chroma_plane_stride, + }, + Err(_) => { + shape_chroma_plane(); + Plane16View { + data: std::borrow::Cow::Owned(bind_chroma), + stride: chroma_plane_stride, + } + } + } + } else { + shape_chroma_plane(); + Plane16View { + data: std::borrow::Cow::Owned(bind_chroma), + stride: chroma_plane_stride, + } + } +} + /// Getting one of prebuilt matrix of fails fn get_matrix( david_matrix: dav1d::pixel::MatrixCoefficients, @@ -166,6 +301,60 @@ fn get_matrix( } } +fn check_target_rgba_dimension_preconditions( + width: usize, + height: usize, +) -> Result<(), ImageError> { + // This is suspicious if this happens, better fail early + if width == 0 || height == 0 { + return Err(ImageError::Limits(LimitError::from_kind( + LimitErrorKind::DimensionError, + ))); + } + // Image dimensions must not exceed pointer size + let (v_stride, ow) = width.overflowing_mul(4); + if ow { + return Err(ImageError::Limits(LimitError::from_kind( + LimitErrorKind::InsufficientMemory, + ))); + } + let (_, ow) = v_stride.overflowing_mul(height); + if ow { + return Err(ImageError::Limits(LimitError::from_kind( + LimitErrorKind::InsufficientMemory, + ))); + } + Ok(()) +} + +fn check_plane_dimension_preconditions( + width: usize, + height: usize, + target_width: usize, + target_height: 
usize, +) -> Result<(), ImageError> { + // This is suspicious if this happens, better fail early + if width == 0 || height == 0 { + return Err(ImageError::Limits(LimitError::from_kind( + LimitErrorKind::DimensionError, + ))); + } + // Plane dimensions must not exceed pointer size + let (_, ow) = width.overflowing_mul(height); + if ow { + return Err(ImageError::Limits(LimitError::from_kind( + LimitErrorKind::InsufficientMemory, + ))); + } + // This should never happen that plane size differs from target size + if target_width != width || target_height != height { + return Err(ImageError::Limits(LimitError::from_kind( + LimitErrorKind::DimensionError, + ))); + } + Ok(()) +} + impl ImageDecoder for AvifDecoder { fn dimensions(&self) -> (u32, u32) { (self.picture.width(), self.picture.height()) @@ -186,17 +375,14 @@ impl ImageDecoder for AvifDecoder { fn read_image(self, buf: &mut [u8]) -> ImageResult<()> { assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); - let (width, height) = self.dimensions(); + let bit_depth = self.picture.bit_depth(); - // This is suspicious if this happens, better fail early - if width == 0 || height == 0 { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature("Invalid image dimensions".to_string()), - ), - )); - } + // Normally this should never happen, + // if this happens then there is an incorrect implementation somewhere else + assert!(bit_depth == 8 || bit_depth == 10 || bit_depth == 12); + + let (width, height) = self.dimensions(); + check_target_rgba_dimension_preconditions(width as usize, height as usize)?; let yuv_range = match self.picture.color_range() { dav1d::pixel::YUVRange::Limited => YuvIntensityRange::Tv, @@ -208,101 +394,75 @@ impl ImageDecoder for AvifDecoder { let color_matrix = get_matrix(self.picture.matrix_coefficients())?; - // Identity matrix should be possible only on 444 + // Identity matrix should be possible only on 4:4:4 if is_identity && self.picture.pixel_layout() != PixelLayout::I444 { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature( - "Impossible YUV layout for Identity matrix".to_string(), - ), - ), - )); + return Err(ImageError::Decoding(DecodingError::new( + ImageFormat::Avif.into(), + AvifDecoderError::YuvLayoutOnIdentityMatrix(self.picture.pixel_layout()), + ))); } - if self.picture.bit_depth() == 8 { + if bit_depth == 8 { if self.picture.pixel_layout() != PixelLayout::I400 { let ref_y = self.picture.plane(PlanarImageComponent::Y); let ref_u = self.picture.plane(PlanarImageComponent::U); let ref_v = self.picture.plane(PlanarImageComponent::V); - let image = YuvPlanarImage::new( - ref_y.as_ref(), - self.picture.stride(PlanarImageComponent::Y) as usize, - ref_u.as_ref(), - self.picture.stride(PlanarImageComponent::U) as usize, - ref_v.as_ref(), - self.picture.stride(PlanarImageComponent::V) as usize, - width as usize, - height as usize, - ); + let image = YuvPlanarImage { + y_plane: ref_y.as_ref(), + y_stride: self.picture.stride(PlanarImageComponent::Y) as usize, + u_plane: ref_u.as_ref(), + u_stride: self.picture.stride(PlanarImageComponent::U) as usize, + v_plane: ref_v.as_ref(), + v_stride: self.picture.stride(PlanarImageComponent::V) as usize, + width: width as usize, + height: height as usize, + }; if !is_identity { let worker = match self.picture.pixel_layout() { PixelLayout::I400 => unreachable!(), - PixelLayout::I420 => 
yuv420_to_rgba, - PixelLayout::I422 => yuv422_to_rgba, - PixelLayout::I444 => yuv444_to_rgba, + PixelLayout::I420 => yuv420_to_rgba8, + PixelLayout::I422 => yuv422_to_rgba8, + PixelLayout::I444 => yuv444_to_rgba8, }; - let res = worker(image, buf, 8, yuv_range, color_matrix); - - if let Err(err) = res { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(err), - ), - )); - } + worker(image, buf, yuv_range, color_matrix)?; } else { - let res = gbr_to_rgba(image, buf, 8); - - if let Err(err) = res { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(err), - ), - )); - } + gbr_to_rgba8(image, buf)?; } } else { let plane = self.picture.plane(PlanarImageComponent::Y); - let gray_image = YuvGrayImage::new( - plane.as_ref(), - self.picture.stride(PlanarImageComponent::Y) as usize, - width as usize, - height as usize, - ); + let gray_image = YuvGrayImage { + y_plane: plane.as_ref(), + y_stride: self.picture.stride(PlanarImageComponent::Y) as usize, + width: width as usize, + height: height as usize, + }; - let cr = yuv400_to_rgba(gray_image, buf, 8, yuv_range, color_matrix); - if let Err(err) = cr { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(err), - ), - )); - } + yuv400_to_rgba8(gray_image, buf, yuv_range, color_matrix)?; } + // Squashing alpha plane into a picture if let Some(picture) = self.alpha_picture { if picture.pixel_layout() != PixelLayout::I400 { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(format!( - "Alpha must be PixelLayout::I400 but was: {:?}", - picture.pixel_layout() // PixelLayout does not implement display - )), - ), - )); + return Err(ImageError::Decoding(DecodingError::new( + ImageFormat::Avif.into(), + AvifDecoderError::AlphaPlaneFormat(picture.pixel_layout()), + ))); } + + check_plane_dimension_preconditions( + picture.width() as usize, + picture.height() as usize, + width as usize, + height as usize, + )?; + let stride = picture.stride(PlanarImageComponent::Y) as usize; let plane = picture.plane(PlanarImageComponent::Y); - let width = picture.width(); + for (buf, slice) in Iterator::zip( buf.chunks_exact_mut(width as usize * 4), plane.as_ref().chunks_exact(stride), @@ -317,14 +477,10 @@ impl ImageDecoder for AvifDecoder { let rgba16_buf: &mut [u16] = match bytemuck::try_cast_slice_mut(buf) { Ok(slice) => slice, Err(_) => { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature( - "Incorrectly determined image type".to_string(), - ), - ), - )); + return Err(ImageError::Decoding(DecodingError::new( + ImageFormat::Avif.into(), + AvifDecoderError::MemoryLayout, + ))); } }; @@ -333,227 +489,122 @@ impl ImageDecoder for AvifDecoder { // so if it is happened, instead casting we'll need to reshape it into a target slice // required criteria: bytemuck allows this align of this data, and stride must be dividable by 2 - let mut y_plane_stride = self.picture.stride(PlanarImageComponent::Y) >> 1; + let y_dav1d_plane = self.picture.plane(PlanarImageComponent::Y); - let ref_y = self.picture.plane(PlanarImageComponent::Y); - let mut _bind_y = vec![]; + let y_plane_view = transmute_y_plane16( + &y_dav1d_plane, 
+ self.picture.stride(PlanarImageComponent::Y) as usize, + width as usize, + height as usize, + ); + + if self.picture.pixel_layout() != PixelLayout::I400 { + let u_dav1d_plane = self.picture.plane(PlanarImageComponent::U); + let v_dav1d_plane = self.picture.plane(PlanarImageComponent::V); - let mut shape_y_plane = || { - y_plane_stride = width; - _bind_y = reshape_plane( - ref_y.as_ref(), - self.picture.stride(PlanarImageComponent::Y) as usize, + let u_plane_view = transmute_chroma_plane16( + &u_dav1d_plane, + self.picture.pixel_layout(), + self.picture.stride(PlanarImageComponent::U) as usize, width as usize, height as usize, ); - }; - - let y_plane: &[u16] = if self.picture.stride(PlanarImageComponent::Y) as usize & 1 == 0 - { - match bytemuck::try_cast_slice(ref_y.as_ref()) { - Ok(slice) => slice, - Err(_) => { - shape_y_plane(); - _bind_y.as_slice() - } - } - } else { - shape_y_plane(); - _bind_y.as_slice() - }; - - if self.picture.pixel_layout() != PixelLayout::I400 { - let mut u_plane_stride = self.picture.stride(PlanarImageComponent::U) >> 1; - - let ref_u = self.picture.plane(PlanarImageComponent::U); - let mut _bind_u = vec![]; - let ref_v = self.picture.plane(PlanarImageComponent::V); - let mut _bind_v = vec![]; - - let mut shape_u_plane = || { - u_plane_stride = match self.picture.pixel_layout() { - PixelLayout::I400 => unreachable!(), - PixelLayout::I420 | PixelLayout::I422 => (width + 1) / 2, - PixelLayout::I444 => width, - }; - let u_plane_height = match self.picture.pixel_layout() { - PixelLayout::I400 => unreachable!(), - PixelLayout::I420 => (height + 1) / 2, - PixelLayout::I422 | PixelLayout::I444 => height, - }; - _bind_u = reshape_plane( - ref_u.as_ref(), - self.picture.stride(PlanarImageComponent::U) as usize, - u_plane_stride as usize, - u_plane_height as usize, - ); - }; - - let u_plane: &[u16] = - if self.picture.stride(PlanarImageComponent::U) as usize & 1 == 0 { - match bytemuck::try_cast_slice(ref_u.as_ref()) { - Ok(slice) => slice, - Err(_) => { - shape_u_plane(); - _bind_u.as_slice() - } - } - } else { - shape_u_plane(); - _bind_u.as_slice() - }; - - let mut v_plane_stride = self.picture.stride(PlanarImageComponent::V) >> 1; - - let mut shape_v_plane = || { - v_plane_stride = match self.picture.pixel_layout() { - PixelLayout::I400 => unreachable!(), - PixelLayout::I420 | PixelLayout::I422 => (width + 1) / 2, - PixelLayout::I444 => width, - }; - let v_plane_height = match self.picture.pixel_layout() { - PixelLayout::I400 => unreachable!(), - PixelLayout::I420 => (height + 1) / 2, - PixelLayout::I422 | PixelLayout::I444 => height, - }; - _bind_v = reshape_plane( - ref_v.as_ref(), - self.picture.stride(PlanarImageComponent::V) as usize, - v_plane_stride as usize, - v_plane_height as usize, - ); - }; - - let v_plane: &[u16] = - if self.picture.stride(PlanarImageComponent::V) as usize & 1 == 0 { - match bytemuck::try_cast_slice(ref_v.as_ref()) { - Ok(slice) => slice, - Err(_) => { - shape_v_plane(); - _bind_v.as_slice() - } - } - } else { - shape_v_plane(); - _bind_v.as_slice() - }; - - let image = YuvPlanarImage::new( - y_plane, - y_plane_stride as usize, - u_plane, - u_plane_stride as usize, - v_plane, - v_plane_stride as usize, + let v_plane_view = transmute_chroma_plane16( + &v_dav1d_plane, + self.picture.pixel_layout(), + self.picture.stride(PlanarImageComponent::V) as usize, width as usize, height as usize, ); + let image = YuvPlanarImage { + y_plane: y_plane_view.data.as_ref(), + y_stride: y_plane_view.stride, + u_plane: u_plane_view.data.as_ref(), + 
u_stride: u_plane_view.stride, + v_plane: v_plane_view.data.as_ref(), + v_stride: v_plane_view.stride, + width: width as usize, + height: height as usize, + }; + if !is_identity { let worker = match self.picture.pixel_layout() { PixelLayout::I400 => unreachable!(), - PixelLayout::I420 => yuv420_to_rgba, - PixelLayout::I422 => yuv422_to_rgba, - PixelLayout::I444 => yuv444_to_rgba, + PixelLayout::I420 => { + if bit_depth == 10 { + yuv420_to_rgba10 + } else { + yuv420_to_rgba12 + } + } + PixelLayout::I422 => { + if bit_depth == 10 { + yuv422_to_rgba10 + } else { + yuv422_to_rgba12 + } + } + PixelLayout::I444 => { + if bit_depth == 10 { + yuv444_to_rgba10 + } else { + yuv444_to_rgba12 + } + } }; - let res = worker( - image, - rgba16_buf, - self.picture.bit_depth() as u32, - yuv_range, - color_matrix, - ); - - if let Err(err) = res { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(err), - ), - )); - } + worker(image, rgba16_buf, yuv_range, color_matrix)?; } else { - let res = gbr_to_rgba(image, rgba16_buf, self.picture.bit_depth() as u32); - - if let Err(err) = res { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(err), - ), - )); - } + let worker = if bit_depth == 10 { + gbr_to_rgba10 + } else { + gbr_to_rgba12 + }; + worker(image, rgba16_buf)?; } } else { - let gray_image = YuvGrayImage::new( - y_plane, - y_plane_stride as usize, - width as usize, - height as usize, - ); - let cr = yuv400_to_rgba( - gray_image, - rgba16_buf, - self.picture.bit_depth() as u32, - yuv_range, - color_matrix, - ); - if let Err(err) = cr { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(err), - ), - )); - } + let gray_image = YuvGrayImage { + y_plane: y_plane_view.data.as_ref(), + y_stride: y_plane_view.stride, + width: width as usize, + height: height as usize, + }; + let worker = if bit_depth == 10 { + yuv400_to_rgba10 + } else { + yuv400_to_rgba12 + }; + worker(gray_image, rgba16_buf, yuv_range, color_matrix)?; } // Squashing alpha plane into a picture if let Some(picture) = self.alpha_picture { if picture.pixel_layout() != PixelLayout::I400 { - return Err(ImageError::Unsupported( - UnsupportedError::from_format_and_kind( - ImageFormat::Avif.into(), - UnsupportedErrorKind::GenericFeature(format!( - "Alpha must be PixelLayout::I400 but was: {:?}", - picture.pixel_layout() // PixelLayout does not implement display - )), - ), - )); + return Err(ImageError::Decoding(DecodingError::new( + ImageFormat::Avif.into(), + AvifDecoderError::AlphaPlaneFormat(picture.pixel_layout()), + ))); } - let ref_a = picture.plane(PlanarImageComponent::Y); - let mut _bind_a = vec![]; - - let mut a_plane_stride = picture.stride(PlanarImageComponent::Y) >> 1; - - let mut shape_a_plane = || { - a_plane_stride = width; - _bind_a = reshape_plane( - ref_a.as_ref(), - picture.stride(PlanarImageComponent::Y) as usize, - width as usize, - height as usize, - ); - }; - let a_plane: &[u16] = if picture.stride(PlanarImageComponent::Y) as usize & 1 == 0 { - match bytemuck::try_cast_slice(ref_y.as_ref()) { - Ok(slice) => slice, - Err(_) => { - shape_a_plane(); - _bind_a.as_slice() - } - } - } else { - shape_a_plane(); - _bind_a.as_slice() - }; + check_plane_dimension_preconditions( + picture.width() as usize, + picture.height() as usize, + width as usize, + 
height as usize, + )?; + + let a_dav1d_plane = picture.plane(PlanarImageComponent::Y); + let a_plane_view = transmute_y_plane16( + &a_dav1d_plane, + picture.stride(PlanarImageComponent::Y) as usize, + width as usize, + height as usize, + ); - let width = picture.width(); for (buf, slice) in Iterator::zip( rgba16_buf.chunks_exact_mut(width as usize * 4), - a_plane.chunks_exact(a_plane_stride as usize), + a_plane_view.data.as_ref().chunks_exact(a_plane_view.stride), ) { for (rgba, a_src) in buf.chunks_exact_mut(4).zip(slice) { rgba[3] = *a_src; @@ -564,11 +615,8 @@ impl ImageDecoder for AvifDecoder { // Expand current bit depth to target 16 let target_expand_bits = 16u32.saturating_sub(self.picture.bit_depth() as u32); if target_expand_bits > 0 { - for rgba in rgba16_buf.chunks_exact_mut(4) { - rgba[0] <<= target_expand_bits; - rgba[1] <<= target_expand_bits; - rgba[2] <<= target_expand_bits; - rgba[3] <<= target_expand_bits; + for item in rgba16_buf.iter_mut() { + *item <<= target_expand_bits; } } } diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index 9d20b5f130..4f1f386c8b 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -1,4 +1,7 @@ +use crate::error::DecodingError; +use crate::{ImageError, ImageFormat}; use num_traits::AsPrimitive; +use std::fmt::{Display, Formatter}; #[derive(Debug, Copy, Clone)] /// Representation of inversion matrix @@ -10,24 +13,6 @@ struct CbCrInverseTransform { pub g_coeff_2: T, } -impl CbCrInverseTransform { - fn new( - y_coef: T, - cr_coef: T, - cb_coef: T, - g_coeff_1: T, - g_coeff_2: T, - ) -> CbCrInverseTransform { - CbCrInverseTransform { - y_coef, - cr_coef, - cb_coef, - g_coeff_1, - g_coeff_2, - } - } -} - impl CbCrInverseTransform { fn to_integers(self, precision: u32) -> CbCrInverseTransform { let precision_scale: i32 = 1i32 << (precision as i32); @@ -46,7 +31,97 @@ impl CbCrInverseTransform { } } -/// Transformation RGB to YUV with coefficients as specified in [ITU-R](https://www.itu.int/rec/T-REC-H.273/en) +#[derive(Copy, Clone, Debug)] +struct ErrorSize { + expected: usize, + received: usize, +} + +#[derive(Copy, Clone, Debug)] +enum PlaneDefinition { + Y, + U, + V, +} + +impl Display for PlaneDefinition { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + match self { + PlaneDefinition::Y => f.write_str("Luma"), + PlaneDefinition::U => f.write_str("U chroma"), + PlaneDefinition::V => f.write_str("V chroma"), + } + } +} + +#[derive(Debug, Clone, Copy)] +enum YuvConversionError { + YuvPlaneSizeMismatch(PlaneDefinition, ErrorSize), + RgbDestinationSizeMismatch(ErrorSize), +} + +impl Display for YuvConversionError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + YuvConversionError::YuvPlaneSizeMismatch(plane, error_size) => { + f.write_fmt(format_args!( + "For plane {} expected size is {} but was received {}", + plane, error_size.received, error_size.expected, + )) + } + YuvConversionError::RgbDestinationSizeMismatch(error_size) => { + f.write_fmt(format_args!( + "For RGB destination expected size is {} but was received {}", + error_size.received, error_size.expected, + )) + } + } + } +} + +impl std::error::Error for YuvConversionError {} + +#[inline] +fn check_yuv_plane_preconditions( + plane: &[V], + plane_definition: PlaneDefinition, + stride: usize, + height: usize, +) -> Result<(), ImageError> { + if plane.len() != stride * height { + return Err(ImageError::Decoding(DecodingError::new( + ImageFormat::Avif.into(), + YuvConversionError::YuvPlaneSizeMismatch( + plane_definition, + 
ErrorSize { + expected: stride * height, + received: plane.len(), + }, + ), + ))); + } + Ok(()) +} + +#[inline] +fn check_rgb_preconditions( + rgb_data: &[V], + stride: usize, + height: usize, +) -> Result<(), ImageError> { + if rgb_data.len() != stride * height { + return Err(ImageError::Decoding(DecodingError::new( + ImageFormat::Avif.into(), + YuvConversionError::RgbDestinationSizeMismatch(ErrorSize { + expected: stride * height, + received: rgb_data.len(), + }), + ))); + } + Ok(()) +} + +/// Transformation YUV to RGB with coefficients as specified in [ITU-R](https://www.itu.int/rec/T-REC-H.273/en) fn get_inverse_transform( range_bgra: u32, range_y: u32, @@ -54,25 +129,27 @@ fn get_inverse_transform( kr: f32, kb: f32, precision: u32, -) -> Result, String> { +) -> CbCrInverseTransform { let range_uv = range_bgra as f32 / range_uv as f32; let y_coef = range_bgra as f32 / range_y as f32; let cr_coeff = (2f32 * (1f32 - kr)) * range_uv; let cb_coeff = (2f32 * (1f32 - kb)) * range_uv; let kg = 1.0f32 - kr - kb; - if kg == 0f32 { - return Err("1.0f - kr - kg must not be 0".parse().unwrap()); - } + assert_ne!(kg, 0., "1.0f - kr - kg must not be 0"); let g_coeff_1 = (2f32 * ((1f32 - kr) * kr / kg)) * range_uv; let g_coeff_2 = (2f32 * ((1f32 - kb) * kb / kg)) * range_uv; - let exact_transform = - CbCrInverseTransform::new(y_coef, cr_coeff, cb_coeff, g_coeff_1, g_coeff_2); - Ok(exact_transform.to_integers(precision)) + let exact_transform = CbCrInverseTransform { + y_coef, + cr_coef: cr_coeff, + cb_coef: cb_coeff, + g_coeff_1, + g_coeff_2, + }; + exact_transform.to_integers(precision) } -#[repr(C)] #[derive(Debug, Copy, Clone, PartialOrd, PartialEq)] -/// Declares YUV range TV (limited) or Full, +/// Declares YUV range TV (limited) or PC (full), /// more info [ITU-R](https://www.itu.int/rec/T-REC-H.273/en) pub(crate) enum YuvIntensityRange { /// Limited range Y ∈ [16 << (depth - 8), 16 << (depth - 8) + 224 << (depth - 8)], @@ -92,26 +169,27 @@ struct YuvChromaRange { pub range: YuvIntensityRange, } -const fn get_yuv_range(depth: u32, range: YuvIntensityRange) -> YuvChromaRange { - match range { - YuvIntensityRange::Tv => YuvChromaRange { - bias_y: 16 << (depth - 8), - bias_uv: 1 << (depth - 1), - range_y: 219 << (depth - 8), - range_uv: 224 << (depth - 8), - range, - }, - YuvIntensityRange::Pc => YuvChromaRange { - bias_y: 0, - bias_uv: 1 << (depth - 1), - range_uv: (1 << depth) - 1, - range_y: (1 << depth) - 1, - range, - }, +impl YuvIntensityRange { + const fn get_yuv_range(self, depth: u32) -> YuvChromaRange { + match self { + YuvIntensityRange::Tv => YuvChromaRange { + bias_y: 16 << (depth - 8), + bias_uv: 1 << (depth - 1), + range_y: 219 << (depth - 8), + range_uv: 224 << (depth - 8), + range: self, + }, + YuvIntensityRange::Pc => YuvChromaRange { + bias_y: 0, + bias_uv: 1 << (depth - 1), + range_uv: (1 << depth) - 1, + range_y: (1 << depth) - 1, + range: self, + }, + } } } -#[repr(C)] #[derive(Debug, Copy, Clone, PartialOrd, PartialEq)] /// Declares standard prebuilt YUV conversion matrices, /// check [ITU-R](https://www.itu.int/rec/T-REC-H.273/en) information for more info @@ -125,92 +203,119 @@ pub(crate) enum YuvStandardMatrix { #[derive(Debug, Copy, Clone, PartialOrd, PartialEq)] struct YuvBias { - pub kr: f32, - pub kb: f32, + kr: f32, + kb: f32, } -const fn get_kr_kb(matrix: YuvStandardMatrix) -> YuvBias { - match matrix { - YuvStandardMatrix::Bt601 => YuvBias { - kr: 0.299f32, - kb: 0.114f32, - }, - YuvStandardMatrix::Bt709 => YuvBias { - kr: 0.2126f32, - kb: 0.0722f32, - }, - 
YuvStandardMatrix::Bt2020 => YuvBias { - kr: 0.2627f32, - kb: 0.0593f32, - }, - YuvStandardMatrix::Smpte240 => YuvBias { - kr: 0.087f32, - kb: 0.212f32, - }, - YuvStandardMatrix::Bt470_6 => YuvBias { - kr: 0.2220f32, - kb: 0.0713f32, - }, +impl YuvStandardMatrix { + const fn get_kr_kb(self) -> YuvBias { + match self { + YuvStandardMatrix::Bt601 => YuvBias { + kr: 0.299f32, + kb: 0.114f32, + }, + YuvStandardMatrix::Bt709 => YuvBias { + kr: 0.2126f32, + kb: 0.0722f32, + }, + YuvStandardMatrix::Bt2020 => YuvBias { + kr: 0.2627f32, + kb: 0.0593f32, + }, + YuvStandardMatrix::Smpte240 => YuvBias { + kr: 0.087f32, + kb: 0.212f32, + }, + YuvStandardMatrix::Bt470_6 => YuvBias { + kr: 0.2220f32, + kb: 0.0713f32, + }, + } } } pub(crate) struct YuvPlanarImage<'a, T> { - y_plane: &'a [T], - y_stride: usize, - u_plane: &'a [T], - u_stride: usize, - v_plane: &'a [T], - v_stride: usize, - width: usize, - height: usize, + pub(crate) y_plane: &'a [T], + pub(crate) y_stride: usize, + pub(crate) u_plane: &'a [T], + pub(crate) u_stride: usize, + pub(crate) v_plane: &'a [T], + pub(crate) v_stride: usize, + pub(crate) width: usize, + pub(crate) height: usize, } -impl<'a, T> YuvPlanarImage<'a, T> { - #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - y_plane: &'a [T], - y_stride: usize, - u_plane: &'a [T], - u_stride: usize, - v_plane: &'a [T], - v_stride: usize, - width: usize, - height: usize, - ) -> Self { - YuvPlanarImage { - y_plane, - y_stride, - u_plane, - u_stride, - v_plane, - v_stride, - width, - height, - } - } +pub(crate) struct YuvGrayImage<'a, T> { + pub(crate) y_plane: &'a [T], + pub(crate) y_stride: usize, + pub(crate) width: usize, + pub(crate) height: usize, } -pub(crate) struct YuvGrayImage<'a, T> { - y_plane: &'a [T], - y_stride: usize, - width: usize, - height: usize, +/// Converts Yuv 400 planar format 8 bit to Rgba 8 bit +/// +/// # Arguments +/// +/// * `image`: see [YuvGrayImage] +/// * `rgba`: RGBA image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv400_to_rgba8( + image: YuvGrayImage, + rgba: &mut [u8], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv400_to_rgbx_impl::(image, rgba, range, matrix) } -impl<'a, T> YuvGrayImage<'a, T> { - pub(crate) fn new(y_plane: &'a [T], y_stride: usize, width: usize, height: usize) -> Self { - YuvGrayImage { - y_plane, - y_stride, - width, - height, - } - } +/// Converts Yuv 400 planar format 10 bit to Rgba 10 bit +/// +/// Stride here is not supported as it can be in passed from FFI. +/// +/// # Arguments +/// +/// * `image`: see [YuvGrayImage] +/// * `rgba`: RGBA image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv400_to_rgba10( + image: YuvGrayImage, + rgba: &mut [u16], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv400_to_rgbx_impl::(image, rgba, range, matrix) +} + +/// Converts Yuv 400 planar format 12 bit to Rgba 12 bit +/// +/// Stride here is not supported as it can be in passed from FFI. 
+/// +/// # Arguments +/// +/// * `image`: see [YuvGrayImage] +/// * `rgba`: RGBA image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv400_to_rgba12( + image: YuvGrayImage, + rgba: &mut [u16], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv400_to_rgbx_impl::(image, rgba, range, matrix) } /// Converts Yuv 400 planar format to Rgba /// -/// Stride here is not supports u16 as it can be in passed from FFI. +/// Stride here is not supported as it can be in passed from FFI. /// /// # Arguments /// @@ -220,45 +325,55 @@ impl<'a, T> YuvGrayImage<'a, T> { /// * `matrix`: see [YuvStandardMatrix] /// /// -pub(crate) fn yuv400_to_rgba + 'static>( +#[inline] +fn yuv400_to_rgbx_impl< + V: Copy + AsPrimitive + 'static + Sized, + const CHANNELS: usize, + const BIT_DEPTH: usize, +>( image: YuvGrayImage, rgba: &mut [V], - bit_depth: u32, range: YuvIntensityRange, matrix: YuvStandardMatrix, -) -> Result<(), String> +) -> Result<(), ImageError> where i32: AsPrimitive, { + assert!( + CHANNELS == 3 || CHANNELS == 4, + "YUV 4:0:0 -> RGB is implemented only on 3 and 4 channels" + ); + assert!( + (8..=16).contains(&BIT_DEPTH), + "Invalid bit depth is provided" + ); + assert!( + if BIT_DEPTH > 8 { + size_of::() == 2 + } else { + size_of::() == 1 + }, + "Unsupported bit depth and data type combination" + ); + let y_plane = image.y_plane; let y_stride = image.y_stride; let height = image.height; let width = image.width; - if y_plane.len() != y_stride * height { - return Err(format!( - "Luma plane expected {} bytes, got {}", - y_stride * height, - y_plane.len() - )); - } + check_yuv_plane_preconditions(y_plane, PlaneDefinition::Y, y_stride, height)?; + check_rgb_preconditions(rgba, width * CHANNELS, height)?; - if !(8..=16).contains(&bit_depth) { - return Err(format!( - "Unexpected bit depth value {}, only 8...16 is supported", - bit_depth - )); - } - const CHANNELS: usize = 4; let rgba_stride = width * CHANNELS; - let max_value = (1 << bit_depth) - 1; + let max_value = (1 << BIT_DEPTH) - 1; // If luma plane is in full range it can be just redistributed across the image if range == YuvIntensityRange::Pc { let y_iter = y_plane.chunks_exact(y_stride); let rgb_iter = rgba.chunks_exact_mut(rgba_stride); + // All branches on generic const will be optimized out. for (y_src, rgb) in y_iter.zip(rgb_iter) { let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); @@ -267,39 +382,34 @@ where rgb_dst[0] = r; rgb_dst[1] = r; rgb_dst[2] = r; - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[3] = max_value.as_(); + } } } return Ok(()); } - let range = get_yuv_range(bit_depth, range); - let kr_kb = get_kr_kb(matrix); + let range = range.get_yuv_range(BIT_DEPTH as u32); + let kr_kb = matrix.get_kr_kb(); const PRECISION: i32 = 11; const ROUNDING: i32 = 1 << (PRECISION - 1); let inverse_transform = get_inverse_transform( - (1 << bit_depth) - 1, + (1 << BIT_DEPTH) - 1, range.range_y, range.range_uv, kr_kb.kr, kr_kb.kb, PRECISION as u32, - )?; + ); let y_coef = inverse_transform.y_coef; let bias_y = range.bias_y as i32; - if rgba.len() != width * height * CHANNELS { - return Err(format!( - "RGB image layout expected {} bytes, got {}", - width * height * CHANNELS, - rgba.len() - )); - } - let y_iter = y_plane.chunks_exact(y_stride); let rgb_iter = rgba.chunks_exact_mut(rgba_stride); + // All branches on generic const will be optimized out. 
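// Illustrative usage (hypothetical caller, not part of this patch), assuming a
// tightly packed 10-bit grayscale frame where `y_plane.len() == width * height`:
fn yuv400_10bit_example(
    y_plane: &[u16],
    width: usize,
    height: usize,
) -> Result<Vec<u16>, ImageError> {
    let mut rgba = vec![0u16; width * height * 4];
    let gray = YuvGrayImage {
        y_plane,
        y_stride: width, // tightly packed: stride == width
        width,
        height,
    };
    yuv400_to_rgba10(gray, &mut rgba, YuvIntensityRange::Tv, YuvStandardMatrix::Bt709)?;
    Ok(rgba)
}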
for (y_src, rgb) in y_iter.zip(rgb_iter) { let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); @@ -310,16 +420,37 @@ where rgb_dst[0] = r.as_(); rgb_dst[1] = r.as_(); rgb_dst[2] = r.as_(); - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[3] = max_value.as_(); + } } } Ok(()) } -/// Converts YUV420 to Rgb +/// Converts YUV420 8 bit-depth to Rgba 8 bit +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] /// -/// Stride here is not supports u16 as it can be in passed from FFI. +/// +pub(crate) fn yuv420_to_rgba8( + image: YuvPlanarImage, + rgb: &mut [u8], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv420_to_rgbx::(image, rgb, range, matrix) +} + +/// Converts YUV420 10 bit-depth to Rgba 10 bit-depth +/// +/// Stride here is not supported as it can be in passed from FFI. /// /// # Arguments /// @@ -329,16 +460,78 @@ where /// * `matrix`: see [YuvStandardMatrix] /// /// -pub(crate) fn yuv420_to_rgba + 'static>( +pub(crate) fn yuv420_to_rgba10( + image: YuvPlanarImage, + rgb: &mut [u16], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv420_to_rgbx::(image, rgb, range, matrix) +} + +/// Converts YUV420 12 bit-depth to Rgba 12 bit-depth +/// +/// Stride here is not supported as it can be in passed from FFI. +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv420_to_rgba12( + image: YuvPlanarImage, + rgb: &mut [u16], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv420_to_rgbx::(image, rgb, range, matrix) +} + +/// Converts YUV420 to Rgba +/// +/// Stride here is not supported as it can be in passed from FFI. 
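// Illustrative sketch (hypothetical helper, not part of this patch) of how decoder.rs
// dispatches to one of the monomorphized entry points above for high bit depth 4:2:0
// content; 8-bit frames take the separate u8 path instead:
fn convert_i420_high_bit_depth(
    image: YuvPlanarImage<u16>,
    out: &mut [u16],
    bit_depth: usize,
    range: YuvIntensityRange,
    matrix: YuvStandardMatrix,
) -> Result<(), ImageError> {
    let worker = if bit_depth == 10 {
        yuv420_to_rgba10
    } else {
        yuv420_to_rgba12
    };
    worker(image, out, range, matrix)
}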
+/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +#[inline] +fn yuv420_to_rgbx< + V: Copy + AsPrimitive + 'static + Sized, + const CHANNELS: usize, + const BIT_DEPTH: usize, +>( image: YuvPlanarImage, rgb: &mut [V], - bit_depth: u32, range: YuvIntensityRange, matrix: YuvStandardMatrix, -) -> Result<(), String> +) -> Result<(), ImageError> where i32: AsPrimitive, { + assert!( + CHANNELS == 3 || CHANNELS == 4, + "YUV 4:2:0 -> RGB is implemented only on 3 and 4 channels" + ); + assert!( + (8..=16).contains(&BIT_DEPTH), + "Invalid bit depth is provided" + ); + assert!( + if BIT_DEPTH > 8 { + size_of::() == 2 + } else { + size_of::() == 1 + }, + "Unsupported bit depth and data type combination" + ); let y_plane = image.y_plane; let u_plane = image.u_plane; let v_plane = image.v_plane; @@ -347,52 +540,27 @@ where let v_stride = image.v_stride; let chroma_height = (image.height + 1) / 2; - if y_plane.len() != y_stride * image.height { - return Err(format!( - "Luma plane expected {} bytes, got {}", - y_stride * image.height, - y_plane.len() - )); - } - - if u_plane.len() != u_stride * chroma_height { - return Err(format!( - "U plane expected {} bytes, got {}", - u_stride * chroma_height, - u_plane.len() - )); - } + check_yuv_plane_preconditions(y_plane, PlaneDefinition::Y, y_stride, image.height)?; + check_yuv_plane_preconditions(u_plane, PlaneDefinition::U, u_stride, chroma_height)?; + check_yuv_plane_preconditions(v_plane, PlaneDefinition::V, v_stride, chroma_height)?; - if v_plane.len() != v_stride * chroma_height { - return Err(format!( - "V plane expected {} bytes, got {}", - v_stride * chroma_height, - v_plane.len() - )); - } + check_rgb_preconditions(rgb, image.width * CHANNELS, image.height)?; - if !(8..=16).contains(&bit_depth) { - return Err(format!( - "Unexpected bit depth value {}, only 8...16 is supported", - bit_depth - )); - } - - let max_value = (1 << bit_depth) - 1; + let max_value = (1 << BIT_DEPTH) - 1; const PRECISION: i32 = 11; const ROUNDING: i32 = 1 << (PRECISION - 1); - let range = get_yuv_range(bit_depth, range); - let kr_kb = get_kr_kb(matrix); + let range = range.get_yuv_range(BIT_DEPTH as u32); + let kr_kb = matrix.get_kr_kb(); let inverse_transform = get_inverse_transform( - (1 << bit_depth) - 1, + (1 << BIT_DEPTH) - 1, range.range_y, range.range_uv, kr_kb.kr, kr_kb.kb, PRECISION as u32, - )?; + ); let cr_coef = inverse_transform.cr_coef; let cb_coef = inverse_transform.cb_coef; let y_coef = inverse_transform.y_coef; @@ -402,16 +570,6 @@ where let bias_y = range.bias_y as i32; let bias_uv = range.bias_uv as i32; - const CHANNELS: usize = 4; - - if rgb.len() != image.width * image.height * CHANNELS { - return Err(format!( - "RGB image layout expected {} bytes, got {}", - image.width * image.height * CHANNELS, - rgb.len() - )); - } - let rgb_stride = image.width * CHANNELS; let y_iter = y_plane.chunks_exact(y_stride * 2); @@ -445,6 +603,7 @@ where If image have odd height then luma channel is exact, and we're replicating last chroma rows. */ + // All branches on generic const will be optimized out. 
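// Illustrative aside (hypothetical helper, not part of this patch): the
// `chroma_height = (image.height + 1) / 2` above comes from 4:2:0 halving the chroma
// planes in both directions, rounding up. Tightly packed plane sizes per frame:
fn i420_plane_sizes(width: usize, height: usize) -> (usize, usize) {
    let luma = width * height;
    let chroma = ((width + 1) / 2) * ((height + 1) / 2);
    (luma, chroma) // e.g. a 5x5 frame needs 25 luma and 9 samples per chroma plane
}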
for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { // Since we're processing two rows in one loop we need to re-slice once more let y_iter = y_src.chunks_exact(y_stride); @@ -466,10 +625,18 @@ where >> PRECISION) .clamp(0, max_value); - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + } else { + unreachable!(); + } let y_value = (y_src[1].as_() - bias_y) * y_coef; @@ -481,10 +648,18 @@ where >> PRECISION) .clamp(0, max_value); - rgb_dst[4] = r.as_(); - rgb_dst[5] = g.as_(); - rgb_dst[6] = b.as_(); - rgb_dst[7] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[4] = r.as_(); + rgb_dst[5] = g.as_(); + rgb_dst[6] = b.as_(); + rgb_dst[7] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[3] = r.as_(); + rgb_dst[4] = g.as_(); + rgb_dst[5] = b.as_(); + } else { + unreachable!(); + } } // Process remainder if width is odd. @@ -512,10 +687,18 @@ where >> PRECISION) .clamp(0, max_value); - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + } else { + unreachable!(); + } } } } @@ -544,10 +727,18 @@ where let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) .clamp(0, max_value); - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + } else { + unreachable!(); + } let y_value = (y_src[1].as_() - bias_y) * y_coef; @@ -556,10 +747,18 @@ where let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) .clamp(0, max_value); - rgb_dst[4] = r.as_(); - rgb_dst[5] = g.as_(); - rgb_dst[6] = b.as_(); - rgb_dst[7] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[4] = r.as_(); + rgb_dst[5] = g.as_(); + rgb_dst[6] = b.as_(); + rgb_dst[7] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[3] = r.as_(); + rgb_dst[4] = g.as_(); + rgb_dst[5] = b.as_(); + } else { + unreachable!(); + } } // Process remainder if width is odd. 
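// Illustrative sketch (hypothetical helper, not part of this patch) of the per-pixel
// fixed-point step that the loops above repeat, factored out over the
// CbCrInverseTransform and bias values computed earlier in this function:
fn yuv_pixel_to_rgb(
    y: i32,
    cb: i32,
    cr: i32,
    t: CbCrInverseTransform<i32>,
    bias_y: i32,
    bias_uv: i32,
    max_value: i32,
) -> (i32, i32, i32) {
    const PRECISION: i32 = 11;
    const ROUNDING: i32 = 1 << (PRECISION - 1);
    let y_value = (y - bias_y) * t.y_coef;
    let cb_value = cb - bias_uv;
    let cr_value = cr - bias_uv;
    let r = ((y_value + t.cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value);
    let b = ((y_value + t.cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value);
    let g = ((y_value - t.g_coeff_1 * cr_value - t.g_coeff_2 * cb_value + ROUNDING)
        >> PRECISION)
        .clamp(0, max_value);
    (r, g, b)
}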
@@ -588,10 +787,18 @@ where >> PRECISION) .clamp(0, max_value); - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + } else { + unreachable!(); + } } } } @@ -599,6 +806,67 @@ where Ok(()) } +/// Converts Yuv 422 8-bit planar format to Rgba 8-bit +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv422_to_rgba8( + image: YuvPlanarImage, + rgb: &mut [u8], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv422_to_rgbx_impl::(image, rgb, range, matrix) +} + +/// Converts Yuv 422 10-bit planar format to Rgba 10-bit +/// +/// Stride here is not supported as it can be in passed from FFI. +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv422_to_rgba10( + image: YuvPlanarImage, + rgb: &mut [u16], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv422_to_rgbx_impl::(image, rgb, range, matrix) +} + +/// Converts Yuv 422 12-bit planar format to Rgba 12-bit +/// +/// Stride here is not supported as it can be in passed from FFI. +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn yuv422_to_rgba12( + image: YuvPlanarImage, + rgb: &mut [u16], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv422_to_rgbx_impl::(image, rgb, range, matrix) +} + /// Converts Yuv 422 planar format to Rgba /// /// Stride here is not supports u16 as it can be in passed from FFI. 
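// Illustrative aside (hypothetical helper, not part of this patch): unlike 4:2:0
// above, 4:2:2 subsamples chroma horizontally only, so its chroma planes keep the
// full image height, matching the `u_stride * image.height` checks below:
fn i422_plane_sizes(width: usize, height: usize) -> (usize, usize) {
    (width * height, ((width + 1) / 2) * height)
}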
@@ -611,70 +879,63 @@ where /// * `matrix`: see [YuvStandardMatrix] /// /// -pub(crate) fn yuv422_to_rgba + 'static>( +fn yuv422_to_rgbx_impl< + V: Copy + AsPrimitive + 'static + Sized, + const CHANNELS: usize, + const BIT_DEPTH: usize, +>( image: YuvPlanarImage, rgb: &mut [V], - bit_depth: u32, range: YuvIntensityRange, matrix: YuvStandardMatrix, -) -> Result<(), String> +) -> Result<(), ImageError> where i32: AsPrimitive, { + assert!( + CHANNELS == 3 || CHANNELS == 4, + "YUV 4:2:2 -> RGB is implemented only on 3 and 4 channels" + ); + assert!( + (8..=16).contains(&BIT_DEPTH), + "Invalid bit depth is provided" + ); + assert!( + if BIT_DEPTH > 8 { + size_of::() == 2 + } else { + size_of::() == 1 + }, + "Unsupported bit depth and data type combination" + ); let y_plane = image.y_plane; let u_plane = image.u_plane; let v_plane = image.v_plane; let y_stride = image.y_stride; let u_stride = image.u_stride; let v_stride = image.v_stride; - let height = image.height; let width = image.width; - if y_plane.len() != y_stride * height { - return Err(format!( - "Luma plane expected {} bytes, got {}", - y_stride * height, - y_plane.len() - )); - } + check_yuv_plane_preconditions(y_plane, PlaneDefinition::Y, y_stride, image.height)?; + check_yuv_plane_preconditions(u_plane, PlaneDefinition::U, u_stride, image.height)?; + check_yuv_plane_preconditions(v_plane, PlaneDefinition::V, v_stride, image.height)?; - if u_plane.len() != u_stride * height { - return Err(format!( - "U plane expected {} bytes, got {}", - u_stride * height, - u_plane.len() - )); - } + check_rgb_preconditions(rgb, image.width * CHANNELS, image.height)?; - if v_plane.len() != v_stride * height { - return Err(format!( - "V plane expected {} bytes, got {}", - v_stride * height, - v_plane.len() - )); - } + let max_value = (1 << BIT_DEPTH) - 1; - if !(8..=16).contains(&bit_depth) { - return Err(format!( - "Unexpected bit depth value {}, only 8...16 is supported", - bit_depth - )); - } - - let max_value = (1 << bit_depth) - 1; - - let range = get_yuv_range(bit_depth, range); - let kr_kb = get_kr_kb(matrix); + let range = range.get_yuv_range(BIT_DEPTH as u32); + let kr_kb = matrix.get_kr_kb(); const PRECISION: i32 = 11; const ROUNDING: i32 = 1 << (PRECISION - 1); let inverse_transform = get_inverse_transform( - (1 << bit_depth) - 1, + (1 << BIT_DEPTH) - 1, range.range_y, range.range_uv, kr_kb.kr, kr_kb.kb, PRECISION as u32, - )?; + ); let cr_coef = inverse_transform.cr_coef; let cb_coef = inverse_transform.cb_coef; let y_coef = inverse_transform.y_coef; @@ -684,16 +945,6 @@ where let bias_y = range.bias_y as i32; let bias_uv = range.bias_uv as i32; - const CHANNELS: usize = 4; - - if rgb.len() != width * height * CHANNELS { - return Err(format!( - "RGB image layout expected {} bytes, got {}", - width * height * CHANNELS, - rgb.len() - )); - } - /* Sample 4x4 YUV422 planar image start_y + 0: Y00 Y01 Y02 Y03 @@ -726,6 +977,7 @@ where let u_iter = u_plane.chunks_exact(u_stride); let v_iter = v_plane.chunks_exact(v_stride); + // All branches on generic const will be optimized out. 
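// A branch on a const generic such as `CHANNELS` is resolved per monomorphized
// instance, which is why the 3- vs 4-channel checks in the loops below carry no
// runtime cost. A minimal sketch of the idea; the helper name is illustrative only:
fn write_pixel<const CHANNELS: usize>(dst: &mut [u16], r: u16, g: u16, b: u16, max: u16) {
    dst[0] = r;
    dst[1] = g;
    dst[2] = b;
    if CHANNELS == 4 {
        // Retained only in the 4-channel instantiation; compiled out for CHANNELS == 3.
        dst[3] = max;
    }
}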
for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { let y_iter = y_src.chunks_exact(2); let rgb_chunks = rgb.chunks_exact_mut(CHANNELS * 2); @@ -740,10 +992,18 @@ where let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) .clamp(0, max_value); - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + } else { + unreachable!(); + } let y_value = (y_src[1].as_() - bias_y) * y_coef; @@ -752,10 +1012,18 @@ where let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) .clamp(0, max_value); - rgb_dst[4] = r.as_(); - rgb_dst[5] = g.as_(); - rgb_dst[6] = b.as_(); - rgb_dst[7] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[4] = r.as_(); + rgb_dst[5] = g.as_(); + rgb_dst[6] = b.as_(); + rgb_dst[7] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[3] = r.as_(); + rgb_dst[4] = g.as_(); + rgb_dst[5] = b.as_(); + } else { + unreachable!(); + } } // Process left pixels for odd images, this should work since luma must be always exact @@ -783,10 +1051,18 @@ where >> PRECISION) .clamp(0, max_value); - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + } else { + unreachable!(); + } } } } @@ -794,6 +1070,67 @@ where Ok(()) } +/// Converts Yuv 444 planar format 8 bit-depth to Rgba 8 bit +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgba`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(super) fn yuv444_to_rgba8( + image: YuvPlanarImage, + rgba: &mut [u8], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv444_to_rgbx_impl::(image, rgba, range, matrix) +} + +/// Converts Yuv 444 planar format 10 bit-depth to Rgba 10 bit +/// +/// Stride here is not supports u16 as it can be in passed from FFI. +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgba`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(super) fn yuv444_to_rgba10( + image: YuvPlanarImage, + rgba: &mut [u16], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv444_to_rgbx_impl::(image, rgba, range, matrix) +} + +/// Converts Yuv 444 planar format 12 bit-depth to Rgba 12 bit +/// +/// Stride here is not supports u16 as it can be in passed from FFI. +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgba`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(super) fn yuv444_to_rgba12( + image: YuvPlanarImage, + rgba: &mut [u16], + range: YuvIntensityRange, + matrix: YuvStandardMatrix, +) -> Result<(), ImageError> { + yuv444_to_rgbx_impl::(image, rgba, range, matrix) +} + /// Converts Yuv 444 planar format to Rgba /// /// Stride here is not supports u16 as it can be in passed from FFI. 
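These converters differ only in chroma geometry: 4:0:0 carries no chroma planes, 4:2:0 halves them in both axes, 4:2:2 halves them horizontally, and 4:4:4 keeps them at full resolution. A rough sketch of the implied chroma plane dimensions, rounding odd sizes up the same way the row loops handle their remainders (the enum below is illustrative, not the dav1d `PixelLayout`):

enum Layout {
    I400,
    I420,
    I422,
    I444,
}

fn chroma_size(layout: Layout, width: usize, height: usize) -> Option<(usize, usize)> {
    match layout {
        Layout::I400 => None, // no chroma planes at all
        Layout::I420 => Some(((width + 1) / 2, (height + 1) / 2)),
        Layout::I422 => Some(((width + 1) / 2, height)),
        Layout::I444 => Some((width, height)),
    }
}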
@@ -801,21 +1138,42 @@ where /// # Arguments /// /// * `image`: see [YuvPlanarImage] -/// * `rgb`: RGB image layout +/// * `rgba`: RGB image layout /// * `range`: see [YuvIntensityRange] /// * `matrix`: see [YuvStandardMatrix] /// /// -pub(crate) fn yuv444_to_rgba + 'static>( +#[inline] +fn yuv444_to_rgbx_impl< + V: Copy + AsPrimitive + 'static + Sized, + const CHANNELS: usize, + const BIT_DEPTH: usize, +>( image: YuvPlanarImage, - rgb: &mut [V], - bit_depth: u32, + rgba: &mut [V], range: YuvIntensityRange, matrix: YuvStandardMatrix, -) -> Result<(), String> +) -> Result<(), ImageError> where i32: AsPrimitive, { + assert!( + CHANNELS == 3 || CHANNELS == 4, + "YUV 4:4:4 -> RGB is implemented only on 3 and 4 channels" + ); + assert!( + (8..=16).contains(&BIT_DEPTH), + "Invalid bit depth is provided" + ); + assert!( + if BIT_DEPTH > 8 { + size_of::() == 2 + } else { + size_of::() == 1 + }, + "Unsupported bit depth and data type combination" + ); + let y_plane = image.y_plane; let u_plane = image.u_plane; let v_plane = image.v_plane; @@ -825,49 +1183,24 @@ where let height = image.height; let width = image.width; - if y_plane.len() != y_stride * height { - return Err(format!( - "Luma plane expected {} bytes, got {}", - y_stride * height, - y_plane.len() - )); - } - - if u_plane.len() != u_stride * height { - return Err(format!( - "U plane expected {} bytes, got {}", - u_stride * height, - u_plane.len() - )); - } + check_yuv_plane_preconditions(y_plane, PlaneDefinition::Y, y_stride, height)?; + check_yuv_plane_preconditions(u_plane, PlaneDefinition::U, u_stride, height)?; + check_yuv_plane_preconditions(v_plane, PlaneDefinition::V, v_stride, height)?; - if v_plane.len() != v_stride * height { - return Err(format!( - "V plane expected {} bytes, got {}", - v_stride * height, - v_plane.len() - )); - } - - if !(8..=16).contains(&bit_depth) { - return Err(format!( - "Unexpected bit depth value {}, only 8...16 is supported", - bit_depth - )); - } + check_rgb_preconditions(rgba, image.width * CHANNELS, height)?; - let range = get_yuv_range(bit_depth, range); - let kr_kb = get_kr_kb(matrix); + let range = range.get_yuv_range(BIT_DEPTH as u32); + let kr_kb = matrix.get_kr_kb(); const PRECISION: i32 = 11; const ROUNDING: i32 = 1 << (PRECISION - 1); let inverse_transform = get_inverse_transform( - (1 << bit_depth) - 1, + (1 << BIT_DEPTH) - 1, range.range_y, range.range_uv, kr_kb.kr, kr_kb.kb, PRECISION as u32, - )?; + ); let cr_coef = inverse_transform.cr_coef; let cb_coef = inverse_transform.cb_coef; let y_coef = inverse_transform.y_coef; @@ -877,25 +1210,16 @@ where let bias_y = range.bias_y as i32; let bias_uv = range.bias_uv as i32; - const CHANNELS: usize = 4; - - if rgb.len() != width * height * CHANNELS { - return Err(format!( - "RGB image layout expected {} bytes, got {}", - width * height * CHANNELS, - rgb.len() - )); - } - - let max_value = (1 << bit_depth) - 1; + let max_value = (1 << BIT_DEPTH) - 1; let rgb_stride = width * CHANNELS; let y_iter = y_plane.chunks_exact(y_stride); - let rgb_iter = rgb.chunks_exact_mut(rgb_stride); + let rgb_iter = rgba.chunks_exact_mut(rgb_stride); let u_iter = u_plane.chunks_exact(u_stride); let v_iter = v_plane.chunks_exact(v_stride); + // All branches on generic const will be optimized out. 
for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); @@ -910,19 +1234,73 @@ where let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) .clamp(0, max_value); - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + } else { + unreachable!(); + } } } Ok(()) } +/// Converts Gbr 8 bit planar format to Rgba 8 bit-depth +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn gbr_to_rgba8(image: YuvPlanarImage<u8>, rgb: &mut [u8]) -> Result<(), ImageError> { + gbr_to_rgbx_impl::<u8, 4, 8>(image, rgb) +} + +/// Converts Gbr 10 bit planar format to Rgba 10 bit-depth +/// +/// Stride here is not supported as it can be passed in from FFI. +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn gbr_to_rgba10(image: YuvPlanarImage<u16>, rgb: &mut [u16]) -> Result<(), ImageError> { + gbr_to_rgbx_impl::<u16, 4, 10>(image, rgb) +} + +/// Converts Gbr 12 bit planar format to Rgba 12 bit-depth +/// +/// Stride here is not supported as it can be passed in from FFI. +/// +/// # Arguments +/// +/// * `image`: see [YuvPlanarImage] +/// * `rgb`: RGB image layout +/// * `range`: see [YuvIntensityRange] +/// * `matrix`: see [YuvStandardMatrix] +/// +/// +pub(crate) fn gbr_to_rgba12(image: YuvPlanarImage<u16>, rgb: &mut [u16]) -> Result<(), ImageError> { + gbr_to_rgbx_impl::<u16, 4, 12>(image, rgb) +} + /// Converts Gbr planar format to Rgba /// -/// Stride here is not supports u16 as it can be in passed from FFI. +/// Stride here is not supported as it can be passed in from FFI.
/// /// # Arguments /// @@ -932,14 +1310,34 @@ where /// * `matrix`: see [YuvStandardMatrix] /// /// -pub(crate) fn gbr_to_rgba + 'static>( +#[inline] +fn gbr_to_rgbx_impl< + V: Copy + AsPrimitive + 'static + Sized, + const CHANNELS: usize, + const BIT_DEPTH: usize, +>( image: YuvPlanarImage, rgb: &mut [V], - bit_depth: u32, -) -> Result<(), String> +) -> Result<(), ImageError> where i32: AsPrimitive, { + assert!( + CHANNELS == 3 || CHANNELS == 4, + "GBR -> RGB is implemented only on 3 and 4 channels" + ); + assert!( + (8..=16).contains(&BIT_DEPTH), + "Invalid bit depth is provided" + ); + assert!( + if BIT_DEPTH > 8 { + size_of::() == 2 + } else { + size_of::() == 1 + }, + "Unsupported bit depth and data type combination" + ); let y_plane = image.y_plane; let u_plane = image.u_plane; let v_plane = image.v_plane; @@ -949,48 +1347,13 @@ where let height = image.height; let width = image.width; - if y_plane.len() != y_stride * height { - return Err(format!( - "Luma plane expected {} bytes, got {}", - y_stride * height, - y_plane.len() - )); - } - - if u_plane.len() != u_stride * height { - return Err(format!( - "U plane expected {} bytes, got {}", - u_stride * height, - u_plane.len() - )); - } - - if v_plane.len() != v_stride * height { - return Err(format!( - "V plane expected {} bytes, got {}", - v_stride * height, - v_plane.len() - )); - } - - if !(8..=16).contains(&bit_depth) { - return Err(format!( - "Unexpected bit depth value {}, only 8...16 is supported", - bit_depth - )); - } - - const CHANNELS: usize = 4; + check_yuv_plane_preconditions(y_plane, PlaneDefinition::Y, y_stride, height)?; + check_yuv_plane_preconditions(u_plane, PlaneDefinition::U, u_stride, height)?; + check_yuv_plane_preconditions(v_plane, PlaneDefinition::V, v_stride, height)?; - if rgb.len() != width * height * CHANNELS { - return Err(format!( - "RGB image layout expected {} bytes, got {}", - width * height * CHANNELS, - rgb.len() - )); - } + check_rgb_preconditions(rgb, width * CHANNELS, height)?; - let max_value = (1 << bit_depth) - 1; + let max_value = (1 << BIT_DEPTH) - 1; let rgb_stride = width * CHANNELS; @@ -1008,7 +1371,9 @@ where rgb_dst[0] = v_src; rgb_dst[1] = y_src; rgb_dst[2] = u_src; - rgb_dst[3] = max_value.as_(); + if CHANNELS == 4 { + rgb_dst[3] = max_value.as_(); + } } } From 069209382e0a6c5bb97b92407fbedfdec8359281 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Thu, 31 Oct 2024 13:06:28 +0000 Subject: [PATCH 11/21] Changed unspecified to bt.601 --- src/codecs/avif/decoder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index 1e23dcebfa..2b3d921196 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -237,7 +237,7 @@ fn get_matrix( // and some applications prefer Bt.601 as default. // For ex. 
`Chrome` always prefer Bt.709 even for SD content // However, nowadays standard should be Bt.709 for HD+ size otherwise Bt.601 - dav1d::pixel::MatrixCoefficients::Unspecified => Ok(YuvStandardMatrix::Bt709), + dav1d::pixel::MatrixCoefficients::Unspecified => Ok(YuvStandardMatrix::Bt601), dav1d::pixel::MatrixCoefficients::Reserved => Err(ImageError::Unsupported( UnsupportedError::from_format_and_kind( ImageFormat::Avif.into(), From fd73671ccd6ab0bd181f952dc1bbec185dcaf859 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Fri, 1 Nov 2024 22:49:35 +0000 Subject: [PATCH 12/21] Refactoring and readability improvements --- src/codecs/avif/decoder.rs | 409 +++++++++++++++---------------------- src/codecs/avif/yuv.rs | 43 ++-- 2 files changed, 192 insertions(+), 260 deletions(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index 2b3d921196..a02cf995e5 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -1,4 +1,9 @@ //! Decoding of AVIF images. +use crate::error::{ + DecodingError, ImageFormatHint, LimitError, LimitErrorKind, UnsupportedError, + UnsupportedErrorKind, +}; +use crate::{ColorType, ImageDecoder, ImageError, ImageFormat, ImageResult}; /// /// The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec. /// @@ -8,12 +13,6 @@ use std::fmt::{Display, Formatter}; use std::io::Read; use std::marker::PhantomData; -use crate::error::{ - DecodingError, ImageFormatHint, LimitError, LimitErrorKind, UnsupportedError, - UnsupportedErrorKind, -}; -use crate::{ColorType, ImageDecoder, ImageError, ImageFormat, ImageResult}; - use crate::codecs::avif::yuv::*; use dav1d::{PixelLayout, PlanarImageComponent}; use mp4parse::{read_avif, ParseStrictness}; @@ -35,7 +34,6 @@ pub struct AvifDecoder { #[derive(Debug, Clone, PartialEq, Eq)] enum AvifDecoderError { AlphaPlaneFormat(PixelLayout), - MemoryLayout, YuvLayoutOnIdentityMatrix(PixelLayout), } @@ -48,9 +46,6 @@ impl Display for AvifDecoderError { PixelLayout::I422 => f.write_str("Alpha layout must be 4:0:0 but it was 4:2:2"), PixelLayout::I444 => f.write_str("Alpha layout must be 4:0:0 but it was 4:4:4"), }, - AvifDecoderError::MemoryLayout => { - f.write_str("Unexpected data size for current RGBx layout") - } AvifDecoderError::YuvLayoutOnIdentityMatrix(pixel_layout) => match pixel_layout { PixelLayout::I400 => { f.write_str("YUV layout on 'Identity' matrix must be 4:4:4 but it was 4:0:0") @@ -125,7 +120,7 @@ fn reshape_plane(source: &[u8], stride: usize, width: usize, height: usize) -> V .zip(source.chunks_exact(stride)) { for (dst, src) in shaped_row.iter_mut().zip(src_row.chunks_exact(2)) { - *dst = u16::from_le_bytes([src[0], src[1]]); + *dst = u16::from_ne_bytes([src[0], src[1]]); } } target_plane @@ -136,6 +131,15 @@ struct Plane16View<'a> { stride: usize, } +impl Default for Plane16View<'_> { + fn default() -> Self { + Plane16View { + data: std::borrow::Cow::Owned(vec![]), + stride: 0, + } + } +} + /// This is correct to transmute FFI data for Y plane and Alpha plane fn transmute_y_plane16( plane: &dav1d::Plane, @@ -230,8 +234,7 @@ fn get_matrix( david_matrix: dav1d::pixel::MatrixCoefficients, ) -> Result { match david_matrix { - // Identity just a stub here, we'll handle it in different way - dav1d::pixel::MatrixCoefficients::Identity => Ok(YuvStandardMatrix::Bt709), + dav1d::pixel::MatrixCoefficients::Identity => Ok(YuvStandardMatrix::Identity), dav1d::pixel::MatrixCoefficients::BT709 => Ok(YuvStandardMatrix::Bt709), // This is arguable, some applications 
prefer to go with Bt.709 as default, // and some applications prefer Bt.601 as default. @@ -301,60 +304,6 @@ fn get_matrix( } } -fn check_target_rgba_dimension_preconditions( - width: usize, - height: usize, -) -> Result<(), ImageError> { - // This is suspicious if this happens, better fail early - if width == 0 || height == 0 { - return Err(ImageError::Limits(LimitError::from_kind( - LimitErrorKind::DimensionError, - ))); - } - // Image dimensions must not exceed pointer size - let (v_stride, ow) = width.overflowing_mul(4); - if ow { - return Err(ImageError::Limits(LimitError::from_kind( - LimitErrorKind::InsufficientMemory, - ))); - } - let (_, ow) = v_stride.overflowing_mul(height); - if ow { - return Err(ImageError::Limits(LimitError::from_kind( - LimitErrorKind::InsufficientMemory, - ))); - } - Ok(()) -} - -fn check_plane_dimension_preconditions( - width: usize, - height: usize, - target_width: usize, - target_height: usize, -) -> Result<(), ImageError> { - // This is suspicious if this happens, better fail early - if width == 0 || height == 0 { - return Err(ImageError::Limits(LimitError::from_kind( - LimitErrorKind::DimensionError, - ))); - } - // Plane dimensions must not exceed pointer size - let (_, ow) = width.overflowing_mul(height); - if ow { - return Err(ImageError::Limits(LimitError::from_kind( - LimitErrorKind::InsufficientMemory, - ))); - } - // This should never happen that plane size differs from target size - if target_width != width || target_height != height { - return Err(ImageError::Limits(LimitError::from_kind( - LimitErrorKind::DimensionError, - ))); - } - Ok(()) -} - impl ImageDecoder for AvifDecoder { fn dimensions(&self) -> (u32, u32) { (self.picture.width(), self.picture.height()) @@ -382,7 +331,12 @@ impl ImageDecoder for AvifDecoder { assert!(bit_depth == 8 || bit_depth == 10 || bit_depth == 12); let (width, height) = self.dimensions(); - check_target_rgba_dimension_preconditions(width as usize, height as usize)?; + // This is suspicious if this happens, better fail early + if width == 0 || height == 0 { + return Err(ImageError::Limits(LimitError::from_kind( + LimitErrorKind::DimensionError, + ))); + } let yuv_range = match self.picture.color_range() { dav1d::pixel::YUVRange::Limited => YuvIntensityRange::Tv, @@ -403,46 +357,29 @@ impl ImageDecoder for AvifDecoder { } if bit_depth == 8 { - if self.picture.pixel_layout() != PixelLayout::I400 { - let ref_y = self.picture.plane(PlanarImageComponent::Y); - let ref_u = self.picture.plane(PlanarImageComponent::U); - let ref_v = self.picture.plane(PlanarImageComponent::V); - - let image = YuvPlanarImage { - y_plane: ref_y.as_ref(), - y_stride: self.picture.stride(PlanarImageComponent::Y) as usize, - u_plane: ref_u.as_ref(), - u_stride: self.picture.stride(PlanarImageComponent::U) as usize, - v_plane: ref_v.as_ref(), - v_stride: self.picture.stride(PlanarImageComponent::V) as usize, - width: width as usize, - height: height as usize, - }; - - if !is_identity { - let worker = match self.picture.pixel_layout() { - PixelLayout::I400 => unreachable!(), - PixelLayout::I420 => yuv420_to_rgba8, - PixelLayout::I422 => yuv422_to_rgba8, - PixelLayout::I444 => yuv444_to_rgba8, - }; - - worker(image, buf, yuv_range, color_matrix)?; - } else { - gbr_to_rgba8(image, buf)?; - } - } else { - let plane = self.picture.plane(PlanarImageComponent::Y); + let ref_y = self.picture.plane(PlanarImageComponent::Y); + let ref_u = self.picture.plane(PlanarImageComponent::U); + let ref_v = self.picture.plane(PlanarImageComponent::V); + + let 
image = YuvPlanarImage { + y_plane: ref_y.as_ref(), + y_stride: self.picture.stride(PlanarImageComponent::Y) as usize, + u_plane: ref_u.as_ref(), + u_stride: self.picture.stride(PlanarImageComponent::U) as usize, + v_plane: ref_v.as_ref(), + v_stride: self.picture.stride(PlanarImageComponent::V) as usize, + width: width as usize, + height: height as usize, + }; - let gray_image = YuvGrayImage { - y_plane: plane.as_ref(), - y_stride: self.picture.stride(PlanarImageComponent::Y) as usize, - width: width as usize, - height: height as usize, - }; + let worker = match self.picture.pixel_layout() { + PixelLayout::I400 => yuv400_to_rgba8, + PixelLayout::I420 => yuv420_to_rgba8, + PixelLayout::I422 => yuv422_to_rgba8, + PixelLayout::I444 => yuv444_to_rgba8, + }; - yuv400_to_rgba8(gray_image, buf, yuv_range, color_matrix)?; - } + worker(image, buf, yuv_range, color_matrix)?; // Squashing alpha plane into a picture if let Some(picture) = self.alpha_picture { @@ -453,13 +390,6 @@ impl ImageDecoder for AvifDecoder { ))); } - check_plane_dimension_preconditions( - picture.width() as usize, - picture.height() as usize, - width as usize, - height as usize, - )?; - let stride = picture.stride(PlanarImageComponent::Y) as usize; let plane = picture.plane(PlanarImageComponent::Y); @@ -473,159 +403,154 @@ impl ImageDecoder for AvifDecoder { } } } else { - // 8+ bit-depth case - let rgba16_buf: &mut [u16] = match bytemuck::try_cast_slice_mut(buf) { - Ok(slice) => slice, - Err(_) => { - return Err(ImageError::Decoding(DecodingError::new( - ImageFormat::Avif.into(), - AvifDecoderError::MemoryLayout, - ))); - } - }; + // // 8+ bit-depth case // dav1d may return not aligned and not correctly constrained data, // or at least I can't find guarantees on that // so if it is happened, instead casting we'll need to reshape it into a target slice // required criteria: bytemuck allows this align of this data, and stride must be dividable by 2 - let y_dav1d_plane = self.picture.plane(PlanarImageComponent::Y); + if let Ok(buf) = bytemuck::try_cast_slice_mut(buf) { + let target_slice: &mut [u16] = buf; + self.process_16bit_picture(target_slice, yuv_range, color_matrix)?; + } else { + // If buffer from Decoder is unaligned + let mut aligned_store = vec![0u16; buf.len() / 2]; + self.process_16bit_picture(&mut aligned_store, yuv_range, color_matrix)?; + for (dst, src) in buf.chunks_exact_mut(2).zip(aligned_store.iter()) { + let bytes = src.to_ne_bytes(); + dst[0] = bytes[0]; + dst[1] = bytes[1]; + } + } + } + + Ok(()) + } - let y_plane_view = transmute_y_plane16( - &y_dav1d_plane, - self.picture.stride(PlanarImageComponent::Y) as usize, + fn read_image_boxed(self: Box, buf: &mut [u8]) -> ImageResult<()> { + (*self).read_image(buf) + } +} + +impl AvifDecoder { + fn process_16bit_picture( + &self, + target: &mut [u16], + yuv_range: YuvIntensityRange, + color_matrix: YuvStandardMatrix, + ) -> ImageResult<()> { + let y_dav1d_plane = self.picture.plane(PlanarImageComponent::Y); + + let (width, height) = (self.picture.width(), self.picture.height()); + let bit_depth = self.picture.bit_depth(); + + let y_plane_view = transmute_y_plane16( + &y_dav1d_plane, + self.picture.stride(PlanarImageComponent::Y) as usize, + width as usize, + height as usize, + ); + + let u_dav1d_plane = self.picture.plane(PlanarImageComponent::U); + let v_dav1d_plane = self.picture.plane(PlanarImageComponent::V); + let mut u_plane_view = Plane16View::default(); + let mut v_plane_view = Plane16View::default(); + + if self.picture.pixel_layout() != 
PixelLayout::I400 { + u_plane_view = transmute_chroma_plane16( + &u_dav1d_plane, + self.picture.pixel_layout(), + self.picture.stride(PlanarImageComponent::U) as usize, width as usize, height as usize, ); + v_plane_view = transmute_chroma_plane16( + &v_dav1d_plane, + self.picture.pixel_layout(), + self.picture.stride(PlanarImageComponent::V) as usize, + width as usize, + height as usize, + ); + } - if self.picture.pixel_layout() != PixelLayout::I400 { - let u_dav1d_plane = self.picture.plane(PlanarImageComponent::U); - let v_dav1d_plane = self.picture.plane(PlanarImageComponent::V); - - let u_plane_view = transmute_chroma_plane16( - &u_dav1d_plane, - self.picture.pixel_layout(), - self.picture.stride(PlanarImageComponent::U) as usize, - width as usize, - height as usize, - ); - let v_plane_view = transmute_chroma_plane16( - &v_dav1d_plane, - self.picture.pixel_layout(), - self.picture.stride(PlanarImageComponent::V) as usize, - width as usize, - height as usize, - ); - - let image = YuvPlanarImage { - y_plane: y_plane_view.data.as_ref(), - y_stride: y_plane_view.stride, - u_plane: u_plane_view.data.as_ref(), - u_stride: u_plane_view.stride, - v_plane: v_plane_view.data.as_ref(), - v_stride: v_plane_view.stride, - width: width as usize, - height: height as usize, - }; - - if !is_identity { - let worker = match self.picture.pixel_layout() { - PixelLayout::I400 => unreachable!(), - PixelLayout::I420 => { - if bit_depth == 10 { - yuv420_to_rgba10 - } else { - yuv420_to_rgba12 - } - } - PixelLayout::I422 => { - if bit_depth == 10 { - yuv422_to_rgba10 - } else { - yuv422_to_rgba12 - } - } - PixelLayout::I444 => { - if bit_depth == 10 { - yuv444_to_rgba10 - } else { - yuv444_to_rgba12 - } - } - }; - - worker(image, rgba16_buf, yuv_range, color_matrix)?; - } else { - let worker = if bit_depth == 10 { - gbr_to_rgba10 - } else { - gbr_to_rgba12 - }; - worker(image, rgba16_buf)?; - } - } else { - let gray_image = YuvGrayImage { - y_plane: y_plane_view.data.as_ref(), - y_stride: y_plane_view.stride, - width: width as usize, - height: height as usize, - }; - let worker = if bit_depth == 10 { + let image = YuvPlanarImage { + y_plane: y_plane_view.data.as_ref(), + y_stride: y_plane_view.stride, + u_plane: u_plane_view.data.as_ref(), + u_stride: u_plane_view.stride, + v_plane: v_plane_view.data.as_ref(), + v_stride: v_plane_view.stride, + width: width as usize, + height: height as usize, + }; + + let worker = match self.picture.pixel_layout() { + PixelLayout::I400 => { + if bit_depth == 10 { yuv400_to_rgba10 } else { yuv400_to_rgba12 - }; - worker(gray_image, rgba16_buf, yuv_range, color_matrix)?; + } } - - // Squashing alpha plane into a picture - if let Some(picture) = self.alpha_picture { - if picture.pixel_layout() != PixelLayout::I400 { - return Err(ImageError::Decoding(DecodingError::new( - ImageFormat::Avif.into(), - AvifDecoderError::AlphaPlaneFormat(picture.pixel_layout()), - ))); + PixelLayout::I420 => { + if bit_depth == 10 { + yuv420_to_rgba10 + } else { + yuv420_to_rgba12 } - - check_plane_dimension_preconditions( - picture.width() as usize, - picture.height() as usize, - width as usize, - height as usize, - )?; - - let a_dav1d_plane = picture.plane(PlanarImageComponent::Y); - let a_plane_view = transmute_y_plane16( - &a_dav1d_plane, - picture.stride(PlanarImageComponent::Y) as usize, - width as usize, - height as usize, - ); - - for (buf, slice) in Iterator::zip( - rgba16_buf.chunks_exact_mut(width as usize * 4), - a_plane_view.data.as_ref().chunks_exact(a_plane_view.stride), - ) { - for 
(rgba, a_src) in buf.chunks_exact_mut(4).zip(slice) { - rgba[3] = *a_src; - } + } + PixelLayout::I422 => { + if bit_depth == 10 { + yuv422_to_rgba10 + } else { + yuv422_to_rgba12 + } + } + PixelLayout::I444 => { + if bit_depth == 10 { + yuv444_to_rgba10 + } else { + yuv444_to_rgba12 } } + }; + worker(image, target, yuv_range, color_matrix)?; + + // Squashing alpha plane into a picture + if let Some(picture) = &self.alpha_picture { + if picture.pixel_layout() != PixelLayout::I400 { + return Err(ImageError::Decoding(DecodingError::new( + ImageFormat::Avif.into(), + AvifDecoderError::AlphaPlaneFormat(picture.pixel_layout()), + ))); + } + + let a_dav1d_plane = picture.plane(PlanarImageComponent::Y); + let a_plane_view = transmute_y_plane16( + &a_dav1d_plane, + picture.stride(PlanarImageComponent::Y) as usize, + width as usize, + height as usize, + ); - // Expand current bit depth to target 16 - let target_expand_bits = 16u32.saturating_sub(self.picture.bit_depth() as u32); - if target_expand_bits > 0 { - for item in rgba16_buf.iter_mut() { - *item <<= target_expand_bits; + for (buf, slice) in Iterator::zip( + target.chunks_exact_mut(width as usize * 4), + a_plane_view.data.as_ref().chunks_exact(a_plane_view.stride), + ) { + for (rgba, a_src) in buf.chunks_exact_mut(4).zip(slice) { + rgba[3] = *a_src; } } } - Ok(()) - } + // Expand current bit depth to target 16 + let target_expand_bits = 16u32 - self.picture.bit_depth() as u32; + for item in target.iter_mut() { + *item <<= target_expand_bits; + } - fn read_image_boxed(self: Box, buf: &mut [u8]) -> ImageResult<()> { - (*self).read_image(buf) + Ok(()) } } diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index 4f1f386c8b..a41b99a7a3 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -199,6 +199,7 @@ pub(crate) enum YuvStandardMatrix { Bt2020, Smpte240, Bt470_6, + Identity, } #[derive(Debug, Copy, Clone, PartialOrd, PartialEq)] @@ -230,6 +231,7 @@ impl YuvStandardMatrix { kr: 0.2220f32, kb: 0.0713f32, }, + YuvStandardMatrix::Identity => unreachable!(), } } } @@ -245,13 +247,6 @@ pub(crate) struct YuvPlanarImage<'a, T> { pub(crate) height: usize, } -pub(crate) struct YuvGrayImage<'a, T> { - pub(crate) y_plane: &'a [T], - pub(crate) y_stride: usize, - pub(crate) width: usize, - pub(crate) height: usize, -} - /// Converts Yuv 400 planar format 8 bit to Rgba 8 bit /// /// # Arguments @@ -263,7 +258,7 @@ pub(crate) struct YuvGrayImage<'a, T> { /// /// pub(crate) fn yuv400_to_rgba8( - image: YuvGrayImage, + image: YuvPlanarImage, rgba: &mut [u8], range: YuvIntensityRange, matrix: YuvStandardMatrix, @@ -284,7 +279,7 @@ pub(crate) fn yuv400_to_rgba8( /// /// pub(crate) fn yuv400_to_rgba10( - image: YuvGrayImage, + image: YuvPlanarImage, rgba: &mut [u16], range: YuvIntensityRange, matrix: YuvStandardMatrix, @@ -305,7 +300,7 @@ pub(crate) fn yuv400_to_rgba10( /// /// pub(crate) fn yuv400_to_rgba12( - image: YuvGrayImage, + image: YuvPlanarImage, rgba: &mut [u16], range: YuvIntensityRange, matrix: YuvStandardMatrix, @@ -331,7 +326,7 @@ fn yuv400_to_rgbx_impl< const CHANNELS: usize, const BIT_DEPTH: usize, >( - image: YuvGrayImage, + image: YuvPlanarImage, rgba: &mut [V], range: YuvIntensityRange, matrix: YuvStandardMatrix, @@ -1080,13 +1075,17 @@ where /// * `matrix`: see [YuvStandardMatrix] /// /// -pub(super) fn yuv444_to_rgba8( +pub(crate) fn yuv444_to_rgba8( image: YuvPlanarImage, rgba: &mut [u8], range: YuvIntensityRange, matrix: YuvStandardMatrix, ) -> Result<(), ImageError> { - yuv444_to_rgbx_impl::(image, rgba, range, 
matrix) + if matrix == YuvStandardMatrix::Identity { + gbr_to_rgba8(image, rgba) + } else { + yuv444_to_rgbx_impl::(image, rgba, range, matrix) + } } /// Converts Yuv 444 planar format 10 bit-depth to Rgba 10 bit @@ -1107,7 +1106,11 @@ pub(super) fn yuv444_to_rgba10( range: YuvIntensityRange, matrix: YuvStandardMatrix, ) -> Result<(), ImageError> { - yuv444_to_rgbx_impl::(image, rgba, range, matrix) + if matrix == YuvStandardMatrix::Identity { + gbr_to_rgba10(image, rgba) + } else { + yuv444_to_rgbx_impl::(image, rgba, range, matrix) + } } /// Converts Yuv 444 planar format 12 bit-depth to Rgba 12 bit @@ -1128,7 +1131,11 @@ pub(super) fn yuv444_to_rgba12( range: YuvIntensityRange, matrix: YuvStandardMatrix, ) -> Result<(), ImageError> { - yuv444_to_rgbx_impl::(image, rgba, range, matrix) + if matrix == YuvStandardMatrix::Identity { + gbr_to_rgba12(image, rgba) + } else { + yuv444_to_rgbx_impl::(image, rgba, range, matrix) + } } /// Converts Yuv 444 planar format to Rgba @@ -1262,7 +1269,7 @@ where /// * `matrix`: see [YuvStandardMatrix] /// /// -pub(crate) fn gbr_to_rgba8(image: YuvPlanarImage, rgb: &mut [u8]) -> Result<(), ImageError> { +fn gbr_to_rgba8(image: YuvPlanarImage, rgb: &mut [u8]) -> Result<(), ImageError> { gbr_to_rgbx_impl::(image, rgb) } @@ -1278,7 +1285,7 @@ pub(crate) fn gbr_to_rgba8(image: YuvPlanarImage, rgb: &mut [u8]) -> Result< /// * `matrix`: see [YuvStandardMatrix] /// /// -pub(crate) fn gbr_to_rgba10(image: YuvPlanarImage, rgb: &mut [u16]) -> Result<(), ImageError> { +fn gbr_to_rgba10(image: YuvPlanarImage, rgb: &mut [u16]) -> Result<(), ImageError> { gbr_to_rgbx_impl::(image, rgb) } @@ -1294,7 +1301,7 @@ pub(crate) fn gbr_to_rgba10(image: YuvPlanarImage, rgb: &mut [u16]) -> Resu /// * `matrix`: see [YuvStandardMatrix] /// /// -pub(crate) fn gbr_to_rgba12(image: YuvPlanarImage, rgb: &mut [u16]) -> Result<(), ImageError> { +fn gbr_to_rgba12(image: YuvPlanarImage, rgb: &mut [u16]) -> Result<(), ImageError> { gbr_to_rgbx_impl::(image, rgb) } From 696404db2e53e615c956e6a75517a2e1e11415cc Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Fri, 1 Nov 2024 22:52:02 +0000 Subject: [PATCH 13/21] Refactoring and readability improvements --- src/codecs/avif/decoder.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index a02cf995e5..141f6dbf96 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -404,12 +404,6 @@ impl ImageDecoder for AvifDecoder { } } else { // // 8+ bit-depth case - - // dav1d may return not aligned and not correctly constrained data, - // or at least I can't find guarantees on that - // so if it is happened, instead casting we'll need to reshape it into a target slice - // required criteria: bytemuck allows this align of this data, and stride must be dividable by 2 - if let Ok(buf) = bytemuck::try_cast_slice_mut(buf) { let target_slice: &mut [u16] = buf; self.process_16bit_picture(target_slice, yuv_range, color_matrix)?; @@ -445,6 +439,11 @@ impl AvifDecoder { let (width, height) = (self.picture.width(), self.picture.height()); let bit_depth = self.picture.bit_depth(); + // dav1d may return not aligned and not correctly constrained data, + // or at least I can't find guarantees on that + // so if it is happened, instead casting we'll need to reshape it into a target slice + // required criteria: bytemuck allows this align of this data, and stride must be dividable by 2 + let y_plane_view = transmute_y_plane16( &y_dav1d_plane, 
self.picture.stride(PlanarImageComponent::Y) as usize, From ba517630c8cb414b9c889fb767297106085c27f4 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Fri, 1 Nov 2024 22:53:16 +0000 Subject: [PATCH 14/21] Rustfmt --- src/codecs/avif/decoder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index 141f6dbf96..f64acd8478 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -443,7 +443,7 @@ impl AvifDecoder { // or at least I can't find guarantees on that // so if it is happened, instead casting we'll need to reshape it into a target slice // required criteria: bytemuck allows this align of this data, and stride must be dividable by 2 - + let y_plane_view = transmute_y_plane16( &y_dav1d_plane, self.picture.stride(PlanarImageComponent::Y) as usize, From a36f5028d9845b7da4d86e0041d04105dd9e18d2 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Fri, 1 Nov 2024 22:56:26 +0000 Subject: [PATCH 15/21] Removed unnecessary check for identity --- src/codecs/avif/decoder.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index f64acd8478..910036bcac 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -343,13 +343,12 @@ impl ImageDecoder for AvifDecoder { dav1d::pixel::YUVRange::Full => YuvIntensityRange::Pc, }; - let is_identity = - self.picture.matrix_coefficients() == dav1d::pixel::MatrixCoefficients::Identity; - let color_matrix = get_matrix(self.picture.matrix_coefficients())?; // Identity matrix should be possible only on 4:4:4 - if is_identity && self.picture.pixel_layout() != PixelLayout::I444 { + if color_matrix == YuvStandardMatrix::Identity + && self.picture.pixel_layout() != PixelLayout::I444 + { return Err(ImageError::Decoding(DecodingError::new( ImageFormat::Avif.into(), AvifDecoderError::YuvLayoutOnIdentityMatrix(self.picture.pixel_layout()), From b2e7a7342ee52d8e8589eae0a87dc5132057394e Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Fri, 1 Nov 2024 22:58:12 +0000 Subject: [PATCH 16/21] Asserts on `Identity` --- src/codecs/avif/yuv.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index a41b99a7a3..747cce8381 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -350,6 +350,11 @@ where }, "Unsupported bit depth and data type combination" ); + assert_ne!( + matrix, + YuvStandardMatrix::Identity, + "Identity matrix cannot be used on 4:0:0" + ); let y_plane = image.y_plane; let y_stride = image.y_stride; @@ -527,6 +532,11 @@ where }, "Unsupported bit depth and data type combination" ); + assert_ne!( + matrix, + YuvStandardMatrix::Identity, + "Identity matrix cannot be used on 4:2:0" + ); let y_plane = image.y_plane; let u_plane = image.u_plane; let v_plane = image.v_plane; @@ -903,6 +913,11 @@ where }, "Unsupported bit depth and data type combination" ); + assert_ne!( + matrix, + YuvStandardMatrix::Identity, + "Identity matrix cannot be used on 4:2:2" + ); let y_plane = image.y_plane; let u_plane = image.u_plane; let v_plane = image.v_plane; From fa0bf3b78f4aa27ad941b7c8d60c902d4f0a5dec Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Sat, 2 Nov 2024 12:08:15 +0000 Subject: [PATCH 17/21] Update src/codecs/avif/yuv.rs Co-authored-by: Andreas Molzer --- src/codecs/avif/yuv.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/codecs/avif/yuv.rs 
b/src/codecs/avif/yuv.rs index 747cce8381..ffaecf1be6 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -727,9 +727,15 @@ where let cb_value = u_src.as_() - bias_uv; let cr_value = v_src.as_() - bias_uv; - let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) +#[inline(always)] +fn round(val: i32) -> i32 { + let ROUNDING: i32 = 1 << (PRECISION - 1); + ((val + ROUNDING) >> PRECISION).clamp(0, MAX) +} + + let r = round::(y_value + cr_coef * cr_value); + let b = round::(y_value + cb_coef * cb_value); + let g = round::(y_value - g_coef_1 * cr_value - g_coef_2 * cb_value); .clamp(0, max_value); if CHANNELS == 4 { From 03d14cfa9c344384c4c05fe60aec598a1837d5f2 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Sat, 2 Nov 2024 13:03:39 +0000 Subject: [PATCH 18/21] Saturating rounding shift right, small refactor in 4:2:2 and 4:2:0 --- src/codecs/avif/yuv.rs | 477 +++++++++++++++-------------------------- 1 file changed, 175 insertions(+), 302 deletions(-) diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index ffaecf1be6..2186e1e6fd 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -247,6 +247,14 @@ pub(crate) struct YuvPlanarImage<'a, T> { pub(crate) height: usize, } +#[inline(always)] +/// Saturating rounding shift right against bit depth +fn qrshr(val: i32) -> i32 { + let rounding: i32 = 1 << (PRECISION - 1); + let max_value: i32 = (1 << BIT_DEPTH) - 1; + ((val + rounding) >> PRECISION).clamp(0, max_value) +} + /// Converts Yuv 400 planar format 8 bit to Rgba 8 bit /// /// # Arguments @@ -393,7 +401,7 @@ where let range = range.get_yuv_range(BIT_DEPTH as u32); let kr_kb = matrix.get_kr_kb(); const PRECISION: i32 = 11; - const ROUNDING: i32 = 1 << (PRECISION - 1); + let inverse_transform = get_inverse_transform( (1 << BIT_DEPTH) - 1, range.range_y, @@ -416,7 +424,7 @@ where for (y_src, rgb_dst) in y_src.iter().zip(rgb_chunks) { let y_value = (y_src.as_() - bias_y) * y_coef; - let r = ((y_value + ROUNDING) >> PRECISION).clamp(0, max_value); + let r = qrshr::(y_value); rgb_dst[0] = r.as_(); rgb_dst[1] = r.as_(); rgb_dst[2] = r.as_(); @@ -490,6 +498,114 @@ pub(crate) fn yuv420_to_rgba12( yuv420_to_rgbx::(image, rgb, range, matrix) } +#[inline] +fn process_halved_chroma_row< + V: Copy + AsPrimitive + 'static + Sized, + const PRECISION: i32, + const CHANNELS: usize, + const BIT_DEPTH: usize, +>( + image: YuvPlanarImage, + rgba: &mut [V], + transform: &CbCrInverseTransform, + range: &YuvChromaRange, +) where + i32: AsPrimitive, +{ + let cr_coef = transform.cr_coef; + let cb_coef = transform.cb_coef; + let y_coef = transform.y_coef; + let g_coef_1 = transform.g_coeff_1; + let g_coef_2 = transform.g_coeff_2; + + let max_value = (1 << BIT_DEPTH) - 1; + + let bias_y = range.bias_y as i32; + let bias_uv = range.bias_uv as i32; + let y_iter = image.y_plane.chunks_exact(2); + let rgb_chunks = rgba.chunks_exact_mut(CHANNELS * 2); + for (((y_src, &u_src), &v_src), rgb_dst) in + y_iter.zip(image.u_plane).zip(image.v_plane).zip(rgb_chunks) + { + let y_value: i32 = (y_src[0].as_() - bias_y) * y_coef; + let cb_value: i32 = u_src.as_() - bias_uv; + let cr_value: i32 = v_src.as_() - bias_uv; + + let r = qrshr::(y_value + cr_coef * cr_value); + let b = qrshr::(y_value + cb_coef * cb_value); + let g = qrshr::(y_value - g_coef_1 * cr_value - g_coef_2 * cb_value); 
+ + if CHANNELS == 4 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + } else { + unreachable!(); + } + + let y_value = (y_src[1].as_() - bias_y) * y_coef; + + let r = qrshr::(y_value + cr_coef * cr_value); + let b = qrshr::(y_value + cb_coef * cb_value); + let g = qrshr::(y_value - g_coef_1 * cr_value - g_coef_2 * cb_value); + + if CHANNELS == 4 { + rgb_dst[4] = r.as_(); + rgb_dst[5] = g.as_(); + rgb_dst[6] = b.as_(); + rgb_dst[7] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[3] = r.as_(); + rgb_dst[4] = g.as_(); + rgb_dst[5] = b.as_(); + } else { + unreachable!(); + } + } + + // Process remainder if width is odd. + if image.width & 1 != 0 { + let y_left = image.y_plane.chunks_exact(2).remainder(); + let rgb_chunks = rgba + .chunks_exact_mut(CHANNELS * 2) + .into_remainder() + .chunks_exact_mut(CHANNELS); + let u_iter = image.u_plane.iter().rev(); + let v_iter = image.v_plane.iter().rev(); + + for (((y_src, u_src), v_src), rgb_dst) in + y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks) + { + let y_value = (y_src.as_() - bias_y) * y_coef; + let cb_value = u_src.as_() - bias_uv; + let cr_value = v_src.as_() - bias_uv; + + let r = qrshr::(y_value + cr_coef * cr_value); + let b = qrshr::(y_value + cb_coef * cb_value); + let g = + qrshr::(y_value - g_coef_1 * cr_value - g_coef_2 * cb_value); + + if CHANNELS == 4 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + rgb_dst[3] = max_value.as_(); + } else if CHANNELS == 3 { + rgb_dst[0] = r.as_(); + rgb_dst[1] = g.as_(); + rgb_dst[2] = b.as_(); + } else { + unreachable!(); + } + } + } +} + /// Converts YUV420 to Rgba /// /// Stride here is not supported as it can be in passed from FFI. 
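`process_halved_chroma_row` leans on the 4:2:0/4:2:2 property that one chroma sample covers a horizontal pair of luma samples, with a lone trailing pixel reusing the last chroma value when the width is odd. A toy nearest-neighbour upsampler showing the same pairing, separate from anything in this module:

fn upsample_chroma_row(chroma: &[u8], width: usize) -> Vec<u8> {
    assert!(!chroma.is_empty());
    // Each chroma sample serves the two luma samples it belongs to; an odd
    // trailing pixel falls back to the last available chroma sample.
    (0..width)
        .map(|x| chroma[(x / 2).min(chroma.len() - 1)])
        .collect()
}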
@@ -551,10 +667,7 @@ where check_rgb_preconditions(rgb, image.width * CHANNELS, image.height)?; - let max_value = (1 << BIT_DEPTH) - 1; - const PRECISION: i32 = 11; - const ROUNDING: i32 = 1 << (PRECISION - 1); let range = range.get_yuv_range(BIT_DEPTH as u32); let kr_kb = matrix.get_kr_kb(); @@ -566,14 +679,6 @@ where kr_kb.kb, PRECISION as u32, ); - let cr_coef = inverse_transform.cr_coef; - let cb_coef = inverse_transform.cb_coef; - let y_coef = inverse_transform.y_coef; - let g_coef_1 = inverse_transform.g_coeff_1; - let g_coef_2 = inverse_transform.g_coeff_2; - - let bias_y = range.bias_y as i32; - let bias_uv = range.bias_uv as i32; let rgb_stride = image.width * CHANNELS; @@ -613,99 +718,23 @@ where // Since we're processing two rows in one loop we need to re-slice once more let y_iter = y_src.chunks_exact(y_stride); let rgb_iter = rgb.chunks_exact_mut(rgb_stride); - for (y_src, rgb) in y_iter.zip(rgb_iter) { - let y_iter = y_src.chunks_exact(2); - let rgb_chunks = rgb.chunks_exact_mut(CHANNELS * 2); - for (((y_src, &u_src), &v_src), rgb_dst) in y_iter.zip(u_src).zip(v_src).zip(rgb_chunks) - { - let y_value: i32 = (y_src[0].as_() - bias_y) * y_coef; - let cb_value: i32 = u_src.as_() - bias_uv; - let cr_value: i32 = v_src.as_() - bias_uv; - - let r = - ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = - ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) - >> PRECISION) - .clamp(0, max_value); - - if CHANNELS == 4 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); - } else if CHANNELS == 3 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - } else { - unreachable!(); - } - - let y_value = (y_src[1].as_() - bias_y) * y_coef; - - let r = - ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = - ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) - >> PRECISION) - .clamp(0, max_value); - - if CHANNELS == 4 { - rgb_dst[4] = r.as_(); - rgb_dst[5] = g.as_(); - rgb_dst[6] = b.as_(); - rgb_dst[7] = max_value.as_(); - } else if CHANNELS == 3 { - rgb_dst[3] = r.as_(); - rgb_dst[4] = g.as_(); - rgb_dst[5] = b.as_(); - } else { - unreachable!(); - } - } - - // Process remainder if width is odd. 
- if image.width & 1 != 0 { - let y_left = y_src.chunks_exact(2).remainder(); - let rgb_chunks = rgb - .chunks_exact_mut(CHANNELS * 2) - .into_remainder() - .chunks_exact_mut(CHANNELS); - let u_iter = u_src.iter().rev(); - let v_iter = v_src.iter().rev(); - - for (((y_src, u_src), v_src), rgb_dst) in - y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks) - { - let y_value = (y_src.as_() - bias_y) * y_coef; - let cb_value = u_src.as_() - bias_uv; - let cr_value = v_src.as_() - bias_uv; - - let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION) - .clamp(0, max_value); - let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION) - .clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) - >> PRECISION) - .clamp(0, max_value); - - if CHANNELS == 4 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); - } else if CHANNELS == 3 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - } else { - unreachable!(); - } - } - } + for (y_src, rgba) in y_iter.zip(rgb_iter) { + let image = YuvPlanarImage { + y_plane: y_src, + y_stride: 0, + u_plane: u_src, + u_stride: 0, + v_plane: v_src, + v_stride: 0, + width: image.width, + height: image.height, + }; + process_halved_chroma_row::( + image, + rgba, + &inverse_transform, + &range, + ); } } @@ -719,99 +748,23 @@ where let u_iter = u_plane.chunks_exact(u_stride).rev(); let v_iter = v_plane.chunks_exact(v_stride).rev(); - for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { - let y_iter = y_src.chunks_exact(2); - let rgb_chunks = rgb.chunks_exact_mut(CHANNELS * 2); - for (((y_src, u_src), v_src), rgb_dst) in y_iter.zip(u_src).zip(v_src).zip(rgb_chunks) { - let y_value = (y_src[0].as_() - bias_y) * y_coef; - let cb_value = u_src.as_() - bias_uv; - let cr_value = v_src.as_() - bias_uv; - -#[inline(always)] -fn round(val: i32) -> i32 { - let ROUNDING: i32 = 1 << (PRECISION - 1); - ((val + ROUNDING) >> PRECISION).clamp(0, MAX) -} - - let r = round::(y_value + cr_coef * cr_value); - let b = round::(y_value + cb_coef * cb_value); - let g = round::(y_value - g_coef_1 * cr_value - g_coef_2 * cb_value); - .clamp(0, max_value); - - if CHANNELS == 4 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); - } else if CHANNELS == 3 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - } else { - unreachable!(); - } - - let y_value = (y_src[1].as_() - bias_y) * y_coef; - - let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) - .clamp(0, max_value); - - if CHANNELS == 4 { - rgb_dst[4] = r.as_(); - rgb_dst[5] = g.as_(); - rgb_dst[6] = b.as_(); - rgb_dst[7] = max_value.as_(); - } else if CHANNELS == 3 { - rgb_dst[3] = r.as_(); - rgb_dst[4] = g.as_(); - rgb_dst[5] = b.as_(); - } else { - unreachable!(); - } - } - - // Process remainder if width is odd. 
- - if image.width & 1 != 0 { - let y_left = y_src.chunks_exact(2).remainder(); - let rgb_chunks = rgb - .chunks_exact_mut(CHANNELS * 2) - .into_remainder() - .chunks_exact_mut(CHANNELS); - let u_iter = u_plane.iter().rev(); - let v_iter = v_plane.iter().rev(); - - for (((y_src, u_src), v_src), rgb_dst) in - y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks) - { - let y_value = (y_src.as_() - bias_y) * y_coef; - let cb_value = u_src.as_() - bias_uv; - let cr_value = v_src.as_() - bias_uv; - - let r = - ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = - ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) - >> PRECISION) - .clamp(0, max_value); - - if CHANNELS == 4 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); - } else if CHANNELS == 3 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - } else { - unreachable!(); - } - } - } + for (((y_src, u_src), v_src), rgba) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { + let image = YuvPlanarImage { + y_plane: y_src, + y_stride: 0, + u_plane: u_src, + u_stride: 0, + v_plane: v_src, + v_stride: 0, + width: image.width, + height: image.height, + }; + process_halved_chroma_row::( + image, + rgba, + &inverse_transform, + &range, + ); } Ok(()) @@ -938,12 +891,10 @@ where check_rgb_preconditions(rgb, image.width * CHANNELS, image.height)?; - let max_value = (1 << BIT_DEPTH) - 1; - let range = range.get_yuv_range(BIT_DEPTH as u32); let kr_kb = matrix.get_kr_kb(); const PRECISION: i32 = 11; - const ROUNDING: i32 = 1 << (PRECISION - 1); + let inverse_transform = get_inverse_transform( (1 << BIT_DEPTH) - 1, range.range_y, @@ -952,14 +903,6 @@ where kr_kb.kb, PRECISION as u32, ); - let cr_coef = inverse_transform.cr_coef; - let cb_coef = inverse_transform.cb_coef; - let y_coef = inverse_transform.y_coef; - let g_coef_1 = inverse_transform.g_coeff_1; - let g_coef_2 = inverse_transform.g_coeff_2; - - let bias_y = range.bias_y as i32; - let bias_uv = range.bias_uv as i32; /* Sample 4x4 YUV422 planar image @@ -994,93 +937,23 @@ where let v_iter = v_plane.chunks_exact(v_stride); // All branches on generic const will be optimized out. 
- for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { - let y_iter = y_src.chunks_exact(2); - let rgb_chunks = rgb.chunks_exact_mut(CHANNELS * 2); - - for (((y_src, u_src), v_src), rgb_dst) in y_iter.zip(u_src).zip(v_src).zip(rgb_chunks) { - let y_value = (y_src[0].as_() - bias_y) * y_coef; - let cb_value = u_src.as_() - bias_uv; - let cr_value = v_src.as_() - bias_uv; - - let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) - .clamp(0, max_value); - - if CHANNELS == 4 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); - } else if CHANNELS == 3 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - } else { - unreachable!(); - } - - let y_value = (y_src[1].as_() - bias_y) * y_coef; - - let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) - .clamp(0, max_value); - - if CHANNELS == 4 { - rgb_dst[4] = r.as_(); - rgb_dst[5] = g.as_(); - rgb_dst[6] = b.as_(); - rgb_dst[7] = max_value.as_(); - } else if CHANNELS == 3 { - rgb_dst[3] = r.as_(); - rgb_dst[4] = g.as_(); - rgb_dst[5] = b.as_(); - } else { - unreachable!(); - } - } - - // Process left pixels for odd images, this should work since luma must be always exact - if width & 1 != 0 { - let y_left = y_src.chunks_exact(2).remainder(); - let rgb_chunks = rgb - .chunks_exact_mut(CHANNELS * 2) - .into_remainder() - .chunks_exact_mut(CHANNELS); - let u_iter = u_src.iter().rev(); - let v_iter = v_src.iter().rev(); - - for (((y_src, u_src), v_src), rgb_dst) in - y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks) - { - let y_value = (y_src.as_() - bias_y) * y_coef; - let cb_value = u_src.as_() - bias_uv; - let cr_value = v_src.as_() - bias_uv; - - let r = - ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = - ((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) - >> PRECISION) - .clamp(0, max_value); - - if CHANNELS == 4 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - rgb_dst[3] = max_value.as_(); - } else if CHANNELS == 3 { - rgb_dst[0] = r.as_(); - rgb_dst[1] = g.as_(); - rgb_dst[2] = b.as_(); - } else { - unreachable!(); - } - } - } + for (((y_src, u_src), v_src), rgba) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { + let image = YuvPlanarImage { + y_plane: y_src, + y_stride: 0, + u_plane: u_src, + u_stride: 0, + v_plane: v_src, + v_stride: 0, + width: image.width, + height: image.height, + }; + process_halved_chroma_row::( + image, + rgba, + &inverse_transform, + &range, + ); } Ok(()) @@ -1220,7 +1093,7 @@ where let range = range.get_yuv_range(BIT_DEPTH as u32); let kr_kb = matrix.get_kr_kb(); const PRECISION: i32 = 11; - const ROUNDING: i32 = 1 << (PRECISION - 1); + let inverse_transform = get_inverse_transform( (1 << BIT_DEPTH) - 1, range.range_y, @@ -1257,10 +1130,10 @@ where let cb_value = u_src.as_() - bias_uv; let cr_value = v_src.as_() - bias_uv; - let r = ((y_value + cr_coef * cr_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let b = 
((y_value + cb_coef * cb_value + ROUNDING) >> PRECISION).clamp(0, max_value); - let g = ((y_value - g_coef_1 * cr_value - g_coef_2 * cb_value + ROUNDING) >> PRECISION) - .clamp(0, max_value); + let r = qrshr::(y_value + cr_coef * cr_value); + let b = qrshr::(y_value + cb_coef * cb_value); + let g = + qrshr::(y_value - g_coef_1 * cr_value - g_coef_2 * cb_value); if CHANNELS == 4 { rgb_dst[0] = r.as_(); From 7f78d8257d33fb50fe762749d4f834483405723c Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Sat, 2 Nov 2024 18:22:55 +0000 Subject: [PATCH 19/21] Added missed `Identity` in `Limited` range --- src/codecs/avif/yuv.rs | 98 +++++++++++++++++++++++++++++------------- 1 file changed, 69 insertions(+), 29 deletions(-) diff --git a/src/codecs/avif/yuv.rs b/src/codecs/avif/yuv.rs index 2186e1e6fd..77155b3286 100644 --- a/src/codecs/avif/yuv.rs +++ b/src/codecs/avif/yuv.rs @@ -976,7 +976,7 @@ pub(crate) fn yuv444_to_rgba8( matrix: YuvStandardMatrix, ) -> Result<(), ImageError> { if matrix == YuvStandardMatrix::Identity { - gbr_to_rgba8(image, rgba) + gbr_to_rgba8(image, rgba, range) } else { yuv444_to_rgbx_impl::(image, rgba, range, matrix) } @@ -1001,7 +1001,7 @@ pub(super) fn yuv444_to_rgba10( matrix: YuvStandardMatrix, ) -> Result<(), ImageError> { if matrix == YuvStandardMatrix::Identity { - gbr_to_rgba10(image, rgba) + gbr_to_rgba10(image, rgba, range) } else { yuv444_to_rgbx_impl::(image, rgba, range, matrix) } @@ -1026,7 +1026,7 @@ pub(super) fn yuv444_to_rgba12( matrix: YuvStandardMatrix, ) -> Result<(), ImageError> { if matrix == YuvStandardMatrix::Identity { - gbr_to_rgba12(image, rgba) + gbr_to_rgba12(image, rgba, range) } else { yuv444_to_rgbx_impl::(image, rgba, range, matrix) } @@ -1160,11 +1160,14 @@ where /// * `image`: see [YuvPlanarImage] /// * `rgb`: RGB image layout /// * `range`: see [YuvIntensityRange] -/// * `matrix`: see [YuvStandardMatrix] /// /// -fn gbr_to_rgba8(image: YuvPlanarImage, rgb: &mut [u8]) -> Result<(), ImageError> { - gbr_to_rgbx_impl::(image, rgb) +fn gbr_to_rgba8( + image: YuvPlanarImage, + rgb: &mut [u8], + range: YuvIntensityRange, +) -> Result<(), ImageError> { + gbr_to_rgbx_impl::(image, rgb, range) } /// Converts Gbr 10 bit planar format to Rgba 10 bit-depth @@ -1174,13 +1177,16 @@ fn gbr_to_rgba8(image: YuvPlanarImage, rgb: &mut [u8]) -> Result<(), ImageEr /// # Arguments /// /// * `image`: see [YuvPlanarImage] -/// * `rgb`: RGB image layout +/// * `rgba`: RGBx image layout /// * `range`: see [YuvIntensityRange] -/// * `matrix`: see [YuvStandardMatrix] /// /// -fn gbr_to_rgba10(image: YuvPlanarImage, rgb: &mut [u16]) -> Result<(), ImageError> { - gbr_to_rgbx_impl::(image, rgb) +fn gbr_to_rgba10( + image: YuvPlanarImage, + rgba: &mut [u16], + range: YuvIntensityRange, +) -> Result<(), ImageError> { + gbr_to_rgbx_impl::(image, rgba, range) } /// Converts Gbr 12 bit planar format to Rgba 12 bit-depth @@ -1190,13 +1196,16 @@ fn gbr_to_rgba10(image: YuvPlanarImage, rgb: &mut [u16]) -> Result<(), Imag /// # Arguments /// /// * `image`: see [YuvPlanarImage] -/// * `rgb`: RGB image layout +/// * `rgba`: RGBx image layout /// * `range`: see [YuvIntensityRange] -/// * `matrix`: see [YuvStandardMatrix] /// /// -fn gbr_to_rgba12(image: YuvPlanarImage, rgb: &mut [u16]) -> Result<(), ImageError> { - gbr_to_rgbx_impl::(image, rgb) +fn gbr_to_rgba12( + image: YuvPlanarImage, + rgba: &mut [u16], + range: YuvIntensityRange, +) -> Result<(), ImageError> { + gbr_to_rgbx_impl::(image, rgba, range) } /// Converts Gbr planar format to Rgba @@ -1208,7 +1217,6 @@ fn 
gbr_to_rgba12(image: YuvPlanarImage, rgb: &mut [u16]) -> Result<(), Imag /// * `image`: see [YuvPlanarImage] /// * `rgb`: RGB image layout /// * `range`: see [YuvIntensityRange] -/// * `matrix`: see [YuvStandardMatrix] /// /// #[inline] @@ -1218,7 +1226,8 @@ fn gbr_to_rgbx_impl< const BIT_DEPTH: usize, >( image: YuvPlanarImage, - rgb: &mut [V], + rgba: &mut [V], + yuv_range: YuvIntensityRange, ) -> Result<(), ImageError> where i32: AsPrimitive, @@ -1252,28 +1261,59 @@ where check_yuv_plane_preconditions(u_plane, PlaneDefinition::U, u_stride, height)?; check_yuv_plane_preconditions(v_plane, PlaneDefinition::V, v_stride, height)?; - check_rgb_preconditions(rgb, width * CHANNELS, height)?; + check_rgb_preconditions(rgba, width * CHANNELS, height)?; let max_value = (1 << BIT_DEPTH) - 1; let rgb_stride = width * CHANNELS; let y_iter = y_plane.chunks_exact(y_stride); - let rgb_iter = rgb.chunks_exact_mut(rgb_stride); + let rgb_iter = rgba.chunks_exact_mut(rgb_stride); let u_iter = u_plane.chunks_exact(u_stride); let v_iter = v_plane.chunks_exact(v_stride); - for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { - let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); - - for (((&y_src, &u_src), &v_src), rgb_dst) in - y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks) - { - rgb_dst[0] = v_src; - rgb_dst[1] = y_src; - rgb_dst[2] = u_src; - if CHANNELS == 4 { - rgb_dst[3] = max_value.as_(); + match yuv_range { + YuvIntensityRange::Tv => { + const PRECISION: i32 = 11; + // All channels on identity should use Y range + let range = yuv_range.get_yuv_range(BIT_DEPTH as u32); + let range_rgba = (1 << BIT_DEPTH) - 1; + let y_coef = + ((range_rgba as f32 / range.range_y as f32) * (1 << PRECISION) as f32) as i32; + let y_bias = range.bias_y as i32; + + for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { + let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); + + for (((&y_src, &u_src), &v_src), rgb_dst) in + y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks) + { + rgb_dst[0] = + qrshr::((v_src.as_() - y_bias) * y_coef).as_(); + rgb_dst[1] = + qrshr::((y_src.as_() - y_bias) * y_coef).as_(); + rgb_dst[2] = + qrshr::((u_src.as_() - y_bias) * y_coef).as_(); + if CHANNELS == 4 { + rgb_dst[3] = max_value.as_(); + } + } + } + } + YuvIntensityRange::Pc => { + for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) { + let rgb_chunks = rgb.chunks_exact_mut(CHANNELS); + + for (((&y_src, &u_src), &v_src), rgb_dst) in + y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks) + { + rgb_dst[0] = v_src; + rgb_dst[1] = y_src; + rgb_dst[2] = u_src; + if CHANNELS == 4 { + rgb_dst[3] = max_value.as_(); + } + } } } } From 092879c9acb3d5823aecc5973fc210728f44f04e Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Sat, 2 Nov 2024 20:40:07 +0000 Subject: [PATCH 20/21] More precise bit depth expanding --- src/codecs/avif/decoder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs index 910036bcac..203844e55a 100644 --- a/src/codecs/avif/decoder.rs +++ b/src/codecs/avif/decoder.rs @@ -545,7 +545,7 @@ impl AvifDecoder { // Expand current bit depth to target 16 let target_expand_bits = 16u32 - self.picture.bit_depth() as u32; for item in target.iter_mut() { - *item <<= target_expand_bits; + *item = (*item << target_expand_bits) | (*item >> (16 - target_expand_bits)); } Ok(()) From daa5e88c1bd864d9f85115f73e54d8017357ae51 Mon Sep 17 00:00:00 2001 From: Radzivon Bartoshyk Date: Sat, 2 
Nov 2024 21:14:48 +0000
Subject: [PATCH 21/21] Changed `Unspecified` to Bt.709

---
 src/codecs/avif/decoder.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/codecs/avif/decoder.rs b/src/codecs/avif/decoder.rs
index 203844e55a..04e43f666a 100644
--- a/src/codecs/avif/decoder.rs
+++ b/src/codecs/avif/decoder.rs
@@ -240,7 +240,7 @@ fn get_matrix(
         // and some applications prefer Bt.601 as the default.
         // For example, `Chrome` always prefers Bt.709, even for SD content.
         // However, nowadays the standard should be Bt.709 for HD+ sizes and Bt.601 otherwise.
-        dav1d::pixel::MatrixCoefficients::Unspecified => Ok(YuvStandardMatrix::Bt601),
+        dav1d::pixel::MatrixCoefficients::Unspecified => Ok(YuvStandardMatrix::Bt709),
         dav1d::pixel::MatrixCoefficients::Reserved => Err(ImageError::Unsupported(
             UnsupportedError::from_format_and_kind(
                 ImageFormat::Avif.into(),
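
For reference, the `Limited` (Tv) branch added for `Identity`/GBR content in PATCH 19/21 rescales each plane from studio range to full range with a fixed-point multiply. Below is a minimal sketch of that rescale for the 8-bit case; the helper name is mine, the rounding shift that the patch delegates to `qrshr` is inlined, and 16 and 219 are the standard 8-bit studio-range black level and span.

    // Sketch: expand an 8-bit studio-range (16..=235) sample to full range (0..=255)
    // with an 11-bit fixed-point coefficient, mirroring the Tv branch of
    // `gbr_to_rgbx_impl`. `expand_limited_8bit` is an illustrative name only.
    fn expand_limited_8bit(sample: u8) -> u8 {
        const PRECISION: i32 = 11;
        const BIAS_Y: i32 = 16; // studio-range black level
        const RANGE_Y: i32 = 219; // 235 - 16
        let y_coef = ((255.0 / RANGE_Y as f32) * (1 << PRECISION) as f32) as i32;
        let scaled = (sample as i32 - BIAS_Y) * y_coef;
        // Rounding right shift with clamping, as `qrshr` is assumed to do.
        (((scaled + (1 << (PRECISION - 1))) >> PRECISION).clamp(0, 255)) as u8
    }
    // expand_limited_8bit(16) == 0, expand_limited_8bit(235) == 255

With these constants a sample of 17 maps to 1, and out-of-range (super-black or super-white) samples are clamped rather than wrapped.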
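
PATCH 20/21 aims at a more precise expansion of 10- and 12-bit samples to 16 bits than a plain left shift, which leaves the low bits zero (10-bit white, 1023, would land at 65472 instead of 65535). The usual technique is bit replication; here is a sketch for the 10-bit case, with shift amounts that are specific to 10-to-16 expansion and chosen by me for illustration.

    // Sketch: expand a 10-bit sample to 16 bits by replicating the top bits
    // into the freshly opened low bits, so 0 stays 0 and 1023 becomes 65535.
    fn expand_10_to_16(x: u16) -> u16 {
        debug_assert!(x < 1 << 10);
        (x << 6) | (x >> 4)
    }

For 12-bit input the equivalent replication would be `(x << 4) | (x >> 8)`, which maps 4095 to 65535.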
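
The comment in PATCH 21/21 above suggests a pragmatic rule of Bt.709 for HD-and-larger frames and Bt.601 for SD when the coefficients are `Unspecified`, while the patch itself settles on Bt.709 unconditionally. If a resolution-based fallback were wanted instead, a sketch could look like the following; the helper, its parameters, and the 1280x720 threshold are my own assumptions, not part of `get_matrix`.

    // Hypothetical resolution-based fallback for `Unspecified` matrix
    // coefficients: Bt.709 for HD and larger, Bt.601 for SD.
    fn guess_matrix(width: u32, height: u32) -> YuvStandardMatrix {
        if width >= 1280 || height >= 720 {
            YuvStandardMatrix::Bt709
        } else {
            YuvStandardMatrix::Bt601
        }
    }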