diff --git a/deku-derive/src/lib.rs b/deku-derive/src/lib.rs
index 3ee77823..13c38b97 100644
--- a/deku-derive/src/lib.rs
+++ b/deku-derive/src/lib.rs
@@ -39,8 +39,10 @@ struct DekuData {
     id_type: Option,

     /// enum only: bit size of the enum `id`
-    /// `bytes` is converted to `bits` if provided
     bits: Option,
+
+    /// enum only: byte size of the enum `id`
+    bytes: Option,
 }

 impl DekuData {
@@ -80,8 +82,6 @@ impl DekuData {
             .transpose()
             .map_err(|e| e.to_compile_error())?;

-        let bits = receiver.bytes.map(|b| b * 8).or(receiver.bits);
-
         Ok(Self {
             vis: receiver.vis,
             ident: receiver.ident,
@@ -93,7 +93,8 @@ impl DekuData {
             magic: receiver.magic,
             id: receiver.id,
             id_type: receiver.id_type,
-            bits,
+            bits: receiver.bits,
+            bytes: receiver.bytes,
         })
     }

@@ -198,6 +199,9 @@ struct FieldData {
     /// field bit size
     bits: Option,

+    /// field byte size
+    bytes: Option,
+
     /// tokens providing the length of the container
     count: Option,

@@ -240,8 +244,6 @@ impl FieldData {
         FieldData::validate(&receiver)
             .map_err(|(span, msg)| syn::Error::new(span, msg).to_compile_error())?;

-        let bits = receiver.bytes.map(|b| b * 8).or(receiver.bits);
-
         let bits_read = receiver
             .bytes_read
             .map(|tokens| quote! { (#tokens) * 8 })
@@ -259,7 +261,8 @@ impl FieldData {
             ident: receiver.ident,
             ty: receiver.ty,
             endian: receiver.endian,
-            bits,
+            bits: receiver.bits,
+            bytes: receiver.bytes,
             count: receiver.count,
             bits_read,
             until: receiver.until,
diff --git a/deku-derive/src/macros/deku_read.rs b/deku-derive/src/macros/deku_read.rs
index 8a250e94..5c781adc 100644
--- a/deku-derive/src/macros/deku_read.rs
+++ b/deku-derive/src/macros/deku_read.rs
@@ -129,7 +129,7 @@ fn emit_enum(input: &DekuData) -> Result {
     let id = input.id.as_ref();
     let id_type = input.id_type.as_ref();

-    let id_args = gen_id_args(input.endian.as_ref(), input.bits)?;
+    let id_args = gen_id_args(input.endian.as_ref(), input.bits, input.bytes)?;

     let magic_read = emit_magic_read(input)?;

@@ -368,7 +368,7 @@ fn emit_field_read(
     let field_read_func = if field_reader.is_some() {
         quote! { #field_reader }
     } else {
-        let read_args = gen_field_args(field_endian, f.bits, f.ctx.as_ref())?;
+        let read_args = gen_field_args(field_endian, f.bits, f.bytes, f.ctx.as_ref())?;

         // The container limiting options are special, we need to generate `(limit, (other, ..))` for them.
         // These have a problem where when it isn't a copy type, the field will be moved.
@@ -388,7 +388,7 @@ fn emit_field_read(
             quote! {
                 {
                     use core::borrow::Borrow;
-                    DekuRead::read(rest, (deku::ctx::Limit::new_bits(deku::ctx::BitSize(usize::try_from(*((#field_bits).borrow()))?)), (#read_args)))
+                    DekuRead::read(rest, (deku::ctx::Limit::new_bits(deku::ctx::Size::Bits(usize::try_from(*((#field_bits).borrow()))?)), (#read_args)))
                 }
             }
         } else if let Some(field_until) = &f.until {
diff --git a/deku-derive/src/macros/deku_write.rs b/deku-derive/src/macros/deku_write.rs
index 033ad3d6..c6ffc8ad 100644
--- a/deku-derive/src/macros/deku_write.rs
+++ b/deku-derive/src/macros/deku_write.rs
@@ -156,7 +156,7 @@ fn emit_enum(input: &DekuData) -> Result {
     let id = input.id.as_ref();
     let id_type = input.id_type.as_ref();

-    let id_args = gen_id_args(input.endian.as_ref(), input.bits)?;
+    let id_args = gen_id_args(input.endian.as_ref(), input.bits, input.bytes)?;

     let mut variant_writes = vec![];
     let mut variant_updates = vec![];
@@ -417,7 +417,7 @@ fn emit_field_write(
     let field_write_func = if field_writer.is_some() {
         quote! { #field_writer }
     } else {
-        let write_args = gen_field_args(field_endian, f.bits, f.ctx.as_ref())?;
+        let write_args = gen_field_args(field_endian, f.bits, f.bytes, f.ctx.as_ref())?;
         quote! { #object_prefix #field_ident.write(output, (#write_args)) }
     };

diff --git a/deku-derive/src/macros/mod.rs b/deku-derive/src/macros/mod.rs
index 9e8976ad..5500534f 100644
--- a/deku-derive/src/macros/mod.rs
+++ b/deku-derive/src/macros/mod.rs
@@ -160,13 +160,18 @@ fn gen_ctx_types_and_arg(
 }

 /// Generate argument for `id`:
-/// `#deku(endian = "big", bits = "1")` -> `Endian::Big, BitSize(1)`
-fn gen_id_args(endian: Option<&syn::LitStr>, bits: Option) -> syn::Result {
+/// `#deku(endian = "big", bits = "1")` -> `Endian::Big, Size::Bits(1)`
+fn gen_id_args(
+    endian: Option<&syn::LitStr>,
+    bits: Option,
+    bytes: Option,
+) -> syn::Result {
     let endian = endian.map(gen_endian_from_str).transpose()?;
-    let bits = bits.map(|n| quote! {deku::ctx::BitSize(#n)});
+    let bits = bits.map(|n| quote! {deku::ctx::Size::Bits(#n)});
+    let bytes = bytes.map(|n| quote! {deku::ctx::Size::Bytes(#n)});

     // FIXME: Should be `into_iter` here, see /~https://github.com/rust-lang/rust/issues/66145.
-    let id_args = [endian.as_ref(), bits.as_ref()]
+    let id_args = [endian.as_ref(), bits.as_ref(), bytes.as_ref()]
         .iter()
         .filter_map(|i| *i)
         .collect::<Vec<_>>();
@@ -179,18 +184,20 @@ fn gen_id_args(endian: Option<&syn::LitStr>, bits: Option) -> syn::Result

 /// Generate argument for fields:
 ///
-/// `#deku(endian = "big", bits = "1", ctx = "a")` -> `Endian::Big, BitSize(1), a`
+/// `#deku(endian = "big", bits = "1", ctx = "a")` -> `Endian::Big, Size::Bits(1), a`
 fn gen_field_args(
     endian: Option<&syn::LitStr>,
     bits: Option,
+    bytes: Option,
     ctx: Option<&Punctuated>,
 ) -> syn::Result {
     let endian = endian.map(gen_endian_from_str).transpose()?;
-    let bits = bits.map(|n| quote! {deku::ctx::BitSize(#n)});
+    let bits = bits.map(|n| quote! {deku::ctx::Size::Bits(#n)});
+    let bytes = bytes.map(|n| quote! {deku::ctx::Size::Bytes(#n)});
     let ctx = ctx.map(|c| quote! {#c});

     // FIXME: Should be `into_iter` here, see /~https://github.com/rust-lang/rust/issues/66145.
-    let field_args = [endian.as_ref(), bits.as_ref(), ctx.as_ref()]
+    let field_args = [endian.as_ref(), bits.as_ref(), bytes.as_ref(), ctx.as_ref()]
         .iter()
         .filter_map(|i| *i)
         .collect::<Vec<_>>();
diff --git a/examples/custom_reader_and_writer.rs b/examples/custom_reader_and_writer.rs
index 90bfa8df..0046db88 100644
--- a/examples/custom_reader_and_writer.rs
+++ b/examples/custom_reader_and_writer.rs
@@ -1,11 +1,11 @@
-use deku::ctx::BitSize;
+use deku::ctx::Size;
 use deku::prelude::*;
 use std::convert::TryInto;

 fn bit_flipper_read(
     field_a: u8,
     rest: &BitSlice<Msb0, u8>,
-    bit_size: BitSize,
+    bit_size: Size,
 ) -> Result<(&BitSlice<Msb0, u8>, u8), DekuError> {
     // Access to previously read fields
     println!("field_a = 0x{:X}", field_a);
@@ -29,7 +29,7 @@ fn bit_flipper_write(
     field_a: u8,
     field_b: u8,
     output: &mut BitVec<Msb0, u8>,
-    bit_size: BitSize,
+    bit_size: Size,
 ) -> Result<(), DekuError> {
     // Access to previously written fields
     println!("field_a = 0x{:X}", field_a);
@@ -51,8 +51,8 @@ struct DekuTest {
     field_a: u8,

     #[deku(
-        reader = "bit_flipper_read(*field_a, rest, BitSize(8))",
-        writer = "bit_flipper_write(*field_a, *field_b, output, BitSize(8))"
+        reader = "bit_flipper_read(*field_a, rest, Size::Bits(8))",
+        writer = "bit_flipper_write(*field_a, *field_b, output, Size::Bits(8))"
     )]
     field_b: u8,
 }
diff --git a/src/attributes.rs b/src/attributes.rs
index cc4a31b0..c6bcf6c7 100644
--- a/src/attributes.rs
+++ b/src/attributes.rs
@@ -631,7 +631,7 @@ struct Type1 {
 // is equivalent to

 struct Type1 {
-    #[deku(ctx = "Endian::Big, BitSize(1)")]
+    #[deku(ctx = "Endian::Big, Size::Bits(1)")]
     field: u8,
 }
 ```
@@ -650,7 +650,7 @@ struct Type1 {
 struct Type1 {
     #[deku(ctx = "Endian::Big")]
     field_a: u16,
-    #[deku(ctx = "Endian::Big, BitSize(5), *field_a")] // endian is prepended
+    #[deku(ctx = "Endian::Big, Size::Bits(5), *field_a")] // endian is prepended
     field_b: SubType,
 }
 ```
diff --git a/src/ctx.rs b/src/ctx.rs
index b9c0a8e3..4c3969d3 100644
--- a/src/ctx.rs
+++ b/src/ctx.rs
@@ -1,10 +1,13 @@
 //! Types for context representation
 //! See [ctx attribute](super::attributes#ctx) for more information.

+use crate::error::DekuError;
 use core::marker::PhantomData;
-use core::ops::{Deref, DerefMut};
 use core::str::FromStr;

+#[cfg(feature = "alloc")]
+use alloc::format;
+
 /// An endian
 #[derive(Debug, Copy, Clone, Eq, PartialEq)]
 pub enum Endian {
@@ -83,7 +86,7 @@ pub enum Limit<T, Predicate: FnMut(&T) -> bool> {
     Until(Predicate, PhantomData<T>),

     /// Read until a given quantity of bits have been read
-    Bits(BitSize),
+    Size(Size),
 }

 impl<T> From<usize> for Limit<T, fn(&T) -> bool> {
@@ -98,9 +101,9 @@ impl<T, Predicate: for<'a> FnMut(&'a T) -> bool> From<Predicate> for Limit<T, Predicate>
     }
 }

-impl<T> From<BitSize> for Limit<T, fn(&T) -> bool> {
-    fn from(bits: BitSize) -> Self {
-        Limit::Bits(bits)
+impl<T> From<Size> for Limit<T, fn(&T) -> bool> {
+    fn from(size: Size) -> Self {
+        Limit::Size(size)
     }
 }
@@ -120,72 +123,77 @@ impl<T> Limit<T, fn(&T) -> bool> {
     }

     /// Constructs a new Limit that reads until the given number of bits have been read
-    pub fn new_bits(bits: BitSize) -> Self {
+    pub fn new_bits(bits: Size) -> Self {
         bits.into()
     }
 }

-/// The number bits in a field
+/// The size of a field
 #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
-pub struct BitSize(pub usize);
+pub enum Size {
+    /// bit size
+    Bits(usize),
+    /// byte size
+    Bytes(usize),
+}

-impl BitSize {
+impl Size {
     /// Convert the size in bytes to a bit size.
-    /// # Examples
-    /// ```rust
-    /// # use std::mem::size_of;
-    /// # use deku::ctx::BitSize;
-    ///
-    /// assert_eq!(BitSize::with_byte_size(1), BitSize(8));
-    /// ```
     ///
     /// # Panic
     /// Panic if `byte_size * 8` is greater than `usize::MAX`.
-    pub fn with_byte_size(byte_size: usize) -> Self {
-        Self(byte_size.checked_mul(8).expect("bit size overflow"))
+    fn bits_from_bytes(byte_size: usize) -> Self {
+        Self::Bits(byte_size.checked_mul(8).expect("bit size overflow"))
     }

     /// Returns the bit size of a type.
     /// # Examples
     /// ```rust
-    /// # use deku::ctx::BitSize;
+    /// # use deku::ctx::Size;
     ///
-    /// assert_eq!(BitSize::of::<u32>(), BitSize(4 * 8));
+    /// assert_eq!(Size::of::<u32>(), Size::Bits(4 * 8));
     /// ```
+    ///
     /// # Panics
     /// Panic if the bit size of given type is greater than `usize::MAX`
     pub fn of<T>() -> Self {
-        Self::with_byte_size(core::mem::size_of::<T>())
+        Self::bits_from_bytes(core::mem::size_of::<T>())
     }

     /// Returns the bit size of the pointed-to value
     pub fn of_val<T>(val: &T) -> Self {
-        Self::with_byte_size(core::mem::size_of_val(val))
-    }
-}
-
-impl Into<usize> for BitSize {
-    fn into(self) -> usize {
-        self.0
+        Self::bits_from_bytes(core::mem::size_of_val(val))
     }
-}

-impl From<usize> for BitSize {
-    fn from(n: usize) -> Self {
-        Self(n)
-    }
-}
-
-impl Deref for BitSize {
-    type Target = usize;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
+    /// Returns the size in bits of a Size
+    ///
+    /// # Panics
+    /// Panic if the bit size of Size::Bytes(n) is greater than `usize::MAX`
+    pub fn bit_size(&self) -> usize {
+        match *self {
+            Size::Bits(size) => size,
+            Size::Bytes(size) => size.checked_mul(8).expect("bit size overflow"),
+        }
     }
-}

-impl DerefMut for BitSize {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.0
+    /// Returns the size in bytes of a Size
+    ///
+    /// # Panics
+    /// Panic if the bit size of Size::Bytes(n) is greater than `usize::MAX`
+    pub fn byte_size(&self) -> Result<usize, DekuError> {
+        match *self {
+            Size::Bits(size) => {
+                if size % 8 == 0 {
+                    Ok(size / 8)
+                } else {
+                    Err(DekuError::InvalidParam(format!(
+                        "Bit size of {} is not a multiple of 8.
+                        Cannot be represented in bytes",
+                        size
+                    )))
+                }
+            }
+            Size::Bytes(size) => Ok(size),
+        }
     }
 }
diff --git a/src/lib.rs b/src/lib.rs
index 5b55cd3d..c56e09dd 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -219,7 +219,7 @@ use alloc::{format, vec::Vec};
 #[cfg(feature = "std")]
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

-use crate::ctx::{BitSize, Endian, Limit};
+use crate::ctx::{Endian, Limit, Size};
 use bitvec::prelude::*;
 use core::convert::TryInto;
 pub use deku_derive::*;
@@ -282,13 +282,13 @@ pub trait DekuUpdate {

 macro_rules! ImplDekuTraits {
     ($typ:ty) => {
-        impl DekuRead<(Endian, BitSize)> for $typ {
+        impl DekuRead<(Endian, Size)> for $typ {
             fn read(
                 input: &BitSlice<Msb0, u8>,
-                (endian, bit_size): (Endian, BitSize),
+                (endian, size): (Endian, Size),
             ) -> Result<(&BitSlice<Msb0, u8>, Self), DekuError> {
-                let max_type_bits: usize = BitSize::of::<$typ>().into();
-                let bit_size: usize = bit_size.into();
+                let max_type_bits: usize = Size::of::<$typ>().bit_size();
+                let bit_size: usize = size.bit_size();

                 let input_is_le = endian.is_le();

@@ -371,23 +371,23 @@ macro_rules! ImplDekuTraits {
             }
         }

-        // Only have `endian`, set `bit_size` to `BitSize::of::()`
+        // Only have `endian`, set `bit_size` to `Size::of::()`
         impl DekuRead<Endian> for $typ {
             fn read(
                 input: &BitSlice<Msb0, u8>,
                 endian: Endian,
             ) -> Result<(&BitSlice<Msb0, u8>, Self), DekuError> {
-                let max_type_bits = BitSize::of::<$typ>();
+                let max_type_bits = Size::of::<$typ>();

                 <$typ>::read(input, (endian, max_type_bits))
             }
         }

         // Only have `bit_size`, set `endian` to `Endian::default`.
-        impl DekuRead<BitSize> for $typ {
+        impl DekuRead<Size> for $typ {
             fn read(
                 input: &BitSlice<Msb0, u8>,
-                bit_size: BitSize,
+                bit_size: Size,
             ) -> Result<(&BitSlice<Msb0, u8>, Self), DekuError> {
                 let endian = Endian::default();

@@ -404,18 +404,18 @@ macro_rules! ImplDekuTraits {
             }
         }

-        impl DekuWrite<(Endian, BitSize)> for $typ {
+        impl DekuWrite<(Endian, Size)> for $typ {
             fn write(
                 &self,
                 output: &mut BitVec<Msb0, u8>,
-                (endian, bit_size): (Endian, BitSize),
+                (endian, size): (Endian, Size),
             ) -> Result<(), DekuError> {
                 let input = match endian {
                     Endian::Little => self.to_le_bytes(),
                     Endian::Big => self.to_be_bytes(),
                 };

-                let bit_size: usize = bit_size.into();
+                let bit_size: usize = size.bit_size();

                 let input_bits = input.view_bits::<Msb0>();

@@ -466,11 +466,11 @@ macro_rules! ImplDekuTraits {
         }

         // Only have `bit_size`, set `endian` to `Endian::default`.
-        impl DekuWrite<BitSize> for $typ {
+        impl DekuWrite<Size> for $typ {
             fn write(
                 &self,
                 output: &mut BitVec<Msb0, u8>,
-                bit_size: BitSize,
+                bit_size: Size,
             ) -> Result<(), DekuError> {
                 <$typ>::write(self, output, (Endian::default(), bit_size))
             }
         }
@@ -563,12 +563,20 @@ impl<T: DekuRead<Ctx>, Ctx: Copy, Predicate: FnMut(&T) -> bool> DekuRead<(Limit<
                 read_vec_with_predicate(input, None, inner_ctx, move |_, value| predicate(value))
             }

-            // Read until a given quanity of bits have been read
-            Limit::Bits(bits) => {
-                read_vec_with_predicate(input, None, inner_ctx, move |read_bits, _| {
-                    read_bits == bits.into()
-                })
-            }
+            // Read until a given quantity of bits have been read
+            Limit::Size(size) => match size {
+                Size::Bits(bit_size) => {
+                    read_vec_with_predicate(input, None, inner_ctx, move |read_bits, _| {
+                        read_bits == bit_size
+                    })
+                }
+                Size::Bytes(_) => {
+                    let bit_size = size.bit_size();
+                    read_vec_with_predicate(input, None, inner_ctx, move |read_bits, _| {
+                        read_bits == bit_size
+                    })
+                }
+            },
         }
     }
 }
@@ -837,7 +845,7 @@ mod tests {
         let bit_slice = input.view_bits::<Msb0>();

         let (rest, res_read) = match bit_size {
-            Some(bit_size) => u32::read(bit_slice, (endian, BitSize(bit_size))).unwrap(),
+            Some(bit_size) => u32::read(bit_slice, (endian, Size::Bits(bit_size))).unwrap(),
             None => u32::read(bit_slice, endian).unwrap(),
         };

@@ -857,7 +865,7 @@ mod tests {
         let mut res_write = bitvec![Msb0, u8;];
         match bit_size {
             Some(bit_size) => input
-                .write(&mut res_write, (endian, BitSize(bit_size)))
+                .write(&mut res_write, (endian, Size::Bits(bit_size)))
                 .unwrap(),
             None => input.write(&mut res_write, endian).unwrap(),
         };
@@ -878,7 +886,7 @@ mod tests {
         let bit_slice = input.view_bits::<Msb0>();

         let (rest, res_read) = match bit_size {
-            Some(bit_size) => u32::read(bit_slice, (endian, BitSize(bit_size))).unwrap(),
+            Some(bit_size) => u32::read(bit_slice, (endian, Size::Bits(bit_size))).unwrap(),
             None => u32::read(bit_slice, endian).unwrap(),
         };
         assert_eq!(expected, res_read);
@@ -887,7 +895,7 @@ mod tests {
         let mut res_write = bitvec![Msb0, u8;];
         match bit_size {
             Some(bit_size) => res_read
-                .write(&mut res_write, (endian, BitSize(bit_size)))
+                .write(&mut res_write, (endian, Size::Bits(bit_size)))
                 .unwrap(),
             None => res_read.write(&mut res_write, endian).unwrap(),
         };
@@ -900,7 +908,7 @@ mod tests {
        case::count_1([0xAA, 0xBB].as_ref(), Endian::Little, Some(8), 1.into(), vec![0xAA], bits![Msb0, u8; 1, 0, 1, 1, 1, 0, 1, 1]),
        case::count_2([0xAA, 0xBB, 0xCC].as_ref(), Endian::Little, Some(8), 2.into(), vec![0xAA, 0xBB], bits![Msb0, u8; 1, 1, 0, 0, 1, 1, 0, 0]),
        case::until_null([0xAA, 0, 0xBB].as_ref(), Endian::Little, None, (|v: &u8| *v == 0u8).into(), vec![0xAA, 0], bits![Msb0, u8; 1, 0, 1, 1, 1, 0, 1, 1]),
-       case::until_bits([0xAA, 0xBB].as_ref(), Endian::Little, None, BitSize(8).into(), vec![0xAA], bits![Msb0, u8; 1, 0, 1, 1, 1, 0, 1, 1]),
+       case::until_bits([0xAA, 0xBB].as_ref(), Endian::Little, None, Size::Bits(8).into(), vec![0xAA], bits![Msb0, u8; 1, 0, 1, 1, 1, 0, 1, 1]),
        case::bits_6([0b0110_1001, 0b1110_1001].as_ref(), Endian::Little, Some(6), 2.into(), vec![0b00_011010, 0b00_011110], bits![Msb0, u8; 1, 0, 0, 1]),
        #[should_panic(expected = "Parse(\"too much data: container of 8 bits cannot hold 9 bits\")")]
        case::not_enough_data([].as_ref(), Endian::Little, Some(9), 1.into(), vec![], bits![Msb0, u8;]),
@@ -911,7 +919,7 @@ mod tests {
        #[should_panic(expected = "Parse(\"not enough data: expected 8 bits got 0 bits\")")]
        case::not_enough_data_until([0xAA].as_ref(), Endian::Little, Some(8), (|_: &u8| false).into(), vec![], bits![Msb0, u8;]),
        #[should_panic(expected = "Parse(\"not enough data: expected 8 bits got 0 bits\")")]
-       case::not_enough_data_bits([0xAA].as_ref(), Endian::Little, Some(8), (BitSize(16)).into(), vec![], bits![Msb0, u8;]),
+       case::not_enough_data_bits([0xAA].as_ref(), Endian::Little, Some(8), (Size::Bits(16)).into(), vec![], bits![Msb0, u8;]),
        #[should_panic(expected = "Parse(\"too much data: container of 8 bits cannot hold 9 bits\")")]
        case::too_much_data([0xAA, 0xBB].as_ref(), Endian::Little, Some(9), 1.into(), vec![], bits![Msb0, u8;]),
    )]
@@ -927,7 +935,7 @@ mod tests {

        let (rest, res_read) = match bit_size {
            Some(bit_size) => {
-               Vec::<u8>::read(bit_slice, (limit, (endian, BitSize(bit_size)))).unwrap()
+               Vec::<u8>::read(bit_slice, (limit, (endian, Size::Bits(bit_size)))).unwrap()
            }
            None => Vec::<u8>::read(bit_slice, (limit, (endian))).unwrap(),
        };
@@ -950,8 +958,8 @@ mod tests {
        case::normal_be([0xAA, 0xBB, 0xCC, 0xDD].as_ref(), Endian::Big, Some(16), 2.into(), vec![0xAABB, 0xCCDD], bits![Msb0, u8;], vec![0xAA, 0xBB, 0xCC, 0xDD]),
        case::predicate_le([0xAA, 0xBB, 0xCC, 0xDD].as_ref(), Endian::Little, Some(16), (|v: &u16| *v == 0xBBAA).into(), vec![0xBBAA], bits![Msb0, u8; 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1], vec![0xAA, 0xBB]),
        case::predicate_be([0xAA, 0xBB, 0xCC, 0xDD].as_ref(), Endian::Big, Some(16), (|v: &u16| *v == 0xAABB).into(), vec![0xAABB], bits![Msb0, u8; 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1], vec![0xAA, 0xBB]),
-       case::bytes_le([0xAA, 0xBB, 0xCC, 0xDD].as_ref(), Endian::Little, Some(16), BitSize(16).into(), vec![0xBBAA], bits![Msb0, u8; 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1], vec![0xAA, 0xBB]),
-       case::bytes_be([0xAA, 0xBB, 0xCC, 0xDD].as_ref(), Endian::Big, Some(16), BitSize(16).into(), vec![0xAABB], bits![Msb0, u8; 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1], vec![0xAA, 0xBB]),
+       case::bytes_le([0xAA, 0xBB, 0xCC, 0xDD].as_ref(), Endian::Little, Some(16), Size::Bits(16).into(), vec![0xBBAA], bits![Msb0, u8; 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1], vec![0xAA, 0xBB]),
+       case::bytes_be([0xAA, 0xBB, 0xCC, 0xDD].as_ref(), Endian::Big, Some(16), Size::Bits(16).into(), vec![0xAABB], bits![Msb0, u8; 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1], vec![0xAA, 0xBB]),
    )]
    fn test_vec_read_write<Predicate: FnMut(&u16) -> bool>(
        input: &[u8],
@@ -968,13 +976,13 @@ mod tests {
        // Unwrap here because all test cases are `Some`.
        let bit_size = bit_size.unwrap();

        let (rest, res_read) =
-           Vec::<u16>::read(bit_slice, (limit, (endian, BitSize(bit_size)))).unwrap();
+           Vec::<u16>::read(bit_slice, (limit, (endian, Size::Bits(bit_size)))).unwrap();
        assert_eq!(expected, res_read);
        assert_eq!(expected_rest, rest);

        let mut res_write = bitvec![Msb0, u8;];
        res_read
-           .write(&mut res_write, (endian, BitSize(bit_size)))
+           .write(&mut res_write, (endian, Size::Bits(bit_size)))
            .unwrap();
        assert_eq!(expected_write, res_write.into_vec());
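
Usage sketch (not part of the patch): the hunks above route a field-level `bytes = "..."` attribute through `gen_field_args` into `deku::ctx::Size::Bytes(..)`, just as `bits = "..."` maps to `Size::Bits(..)`, with `Size::bit_size()` / `Size::byte_size()` doing the unit conversions. The struct below and its field names are hypothetical, shown only to illustrate the attribute surface and the `ctx::Size` API, assuming a deku build that includes this change:

use deku::ctx::Size;
use deku::prelude::*;

// Hypothetical frame layout for illustration only.
#[derive(Debug, PartialEq, DekuRead, DekuWrite)]
#[deku(endian = "big")]
struct Frame {
    // 4 bits -> the derive expands this to deku::ctx::Size::Bits(4)
    #[deku(bits = "4")]
    version: u8,
    #[deku(bits = "4")]
    kind: u8,
    // 2 bytes -> the derive expands this to deku::ctx::Size::Bytes(2)
    #[deku(bytes = "2")]
    length: u16,
}

fn main() -> Result<(), DekuError> {
    // Size carries either unit; conversions are explicit rather than implicit Deref/Into.
    assert_eq!(Size::of::<u16>(), Size::Bits(16));
    assert_eq!(Size::Bytes(2).bit_size(), 16);
    assert_eq!(Size::Bits(16).byte_size()?, 2);

    // 0x12 -> version = 0x1, kind = 0x2; big-endian 0x0005 -> length = 5
    let (_rest, frame) = Frame::from_bytes((&[0x12, 0x00, 0x05][..], 0))?;
    assert_eq!(frame.length, 5);
    Ok(())
}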