From 7a8e97c2a0218f061253111ff589618abaf0d5d1 Mon Sep 17 00:00:00 2001 From: Daniel Lehmann <59584561+danlehmann@users.noreply.github.com> Date: Tue, 24 Oct 2023 10:31:52 -0700 Subject: [PATCH] Add various arithmetic operations (#39) * Implement Mul, MulAssign, Div, DivAssign Small step towards #32 * Implement wrapping_xxx; fix Shl/Shr semantics in debug builds - Implement `wrapping_add`, `wrapping_sub`, `wrapping_mul`, `wrapping_div`, `wrapping_shl`, `wrapping_shr` - In debug builds, `<<` (`Shl`, `ShlAssign`) and `>>` (`Shr`, `ShrAssign`) now bounds-check the shift amount using the same semantics as built-in shifts. For example, shifting a u5 by 5 or more bits will now panic as expected. * Add saturating_xxx * Addressed comments * Fix build in debug mode * Add checked_xxx and overflowing_xxx * List all new methods --- CHANGELOG.md | 9 + src/lib.rs | 372 +++++++++++++++++++++++++++++++++-- tests/tests.rs | 516 ++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 874 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e526ad..a31fa94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,15 @@ - Support `Step` so that arbitrary-int can be used in a range expression, e.g. `for n in u3::MIN..=u3::MAX { println!("{n}") }`. Note this trait is currently unstable, and so is only usable in nightly. Enable this feature with `step_trait`. - Support formatting via [defmt](https://crates.io/crates/defmt). Enable the option `defmt` feature - Support serializing and deserializing via [serde](https://crates.io/crates/serde). Enable the option `serde` feature +- Support `Mul`, `MulAssign`, `Div`, `DivAssign` +- The following new methods were implemented to make arbitrary ints feel more like built-in types: + * `wrapping_add`, `wrapping_sub`, `wrapping_mul`, `wrapping_div`, `wrapping_shl`, `wrapping_shr` + * `saturating_add`, `saturating_sub`, `saturating_mul`, `saturating_div`, `saturating_pow` + * `checked_add`, `checked_sub`, `checked_mul`, `checked_div`, `checked_shl`, `checked_shr` + * `overflowing_add`, `overflowing_sub`, `overflowing_mul`, `overflowing_div`, `overflowing_shl`, `overflowing_shr` + +### Changed +- In debug builds, `<<` (`Shl`, `ShlAssign`) and `>>` (`Shr`, `ShrAssign`) now bounds-check the shift amount using the same semantics as built-in shifts. For example, shifting a u5 by 5 or more bits will now panic as expected. ## arbitrary-int 1.2.6 diff --git a/src/lib.rs b/src/lib.rs index 16ae29a..4c2b9c3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -12,8 +12,8 @@ use core::iter::Step; #[cfg(feature = "num-traits")] use core::num::Wrapping; use core::ops::{ - Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not, Shl, - ShlAssign, Shr, ShrAssign, Sub, SubAssign, + Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign, + Mul, MulAssign, Not, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign, }; #[cfg(feature = "serde")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -340,6 +340,254 @@ macro_rules! 
uint_impl {
         UInt::<$type, BITS_RESULT> { value: self.value }
     }
 
+    pub const fn wrapping_add(self, rhs: Self) -> Self {
+        let sum = self.value.wrapping_add(rhs.value);
+        Self {
+            value: sum & Self::MASK,
+        }
+    }
+
+    pub const fn wrapping_sub(self, rhs: Self) -> Self {
+        let sum = self.value.wrapping_sub(rhs.value);
+        Self {
+            value: sum & Self::MASK,
+        }
+    }
+
+    pub const fn wrapping_mul(self, rhs: Self) -> Self {
+        let sum = self.value.wrapping_mul(rhs.value);
+        Self {
+            value: sum & Self::MASK,
+        }
+    }
+
+    pub const fn wrapping_div(self, rhs: Self) -> Self {
+        let sum = self.value.wrapping_div(rhs.value);
+        Self {
+            // No need to mask here - divisions always produce a result that is <= self
+            value: sum,
+        }
+    }
+
+    pub const fn wrapping_shl(self, rhs: u32) -> Self {
+        // modulo is expensive on some platforms, so only do it when necessary
+        let shift_amount = if rhs >= (BITS as u32) {
+            rhs % (BITS as u32)
+        } else {
+            rhs
+        };
+
+        Self {
+            // We could use wrapping_shl here to make Debug builds slightly smaller;
+            // the downside would be that on weird CPUs that don't do wrapping_shl by
+            // default, release builds would get slightly worse. Using << should give
+            // good release performance everywhere
+            value: (self.value << shift_amount) & Self::MASK,
+        }
+    }
+
+    pub const fn wrapping_shr(self, rhs: u32) -> Self {
+        // modulo is expensive on some platforms, so only do it when necessary
+        let shift_amount = if rhs >= (BITS as u32) {
+            rhs % (BITS as u32)
+        } else {
+            rhs
+        };
+
+        Self {
+            value: (self.value >> shift_amount),
+        }
+    }
+
+    pub const fn saturating_add(self, rhs: Self) -> Self {
+        let saturated = if core::mem::size_of::<$type>() << 3 == BITS {
+            // We are something like a UInt::<u8, 8>. We can fall back to the base implementation
+            self.value.saturating_add(rhs.value)
+        } else {
+            // We're dealing with fewer bits than the underlying type (e.g. u7).
+            // That means the addition can never overflow the underlying type
+            let sum = self.value.wrapping_add(rhs.value);
+            let max = Self::MAX.value();
+            if sum > max { max } else { sum }
+        };
+        Self {
+            value: saturated,
+        }
+    }
+
+    pub const fn saturating_sub(self, rhs: Self) -> Self {
+        // For unsigned numbers, the only difference is when we reach 0 - which is the same
+        // no matter the data size
+        Self {
+            value: self.value.saturating_sub(rhs.value),
+        }
+    }
+
+    pub const fn saturating_mul(self, rhs: Self) -> Self {
+        let product = if BITS << 1 <= (core::mem::size_of::<$type>() << 3) {
+            // We have half the bits (e.g. u4 * u4) of the base type, so we can't overflow the base type
+            // wrapping_mul likely provides the best performance on all cpus
+            self.value.wrapping_mul(rhs.value)
+        } else {
+            // We have more than half the bits (e.g. u6 * u6)
+            self.value.saturating_mul(rhs.value)
+        };
+
+        let max = Self::MAX.value();
+        let saturated = if product > max { max } else { product };
+        Self {
+            value: saturated,
+        }
+    }
+
+    pub const fn saturating_div(self, rhs: Self) -> Self {
+        // When dividing unsigned numbers, we never need to saturate.
+        // Division by zero in saturating_div panics (in debug and release mode),
+        // so no need to do anything special there either
+        Self {
+            value: self.value.saturating_div(rhs.value),
+        }
+    }
+
+    pub const fn saturating_pow(self, exp: u32) -> Self {
+        // It might be possible to hand-write this to be slightly faster, as
+        // saturating_pow already has to do a bounds check and then we do a second one
+        let powed = self.value.saturating_pow(exp);
+        let max = Self::MAX.value();
+        let saturated = if powed > max { max } else { powed };
+        Self {
+            value: saturated,
+        }
+    }
+
+    pub const fn checked_add(self, rhs: Self) -> Option<Self> {
+        if core::mem::size_of::<$type>() << 3 == BITS {
+            // We are something like a UInt::<u8, 8>. We can fall back to the base implementation
+            match self.value.checked_add(rhs.value) {
+                Some(value) => Some(Self { value }),
+                None => None
+            }
+        } else {
+            // We're dealing with fewer bits than the underlying type (e.g. u7).
+            // That means the addition can never overflow the underlying type
+            let sum = self.value.wrapping_add(rhs.value);
+            if sum > Self::MAX.value() { None } else { Some(Self { value: sum }) }
+        }
+    }
+
+    pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
+        match self.value.checked_sub(rhs.value) {
+            Some(value) => Some(Self { value }),
+            None => None
+        }
+    }
+
+    pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
+        let product = if BITS << 1 <= (core::mem::size_of::<$type>() << 3) {
+            // We have half the bits (e.g. u4 * u4) of the base type, so we can't overflow the base type
+            // wrapping_mul likely provides the best performance on all cpus
+            Some(self.value.wrapping_mul(rhs.value))
+        } else {
+            // We have more than half the bits (e.g. u6 * u6)
+            self.value.checked_mul(rhs.value)
+        };
+
+        match product {
+            Some(value) => {
+                if value > Self::MAX.value() {
+                    None
+                } else {
+                    Some(Self { value })
+                }
+            }
+            None => None
+        }
+    }
+
+    pub const fn checked_div(self, rhs: Self) -> Option<Self> {
+        match self.value.checked_div(rhs.value) {
+            Some(value) => Some(Self { value }),
+            None => None
+        }
+    }
+
+    pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
+        if rhs >= (BITS as u32) {
+            None
+        } else {
+            Some(Self {
+                value: (self.value << rhs) & Self::MASK,
+            })
+        }
+    }
+
+    pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
+        if rhs >= (BITS as u32) {
+            None
+        } else {
+            Some(Self {
+                value: (self.value >> rhs),
+            })
+        }
+    }
+
+    pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+        let (value, overflow) = if core::mem::size_of::<$type>() << 3 == BITS {
+            // We are something like a UInt::<u8, 8>. We can fall back to the base implementation
+            self.value.overflowing_add(rhs.value)
+        } else {
+            // We're dealing with fewer bits than the underlying type (e.g. u7).
+            // That means the addition can never overflow the underlying type
+            let sum = self.value.wrapping_add(rhs.value);
+            let masked = sum & Self::MASK;
+            (masked, masked != sum)
+        };
+        (Self { value }, overflow)
+    }
+
+    pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+        // For unsigned numbers, the only difference is when we reach 0 - which is the same
+        // no matter the data size. In the case of overflow we do have to mask the result, though
+        let (value, overflow) = self.value.overflowing_sub(rhs.value);
+        (Self { value: value & Self::MASK }, overflow)
+    }
+
+    pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
+        let (wrapping_product, overflow) = if BITS << 1 <= (core::mem::size_of::<$type>() << 3) {
+            // We have half the bits (e.g.
u4 * u4) of the base type, so we can't overflow the base type + // wrapping_mul likely provides the best performance on all cpus + self.value.overflowing_mul(rhs.value) + } else { + // We have more than half the bits (e.g. u6 * u6) + self.value.overflowing_mul(rhs.value) + }; + + let masked = wrapping_product & Self::MASK; + let overflow2 = masked != wrapping_product; + (Self { value: masked }, overflow || overflow2 ) + } + + pub const fn overflowing_div(self, rhs: Self) -> (Self, bool) { + let value = self.value.wrapping_div(rhs.value); + (Self { value }, false ) + } + + pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) { + if rhs >= (BITS as u32) { + (Self { value: self.value << (rhs % (BITS as u32)) }, true) + } else { + (Self { value: self.value << rhs }, false) + } + } + + pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) { + if rhs >= (BITS as u32) { + (Self { value: self.value >> (rhs % (BITS as u32)) }, true) + } else { + (Self { value: self.value >> rhs }, false) + } + } + /// Reverses the order of bits in the integer. The least significant bit becomes the most significant bit, second least-significant bit becomes second most-significant bit, etc. pub const fn reverse_bits(self) -> Self { let shift_right = (core::mem::size_of::<$type>() << 3) - BITS; @@ -419,8 +667,6 @@ where + Not + Add + Sub - + Shr - + Shl + From, { type Output = UInt; @@ -447,9 +693,6 @@ where + AddAssign + BitAnd + BitAndAssign - + Sub - + Shr - + Shl + From, { fn add_assign(&mut self, rhs: Self) { @@ -465,12 +708,7 @@ where impl Sub for UInt where Self: Number, - T: Copy - + BitAnd - + Sub - + Shl - + Shr - + From, + T: Copy + BitAnd + Sub, { type Output = UInt; @@ -485,14 +723,7 @@ where impl SubAssign for UInt where Self: Number, - T: Copy - + SubAssign - + BitAnd - + BitAndAssign - + Sub - + Shl - + Shr - + From, + T: Copy + SubAssign + BitAnd + BitAndAssign + Sub, { fn sub_assign(&mut self, rhs: Self) { // No need for extra overflow checking as the regular minus operator already handles it for us @@ -501,6 +732,76 @@ where } } +impl Mul for UInt +where + Self: Number, + T: PartialEq + Copy + BitAnd + Not + Mul + From, +{ + type Output = UInt; + + fn mul(self, rhs: Self) -> Self::Output { + // In debug builds, this will perform two bounds checks: Initial multiplication, followed by + // our bounds check. As wrapping_mul isn't available as a trait bound (in regular Rust), this + // is unavoidable + let product = self.value * rhs.value; + #[cfg(debug_assertions)] + if (product & !Self::MASK) != T::from(0) { + panic!("attempt to multiply with overflow"); + } + Self { + value: product & Self::MASK, + } + } +} + +impl MulAssign for UInt +where + Self: Number, + T: PartialEq + + Eq + + Not + + Copy + + MulAssign + + BitAnd + + BitAndAssign + + From, +{ + fn mul_assign(&mut self, rhs: Self) { + self.value *= rhs.value; + #[cfg(debug_assertions)] + if (self.value & !Self::MASK) != T::from(0) { + panic!("attempt to multiply with overflow"); + } + self.value &= Self::MASK; + } +} + +impl Div for UInt +where + Self: Number, + T: PartialEq + Div, +{ + type Output = UInt; + + fn div(self, rhs: Self) -> Self::Output { + // Integer division can only make the value smaller. 
And as the result is the same type as
+        // Self, there's no need to range-check or mask
+        Self {
+            value: self.value / rhs.value,
+        }
+    }
+}
+
+impl<T, const BITS: usize> DivAssign for UInt<T, BITS>
+where
+    Self: Number,
+    T: PartialEq + DivAssign,
+{
+    fn div_assign(&mut self, rhs: Self) {
+        self.value /= rhs.value;
+    }
+}
+
 impl BitAnd for UInt
 where
     Self: Number,
@@ -603,10 +904,18 @@ where
         + Shl
        + Shr
         + From,
+    TSHIFTBITS: TryInto<usize> + Copy,
 {
     type Output = UInt;
 
     fn shl(self, rhs: TSHIFTBITS) -> Self::Output {
+        // With debug assertions, the << and >> operators panic if the shift amount
+        // is larger than the number of bits (in which case the result would always be 0)
+        #[cfg(debug_assertions)]
+        if rhs.try_into().unwrap_or(usize::MAX) >= BITS {
+            panic!("attempt to shift left with overflow")
+        }
+
         Self {
             value: (self.value << rhs) & Self::MASK,
         }
@@ -624,8 +933,15 @@ where
         + Shr
         + Shl
         + From,
+    TSHIFTBITS: TryInto<usize> + Copy,
 {
     fn shl_assign(&mut self, rhs: TSHIFTBITS) {
+        // With debug assertions, the << and >> operators panic if the shift amount
+        // is larger than the number of bits (in which case the result would always be 0)
+        #[cfg(debug_assertions)]
+        if rhs.try_into().unwrap_or(usize::MAX) >= BITS {
+            panic!("attempt to shift left with overflow")
+        }
         self.value <<= rhs;
         self.value &= Self::MASK;
     }
@@ -634,10 +950,17 @@ where
 impl Shr for UInt
 where
     T: Copy + Shr + Sub + Shl + From,
+    TSHIFTBITS: TryInto<usize> + Copy,
 {
     type Output = UInt;
 
     fn shr(self, rhs: TSHIFTBITS) -> Self::Output {
+        // With debug assertions, the << and >> operators panic if the shift amount
+        // is larger than the number of bits (in which case the result would always be 0)
+        #[cfg(debug_assertions)]
+        if rhs.try_into().unwrap_or(usize::MAX) >= BITS {
+            panic!("attempt to shift right with overflow")
+        }
         Self {
             value: self.value >> rhs,
         }
@@ -647,8 +970,15 @@ where
 impl ShrAssign for UInt
 where
     T: Copy + ShrAssign + Sub + Shl + From,
+    TSHIFTBITS: TryInto<usize> + Copy,
 {
     fn shr_assign(&mut self, rhs: TSHIFTBITS) {
+        // With debug assertions, the << and >> operators panic if the shift amount
+        // is larger than the number of bits (in which case the result would always be 0)
+        #[cfg(debug_assertions)]
+        if rhs.try_into().unwrap_or(usize::MAX) >= BITS {
+            panic!("attempt to shift right with overflow")
+        }
         self.value >>= rhs;
     }
 }
diff --git a/tests/tests.rs b/tests/tests.rs
index 22cb827..e050f00 100644
--- a/tests/tests.rs
+++ b/tests/tests.rs
@@ -147,7 +147,8 @@ fn addassign_overflow() {
 #[test]
 fn addassign_no_overflow() {
     let mut value = u9::new(500);
-    value += u9::new(40);
+    value += u9::new(28);
+    assert_eq!(value, u9::new(16));
 }
 
 #[test]
@@ -166,7 +167,8 @@ fn sub_overflow() {
 #[cfg(not(debug_assertions))]
 #[test]
 fn sub_no_overflow() {
-    let _ = u7::new(100) - u7::new(127);
+    let value = u7::new(100) - u7::new(127);
+    assert_eq!(value, u7::new(101));
 }
 
 #[test]
@@ -189,6 +191,78 @@ fn subassign_overflow() {
 fn subassign_no_overflow() {
     let mut value = u9::new(30);
     value -= u9::new(40);
+    assert_eq!(value, u9::new(502));
+}
+
+#[test]
+fn mul() {
+    assert_eq!(u7::new(22) * u7::new(4), u7::new(88));
+    assert_eq!(u7::new(127) * u7::new(0), u7::new(0));
+}
+
+#[cfg(debug_assertions)]
+#[test]
+#[should_panic]
+fn mul_overflow() {
+    let _ = u7::new(100) * u7::new(2);
+}
+
+#[cfg(not(debug_assertions))]
+#[test]
+fn mul_no_overflow() {
+    let result = u7::new(100) * u7::new(2);
+    assert_eq!(result, u7::new(72));
+}
+
+#[test]
+fn mulassign() {
+    let mut value = u9::new(240);
+    value *= u9::new(2);
+
assert_eq!(value, u9::new(480)); +} + +#[cfg(debug_assertions)] +#[test] +#[should_panic] +fn mulassign_overflow() { + let mut value = u9::new(500); + value *= u9::new(2); +} + +#[cfg(not(debug_assertions))] +#[test] +fn mulassign_no_overflow() { + let mut value = u9::new(500); + value *= u9::new(40); + assert_eq!(value, u9::new(32)); +} + +#[test] +fn div() { + // div just forwards to the underlying type, so there isn't much to do + assert_eq!(u7::new(22) / u7::new(4), u7::new(5)); + assert_eq!(u7::new(127) / u7::new(1), u7::new(127)); + assert_eq!(u7::new(127) / u7::new(127), u7::new(1)); +} + +#[should_panic] +#[test] +fn div_by_zero() { + let _ = u7::new(22) / u7::new(0); +} + +#[test] +fn divassign() { + let mut value = u9::new(240); + value /= u9::new(2); + assert_eq!(value, u9::new(120)); +} + +#[should_panic] +#[test] +fn divassign_by_zero() { + let mut value = u9::new(240); + value /= u9::new(0); } #[test] @@ -264,6 +338,48 @@ fn shl() { assert_eq!(u9::new(0b11110000) << 3u64, u9::new(0b1_10000000)); } +#[cfg(debug_assertions)] +#[test] +#[should_panic] +fn shl_too_much8() { + let _ = u53::new(123) << 53u8; +} + +#[cfg(debug_assertions)] +#[test] +#[should_panic] +fn shl_too_much16() { + let _ = u53::new(123) << 53u16; +} + +#[cfg(debug_assertions)] +#[test] +#[should_panic] +fn shl_too_much32() { + let _ = u53::new(123) << 53u32; +} + +#[cfg(debug_assertions)] +#[test] +#[should_panic] +fn shl_too_much64() { + let _ = u53::new(123) << 53u64; +} + +#[cfg(debug_assertions)] +#[test] +#[should_panic] +fn shl_too_much128() { + let _ = u53::new(123) << 53u128; +} + +#[cfg(debug_assertions)] +#[test] +#[should_panic] +fn shl_too_much_usize() { + let _ = u53::new(123) << 53usize; +} + #[test] fn shlassign() { let mut value = u9::new(0b11110000); @@ -271,6 +387,22 @@ fn shlassign() { assert_eq!(value, u9::new(0b1_10000000)); } +#[cfg(debug_assertions)] +#[test] +#[should_panic] +fn shlassign_too_much() { + let mut value = u9::new(0b11110000); + value <<= 9; +} + +#[cfg(debug_assertions)] +#[test] +#[should_panic] +fn shlassign_too_much2() { + let mut value = u9::new(0b11110000); + value <<= 10; +} + #[test] fn shr() { assert_eq!(u17::new(0b100110) >> 5usize, u17::new(1)); @@ -1236,6 +1368,386 @@ fn simple_le_be() { } } +#[test] +fn wrapping_add() { + assert_eq!(u7::new(120).wrapping_add(u7::new(1)), u7::new(121)); + assert_eq!(u7::new(120).wrapping_add(u7::new(10)), u7::new(2)); + assert_eq!(u7::new(127).wrapping_add(u7::new(127)), u7::new(126)); +} + +#[test] +fn wrapping_sub() { + assert_eq!(u7::new(120).wrapping_sub(u7::new(1)), u7::new(119)); + assert_eq!(u7::new(10).wrapping_sub(u7::new(20)), u7::new(118)); + assert_eq!(u7::new(0).wrapping_sub(u7::new(1)), u7::new(127)); +} + +#[test] +fn wrapping_mul() { + assert_eq!(u7::new(120).wrapping_mul(u7::new(0)), u7::new(0)); + assert_eq!(u7::new(120).wrapping_mul(u7::new(1)), u7::new(120)); + + // Overflow u7 + assert_eq!(u7::new(120).wrapping_mul(u7::new(2)), u7::new(112)); + + // Overflow the underlying type + assert_eq!(u7::new(120).wrapping_mul(u7::new(3)), u7::new(104)); +} + +#[test] +fn wrapping_div() { + assert_eq!(u7::new(120).wrapping_div(u7::new(1)), u7::new(120)); + assert_eq!(u7::new(120).wrapping_div(u7::new(2)), u7::new(60)); + assert_eq!(u7::new(120).wrapping_div(u7::new(120)), u7::new(1)); + assert_eq!(u7::new(120).wrapping_div(u7::new(121)), u7::new(0)); +} + +#[should_panic] +#[test] +fn wrapping_div_by_zero() { + let _ = u7::new(120).wrapping_div(u7::new(0)); +} + +#[test] +fn wrapping_shl() { + 
assert_eq!(u7::new(0b010_1101).wrapping_shl(0), u7::new(0b010_1101)); + assert_eq!(u7::new(0b010_1101).wrapping_shl(1), u7::new(0b101_1010)); + assert_eq!(u7::new(0b010_1101).wrapping_shl(6), u7::new(0b100_0000)); + assert_eq!(u7::new(0b010_1101).wrapping_shl(7), u7::new(0b010_1101)); + assert_eq!(u7::new(0b010_1101).wrapping_shl(8), u7::new(0b101_1010)); + assert_eq!(u7::new(0b010_1101).wrapping_shl(14), u7::new(0b010_1101)); + assert_eq!(u7::new(0b010_1101).wrapping_shl(15), u7::new(0b101_1010)); +} + +#[test] +fn wrapping_shr() { + assert_eq!(u7::new(0b010_1101).wrapping_shr(0), u7::new(0b010_1101)); + assert_eq!(u7::new(0b010_1101).wrapping_shr(1), u7::new(0b001_0110)); + assert_eq!(u7::new(0b010_1101).wrapping_shr(5), u7::new(0b000_0001)); + assert_eq!(u7::new(0b010_1101).wrapping_shr(7), u7::new(0b010_1101)); + assert_eq!(u7::new(0b010_1101).wrapping_shr(8), u7::new(0b001_0110)); + assert_eq!(u7::new(0b010_1101).wrapping_shr(14), u7::new(0b010_1101)); + assert_eq!(u7::new(0b010_1101).wrapping_shr(15), u7::new(0b001_0110)); +} + +#[test] +fn saturating_add() { + assert_eq!(u7::new(120).saturating_add(u7::new(1)), u7::new(121)); + assert_eq!(u7::new(120).saturating_add(u7::new(10)), u7::new(127)); + assert_eq!(u7::new(127).saturating_add(u7::new(127)), u7::new(127)); + assert_eq!( + UInt::::new(250).saturating_add(UInt::::new(10)), + UInt::::new(255) + ); +} + +#[test] +fn saturating_sub() { + assert_eq!(u7::new(120).saturating_sub(u7::new(30)), u7::new(90)); + assert_eq!(u7::new(120).saturating_sub(u7::new(119)), u7::new(1)); + assert_eq!(u7::new(120).saturating_sub(u7::new(120)), u7::new(0)); + assert_eq!(u7::new(120).saturating_sub(u7::new(121)), u7::new(0)); + assert_eq!(u7::new(0).saturating_sub(u7::new(127)), u7::new(0)); +} + +#[test] +fn saturating_mul() { + // Fast-path: Only the arbitrary int is bounds checked + assert_eq!(u4::new(5).saturating_mul(u4::new(2)), u4::new(10)); + assert_eq!(u4::new(5).saturating_mul(u4::new(3)), u4::new(15)); + assert_eq!(u4::new(5).saturating_mul(u4::new(4)), u4::new(15)); + assert_eq!(u4::new(5).saturating_mul(u4::new(5)), u4::new(15)); + assert_eq!(u4::new(5).saturating_mul(u4::new(6)), u4::new(15)); + assert_eq!(u4::new(5).saturating_mul(u4::new(7)), u4::new(15)); + + // Slow-path (well, one more comparison) + assert_eq!(u5::new(5).saturating_mul(u5::new(2)), u5::new(10)); + assert_eq!(u5::new(5).saturating_mul(u5::new(3)), u5::new(15)); + assert_eq!(u5::new(5).saturating_mul(u5::new(4)), u5::new(20)); + assert_eq!(u5::new(5).saturating_mul(u5::new(5)), u5::new(25)); + assert_eq!(u5::new(5).saturating_mul(u5::new(6)), u5::new(30)); + assert_eq!(u5::new(5).saturating_mul(u5::new(7)), u5::new(31)); + assert_eq!(u5::new(30).saturating_mul(u5::new(1)), u5::new(30)); + assert_eq!(u5::new(30).saturating_mul(u5::new(2)), u5::new(31)); + assert_eq!(u5::new(30).saturating_mul(u5::new(10)), u5::new(31)); +} + +#[test] +fn saturating_div() { + assert_eq!(u4::new(5).saturating_div(u4::new(1)), u4::new(5)); + assert_eq!(u4::new(5).saturating_div(u4::new(2)), u4::new(2)); + assert_eq!(u4::new(5).saturating_div(u4::new(3)), u4::new(1)); + assert_eq!(u4::new(5).saturating_div(u4::new(4)), u4::new(1)); + assert_eq!(u4::new(5).saturating_div(u4::new(5)), u4::new(1)); +} + +#[test] +#[should_panic] +fn saturating_divby0() { + // saturating_div throws an exception on zero + let _ = u4::new(5).saturating_div(u4::new(0)); +} + +#[test] +fn saturating_pow() { + assert_eq!(u7::new(5).saturating_pow(0), u7::new(1)); + assert_eq!(u7::new(5).saturating_pow(1), 
u7::new(5)); + assert_eq!(u7::new(5).saturating_pow(2), u7::new(25)); + assert_eq!(u7::new(5).saturating_pow(3), u7::new(125)); + assert_eq!(u7::new(5).saturating_pow(4), u7::new(127)); + assert_eq!(u7::new(5).saturating_pow(255), u7::new(127)); +} + +#[test] +fn checked_add() { + assert_eq!(u7::new(120).checked_add(u7::new(1)), Some(u7::new(121))); + assert_eq!(u7::new(120).checked_add(u7::new(7)), Some(u7::new(127))); + assert_eq!(u7::new(120).checked_add(u7::new(10)), None); + assert_eq!(u7::new(127).checked_add(u7::new(127)), None); + assert_eq!( + UInt::::new(250).checked_add(UInt::::new(10)), + None + ); +} + +#[test] +fn checked_sub() { + assert_eq!(u7::new(120).checked_sub(u7::new(30)), Some(u7::new(90))); + assert_eq!(u7::new(120).checked_sub(u7::new(119)), Some(u7::new(1))); + assert_eq!(u7::new(120).checked_sub(u7::new(120)), Some(u7::new(0))); + assert_eq!(u7::new(120).checked_sub(u7::new(121)), None); + assert_eq!(u7::new(0).checked_sub(u7::new(127)), None); +} + +#[test] +fn checked_mul() { + // Fast-path: Only the arbitrary int is bounds checked + assert_eq!(u4::new(5).checked_mul(u4::new(2)), Some(u4::new(10))); + assert_eq!(u4::new(5).checked_mul(u4::new(3)), Some(u4::new(15))); + assert_eq!(u4::new(5).checked_mul(u4::new(4)), None); + assert_eq!(u4::new(5).checked_mul(u4::new(5)), None); + assert_eq!(u4::new(5).checked_mul(u4::new(6)), None); + assert_eq!(u4::new(5).checked_mul(u4::new(7)), None); + + // Slow-path (well, one more comparison) + assert_eq!(u5::new(5).checked_mul(u5::new(2)), Some(u5::new(10))); + assert_eq!(u5::new(5).checked_mul(u5::new(3)), Some(u5::new(15))); + assert_eq!(u5::new(5).checked_mul(u5::new(4)), Some(u5::new(20))); + assert_eq!(u5::new(5).checked_mul(u5::new(5)), Some(u5::new(25))); + assert_eq!(u5::new(5).checked_mul(u5::new(6)), Some(u5::new(30))); + assert_eq!(u5::new(5).checked_mul(u5::new(7)), None); + assert_eq!(u5::new(30).checked_mul(u5::new(1)), Some(u5::new(30))); + assert_eq!(u5::new(30).checked_mul(u5::new(2)), None); + assert_eq!(u5::new(30).checked_mul(u5::new(10)), None); +} + +#[test] +fn checked_div() { + // checked_div handles division by zero without exception, unlike saturating_div + assert_eq!(u4::new(5).checked_div(u4::new(0)), None); + assert_eq!(u4::new(5).checked_div(u4::new(1)), Some(u4::new(5))); + assert_eq!(u4::new(5).checked_div(u4::new(2)), Some(u4::new(2))); + assert_eq!(u4::new(5).checked_div(u4::new(3)), Some(u4::new(1))); + assert_eq!(u4::new(5).checked_div(u4::new(4)), Some(u4::new(1))); + assert_eq!(u4::new(5).checked_div(u4::new(5)), Some(u4::new(1))); +} + +#[test] +fn checked_shl() { + assert_eq!( + u7::new(0b010_1101).checked_shl(0), + Some(u7::new(0b010_1101)) + ); + assert_eq!( + u7::new(0b010_1101).checked_shl(1), + Some(u7::new(0b101_1010)) + ); + assert_eq!( + u7::new(0b010_1101).checked_shl(6), + Some(u7::new(0b100_0000)) + ); + assert_eq!(u7::new(0b010_1101).checked_shl(7), None); + assert_eq!(u7::new(0b010_1101).checked_shl(8), None); + assert_eq!(u7::new(0b010_1101).checked_shl(14), None); + assert_eq!(u7::new(0b010_1101).checked_shl(15), None); +} + +#[test] +fn checked_shr() { + assert_eq!( + u7::new(0b010_1101).checked_shr(0), + Some(u7::new(0b010_1101)) + ); + assert_eq!( + u7::new(0b010_1101).checked_shr(1), + Some(u7::new(0b001_0110)) + ); + assert_eq!( + u7::new(0b010_1101).checked_shr(5), + Some(u7::new(0b000_0001)) + ); + assert_eq!(u7::new(0b010_1101).checked_shr(7), None); + assert_eq!(u7::new(0b010_1101).checked_shr(8), None); + assert_eq!(u7::new(0b010_1101).checked_shr(14), 
None); + assert_eq!(u7::new(0b010_1101).checked_shr(15), None); +} + +#[test] +fn overflowing_add() { + assert_eq!( + u7::new(120).overflowing_add(u7::new(1)), + (u7::new(121), false) + ); + assert_eq!( + u7::new(120).overflowing_add(u7::new(7)), + (u7::new(127), false) + ); + assert_eq!( + u7::new(120).overflowing_add(u7::new(10)), + (u7::new(2), true) + ); + assert_eq!( + u7::new(127).overflowing_add(u7::new(127)), + (u7::new(126), true) + ); + assert_eq!( + UInt::::new(250).overflowing_add(UInt::::new(5)), + (UInt::::new(255), false) + ); + assert_eq!( + UInt::::new(250).overflowing_add(UInt::::new(10)), + (UInt::::new(4), true) + ); +} + +#[test] +fn overflowing_sub() { + assert_eq!( + u7::new(120).overflowing_sub(u7::new(30)), + (u7::new(90), false) + ); + assert_eq!( + u7::new(120).overflowing_sub(u7::new(119)), + (u7::new(1), false) + ); + assert_eq!( + u7::new(120).overflowing_sub(u7::new(120)), + (u7::new(0), false) + ); + assert_eq!( + u7::new(120).overflowing_sub(u7::new(121)), + (u7::new(127), true) + ); + assert_eq!(u7::new(0).overflowing_sub(u7::new(127)), (u7::new(1), true)); +} + +#[test] +fn overflowing_mul() { + // Fast-path: Only the arbitrary int is bounds checked + assert_eq!(u4::new(5).overflowing_mul(u4::new(2)), (u4::new(10), false)); + assert_eq!(u4::new(5).overflowing_mul(u4::new(3)), (u4::new(15), false)); + assert_eq!(u4::new(5).overflowing_mul(u4::new(4)), (u4::new(4), true)); + assert_eq!(u4::new(5).overflowing_mul(u4::new(5)), (u4::new(9), true)); + assert_eq!(u4::new(5).overflowing_mul(u4::new(6)), (u4::new(14), true)); + assert_eq!(u4::new(5).overflowing_mul(u4::new(7)), (u4::new(3), true)); + + // Slow-path (well, one more comparison) + assert_eq!(u5::new(5).overflowing_mul(u5::new(2)), (u5::new(10), false)); + assert_eq!(u5::new(5).overflowing_mul(u5::new(3)), (u5::new(15), false)); + assert_eq!(u5::new(5).overflowing_mul(u5::new(4)), (u5::new(20), false)); + assert_eq!(u5::new(5).overflowing_mul(u5::new(5)), (u5::new(25), false)); + assert_eq!(u5::new(5).overflowing_mul(u5::new(6)), (u5::new(30), false)); + assert_eq!(u5::new(5).overflowing_mul(u5::new(7)), (u5::new(3), true)); + assert_eq!( + u5::new(30).overflowing_mul(u5::new(1)), + (u5::new(30), false) + ); + assert_eq!(u5::new(30).overflowing_mul(u5::new(2)), (u5::new(28), true)); + assert_eq!( + u5::new(30).overflowing_mul(u5::new(10)), + (u5::new(12), true) + ); +} + +#[test] +fn overflowing_div() { + assert_eq!(u4::new(5).overflowing_div(u4::new(1)), (u4::new(5), false)); + assert_eq!(u4::new(5).overflowing_div(u4::new(2)), (u4::new(2), false)); + assert_eq!(u4::new(5).overflowing_div(u4::new(3)), (u4::new(1), false)); + assert_eq!(u4::new(5).overflowing_div(u4::new(4)), (u4::new(1), false)); + assert_eq!(u4::new(5).overflowing_div(u4::new(5)), (u4::new(1), false)); +} + +#[should_panic] +#[test] +fn overflowing_div_by_zero() { + let _ = u4::new(5).overflowing_div(u4::new(0)); +} + +#[test] +fn overflowing_shl() { + assert_eq!( + u7::new(0b010_1101).overflowing_shl(0), + (u7::new(0b010_1101), false) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shl(1), + (u7::new(0b101_1010), false) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shl(6), + (u7::new(0b100_0000), false) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shl(7), + (u7::new(0b010_1101), true) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shl(8), + (u7::new(0b101_1010), true) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shl(14), + (u7::new(0b010_1101), true) + ); + assert_eq!( + 
u7::new(0b010_1101).overflowing_shl(15), + (u7::new(0b101_1010), true) + ); +} + +#[test] +fn overflowing_shr() { + assert_eq!( + u7::new(0b010_1101).overflowing_shr(0), + (u7::new(0b010_1101), false) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shr(1), + (u7::new(0b001_0110), false) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shr(5), + (u7::new(0b000_0001), false) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shr(7), + (u7::new(0b010_1101), true) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shr(8), + (u7::new(0b001_0110), true) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shr(14), + (u7::new(0b010_1101), true) + ); + assert_eq!( + u7::new(0b010_1101).overflowing_shr(15), + (u7::new(0b001_0110), true) + ); +} + #[test] fn reverse_bits() { const A: u5 = u5::new(0b11101);
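As a quick orientation for reviewers, below is a minimal usage sketch of the operations this patch adds. The values mirror the tests above, and the `u7`/`u9` aliases are the ones arbitrary-int already exports; the snippet is illustrative only, not part of the diff.

```rust
use arbitrary_int::{u7, u9};

fn main() {
    // Wrapping arithmetic stays within the 7-bit range (0..=127)
    assert_eq!(u7::new(120).wrapping_add(u7::new(10)), u7::new(2));

    // Saturating arithmetic clamps at the type's MAX instead of wrapping
    assert_eq!(u7::new(120).saturating_add(u7::new(10)), u7::new(127));

    // Checked arithmetic returns None when the result would not fit
    assert_eq!(u7::new(120).checked_add(u7::new(10)), None);

    // Overflowing arithmetic returns the wrapped value plus an overflow flag
    assert_eq!(u7::new(127).overflowing_add(u7::new(127)), (u7::new(126), true));

    // The new Mul/Div operator impls behave like the built-in integer operators
    assert_eq!(u9::new(240) * u9::new(2), u9::new(480));
    assert_eq!(u9::new(240) / u9::new(2), u9::new(120));
}
```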