diff --git a/lexical-benchmark/algorithm/bigint.rs b/lexical-benchmark/algorithm/bigint.rs
index 9d209449..31394f95 100644
--- a/lexical-benchmark/algorithm/bigint.rs
+++ b/lexical-benchmark/algorithm/bigint.rs
@@ -15,7 +15,7 @@ fn standard_pow(big: &mut bigint::Bigint, exp: u32) {
 fn small_pow(big: &mut bigint::Bigint, mut exp: u32) {
     let shift = exp as usize;
     // Mul pow5
-    let small_step = if bigint::LIMB_BITS == 32 {
+    let small_step = if bigint::Limb::BITS == 32 {
         u32_power_limit(5)
     } else {
         u64_power_limit(5)
@@ -189,7 +189,7 @@ fn karatsuba_mul_algo(big: &mut bigint::Bigint, y: &[bigint::Limb]) {
 
 #[inline(always)]
 fn new_limb(rng: &mut Rng) -> bigint::Limb {
-    if bigint::LIMB_BITS == 32 {
+    if bigint::Limb::BITS == 32 {
         rng.u32(..) as bigint::Limb
     } else {
         rng.u64(..) as bigint::Limb
diff --git a/lexical-parse-float/src/bigint.rs b/lexical-parse-float/src/bigint.rs
index 0070ae4f..114a2fa7 100644
--- a/lexical-parse-float/src/bigint.rs
+++ b/lexical-parse-float/src/bigint.rs
@@ -41,7 +41,7 @@ const BIGINT_BITS: usize = 6000;
 const BIGINT_BITS: usize = 4000;
 
 /// The number of limbs for the bigint.
-const BIGINT_LIMBS: usize = BIGINT_BITS / LIMB_BITS;
+const BIGINT_LIMBS: usize = BIGINT_BITS / Limb::BITS as usize;
 
 /// Storage for a big integer type.
 ///
@@ -144,7 +144,7 @@ const BIGFLOAT_BITS: usize = 1200;
 
 /// The number of limbs for the Bigfloat.
 #[cfg(feature = "radix")]
-const BIGFLOAT_LIMBS: usize = BIGFLOAT_BITS / LIMB_BITS;
+const BIGFLOAT_LIMBS: usize = BIGFLOAT_BITS / Limb::BITS as usize;
 
 /// Storage for a big floating-point type.
 ///
@@ -247,7 +247,7 @@ impl Bigfloat {
 impl ops::MulAssign<&Bigfloat> for Bigfloat {
     #[inline(always)]
     #[allow(clippy::suspicious_op_assign_impl)] // reason="intended increment"
-    #[allow(clippy::unwrap_used)] // reason="exceeding the bounds is a developper error"
+    #[allow(clippy::unwrap_used)] // reason="exceeding the bounds is a developer error"
     fn mul_assign(&mut self, rhs: &Bigfloat) {
         large_mul(&mut self.data, &rhs.data).unwrap();
         self.exp += rhs.exp;
@@ -544,9 +544,9 @@ impl<const SIZE: usize> StackVec<SIZE> {
         unsafe {
             match rview.len() {
                 0 => (0, false),
-                1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi16_1),
+                1 if Limb::BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi16_1),
                 1 => hi!(@1 self, rview, u64, u64_to_hi16_1),
-                _ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi16_2),
+                _ if Limb::BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi16_2),
                 _ => hi!(@nonzero2 self, rview, u64, u64_to_hi16_2),
             }
         }
@@ -561,9 +561,9 @@ impl<const SIZE: usize> StackVec<SIZE> {
         unsafe {
             match rview.len() {
                 0 => (0, false),
-                1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi32_1),
+                1 if Limb::BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi32_1),
                 1 => hi!(@1 self, rview, u64, u64_to_hi32_1),
-                _ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi32_2),
+                _ if Limb::BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi32_2),
                 _ => hi!(@nonzero2 self, rview, u64, u64_to_hi32_2),
             }
         }
@@ -578,11 +578,11 @@ impl<const SIZE: usize> StackVec<SIZE> {
         unsafe {
             match rview.len() {
                 0 => (0, false),
-                1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi64_1),
+                1 if Limb::BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi64_1),
                 1 => hi!(@1 self, rview, u64, u64_to_hi64_1),
-                2 if LIMB_BITS == 32 => hi!(@2 self, rview, u32, u32_to_hi64_2),
+                2 if Limb::BITS == 32 => hi!(@2 self, rview, u32, u32_to_hi64_2),
                 2 => hi!(@2 self, rview, u64, u64_to_hi64_2),
-                _ if LIMB_BITS == 32 => hi!(@nonzero3 self, rview, u32, u32_to_hi64_3),
+                _ if Limb::BITS == 32 => hi!(@nonzero3 self, rview, u32, u32_to_hi64_3),
                 _ => hi!(@nonzero2 self, rview, u64, u64_to_hi64_2),
             }
         }
@@ -620,7 +620,7 @@ impl<const SIZE: usize> StackVec<SIZE> {
         let mut vec = Self::new();
         debug_assert!(2 <= vec.capacity(), "cannot exceed our array bounds");
         assert!(2 <= SIZE, "cannot exceed our array bounds");
-        if LIMB_BITS == 32 {
+        if Limb::BITS == 32 {
             _ = vec.try_push(x as Limb);
             _ = vec.try_push((x >> 32) as Limb);
         } else {
@@ -700,8 +700,7 @@ impl<const SIZE: usize> PartialEq for StackVec<SIZE> {
     }
 }
 
-impl<const SIZE: usize> Eq for StackVec<SIZE> {
-}
+impl<const SIZE: usize> Eq for StackVec<SIZE> {}
 
 impl<const SIZE: usize> cmp::PartialOrd for StackVec<SIZE> {
     #[inline(always)]
@@ -746,7 +745,7 @@ impl<const SIZE: usize> ops::DerefMut for StackVec<SIZE> {
 
 impl<const SIZE: usize> ops::MulAssign<&[Limb]> for StackVec<SIZE> {
     #[inline(always)]
-    #[allow(clippy::unwrap_used)] // reason="exceeding the bounds is a developper error"
+    #[allow(clippy::unwrap_used)] // reason="exceeding the bounds is a developer error"
     fn mul_assign(&mut self, rhs: &[Limb]) {
         large_mul(self, rhs).unwrap();
     }
@@ -1016,7 +1015,7 @@ pub fn pow<const SIZE: usize>(x: &mut StackVec<SIZE>, base: u32, mut exp: u32) -
     }
 
     // Now use our pre-computed small powers iteratively.
-    let small_step = if LIMB_BITS == 32 {
+    let small_step = if Limb::BITS == 32 {
         u32_power_limit(base)
     } else {
         u64_power_limit(base)
@@ -1055,7 +1054,7 @@ pub const fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) {
     // the following is always true:
     // `Wide::MAX - (Narrow::MAX * Narrow::MAX) >= Narrow::MAX`
     let z: Wide = (x as Wide) * (y as Wide) + (carry as Wide);
-    (z as Limb, (z >> LIMB_BITS) as Limb)
+    (z as Limb, (z >> Limb::BITS) as Limb)
 }
 
 // SMALL
@@ -1301,10 +1300,10 @@ pub fn large_quorem<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Li
         for j in 0..x.len() {
             let yj = y[j] as Wide;
             let p = yj * q as Wide + carry;
-            carry = p >> LIMB_BITS;
+            carry = p >> Limb::BITS;
             let xj = x[j] as Wide;
             let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
-            borrow = (t >> LIMB_BITS) & 1;
+            borrow = (t >> Limb::BITS) & 1;
             x[j] = t as Limb;
         }
         x.normalize();
@@ -1318,10 +1317,10 @@ pub fn large_quorem<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Li
         for j in 0..x.len() {
             let yj = y[j] as Wide;
             let p = yj + carry;
-            carry = p >> LIMB_BITS;
+            carry = p >> Limb::BITS;
             let xj = x[j] as Wide;
             let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
-            borrow = (t >> LIMB_BITS) & 1;
+            borrow = (t >> Limb::BITS) & 1;
             x[j] = t as Limb;
         }
         x.normalize();
@@ -1366,8 +1365,8 @@ pub fn shl_bits<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<(
     // For example, we transform (for u8) shifted left 2, to:
     //      b10100100 b01000010
     //      b10 b10010001 b00001000
-    debug_assert!(n < LIMB_BITS, "cannot shift left more bits than in our limb");
-    let rshift = LIMB_BITS - n;
+    debug_assert!(n < Limb::BITS as usize, "cannot shift left more bits than in our limb");
+    let rshift = Limb::BITS as usize - n;
     let lshift = n;
     let mut prev: Limb = 0;
     for xi in x.iter_mut() {
@@ -1416,8 +1415,8 @@ pub fn shl_limbs<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<
 #[must_use]
 #[inline(always)]
 pub fn shl<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<()> {
-    let rem = n % LIMB_BITS;
-    let div = n / LIMB_BITS;
+    let rem = n % Limb::BITS as usize;
+    let div = n / Limb::BITS as usize;
     if rem != 0 {
         shl_bits(x, rem)?;
     }
@@ -1445,7 +1444,7 @@ pub fn leading_zeros(x: &[Limb]) -> u32 {
 #[inline(always)]
 pub fn bit_length(x: &[Limb]) -> u32 {
     let nlz = leading_zeros(x);
-    LIMB_BITS as u32 * x.len() as u32 - nlz
+    Limb::BITS as u32 * x.len() as u32 - nlz
 }
 
 // RADIX
@@ -1612,8 +1611,6 @@ pub type Limb = u64;
 pub type Wide = u128;
 #[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
 pub type SignedWide = i128;
-#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
not(target_arch = "sparc")))] -pub const LIMB_BITS: usize = 64; #[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] pub type Limb = u32; @@ -1621,5 +1618,3 @@ pub type Limb = u32; pub type Wide = u64; #[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] pub type SignedWide = i64; -#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] -pub const LIMB_BITS: usize = 32; diff --git a/lexical-parse-float/src/number.rs b/lexical-parse-float/src/number.rs index d901feef..4e783e73 100644 --- a/lexical-parse-float/src/number.rs +++ b/lexical-parse-float/src/number.rs @@ -48,7 +48,7 @@ impl Number<'_> { && !self.many_digits } - /// The fast path algorithmn using machine-sized integers and floats. + /// The fast path algorithm using machine-sized integers and floats. /// /// This is extracted into a separate function so that it can be attempted /// before constructing a Decimal. This only works if both the mantissa @@ -60,7 +60,7 @@ impl Number<'_> { // `set_precision` doesn't return a unit value on x87 FPUs. #[must_use] #[allow(clippy::missing_inline_in_public_items)] // reason = "only public for testing" - #[allow(clippy::let_unit_value)] // reason = "untentional ASM drop for X87 FPUs" + #[allow(clippy::let_unit_value)] // reason = "intentional ASM drop for X87 FPUs" pub fn try_fast_path(&self) -> Option { let format = NumberFormat:: {}; debug_assert!( @@ -110,7 +110,7 @@ impl Number<'_> { // `set_precision` doesn't return a unit value on x87 FPUs. #[must_use] #[allow(clippy::missing_inline_in_public_items)] // reason = "only public for testing" - #[allow(clippy::let_unit_value)] // reason = "untentional ASM drop for X87 FPUs" + #[allow(clippy::let_unit_value)] // reason = "intentional ASM drop for X87 FPUs" pub fn force_fast_path(&self) -> F { let format = NumberFormat:: {}; debug_assert!( diff --git a/lexical-parse-float/src/slow.rs b/lexical-parse-float/src/slow.rs index db57e209..30e4d522 100644 --- a/lexical-parse-float/src/slow.rs +++ b/lexical-parse-float/src/slow.rs @@ -19,7 +19,7 @@ use lexical_util::num::{AsPrimitive, Integer}; #[cfg(feature = "radix")] use crate::bigint::Bigfloat; -use crate::bigint::{Bigint, Limb, LIMB_BITS}; +use crate::bigint::{Bigint, Limb}; use crate::float::{extended_to_float, ExtendedFloat80, RawFloat}; use crate::limits::{u32_power_limit, u64_power_limit}; use crate::number::Number; @@ -413,7 +413,7 @@ pub fn parse_mantissa(num: Number, max_digits: usize) -> (Bi let mut result = Bigint::new(); // Now use our pre-computed small powers iteratively. - let step = if LIMB_BITS == 32 { + let step = if Limb::BITS == 32 { u32_power_limit(format.radix()) } else { u64_power_limit(format.radix()) @@ -645,11 +645,11 @@ pub fn byte_comp( num.shl(shift).unwrap(); num.exp -= shift as i32; } else if diff > 0 { - // Need to shift denominator left, go by a power of LIMB_BITS. + // Need to shift denominator left, go by a power of Limb::BITS. // After this, the numerator will be non-normalized, and the // denominator will be normalized. We need to add one to the // quotient,since we're calculating the ceiling of the divmod. 
-        let (q, r) = shift.ceil_divmod(LIMB_BITS);
+        let (q, r) = shift.ceil_divmod(Limb::BITS as usize);
         let r = -r;
         if r != 0 {
             num.shl_bits(r as usize).unwrap();
@@ -657,7 +657,7 @@ pub fn byte_comp<F: RawFloat, const FORMAT: u128>(
         }
         if q != 0 {
             den.shl_limbs(q).unwrap();
-            den.exp -= LIMB_BITS as i32 * q as i32;
+            den.exp -= Limb::BITS as i32 * q as i32;
         }
     }
 
diff --git a/lexical-parse-float/tests/bigfloat_tests.rs b/lexical-parse-float/tests/bigfloat_tests.rs
index 02f20ee9..91b48aa2 100644
--- a/lexical-parse-float/tests/bigfloat_tests.rs
+++ b/lexical-parse-float/tests/bigfloat_tests.rs
@@ -2,7 +2,7 @@
 
 mod stackvec;
 
-use lexical_parse_float::bigint::{Bigfloat, LIMB_BITS};
+use lexical_parse_float::bigint::{Bigfloat, Limb};
 use lexical_parse_float::float::ExtendedFloat80;
 use stackvec::vec_from_u32;
 
@@ -35,11 +35,11 @@ fn simple_test() {
     assert_eq!(&*x.data, &[0, 19531250]);
     assert_eq!(x.exp, 10);
-    assert_eq!(x.leading_zeros(), LIMB_BITS as u32 - 25);
+    assert_eq!(x.leading_zeros(), Limb::BITS - 25);
 
     // y has a 0 for 32-bit limbs, no 0s for 64-bit limbs.
     x *= &y;
-    let expected = if LIMB_BITS == 32 {
+    let expected = if Limb::BITS == 32 {
         vec_from_u32(&[0, 0, 0, 9765625])
     } else {
         vec_from_u32(&[0, 0, 0, 0, 9765625])
@@ -52,12 +52,12 @@ fn simple_test() {
 }
 
 #[test]
 fn leading_zeros_test() {
     assert_eq!(Bigfloat::new().leading_zeros(), 0);
-    assert_eq!(Bigfloat::from_u32(0xFF).leading_zeros(), LIMB_BITS as u32 - 8);
+    assert_eq!(Bigfloat::from_u32(0xFF).leading_zeros(), Limb::BITS - 8);
     assert_eq!(Bigfloat::from_u64(0xFF00000000).leading_zeros(), 24);
-    assert_eq!(Bigfloat::from_u32(0xF).leading_zeros(), LIMB_BITS as u32 - 4);
+    assert_eq!(Bigfloat::from_u32(0xF).leading_zeros(), Limb::BITS - 4);
     assert_eq!(Bigfloat::from_u64(0xF00000000).leading_zeros(), 28);
-    assert_eq!(Bigfloat::from_u32(0xF0).leading_zeros(), LIMB_BITS as u32 - 8);
+    assert_eq!(Bigfloat::from_u32(0xF0).leading_zeros(), Limb::BITS - 8);
     assert_eq!(Bigfloat::from_u64(0xF000000000).leading_zeros(), 24);
 }
diff --git a/lexical-parse-float/tests/stackvec_tests.rs b/lexical-parse-float/tests/stackvec_tests.rs
index 40fff904..2460a785 100644
--- a/lexical-parse-float/tests/stackvec_tests.rs
+++ b/lexical-parse-float/tests/stackvec_tests.rs
@@ -2,7 +2,7 @@
 mod stackvec;
 
 use core::cmp;
-use lexical_parse_float::bigint::{self, Limb, StackVec, LIMB_BITS};
+use lexical_parse_float::bigint::{self, Limb, StackVec};
 use stackvec::vec_from_u32;
 
 const SIZE: usize = 50;
@@ -34,7 +34,7 @@ fn simple_test() {
     assert_eq!(x.len(), 2);
     assert_eq!(x.is_empty(), false);
     assert_eq!(x.hi16(), (0x8000, true));
-    if LIMB_BITS == 32 {
+    if Limb::BITS == 32 {
         assert_eq!(x.hi32(), (0x80000002, true));
         assert_eq!(x.hi64(), (0x8000000280000000, false));
     } else {
@@ -128,7 +128,7 @@ fn math_test() {
     x.mul_small(3);
     assert_eq!(&*x, &[0, 6, 27]);
     x.mul_small(Limb::MAX);
-    let expected: VecType = if LIMB_BITS == 32 {
+    let expected: VecType = if Limb::BITS == 32 {
         vec_from_u32(&[0, 4294967290, 4294967274, 26])
     } else {
         vec_from_u32(&[0, 0, 4294967290, 4294967295, 4294967274, 4294967295, 26])
@@ -419,7 +419,7 @@ fn shl_bits_test() {
 fn shl_limbs_test() {
     let mut x = VecType::from_u32(0xD2210408);
     bigint::shl_limbs(&mut x, 2);
-    let expected: VecType = if LIMB_BITS == 32 {
+    let expected: VecType = if Limb::BITS == 32 {
         vec_from_u32(&[0, 0, 0xD2210408])
     } else {
         vec_from_u32(&[0, 0, 0, 0, 0xD2210408])
diff --git a/lexical-util/src/num.rs b/lexical-util/src/num.rs
index ca310d13..09d4d17c 100644
--- a/lexical-util/src/num.rs
+++ b/lexical-util/src/num.rs
@@ -471,10 +471,10 @@ pub trait Integer:
         // this is heavily optimized for base10 and it's a way under estimate
         // that said, it's fast and works.
         if radix <= 16 {
-            core::mem::size_of::<Self>() * 2 - Self::IS_SIGNED as usize
+            mem::size_of::<Self>() * 2 - Self::IS_SIGNED as usize
         } else {
             // way under approximation but always works and is fast
-            core::mem::size_of::<Self>()
+            mem::size_of::<Self>()
         }
     }
 }
@@ -487,8 +487,7 @@ macro_rules! integer_impl {
         const TWO: $t = 2;
         const MAX: $t = $t::MAX;
         const MIN: $t = $t::MIN;
-        // DEPRECATE: when we drop support for <= 1.53.0, change to `<$t>::BITS`
-        const BITS: usize = mem::size_of::<$t>() * 8;
+        const BITS: usize = $t::BITS as usize;
 
         #[inline(always)]
         fn leading_zeros(self) -> u32 {
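
Reviewer note, not part of the patch: the whole change hinges on one fact, namely that `u32::BITS`/`u64::BITS` (stable since Rust 1.53) are associated constants on the primitive types, and associated constants are reachable through a type alias. So `Limb::BITS` tracks the target's limb width automatically and the hand-maintained `LIMB_BITS` constants become redundant. Below is a minimal, self-contained sketch of the pattern; the `cfg` mirrors the diff, while the `main` body and the `limbs_for_4000_bits` name are illustrative only:

```rust
// 64-bit limbs on 64-bit targets, except SPARC, matching the diff's cfg.
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type Limb = u64;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type Limb = u32;

fn main() {
    // Old: `const LIMB_BITS: usize = 64;` (or 32), kept in sync by hand.
    // New: `Limb::BITS` resolves to `u64::BITS` or `u32::BITS` automatically.
    // Note `Limb::BITS` is a `u32`, hence the `as usize` casts in the diff.
    let limbs_for_4000_bits = 4000 / Limb::BITS as usize;
    if Limb::BITS == 32 {
        assert_eq!(limbs_for_4000_bits, 125);
    } else {
        assert_eq!(limbs_for_4000_bits, 62);
    }
    println!("Limb::BITS = {}", Limb::BITS);
}
```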