Skip to content

Commit

Permalink
Use modern BITS constant
Browse files Browse the repository at this point in the history
  • Loading branch information
raldone01 committed Nov 28, 2024
1 parent 7d6a081 commit dab0e28
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 33 deletions.
45 changes: 21 additions & 24 deletions lexical-parse-float/src/bigint.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ const BIGINT_BITS: usize = 6000;
const BIGINT_BITS: usize = 4000;

/// The number of limbs for the bigint.
const BIGINT_LIMBS: usize = BIGINT_BITS / LIMB_BITS;
const BIGINT_LIMBS: usize = BIGINT_BITS / Limb::BITS as usize;

/// Storage for a big integer type.
///
Expand Down Expand Up @@ -144,7 +144,7 @@ const BIGFLOAT_BITS: usize = 1200;

/// The number of limbs for the Bigfloat.
#[cfg(feature = "radix")]
const BIGFLOAT_LIMBS: usize = BIGFLOAT_BITS / LIMB_BITS;
const BIGFLOAT_LIMBS: usize = BIGFLOAT_BITS / Limb::BITS as usize;

/// Storage for a big floating-point type.
///
Expand Down Expand Up @@ -544,9 +544,9 @@ impl<const SIZE: usize> StackVec<SIZE> {
unsafe {
match rview.len() {
0 => (0, false),
1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi16_1),
1 if Limb::BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi16_1),
1 => hi!(@1 self, rview, u64, u64_to_hi16_1),
_ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi16_2),
_ if Limb::BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi16_2),
_ => hi!(@nonzero2 self, rview, u64, u64_to_hi16_2),
}
}
Expand All @@ -561,9 +561,9 @@ impl<const SIZE: usize> StackVec<SIZE> {
unsafe {
match rview.len() {
0 => (0, false),
1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi32_1),
1 if Limb::BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi32_1),
1 => hi!(@1 self, rview, u64, u64_to_hi32_1),
_ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi32_2),
_ if Limb::BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi32_2),
_ => hi!(@nonzero2 self, rview, u64, u64_to_hi32_2),
}
}
Expand All @@ -578,11 +578,11 @@ impl<const SIZE: usize> StackVec<SIZE> {
unsafe {
match rview.len() {
0 => (0, false),
1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi64_1),
1 if Limb::BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi64_1),
1 => hi!(@1 self, rview, u64, u64_to_hi64_1),
2 if LIMB_BITS == 32 => hi!(@2 self, rview, u32, u32_to_hi64_2),
2 if Limb::BITS == 32 => hi!(@2 self, rview, u32, u32_to_hi64_2),
2 => hi!(@2 self, rview, u64, u64_to_hi64_2),
_ if LIMB_BITS == 32 => hi!(@nonzero3 self, rview, u32, u32_to_hi64_3),
_ if Limb::BITS == 32 => hi!(@nonzero3 self, rview, u32, u32_to_hi64_3),
_ => hi!(@nonzero2 self, rview, u64, u64_to_hi64_2),
}
}
Expand Down Expand Up @@ -620,7 +620,7 @@ impl<const SIZE: usize> StackVec<SIZE> {
let mut vec = Self::new();
debug_assert!(2 <= vec.capacity(), "cannot exceed our array bounds");
assert!(2 <= SIZE, "cannot exceed our array bounds");
if LIMB_BITS == 32 {
if Limb::BITS == 32 {
_ = vec.try_push(x as Limb);
_ = vec.try_push((x >> 32) as Limb);
} else {
Expand Down Expand Up @@ -1015,7 +1015,7 @@ pub fn pow<const SIZE: usize>(x: &mut StackVec<SIZE>, base: u32, mut exp: u32) -
}

// Now use our pre-computed small powers iteratively.
let small_step = if LIMB_BITS == 32 {
let small_step = if Limb::BITS == 32 {
u32_power_limit(base)
} else {
u64_power_limit(base)
Expand Down Expand Up @@ -1054,7 +1054,7 @@ pub const fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) {
// the following is always true:
// `Wide::MAX - (Narrow::MAX * Narrow::MAX) >= Narrow::MAX`
let z: Wide = (x as Wide) * (y as Wide) + (carry as Wide);
(z as Limb, (z >> LIMB_BITS) as Limb)
(z as Limb, (z >> Limb::BITS) as Limb)
}

// SMALL
Expand Down Expand Up @@ -1300,10 +1300,10 @@ pub fn large_quorem<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Li
for j in 0..x.len() {
let yj = y[j] as Wide;
let p = yj * q as Wide + carry;
carry = p >> LIMB_BITS;
carry = p >> Limb::BITS;
let xj = x[j] as Wide;
let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
borrow = (t >> LIMB_BITS) & 1;
borrow = (t >> Limb::BITS) & 1;
x[j] = t as Limb;
}
x.normalize();
Expand All @@ -1317,10 +1317,10 @@ pub fn large_quorem<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Li
for j in 0..x.len() {
let yj = y[j] as Wide;
let p = yj + carry;
carry = p >> LIMB_BITS;
carry = p >> Limb::BITS;
let xj = x[j] as Wide;
let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
borrow = (t >> LIMB_BITS) & 1;
borrow = (t >> Limb::BITS) & 1;
x[j] = t as Limb;
}
x.normalize();
Expand Down Expand Up @@ -1365,8 +1365,8 @@ pub fn shl_bits<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<(
// For example, we transform (for u8) shifted left 2, to:
// b10100100 b01000010
// b10 b10010001 b00001000
debug_assert!(n < LIMB_BITS, "cannot shift left more bits than in our limb");
let rshift = LIMB_BITS - n;
debug_assert!(n < Limb::BITS as usize, "cannot shift left more bits than in our limb");
let rshift = Limb::BITS as usize - n;
let lshift = n;
let mut prev: Limb = 0;
for xi in x.iter_mut() {
Expand Down Expand Up @@ -1415,8 +1415,8 @@ pub fn shl_limbs<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<
#[must_use]
#[inline(always)]
pub fn shl<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<()> {
let rem = n % LIMB_BITS;
let div = n / LIMB_BITS;
let rem = n % Limb::BITS as usize;
let div = n / Limb::BITS as usize;
if rem != 0 {
shl_bits(x, rem)?;
}
Expand Down Expand Up @@ -1444,7 +1444,7 @@ pub fn leading_zeros(x: &[Limb]) -> u32 {
#[inline(always)]
pub fn bit_length(x: &[Limb]) -> u32 {
let nlz = leading_zeros(x);
LIMB_BITS as u32 * x.len() as u32 - nlz
Limb::BITS as u32 * x.len() as u32 - nlz
}

// RADIX
Expand Down Expand Up @@ -1618,6 +1618,3 @@ pub type Limb = u32;
pub type Wide = u64;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type SignedWide = i64;

// DEPRECATE: when we drop support for <= 1.53.0, this can just be removed and `Limb::BITS` used in the places where `LIMB_BITS` is used.
pub const LIMB_BITS: usize = mem::size_of::<Limb>() * 8;
10 changes: 5 additions & 5 deletions lexical-parse-float/src/slow.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ use lexical_util::num::{AsPrimitive, Integer};

#[cfg(feature = "radix")]
use crate::bigint::Bigfloat;
use crate::bigint::{Bigint, Limb, LIMB_BITS};
use crate::bigint::{Bigint, Limb};
use crate::float::{extended_to_float, ExtendedFloat80, RawFloat};
use crate::limits::{u32_power_limit, u64_power_limit};
use crate::number::Number;
Expand Down Expand Up @@ -413,7 +413,7 @@ pub fn parse_mantissa<const FORMAT: u128>(num: Number, max_digits: usize) -> (Bi
let mut result = Bigint::new();

// Now use our pre-computed small powers iteratively.
let step = if LIMB_BITS == 32 {
let step = if Limb::BITS == 32 {
u32_power_limit(format.radix())
} else {
u64_power_limit(format.radix())
Expand Down Expand Up @@ -645,19 +645,19 @@ pub fn byte_comp<F: RawFloat, const FORMAT: u128>(
num.shl(shift).unwrap();
num.exp -= shift as i32;
} else if diff > 0 {
// Need to shift denominator left, go by a power of LIMB_BITS.
// Need to shift denominator left, go by a power of Limb::BITS.
// After this, the numerator will be non-normalized, and the
// denominator will be normalized. We need to add one to the
// quotient, since we're calculating the ceiling of the divmod.
let (q, r) = shift.ceil_divmod(LIMB_BITS);
let (q, r) = shift.ceil_divmod(Limb::BITS);
let r = -r;
if r != 0 {
num.shl_bits(r as usize).unwrap();
num.exp -= r;
}
if q != 0 {
den.shl_limbs(q).unwrap();
den.exp -= LIMB_BITS as i32 * q as i32;
den.exp -= Limb::BITS as i32 * q as i32;
}
}

Expand Down
7 changes: 3 additions & 4 deletions lexical-util/src/num.rs
Original file line number Diff line number Diff line change
Expand Up @@ -471,10 +471,10 @@ pub trait Integer:
// this is heavily optimized for base10 and it's a way under estimate
// that said, it's fast and works.
if radix <= 16 {
core::mem::size_of::<Self>() * 2 - Self::IS_SIGNED as usize
mem::size_of::<Self>() * 2 - Self::IS_SIGNED as usize
} else {
// way under approximation but always works and is fast
core::mem::size_of::<Self>()
mem::size_of::<Self>()
}
}
}
Expand All @@ -487,8 +487,7 @@ macro_rules! integer_impl {
const TWO: $t = 2;
const MAX: $t = $t::MAX;
const MIN: $t = $t::MIN;
// DEPRECATE: when we drop support for <= 1.53.0, change to `<$t>::BITS`
const BITS: usize = mem::size_of::<$t>() * 8;
const BITS: usize = $t::BITS as usize;

#[inline(always)]
fn leading_zeros(self) -> u32 {
Expand Down

0 comments on commit dab0e28

Please sign in to comment.