Replace LIMB_BITS with the modern BITS constant.
raldone01 committed Dec 2, 2024
1 parent 07eb3a3 commit 5430dba
Showing 7 changed files with 47 additions and 53 deletions.
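For context: `Limb` is aliased to `u64` on most 64-bit targets and to `u32` otherwise, and `LIMB_BITS` was a hand-maintained copy of that width. This commit reads the width off the aliased integer itself through the associated `BITS` constant, stable since Rust 1.53 (the `DEPRECATE` note removed from lexical-util below already pointed at it). A minimal sketch of the two styles, with illustrative values rather than the crate's exact cfg set:

pub type Limb = u64; // u32 on 32-bit targets in the real crate

// Old style: a parallel constant that had to be kept in sync with the alias.
pub const LIMB_BITS: usize = 64;

// New style: read the width off the aliased integer. `u64::BITS` is a `u32`,
// which is why former `LIMB_BITS` call sites in this diff now cast with `as usize`.
pub const BIGINT_LIMBS: usize = 4000 / Limb::BITS as usize; // 4000 / 64 = 62

fn main() {
    assert_eq!(LIMB_BITS, Limb::BITS as usize);
    assert_eq!(BIGINT_LIMBS, 62);
}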
4 changes: 2 additions & 2 deletions lexical-benchmark/algorithm/bigint.rs
@@ -15,7 +15,7 @@ fn standard_pow(big: &mut bigint::Bigint, exp: u32) {
fn small_pow(big: &mut bigint::Bigint, mut exp: u32) {
let shift = exp as usize;
// Mul pow5
let small_step = if bigint::LIMB_BITS == 32 {
let small_step = if bigint::Limb::BITS == 32 {
u32_power_limit(5)
} else {
u64_power_limit(5)
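The 32/64-bit branch above exists because the largest power of 5 that fits in one limb depends on the limb width: presumably `u32_power_limit(5)` is 13 (5^13 is about 1.2e9, which fits in a u32) and `u64_power_limit(5)` is 27 (5^27 is about 7.5e18, which fits in a u64), so each step multiplies by the biggest in-limb power available. A hedged sketch of such a limit function, not the crate's actual implementation:

// Largest exponent e with base^e <= max (illustrative helper only).
fn power_limit(base: u128, max: u128) -> u32 {
    let mut value: u128 = 1;
    let mut exp = 0;
    while value <= max / base {
        value *= base;
        exp += 1;
    }
    exp
}

fn main() {
    assert_eq!(power_limit(5, u32::MAX as u128), 13); // 5^13 = 1_220_703_125
    assert_eq!(power_limit(5, u64::MAX as u128), 27); // 5^27 ~= 7.45e18
}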
@@ -189,7 +189,7 @@ fn karatsuba_mul_algo(big: &mut bigint::Bigint, y: &[bigint::Limb]) {

#[inline(always)]
fn new_limb(rng: &mut Rng) -> bigint::Limb {
if bigint::LIMB_BITS == 32 {
if bigint::Limb::BITS == 32 {
rng.u32(..) as bigint::Limb
} else {
rng.u64(..) as bigint::Limb
53 changes: 24 additions & 29 deletions lexical-parse-float/src/bigint.rs
@@ -41,7 +41,7 @@ const BIGINT_BITS: usize = 6000;
const BIGINT_BITS: usize = 4000;

/// The number of limbs for the bigint.
const BIGINT_LIMBS: usize = BIGINT_BITS / LIMB_BITS;
const BIGINT_LIMBS: usize = BIGINT_BITS / Limb::BITS as usize;

/// Storage for a big integer type.
///
@@ -144,7 +144,7 @@ const BIGFLOAT_BITS: usize = 1200;

/// The number of limbs for the Bigfloat.
#[cfg(feature = "radix")]
const BIGFLOAT_LIMBS: usize = BIGFLOAT_BITS / LIMB_BITS;
const BIGFLOAT_LIMBS: usize = BIGFLOAT_BITS / Limb::BITS as usize;

/// Storage for a big floating-point type.
///
@@ -247,7 +247,7 @@ impl Bigfloat {
impl ops::MulAssign<&Bigfloat> for Bigfloat {
#[inline(always)]
#[allow(clippy::suspicious_op_assign_impl)] // reason="intended increment"
#[allow(clippy::unwrap_used)] // reason="exceeding the bounds is a developper error"
#[allow(clippy::unwrap_used)] // reason="exceeding the bounds is a developer error"
fn mul_assign(&mut self, rhs: &Bigfloat) {
large_mul(&mut self.data, &rhs.data).unwrap();
self.exp += rhs.exp;
@@ -544,9 +544,9 @@ impl<const SIZE: usize> StackVec<SIZE> {
unsafe {
match rview.len() {
0 => (0, false),
1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi16_1),
1 if Limb::BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi16_1),
1 => hi!(@1 self, rview, u64, u64_to_hi16_1),
_ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi16_2),
_ if Limb::BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi16_2),
_ => hi!(@nonzero2 self, rview, u64, u64_to_hi16_2),
}
}
@@ -561,9 +561,9 @@ impl<const SIZE: usize> StackVec<SIZE> {
unsafe {
match rview.len() {
0 => (0, false),
1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi32_1),
1 if Limb::BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi32_1),
1 => hi!(@1 self, rview, u64, u64_to_hi32_1),
_ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi32_2),
_ if Limb::BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi32_2),
_ => hi!(@nonzero2 self, rview, u64, u64_to_hi32_2),
}
}
@@ -578,11 +578,11 @@ impl<const SIZE: usize> StackVec<SIZE> {
unsafe {
match rview.len() {
0 => (0, false),
1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi64_1),
1 if Limb::BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi64_1),
1 => hi!(@1 self, rview, u64, u64_to_hi64_1),
2 if LIMB_BITS == 32 => hi!(@2 self, rview, u32, u32_to_hi64_2),
2 if Limb::BITS == 32 => hi!(@2 self, rview, u32, u32_to_hi64_2),
2 => hi!(@2 self, rview, u64, u64_to_hi64_2),
_ if LIMB_BITS == 32 => hi!(@nonzero3 self, rview, u32, u32_to_hi64_3),
_ if Limb::BITS == 32 => hi!(@nonzero3 self, rview, u32, u32_to_hi64_3),
_ => hi!(@nonzero2 self, rview, u64, u64_to_hi64_2),
}
}
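These `hi16`/`hi32`/`hi64` helpers return the top bits of the value in normalized form plus a flag recording whether any discarded low bits were non-zero; the match arms only differ in how many limbs it takes to cover the requested width (up to three u32 limbs versus two u64 limbs for `hi64`), which is why the limb-width test moves from `LIMB_BITS` to `Limb::BITS`. A rough standalone sketch of the idea for 64-bit limbs, not the crate's macro-generated code:

// Top 64 bits of a little-endian limb slice, shifted so the MSB is set,
// plus a "truncated" flag for any non-zero bits that did not make the cut.
// Assumes the slice is normalized (no zero top limb), as StackVec guarantees.
fn hi64(limbs: &[u64]) -> (u64, bool) {
    debug_assert!(limbs.last().map_or(true, |&top| top != 0));
    match limbs {
        [] => (0, false),
        [lo] => (*lo << lo.leading_zeros(), false),
        [rest @ .., lo, hi] => {
            let shift = hi.leading_zeros();
            let top = if shift == 0 { *hi } else { (*hi << shift) | (*lo >> (64 - shift)) };
            let spill = if shift == 0 { *lo } else { *lo << shift };
            (top, spill != 0 || rest.iter().any(|&x| x != 0))
        }
    }
}

fn main() {
    assert_eq!(hi64(&[3]), (0xC000_0000_0000_0000, false));   // 3, normalized
    assert_eq!(hi64(&[1, 1]), (0x8000_0000_0000_0000, true)); // 2^64 + 1, low bit lost
}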
@@ -620,7 +620,7 @@ impl<const SIZE: usize> StackVec<SIZE> {
let mut vec = Self::new();
debug_assert!(2 <= vec.capacity(), "cannot exceed our array bounds");
assert!(2 <= SIZE, "cannot exceed our array bounds");
if LIMB_BITS == 32 {
if Limb::BITS == 32 {
_ = vec.try_push(x as Limb);
_ = vec.try_push((x >> 32) as Limb);
} else {
@@ -700,8 +700,7 @@ impl<const SIZE: usize> PartialEq for StackVec<SIZE> {
}
}

impl<const SIZE: usize> Eq for StackVec<SIZE> {
}
impl<const SIZE: usize> Eq for StackVec<SIZE> {}

impl<const SIZE: usize> cmp::PartialOrd for StackVec<SIZE> {
#[inline(always)]
@@ -746,7 +745,7 @@ impl<const SIZE: usize> ops::DerefMut for StackVec<SIZE> {

impl<const SIZE: usize> ops::MulAssign<&[Limb]> for StackVec<SIZE> {
#[inline(always)]
#[allow(clippy::unwrap_used)] // reason="exceeding the bounds is a developper error"
#[allow(clippy::unwrap_used)] // reason="exceeding the bounds is a developer error"
fn mul_assign(&mut self, rhs: &[Limb]) {
large_mul(self, rhs).unwrap();
}
@@ -1016,7 +1015,7 @@ pub fn pow<const SIZE: usize>(x: &mut StackVec<SIZE>, base: u32, mut exp: u32) -
}

// Now use our pre-computed small powers iteratively.
let small_step = if LIMB_BITS == 32 {
let small_step = if Limb::BITS == 32 {
u32_power_limit(base)
} else {
u64_power_limit(base)
@@ -1055,7 +1054,7 @@ pub const fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) {
// the following is always true:
// `Wide::MAX - (Narrow::MAX * Narrow::MAX) >= Narrow::MAX`
let z: Wide = (x as Wide) * (y as Wide) + (carry as Wide);
(z as Limb, (z >> LIMB_BITS) as Limb)
(z as Limb, (z >> Limb::BITS) as Limb)
}
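The comment's invariant is easy to check numerically: with N-bit limbs, Narrow::MAX^2 = 2^(2N) - 2^(N+1) + 1, so Wide::MAX - Narrow::MAX^2 = 2 * Narrow::MAX, which leaves room to add a carry of up to Narrow::MAX without overflowing the wide type. A sketch of the same function specialized to 64-bit limbs, with the worst case worked out:

// scalar_mul from this hunk, with Limb = u64 and Wide = u128 spelled out.
fn scalar_mul(x: u64, y: u64, carry: u64) -> (u64, u64) {
    let z = (x as u128) * (y as u128) + (carry as u128); // cannot overflow, per the invariant
    (z as u64, (z >> u64::BITS) as u64)
}

fn main() {
    // Worst case x = y = carry = u64::MAX:
    // (2^64 - 1)^2 + (2^64 - 1) = (2^64 - 1) * 2^64, i.e. low limb 0, high limb MAX.
    assert_eq!(scalar_mul(u64::MAX, u64::MAX, u64::MAX), (0, u64::MAX));
    assert_eq!(scalar_mul(3, 4, 5), (17, 0));
}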

// SMALL
@@ -1301,10 +1300,10 @@ pub fn large_quorem<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Li
for j in 0..x.len() {
let yj = y[j] as Wide;
let p = yj * q as Wide + carry;
carry = p >> LIMB_BITS;
carry = p >> Limb::BITS;
let xj = x[j] as Wide;
let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
borrow = (t >> LIMB_BITS) & 1;
borrow = (t >> Limb::BITS) & 1;
x[j] = t as Limb;
}
x.normalize();
@@ -1318,10 +1317,10 @@ pub fn large_quorem<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Li
for j in 0..x.len() {
let yj = y[j] as Wide;
let p = yj + carry;
carry = p >> LIMB_BITS;
carry = p >> Limb::BITS;
let xj = x[j] as Wide;
let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
borrow = (t >> LIMB_BITS) & 1;
borrow = (t >> Limb::BITS) & 1;
x[j] = t as Limb;
}
x.normalize();
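Both loops lean on the same wide-arithmetic bookkeeping: `p >> Limb::BITS` peels the carry off a widening multiply-add, and after the wrapping subtraction the borrow falls out of `(t >> Limb::BITS) & 1`, since an underflow in the wide type leaves the bit just above the limb set. A small sketch of one such step, assuming 64-bit limbs (hypothetical helper, not the crate's code):

// One "x[j] -= q * y[j]" step from large_quorem, with Limb = u64, Wide = u128.
const MASK: u128 = u64::MAX as u128;

fn quorem_step(xj: u64, yj: u64, q: u64, carry: u128, borrow: u128) -> (u64, u128, u128) {
    let p = yj as u128 * q as u128 + carry; // widening multiply-add
    let carry = p >> u64::BITS;             // high half feeds the next iteration
    let t = (xj as u128).wrapping_sub(p & MASK).wrapping_sub(borrow);
    let borrow = (t >> u64::BITS) & 1;      // 1 exactly when the subtraction underflowed
    (t as u64, carry, borrow)
}

fn main() {
    // 5 - 7 underflows: the limb wraps to 2^64 - 2 and the borrow bit is 1.
    assert_eq!(quorem_step(5, 7, 1, 0, 0), (u64::MAX - 1, 0, 1));
}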
@@ -1366,8 +1365,8 @@ pub fn shl_bits<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<(
// For example, we transform (for u8) shifted left 2, to:
// b10100100 b01000010
// b10 b10010001 b00001000
debug_assert!(n < LIMB_BITS, "cannot shift left more bits than in our limb");
let rshift = LIMB_BITS - n;
debug_assert!(n < Limb::BITS as usize, "cannot shift left more bits than in our limb");
let rshift = Limb::BITS as usize - n;
let lshift = n;
let mut prev: Limb = 0;
for xi in x.iter_mut() {
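The loop carries the bits shifted off the top of one limb into the bottom of the next, which is exactly what the `b10100100` example in the comment shows. A toy version with u8 "limbs" to keep the numbers readable (the crate of course uses u32/u64 limbs and a fixed-capacity StackVec):

// Toy shl_bits over little-endian u8 limbs, shifting by 0 < n < 8 bits.
fn shl_bits_u8(x: &mut Vec<u8>, n: u32) {
    debug_assert!(n > 0 && n < 8, "cannot shift more bits than in our limb");
    let rshift = 8 - n;
    let mut prev: u8 = 0;
    for xi in x.iter_mut() {
        let tmp = *xi;
        *xi = (*xi << n) | (prev >> rshift); // low bits filled from the previous limb
        prev = tmp;
    }
    let carry = prev >> rshift;
    if carry != 0 {
        x.push(carry); // bits shifted off the top limb start a new limb
    }
}

fn main() {
    // b10100100_01000010 stored little-endian, shifted left by 2:
    // becomes b10_10010001_00001000, i.e. one extra limb on top.
    let mut x = vec![0b0100_0010u8, 0b1010_0100];
    shl_bits_u8(&mut x, 2);
    assert_eq!(x, vec![0b0000_1000, 0b1001_0001, 0b10]);
}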
@@ -1416,8 +1415,8 @@ pub fn shl_limbs<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<
#[must_use]
#[inline(always)]
pub fn shl<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<()> {
let rem = n % LIMB_BITS;
let div = n / LIMB_BITS;
let rem = n % Limb::BITS as usize;
let div = n / Limb::BITS as usize;
if rem != 0 {
shl_bits(x, rem)?;
}
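Splitting the shift into whole limbs plus leftover bits is why both the division and the modulus need `Limb::BITS as usize`: the same 70-bit shift is one limb plus 6 bits with 64-bit limbs but two limbs plus 6 bits with 32-bit limbs. A one-line check of that arithmetic (illustrative values only):

fn main() {
    let n = 70usize;
    assert_eq!((n / u64::BITS as usize, n % u64::BITS as usize), (1, 6));
    assert_eq!((n / u32::BITS as usize, n % u32::BITS as usize), (2, 6));
}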
@@ -1445,7 +1444,7 @@ pub fn leading_zeros(x: &[Limb]) -> u32 {
#[inline(always)]
pub fn bit_length(x: &[Limb]) -> u32 {
let nlz = leading_zeros(x);
LIMB_BITS as u32 * x.len() as u32 - nlz
Limb::BITS as u32 * x.len() as u32 - nlz
}

// RADIX
@@ -1612,14 +1611,10 @@ pub type Limb = u64;
pub type Wide = u128;
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type SignedWide = i128;
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub const LIMB_BITS: usize = 64;

#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type Limb = u32;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type Wide = u64;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type SignedWide = i64;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub const LIMB_BITS: usize = 32;
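With the named constants gone, the width assumptions the carry tricks above rely on (a `Wide` exactly twice as wide as a `Limb`, limbs of 32 or 64 bits) can still be stated directly against the associated constants. A hypothetical standalone check, not something this commit adds:

pub type Limb = u64;  // cfg'd to u32 on other targets, as above
pub type Wide = u128;

const _: () = assert!(Limb::BITS * 2 == Wide::BITS);
const _: () = assert!(Limb::BITS == 32 || Limb::BITS == 64);

fn main() {
    println!("Limb: {} bits, Wide: {} bits", Limb::BITS, Wide::BITS);
}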
6 changes: 3 additions & 3 deletions lexical-parse-float/src/number.rs
@@ -48,7 +48,7 @@ impl Number<'_> {
&& !self.many_digits
}

/// The fast path algorithmn using machine-sized integers and floats.
/// The fast path algorithm using machine-sized integers and floats.
///
/// This is extracted into a separate function so that it can be attempted
/// before constructing a Decimal. This only works if both the mantissa
@@ -60,7 +60,7 @@
// `set_precision` doesn't return a unit value on x87 FPUs.
#[must_use]
#[allow(clippy::missing_inline_in_public_items)] // reason = "only public for testing"
#[allow(clippy::let_unit_value)] // reason = "untentional ASM drop for X87 FPUs"
#[allow(clippy::let_unit_value)] // reason = "intentional ASM drop for X87 FPUs"
pub fn try_fast_path<F: RawFloat, const FORMAT: u128>(&self) -> Option<F> {
let format = NumberFormat::<FORMAT> {};
debug_assert!(
@@ -110,7 +110,7 @@ impl Number<'_> {
// `set_precision` doesn't return a unit value on x87 FPUs.
#[must_use]
#[allow(clippy::missing_inline_in_public_items)] // reason = "only public for testing"
#[allow(clippy::let_unit_value)] // reason = "untentional ASM drop for X87 FPUs"
#[allow(clippy::let_unit_value)] // reason = "intentional ASM drop for X87 FPUs"
pub fn force_fast_path<F: RawFloat, const FORMAT: u128>(&self) -> F {
let format = NumberFormat::<FORMAT> {};
debug_assert!(
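For readers outside the crate: the "fast path" these doc comments refer to is the classic exact-arithmetic shortcut, where a decimal mantissa that fits losslessly in the float's significand, multiplied or divided by an exactly representable power of ten, gives a correctly rounded result in one operation. A rough sketch of that idea for f64, not lexical's implementation (which also handles number formats, other radixes, and the x87 precision quirk the attributes mention):

// Exact fast path for f64: works when the mantissa fits in 2^53 and the
// power of ten is itself exact (10^0 through 10^22 are exact in f64).
fn fast_path_f64(mantissa: u64, exp10: i32) -> Option<f64> {
    const POW10: [f64; 23] = [
        1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11,
        1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22,
    ];
    if mantissa > (1u64 << 53) || exp10.unsigned_abs() as usize >= POW10.len() {
        return None; // fall back to the slower, arbitrary-precision paths
    }
    let pow10 = POW10[exp10.unsigned_abs() as usize];
    Some(if exp10 >= 0 { mantissa as f64 * pow10 } else { mantissa as f64 / pow10 })
}

fn main() {
    assert_eq!(fast_path_f64(12345, -3), Some(12.345));
    assert_eq!(fast_path_f64(u64::MAX, 0), None);
}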
10 changes: 5 additions & 5 deletions lexical-parse-float/src/slow.rs
@@ -19,7 +19,7 @@ use lexical_util::num::{AsPrimitive, Integer};

#[cfg(feature = "radix")]
use crate::bigint::Bigfloat;
use crate::bigint::{Bigint, Limb, LIMB_BITS};
use crate::bigint::{Bigint, Limb};
use crate::float::{extended_to_float, ExtendedFloat80, RawFloat};
use crate::limits::{u32_power_limit, u64_power_limit};
use crate::number::Number;
@@ -413,7 +413,7 @@ pub fn parse_mantissa<const FORMAT: u128>(num: Number, max_digits: usize) -> (Bi
let mut result = Bigint::new();

// Now use our pre-computed small powers iteratively.
let step = if LIMB_BITS == 32 {
let step = if Limb::BITS == 32 {
u32_power_limit(format.radix())
} else {
u64_power_limit(format.radix())
@@ -645,19 +645,19 @@ pub fn byte_comp<F: RawFloat, const FORMAT: u128>(
num.shl(shift).unwrap();
num.exp -= shift as i32;
} else if diff > 0 {
// Need to shift denominator left, go by a power of LIMB_BITS.
// Need to shift denominator left, go by a power of Limb::BITS.
// After this, the numerator will be non-normalized, and the
// denominator will be normalized. We need to add one to the
// quotient,since we're calculating the ceiling of the divmod.
let (q, r) = shift.ceil_divmod(LIMB_BITS);
let (q, r) = shift.ceil_divmod(Limb::BITS as usize);
let r = -r;
if r != 0 {
num.shl_bits(r as usize).unwrap();
num.exp -= r;
}
if q != 0 {
den.shl_limbs(q).unwrap();
den.exp -= LIMB_BITS as i32 * q as i32;
den.exp -= Limb::BITS as i32 * q as i32;
}
}
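The ceiling division keeps the two shifts consistent: the denominator moves by whole limbs (q of them) while the numerator absorbs the leftover bits, and reporting the remainder as a value at or below zero lets both exponent adjustments share a sign convention. A worked example with 64-bit limbs and a hypothetical `ceil_divmod` helper (lexical's own lives in lexical-util and is not shown in this diff):

// Quotient rounded up, remainder reported as <= 0.
fn ceil_divmod(x: usize, y: usize) -> (usize, i32) {
    let q = (x + y - 1) / y;
    (q, x as i32 - (q * y) as i32)
}

fn main() {
    // A 70-bit relative shift with 64-bit limbs: shift the denominator by
    // 2 whole limbs (128 bits) and the numerator by the 58 leftover bits.
    let shift = 70usize;
    let (q, r) = ceil_divmod(shift, u64::BITS as usize);
    let r = -r;
    assert_eq!((q, r), (2, 58));
    assert_eq!(q as i32 * u64::BITS as i32 - r, shift as i32); // net shift is still 70
}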

12 changes: 6 additions & 6 deletions lexical-parse-float/tests/bigfloat_tests.rs
@@ -2,7 +2,7 @@

mod stackvec;

use lexical_parse_float::bigint::{Bigfloat, LIMB_BITS};
use lexical_parse_float::bigint::{Bigfloat, Limb};
use lexical_parse_float::float::ExtendedFloat80;
use stackvec::vec_from_u32;

@@ -35,11 +35,11 @@ fn simple_test() {
assert_eq!(&*x.data, &[0, 19531250]);
assert_eq!(x.exp, 10);

assert_eq!(x.leading_zeros(), LIMB_BITS as u32 - 25);
assert_eq!(x.leading_zeros(), Limb::BITS - 25);

// y has a 0 for 32-bit limbs, no 0s for 64-bit limbs.
x *= &y;
let expected = if LIMB_BITS == 32 {
let expected = if Limb::BITS == 32 {
vec_from_u32(&[0, 0, 0, 9765625])
} else {
vec_from_u32(&[0, 0, 0, 0, 9765625])
@@ -52,12 +52,12 @@
fn leading_zeros_test() {
assert_eq!(Bigfloat::new().leading_zeros(), 0);

assert_eq!(Bigfloat::from_u32(0xFF).leading_zeros(), LIMB_BITS as u32 - 8);
assert_eq!(Bigfloat::from_u32(0xFF).leading_zeros(), Limb::BITS - 8);
assert_eq!(Bigfloat::from_u64(0xFF00000000).leading_zeros(), 24);

assert_eq!(Bigfloat::from_u32(0xF).leading_zeros(), LIMB_BITS as u32 - 4);
assert_eq!(Bigfloat::from_u32(0xF).leading_zeros(), Limb::BITS - 4);
assert_eq!(Bigfloat::from_u64(0xF00000000).leading_zeros(), 28);

assert_eq!(Bigfloat::from_u32(0xF0).leading_zeros(), LIMB_BITS as u32 - 8);
assert_eq!(Bigfloat::from_u32(0xF0).leading_zeros(), Limb::BITS - 8);
assert_eq!(Bigfloat::from_u64(0xF000000000).leading_zeros(), 24);
}
8 changes: 4 additions & 4 deletions lexical-parse-float/tests/stackvec_tests.rs
@@ -2,7 +2,7 @@ mod stackvec;

use core::cmp;

use lexical_parse_float::bigint::{self, Limb, StackVec, LIMB_BITS};
use lexical_parse_float::bigint::{self, Limb, StackVec};
use stackvec::vec_from_u32;

const SIZE: usize = 50;
@@ -34,7 +34,7 @@ fn simple_test() {
assert_eq!(x.len(), 2);
assert_eq!(x.is_empty(), false);
assert_eq!(x.hi16(), (0x8000, true));
if LIMB_BITS == 32 {
if Limb::BITS == 32 {
assert_eq!(x.hi32(), (0x80000002, true));
assert_eq!(x.hi64(), (0x8000000280000000, false));
} else {
@@ -128,7 +128,7 @@ fn math_test() {
x.mul_small(3);
assert_eq!(&*x, &[0, 6, 27]);
x.mul_small(Limb::MAX);
let expected: VecType = if LIMB_BITS == 32 {
let expected: VecType = if Limb::BITS == 32 {
vec_from_u32(&[0, 4294967290, 4294967274, 26])
} else {
vec_from_u32(&[0, 0, 4294967290, 4294967295, 4294967274, 4294967295, 26])
@@ -419,7 +419,7 @@ fn shl_bits_test() {
fn shl_limbs_test() {
let mut x = VecType::from_u32(0xD2210408);
bigint::shl_limbs(&mut x, 2);
let expected: VecType = if LIMB_BITS == 32 {
let expected: VecType = if Limb::BITS == 32 {
vec_from_u32(&[0, 0, 0xD2210408])
} else {
vec_from_u32(&[0, 0, 0, 0, 0xD2210408])
7 changes: 3 additions & 4 deletions lexical-util/src/num.rs
@@ -471,10 +471,10 @@ pub trait Integer:
// this is heavily optimized for base10 and it's a way under estimate
// that said, it's fast and works.
if radix <= 16 {
core::mem::size_of::<Self>() * 2 - Self::IS_SIGNED as usize
mem::size_of::<Self>() * 2 - Self::IS_SIGNED as usize
} else {
// way under approximation but always works and is fast
core::mem::size_of::<Self>()
mem::size_of::<Self>()
}
}
}
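The estimate is deliberately conservative: presumably it is a lower bound on how many digits of the given radix always fit in the type, and two digits per byte (minus one for a sign) never exceeds that bound for radixes up to 16, while one digit per byte is always safe beyond that. A quick check of those claims for a few types (the trait method's exact name is not visible in this hunk, so the helper below is illustrative):

fn estimate(bytes: usize, signed: bool, radix: u32) -> usize {
    if radix <= 16 { bytes * 2 - signed as usize } else { bytes }
}

fn main() {
    // u64, base 10: estimate 16; in reality any 19-digit decimal fits (max is 20 digits).
    assert_eq!(estimate(8, false, 10), 16);
    // i64, base 10: estimate 15; in reality any 18-digit decimal fits.
    assert_eq!(estimate(8, true, 10), 15);
    // u64, base 36: estimate 8; in reality 12 digits always fit (36^12 < 2^64 < 36^13).
    assert_eq!(estimate(8, false, 36), 8);
}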
@@ -487,8 +487,7 @@
const TWO: $t = 2;
const MAX: $t = $t::MAX;
const MIN: $t = $t::MIN;
// DEPRECATE: when we drop support for <= 1.53.0, change to `<$t>::BITS`
const BITS: usize = mem::size_of::<$t>() * 8;
const BITS: usize = $t::BITS as usize;

#[inline(always)]
fn leading_zeros(self) -> u32 {
