This repository has been archived by the owner on Feb 18, 2024. It is now read-only.

Commit

Use simd comparison trait from latest nightly
gyscos committed Jul 25, 2022
1 parent 5c745f4 commit 9cb2f7d
Showing 3 changed files with 18 additions and 15 deletions.
10 changes: 6 additions & 4 deletions src/compute/aggregate/simd/packed.rs
@@ -1,3 +1,5 @@
+use std::simd::{SimdFloat as _, SimdInt as _, SimdOrd as _, SimdUint as _};
+
 use crate::types::simd::*;
 
 use super::super::min_max::SimdOrd;
@@ -43,12 +45,12 @@ macro_rules! simd_ord_int {
 
             #[inline]
             fn max_lane(self, x: Self) -> Self {
-                self.max(x)
+                self.simd_max(x)
             }
 
             #[inline]
             fn min_lane(self, x: Self) -> Self {
-                self.min(x)
+                self.simd_min(x)
             }
 
             #[inline]
@@ -82,12 +84,12 @@ macro_rules! simd_ord_float {
 
             #[inline]
             fn max_lane(self, x: Self) -> Self {
-                self.max(x)
+                self.simd_max(x)
             }
 
             #[inline]
             fn min_lane(self, x: Self) -> Self {
-                self.min(x)
+                self.simd_min(x)
             }
 
             #[inline]
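For reference, a minimal sketch of the renamed lane-wise min/max API this file migrates to, assuming a 2022-era nightly toolchain with the unstable portable_simd feature: integer vectors get simd_max/simd_min from the SimdOrd trait, float vectors from SimdFloat (the API is unstable, so later nightlies may differ).

#![feature(portable_simd)]
use std::simd::{f32x8, i32x8, SimdFloat as _, SimdOrd as _};

fn main() {
    // Integer vectors: lane-wise max/min come from the SimdOrd trait.
    let a = i32x8::from_array([1, -2, 3, -4, 5, -6, 7, -8]);
    let b = i32x8::splat(0);
    assert_eq!(a.simd_max(b).to_array(), [1, 0, 3, 0, 5, 0, 7, 0]);

    // Float vectors: the same lane-wise operations come from SimdFloat.
    let x = f32x8::from_array([1.0, 4.0, 2.0, 8.0, 3.0, 6.0, 5.0, 7.0]);
    let y = f32x8::splat(4.5);
    assert_eq!(x.simd_min(y).to_array(), [1.0, 4.0, 2.0, 4.5, 3.0, 4.5, 4.5, 4.5]);
}
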
14 changes: 7 additions & 7 deletions src/compute/comparison/simd/packed.rs
@@ -1,5 +1,5 @@
 use std::convert::TryInto;
-use std::simd::ToBitMask;
+use std::simd::{SimdPartialEq, SimdPartialOrd, ToBitMask};
 
 use crate::types::simd::*;
 use crate::types::{days_ms, f16, months_days_ns};
@@ -29,34 +29,34 @@ macro_rules! simd8 {
         impl Simd8PartialEq for $md {
             #[inline]
             fn eq(self, other: Self) -> u8 {
-                self.lanes_eq(other).to_bitmask()
+                self.simd_eq(other).to_bitmask()
             }
 
             #[inline]
             fn neq(self, other: Self) -> u8 {
-                self.lanes_ne(other).to_bitmask()
+                self.simd_ne(other).to_bitmask()
             }
         }
 
         impl Simd8PartialOrd for $md {
             #[inline]
             fn lt_eq(self, other: Self) -> u8 {
-                self.lanes_le(other).to_bitmask()
+                self.simd_le(other).to_bitmask()
             }
 
             #[inline]
             fn lt(self, other: Self) -> u8 {
-                self.lanes_lt(other).to_bitmask()
+                self.simd_lt(other).to_bitmask()
             }
 
             #[inline]
             fn gt_eq(self, other: Self) -> u8 {
-                self.lanes_ge(other).to_bitmask()
+                self.simd_ge(other).to_bitmask()
             }
 
             #[inline]
             fn gt(self, other: Self) -> u8 {
-                self.lanes_gt(other).to_bitmask()
+                self.simd_gt(other).to_bitmask()
             }
         }
     };
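Likewise, a minimal sketch of the renamed comparison methods used above, under the same nightly assumptions: SimdPartialEq/SimdPartialOrd return mask vectors, and ToBitMask::to_bitmask packs an 8-lane mask into a u8 with lane 0 in the least significant bit (per that nightly's docs; the unstable API may have changed since).

#![feature(portable_simd)]
use std::simd::{u32x8, SimdPartialEq as _, SimdPartialOrd as _, ToBitMask as _};

fn main() {
    let a = u32x8::from_array([0, 1, 2, 3, 4, 5, 6, 7]);
    let b = u32x8::splat(3);

    // simd_eq/simd_lt return mask vectors; to_bitmask packs them into a u8,
    // lane 0 in the least significant bit.
    assert_eq!(a.simd_eq(b).to_bitmask(), 0b0000_1000); // only lane 3 equals 3
    assert_eq!(a.simd_lt(b).to_bitmask(), 0b0000_0111); // lanes 0..=2 are below 3
}
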
9 changes: 5 additions & 4 deletions src/types/simd/packed.rs
@@ -1,6 +1,7 @@
 pub use std::simd::{
     f32x16, f32x8, f64x8, i16x32, i16x8, i32x16, i32x8, i64x8, i8x64, i8x8, mask32x16 as m32x16,
     mask64x8 as m64x8, mask8x64 as m8x64, u16x32, u16x8, u32x16, u32x8, u64x8, u8x64, u8x8,
+    SimdPartialEq,
 };

/// Vector of 32 16-bit masks
@@ -74,7 +75,7 @@ fn from_chunk_u8(chunk: u8) -> m64x8 {
     let idx = u64x8::from_array([1, 2, 4, 8, 16, 32, 64, 128]);
     let vecmask = u64x8::splat(chunk as u64);
 
-    (idx & vecmask).lanes_eq(idx)
+    (idx & vecmask).simd_eq(idx)
 }

#[inline]
@@ -84,7 +85,7 @@ fn from_chunk_u16(chunk: u16) -> m32x16 {
     ]);
     let vecmask = u32x16::splat(chunk as u32);
 
-    (idx & vecmask).lanes_eq(idx)
+    (idx & vecmask).simd_eq(idx)
 }

#[inline]
@@ -109,7 +110,7 @@ fn from_chunk_u32(chunk: u32) -> m16x32 {
     let vecmask1 = u16x32::splat(a1);
     let vecmask2 = u16x32::splat(a2);
 
-    (idx & left & vecmask1).lanes_eq(idx) | (idx & right & vecmask2).lanes_eq(idx)
+    (idx & left & vecmask1).simd_eq(idx) | (idx & right & vecmask2).simd_eq(idx)
 }

#[inline]
@@ -166,7 +167,7 @@ fn from_chunk_u64(chunk: u64) -> m8x64 {
 
     let mut result = m8x64::default();
     for i in 0..8 {
-        result |= (idxs[i] & u8x64::splat(a[i])).lanes_eq(idx)
+        result |= (idxs[i] & u8x64::splat(a[i])).simd_eq(idx)
     }
 
     result
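And a standalone sketch of the bitmask-expansion pattern these from_chunk_* helpers rely on, under the same nightly assumptions; the mask_from_u8 name is hypothetical, mirroring from_chunk_u8 above. Each lane is compared against its own power-of-two bit, so a set bit in the chunk becomes a true lane in the resulting mask.

#![feature(portable_simd)]
use std::simd::{mask64x8, u64x8, SimdPartialEq as _};

// Hypothetical standalone version of the from_chunk_u8 pattern shown in the diff.
fn mask_from_u8(chunk: u8) -> mask64x8 {
    let idx = u64x8::from_array([1, 2, 4, 8, 16, 32, 64, 128]);
    let vecmask = u64x8::splat(chunk as u64);
    // A lane is true exactly when its bit is set in `chunk`.
    (idx & vecmask).simd_eq(idx)
}

fn main() {
    let m = mask_from_u8(0b0000_0101);
    assert!(m.test(0) && !m.test(1) && m.test(2));
}
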
