diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index b5bb7630ca6c9..3ff4e576f0f6f 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -12,6 +12,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_hir::def_id::DefId;
+use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::ty::layout::{
     FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTypingEnv, LayoutError, LayoutOfHelpers,
@@ -873,6 +874,34 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
     }
 
+    fn three_way_compare(
+        &mut self,
+        ty: Ty<'tcx>,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> Option<Self::Value> {
+        if crate::llvm_util::get_version() < (19, 0, 0) {
+            return None;
+        }
+
+        let name = match (ty.is_signed(), ty.primitive_size(self.tcx).bits()) {
+            (true, 8) => "llvm.scmp.i8.i8",
+            (true, 16) => "llvm.scmp.i8.i16",
+            (true, 32) => "llvm.scmp.i8.i32",
+            (true, 64) => "llvm.scmp.i8.i64",
+            (true, 128) => "llvm.scmp.i8.i128",
+
+            (false, 8) => "llvm.ucmp.i8.i8",
+            (false, 16) => "llvm.ucmp.i8.i16",
+            (false, 32) => "llvm.ucmp.i8.i32",
+            (false, 64) => "llvm.ucmp.i8.i64",
+            (false, 128) => "llvm.ucmp.i8.i128",
+
+            _ => bug!("three-way compare unsupported for type {ty:?}"),
+        };
+        Some(self.call_intrinsic(name, &[lhs, rhs]))
+    }
+
     /* Miscellaneous instructions */
     fn memcpy(
         &mut self,
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 8218126ea29c3..5ed824eff71ed 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -1036,6 +1036,18 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
         ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
 
+        ifn!("llvm.scmp.i8.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.scmp.i8.i16", fn(t_i16, t_i16) -> t_i8);
+        ifn!("llvm.scmp.i8.i32", fn(t_i32, t_i32) -> t_i8);
+        ifn!("llvm.scmp.i8.i64", fn(t_i64, t_i64) -> t_i8);
+        ifn!("llvm.scmp.i8.i128", fn(t_i128, t_i128) -> t_i8);
+
+        ifn!("llvm.ucmp.i8.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.ucmp.i8.i16", fn(t_i16, t_i16) -> t_i8);
+        ifn!("llvm.ucmp.i8.i32", fn(t_i32, t_i32) -> t_i8);
+        ifn!("llvm.ucmp.i8.i64", fn(t_i64, t_i64) -> t_i8);
+        ifn!("llvm.ucmp.i8.i128", fn(t_i128, t_i128) -> t_i8);
+
         ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void);
         ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void);
 
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index f63b2d139c5f9..24561b2f13f1e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -954,6 +954,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::BinOp::Cmp => {
                 use std::cmp::Ordering;
                 assert!(!is_float);
+                if let Some(value) = bx.three_way_compare(input_ty, lhs, rhs) {
+                    return value;
+                }
                 let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
                 if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
                     // FIXME: This actually generates tighter assembly, and is a classic trick
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index b0138ac8bfed6..a22969e457890 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -309,6 +309,18 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
     fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
 
+    /// Returns `-1` if `lhs < rhs`, `0` if `lhs == rhs`, and `1` if `lhs > rhs`.
+    // FIXME: Move the default implementation from `codegen_scalar_binop` into this method and
+    // remove the `Option` return once LLVM 19 is the minimum version.
+    fn three_way_compare(
+        &mut self,
+        _ty: Ty<'tcx>,
+        _lhs: Self::Value,
+        _rhs: Self::Value,
+    ) -> Option<Self::Value> {
+        None
+    }
+
     fn memcpy(
         &mut self,
         dst: Self::Value,
diff --git a/tests/codegen/integer-cmp.rs b/tests/codegen/integer-cmp.rs
index 9bbf243946d1a..75cfb377e6669 100644
--- a/tests/codegen/integer-cmp.rs
+++ b/tests/codegen/integer-cmp.rs
@@ -1,10 +1,10 @@
 // This is test for more optimal Ord implementation for integers.
 // See for more info.
 
-//@ revisions: llvm-pre-20 llvm-20
-//@ [llvm-20] min-llvm-version: 20
-//@ [llvm-pre-20] max-llvm-major-version: 19
-//@ compile-flags: -C opt-level=3
+//@ revisions: llvm-pre-19 llvm-19
+//@ [llvm-19] min-llvm-version: 19
+//@ [llvm-pre-19] max-llvm-major-version: 18
+//@ compile-flags: -C opt-level=3 -Zmerge-functions=disabled
 
 #![crate_type = "lib"]
 
@@ -13,21 +13,50 @@ use std::cmp::Ordering;
 // CHECK-LABEL: @cmp_signed
 #[no_mangle]
 pub fn cmp_signed(a: i64, b: i64) -> Ordering {
-    // llvm-20: @llvm.scmp.i8.i64
-    // llvm-pre-20: icmp slt
-    // llvm-pre-20: icmp ne
-    // llvm-pre-20: zext i1
-    // llvm-pre-20: select i1
+    // llvm-19: call{{.*}} i8 @llvm.scmp.i8.i64
+    // llvm-pre-19: icmp slt
+    // llvm-pre-19: icmp ne
+    // llvm-pre-19: zext i1
+    // llvm-pre-19: select i1
     a.cmp(&b)
 }
 
 // CHECK-LABEL: @cmp_unsigned
 #[no_mangle]
 pub fn cmp_unsigned(a: u32, b: u32) -> Ordering {
-    // llvm-20: @llvm.ucmp.i8.i32
-    // llvm-pre-20: icmp ult
-    // llvm-pre-20: icmp ne
-    // llvm-pre-20: zext i1
-    // llvm-pre-20: select i1
+    // llvm-19: call{{.*}} i8 @llvm.ucmp.i8.i32
+    // llvm-pre-19: icmp ult
+    // llvm-pre-19: icmp ne
+    // llvm-pre-19: zext i1
+    // llvm-pre-19: select i1
+    a.cmp(&b)
+}
+
+// CHECK-LABEL: @cmp_char
+#[no_mangle]
+pub fn cmp_char(a: char, b: char) -> Ordering {
+    // llvm-19: call{{.*}} i8 @llvm.ucmp.i8.i32
+    // llvm-pre-19: icmp ult
+    // llvm-pre-19: icmp ne
+    // llvm-pre-19: zext i1
+    // llvm-pre-19: select i1
+    a.cmp(&b)
+}
+
+// CHECK-LABEL: @cmp_tuple
+#[no_mangle]
+pub fn cmp_tuple(a: (i16, u16), b: (i16, u16)) -> Ordering {
+    // llvm-19-DAG: call{{.*}} i8 @llvm.ucmp.i8.i16
+    // llvm-19-DAG: call{{.*}} i8 @llvm.scmp.i8.i16
+    // llvm-19: select i1
+    // llvm-pre-19: icmp slt
+    // llvm-pre-19: icmp ne
+    // llvm-pre-19: zext i1
+    // llvm-pre-19: select i1
+    // llvm-pre-19: icmp ult
+    // llvm-pre-19: icmp ne
+    // llvm-pre-19: zext i1
+    // llvm-pre-19: select i1
+    // llvm-pre-19: select i1
     a.cmp(&b)
 }
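
For context, and not part of the patch above: a minimal standalone sketch of the semantics that `three_way_compare` lowers. `Ord::cmp` on a primitive integer is a three-way comparison, and `Ordering` uses the `i8` discriminants -1/0/1, which is the same encoding that `llvm.scmp.i8.*` / `llvm.ucmp.i8.*` return, so the whole body should collapse to one intrinsic call on LLVM 19+ at opt-level=3 (as the codegen test checks). The function name `cmp_signed` here simply mirrors the test and is otherwise arbitrary.

use std::cmp::Ordering;

// Three-way compare as performed by `Ord::cmp`; with this change and LLVM 19+
// this body is expected to lower to a single `@llvm.scmp.i8.i64` call.
fn cmp_signed(a: i64, b: i64) -> Ordering {
    a.cmp(&b)
}

fn main() {
    assert_eq!(cmp_signed(1, 2), Ordering::Less);
    assert_eq!(cmp_signed(2, 2), Ordering::Equal);
    assert_eq!(cmp_signed(3, 2), Ordering::Greater);
    // `Ordering` has the i8 discriminants -1/0/1, matching the intrinsic's return value.
    assert_eq!(Ordering::Less as i8, -1);
    assert_eq!(Ordering::Equal as i8, 0);
    assert_eq!(Ordering::Greater as i8, 1);
}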