cg_ssa, cg_llvm: remove unnecessary pointercast/bitcast-of-ptr
erikdesjardins committed Dec 7, 2022
1 parent 0fcc9be · commit 68cabc0
Showing 19 changed files with 46 additions and 218 deletions.
compiler/rustc_codegen_llvm/src/abi.rs (1 addition & 3 deletions)
@@ -226,9 +226,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
let cast_ptr_llty = bx.type_ptr();
let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
bx.store(val, cast_dst, self.layout.align.abi);
bx.store(val, dst.llval, self.layout.align.abi);
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
compiler/rustc_codegen_llvm/src/back/write.rs (1 addition & 2 deletions)
@@ -4,7 +4,6 @@ use crate::back::profiling::{
};
use crate::base;
use crate::common;
use crate::consts;
use crate::llvm::{self, DiagnosticInfo, PassManager};
use crate::llvm_util;
use crate::type_::Type;
@@ -948,7 +947,7 @@ fn create_msvc_imps(

for (imp_name, val) in globals {
let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast());
llvm::LLVMSetInitializer(imp, consts::ptrcast(val, ptr_ty));
llvm::LLVMSetInitializer(imp, val);
llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
}
}
compiler/rustc_codegen_llvm/src/base.rs (1 addition & 2 deletions)
@@ -123,8 +123,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
// happen after the llvm.used variables are created.
for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
unsafe {
let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
llvm::LLVMReplaceAllUsesWith(old_g, new_g);
llvm::LLVMDeleteGlobal(old_g);
}
}
compiler/rustc_codegen_llvm/src/builder.rs (1 addition & 17 deletions)
@@ -854,8 +854,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
let size = self.intcast(size, self.type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
let dst = self.pointercast(dst, self.type_ptr());
let src = self.pointercast(src, self.type_ptr());
unsafe {
llvm::LLVMRustBuildMemCpy(
self.llbuilder,
@@ -881,8 +879,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
let size = self.intcast(size, self.type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
let dst = self.pointercast(dst, self.type_ptr());
let src = self.pointercast(src, self.type_ptr());
unsafe {
llvm::LLVMRustBuildMemMove(
self.llbuilder,
@@ -905,7 +901,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
flags: MemFlags,
) {
let is_volatile = flags.contains(MemFlags::VOLATILE);
let ptr = self.pointercast(ptr, self.type_ptr());
unsafe {
llvm::LLVMRustBuildMemSet(
self.llbuilder,
@@ -1342,20 +1337,10 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {

fn check_store(&mut self, ptr: &'ll Value) -> &'ll Value {
let dest_ptr_ty = self.cx.val_ty(ptr);
let stored_ptr_ty = self.cx.type_ptr();

assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

if dest_ptr_ty == stored_ptr_ty {
ptr
} else {
debug!(
"type mismatch in store. \
Expected {:?}, got {:?}; inserting bitcast",
dest_ptr_ty, stored_ptr_ty
);
self.bitcast(ptr, stored_ptr_ty)
}
ptr
}

fn check_call<'b>(
@@ -1420,7 +1405,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
return;
}

let ptr = self.pointercast(ptr, self.cx.type_ptr());
self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
}

compiler/rustc_codegen_llvm/src/callee.rs (1 addition & 35 deletions)
@@ -4,13 +4,11 @@
//! and methods are represented as just a fn ptr and not a full
//! closure.
use crate::abi::FnAbiLlvmExt;
use crate::attributes;
use crate::common;
use crate::context::CodegenCx;
use crate::llvm;
use crate::value::Value;
use rustc_codegen_ssa::traits::*;

use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
use rustc_middle::ty::{self, Instance, TypeVisitable};
@@ -45,39 +43,7 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());

let llfn = if let Some(llfn) = cx.get_declared_value(sym) {
// Create a fn pointer with the new signature.
let llptrty = fn_abi.ptr_to_llvm_type(cx);

// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// functions. If you have two crates that both bind the same C
// library, they may not use precisely the same types: for
// example, they will probably each declare their own structs,
// which are distinct types from LLVM's point of view (nominal
// types).
//
// Now, if those two crates are linked into an application, and
// they contain inlined code, you can wind up with a situation
// where both of those functions wind up being loaded into this
// application simultaneously. In that case, the same function
// (from LLVM's point of view) requires two types. But of course
// LLVM won't allow one function to have two types.
//
// What we currently do, therefore, is declare the function with
// one of the two types (whichever happens to come first) and then
// bitcast as needed when the function is referenced to make sure
// it has the type we expect.
//
// This can occur on either a crate-local or crate-external
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
if cx.val_ty(llfn) != llptrty {
debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
cx.const_ptrcast(llfn, llptrty)
} else {
debug!("get_fn: not casting pointer!");
llfn
}
llfn
} else {
let instance_def_id = instance.def_id();
let llfn = if tcx.sess.target.arch == "x86" &&
compiler/rustc_codegen_llvm/src/common.rs (4 additions & 9 deletions)
@@ -1,6 +1,6 @@
//! Code that is useful in various codegen modules.
use crate::consts::{self, const_alloc_to_llvm};
use crate::consts::const_alloc_to_llvm;
pub use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
use crate::type_::Type;
@@ -202,8 +202,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
})
.1;
let len = s.len();
let cs = consts::ptrcast(str_global, self.type_ptr());
(cs, self.const_usize(len as u64))
(str_global, self.const_usize(len as u64))
}

fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
@@ -312,19 +311,15 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let llval = unsafe {
llvm::LLVMRustConstInBoundsGEP2(
self.type_i8(),
self.const_bitcast(base_addr, self.type_ptr()),
base_addr,
&self.const_usize(offset.bytes()),
1,
)
};
self.const_bitcast(llval, llty)
llval
};
PlaceRef::new_sized(llval, layout)
}

fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
consts::ptrcast(val, ty)
}
}

/// Get the [LLVM type][Type] of a [`Value`].
compiler/rustc_codegen_llvm/src/consts.rs (2 additions & 8 deletions)
@@ -209,10 +209,6 @@ fn check_and_apply_linkage<'ll, 'tcx>(
}
}

pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMConstPointerCast(val, ty) }
}

impl<'ll> CodegenCx<'ll, '_> {
pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMConstBitCast(val, ty) }
@@ -572,14 +568,12 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {

/// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr.
fn add_used_global(&self, global: &'ll Value) {
let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_ptr()) };
self.used_statics.borrow_mut().push(cast);
self.used_statics.borrow_mut().push(global);
}

/// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
/// an array of ptr.
fn add_compiler_used_global(&self, global: &'ll Value) {
let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_ptr()) };
self.compiler_used_statics.borrow_mut().push(cast);
self.compiler_used_statics.borrow_mut().push(global);
}
}
compiler/rustc_codegen_llvm/src/context.rs (0 additions & 13 deletions)
@@ -59,17 +59,6 @@ pub struct CodegenCx<'ll, 'tcx> {
/// Cache of constant strings,
pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,

/// Reverse-direction for const ptrs cast from globals.
///
/// Key is a Value holding a `*T`,
/// Val is a Value holding a `*[T]`.
///
/// Needed because LLVM loses pointer->pointee association
/// when we ptrcast, and we have to ptrcast during codegen
/// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
/// a pointer to an LLVM array type. Similar for trait objects.
pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,

/// Cache of emitted const globals (value -> global)
pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,

@@ -435,7 +424,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
instances: Default::default(),
vtables: Default::default(),
const_str_cache: Default::default(),
const_unsized: Default::default(),
const_globals: Default::default(),
statics_to_rauw: RefCell::new(Vec::new()),
used_statics: RefCell::new(Vec::new()),
@@ -903,7 +891,6 @@ impl<'ll> CodegenCx<'ll, '_> {
self.declare_global("rust_eh_catch_typeinfo", ty)
}
};
let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_ptr());
self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
eh_catch_typeinfo
}
compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs (1 addition & 2 deletions)
@@ -18,8 +18,7 @@ use rustc_span::DebuggerVisualizerType;
/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
if needs_gdb_debug_scripts_section(bx) {
let gdb_debug_scripts_section =
bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_ptr());
let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
let volative_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);
compiler/rustc_codegen_llvm/src/intrinsic.rs (7 additions & 25 deletions)
@@ -165,7 +165,6 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let ptr = args[0].immediate();
let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
let llty = ty.llvm_type(self);
let ptr = self.pointercast(ptr, self.type_ptr());
self.volatile_load(llty, ptr)
} else {
self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
@@ -319,18 +318,12 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
self.const_bool(true)
} else if use_integer_compare {
let integer_ty = self.type_ix(layout.size().bits());
let ptr_ty = self.type_ptr();
let a_ptr = self.bitcast(a, ptr_ty);
let a_val = self.load(integer_ty, a_ptr, layout.align().abi);
let b_ptr = self.bitcast(b, ptr_ty);
let b_val = self.load(integer_ty, b_ptr, layout.align().abi);
let a_val = self.load(integer_ty, a, layout.align().abi);
let b_val = self.load(integer_ty, b, layout.align().abi);
self.icmp(IntPredicate::IntEQ, a_val, b_val)
} else {
let ptr_ty = self.type_ptr();
let a_ptr = self.bitcast(a, ptr_ty);
let b_ptr = self.bitcast(b, ptr_ty);
let n = self.const_usize(layout.size().bytes());
let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
let cmp = self.call_intrinsic("memcmp", &[a, b, n]);
match self.cx.sess().target.arch.as_ref() {
"avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
_ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
@@ -386,9 +379,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {

if !fn_abi.ret.is_ignore() {
if let PassMode::Cast(_, _) = &fn_abi.ret.mode {
let ptr_llty = self.type_ptr();
let ptr = self.pointercast(result.llval, ptr_llty);
self.store(llval, ptr, result.align);
self.store(llval, result.llval, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
.val
@@ -412,9 +403,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
// Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
// optimization pass replaces calls to this intrinsic with code to test type membership.
let ptr_ty = self.type_ptr();
let bitcast = self.bitcast(pointer, ptr_ty);
self.call_intrinsic("llvm.type.test", &[bitcast, typeid])
self.call_intrinsic("llvm.type.test", &[pointer, typeid])
}

fn type_checked_load(
@@ -748,7 +737,6 @@ fn codegen_emcc_try<'ll>(
let catch_data_1 =
bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
bx.store(is_rust_panic, catch_data_1, i8_align);
let catch_data = bx.bitcast(catch_data, bx.type_ptr());

let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
bx.call(catch_ty, None, catch_func, &[data, catch_data], None);
@@ -897,8 +885,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let place = PlaceRef::alloca(bx, args[0].layout);
args[0].val.store(bx, place);
let int_ty = bx.type_ix(expected_bytes * 8);
let ptr = bx.pointercast(place.llval, bx.cx.type_ptr());
bx.load(int_ty, ptr, Align::ONE)
bx.load(int_ty, place.llval, Align::ONE)
}
_ => return_error!(
"invalid bitmask `{}`, expected `u{}` or `[u8; {}]`",
@@ -1145,7 +1132,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
bx.store(ze, ptr, Align::ONE);
let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
let ptr = bx.pointercast(ptr, bx.cx.type_ptr());
return Ok(bx.load(array_ty, ptr, Align::ONE));
}
_ => return_error!(
@@ -1763,11 +1749,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
_ => return_error!("expected pointer, got `{}`", out_elem),
}

if in_elem == out_elem {
return Ok(args[0].immediate());
} else {
return Ok(bx.pointercast(args[0].immediate(), llret_ty));
}
return Ok(args[0].immediate());
}

if name == sym::simd_expose_addr {
compiler/rustc_codegen_llvm/src/va_arg.rs (3 additions & 9 deletions)
@@ -32,12 +32,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
allow_higher_align: bool,
) -> (&'ll Value, Align) {
let va_list_ty = bx.type_ptr();
let va_list_ptr_ty = bx.type_ptr();
let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
bx.bitcast(list.immediate(), va_list_ptr_ty)
} else {
list.immediate()
};
let va_list_addr = list.immediate();

let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);

@@ -55,9 +50,9 @@
if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
(bx.bitcast(adjusted, bx.type_ptr()), addr_align)
(adjusted, addr_align)
} else {
(bx.bitcast(addr, bx.type_ptr()), addr_align)
(addr, addr_align)
}
}

@@ -157,7 +152,6 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
}
let reg_type = layout.llvm_type(bx);
let reg_addr = bx.bitcast(reg_addr, bx.type_ptr());
let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
bx.br(end);
