diff --git a/src/values/instruction_value.rs b/src/values/instruction_value.rs index c9b96580da2ee..3c46e47f8f489 100644 --- a/src/values/instruction_value.rs +++ b/src/values/instruction_value.rs @@ -1,5 +1,7 @@ use either::{Either, Either::{Left, Right}}; use llvm_sys::core::{LLVMGetAlignment, LLVMSetAlignment, LLVMGetInstructionOpcode, LLVMIsTailCall, LLVMGetPreviousInstruction, LLVMGetNextInstruction, LLVMGetInstructionParent, LLVMInstructionEraseFromParent, LLVMInstructionClone, LLVMSetVolatile, LLVMGetVolatile, LLVMGetNumOperands, LLVMGetOperand, LLVMGetOperandUse, LLVMSetOperand, LLVMValueAsBasicBlock, LLVMIsABasicBlock, LLVMGetICmpPredicate, LLVMGetFCmpPredicate, LLVMIsAAllocaInst, LLVMIsALoadInst, LLVMIsAStoreInst}; +#[llvm_versions(3.8..=latest)] +use llvm_sys::core::{LLVMGetOrdering, LLVMSetOrdering}; #[llvm_versions(3.9..=latest)] use llvm_sys::core::LLVMInstructionRemoveFromParent; use llvm_sys::LLVMOpcode; @@ -8,7 +10,7 @@ use llvm_sys::prelude::LLVMValueRef; use crate::basic_block::BasicBlock; use crate::values::traits::AsValueRef; use crate::values::{BasicValue, BasicValueEnum, BasicValueUse, Value}; -use crate::{IntPredicate, FloatPredicate}; +use crate::{AtomicOrdering, IntPredicate, FloatPredicate}; // REVIEW: Split up into structs for SubTypes on InstructionValues? // REVIEW: This should maybe be split up into InstructionOpcode and ConstOpcode? 
@@ -98,6 +100,16 @@ pub struct InstructionValue { } impl InstructionValue { + fn is_a_load_inst(&self) -> bool { + !unsafe { LLVMIsALoadInst(self.as_value_ref()) }.is_null() + } + fn is_a_store_inst(&self) -> bool { + !unsafe { LLVMIsAStoreInst(self.as_value_ref()) }.is_null() + } + fn is_a_alloca_inst(&self) -> bool { + !unsafe { LLVMIsAAllocaInst(self.as_value_ref()) }.is_null() + } + pub(crate) fn new(instruction_value: LLVMValueRef) -> Self { debug_assert!(!instruction_value.is_null()); @@ -185,45 +197,32 @@ impl InstructionValue { // SubTypes: Only apply to memory access instructions /// Returns whether or not a memory access instruction is volatile. pub fn get_volatile(&self) -> Result<bool, &'static str> { - let value_ref = self.as_value_ref(); - unsafe { - // Although cmpxchg and atomicrmw can have volatile, LLVM's C API - // does not export that functionality. - if LLVMIsALoadInst(value_ref).is_null() && - LLVMIsAStoreInst(value_ref).is_null() { - return Err("Value is not a load or store."); - } - Ok(LLVMGetVolatile(value_ref) == 1) + // Although cmpxchg and atomicrmw can have volatile, LLVM's C API + // does not export that functionality. + if !self.is_a_load_inst() && !self.is_a_store_inst() { + return Err("Value is not a load or store."); } + Ok(unsafe { LLVMGetVolatile(self.as_value_ref()) } == 1) } // SubTypes: Only apply to memory access instructions /// Sets whether or not a memory access instruction is volatile. pub fn set_volatile(&self, volatile: bool) -> Result<(), &'static str> { - let value_ref = self.as_value_ref(); - unsafe { - // Although cmpxchg and atomicrmw can have volatile, LLVM's C API - // does not export that functionality. - if LLVMIsALoadInst(value_ref).is_null() && - LLVMIsAStoreInst(value_ref).is_null() { - return Err("Value is not a load or store."); - } - Ok(LLVMSetVolatile(value_ref, volatile as i32)) + // Although cmpxchg and atomicrmw can have volatile, LLVM's C API + // does not export that functionality. 
+ if !self.is_a_load_inst() && !self.is_a_store_inst() { + return Err("Value is not a load or store."); } + Ok(unsafe { LLVMSetVolatile(self.as_value_ref(), volatile as i32) }) } // SubTypes: Only apply to memory access and alloca instructions /// Returns alignment on a memory access instruction or alloca. pub fn get_alignment(&self) -> Result<u32, &'static str> { - let value_ref = self.as_value_ref(); - unsafe { - if LLVMIsAAllocaInst(value_ref).is_null() && - LLVMIsALoadInst(value_ref).is_null() && - LLVMIsAStoreInst(value_ref).is_null() { - return Err("Value is not an alloca, load or store."); - } - Ok(LLVMGetAlignment(value_ref)) + if !self.is_a_alloca_inst() && !self.is_a_load_inst() && !self.is_a_store_inst() { + return Err("Value is not an alloca, load or store."); } + Ok(unsafe { LLVMGetAlignment(self.as_value_ref()) }) } // SubTypes: Only apply to memory access and alloca instructions @@ -232,15 +231,42 @@ impl InstructionValue { if !alignment.is_power_of_two() && alignment != 0 { return Err("Alignment is not a power of 2!"); } - let value_ref = self.as_value_ref(); - unsafe { - if LLVMIsAAllocaInst(value_ref).is_null() && - LLVMIsALoadInst(value_ref).is_null() && - LLVMIsAStoreInst(value_ref).is_null() { - return Err("Value is not an alloca, load or store."); - } - Ok(LLVMSetAlignment(value_ref, alignment)) + if !self.is_a_alloca_inst() && !self.is_a_load_inst() && !self.is_a_store_inst() { + return Err("Value is not an alloca, load or store."); } + Ok(unsafe { LLVMSetAlignment(self.as_value_ref(), alignment) }) + } + + // SubTypes: Only apply to memory access instructions + /// Returns atomic ordering on a memory access instruction. 
+ #[llvm_versions(3.8..=latest)] + pub fn get_atomic_ordering(&self) -> Result<AtomicOrdering, &'static str> { + if !self.is_a_load_inst() && !self.is_a_store_inst() { + return Err("Value is not a load or store."); + } + Ok(unsafe { LLVMGetOrdering(self.as_value_ref()) }.into()) + } + + // SubTypes: Only apply to memory access instructions + /// Sets atomic ordering on a memory access instruction. + #[llvm_versions(3.8..=latest)] + pub fn set_atomic_ordering(&self, ordering: AtomicOrdering) -> Result<(), &'static str> { + // Although fence and atomicrmw both have an ordering, the LLVM C API + // does not support them. The cmpxchg instruction has two orderings and + // does not work with this API. + if !self.is_a_load_inst() && !self.is_a_store_inst() { + return Err("Value is not a load or store instruction."); + } + match ordering { + AtomicOrdering::Release if self.is_a_load_inst() => + return Err("The release ordering is not valid on load instructions."), + AtomicOrdering::AcquireRelease => + return Err("The acq_rel ordering is not valid on load or store instructions."), + AtomicOrdering::Acquire if self.is_a_store_inst() => + return Err("The acquire ordering is not valid on store instructions."), + _ => { }, + }; + Ok(unsafe { LLVMSetOrdering(self.as_value_ref(), ordering.into()) }) } /// Obtains the number of operands an `InstructionValue` has. 
diff --git a/tests/all/test_instruction_values.rs b/tests/all/test_instruction_values.rs index 8a06055ec4656..38d2b4cc28a59 100644 --- a/tests/all/test_instruction_values.rs +++ b/tests/all/test_instruction_values.rs @@ -1,6 +1,6 @@ extern crate inkwell; -use self::inkwell::{AddressSpace, IntPredicate, FloatPredicate}; +use self::inkwell::{AddressSpace, AtomicOrdering, IntPredicate, FloatPredicate}; use self::inkwell::context::Context; use self::inkwell::values::{BasicValue, InstructionOpcode::*}; @@ -310,3 +310,49 @@ fn test_mem_instructions() { assert!(fadd_instruction.get_alignment().is_err()); assert!(fadd_instruction.set_alignment(16).is_err()); } + +#[llvm_versions(3.8..=latest)] +#[test] +fn test_atomic_ordering_mem_instructions() { + let context = Context::create(); + let module = context.create_module("testing"); + let builder = context.create_builder(); + + let void_type = context.void_type(); + let f32_type = context.f32_type(); + let f32_ptr_type = f32_type.ptr_type(AddressSpace::Generic); + let fn_type = void_type.fn_type(&[f32_ptr_type.into(), f32_type.into()], false); + + let function = module.add_function("mem_inst", fn_type, None); + let basic_block = context.append_basic_block(&function, "entry"); + + builder.position_at_end(&basic_block); + + let arg1 = function.get_first_param().unwrap().into_pointer_value(); + let arg2 = function.get_nth_param(1).unwrap().into_float_value(); + + assert!(arg1.get_first_use().is_none()); + assert!(arg2.get_first_use().is_none()); + + let f32_val = f32_type.const_float(::std::f64::consts::PI); + + let store_instruction = builder.build_store(arg1, f32_val); + let load = builder.build_load(arg1, ""); + let load_instruction = load.as_instruction_value().unwrap(); + + assert_eq!(store_instruction.get_atomic_ordering().unwrap(), AtomicOrdering::NotAtomic); + assert_eq!(load_instruction.get_atomic_ordering().unwrap(), AtomicOrdering::NotAtomic); + 
assert!(store_instruction.set_atomic_ordering(AtomicOrdering::Monotonic).is_ok()); + assert_eq!(store_instruction.get_atomic_ordering().unwrap(), AtomicOrdering::Monotonic); + assert!(store_instruction.set_atomic_ordering(AtomicOrdering::Release).is_ok()); + assert!(load_instruction.set_atomic_ordering(AtomicOrdering::Acquire).is_ok()); + + assert!(store_instruction.set_atomic_ordering(AtomicOrdering::Acquire).is_err()); + assert!(store_instruction.set_atomic_ordering(AtomicOrdering::AcquireRelease).is_err()); + assert!(load_instruction.set_atomic_ordering(AtomicOrdering::AcquireRelease).is_err()); + assert!(load_instruction.set_atomic_ordering(AtomicOrdering::Release).is_err()); + + let fadd_instruction = builder.build_float_add(load.into_float_value(), f32_val, "").as_instruction_value().unwrap(); + assert!(fadd_instruction.get_atomic_ordering().is_err()); + assert!(fadd_instruction.set_atomic_ordering(AtomicOrdering::NotAtomic).is_err()); +}