@AMS21 (Contributor) commented Oct 10, 2025

While working on rust-lang/rust#147474 it was discovered that `LLVMSetVolatile` does not work with memory intrinsic functions like `memset`, `memcpy`, and `memmove`.

To quote @nikic:

> LLVMSetVolatile does not support volatile on intrinsics. It only works on the instruction-level volatile flag.

This patch fixes that, so getting and setting the volatile flag now works with these memory intrinsic functions.
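
For illustration (not part of the PR): a minimal sketch of the behaviour through the C API once this patch is applied. The module/function setup and the names `f` and `p` are made up for the example; the relevant calls are `LLVMBuildMemSet`, `LLVMGetVolatile`, and `LLVMSetVolatile`. Previously such a call fell through to the `cast<AtomicCmpXchgInst>` at the end of the wrappers and would assert in builds with assertions.

```cpp
#include <llvm-c/Core.h>
#include <cassert>

int main() {
  LLVMContextRef Ctx = LLVMContextCreate();
  LLVMModuleRef M = LLVMModuleCreateWithNameInContext("m", Ctx);
  LLVMTypeRef FnTy = LLVMFunctionType(LLVMVoidTypeInContext(Ctx), nullptr, 0, 0);
  LLVMValueRef F = LLVMAddFunction(M, "f", FnTy);
  LLVMBasicBlockRef BB = LLVMAppendBasicBlockInContext(Ctx, F, "entry");
  LLVMBuilderRef B = LLVMCreateBuilderInContext(Ctx);
  LLVMPositionBuilderAtEnd(B, BB);

  // Build memset(p, 0, 4) with alignment 1.
  LLVMValueRef Ptr = LLVMBuildAlloca(B, LLVMInt32TypeInContext(Ctx), "p");
  LLVMValueRef Zero = LLVMConstInt(LLVMInt8TypeInContext(Ctx), 0, 0);
  LLVMValueRef Len = LLVMConstInt(LLVMInt64TypeInContext(Ctx), 4, 0);
  LLVMValueRef MemSet = LLVMBuildMemSet(B, Ptr, Zero, Len, /*Align=*/1);

  // With this patch, the intrinsic's volatile flag is reachable through the
  // generic C API accessors instead of hitting the AtomicCmpXchgInst cast.
  assert(!LLVMGetVolatile(MemSet));
  LLVMSetVolatile(MemSet, 1);
  assert(LLVMGetVolatile(MemSet));

  LLVMDisposeBuilder(B);
  LLVMDisposeModule(M);
  LLVMContextDispose(Ctx);
  return 0;
}
```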

@AMS21 requested a review from nikic as a code owner on October 10, 2025 09:19
@llvmbot (Member) commented Oct 10, 2025

@llvm/pr-subscribers-llvm-ir

Author: None (AMS21)

Changes

While working on rust-lang/rust#147474 it was discovered that `LLVMSetVolatile` does not work with memory intrinsic functions like `memset`, `memcpy`, and `memmove`.

To quote @nikic:
> LLVMSetVolatile does not support volatile on intrinsics. It only works on the instruction-level volatile flag.

This patch fixes that, so getting and setting the volatile flag now works with these memory intrinsic functions.


Full diff: https://github.com/llvm/llvm-project/pull/162810.diff

3 Files Affected:

  • (modified) llvm/docs/ReleaseNotes.md (+1)
  • (modified) llvm/lib/IR/Core.cpp (+4)
  • (modified) llvm/unittests/IR/InstructionsTest.cpp (+107)
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index 79d93d08b8398..80fc20c5d341a 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -147,6 +147,7 @@ Changes to the C API
 --------------------
 
 * Add `LLVMGetOrInsertFunction` to get or insert a function, replacing the combination of `LLVMGetNamedFunction` and `LLVMAddFunction`.
+* Allow `LLVMGetVolatile` and `LLVMSetVolatile` to work on memory intrinsics like `memset`, `memcpy`, and `memmove`.
 
 Changes to the CodeGen infrastructure
 -------------------------------------
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index 3f1cc1e4e6d0e..3c330bb3c476e 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -4106,6 +4106,8 @@ LLVMBool LLVMGetVolatile(LLVMValueRef MemAccessInst) {
     return SI->isVolatile();
   if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(P))
     return AI->isVolatile();
+  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(P))
+    return MI->isVolatile();
   return cast<AtomicCmpXchgInst>(P)->isVolatile();
 }
 
@@ -4117,6 +4119,8 @@ void LLVMSetVolatile(LLVMValueRef MemAccessInst, LLVMBool isVolatile) {
     return SI->setVolatile(isVolatile);
   if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(P))
     return AI->setVolatile(isVolatile);
+  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(P))
+    return MI->setVolatile(ConstantInt::getBool(P->getContext(), isVolatile));
   return cast<AtomicCmpXchgInst>(P)->setVolatile(isVolatile);
 }
 
diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp
index fe9e7e8228490..020512ba7225a 100644
--- a/llvm/unittests/IR/InstructionsTest.cpp
+++ b/llvm/unittests/IR/InstructionsTest.cpp
@@ -1933,5 +1933,112 @@ TEST(InstructionsTest, StripAndAccumulateConstantOffset) {
   EXPECT_TRUE(Offset.isZero());
 }
 
+TEST(InstructionsTest, LLVMGetSetVolatile) {
+  LLVMContext Ctx;
+  Module M("M", Ctx);
+  FunctionType *FT = FunctionType::get(Type::getVoidTy(Ctx), {}, false);
+  Function *F = Function::Create(FT, Function::ExternalLinkage, "F", M);
+  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
+  IRBuilder<> B(BB);
+
+  Type *Int32Ty = Type::getInt32Ty(Ctx);
+  Value *Val = B.getInt32(1);
+  Value* Ptr = B.CreateAlloca(Int32Ty);
+
+  // LoadInst
+  {
+    LoadInst *Load = B.CreateLoad(Int32Ty, Ptr, "");
+
+    EXPECT_FALSE(LLVMGetVolatile(wrap(Load)));
+    LLVMSetVolatile(wrap(Load), true);
+    EXPECT_TRUE(LLVMGetVolatile(wrap(Load)));
+    LLVMSetVolatile(wrap(Load), false);
+    EXPECT_FALSE(LLVMGetVolatile(wrap(Load)));
+  }
+
+  // StoreInst
+  {
+    StoreInst *Store = B.CreateStore(Val, Ptr);
+
+    EXPECT_FALSE(LLVMGetVolatile(wrap(Store)));
+    LLVMSetVolatile(wrap(Store), true);
+    EXPECT_TRUE(LLVMGetVolatile(wrap(Store)));
+    LLVMSetVolatile(wrap(Store), false);
+    EXPECT_FALSE(LLVMGetVolatile(wrap(Store)));
+  }
+
+  // AtomicRMWInst
+  {
+    AtomicRMWInst *RMW =
+        B.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Val,
+                                MaybeAlign(), AtomicOrdering::Monotonic);
+
+    EXPECT_FALSE(LLVMGetVolatile(wrap(RMW)));
+    LLVMSetVolatile(wrap(RMW), true);
+    EXPECT_TRUE(LLVMGetVolatile(wrap(RMW)));
+    LLVMSetVolatile(wrap(RMW), false);
+    EXPECT_FALSE(LLVMGetVolatile(wrap(RMW)));
+  }
+
+  // MemCpy
+  {
+    CallInst *MemCpy =
+        B.CreateMemCpy(Ptr, MaybeAlign(), Ptr, MaybeAlign(), 4);
+
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemCpy)));
+    LLVMSetVolatile(wrap(MemCpy), true);
+    EXPECT_TRUE(LLVMGetVolatile(wrap(MemCpy)));
+    LLVMSetVolatile(wrap(MemCpy), false);
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemCpy)));
+  }
+
+  // MemMove
+  {
+    CallInst *MemMove =
+        B.CreateMemMove(Ptr, MaybeAlign(), Ptr, MaybeAlign(), 4);
+
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemMove)));
+    LLVMSetVolatile(wrap(MemMove), true);
+    EXPECT_TRUE(LLVMGetVolatile(wrap(MemMove)));
+    LLVMSetVolatile(wrap(MemMove), false);
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemMove)));
+  }
+
+  // MemSet
+  {
+    CallInst *MemSet = B.CreateMemSet(Ptr, B.getInt8(0), 1, Align(1));
+
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemSet)));
+    LLVMSetVolatile(wrap(MemSet), true);
+    EXPECT_TRUE(LLVMGetVolatile(wrap(MemSet)));
+    LLVMSetVolatile(wrap(MemSet), false);
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemSet)));
+  }
+
+  // MemSetInline
+  {
+    CallInst *MemSetInline =
+        B.CreateMemSetInline(Ptr, Align(1), B.getInt8(0), Val);
+
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemSetInline)));
+    LLVMSetVolatile(wrap(MemSetInline), true);
+    EXPECT_TRUE(LLVMGetVolatile(wrap(MemSetInline)));
+    LLVMSetVolatile(wrap(MemSetInline), false);
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemSetInline)));
+  }
+
+  // MemCpyInline
+  {
+    CallInst *MemCpyInline = B.CreateMemCpyInline(
+        Ptr, MaybeAlign(), Val, MaybeAlign(), Val);
+
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemCpyInline)));
+    LLVMSetVolatile(wrap(MemCpyInline), true);
+    EXPECT_TRUE(LLVMGetVolatile(wrap(MemCpyInline)));
+    LLVMSetVolatile(wrap(MemCpyInline), false);
+    EXPECT_FALSE(LLVMGetVolatile(wrap(MemCpyInline)));
+  }
+}
+
 } // end anonymous namespace
 } // end namespace llvm


github-actions bot commented Oct 10, 2025

✅ With the latest revision this PR passed the C/C++ code formatter.

@AMS21 force-pushed the volatile_memory_intrinsics branch from 271c620 to 39ce818 on October 10, 2025 09:24
return AI->isVolatile();
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(P))
return MI->isVolatile();
return cast<AtomicCmpXchgInst>(P)->isVolatile();
Contributor

We could replace this whole function with cast<Instruction>(P)->isVolatile(). This would just return false for instructions that don't support volatile.
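
For reference, a rough sketch of what that suggestion could look like for the getter (not part of this patch): it relies on the generic `Instruction::isVolatile()`, which dispatches over loads, stores, atomics, and memory intrinsics and returns false for everything else.

```cpp
// Sketch only: replace the per-class dyn_casts with the generic accessor.
LLVMBool LLVMGetVolatile(LLVMValueRef MemAccessInst) {
  Value *P = unwrap(MemAccessInst);
  return cast<Instruction>(P)->isVolatile();
}
```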

if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(P))
return AI->setVolatile(isVolatile);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(P))
return MI->setVolatile(ConstantInt::getBool(P->getContext(), isVolatile));
Contributor

After thinking about this a bit more, I'm not sure we should do this. I am planning to change the volatile representation for memory intrinsics from an argument to an operand bundle. This means it will no longer be possible to make the intrinsic volatile in place; it will require creating a new one. So there is a high risk that this support is going to be very short-lived.
