diff --git a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
index 435b80ecaec64..ee1db54446cb8 100644
--- a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
+++ b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
@@ -42,6 +42,15 @@ class DXILIntrinsicExpansionLegacy : public ModulePass {
   static char ID; // Pass identification.
 };
 
+static bool resourceAccessNeeds64BitExpansion(Module *M, Type *OverloadTy,
+                                              bool IsRaw) {
+  if (IsRaw && M->getTargetTriple().getDXILVersion() > VersionTuple(1, 2))
+    return false;
+
+  Type *ScalarTy = OverloadTy->getScalarType();
+  return ScalarTy->isDoubleTy() || ScalarTy->isIntegerTy(64);
+}
+
 static bool isIntrinsicExpansion(Function &F) {
   switch (F.getIntrinsicID()) {
   case Intrinsic::abs:
@@ -71,17 +80,20 @@ static bool isIntrinsicExpansion(Function &F) {
   case Intrinsic::vector_reduce_add:
   case Intrinsic::vector_reduce_fadd:
     return true;
-  case Intrinsic::dx_resource_load_typedbuffer: {
-    // We need to handle i64, doubles, and vectors of them.
-    Type *ScalarTy =
-        F.getReturnType()->getStructElementType(0)->getScalarType();
-    return ScalarTy->isDoubleTy() || ScalarTy->isIntegerTy(64);
-  }
-  case Intrinsic::dx_resource_store_typedbuffer: {
-    // We need to handle i64 and doubles and vectors of i64 and doubles.
-    Type *ScalarTy = F.getFunctionType()->getParamType(2)->getScalarType();
-    return ScalarTy->isDoubleTy() || ScalarTy->isIntegerTy(64);
-  }
+  case Intrinsic::dx_resource_load_rawbuffer:
+    return resourceAccessNeeds64BitExpansion(
+        F.getParent(), F.getReturnType()->getStructElementType(0),
+        /*IsRaw*/ true);
+  case Intrinsic::dx_resource_load_typedbuffer:
+    return resourceAccessNeeds64BitExpansion(
+        F.getParent(), F.getReturnType()->getStructElementType(0),
+        /*IsRaw*/ false);
+  case Intrinsic::dx_resource_store_rawbuffer:
+    return resourceAccessNeeds64BitExpansion(
+        F.getParent(), F.getFunctionType()->getParamType(3), /*IsRaw*/ true);
+  case Intrinsic::dx_resource_store_typedbuffer:
+    return resourceAccessNeeds64BitExpansion(
+        F.getParent(), F.getFunctionType()->getParamType(2), /*IsRaw*/ false);
   }
   return false;
 }
@@ -544,7 +556,7 @@ static Value *expandRadiansIntrinsic(CallInst *Orig) {
   return Builder.CreateFMul(X, PiOver180);
 }
 
-static bool expandTypedBufferLoadIntrinsic(CallInst *Orig) {
+static bool expandBufferLoadIntrinsic(CallInst *Orig, bool IsRaw) {
   IRBuilder<> Builder(Orig);
 
   Type *BufferTy = Orig->getType()->getStructElementType(0);
@@ -552,55 +564,74 @@ static bool expandTypedBufferLoadIntrinsic(CallInst *Orig) {
   bool IsDouble = ScalarTy->isDoubleTy();
   assert(IsDouble || ScalarTy->isIntegerTy(64) &&
                          "Only expand double or int64 scalars or vectors");
-
+  bool IsVector = false;
   unsigned ExtractNum = 2;
   if (auto *VT = dyn_cast<FixedVectorType>(BufferTy)) {
-    assert(VT->getNumElements() == 2 &&
-           "TypedBufferLoad vector must be size 2");
-    ExtractNum = 4;
+    ExtractNum = 2 * VT->getNumElements();
+    IsVector = true;
+    assert((IsRaw || ExtractNum == 4) &&
+           "TypedBufferLoad vector must be size 2");
   }
-  Type *Ty = VectorType::get(Builder.getInt32Ty(), ExtractNum, false);
-
-  Type *LoadType = StructType::get(Ty, Builder.getInt1Ty());
-  CallInst *Load =
-      Builder.CreateIntrinsic(LoadType, Intrinsic::dx_resource_load_typedbuffer,
-                              {Orig->getOperand(0), Orig->getOperand(1)});
-
-  // extract the buffer load's result
-  Value *Extract = Builder.CreateExtractValue(Load, {0});
-
-  SmallVector<Value *> ExtractElements;
-  for (unsigned I = 0; I < ExtractNum; ++I)
-    ExtractElements.push_back(
-        Builder.CreateExtractElement(Extract, Builder.getInt32(I)));
-
-  // combine into double(s) or int64(s)
+  SmallVector<Value *> Loads;
   Value *Result = PoisonValue::get(BufferTy);
-  for (unsigned I = 0; I < ExtractNum; I += 2) {
-    Value *Combined = nullptr;
-    if (IsDouble)
-      // For doubles, use dx_asdouble intrinsic
-      Combined =
-          Builder.CreateIntrinsic(Builder.getDoubleTy(), Intrinsic::dx_asdouble,
-                                  {ExtractElements[I], ExtractElements[I + 1]});
-    else {
-      // For int64, manually combine two int32s
-      // First, zero-extend both values to i64
-      Value *Lo = Builder.CreateZExt(ExtractElements[I], Builder.getInt64Ty());
-      Value *Hi =
-          Builder.CreateZExt(ExtractElements[I + 1], Builder.getInt64Ty());
-      // Shift the high bits left by 32 bits
-      Value *ShiftedHi = Builder.CreateShl(Hi, Builder.getInt64(32));
-      // OR the high and low bits together
-      Combined = Builder.CreateOr(Lo, ShiftedHi);
+  unsigned Base = 0;
+  // If we need to extract more than 4 i32s, we need to break it up into
+  // more than one load. LoadNum tells us how many i32s we are loading in
+  // each load.
+  while (ExtractNum > 0) {
+    unsigned LoadNum = std::min(ExtractNum, 4u);
+    Type *Ty = VectorType::get(Builder.getInt32Ty(), LoadNum, false);
+
+    Type *LoadType = StructType::get(Ty, Builder.getInt1Ty());
+    Intrinsic::ID LoadIntrinsic = Intrinsic::dx_resource_load_typedbuffer;
+    SmallVector<Value *> Args = {Orig->getOperand(0), Orig->getOperand(1)};
+    if (IsRaw) {
+      LoadIntrinsic = Intrinsic::dx_resource_load_rawbuffer;
+      Value *Tmp = Builder.getInt32(4 * Base * 2);
+      Args.push_back(Builder.CreateAdd(Orig->getOperand(2), Tmp));
     }
-    if (ExtractNum == 4)
-      Result = Builder.CreateInsertElement(Result, Combined,
-                                           Builder.getInt32(I / 2));
-    else
-      Result = Combined;
+    CallInst *Load = Builder.CreateIntrinsic(LoadType, LoadIntrinsic, Args);
+    Loads.push_back(Load);
+
+    // extract the buffer load's result
+    Value *Extract = Builder.CreateExtractValue(Load, {0});
+
+    SmallVector<Value *> ExtractElements;
+    for (unsigned I = 0; I < LoadNum; ++I)
+      ExtractElements.push_back(
+          Builder.CreateExtractElement(Extract, Builder.getInt32(I)));
+
+    // combine into double(s) or int64(s)
+    for (unsigned I = 0; I < LoadNum; I += 2) {
+      Value *Combined = nullptr;
+      if (IsDouble)
+        // For doubles, use dx_asdouble intrinsic
+        Combined = Builder.CreateIntrinsic(
+            Builder.getDoubleTy(), Intrinsic::dx_asdouble,
+            {ExtractElements[I], ExtractElements[I + 1]});
+      else {
+        // For int64, manually combine two int32s
+        // First, zero-extend both values to i64
+        Value *Lo =
+            Builder.CreateZExt(ExtractElements[I], Builder.getInt64Ty());
+        Value *Hi =
+            Builder.CreateZExt(ExtractElements[I + 1], Builder.getInt64Ty());
+        // Shift the high bits left by 32 bits
+        Value *ShiftedHi = Builder.CreateShl(Hi, Builder.getInt64(32));
+        // OR the high and low bits together
+        Combined = Builder.CreateOr(Lo, ShiftedHi);
+      }
+
+      if (IsVector)
+        Result = Builder.CreateInsertElement(Result, Combined,
                                              Builder.getInt32((I / 2) + Base));
+      else
+        Result = Combined;
+    }
+
+    ExtractNum -= LoadNum;
+    Base += LoadNum / 2;
   }
 
   Value *CheckBit = nullptr;
@@ -620,8 +651,14 @@ static bool expandTypedBufferLoadIntrinsic(CallInst *Orig) {
     } else {
       // Use of the check bit
      assert(Indices[0] == 1 && "Unexpected type for typedbufferload");
-      if (!CheckBit)
-        CheckBit = Builder.CreateExtractValue(Load, {1});
+      // Note: This does not always match the historical behaviour of DXC.
+      // See https://github.com/microsoft/DirectXShaderCompiler/issues/7622
+      if (!CheckBit) {
+        SmallVector<Value *> CheckBits;
+        for (Value *L : Loads)
+          CheckBits.push_back(Builder.CreateExtractValue(L, {1}));
+        CheckBit = Builder.CreateAnd(CheckBits);
+      }
       EVI->replaceAllUsesWith(CheckBit);
     }
     EVI->eraseFromParent();
@@ -630,30 +667,35 @@ static bool expandTypedBufferLoadIntrinsic(CallInst *Orig) {
   return true;
 }
 
-static bool expandTypedBufferStoreIntrinsic(CallInst *Orig) {
+static bool expandBufferStoreIntrinsic(CallInst *Orig, bool IsRaw) {
   IRBuilder<> Builder(Orig);
 
-  Type *BufferTy = Orig->getFunctionType()->getParamType(2);
+  unsigned ValIndex = IsRaw ? 3 : 2;
+  Type *BufferTy = Orig->getFunctionType()->getParamType(ValIndex);
   Type *ScalarTy = BufferTy->getScalarType();
 
   bool IsDouble = ScalarTy->isDoubleTy();
   assert((IsDouble || ScalarTy->isIntegerTy(64)) &&
          "Only expand double or int64 scalars or vectors");
 
   // Determine if we're dealing with a vector or scalar
-  bool IsVector = isa<FixedVectorType>(BufferTy);
-  if (IsVector) {
-    assert(cast<FixedVectorType>(BufferTy)->getNumElements() == 2 &&
-           "TypedBufferStore vector must be size 2");
+  bool IsVector = false;
+  unsigned ExtractNum = 2;
+  unsigned VecLen = 0;
+  if (auto *VT = dyn_cast<FixedVectorType>(BufferTy)) {
+    VecLen = VT->getNumElements();
+    assert((IsRaw || VecLen == 2) && "TypedBufferStore vector must be size 2");
+    ExtractNum = VecLen * 2;
+    IsVector = true;
   }
 
   // Create the appropriate vector type for the result
   Type *Int32Ty = Builder.getInt32Ty();
-  Type *ResultTy = VectorType::get(Int32Ty, IsVector ? 4 : 2, false);
+  Type *ResultTy = VectorType::get(Int32Ty, ExtractNum, false);
   Value *Val = PoisonValue::get(ResultTy);
 
   Type *SplitElementTy = Int32Ty;
   if (IsVector)
-    SplitElementTy = VectorType::get(SplitElementTy, 2, false);
+    SplitElementTy = VectorType::get(SplitElementTy, VecLen, false);
 
   Value *LowBits = nullptr;
   Value *HighBits = nullptr;
@@ -661,15 +703,16 @@ static bool expandTypedBufferStoreIntrinsic(CallInst *Orig) {
   if (IsDouble) {
     auto *SplitTy = llvm::StructType::get(SplitElementTy, SplitElementTy);
     Value *Split = Builder.CreateIntrinsic(SplitTy, Intrinsic::dx_splitdouble,
-                                           {Orig->getOperand(2)});
+                                           {Orig->getOperand(ValIndex)});
     LowBits = Builder.CreateExtractValue(Split, 0);
     HighBits = Builder.CreateExtractValue(Split, 1);
   } else {
     // Handle int64 type(s)
-    Value *InputVal = Orig->getOperand(2);
+    Value *InputVal = Orig->getOperand(ValIndex);
     Constant *ShiftAmt = Builder.getInt64(32);
     if (IsVector)
-      ShiftAmt = ConstantVector::getSplat(ElementCount::getFixed(2), ShiftAmt);
+      ShiftAmt =
+          ConstantVector::getSplat(ElementCount::getFixed(VecLen), ShiftAmt);
 
     // Split into low and high 32-bit parts
     LowBits = Builder.CreateTrunc(InputVal, SplitElementTy);
@@ -678,17 +721,48 @@ static bool expandTypedBufferStoreIntrinsic(CallInst *Orig) {
   }
 
   if (IsVector) {
-    Val = Builder.CreateShuffleVector(LowBits, HighBits, {0, 2, 1, 3});
+    SmallVector<int> Mask;
+    for (unsigned I = 0; I < VecLen; ++I) {
+      Mask.push_back(I);
+      Mask.push_back(I + VecLen);
+    }
+    Val = Builder.CreateShuffleVector(LowBits, HighBits, Mask);
   } else {
     Val = Builder.CreateInsertElement(Val, LowBits, Builder.getInt32(0));
     Val = Builder.CreateInsertElement(Val, HighBits, Builder.getInt32(1));
   }
 
-  // Create the final intrinsic call
-  Builder.CreateIntrinsic(Builder.getVoidTy(),
-                          Intrinsic::dx_resource_store_typedbuffer,
-                          {Orig->getOperand(0), Orig->getOperand(1), Val});
+  // If we need to extract more than 4 i32s, we need to break it up into
+  // more than one store. StoreNum tells us how many i32s we are storing in
+  // each store.
+  unsigned Base = 0;
+  while (ExtractNum > 0) {
+    unsigned StoreNum = std::min(ExtractNum, 4u);
+
+    Intrinsic::ID StoreIntrinsic = Intrinsic::dx_resource_store_typedbuffer;
+    SmallVector<Value *> Args = {Orig->getOperand(0), Orig->getOperand(1)};
+    if (IsRaw) {
+      StoreIntrinsic = Intrinsic::dx_resource_store_rawbuffer;
+      Value *Tmp = Builder.getInt32(4 * Base);
+      Args.push_back(Builder.CreateAdd(Orig->getOperand(2), Tmp));
+    }
+
+    SmallVector<int> Mask;
+    for (unsigned I = 0; I < StoreNum; ++I) {
+      Mask.push_back(Base + I);
+    }
+
+    Value *SubVal = Val;
+    if (VecLen > 2)
+      SubVal = Builder.CreateShuffleVector(Val, Mask);
+
+    Args.push_back(SubVal);
+    // Create the final intrinsic call
+    Builder.CreateIntrinsic(Builder.getVoidTy(), StoreIntrinsic, Args);
+    ExtractNum -= StoreNum;
+    Base += StoreNum;
+  }
 
   Orig->eraseFromParent();
   return true;
 }
@@ -821,12 +895,20 @@ static bool expandIntrinsic(Function &F, CallInst *Orig) {
   case Intrinsic::dx_radians:
     Result = expandRadiansIntrinsic(Orig);
     break;
+  case Intrinsic::dx_resource_load_rawbuffer:
+    if (expandBufferLoadIntrinsic(Orig, /*IsRaw*/ true))
+      return true;
+    break;
+  case Intrinsic::dx_resource_store_rawbuffer:
+    if (expandBufferStoreIntrinsic(Orig, /*IsRaw*/ true))
+      return true;
+    break;
   case Intrinsic::dx_resource_load_typedbuffer:
-    if (expandTypedBufferLoadIntrinsic(Orig))
+    if (expandBufferLoadIntrinsic(Orig, /*IsRaw*/ false))
      return true;
    break;
  case Intrinsic::dx_resource_store_typedbuffer:
-    if (expandTypedBufferStoreIntrinsic(Orig))
+    if (expandBufferStoreIntrinsic(Orig, /*IsRaw*/ false))
      return true;
    break;
  case Intrinsic::usub_sat:
diff --git a/llvm/test/CodeGen/DirectX/RawBufferLoad-error64.ll b/llvm/test/CodeGen/DirectX/RawBufferLoad-error64.ll
deleted file mode 100644
index d8b6311c8ff2e..0000000000000
--- a/llvm/test/CodeGen/DirectX/RawBufferLoad-error64.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; We use llc for this test so that we don't abort after the first error.
-; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s - -target triple = "dxil-pc-shadermodel6.2-compute" - -declare void @v4f64_user(<4 x double>) - -; Can't load 64 bit types directly until SM6.3 (byteaddressbuf.Load) -; CHECK: error: -; CHECK-SAME: in function loadv4f64_byte -; CHECK-SAME: Cannot create RawBufferLoad operation: Invalid overload type -define void @loadv4f64_byte(i32 %offset) "hlsl.export" { - %buffer = call target("dx.RawBuffer", i8, 0, 0, 0) - @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i8_0_0_0( - i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) - - %load = call {<4 x double>, i1} @llvm.dx.resource.load.rawbuffer.v4i64( - target("dx.RawBuffer", i8, 0, 0, 0) %buffer, i32 %offset, i32 0) - %data = extractvalue {<4 x double>, i1} %load, 0 - - call void @v4f64_user(<4 x double> %data) - - ret void -} diff --git a/llvm/test/CodeGen/DirectX/RawBufferLoadDouble.ll b/llvm/test/CodeGen/DirectX/RawBufferLoadDouble.ll new file mode 100644 index 0000000000000..9213d60c9b496 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/RawBufferLoadDouble.ll @@ -0,0 +1,229 @@ +; RUN: opt -mtriple=dxil-pc-shadermodel6.2-compute -S -dxil-intrinsic-expansion %s | FileCheck %s --check-prefixes=CHECK,CHECK62 +; RUN: opt -mtriple=dxil-pc-shadermodel6.3-compute -S -dxil-intrinsic-expansion %s | FileCheck %s --check-prefixes=CHECK,CHECK63 + +define void @loadf64(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK: [[B:%.*]] = call target("dx.Rawbuffer", double, 0, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_f64_0_0t( + ; CHECK-SAME: i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", double, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_f64_1_0_0t( + i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { double, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", double, 0, 0) [[B]], i32 %index, i32 0) + + ; check we load an <2 x i32> instead of a double + ; CHECK62-NOT: call {double, i1} @llvm.dx.resource.load.rawbuffer + ; CHECK62: [[L0:%.*]] = call { <2 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v2i32.tdx.Rawbuffer_f64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", double, 0, 0) [[B]], i32 %index, i32 0) + %load0 = call {double, i1} @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", double, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { double, i1 } [[L0]], 0 + + ; check we extract the two i32 and construct a double + ; CHECK62: [[D0:%.*]] = extractvalue { <2 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo:%.*]] = extractelement <2 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi:%.*]] = extractelement <2 x i32> [[D0]], i32 1 + ; CHECK62: [[DBL:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo]], i32 [[Hi]]) + ; CHECK62-NOT: extractvalue { double, i1 } + %data0 = extractvalue {double, i1} %load0, 0 + ret void +} + +define void @loadv2f64(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK: [[B:%.*]] = call target("dx.Rawbuffer", <2 x double>, 0, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v2f64_0_0t( + ; CHECK-SAME: i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", <2 x double>, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v2f64_1_0_0t( + i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { <2 x 
double>, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", <2 x double>, 0, 0) [[B]], i32 %index, i32 0) + + ; check we load an <4 x i32> instead of a double2 + ; CHECK62: [[L0:%.*]] = call { <4 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v4i32.tdx.Rawbuffer_v2f64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <2 x double>, 0, 0) [[B]], i32 %index, i32 0) + %load0 = call { <2 x double>, i1 } @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", <2 x double>, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { <2 x double>, i1 } [[L0]], 0 + + ; check we extract the 4 i32 and construct a <2 x double> + ; CHECK62: [[D0:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo1:%.*]] = extractelement <4 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi1:%.*]] = extractelement <4 x i32> [[D0]], i32 1 + ; CHECK62: [[Lo2:%.*]] = extractelement <4 x i32> [[D0]], i32 2 + ; CHECK62: [[Hi2:%.*]] = extractelement <4 x i32> [[D0]], i32 3 + ; CHECK62: [[Dbl1:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo1]], i32 [[Hi1]]) + ; CHECK62: [[Vec:%.*]] = insertelement <2 x double> poison, double [[Dbl1]], i32 0 + ; CHECK62: [[Dbl2:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo2]], i32 [[Hi2]]) + ; CHECK62: [[Vec2:%.*]] = insertelement <2 x double> [[Vec]], double [[Dbl2]], i32 1 + ; CHECK62-NOT: extractvalue { <2 x double>, i1 } + %data0 = extractvalue { <2 x double>, i1 } %load0, 0 + ret void +} + +; show we properly handle extracting the check bit +define void @loadf64WithCheckBit(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK: [[B:%.*]] = call target("dx.Rawbuffer", double, 0, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_f64_0_0t( + ; CHECK-SAME: i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", double, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_f64_1_0_0t( + i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { double, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", double, 0, 0) [[B]], i32 %index, i32 0) + + ; check we load an <2 x i32> instead of a double + ; CHECK62-NOT: call {double, i1} @llvm.dx.resource.load.rawbuffer + ; CHECK62: [[L0:%.*]] = call { <2 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v2i32.tdx.Rawbuffer_f64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", double, 0, 0) [[B]], i32 %index, i32 0) + %load0 = call {double, i1} @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", double, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { double, i1 } [[L0]], 0 + ; CHECK63: extractvalue { double, i1 } [[L0]], 1 + + ; check we extract the two i32 and construct a double + ; CHECK62: [[D0:%.*]] = extractvalue { <2 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo:%.*]] = extractelement <2 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi:%.*]] = extractelement <2 x i32> [[D0]], i32 1 + ; CHECK62: [[DBL:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo]], i32 [[Hi]]) + %data0 = extractvalue {double, i1} %load0, 0 + ; CHECK62: extractvalue { <2 x i32>, i1 } [[L0]], 1 + ; CHECK62-NOT: extractvalue { double, i1 } + %cb = extractvalue {double, i1} %load0, 1 + ret void +} + +; Raw Buffer Load allows for double3 and double4 to be loaded +; In SM6.2 and below, two loads will be performed. 
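+; As a rough sketch (value names here are illustrative and are not checked by
+; FileCheck), the SM6.2 expansion of a <3 x double> load at byte offset 0 is:
+;   %p0 = call { <4 x i32>, i1 } @llvm.dx.resource.load.rawbuffer.v4i32(..., i32 %index, i32 0)
+;   %p1 = call { <2 x i32>, i1 } @llvm.dx.resource.load.rawbuffer.v2i32(..., i32 %index, i32 16)
+;   ; each lo/hi pair of i32s is recombined with @llvm.dx.asdouble.i32
+;   %ok = and i1 %check0, %check1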
+; Show we and the checkbits together + +define void @loadv3f64(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK: [[B:%.*]] = call target("dx.Rawbuffer", <3 x double>, 0, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v3f64_0_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", <3 x double>, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v3f64_0_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { <3 x double>, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", <3 x double>, 0, 0) [[B]], i32 %index, i32 0) + + ; check we perform two loads + ; and do 6 extracts and construct 3 doubles + ; CHECK62-NOT: call {<3 x double>, i1} @llvm.dx.resource.load.rawbuffer + ; CHECK62: [[L0:%.*]] = call { <4 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v4i32.tdx.Rawbuffer_v3f64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <3 x double>, 0, 0) [[B]], i32 %index, i32 0) + + ; CHECK62: [[D0:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo1:%.*]] = extractelement <4 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi1:%.*]] = extractelement <4 x i32> [[D0]], i32 1 + ; CHECK62: [[Lo2:%.*]] = extractelement <4 x i32> [[D0]], i32 2 + ; CHECK62: [[Hi2:%.*]] = extractelement <4 x i32> [[D0]], i32 3 + ; CHECK62: [[DBL1:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo1]], i32 [[Hi1]]) + ; CHECK62: [[Vec1:%.*]] = insertelement <3 x double> poison, double [[DBL1]], i32 0 + ; CHECK62: [[DBL2:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo2]], i32 [[Hi2]]) + ; CHECK62: [[Vec2:%.*]] = insertelement <3 x double> [[Vec1]], double [[DBL2]], i32 1 + + ; 2nd load + ; CHECK62: [[L2:%.*]] = call { <2 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v2i32.tdx.Rawbuffer_v3f64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <3 x double>, 0, 0) [[B]], i32 %index, i32 16) + + ; CHECK62: [[D2:%.*]] = extractvalue { <2 x i32>, i1 } [[L2]], 0 + ; CHECK62: [[Lo3:%.*]] = extractelement <2 x i32> [[D2]], i32 0 + ; CHECK62: [[Hi3:%.*]] = extractelement <2 x i32> [[D2]], i32 1 + ; CHECK62: [[DBL3:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo3]], i32 [[Hi3]]) + ; CHECK62: [[Vec3:%.*]] = insertelement <3 x double> [[Vec2]], double [[DBL3]], i32 2 + %load0 = call {<3 x double>, i1} @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", <3 x double>, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { <3 x double>, i1 } [[L0]], 0 + ; CHECK63: extractvalue { <3 x double>, i1 } [[L0]], 1 + + ; CHECK62-NOT: extractvalue {<3 x double>, i1 } + %data0 = extractvalue {<3 x double>, i1} %load0, 0 + ; check we extract checkbit from both loads and and them together + ; CHECK62: [[B1:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 1 + ; CHECK62: [[B2:%.*]] = extractvalue { <2 x i32>, i1 } [[L2]], 1 + ; CHECK62: and i1 [[B1]], [[B2]] + %cb = extractvalue {<3 x double>, i1} %load0, 1 + ret void +} + +define void @loadv4f64(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK: [[B:%.*]] = call target("dx.Rawbuffer", <4 x double>, 0, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v4f64_0_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", <4 x double>, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v4f64_0_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; 
check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { <4 x double>, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", <4 x double>, 0, 0) [[B]], i32 %index, i32 0) + + ; check we perform two loads + ; and do 8 extracts and construct 4 doubles + ; CHECK62-NOT: call {<4 x double>, i1} @llvm.dx.resource.load.rawbuffer + ; CHECK62: [[L0:%.*]] = call { <4 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v4i32.tdx.Rawbuffer_v4f64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <4 x double>, 0, 0) [[B]], i32 %index, i32 0) + + ; CHECK62: [[D0:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo1:%.*]] = extractelement <4 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi1:%.*]] = extractelement <4 x i32> [[D0]], i32 1 + ; CHECK62: [[Lo2:%.*]] = extractelement <4 x i32> [[D0]], i32 2 + ; CHECK62: [[Hi2:%.*]] = extractelement <4 x i32> [[D0]], i32 3 + ; CHECK62: [[DBL1:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo1]], i32 [[Hi1]]) + ; CHECK62: [[Vec1:%.*]] = insertelement <4 x double> poison, double [[DBL1]], i32 0 + ; CHECK62: [[DBL2:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo2]], i32 [[Hi2]]) + ; CHECK62: [[Vec2:%.*]] = insertelement <4 x double> [[Vec1]], double [[DBL2]], i32 1 + + ; 2nd load + ; CHECK62: [[L2:%.*]] = call { <4 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v4i32.tdx.Rawbuffer_v4f64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <4 x double>, 0, 0) [[B]], i32 %index, i32 16) + + ; CHECK62: [[D2:%.*]] = extractvalue { <4 x i32>, i1 } [[L2]], 0 + ; CHECK62: [[Lo3:%.*]] = extractelement <4 x i32> [[D2]], i32 0 + ; CHECK62: [[Hi3:%.*]] = extractelement <4 x i32> [[D2]], i32 1 + ; CHECK62: [[Lo4:%.*]] = extractelement <4 x i32> [[D2]], i32 2 + ; CHECK62: [[Hi4:%.*]] = extractelement <4 x i32> [[D2]], i32 3 + ; CHECK62: [[DBL3:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo3]], i32 [[Hi3]]) + ; CHECK62: [[Vec3:%.*]] = insertelement <4 x double> [[Vec2]], double [[DBL3]], i32 2 + ; CHECK62: [[DBL4:%.*]] = call double @llvm.dx.asdouble.i32(i32 [[Lo4]], i32 [[Hi4]]) + ; CHECK62: [[Vec4:%.*]] = insertelement <4 x double> [[Vec3]], double [[DBL4]], i32 3 + %load0 = call {<4 x double>, i1} @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", <4 x double>, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { <4 x double>, i1 } [[L0]], 0 + ; CHECK63: extractvalue { <4 x double>, i1 } [[L0]], 1 + + ; CHECK62-NOT: extractvalue {<4 x double>, i1 } + %data0 = extractvalue {<4 x double>, i1} %load0, 0 + ; check we extract checkbit from both loads and and them together + ; CHECK62: [[B1:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 1 + ; CHECK62: [[B2:%.*]] = extractvalue { <4 x i32>, i1 } [[L2]], 1 + ; CHECK62: and i1 [[B1]], [[B2]] + %cb = extractvalue {<4 x double>, i1} %load0, 1 + ret void +} diff --git a/llvm/test/CodeGen/DirectX/RawBufferLoadInt64.ll b/llvm/test/CodeGen/DirectX/RawBufferLoadInt64.ll new file mode 100644 index 0000000000000..a1c153f2c0c84 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/RawBufferLoadInt64.ll @@ -0,0 +1,262 @@ +; RUN: opt -mtriple=dxil-pc-shadermodel6.2-compute -S -dxil-intrinsic-expansion %s | FileCheck %s --check-prefixes=CHECK,CHECK62 +; RUN: opt -mtriple=dxil-pc-shadermodel6.3-compute -S -dxil-intrinsic-expansion %s | FileCheck %s --check-prefixes=CHECK,CHECK63 + +define void @loadi64(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK: [[B:%.*]] = call target("dx.Rawbuffer", i64, 1, 0, 0) + ; CHECK-SAME: 
@llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_i64_1_0_0t( + ; CHECK-SAME: i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", i64, 1, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_i64_1_0_0t( + i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { i64, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", i64, 1, 0, 0) [[B]], i32 %index, i32 0) + + ; check we load an <2 x i32> instead of a i64 + ; CHECK62-NOT: call {i64, i1} @llvm.dx.resource.load.rawbuffer + ; CHECK62: [[L0:%.*]] = call { <2 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v2i32.tdx.Rawbuffer_i64_1_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", i64, 1, 0, 0) [[B]], i32 %index, i32 0) + %load0 = call {i64, i1} @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", i64, 1, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { i64, i1 } [[L0]], 0 + + ; check we extract the two i32 and construct a i64 + ; CHECK62: [[D0:%.*]] = extractvalue { <2 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo:%.*]] = extractelement <2 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi:%.*]] = extractelement <2 x i32> [[D0]], i32 1 + ; CHECK62: [[ZLo1:%.*]] = zext i32 [[Lo]] to i64 + ; CHECK62: [[ZHi1:%.*]] = zext i32 [[Hi]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi1]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo1]], [[A]] + ; CHECK62-NOT: extractvalue { i64, i1 } + %data0 = extractvalue {i64, i1} %load0, 0 + ret void +} + +define void @loadv2i64(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK: [[B:%.*]] = call target("dx.Rawbuffer", <2 x i64>, 1, 0, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v2i64_1_0_0t( + ; CHECK-SAME: i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", <2 x i64>, 1, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v2i64_1_0_0t( + i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { <2 x i64>, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", <2 x i64>, 1, 0, 0) [[B]], i32 %index, i32 0) + + ; check we load an <4 x i32> instead of a i642 + ; CHECK62: [[L0:%.*]] = call { <4 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v4i32.tdx.Rawbuffer_v2i64_1_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <2 x i64>, 1, 0, 0) [[B]], i32 %index, i32 0) + %load0 = call { <2 x i64>, i1 } @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", <2 x i64>, 1, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { <2 x i64>, i1 } [[L0]], 0 + + ; check we extract the 4 i32 and construct a <2 x i64> + ; CHECK62: [[D0:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo1:%.*]] = extractelement <4 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi1:%.*]] = extractelement <4 x i32> [[D0]], i32 1 + ; CHECK62: [[Lo2:%.*]] = extractelement <4 x i32> [[D0]], i32 2 + ; CHECK62: [[Hi2:%.*]] = extractelement <4 x i32> [[D0]], i32 3 + ; CHECK62: [[ZLo1:%.*]] = zext i32 [[Lo1]] to i64 + ; CHECK62: [[ZHi1:%.*]] = zext i32 [[Hi1]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi1]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo1]], [[A]] + ; CHECK62: [[Vec:%.*]] = insertelement <2 x i64> poison, i64 [[B]], i32 0 + ; CHECK62: [[ZLo2:%.*]] = zext i32 [[Lo2]] to i64 + ; CHECK62: [[ZHi2:%.*]] = zext i32 [[Hi2]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi2]], 32 + ; 
CHECK62: [[B:%.*]] = or i64 [[ZLo2]], [[A]] + ; CHECK62: [[Vec2:%.*]] = insertelement <2 x i64> [[Vec]], i64 [[B]], i32 1 + ; CHECK62-NOT: extractvalue { <2 x i64>, i1 } + %data0 = extractvalue { <2 x i64>, i1 } %load0, 0 + ret void +} + +; show we properly handle extracting the check bit +define void @loadi64WithCheckBit(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK: [[B:%.*]] = call target("dx.Rawbuffer", i64, 1, 0, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_i64_1_0_0t( + ; CHECK-SAME: i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", i64, 1, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_i64_1_0_0t( + i32 0, i32 1, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { i64, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", i64, 1, 0, 0) [[B]], i32 %index, i32 0) + + ; check we load an <2 x i32> instead of a i64 + ; CHECK62-NOT: call {i64, i1} @llvm.dx.resource.load.rawbuffer + ; CHECK62: [[L0:%.*]] = call { <2 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v2i32.tdx.Rawbuffer_i64_1_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", i64, 1, 0, 0) [[B]], i32 %index, i32 0) + %load0 = call {i64, i1} @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", i64, 1, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { i64, i1 } [[L0]], 0 + ; CHECK63: extractvalue { i64, i1 } [[L0]], 1 + + ; check we extract the two i32 and construct a i64 + ; CHECK62: [[D0:%.*]] = extractvalue { <2 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo:%.*]] = extractelement <2 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi:%.*]] = extractelement <2 x i32> [[D0]], i32 1 + ; CHECK62: [[ZLo1:%.*]] = zext i32 [[Lo]] to i64 + ; CHECK62: [[ZHi1:%.*]] = zext i32 [[Hi]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi1]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo1]], [[A]] + %data0 = extractvalue {i64, i1} %load0, 0 + ; CHECK62: extractvalue { <2 x i32>, i1 } [[L0]], 1 + ; CHECK62-NOT: extractvalue { i64, i1 } + %cb = extractvalue {i64, i1} %load0, 1 + ret void +} + +; Raw Buffer Load allows for i64_t3 and i64_t4 to be loaded +; In SM6.2 and below, two loads will be performed. 
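+; As a rough sketch (placeholder value names, not checked by FileCheck), each
+; i64 element is rebuilt from its two loaded i32 halves like so:
+;   %lo64 = zext i32 %lo to i64
+;   %hi64 = zext i32 %hi to i64
+;   %hishl = shl i64 %hi64, 32
+;   %elem  = or i64 %lo64, %hishl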
+; Show we and the checkbits together + +define void @loadv3i64(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK: [[Buf:%.*]] = call target("dx.Rawbuffer", <3 x i64>, 0, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v3i64_0_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", <3 x i64>, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v3i64_0_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { <3 x i64>, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", <3 x i64>, 0, 0) [[B]], i32 %index, i32 0) + + ; check we perform two loads + ; and do 6 extracts and construct 3 i64s + ; CHECK62-NOT: call {<3 x i64>, i1} @llvm.dx.resource.load.rawbuffer + ; CHECK62: [[L0:%.*]] = call { <4 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v4i32.tdx.Rawbuffer_v3i64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <3 x i64>, 0, 0) [[Buf]], i32 %index, i32 0) + + ; CHECK62: [[D0:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo1:%.*]] = extractelement <4 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi1:%.*]] = extractelement <4 x i32> [[D0]], i32 1 + ; CHECK62: [[Lo2:%.*]] = extractelement <4 x i32> [[D0]], i32 2 + ; CHECK62: [[Hi2:%.*]] = extractelement <4 x i32> [[D0]], i32 3 + ; CHECK62: [[ZLo1:%.*]] = zext i32 [[Lo1]] to i64 + ; CHECK62: [[ZHi1:%.*]] = zext i32 [[Hi1]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi1]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo1]], [[A]] + ; CHECK62: [[Vec1:%.*]] = insertelement <3 x i64> poison, i64 [[B]], i32 0 + ; CHECK62: [[ZLo2:%.*]] = zext i32 [[Lo2]] to i64 + ; CHECK62: [[ZHi2:%.*]] = zext i32 [[Hi2]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi2]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo2]], [[A]] + ; CHECK62: [[Vec2:%.*]] = insertelement <3 x i64> [[Vec1]], i64 [[B]], i32 1 + + ; 2nd load + ; CHECK62: [[L2:%.*]] = call { <2 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v2i32.tdx.Rawbuffer_v3i64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <3 x i64>, 0, 0) [[Buf]], i32 %index, i32 16) + + ; CHECK62: [[D2:%.*]] = extractvalue { <2 x i32>, i1 } [[L2]], 0 + ; CHECK62: [[Lo3:%.*]] = extractelement <2 x i32> [[D2]], i32 0 + ; CHECK62: [[Hi3:%.*]] = extractelement <2 x i32> [[D2]], i32 1 + ; CHECK62: [[ZLo3:%.*]] = zext i32 [[Lo3]] to i64 + ; CHECK62: [[ZHi3:%.*]] = zext i32 [[Hi3]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi3]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo3]], [[A]] + ; CHECK62: [[Vec3:%.*]] = insertelement <3 x i64> [[Vec2]], i64 [[B]], i32 2 + %load0 = call {<3 x i64>, i1} @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", <3 x i64>, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { <3 x i64>, i1 } [[L0]], 0 + ; CHECK63: extractvalue { <3 x i64>, i1 } [[L0]], 1 + + ; CHECK62-NOT: extractvalue {<3 x i64>, i1 } + %data0 = extractvalue {<3 x i64>, i1} %load0, 0 + ; check we extract checkbit from both loads and and them together + ; CHECK62: [[B1:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 1 + ; CHECK62: [[B2:%.*]] = extractvalue { <2 x i32>, i1 } [[L2]], 1 + ; CHECK62: and i1 [[B1]], [[B2]] + %cb = extractvalue {<3 x i64>, i1} %load0, 1 + ret void +} + +define void @loadv4i64(i32 %index) { + ; check the handle from binding is unchanged + ; CHECK62: [[Buf:%.*]] = call target("dx.Rawbuffer", <4 x i64>, 0, 0) + ; CHECK62-SAME: 
@llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v4i64_0_0t( + ; CHECK62-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = call target("dx.Rawbuffer", <4 x i64>, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.Rawbuffer_v4i64_0_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: [[L0:%.*]] = call { <4 x i64>, i1 } @llvm.dx.resource.load.rawbuffer + ; CHECK63-SAME: target("dx.Rawbuffer", <4 x i64>, 0, 0) [[B]], i32 %index, i32 0) + + ; check we perform two loads + ; and do 8 extracts and construct 4 i64s + ; CHECK62-NOT: call {<4 x i64>, i1} @llvm.dx.resource.load.rawbuffer + ; CHECK62: [[L0:%.*]] = call { <4 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v4i32.tdx.Rawbuffer_v4i64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <4 x i64>, 0, 0) [[Buf]], i32 %index, i32 0) + + ; CHECK62: [[D0:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 0 + ; CHECK62: [[Lo1:%.*]] = extractelement <4 x i32> [[D0]], i32 0 + ; CHECK62: [[Hi1:%.*]] = extractelement <4 x i32> [[D0]], i32 1 + ; CHECK62: [[Lo2:%.*]] = extractelement <4 x i32> [[D0]], i32 2 + ; CHECK62: [[Hi2:%.*]] = extractelement <4 x i32> [[D0]], i32 3 + ; CHECK62: [[ZLo1:%.*]] = zext i32 [[Lo1]] to i64 + ; CHECK62: [[ZHi1:%.*]] = zext i32 [[Hi1]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi1]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo1]], [[A]] + ; CHECK62: [[Vec1:%.*]] = insertelement <4 x i64> poison, i64 [[B]], i32 0 + ; CHECK62: [[ZLo2:%.*]] = zext i32 [[Lo2]] to i64 + ; CHECK62: [[ZHi2:%.*]] = zext i32 [[Hi2]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi2]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo2]], [[A]] + ; CHECK62: [[Vec2:%.*]] = insertelement <4 x i64> [[Vec1]], i64 [[B]], i32 1 + + ; 2nd load + ; CHECK62: [[L2:%.*]] = call { <4 x i32>, i1 } + ; CHECK62-SAME: @llvm.dx.resource.load.rawbuffer.v4i32.tdx.Rawbuffer_v4i64_0_0t( + ; CHECK62-SAME: target("dx.Rawbuffer", <4 x i64>, 0, 0) [[Buf]], i32 %index, i32 16) + + ; CHECK62: [[D2:%.*]] = extractvalue { <4 x i32>, i1 } [[L2]], 0 + ; CHECK62: [[Lo3:%.*]] = extractelement <4 x i32> [[D2]], i32 0 + ; CHECK62: [[Hi3:%.*]] = extractelement <4 x i32> [[D2]], i32 1 + ; CHECK62: [[Lo4:%.*]] = extractelement <4 x i32> [[D2]], i32 2 + ; CHECK62: [[Hi4:%.*]] = extractelement <4 x i32> [[D2]], i32 3 + ; CHECK62: [[ZLo3:%.*]] = zext i32 [[Lo3]] to i64 + ; CHECK62: [[ZHi3:%.*]] = zext i32 [[Hi3]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi3]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo3]], [[A]] + ; CHECK62: [[Vec3:%.*]] = insertelement <4 x i64> [[Vec2]], i64 [[B]], i32 2 + ; CHECK62: [[ZLo4:%.*]] = zext i32 [[Lo4]] to i64 + ; CHECK62: [[ZHi4:%.*]] = zext i32 [[Hi4]] to i64 + ; CHECK62: [[A:%.*]] = shl i64 [[ZHi4]], 32 + ; CHECK62: [[B:%.*]] = or i64 [[ZLo4]], [[A]] + ; CHECK62: [[Vec4:%.*]] = insertelement <4 x i64> [[Vec3]], i64 [[B]], i32 3 + %load0 = call {<4 x i64>, i1} @llvm.dx.resource.load.rawbuffer( + target("dx.Rawbuffer", <4 x i64>, 0, 0) %buffer, i32 %index, i32 0) + + ; CHECK63: extractvalue { <4 x i64>, i1 } [[L0]], 0 + ; CHECK63: extractvalue { <4 x i64>, i1 } [[L0]], 1 + + ; CHECK62-NOT: extractvalue {<4 x i64>, i1 } + %data0 = extractvalue {<4 x i64>, i1} %load0, 0 + ; check we extract checkbit from both loads and and them together + ; CHECK62: [[B1:%.*]] = extractvalue { <4 x i32>, i1 } [[L0]], 1 + ; CHECK62: [[B2:%.*]] = extractvalue { <4 x i32>, i1 } [[L2]], 1 + ; CHECK62: and i1 [[B1]], [[B2]] + %cb = extractvalue {<4 x i64>, i1} %load0, 1 + ret void +} diff --git 
a/llvm/test/CodeGen/DirectX/RawBufferStore-error64.ll b/llvm/test/CodeGen/DirectX/RawBufferStore-error64.ll deleted file mode 100644 index 895c4c3b9f5fe..0000000000000 --- a/llvm/test/CodeGen/DirectX/RawBufferStore-error64.ll +++ /dev/null @@ -1,20 +0,0 @@ -; We use llc for this test so that we don't abort after the first error. -; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s - -target triple = "dxil-pc-shadermodel6.2-compute" - -; Can't store 64 bit types directly until SM6.3 (byteaddressbuf.Store) -; CHECK: error: -; CHECK-SAME: in function storev4f64_byte -; CHECK-SAME: Cannot create RawBufferStore operation: Invalid overload type -define void @storev4f64_byte(i32 %offset, <4 x double> %data) "hlsl.export" { - %buffer = call target("dx.RawBuffer", i8, 1, 0, 0) - @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i8_0_0_0( - i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) - - call void @llvm.dx.resource.store.rawbuffer.v4i64( - target("dx.RawBuffer", i8, 1, 0, 0) %buffer, - i32 %offset, i32 0, <4 x double> %data) - - ret void -} diff --git a/llvm/test/CodeGen/DirectX/RawBufferStoreDouble.ll b/llvm/test/CodeGen/DirectX/RawBufferStoreDouble.ll new file mode 100644 index 0000000000000..ddcd761d812fa --- /dev/null +++ b/llvm/test/CodeGen/DirectX/RawBufferStoreDouble.ll @@ -0,0 +1,105 @@ +; RUN: opt -mtriple=dxil-pc-shadermodel6.2-compute -S -dxil-intrinsic-expansion %s | FileCheck %s --check-prefixes=CHECK,CHECK62 +; RUN: opt -mtriple=dxil-pc-shadermodel6.3-compute -S -dxil-intrinsic-expansion %s | FileCheck %s --check-prefixes=CHECK,CHECK63 + +define void @storef64(double %0, i32 %index) { + ; CHECK: [[B:%.*]] = tail call target("dx.RawBuffer", double, 1, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_f64_1_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = tail call target("dx.RawBuffer", double, 1, 0) + @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_f64_1_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: call void @llvm.dx.resource.store.rawbuffer + ; CHECK63-SAME: target("dx.RawBuffer", double, 1, 0) [[B]], i32 %index, i32 0, double %0) + + ; check we split the double and store the lo and hi bits + ; CHECK62: [[SD:%.*]] = call { i32, i32 } @llvm.dx.splitdouble.i32(double %0) + ; CHECK62: [[Lo:%.*]] = extractvalue { i32, i32 } [[SD]], 0 + ; CHECK62: [[Hi:%.*]] = extractvalue { i32, i32 } [[SD]], 1 + ; CHECK62: [[Vec1:%.*]] = insertelement <2 x i32> poison, i32 [[Lo]], i32 0 + ; CHECK62: [[Vec2:%.*]] = insertelement <2 x i32> [[Vec1]], i32 [[Hi]], i32 1 + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_f64_1_0t.v2i32( + ; CHECK62-SAME: target("dx.RawBuffer", double, 1, 0) [[B]], i32 %index, i32 0, <2 x i32> [[Vec2]]) + call void @llvm.dx.resource.store.rawbuffer( + target("dx.RawBuffer", double, 1, 0) %buffer, i32 %index, i32 0, + double %0) + ret void +} + +define void @storev2f64(<2 x double> %0, i32 %index) { + ; CHECK: [[B:%.*]] = tail call target("dx.RawBuffer", <2 x double>, 1, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v2f64_1_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = tail call target("dx.RawBuffer", <2 x double>, 1, 0) + @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v2f64_1_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: call void @llvm.dx.resource.store.rawbuffer + ; CHECK63-SAME: 
target("dx.RawBuffer", <2 x double>, 1, 0) [[B]], i32 %index, i32 0, <2 x double> %0) + + ; CHECK62: [[SD:%.*]] = call { <2 x i32>, <2 x i32> } + ; CHECK62-SAME: @llvm.dx.splitdouble.v2i32(<2 x double> %0) + ; CHECK62: [[Lo:%.*]] = extractvalue { <2 x i32>, <2 x i32> } [[SD]], 0 + ; CHECK62: [[Hi:%.*]] = extractvalue { <2 x i32>, <2 x i32> } [[SD]], 1 + ; CHECK62: [[Vec:%.*]] = shufflevector <2 x i32> [[Lo]], <2 x i32> [[Hi]], <4 x i32> + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v2f64_1_0t.v4i32( + ; CHECK62-SAME: target("dx.RawBuffer", <2 x double>, 1, 0) [[B]], i32 %index, i32 0, <4 x i32> [[Vec]]) + call void @llvm.dx.resource.store.rawbuffer( + target("dx.RawBuffer", <2 x double>, 1, 0) %buffer, i32 %index, i32 0, + <2 x double> %0) + ret void +} + +define void @storev3f64(<3 x double> %0, i32 %index) { + ; CHECK: [[Buf:%.*]] = tail call target("dx.RawBuffer", <3 x double>, 1, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v3f64_1_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = tail call target("dx.RawBuffer", <3 x double>, 1, 0) + @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v3f64_1_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: call void @llvm.dx.resource.store.rawbuffer + ; CHECK63-SAME: target("dx.RawBuffer", <3 x double>, 1, 0) [[B]], i32 %index, i32 0, <3 x double> %0) + + ; CHECK62: [[A:%.*]] = call { <3 x i32>, <3 x i32> } @llvm.dx.splitdouble.v3i32(<3 x double> %0) + ; CHECK62: [[B:%.*]] = extractvalue { <3 x i32>, <3 x i32> } [[A]], 0 + ; CHECK62: [[C:%.*]] = extractvalue { <3 x i32>, <3 x i32> } [[A]], 1 + ; CHECK62: [[D:%.*]] = shufflevector <3 x i32> [[B]], <3 x i32> [[C]], <6 x i32> + ; CHECK62: [[E:%.*]] = shufflevector <6 x i32> [[D]], <6 x i32> poison, <4 x i32> + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v3f64_1_0t.v4i32(target("dx.RawBuffer", <3 x double>, 1, 0) [[Buf]], i32 %index, i32 0, <4 x i32> [[E]]) + ; CHECK62: [[F:%.*]] = shufflevector <6 x i32> [[D]], <6 x i32> poison, <2 x i32> + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v3f64_1_0t.v2i32(target("dx.RawBuffer", <3 x double>, 1, 0) [[Buf]], i32 %index, i32 16, <2 x i32> [[F]]) + call void @llvm.dx.resource.store.rawbuffer( + target("dx.RawBuffer", <3 x double>, 1, 0) %buffer, i32 %index, i32 0, + <3 x double> %0) + ret void +} + +define void @storev4f64(<4 x double> %0, i32 %index) { + ; CHECK: [[Buf:%.*]] = tail call target("dx.RawBuffer", <4 x double>, 1, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v4f64_1_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = tail call target("dx.RawBuffer", <4 x double>, 1, 0) + @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v4f64_1_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: call void @llvm.dx.resource.store.rawbuffer + ; CHECK63-SAME: target("dx.RawBuffer", <4 x double>, 1, 0) [[B]], i32 %index, i32 0, <4 x double> %0) + + ; CHECK62: [[A:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.dx.splitdouble.v4i32(<4 x double> %0) + ; CHECK62: [[B:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[A]], 0 + ; CHECK62: [[C:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[A]], 1 + ; CHECK62: [[D:%.*]] = shufflevector <4 x i32> [[B]], <4 x i32> [[C]], <8 x i32> + ; CHECK62: [[E:%.*]] = shufflevector <8 x i32> [[D]], <8 x i32> poison, <4 x i32> + ; 
CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v4f64_1_0t.v4i32(target("dx.RawBuffer", <4 x double>, 1, 0) [[Buf]], i32 %index, i32 0, <4 x i32> [[E]]) + ; CHECK62: [[F:%.*]] = shufflevector <8 x i32> [[D]], <8 x i32> poison, <4 x i32> + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v4f64_1_0t.v4i32(target("dx.RawBuffer", <4 x double>, 1, 0) [[Buf]], i32 %index, i32 16, <4 x i32> [[F]]) + call void @llvm.dx.resource.store.rawbuffer( + target("dx.RawBuffer", <4 x double>, 1, 0) %buffer, i32 %index, i32 0, + <4 x double> %0) + ret void +} diff --git a/llvm/test/CodeGen/DirectX/RawBufferStoreInt64.ll b/llvm/test/CodeGen/DirectX/RawBufferStoreInt64.ll new file mode 100644 index 0000000000000..54ec4d2cd2fb7 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/RawBufferStoreInt64.ll @@ -0,0 +1,103 @@ +; RUN: opt -mtriple=dxil-pc-shadermodel6.2-compute -S -dxil-intrinsic-expansion %s | FileCheck %s --check-prefixes=CHECK,CHECK62 +; RUN: opt -mtriple=dxil-pc-shadermodel6.3-compute -S -dxil-intrinsic-expansion %s | FileCheck %s --check-prefixes=CHECK,CHECK63 + +define void @storei64(i64 %0, i32 %index) { + ; CHECK: [[Buf:%.*]] = tail call target("dx.RawBuffer", i64, 1, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i64_1_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = tail call target("dx.RawBuffer", i64, 1, 0) + @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i64_1_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: call void @llvm.dx.resource.store.rawbuffer + ; CHECK63-SAME: target("dx.RawBuffer", i64, 1, 0) [[Buf]], i32 %index, i32 0, i64 %0) + + ; check we split the i64 and store the lo and hi bits + ; CHECK62: [[A:%.*]] = trunc i64 %0 to i32 + ; CHECK62: [[B:%.*]] = lshr i64 %0, 32 + ; CHECK62: [[C:%.*]] = trunc i64 [[B]] to i32 + ; CHECK62: [[Vec1:%.*]] = insertelement <2 x i32> poison, i32 [[A]], i32 0 + ; CHECK62: [[Vec2:%.*]] = insertelement <2 x i32> [[Vec1]], i32 [[C]], i32 1 + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_i64_1_0t.v2i32( + ; CHECK62-SAME: target("dx.RawBuffer", i64, 1, 0) [[Buf]], i32 %index, i32 0, <2 x i32> [[Vec2]]) + call void @llvm.dx.resource.store.rawbuffer( + target("dx.RawBuffer", i64, 1, 0) %buffer, i32 %index, i32 0, + i64 %0) + ret void +} + +define void @storev2i64(<2 x i64> %0, i32 %index) { + ; CHECK: [[Buf:%.*]] = tail call target("dx.RawBuffer", <2 x i64>, 1, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v2i64_1_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = tail call target("dx.RawBuffer", <2 x i64>, 1, 0) + @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v2i64_1_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: call void @llvm.dx.resource.store.rawbuffer + ; CHECK63-SAME: target("dx.RawBuffer", <2 x i64>, 1, 0) [[Buf]], i32 %index, i32 0, <2 x i64> %0) + + ; CHECK62: [[A:%.*]] = trunc <2 x i64> %0 to <2 x i32> + ; CHECK62: [[B:%.*]] = lshr <2 x i64> %0, splat (i64 32) + ; CHECK62: [[C:%.*]] = trunc <2 x i64> [[B]] to <2 x i32> + ; CHECK62: [[Vec:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[C]], <4 x i32> + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v2i64_1_0t.v4i32( + ; CHECK62-SAME: target("dx.RawBuffer", <2 x i64>, 1, 0) [[Buf]], i32 %index, i32 0, <4 x i32> [[Vec]]) + call void 
@llvm.dx.resource.store.rawbuffer( + target("dx.RawBuffer", <2 x i64>, 1, 0) %buffer, i32 %index, i32 0, + <2 x i64> %0) + ret void +} + +define void @storev3i64(<3 x i64> %0, i32 %index) { + ; CHECK: [[Buf:%.*]] = tail call target("dx.RawBuffer", <3 x i64>, 1, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v3i64_1_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = tail call target("dx.RawBuffer", <3 x i64>, 1, 0) + @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v3i64_1_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + + ; check we don't modify the code in sm6.3 or later + ; CHECK63: call void @llvm.dx.resource.store.rawbuffer + ; CHECK63-SAME: target("dx.RawBuffer", <3 x i64>, 1, 0) [[Buf]], i32 %index, i32 0, <3 x i64> %0) + + ; CHECK62: [[A:%.*]] = trunc <3 x i64> %0 to <3 x i32> + ; CHECK62: [[B:%.*]] = lshr <3 x i64> %0, splat (i64 32) + ; CHECK62: [[C:%.*]] = trunc <3 x i64> [[B]] to <3 x i32> + ; CHECK62: [[D:%.*]] = shufflevector <3 x i32> [[A]], <3 x i32> [[C]], <6 x i32> + ; CHECK62: [[E:%.*]] = shufflevector <6 x i32> [[D]], <6 x i32> poison, <4 x i32> + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v3i64_1_0t.v4i32(target("dx.RawBuffer", <3 x i64>, 1, 0) [[Buf]], i32 %index, i32 0, <4 x i32> [[E]]) + ; CHECK62: [[F:%.*]] = shufflevector <6 x i32> [[D]], <6 x i32> poison, <2 x i32> + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v3i64_1_0t.v2i32(target("dx.RawBuffer", <3 x i64>, 1, 0) [[Buf]], i32 %index, i32 16, <2 x i32> [[F]]) + call void @llvm.dx.resource.store.rawbuffer( + target("dx.RawBuffer", <3 x i64>, 1, 0) %buffer, i32 %index, i32 0, + <3 x i64> %0) + ret void +} + +define void @storev4i64(<4 x i64> %0, i32 %index) { + ; CHECK: [[Buf:%.*]] = tail call target("dx.RawBuffer", <4 x i64>, 1, 0) + ; CHECK-SAME: @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v4i64_1_0t( + ; CHECK-SAME: i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + %buffer = tail call target("dx.RawBuffer", <4 x i64>, 1, 0) + @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v4i64_1_0t( + i32 0, i32 0, i32 1, i32 0, i1 false, ptr null) + ; check we don't modify the code in sm6.3 or later + ; CHECK63: call void @llvm.dx.resource.store.rawbuffer + ; CHECK63-SAME: target("dx.RawBuffer", <4 x i64>, 1, 0) [[Buf]], i32 %index, i32 0, <4 x i64> %0) + + ; CHECK62: [[A:%.*]] = trunc <4 x i64> %0 to <4 x i32> + ; CHECK62: [[B:%.*]] = lshr <4 x i64> %0, splat (i64 32) + ; CHECK62: [[C:%.*]] = trunc <4 x i64> [[B]] to <4 x i32> + ; CHECK62: [[D:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> [[C]], <8 x i32> + ; CHECK62: [[E:%.*]] = shufflevector <8 x i32> [[D]], <8 x i32> poison, <4 x i32> + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v4i64_1_0t.v4i32(target("dx.RawBuffer", <4 x i64>, 1, 0) [[Buf]], i32 %index, i32 0, <4 x i32> [[E]]) + ; CHECK62: [[F:%.*]] = shufflevector <8 x i32> [[D]], <8 x i32> poison, <4 x i32> + ; CHECK62: call void @llvm.dx.resource.store.rawbuffer.tdx.RawBuffer_v4i64_1_0t.v4i32(target("dx.RawBuffer", <4 x i64>, 1, 0) [[Buf]], i32 %index, i32 16, <4 x i32> [[F]]) + call void @llvm.dx.resource.store.rawbuffer( + target("dx.RawBuffer", <4 x i64>, 1, 0) %buffer, i32 %index, i32 0, + <4 x i64> %0) + ret void +}
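
For reference, a minimal sketch of the SM6.2 store-side expansion that the storev4i64 checks above describe, assuming a buffer handle %buffer, an element index %index, a byte offset of 0, and a value %v (all names are illustrative, and the intrinsic mangling suffixes are abbreviated):

    %lo = trunc <4 x i64> %v to <4 x i32>
    %shifted = lshr <4 x i64> %v, splat (i64 32)
    %hi = trunc <4 x i64> %shifted to <4 x i32>
    %interleaved = shufflevector <4 x i32> %lo, <4 x i32> %hi, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
    %part0 = shufflevector <8 x i32> %interleaved, <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
    call void @llvm.dx.resource.store.rawbuffer(target("dx.RawBuffer", <4 x i64>, 1, 0) %buffer, i32 %index, i32 0, <4 x i32> %part0)
    %part1 = shufflevector <8 x i32> %interleaved, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
    call void @llvm.dx.resource.store.rawbuffer(target("dx.RawBuffer", <4 x i64>, 1, 0) %buffer, i32 %index, i32 16, <4 x i32> %part1)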