[Asan][RISCV] Teach AddressSanitizer to support indexed load/store. #100930
base: main
Conversation
@llvm/pr-subscribers-llvm-transforms @llvm/pr-subscribers-llvm-analysis

Author: Yeting Kuo (yetingk)

Changes

This is based on #97070.

Patch is 217.80 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/100930.diff

10 Files Affected:
diff --git a/llvm/include/llvm/Analysis/MemoryRefInfo.h b/llvm/include/llvm/Analysis/MemoryRefInfo.h
new file mode 100644
index 0000000000000..e26348d1e95f3
--- /dev/null
+++ b/llvm/include/llvm/Analysis/MemoryRefInfo.h
@@ -0,0 +1,57 @@
+//===--------- Definition of the MemoryRefInfo class -*- C++ -*------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines MemoryRefInfo class that is used when getting
+// the information of a memory reference instruction.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_ANALYSIS_MEMORYREFINFO_H
+#define LLVM_ANALYSIS_MEMORYREFINFO_H
+
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/TypeSize.h"
+
+namespace llvm {
+class MemoryRefInfo {
+public:
+ Use *PtrUse = nullptr;
+ bool IsWrite;
+ Type *OpType;
+ TypeSize TypeStoreSize = TypeSize::getFixed(0);
+ MaybeAlign Alignment;
+ // The mask Value, if we're looking at a masked load/store.
+ Value *MaybeMask;
+ // The EVL Value, if we're looking at a vp intrinsic.
+ Value *MaybeEVL;
+ // The Stride Value, if we're looking at a strided load/store.
+ Value *MaybeStride;
+ // The Offset Value, if we're looking at a indexed load/store. The
+ // offset actually means byte-offset instead of array index.
+ Value *MaybeOffset;
+
+ MemoryRefInfo() = default;
+ MemoryRefInfo(Instruction *I, unsigned OperandNo, bool IsWrite,
+ class Type *OpType, MaybeAlign Alignment,
+ Value *MaybeMask = nullptr, Value *MaybeEVL = nullptr,
+ Value *MaybeStride = nullptr, Value *MaybeOffset = nullptr)
+ : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
+ MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride),
+ MaybeOffset(MaybeOffset) {
+ const DataLayout &DL = I->getDataLayout();
+ TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
+ PtrUse = &I->getOperandUse(OperandNo);
+ }
+
+ Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
+ Value *getPtr() { return PtrUse->get(); }
+ operator bool() { return PtrUse != nullptr; }
+};
+
+} // namespace llvm
+#endif
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index cf378008e4c7c..59a6a0390e98c 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -23,6 +23,7 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/Analysis/MemoryRefInfo.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
@@ -955,6 +956,10 @@ class TargetTransformInfo {
MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
bool IsZeroCmp) const;
+ // Add MemoryRefInfo of Intrinsic \p II into array \p Interesting.
+ bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+ IntrinsicInst *II) const;
+
/// Should the Select Optimization pass be enabled and ran.
bool enableSelectOptimize() const;
@@ -1944,6 +1949,8 @@ class TargetTransformInfo::Concept {
virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
virtual MemCmpExpansionOptions
enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0;
+ virtual bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+ IntrinsicInst *II) const = 0;
virtual bool enableSelectOptimize() = 0;
virtual bool shouldTreatInstructionLikeSelect(const Instruction *I) = 0;
virtual bool enableInterleavedAccessVectorization() = 0;
@@ -2500,6 +2507,12 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
bool IsZeroCmp) const override {
return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
}
+
+ bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+ IntrinsicInst *II) const override {
+ return Impl.getMemoryRefInfo(Interesting, II);
+ }
+
bool enableSelectOptimize() override {
return Impl.enableSelectOptimize();
}
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 47fde08735c0c..eccb8d4157765 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -394,6 +394,11 @@ class TargetTransformInfoImplBase {
return {};
}
+ bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+ IntrinsicInst *II) const {
+ return false;
+ }
+
bool enableSelectOptimize() const { return true; }
bool shouldTreatInstructionLikeSelect(const Instruction *I) {
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
index 9fe2716220e83..f7bd36c2def03 100644
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -14,6 +14,7 @@
#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
#include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/MemoryRefInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
@@ -22,37 +23,6 @@
namespace llvm {
-class InterestingMemoryOperand {
-public:
- Use *PtrUse;
- bool IsWrite;
- Type *OpType;
- TypeSize TypeStoreSize = TypeSize::getFixed(0);
- MaybeAlign Alignment;
- // The mask Value, if we're looking at a masked load/store.
- Value *MaybeMask;
- // The EVL Value, if we're looking at a vp intrinsic.
- Value *MaybeEVL;
- // The Stride Value, if we're looking at a strided load/store.
- Value *MaybeStride;
-
- InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
- class Type *OpType, MaybeAlign Alignment,
- Value *MaybeMask = nullptr,
- Value *MaybeEVL = nullptr,
- Value *MaybeStride = nullptr)
- : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
- MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) {
- const DataLayout &DL = I->getDataLayout();
- TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
- PtrUse = &I->getOperandUse(OperandNo);
- }
-
- Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
-
- Value *getPtr() { return PtrUse->get(); }
-};
-
// Get AddressSanitizer parameters.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
bool IsKasan, uint64_t *ShadowBase,
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 693f7a5bb7af5..f5adff1787b11 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -622,6 +622,11 @@ TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}
+bool TargetTransformInfo::getMemoryRefInfo(
+ SmallVectorImpl<MemoryRefInfo> &Interesting, IntrinsicInst *II) const {
+ return TTIImpl->getMemoryRefInfo(Interesting, II);
+}
+
bool TargetTransformInfo::enableSelectOptimize() const {
return TTIImpl->enableSelectOptimize();
}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index f9eef60f77b7a..26e412e019fe0 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -13,7 +13,9 @@
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include <cmath>
#include <optional>
@@ -36,6 +38,105 @@ static cl::opt<unsigned> SLPMaxVF(
"exclusively by SLP vectorizer."),
cl::Hidden);
+bool RISCVTTIImpl::getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+ IntrinsicInst *II) const {
+ const DataLayout &DL = getDataLayout();
+ Intrinsic::ID IntNo = II->getIntrinsicID();
+ LLVMContext &C = II->getContext();
+ Type *XLenIntTy = IntegerType::get(C, ST->getXLen());
+ IRBuilder<> IB(II);
+ bool HasMask = false;
+
+ switch (IntNo) {
+ case Intrinsic::riscv_vle_mask:
+ case Intrinsic::riscv_vse_mask:
+ HasMask = true;
+ [[fallthrough]];
+ case Intrinsic::riscv_vle:
+ case Intrinsic::riscv_vse: {
+ bool IsWrite = II->getType()->isVoidTy();
+ Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
+ const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
+ unsigned VLIndex = RVVIInfo->VLOperand;
+ unsigned PtrOperandNo = VLIndex - 1 - HasMask;
+ MaybeAlign Alignment =
+ II->getArgOperand(PtrOperandNo)->getPointerAlignment(DL);
+ Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C));
+ Value *Mask = ConstantInt::get(MaskType, 1);
+ if (HasMask)
+ Mask = II->getArgOperand(VLIndex - 1);
+ Value *EVL = II->getArgOperand(VLIndex);
+ Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Alignment, Mask,
+ EVL);
+ return true;
+ }
+ case Intrinsic::riscv_vlse_mask:
+ case Intrinsic::riscv_vsse_mask:
+ HasMask = true;
+ [[fallthrough]];
+ case Intrinsic::riscv_vlse:
+ case Intrinsic::riscv_vsse: {
+ bool IsWrite = II->getType()->isVoidTy();
+ Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
+ const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
+ unsigned VLIndex = RVVIInfo->VLOperand;
+ unsigned PtrOperandNo = VLIndex - 2 - HasMask;
+ MaybeAlign Alignment =
+ II->getArgOperand(PtrOperandNo)->getPointerAlignment(DL);
+
+ Value *Stride = II->getArgOperand(PtrOperandNo + 1);
+ // Use the pointer alignment as the element alignment if the stride is a
+ // multiple of the pointer alignment. Otherwise, the element alignment
+ // should be Align(1).
+ unsigned PointerAlign = Alignment.valueOrOne().value();
+ if (!isa<ConstantInt>(Stride) ||
+ cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
+ Alignment = Align(1);
+
+ Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C));
+ Value *Mask = ConstantInt::get(MaskType, 1);
+ if (HasMask)
+ Mask = II->getArgOperand(VLIndex - 1);
+ Value *EVL = II->getArgOperand(VLIndex);
+ Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Alignment, Mask,
+ EVL, Stride);
+ return true;
+ }
+ case Intrinsic::riscv_vloxei_mask:
+ case Intrinsic::riscv_vluxei_mask:
+ case Intrinsic::riscv_vsoxei_mask:
+ case Intrinsic::riscv_vsuxei_mask:
+ HasMask = true;
+ [[fallthrough]];
+ case Intrinsic::riscv_vloxei:
+ case Intrinsic::riscv_vluxei:
+ case Intrinsic::riscv_vsoxei:
+ case Intrinsic::riscv_vsuxei: {
+ bool IsWrite = II->getType()->isVoidTy();
+ Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
+ const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
+ unsigned VLIndex = RVVIInfo->VLOperand;
+ unsigned PtrOperandNo = VLIndex - 2 - HasMask;
+ // RVV indexed loads/stores zero-extend offset operands which are narrower
+ // than XLEN to XLEN.
+ Value *OffsetOp = II->getArgOperand(PtrOperandNo + 1);
+ Type *OffsetTy = OffsetOp->getType();
+ if (OffsetTy->getScalarType()->getIntegerBitWidth() < ST->getXLen()) {
+ VectorType *OrigType = cast<VectorType>(OffsetTy);
+ Type *ExtendTy = VectorType::get(XLenIntTy, OrigType);
+ OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
+ }
+ Value *Mask = ConstantInt::get(OffsetTy->getWithNewBitWidth(1), 1);
+ if (HasMask)
+ Mask = II->getArgOperand(VLIndex - 1);
+ Value *EVL = II->getArgOperand(VLIndex);
+ Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Align(1), Mask, EVL,
+ /* Stride */ nullptr, OffsetOp);
+ }
+ }
+ return false;
+}
+
InstructionCost
RISCVTTIImpl::getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT,
TTI::TargetCostKind CostKind) {
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 9c37a4f6ec2d0..e9a4721c37890 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -60,6 +60,9 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
: BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
TLI(ST->getTargetLowering()) {}
+ bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
+ IntrinsicInst *II) const;
+
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const;
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 149866a8e4200..32639ebb73c59 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -29,6 +29,7 @@
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Demangle/Demangle.h"
@@ -754,12 +755,13 @@ struct AddressSanitizer {
bool isInterestingAlloca(const AllocaInst &AI);
bool ignoreAccess(Instruction *Inst, Value *Ptr);
- void getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+ void getMemoryRefInfos(Instruction *I,
+ SmallVectorImpl<MemoryRefInfo> &Interesting,
+ const TargetTransformInfo *TTI);
- void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
- InterestingMemoryOperand &O, bool UseCalls,
- const DataLayout &DL, RuntimeCallInserter &RTCI);
+ void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, MemoryRefInfo &O,
+ bool UseCalls, const DataLayout &DL,
+ RuntimeCallInserter &RTCI);
void instrumentPointerComparisonOrSubtraction(Instruction *I,
RuntimeCallInserter &RTCI);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
@@ -795,7 +797,8 @@ struct AddressSanitizer {
void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool suppressInstrumentationSiteForDebug(int &Instrumented);
- bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
+ bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
+ const TargetTransformInfo *TTI);
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
void markEscapedLocalAllocas(Function &F);
@@ -1264,7 +1267,8 @@ PreservedAnalyses AddressSanitizerPass::run(Module &M,
Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
Options.UseAfterScope, Options.UseAfterReturn);
const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
- Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
+ const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
+ Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
}
Modified |= ModuleSanitizer.instrumentModule(M);
if (!Modified)
@@ -1401,8 +1405,9 @@ bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
return false;
}
-void AddressSanitizer::getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+void AddressSanitizer::getMemoryRefInfos(
+ Instruction *I, SmallVectorImpl<MemoryRefInfo> &Interesting,
+ const TargetTransformInfo *TTI) {
// Do not instrument the load fetching the dynamic shadow address.
if (LocalDynamicShadow == I)
return;
@@ -1520,6 +1525,9 @@ void AddressSanitizer::getInterestingMemoryOperands(
break;
}
default:
+ if (auto *II = dyn_cast<IntrinsicInst>(I))
+ if (TTI->getMemoryRefInfo(Interesting, II))
+ return;
for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
ignoreAccess(I, CI->getArgOperand(ArgNo)))
@@ -1682,7 +1690,7 @@ void AddressSanitizer::instrumentMaskedLoadOrStore(
}
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
- InterestingMemoryOperand &O, bool UseCalls,
+ MemoryRefInfo &O, bool UseCalls,
const DataLayout &DL,
RuntimeCallInserter &RTCI) {
Value *Addr = O.getPtr();
@@ -1725,6 +1733,12 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
else
NumInstrumentedReads++;
+ if (O.MaybeOffset) {
+ Type *Ty = Type::getInt8Ty(*C);
+ IRBuilder IB(O.getInsn());
+ Addr = IB.CreateGEP(Ty, Addr, {O.MaybeOffset});
+ }
+
unsigned Granularity = 1 << Mapping.Scale;
if (O.MaybeMask) {
instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
@@ -2941,7 +2955,8 @@ bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
}
bool AddressSanitizer::instrumentFunction(Function &F,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI,
+ const TargetTransformInfo *TTI) {
if (F.empty())
return false;
if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
@@ -2979,7 +2994,7 @@ bool AddressSanitizer::instrumentFunction(Function &F,
// We want to instrument every address only once per basic block (unless there
// are calls between uses).
SmallPtrSet<Value *, 16> TempsToInstrument;
- SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
+ SmallVector<MemoryRefInfo, 16> OperandsToInstrument;
SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
SmallVector<Instruction *, 8> NoReturnCalls;
SmallVector<BasicBlock *, 16> AllBlocks;
@@ -2995,8 +3010,8 @@ bool AddressSanitizer::instrumentFunction(Function &F,
// Skip instructions inserted by another instrumentation.
if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
continue;
- SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
- getInterestingMemoryOperands(&Inst, InterestingOperands);
+ SmallVector<MemoryRefInfo, 1> InterestingOperands;
+ getMemoryRefInfos(&Inst, InterestingOperands, TTI);
if (!InterestingOperands.empty()) {
for (auto &Operand : InterestingOperands) {
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index a0e63bf12400e..47212b2679e81 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -339,16 +339,14 @@ class HWAddressSanitizer {
LoopInfo *LI);
bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
void instrumentMemIntrinsic(MemIntrinsic *MI);
- bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
- LoopInfo *LI);
+ bool instrument...
[truncated]
@llvm/pr-subscribers-backend-risc-v

Author: Yeting Kuo (yetingk)
This is based on llvm#100930. An RVV segment is an array of NF contiguous elements. This patch emulates an RVV segment as a large integer whose bitwidth equals NF * SEW. The reason not to emulate an RVV segment as some aggregate type is that vector types must use primitive types as element types. Another approach would be to create NF MemoryRefInfo objects; it would avoid creating pseudo types, but it also generates larger code for the asan check.
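As a rough illustration (a hypothetical sketch, not code from this patch; the helper name and signature are made up), the segment-as-wide-integer model could look like:

// Model one RVV segment (NF contiguous fields of SEW bits each) as a single
// wide integer element, so the sanitizer checks one NF * SEW-bit access per
// active lane instead of NF separate SEW-bit accesses.
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

Type *getSegmentElementTypeForAsan(LLVMContext &C, unsigned NF, unsigned SEW,
                                   ElementCount EC) {
  Type *WideInt = IntegerType::get(C, NF * SEW); // e.g. NF = 4, SEW = 32 -> i128
  return VectorType::get(WideInt, EC);           // one wide element per lane
}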
Value *Stride = II->getArgOperand(PtrOperandNo + 1);
// Use the pointer alignment as the element alignment if the stride is a
// multiple of the pointer alignment. Otherwise, the element alignment
// should be Align(1).
please add a comment why we do this as well
Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
unsigned VLIndex = RVVIInfo->VLOperand;
unsigned PtrOperandNo = VLIndex - 1 - HasMask;
maybe leave a comment about which arguments the relevant intrinsics take?
Updated. And please comment in #97070 until it is merged.
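For reference, a hedged sketch of the operand layout this indexing assumes (based on the usual RVV intrinsic signatures; the helper below is made up for illustration and is not part of the patch):

// Assumed operand layouts for the handled load intrinsics:
//   vle:         (passthru, ptr, vl)
//   vle_mask:    (passthru, ptr, mask, vl, policy)
//   vlse:        (passthru, ptr, stride, vl)
//   vlse_mask:   (passthru, ptr, stride, mask, vl, policy)
//   vluxei:      (passthru, ptr, index, vl)
//   vluxei_mask: (passthru, ptr, index, mask, vl, policy)
// The pointer sits before the optional stride/index operand, which precedes
// the optional mask and then VL, hence VLIndex - 1 - HasMask for unit-stride
// ops and VLIndex - 2 - HasMask for strided/indexed ops.
unsigned getPtrOperandNo(unsigned VLIndex, bool HasMask, bool HasStrideOrIndex) {
  return VLIndex - 1 - (HasMask ? 1 : 0) - (HasStrideOrIndex ? 1 : 0);
}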
if (OffsetTy->getScalarType()->getIntegerBitWidth() < ST->getXLen()) {
  VectorType *OrigType = cast<VectorType>(OffsetTy);
  Type *ExtendTy = VectorType::get(XLenIntTy, OrigType);
  OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
I'm not sure TTI should be modifying IR. It's normally used as a query interface. Can we store the un-extended offset and the extension type and let AddressSanitizer insert the extension?
I think maybe TTI could support a callback function to MemoryRefInfo to update its member.
Adding an offset-extension query makes MemoryRefInfo need three members for indexed load/store, since we would also need the extension kind (signed, unsigned) and the extension size. Do you think target-specific extension in generic code, like b0edafd, is acceptable?
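A rough sketch of what those extra members might look like (hypothetical names, not part of this patch):

#include "llvm/IR/Value.h"

// Enough information for AddressSanitizer to materialize the extension itself
// instead of TTI creating the zext while answering a query.
struct IndexedOffsetInfo {
  llvm::Value *MaybeOffset = nullptr; // un-extended offset operand
  bool OffsetIsSigned = false;        // extension kind (RVV indexed ops zero-extend)
  unsigned OffsetExtendBits = 0;      // width to extend to, e.g. XLEN
};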
Let's extract the refactoring into a separate PR after rebase; then we can review this one.
This comment should be on #97070.
…structions. This is based on llvm#97070. This patch helps AddressSanitizer support indexed/segment instructions. It adds a new member MaybeOffset to MemoryRefInfo to describe the offset between the pointer and the address of this memory reference.
This is based on #97070.
This patch helps AddressSanitizer support indexed/segment instructions.
It adds a new member MaybeOffset to MemoryRefInfo to describe the offset
between the pointer and the address of this memory reference.
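A minimal sketch of how that byte offset feeds the shadow check (the helper name is made up; the GEP mirrors the instrumentMop change in the diff above):

#include "llvm/Analysis/MemoryRefInfo.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// MaybeOffset is measured in bytes, so fold it into the checked address by
// indexing i8 elements from the base pointer.
static Value *adjustAddrForOffset(Value *Addr, MemoryRefInfo &O) {
  if (!O.MaybeOffset)
    return Addr;
  IRBuilder<> IB(O.getInsn());
  return IB.CreateGEP(IB.getInt8Ty(), Addr, {O.MaybeOffset});
}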