From 10feb40331374c2adb1ee0cf58cca1438dae5706 Mon Sep 17 00:00:00 2001 From: serge-sans-paille Date: Fri, 8 Nov 2024 12:21:52 +0100 Subject: [PATCH 1/2] [llvm] Fix handling of very large allocation in llvm.objectsize expansion The internal structure used to carry intermediate computations holds signed values. If an object size happens to overflow signed values, we can get invalid results, so make sure this situation never happens. This is not very limiting, as static allocation of such large values should scarcely happen. --- llvm/include/llvm/Analysis/MemoryBuiltins.h | 4 ++ llvm/lib/Analysis/MemoryBuiltins.cpp | 37 ++++++++++-- .../builtin-object-size-phi.ll | 14 +++++ .../objectsize_basic.ll | 56 +++++++++++++++++++ 4 files changed, 105 insertions(+), 6 deletions(-) diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h index c3b11cdf5cf5d..3e2f79550d91d 100644 --- a/llvm/include/llvm/Analysis/MemoryBuiltins.h +++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h @@ -223,6 +223,10 @@ struct SizeOffsetAPInt : public SizeOffsetType { /// OffsetSpan - Used internally by \p ObjectSizeOffsetVisitor. Represents a /// point in memory as a pair of allocated bytes before and after it. +/// +/// \c Before and \c After fields are signed values. It makes it possible to +/// represent out-of-bound access, e.g. as a result of a GEP, at the expense of +/// not being able to represent very large allocations. struct OffsetSpan { APInt Before; /// Number of allocated bytes before this point. APInt After; /// Number of allocated bytes after this point. 
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp index d9769c48f2656..0ee5029b2a2ed 100644 --- a/llvm/lib/Analysis/MemoryBuiltins.cpp +++ b/llvm/lib/Analysis/MemoryBuiltins.cpp @@ -668,10 +668,14 @@ STATISTIC(ObjectVisitorArgument, STATISTIC(ObjectVisitorLoad, "Number of load instructions with unsolved size and offset"); +/// Align \p Size according to \p Alignment. If \p Size is greater than +/// getSignedMaxValue(), set it as unknown as we can only represent signed value +/// in OffsetSpan. APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) { if (Options.RoundToAlign && Alignment) - return APInt(IntTyBits, alignTo(Size.getZExtValue(), *Alignment)); - return Size; + Size = APInt(IntTyBits, alignTo(Size.getZExtValue(), *Alignment)); + + return Size.isNegative() ? APInt() : Size; } ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL, @@ -733,8 +737,19 @@ OffsetSpan ObjectSizeOffsetVisitor::computeImpl(Value *V) { ORT.After = APInt(); } // If the computed bound is "unknown" we cannot add the stripped offset. - return {(ORT.knownBefore() ? ORT.Before + Offset : ORT.Before), - (ORT.knownAfter() ? 
ORT.After - Offset : ORT.After)}; + if (ORT.knownBefore()) { + bool Overflow; + ORT.Before = ORT.Before.sadd_ov(Offset, Overflow); + if (Overflow) + ORT.Before = APInt(); + } + if (ORT.knownAfter()) { + bool Overflow; + ORT.After = ORT.After.ssub_ov(Offset, Overflow); + if (Overflow) + ORT.After = APInt(); + } + return ORT; } OffsetSpan ObjectSizeOffsetVisitor::computeValue(Value *V) { @@ -780,6 +795,7 @@ OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) { if (!isUIntN(IntTyBits, ElemSize.getKnownMinValue())) return ObjectSizeOffsetVisitor::unknown(); APInt Size(IntTyBits, ElemSize.getKnownMinValue()); + if (!I.isArrayAllocation()) return OffsetSpan(Zero, align(Size, I.getAlign())); @@ -791,6 +807,7 @@ OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) { bool Overflow; Size = Size.umul_ov(NumElems, Overflow); + return Overflow ? ObjectSizeOffsetVisitor::unknown() : OffsetSpan(Zero, align(Size, I.getAlign())); } @@ -810,8 +827,12 @@ OffsetSpan ObjectSizeOffsetVisitor::visitArgument(Argument &A) { } OffsetSpan ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) { - if (std::optional Size = getAllocSize(&CB, TLI)) + if (std::optional Size = getAllocSize(&CB, TLI)) { + // Very large unsigned value cannot be represented as OffsetSpan. 
+ if (Size->isNegative()) + return ObjectSizeOffsetVisitor::unknown(); return OffsetSpan(Zero, *Size); + } return ObjectSizeOffsetVisitor::unknown(); } @@ -944,7 +965,11 @@ OffsetSpan ObjectSizeOffsetVisitor::findLoadOffsetRange( if (!C) return Unknown(); - return Known({APInt(C->getValue().getBitWidth(), 0), C->getValue()}); + APInt CSize = C->getValue(); + if (CSize.isNegative()) + return Unknown(); + + return Known({APInt(CSize.getBitWidth(), 0), CSize}); } return Unknown(); diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll index 2974228e6a830..fbb37560fb2d0 100644 --- a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll +++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll @@ -118,6 +118,20 @@ if.end: ret i64 %size } +define dso_local i64 @pick_max_large(i1 %c) local_unnamed_addr { +; CHECK-LABEL: @pick_max_large( +; CHECK-NEXT: [[BUFFER:%.*]] = alloca i8, i64 -7, align 1 +; CHECK-NEXT: [[S:%.*]] = select i1 [[C:%.*]], ptr null, ptr [[BUFFER]] +; CHECK-NEXT: ret i64 -1 +; + %buffer = alloca i8, i64 -7 ; Actually a very large positive integer + %s = select i1 %c, ptr null, ptr %buffer + %objsize = tail call i64 @llvm.objectsize.i64.p0(ptr %s, i1 false, i1 false, i1 false) + ret i64 %objsize + +} + + define i64 @pick_negative_offset(i32 %n) { ; CHECK-LABEL: @pick_negative_offset( ; CHECK-NEXT: entry: diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll b/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll index 568070a866069..ae7399a1eafe5 100644 --- a/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll +++ b/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll @@ -195,6 +195,62 @@ define i64 @out_of_bound_gep() { ret i64 %objsize } +define i64 @wrapping_gep() { +; CHECK-LABEL: @wrapping_gep( +; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 4, align 1 +; 
CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807 +; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 9223372036854775807 +; CHECK-NEXT: ret i64 0 +; + %obj = alloca i8, i64 4 + %slide = getelementptr i8, ptr %obj, i64 9223372036854775807 + %slide.bis = getelementptr i8, ptr %slide, i64 9223372036854775807 + %objsize = call i64 @llvm.objectsize.i64(ptr %slide.bis, i1 false, i1 false, i1 false) + ret i64 %objsize +} + +define i64 @wrapping_gep_neg() { +; CHECK-LABEL: @wrapping_gep_neg( +; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 9223372036854775807, align 1 +; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807 +; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 3 +; CHECK-NEXT: [[SLIDE_TER:%.*]] = getelementptr i8, ptr [[SLIDE_BIS]], i64 -4 +; CHECK-NEXT: ret i64 1 +; + %obj = alloca i8, i64 9223372036854775807 + %slide = getelementptr i8, ptr %obj, i64 9223372036854775807 + %slide.bis = getelementptr i8, ptr %slide, i64 3 + %slide.ter = getelementptr i8, ptr %slide.bis, i64 -4 + %objsize = call i64 @llvm.objectsize.i64(ptr %slide.ter, i1 false, i1 false, i1 false) + ret i64 %objsize +} + +; We don't analyze allocations larger than platform's ptrdiff_t +define i64 @large_alloca() { +; CHECK-LABEL: @large_alloca( +; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 -9223372036854775808, align 1 +; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807 +; CHECK-NEXT: ret i64 -1 +; + %obj = alloca i8, i64 9223372036854775808 + %slide = getelementptr i8, ptr %obj, i64 9223372036854775807 + %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false) + ret i64 %objsize +} + +; We don't analyze allocations larger than platform's ptrdiff_t +define i64 @large_malloc() { +; CHECK-LABEL: @large_malloc( +; CHECK-NEXT: [[OBJ:%.*]] = call ptr @malloc(i64 -9223372036854775808) +; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr 
[[OBJ]], i64 9223372036854775807 +; CHECK-NEXT: ret i64 -1 +; + %obj = call ptr @malloc(i64 9223372036854775808) + %slide = getelementptr i8, ptr %obj, i64 9223372036854775807 + %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false) + ret i64 %objsize +} + define i64 @out_of_bound_negative_gep() { ; CHECK-LABEL: @out_of_bound_negative_gep( ; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i32 4, align 1 From 677b4f097e85d9b57fc508fcf10b806e5291a7b1 Mon Sep 17 00:00:00 2001 From: serge-sans-paille Date: Tue, 12 Nov 2024 21:00:46 +0100 Subject: [PATCH 2/2] [llvm] Fix behavior of llvm.objectsize in presence of negative offset When an object is located before its allocation point, e.g. char a[10]; char* b = &a[-3]; If we ask for the maximum amount of memory addressable from `b` through __builtin_object_size(b, 0) It is better to return -1, even if we actually know everything about the allocation point, than to return 0, which we currently do and that leads to sanitizers raising invalid/incorrect diagnostics. 
--- llvm/lib/Analysis/MemoryBuiltins.cpp | 16 +++++++-- .../builtin-object-size-phi.ll | 34 +++++++++++++++++++ .../objectsize_basic.ll | 22 +++++++++--- 3 files changed, 66 insertions(+), 6 deletions(-) diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp index 0ee5029b2a2ed..cd8594d670502 100644 --- a/llvm/lib/Analysis/MemoryBuiltins.cpp +++ b/llvm/lib/Analysis/MemoryBuiltins.cpp @@ -564,8 +564,13 @@ Value *llvm::getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI) { static APInt getSizeWithOverflow(const SizeOffsetAPInt &Data) { APInt Size = Data.Size; APInt Offset = Data.Offset; - if (Offset.isNegative() || Size.ult(Offset)) - return APInt(Size.getBitWidth(), 0); + + assert(!Offset.isNegative() && + "size for a pointer before the allocated object is ambiguous"); + + if (Size.ult(Offset)) + return APInt::getZero(Size.getBitWidth()); + return Size - Offset; } @@ -749,6 +754,13 @@ OffsetSpan ObjectSizeOffsetVisitor::computeImpl(Value *V) { if (Overflow) ORT.After = APInt(); } + + // We end up pointing on a location that's outside of the original object. + // This is UB, and we'd rather return an empty location then. 
+ if (ORT.knownBefore() && ORT.Before.isNegative()) { + ORT.Before = APInt::getZero(ORT.Before.getBitWidth()); + ORT.After = APInt::getZero(ORT.Before.getBitWidth()); + } return ORT; } diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll index fbb37560fb2d0..cba4da073ff2a 100644 --- a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll +++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll @@ -131,6 +131,40 @@ define dso_local i64 @pick_max_large(i1 %c) local_unnamed_addr { } +define dso_local i64 @pick_max_one_oob(i1 %c0, i1 %c1) { +; CHECK-LABEL: @pick_max_one_oob( +; CHECK-NEXT: [[P:%.*]] = alloca [2 x i8], align 1 +; CHECK-NEXT: br i1 [[C0:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] +; CHECK: if.then: +; CHECK-NEXT: [[P_THEN:%.*]] = getelementptr inbounds [2 x i8], ptr [[P]], i64 0, i64 1 +; CHECK-NEXT: br label [[IF_END:%.*]] +; CHECK: if.else: +; CHECK-NEXT: [[P_ELSE:%.*]] = getelementptr inbounds [2 x i8], ptr [[P]], i64 0, i64 -1 +; CHECK-NEXT: br label [[IF_END]] +; CHECK: if.end: +; CHECK-NEXT: [[P_END:%.*]] = phi ptr [ [[P_ELSE]], [[IF_ELSE]] ], [ [[P_THEN]], [[IF_THEN]] ] +; CHECK-NEXT: [[OBJSIZE:%.*]] = select i1 [[C1:%.*]], i64 1, i64 0 +; CHECK-NEXT: ret i64 [[OBJSIZE]] +; + %p = alloca [2 x i8], align 1 + br i1 %c0, label %if.then, label %if.else + +if.then: + %p.then = getelementptr inbounds [2 x i8], ptr %p, i64 0, i64 1 + br label %if.end + +if.else: + %p.else = getelementptr inbounds [2 x i8], ptr %p, i64 0, i64 -1 + br label %if.end + +if.end: + %p.end = phi ptr [%p.else, %if.else], [%p.then, %if.then] + %objsize.max = call i64 @llvm.objectsize.i64.p0(ptr %p.end, i1 false, i1 true, i1 false) + %objsize.min = call i64 @llvm.objectsize.i64.p0(ptr %p.end, i1 true, i1 true, i1 false) + %objsize = select i1 %c1, i64 %objsize.max, i64 %objsize.min + ret i64 %objsize +} + define i64 
@pick_negative_offset(i32 %n) { ; CHECK-LABEL: @pick_negative_offset( diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll b/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll index ae7399a1eafe5..212b4a432db3c 100644 --- a/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll +++ b/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll @@ -195,9 +195,23 @@ define i64 @out_of_bound_gep() { ret i64 %objsize } -define i64 @wrapping_gep() { +define i64 @wrapping_gep(i1 %c) { ; CHECK-LABEL: @wrapping_gep( ; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 4, align 1 +; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 -9223372036854775807 +; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 -9223372036854775808 +; CHECK-NEXT: ret i64 3 +; + %obj = alloca i8, i64 4 + %slide = getelementptr i8, ptr %obj, i64 9223372036854775809 + %slide.bis = getelementptr i8, ptr %slide, i64 9223372036854775808 + %objsize = call i64 @llvm.objectsize.i64(ptr %slide.bis, i1 false, i1 false, i1 false) + ret i64 %objsize +} + +define i64 @wrapping_gep_neg(i1 %c) { +; CHECK-LABEL: @wrapping_gep_neg( +; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 4, align 1 ; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807 ; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 9223372036854775807 ; CHECK-NEXT: ret i64 0 @@ -209,8 +223,8 @@ define i64 @wrapping_gep() { ret i64 %objsize } -define i64 @wrapping_gep_neg() { -; CHECK-LABEL: @wrapping_gep_neg( +define i64 @wrapping_gep_large_alloc(i1 %c) { +; CHECK-LABEL: @wrapping_gep_large_alloc( ; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 9223372036854775807, align 1 ; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807 ; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 3 @@ -251,7 +265,7 @@ define i64 @large_malloc() { ret i64 %objsize } -define i64 @out_of_bound_negative_gep() { 
+define i64 @out_of_bound_negative_gep(i1 %c) { ; CHECK-LABEL: @out_of_bound_negative_gep( ; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i32 4, align 1 ; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i8 -8