diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 840a5e3f31dfd..e3b77885c4efc 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -118,6 +118,9 @@ static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
 static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
 static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
 static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
+static const uint64_t kAIXShadowOffset32 = 0x40000000;
+// 64-bit AIX is not yet ready.
+static const uint64_t kAIXShadowOffset64 = 0x0a01000000000000ULL;
 static const uint64_t kWebAssemblyShadowOffset = 0;

 // The shadow memory space is dynamically allocated.
@@ -128,6 +131,8 @@ static const size_t kMaxStackMallocSize = 1 << 16; // 64K
 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

+static const uint32_t kAIXHighBits = 6;
+
 const char kAsanModuleCtorName[] = "asan.module_ctor";
 const char kAsanModuleDtorName[] = "asan.module_dtor";
 static const uint64_t kAsanCtorAndDtorPriority = 1;
@@ -463,11 +468,14 @@ namespace {

 /// This struct defines the shadow mapping using the rule:
 ///   shadow = (mem >> Scale) ADD-or-OR Offset.
+/// However, on 64-bit AIX, we use HighBits to reduce the mapped address space:
+///   shadow = ((mem << HighBits) >> (HighBits + Scale)) + Offset
 /// If InGlobal is true, then
 ///   extern char __asan_shadow[];
 ///   shadow = (mem >> Scale) + &__asan_shadow
 struct ShadowMapping {
   int Scale;
+  int HighBits;
   uint64_t Offset;
   bool OrShadowOffset;
   bool InGlobal;
@@ -487,6 +495,7 @@ static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
   bool IsLinux = TargetTriple.isOSLinux();
   bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                  TargetTriple.getArch() == Triple::ppc64le;
+  bool IsAIX = TargetTriple.isOSAIX();
   bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
   bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
   bool IsMIPSN32ABI = TargetTriple.isABIN32();
@@ -525,6 +534,8 @@ static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
       Mapping.Offset = kDynamicShadowSentinel;
     else if (IsWindows)
       Mapping.Offset = kWindowsShadowOffset32;
+    else if (IsAIX)
+      Mapping.Offset = kAIXShadowOffset32;
     else if (IsWasm)
       Mapping.Offset = kWebAssemblyShadowOffset;
     else
@@ -534,7 +545,9 @@ static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
     // space is always available.
     if (IsFuchsia)
       Mapping.Offset = 0;
-    else if (IsPPC64)
+    else if (IsAIX)
+      Mapping.Offset = kAIXShadowOffset64;
+    else if (IsPPC64 && !IsAIX)
       Mapping.Offset = kPPC64_ShadowOffset64;
     else if (IsSystemZ)
       Mapping.Offset = kSystemZ_ShadowOffset64;
@@ -596,13 +609,16 @@ static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
   // SystemZ, we could OR the constant in a single instruction, but it's more
   // efficient to load it once and use indexed addressing.
   Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
-                           !IsRISCV64 && !IsLoongArch64 &&
+                           !IsRISCV64 && !IsLoongArch64 && !IsAIX &&
                            !(Mapping.Offset & (Mapping.Offset - 1)) &&
                            Mapping.Offset != kDynamicShadowSentinel;
   bool IsAndroidWithIfuncSupport =
       IsAndroid && !TargetTriple.isAndroidVersionLT(21);
   Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

+  if (IsAIX && LongSize == 64)
+    Mapping.HighBits = kAIXHighBits;
+
   return Mapping;
 }

@@ -1380,7 +1396,11 @@ static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
   // Shadow >> scale
-  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
+  if (TargetTriple.isOSAIX() && TargetTriple.getArch() == Triple::ppc64)
+    Shadow = IRB.CreateLShr(IRB.CreateShl(Shadow, Mapping.HighBits),
+                            Mapping.Scale + Mapping.HighBits);
+  else
+    Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
   if (Mapping.Offset == 0)
     return Shadow;
   // (Shadow >> scale) | offset
   Value *ShadowBase;
diff --git a/llvm/test/Instrumentation/AddressSanitizer/mapping-aix.ll b/llvm/test/Instrumentation/AddressSanitizer/mapping-aix.ll
new file mode 100644
index 0000000000000..267ea605e6755
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/mapping-aix.ll
@@ -0,0 +1,21 @@
+; Test shadow memory mapping on AIX
+
+; RUN: opt -passes=asan -mtriple=powerpc64-ibm-aix -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-64
+; RUN: opt -passes=asan -mtriple=powerpc-ibm-aix -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-32
+
+; CHECK: @test
+; On 64-bit AIX, we expect a left shift of 6 (HIGH_BITS) followed by a right shift of 9 (HIGH_BITS
+; + ASAN_SHADOW_SCALE) and an offset of 0x0a01000000000000.
+; CHECK-64: shl {{.*}} 6
+; CHECK-64-NEXT: lshr {{.*}} 9
+; CHECK-64-NEXT: add {{.*}} 720857415355990016
+; On 32-bit AIX, we expect just a right shift of 3 and an offset of 0x40000000.
+; CHECK-32: lshr {{.*}} 3
+; CHECK-32-NEXT: add {{.*}} 1073741824
+; CHECK: ret
+
+define i32 @test(i32* %a) sanitize_address {
+entry:
+  %tmp1 = load i32, i32* %a, align 4
+  ret i32 %tmp1
+}
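For reference, the arithmetic behind the new AIX mapping can be checked in isolation. The sketch below is a standalone illustration, not code from the patch: it mirrors the rule documented in the ShadowMapping comment, shadow = ((mem << HighBits) >> (HighBits + Scale)) + Offset, using the patch's constants (kAIXShadowOffset32/64, kAIXHighBits = 6) and assuming the default shadow scale of 3 implied by the test's shift amounts (9 = 6 + 3).

```cpp
#include <cstdint>
#include <cstdio>

// Constants mirrored from the patch; the scale of 3 is the default ASan
// shadow granularity assumed by mapping-aix.ll (shifts of 3 and 9).
static const int kScale = 3;
static const int kHighBits = 6;                          // kAIXHighBits
static const uint64_t kOffset32 = 0x40000000;            // kAIXShadowOffset32
static const uint64_t kOffset64 = 0x0a01000000000000ULL; // kAIXShadowOffset64

// 64-bit AIX rule:
//   shadow = ((mem << HighBits) >> (HighBits + Scale)) + Offset
// The shl/lshr pair clears the top HighBits address bits before the usual
// >> Scale, so the shadow region only has to cover the low 58 bits.
static uint64_t shadowAIX64(uint64_t Mem) {
  return ((Mem << kHighBits) >> (kHighBits + kScale)) + kOffset64;
}

// 32-bit AIX keeps the generic rule: shadow = (mem >> Scale) + Offset.
static uint64_t shadowAIX32(uint32_t Mem) {
  return (uint64_t(Mem) >> kScale) + kOffset32;
}

int main() {
  // 720857415355990016 is the decimal form of kAIXShadowOffset64 that the
  // CHECK-64 `add` line matches.
  std::printf("offset64 (decimal) = %llu\n", (unsigned long long)kOffset64);
  std::printf("shadow64(0x1000)   = 0x%llx\n", (unsigned long long)shadowAIX64(0x1000));
  std::printf("shadow32(0x1000)   = 0x%llx\n", (unsigned long long)shadowAIX32(0x1000));
  return 0;
}
```

Running the sketch prints the same decimal constant that CHECK-64 expects in the emitted `add`, which is a quick way to sanity-check the FileCheck values against the hex offsets in the C++ change.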