diff --git a/contrib/subrepo-cheri-libunwind/include/__libunwind_config.h b/contrib/subrepo-cheri-libunwind/include/__libunwind_config.h
index fd5fec8c0b27..de23bf8e21a6 100644
--- a/contrib/subrepo-cheri-libunwind/include/__libunwind_config.h
+++ b/contrib/subrepo-cheri-libunwind/include/__libunwind_config.h
@@ -11,6 +11,14 @@
 #define _LIBUNWIND_VERSION 15000
 
+#if defined(_LIBUNWIND_SANDBOX_HARDENED) && !defined(_LIBUNWIND_SANDBOX_OTYPES)
+#error "_LIBUNWIND_SANDBOX_HARDENED is invalid without a sandboxing mechanism"
+#endif
+
+#if defined(_LIBUNWIND_SANDBOX_OTYPES) && defined(_LIBUNWIND_NO_HEAP)
+#error "_LIBUNWIND_NO_HEAP cannot be used with _LIBUNWIND_SANDBOX_OTYPES"
+#endif
+
 #if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
     !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
 #define _LIBUNWIND_ARM_EHABI
@@ -20,7 +28,7 @@
 #define _LIBUNWIND_HIGHEST_DWARF_REGISTER_X86_64 32
 #define _LIBUNWIND_HIGHEST_DWARF_REGISTER_PPC 112
 #define _LIBUNWIND_HIGHEST_DWARF_REGISTER_PPC64 116
-#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO 229
+#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO 230
 #define _LIBUNWIND_HIGHEST_DWARF_REGISTER_ARM64 95
 #define _LIBUNWIND_HIGHEST_DWARF_REGISTER_ARM 287
 #define _LIBUNWIND_HIGHEST_DWARF_REGISTER_OR1K 32
@@ -76,11 +84,11 @@
 # elif defined(__aarch64__)
 #  define _LIBUNWIND_TARGET_AARCH64 1
 #  if defined(__CHERI_PURE_CAPABILITY__)
-#    define _LIBUNWIND_CONTEXT_SIZE 100
+#    define _LIBUNWIND_CONTEXT_SIZE 102
 #    if defined(__SEH__)
 #      error "Pure-capability aarch64 SEH not supported"
 #    else
-#      define _LIBUNWIND_CURSOR_SIZE 124
+#      define _LIBUNWIND_CURSOR_SIZE 126
 #    endif
 #    define _LIBUNWIND_HIGHEST_DWARF_REGISTER _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO
 #  else
diff --git a/contrib/subrepo-cheri-libunwind/include/libunwind.h b/contrib/subrepo-cheri-libunwind/include/libunwind.h
index fb38d3fe53f7..c1a1d432794e 100644
--- a/contrib/subrepo-cheri-libunwind/include/libunwind.h
+++ b/contrib/subrepo-cheri-libunwind/include/libunwind.h
@@ -678,7 +678,8 @@ enum {
   UNW_ARM64_C30 = 228,
   UNW_ARM64_CLR = 228,
   UNW_ARM64_C31 = 229,
-  UNW_ARM64_CSP = 229
+  UNW_ARM64_CSP = 229,
+  UNW_ARM64_ECSP = 230,
 };
 
 // 32-bit ARM registers. Numbers match DWARF for ARM spec #3.1 Table 1.
diff --git a/contrib/subrepo-cheri-libunwind/src/AddressSpace.hpp b/contrib/subrepo-cheri-libunwind/src/AddressSpace.hpp
index 56e5de51e334..af5179ff2b88 100644
--- a/contrib/subrepo-cheri-libunwind/src/AddressSpace.hpp
+++ b/contrib/subrepo-cheri-libunwind/src/AddressSpace.hpp
@@ -22,6 +22,7 @@
 #include "dwarf2.h"
 #include "EHHeaderParser.hpp"
 #include "Registers.hpp"
+#include "unwind_cheri.h"
 
 // We can no longer include C++ headers so duplicate std::min() here
 template <typename T> T uw_min(T a, T b) { return a < b ? a : b; }
@@ -320,6 +321,12 @@ class _LIBUNWIND_HIDDEN LocalAddressSpace {
     return get(addr);
   }
   capability_t getCapability(pint_t addr) { return get(addr); }
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+  static uintcap_t getUnwindSealer();
+  static bool isValidSealer(uintcap_t sealer) {
+    return __builtin_cheri_tag_get(sealer);
+  }
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES
   __attribute__((always_inline)) uintptr_t getP(pint_t addr);
   uint64_t getRegister(pint_t addr);
@@ -408,6 +415,25 @@ inline uint64_t LocalAddressSpace::getRegister(pint_t addr) {
 #endif
 }
 
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+extern "C" {
+/// Call into the RTLD to get a sealer capability. This sealer will be used to
+/// seal information in the unwinding context if _LIBUNWIND_SANDBOX_HARDENED is
+/// specified.
+uintptr_t _rtld_unw_getsealer(void);
+uintptr_t __rtld_unw_getsealer();
+_LIBUNWIND_HIDDEN uintptr_t __rtld_unw_getsealer() {
+  return (uintptr_t)0;
+}
+_LIBUNWIND_WEAK_ALIAS(__rtld_unw_getsealer, _rtld_unw_getsealer)
+}
+
+/// C++ wrapper for calling into RTLD.
+inline uintcap_t LocalAddressSpace::getUnwindSealer() {
+  return _rtld_unw_getsealer();
+}
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES
+
 /// Read a ULEB128 into a 64-bit word.
 inline uint64_t LocalAddressSpace::getULEB128(pint_t &addr, pint_t end) {
   const uint8_t *p = (uint8_t *)addr;
@@ -932,7 +958,8 @@ inline bool LocalAddressSpace::findUnwindSections(pc_t targetAddr,
     return true;
 #elif defined(_LIBUNWIND_USE_DL_ITERATE_PHDR)
   dl_iterate_cb_data cb_data = {this, &info, targetAddr};
-  CHERI_DBG("Calling dl_iterate_phdr()\n");
+  CHERI_DBG("Calling dl_iterate_phdr(0x%jx)\n",
+            (uintmax_t)targetAddr.address());
   int found = dl_iterate_phdr(findUnwindSectionsByPhdr, &cb_data);
   return static_cast<bool>(found);
 #endif
diff --git a/contrib/subrepo-cheri-libunwind/src/CompartmentInfo.hpp b/contrib/subrepo-cheri-libunwind/src/CompartmentInfo.hpp
new file mode 100644
index 000000000000..771ac999cf8d
--- /dev/null
+++ b/contrib/subrepo-cheri-libunwind/src/CompartmentInfo.hpp
@@ -0,0 +1,39 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//
+// Abstracts unwind information when used with a compartmentalizing runtime
+// linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __COMPARTMENT_INFO_HPP__
+#define __COMPARTMENT_INFO_HPP__
+
+namespace libunwind {
+class _LIBUNWIND_HIDDEN CompartmentInfo {
+public:
+  static CompartmentInfo sThisCompartmentInfo;
+#if defined(__CHERI_PURE_CAPABILITY__)
+  static const uintcap_t kInvalidRCSP = (uintcap_t)0;
+  // Per-architecture trusted stack frame layout.
+#if defined(_LIBUNWIND_TARGET_AARCH64)
+  static const uint32_t kNewSPOffset = 48;
+  static const uint32_t kNextOffset = 32;
+  static const uint32_t kFPOffset = 0;
+  static const uint32_t kCalleeSavedOffset = 80;
+  static const uint32_t kCalleeSavedCount = 10;
+  static const uint32_t kCalleeSavedSize = 16;
+  static const uint32_t kReturnAddressOffset = 40;
+  static const uint32_t kPCOffset = 16;
+  // kCalleeSavedCount - 1 because kCalleeSavedOffset is the first one.
+  static const uint32_t kTrustedFrameSize =
+      kCalleeSavedOffset + (kCalleeSavedCount - 1) * kCalleeSavedSize;
+#endif // _LIBUNWIND_TARGET_AARCH64
+#endif // __CHERI_PURE_CAPABILITY__
+};
+} // namespace libunwind
+#endif // __COMPARTMENT_INFO_HPP__
diff --git a/contrib/subrepo-cheri-libunwind/src/DwarfInstructions.hpp b/contrib/subrepo-cheri-libunwind/src/DwarfInstructions.hpp
index e19383a4027e..b41145cffe30 100644
--- a/contrib/subrepo-cheri-libunwind/src/DwarfInstructions.hpp
+++ b/contrib/subrepo-cheri-libunwind/src/DwarfInstructions.hpp
@@ -20,6 +20,7 @@
 #include "Registers.hpp"
 #include "DwarfParser.hpp"
 #include "config.h"
+#include "CompartmentInfo.hpp"
 
 namespace libunwind {
@@ -54,6 +55,15 @@ class DwarfInstructions {
   typedef typename CFI_Parser<A>::FDE_Info FDE_Info;
   typedef typename CFI_Parser<A>::CIE_Info CIE_Info;
 
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+  static uintptr_t restoreRegistersFromSandbox(uintcap_t csp, A &addressSpace,
+                                               R &newRegisters,
+                                               CompartmentInfo &CI,
+                                               uintcap_t sealer);
+  static bool isEndOfExecutiveStack(uintcap_t csp, CompartmentInfo &CI);
+  static bool isTrampoline(uintcap_t ecsp, A &addressSpace, CompartmentInfo &CI,
+                           uintcap_t returnAddress);
+#endif
   static pint_t evaluateExpression(pint_t expression, A &addressSpace,
                                    const R &registers,
                                    pint_t initialStackValue);
@@ -72,9 +82,16 @@ class DwarfInstructions {
     *success = true;
     pint_t result = (pint_t)-1;
     if (prolog.cfaRegister != 0) {
-      result =
-          (pint_t)((sint_t)registers.getRegister((int)prolog.cfaRegister) +
-                   prolog.cfaRegisterOffset);
+      result = registers.getRegister((int)prolog.cfaRegister);
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+      CHERI_DBG("getRegister(%d) = %#p\n", (int)prolog.cfaRegister,
+                (void *)result);
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+      if (__builtin_cheri_sealed_get(result))
+        result = __builtin_cheri_unseal(result, addressSpace.getUnwindSealer());
+#endif // _LIBUNWIND_SANDBOX_HARDENED
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES
+      result = (pint_t)((sint_t)result + prolog.cfaRegisterOffset);
     } else if (prolog.cfaExpression != 0) {
       result = evaluateExpression((pint_t)prolog.cfaExpression, addressSpace,
                                   registers, 0);
@@ -246,6 +263,134 @@ bool DwarfInstructions<A, R>::getRA_SIGN_STATE(A &addressSpace, R registers,
 }
 #endif
 
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+#if defined(_LIBUNWIND_TARGET_AARCH64)
+template <typename A, typename R>
+size_t restoreCalleeSavedRegisters(uintcap_t csp, A &addressSpace,
+                                   R &newRegisters, CompartmentInfo &CI,
+                                   uintcap_t sealer) {
+  // Restore callee-saved registers. We seal these if they aren't sealed
+  // already.
+  //
+  // XXX: When _LIBUNWIND_SANDBOX_HARDENED is specified, sentries get handed out
+  // and we can't really prevent the untrusted context from using those right
+  // now.
+  size_t i;
+  size_t offset;
+  // Restore: c19-c28
+  for (i = 0, offset = CI.kCalleeSavedOffset; i < CI.kCalleeSavedCount;
+       ++i, offset += CI.kCalleeSavedSize) {
+    uintcap_t regValue = addressSpace.getCapability(csp + offset);
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+    if (addressSpace.isValidSealer(sealer) &&
+        !__builtin_cheri_sealed_get(regValue))
+      regValue = __builtin_cheri_seal(regValue, sealer);
+#endif
+    newRegisters.setCapabilityRegister(UNW_ARM64_C19 + i, regValue);
+    CHERI_DBG("SETTING CALLEE SAVED CAPABILITY REGISTER: %lu (%s): %#p "
+              "(offset=%zu)\n",
+              UNW_ARM64_C19 + i,
+              newRegisters.getRegisterName(UNW_ARM64_C19 + i), (void *)regValue,
+              offset);
+  }
+
+  return offset;
+}
+
+template <typename A, typename R>
+uintptr_t DwarfInstructions<A, R>::restoreRegistersFromSandbox(
+    uintcap_t csp, A &addressSpace, R &newRegisters, CompartmentInfo &CI,
+    uintcap_t sealer) {
+  // Get the unsealed executive CSP
+  assert(__builtin_cheri_tag_get((void *)csp) &&
+         "Executive stack should be tagged!");
+  // Derive the new executive CSP
+  ptraddr_t nextCSPAddr = addressSpace.get64(csp + CI.kNextOffset);
+  uintcap_t nextCSP = __builtin_cheri_address_set(csp, nextCSPAddr);
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+  // Seal ECSP
+  nextCSP = __builtin_cheri_seal(nextCSP, sealer);
+#endif
+  assert(__builtin_cheri_tag_get((void *)nextCSP) &&
+         "Next executive stack should be tagged!");
+  CHERI_DBG("SANDBOX: SETTING EXECUTIVE CSP %#p\n", (void *)nextCSP);
+  newRegisters.setECSP(nextCSP);
+  // Restore the next RCSP
+  uintcap_t nextRCSP = addressSpace.getCapability(csp + CI.kNewSPOffset);
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+  // Seal RCSP
+  nextRCSP = __builtin_cheri_seal(nextRCSP, sealer);
+#endif
+  newRegisters.setSP(nextRCSP);
+  CHERI_DBG("SANDBOX: SETTING RESTRICTED CSP: %#p\n",
+            (void *)newRegisters.getSP());
+  size_t offset =
+      restoreCalleeSavedRegisters(csp, addressSpace, newRegisters, CI, sealer);
+  // Restore the frame pointer
+  uintcap_t newFP = addressSpace.getCapability(csp);
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+  newFP = __builtin_cheri_seal(newFP, sealer);
+#endif
+  CHERI_DBG("SANDBOX: SETTING CFP %#p (offset=%zu)\n", (void *)newFP, offset);
+  newRegisters.setFP(newFP);
+  // Get the new return address. We can't seal this because a return address
+  // will be a sentry.
+  return addressSpace.getCapability(csp + CI.kPCOffset);
+}
+
+template <typename A, typename R>
+bool DwarfInstructions<A, R>::isEndOfExecutiveStack(uintcap_t csp,
+                                                    CompartmentInfo &CI) {
+  CHERI_DBG("isEndOfExecutiveStack(): csp: %#p\n", (void *)csp);
+  ptraddr_t cspAddr = (ptraddr_t)csp;
+  ptraddr_t cspEndAddr =
+      __builtin_cheri_base_get(csp) + __builtin_cheri_length_get(csp);
+  // Ensure this has the correct trusted frame size.
+  return cspAddr > (cspEndAddr - CI.kTrustedFrameSize);
+}
+
+template <typename A, typename R>
+bool DwarfInstructions<A, R>::isTrampoline(uintcap_t ecsp, A &addressSpace,
+                                           CompartmentInfo &CI,
+                                           uintcap_t returnAddress) {
+  // TODO(cheri): Use a cfp-based approach rather than the cookie.
+  ptraddr_t expectedReturnAddress =
+      addressSpace.get64(ecsp + CI.kReturnAddressOffset) & (~0b11ULL);
+  CHERI_DBG("isTrampoline(): expectedReturnAddress: 0x%lx\n",
+            expectedReturnAddress);
+  return expectedReturnAddress == returnAddress - 1;
+}
+#else // _LIBUNWIND_TARGET_AARCH64
+template <typename A, typename R>
+size_t restoreCalleeSavedRegisters(A &addressSpace, R &registers,
+                                   R &newRegisters, CompartmentInfo &CI,
+                                   uintcap_t sealer) {
+  assert(0 && "not implemented on this architecture");
+  return 0;
+}
+template <typename A, typename R>
+uintptr_t DwarfInstructions<A, R>::restoreRegistersFromSandbox(
+    uintcap_t csp, A &addressSpace, R &newRegisters, CompartmentInfo &CI,
+    uintcap_t sealer) {
+  assert(0 && "not implemented on this architecture");
+  return (uintptr_t)0;
+}
+template <typename A, typename R>
+bool DwarfInstructions<A, R>::isEndOfExecutiveStack(uintcap_t csp,
+                                                    CompartmentInfo &CI) {
+  assert(0 && "not implemented on this architecture");
+  return false;
+}
+template <typename A, typename R>
+bool DwarfInstructions<A, R>::isTrampoline(uintcap_t ecsp, A &addressSpace,
+                                           CompartmentInfo &CI,
+                                           uintcap_t returnAddress) {
+  assert(0 && "not implemented on this architecture");
+  return false;
+}
+#endif // _LIBUNWIND_TARGET_AARCH64
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES
+
 template <typename A, typename R>
 int DwarfInstructions<A, R>::stepWithDwarf(A &addressSpace, pc_t pc,
                                            pint_t fdeStart, R &registers,
@@ -274,7 +419,16 @@ int DwarfInstructions<A, R>::stepWithDwarf(A &addressSpace, pc_t pc,
       //
       // We set the SP here to the CFA, allowing for it to be overridden
      // by a CFI directive later on.
-      newRegisters.setSP(cfa);
+      uintptr_t newSP = cfa;
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+      uintcap_t sealer = addressSpace.getUnwindSealer();
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+      if (addressSpace.isValidSealer(sealer))
+        newSP = __builtin_cheri_seal(newSP, sealer);
+#endif // _LIBUNWIND_SANDBOX_HARDENED
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES
+      CHERI_DBG("SETTING SP: %#p\n", (void *)newSP);
+      newRegisters.setSP(newSP);
 
       pint_t returnAddress = 0;
       constexpr int lastReg = R::lastDwarfRegNum();
@@ -297,15 +451,24 @@ int DwarfInstructions<A, R>::stepWithDwarf(A &addressSpace, pc_t pc,
           else if (i == (int)cieInfo.returnAddressRegister) {
             returnAddress = getSavedRegister(i, addressSpace, registers, cfa,
                                              prolog.savedRegisters[i]);
-            CHERI_DBG("SETTING RETURN REGISTER %d (%s): %#p \n",
-                      i, newRegisters.getRegisterName(i), (void*)returnAddress);
+            CHERI_DBG("GETTING RETURN ADDRESS (saved) %d (%s): %#p \n", i,
+                      newRegisters.getRegisterName(i), (void *)returnAddress);
           } else if (registers.validCapabilityRegister(i)) {
-            newRegisters.setCapabilityRegister(
-                i, getSavedCapabilityRegister(addressSpace, registers, cfa,
-                                              prolog.savedRegisters[i]));
-            CHERI_DBG("SETTING CAPABILITY REGISTER %d (%s): %#p \n",
-                      i, newRegisters.getRegisterName(i),
-                      (void*)A::to_pint_t(newRegisters.getCapabilityRegister(i)));
+            capability_t savedReg = getSavedCapabilityRegister(
+                addressSpace, registers, cfa, prolog.savedRegisters[i]);
+#if defined(__CHERI_PURE_CAPABILITY__) && \
+    defined(_LIBUNWIND_SANDBOX_OTYPES) && defined(_LIBUNWIND_SANDBOX_HARDENED)
+            // Seal all the capability registers. This enforces the invariant
+            // that unsealed capabilities are never stored in the context that
+            // aren't explicitly set through unw_set_reg() by a consumer.
+            if (addressSpace.isValidSealer(sealer) &&
+                !__builtin_cheri_sealed_get(savedReg))
+              savedReg = __builtin_cheri_seal(savedReg, sealer);
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES &&
+       // _LIBUNWIND_SANDBOX_HARDENED
+            newRegisters.setCapabilityRegister(i, savedReg);
+            CHERI_DBG("SETTING CAPABILITY REGISTER %d (%s): %#p \n", i,
+                      newRegisters.getRegisterName(i), (void *)savedReg);
           } else if (registers.validRegister(i))
             newRegisters.setRegister(
                 i, getSavedRegister(i, addressSpace, registers, cfa,
@@ -313,9 +476,11 @@ int DwarfInstructions<A, R>::stepWithDwarf(A &addressSpace, pc_t pc,
           else
             return UNW_EBADREG;
         } else if (i == (int)cieInfo.returnAddressRegister) {
-            // Leaf function keeps the return address in register and there is no
-            // explicit intructions how to restore it.
-            returnAddress = registers.getRegister(cieInfo.returnAddressRegister);
+          // Leaf function keeps the return address in register and there is no
+          // explicit intructions how to restore it.
+          returnAddress = registers.getRegister(cieInfo.returnAddressRegister);
+          CHERI_DBG("GETTING RETURN ADDRESS (leaf) %d (%s): %#p \n", i,
+                    registers.getRegisterName(i), (void *)returnAddress);
         }
       }
 
@@ -403,9 +568,38 @@ int DwarfInstructions<A, R>::stepWithDwarf(A &addressSpace, pc_t pc,
       }
 #endif
 
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+      // If the sealer is not valid (only the case when we're running with
+      // c18n), check if the return address has the executive mode bit set.
+      // If so, we should be calling into the c18n RTLD as this is a
+      // compartment boundary. We need to restore registers from the executive
+      // stack and ask rtld for it.
+      if (addressSpace.isValidSealer(sealer)) {
+        // Iteratively unwind all the executive mode return addresses. This is
+        // necessary to support tail calls to trampolines.
+        uintcap_t csp = registers.getUnsealedECSP(sealer);
+        CompartmentInfo &CI = CompartmentInfo::sThisCompartmentInfo;
+        for (;;) {
+          if (isEndOfExecutiveStack(csp, CI)) {
+            return UNW_ESTOPUNWIND;
+          }
+          if (isTrampoline(csp, addressSpace, CI, returnAddress)) {
+            CHERI_DBG("%#p: detected a trampoline, unwinding from sandbox\n",
+                      (void *)returnAddress);
+            returnAddress = restoreRegistersFromSandbox(
+                csp, addressSpace, newRegisters, CI, sealer);
+            csp = newRegisters.getUnsealedECSP(sealer);
+          } else {
+            break;
+          }
+        }
+      }
+#endif
+
       // Return address is address after call site instruction, so setting IP to
       // that does simualates a return.
       newRegisters.setIP(returnAddress);
+      CHERI_DBG("SETTING RETURN ADDRESS %#p\n", (void *)returnAddress);
 
       // Simulate the step by replacing the register set with the new ones.
       registers = newRegisters;
diff --git a/contrib/subrepo-cheri-libunwind/src/Registers.hpp b/contrib/subrepo-cheri-libunwind/src/Registers.hpp
index 92b0b8b9bb9a..63851a21e5e0 100644
--- a/contrib/subrepo-cheri-libunwind/src/Registers.hpp
+++ b/contrib/subrepo-cheri-libunwind/src/Registers.hpp
@@ -18,6 +18,7 @@
 #include "cet_unwind.h"
 #include "config.h"
 #include "libunwind.h"
+#include "unwind_cheri.h"
 
 namespace libunwind {
@@ -1851,6 +1852,45 @@ class _LIBUNWIND_HIDDEN Registers_arm64 {
   void setVectorRegister(int num, v128 value);
   static const char *getRegisterName(int num);
   void jumpto() { __libunwind_Registers_arm64_jumpto(this); }
+#if defined(__CHERI_PURE_CAPABILITY__) && \
+    defined(_LIBUNWIND_SANDBOX_OTYPES) && defined(_LIBUNWIND_SANDBOX_HARDENED)
+  void unsealSP(uintcap_t sealer) {
+    assert(__builtin_cheri_sealed_get(_registers.__sp) && "Value must be sealed");
+    _registers.__sp = __builtin_cheri_unseal(_registers.__sp, sealer);
+  }
+  void unsealFP(uintcap_t sealer) {
+    assert(__builtin_cheri_sealed_get(_registers.__fp) && "Value must be sealed");
+    _registers.__fp = __builtin_cheri_unseal(_registers.__fp, sealer);
+  }
+  void unsealCalleeSavedRegisters(uintcap_t sealer) {
+    for (auto i = 0; i < 10; ++i) {
+      uintcap_t sealedValue = getRegister(UNW_ARM64_C19 + i);
+      // FIXME: Would be nice to enforce this invariant, but right now
+      // unw_set_reg() gets called to set what ends up in private_1, and it
+      // would require breaking libunwind's public API to seal registers through
+      // that particular path, and therefore we can't assert this.
+#if 0
+      assert((!__builtin_cheri_tag_get(sealedValue) ||
+              __builtin_cheri_sealed_get(sealedValue)) &&
+             "Value must be sealed");
+#endif
+      uintcap_t unsealedValue = sealedValue;
+      // If the tag gets cleared when we attempt to unseal our value, that means
+      // that we either have a capability that was sealed to begin with, and
+      // therefore we should just return it that way, or we have a sentry which
+      // we cannot unseal.
+      if (__builtin_cheri_tag_get(sealedValue) &&
+          __builtin_cheri_sealed_get(sealedValue)) {
+        unsealedValue = __builtin_cheri_unseal(sealedValue, sealer);
+        if (!__builtin_cheri_tag_get(unsealedValue)) {
+          unsealedValue = sealedValue;
+        }
+      }
+      setCapabilityRegister(UNW_ARM64_C19 + i, unsealedValue);
+    }
+  }
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES &&
+       // _LIBUNWIND_SANDBOX_HARDENED
   static constexpr int lastDwarfRegNum() {
 #ifdef __CHERI_PURE_CAPABILITY__
     return _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO;
@@ -1863,10 +1903,13 @@
 #ifdef __CHERI_PURE_CAPABILITY__
   bool validCapabilityRegister(int num) const;
   uintcap_t getCapabilityRegister(int num) const;
+#ifdef _LIBUNWIND_SANDBOX_OTYPES
+  uintcap_t getUnsealedECSP(uintcap_t sealer) const;
+#endif
   void setCapabilityRegister(int num, uintcap_t value);
 #else
   CAPABILITIES_NOT_SUPPORTED
-#endif
+#endif // __CHERI_PURE_CAPABILITY__
 
   uintptr_t getSP() const { return _registers.__sp; }
   void setSP(uintptr_t value) { _registers.__sp = value; }
@@ -1874,6 +1917,9 @@ uintptr_t getIP() const { return _registers.__pc; }
   void setIP(uintptr_t value) { _registers.__pc = value; }
   uintptr_t getFP() const { return _registers.__fp; }
   void setFP(uintptr_t value) { _registers.__fp = value; }
+#ifdef __CHERI_PURE_CAPABILITY__
+  void setECSP(uintptr_t value) { _registers.__csp = value; }
+#endif
 
 private:
   struct GPRs {
@@ -1882,6 +1928,9 @@
     uintptr_t __lr; // Link register r30
    uintptr_t __sp; // Stack pointer r31
     uintptr_t __pc; // Program counter
+#ifdef __CHERI_PURE_CAPABILITY__
+    uintptr_t __csp; // Executive stack pointer.
+#endif
     uint64_t __ra_sign_state; // RA sign state register
   };
 
@@ -1898,8 +1947,8 @@ inline Registers_arm64::Registers_arm64(const void *registers) {
                 "arm64 registers do not fit into unw_context_t");
   memcpy(&_registers, registers, sizeof(_registers));
 #ifdef __CHERI_PURE_CAPABILITY__
-  static_assert(sizeof(GPRs) == 0x220,
-                "expected VFP registers to be at offset 544");
+  static_assert(sizeof(GPRs) == 0x230,
+                "expected VFP registers to be at offset 560");
 #else
   static_assert(sizeof(GPRs) == 0x110,
                 "expected VFP registers to be at offset 272");
@@ -1924,6 +1973,8 @@ inline bool Registers_arm64::validRegister(int regNum) const {
 #ifdef __CHERI_PURE_CAPABILITY__
   if ((regNum >= UNW_ARM64_C0) && (regNum <= UNW_ARM64_C31))
     return true;
+  if (regNum == UNW_ARM64_ECSP)
+    return true;
 #endif
   if (regNum > 95)
     return false;
@@ -1970,6 +2021,8 @@ inline void Registers_arm64::setRegister(int regNum, uintptr_t value) {
 #ifdef __CHERI_PURE_CAPABILITY__
   else if ((regNum >= UNW_ARM64_C0) && (regNum <= UNW_ARM64_C31))
     _registers.__x[regNum - UNW_ARM64_C0] = value;
+  else if (regNum == UNW_ARM64_ECSP)
+    _registers.__csp = value;
 #endif
   else
     _LIBUNWIND_ABORT("unsupported arm64 register");
@@ -1983,9 +2036,23 @@ inline bool Registers_arm64::validCapabilityRegister(int regNum) const {
     return true;
   if ((regNum >= UNW_ARM64_C0) && (regNum <= UNW_ARM64_C31))
     return true;
+  if (regNum == UNW_ARM64_ECSP)
+    return true;
   return false;
 }
 
+#ifdef _LIBUNWIND_SANDBOX_OTYPES
+inline uintcap_t
+Registers_arm64::getUnsealedECSP(uintcap_t sealer) const {
+  uintcap_t csp = _registers.__csp;
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+  if (__builtin_cheri_sealed_get(csp))
+    csp = __builtin_cheri_unseal(csp, sealer);
+#endif // _LIBUNWIND_SANDBOX_HARDENED
+  return csp;
+}
+#endif // _LIBUNWIND_SANDBOX_OTYPES
+
 inline uintcap_t Registers_arm64::getCapabilityRegister(int regNum) const {
   assert(validCapabilityRegister(regNum));
   return getRegister(regNum);
@@ -2198,6 +2265,8 @@ inline const char *Registers_arm64::getRegisterName(int regNum) {
     return "clr";
   case UNW_ARM64_C31:
     return "csp";
+  case UNW_ARM64_ECSP:
+    return "ecsp";
   default:
     return "unknown register";
   }
@@ -4544,6 +4613,14 @@ class _LIBUNWIND_HIDDEN Registers_riscv {
   void setSP(reg_t value) { _registers[2] = value; }
   reg_t getIP() const { return _registers[0]; }
   void setIP(reg_t value) { _registers[0] = value; }
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+  reg_t getUnsealedECSP(uintcap_t sealer) { return getSP(); }
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+  void unsealSP(uintcap_t sealer) {}
+  void unsealFP(uintcap_t sealer) {}
+  void unsealCalleeSavedRegisters(uintcap_t sealer) {}
+#endif // _LIBUNWIND_SANDBOX_HARDENED
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES &&
 
 private:
   // _registers[0] holds the pc
diff --git a/contrib/subrepo-cheri-libunwind/src/UnwindCursor.hpp b/contrib/subrepo-cheri-libunwind/src/UnwindCursor.hpp
index 2b4e51e295ab..643d5968b896 100644
--- a/contrib/subrepo-cheri-libunwind/src/UnwindCursor.hpp
+++ b/contrib/subrepo-cheri-libunwind/src/UnwindCursor.hpp
@@ -474,6 +474,19 @@ class _LIBUNWIND_HIDDEN AbstractUnwindCursor {
   }
 #endif
 
+#if defined(__CHERI_PURE_CAPABILITY__) && \
+    defined(_LIBUNWIND_SANDBOX_OTYPES) && defined(_LIBUNWIND_SANDBOX_HARDENED)
+  virtual void unsealSP(uintcap_t) {
+    _LIBUNWIND_ABORT("unsealSP not implemented");
+  }
+  virtual void unsealFP(uintcap_t) {
+    _LIBUNWIND_ABORT("unsealFP not implemented");
+  }
+  virtual void unsealCalleeSavedRegisters(uintcap_t) {
+    _LIBUNWIND_ABORT("unsealCalleeSavedRegisters not implemented");
+  }
+#endif
+
 #if defined(_LIBUNWIND_USE_CET)
   virtual void *get_registers() {
     _LIBUNWIND_ABORT("get_registers not implemented");
@@ -941,6 +954,12 @@ class UnwindCursor : public AbstractUnwindCursor{
   virtual bool getFunctionName(char *buf, size_t len, unw_word_t *off);
   virtual void setInfoBasedOnIPRegister(bool isReturnAddress = false);
   virtual const char *getRegisterName(int num);
+#if defined(__CHERI_PURE_CAPABILITY__) && \
+    defined(_LIBUNWIND_SANDBOX_OTYPES) && defined(_LIBUNWIND_SANDBOX_HARDENED)
+  virtual void unsealSP(uintcap_t sealer);
+  virtual void unsealFP(uintcap_t sealer);
+  virtual void unsealCalleeSavedRegisters(uintcap_t sealer);
+#endif
 #ifdef __arm__
   virtual void saveVFPAsX();
 #endif
@@ -1294,7 +1313,7 @@ class UnwindCursor : public AbstractUnwindCursor{
   }
 #endif // defined(_LIBUNWIND_SUPPORT_TBTAB_UNWIND)
 
-  A               &_addressSpace;
+  A &_addressSpace;
   R                _registers;
   unw_proc_info_t  _info;
   bool             _unwindInfoMissing;
@@ -1384,6 +1403,23 @@ const char *UnwindCursor<A, R>::getRegisterName(int regNum) {
   return _registers.getRegisterName(regNum);
 }
 
+#if defined(__CHERI_PURE_CAPABILITY__) && \
+    defined(_LIBUNWIND_SANDBOX_OTYPES) && defined(_LIBUNWIND_SANDBOX_HARDENED)
+template <typename A, typename R>
+void UnwindCursor<A, R>::unsealSP(uintcap_t sealer) {
+  return _registers.unsealSP(sealer);
+}
+template <typename A, typename R>
+void UnwindCursor<A, R>::unsealFP(uintcap_t sealer) {
+  return _registers.unsealFP(sealer);
+}
+template <typename A, typename R>
+void UnwindCursor<A, R>::unsealCalleeSavedRegisters(uintcap_t sealer) {
+  return _registers.unsealCalleeSavedRegisters(sealer);
+}
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES &&
+       // _LIBUNWIND_SANDBOX_HARDENED
+
 template <typename A, typename R> bool UnwindCursor<A, R>::isSignalFrame() {
   return _isSignalFrame;
 }
diff --git a/contrib/subrepo-cheri-libunwind/src/UnwindRegistersRestore.S b/contrib/subrepo-cheri-libunwind/src/UnwindRegistersRestore.S
index dde5fd117307..beec52460443 100644
--- a/contrib/subrepo-cheri-libunwind/src/UnwindRegistersRestore.S
+++ b/contrib/subrepo-cheri-libunwind/src/UnwindRegistersRestore.S
@@ -703,6 +703,25 @@ Lnovec:
 #elif defined(__aarch64__)
 
+#if defined(__CHERI_PURE_CAPABILITY__)
+DEFINE_LIBUNWIND_FUNCTION(__rtld_unw_setcontext)
+  mov c16, c2
+  ldp c2, c3, [c3, #(-0x210 + 0x20)]
+  mov csp, c16
+#ifdef __ARM_MORELLO_PURECAP_BENCHMARK_ABI
+  and x30, x30, #~1;
+  ret x30
+#else
+  ret
+#endif
+END_LIBUNWIND_FUNCTION(__rtld_unw_setcontext)
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+WEAK_ALIAS(__rtld_unw_setcontext, _rtld_unw_setcontext)
+#else
+WEAK_ALIAS(__rtld_unw_setcontext, _rtld_unw_setcontext_unsealed)
+#endif
+#endif
+
 //
 // extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
 //
@@ -729,8 +748,8 @@ DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
   ldp c26,c27, [c0, #0x1a0]
   ldp c28,c29, [c0, #0x1c0]
   ldr c30, [c0, #0x200] // restore pcc into clr
+  add c16,c0, #0x230
-  add c16,c0, #0x220
   ldp d0, d1, [c16, #0x000]
   ldp d2, d3, [c16, #0x010]
   ldp d4, d5, [c16, #0x020]
@@ -753,14 +772,13 @@ DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
   // context struct, because it is allocated on the stack, and an exception
   // could clobber the de-allocated portion of the stack after csp has been
   // restored.
-  ldr c16, [c0, #0x1f0]
-  ldp c0, c1, [c0, #0x000] // restore c0,c1
-  mov csp,c16              // restore csp
-#ifdef __ARM_MORELLO_PURECAP_BENCHMARK_ABI
-  and x30, x30, #~1
-  ret x30                  // jump to pc
+  ldr c2, [c0, #0x1f0]
+  add c3, c0, #0x210
+  ldp c0, c1, [c0, #0x000]
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+  b _rtld_unw_setcontext
 #else
-  ret                      // jump to pcc
+  b _rtld_unw_setcontext_unsealed
 #endif
 #else
   // skip restore of x0,x1 for now
diff --git a/contrib/subrepo-cheri-libunwind/src/UnwindRegistersSave.S b/contrib/subrepo-cheri-libunwind/src/UnwindRegistersSave.S
index 61c8c9d16b73..56d6fa9a3b72 100644
--- a/contrib/subrepo-cheri-libunwind/src/UnwindRegistersSave.S
+++ b/contrib/subrepo-cheri-libunwind/src/UnwindRegistersSave.S
@@ -837,6 +837,19 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
 #elif defined(__aarch64__)
 
+#if defined(__CHERI_PURE_CAPABILITY__)
+DEFINE_LIBUNWIND_FUNCTION(__rtld_unw_getcontext)
+  mov c2, csp
+  str c2, [c1]
+  ret c30
+END_LIBUNWIND_FUNCTION(__rtld_unw_getcontext)
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+WEAK_ALIAS(__rtld_unw_getcontext, _rtld_unw_getcontext)
+#else
+WEAK_ALIAS(__rtld_unw_getcontext, _rtld_unw_getcontext_unsealed)
+#endif
+#endif
+
 //
 // extern int __unw_getcontext(unw_context_t* thread_state)
 //
@@ -861,12 +874,15 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
   stp c24,c25, [c0, #0x180]
   stp c26,c27, [c0, #0x1a0]
   stp c28,c29, [c0, #0x1c0]
-  str c30, [c0, #0x1e0]
   mov c1,csp
-  str c1, [c0, #0x1f0]
+  stp c30, c1, [c0, #0x1e0]
   str c30, [c0, #0x200] // store return address as pcc
+
+  // Prepare c1 to get our executive stack.
+  add c1, c0, #0x210
+
   // skip cpsr
-  add c0, c0, #0x220
+  add c0, c0, #0x230
   stp d0, d1, [c0, #0x000]
   stp d2, d3, [c0, #0x010]
   stp d4, d5, [c0, #0x020]
@@ -885,7 +901,11 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
   str d30, [c0, #0x0f0]
   str d31, [c0, #0x0f8]
   mov x0, #0 // return UNW_ESUCCESS
-  ret
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+  b _rtld_unw_getcontext
+#else
+  b _rtld_unw_getcontext_unsealed
+#endif
 #else
   stp x0, x1, [x0, #0x000]
   stp x2, x3, [x0, #0x010]
diff --git a/contrib/subrepo-cheri-libunwind/src/libunwind.cpp b/contrib/subrepo-cheri-libunwind/src/libunwind.cpp
index 83648894a740..316446c18ced 100644
--- a/contrib/subrepo-cheri-libunwind/src/libunwind.cpp
+++ b/contrib/subrepo-cheri-libunwind/src/libunwind.cpp
@@ -28,9 +28,9 @@
 #if !defined(__USING_SJLJ_EXCEPTIONS__)
 #include "AddressSpace.hpp"
+#include "CompartmentInfo.hpp"
 #include "UnwindCursor.hpp"
-
 
 template <size_t A, size_t B>
 constexpr bool check_less_eq_than() {
   static_assert(A <= B, "Constants need to be updated!");
   return A <= B;
@@ -42,6 +42,9 @@ using namespace libunwind;
 /// internal object to represent this processes address space
 LocalAddressSpace LocalAddressSpace::sThisAddressSpace;
+/// internal object to represent this processes compartment information
+CompartmentInfo CompartmentInfo::sThisCompartmentInfo;
+
 _LIBUNWIND_EXPORT unw_addr_space_t unw_local_addr_space =
     (unw_addr_space_t)&LocalAddressSpace::sThisAddressSpace;
@@ -223,6 +226,17 @@ _LIBUNWIND_HIDDEN int __unw_resume(unw_cursor_t *cursor) {
   __asan_handle_no_return();
 #endif
   AbstractUnwindCursor *co = (AbstractUnwindCursor *)cursor;
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+  LocalAddressSpace &addressSpace = LocalAddressSpace::sThisAddressSpace;
+  uintcap_t sealer = addressSpace.getUnwindSealer();
+  if (addressSpace.isValidSealer(sealer)) {
+#ifdef _LIBUNWIND_SANDBOX_HARDENED
+    co->unsealSP(sealer);
+    co->unsealFP(sealer);
+    co->unsealCalleeSavedRegisters(sealer);
+#endif // _LIBUNWIND_SANDBOX_HARDENED
+  }
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES
   co->jumpto();
   return UNW_EUNSPEC;
 }
diff --git a/contrib/subrepo-cheri-libunwind/src/unwind_cheri.h b/contrib/subrepo-cheri-libunwind/src/unwind_cheri.h
new file mode 100644
index 000000000000..3228027f11e4
--- /dev/null
+++ b/contrib/subrepo-cheri-libunwind/src/unwind_cheri.h
@@ -0,0 +1,36 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+// Provides the constants and helpers for CHERI.
+//===----------------------------------------------------------------------===//
+
+#ifndef __UNWIND_CHERI_H__
+#define __UNWIND_CHERI_H__
+
+#ifdef __CHERI_PURE_CAPABILITY__
+#define _LIBUNWIND_CHERI_PERM_GLOBAL (1 << 0) /* 0x00000001 */
+#define _LIBUNWIND_CHERI_PERM_EXECUTIVE (1 << 1) /* 0x00000002 */
+#define _LIBUNWIND_CHERI_PERM_SW0 (1 << 2) /* 0x00000004 */
+#define _LIBUNWIND_CHERI_PERM_SW1 (1 << 3) /* 0x00000008 */
+#define _LIBUNWIND_CHERI_PERM_SW2 (1 << 4) /* 0x00000010 */
+#define _LIBUNWIND_CHERI_PERM_SW3 (1 << 5) /* 0x00000020 */
+#define _LIBUNWIND_CHERI_PERM_MUTABLE_LOAD (1 << 6) /* 0x00000040 */
+#define _LIBUNWIND_CHERI_PERM_COMPARTMENT_ID (1 << 7) /* 0x00000080 */
+#define _LIBUNWIND_CHERI_PERM_BRANCH_SEALED_PAIR (1 << 8) /* 0x00000100 */
+#define _LIBUNWIND_CHERI_PERM_INVOKE CHERI_PERM_BRANCH_SEALED_PAIR
+#define _LIBUNWIND_CHERI_PERM_SYSTEM (1 << 9) /* 0x00000200 */
+#define _LIBUNWIND_CHERI_PERM_SYSTEM_REGS CHERI_PERM_SYSTEM
+#define _LIBUNWIND_CHERI_PERM_UNSEAL (1 << 10) /* 0x00000400 */
+#define _LIBUNWIND_CHERI_PERM_SEAL (1 << 11) /* 0x00000800 */
+#define _LIBUNWIND_CHERI_PERM_STORE_LOCAL_CAP (1 << 12) /* 0x00001000 */
+#define _LIBUNWIND_CHERI_PERM_STORE_CAP (1 << 13) /* 0x00002000 */
+#define _LIBUNWIND_CHERI_PERM_LOAD_CAP (1 << 14) /* 0x00004000 */
+#define _LIBUNWIND_CHERI_PERM_EXECUTE (1 << 15) /* 0x00008000 */
+#define _LIBUNWIND_CHERI_PERM_STORE (1 << 16) /* 0x00010000 */
+#define _LIBUNWIND_CHERI_PERM_LOAD (1 << 17) /* 0x00020000 */
+#endif // __CHERI_PURE_CAPABILITY__
+
+#endif // __UNWIND_CHERI_H__
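
Note on the sealing scheme (illustrative only, not part of the patch): with _LIBUNWIND_SANDBOX_OTYPES the unwinder obtains a sealing capability from the runtime linker via _rtld_unw_getsealer(), and with _LIBUNWIND_SANDBOX_HARDENED it additionally keeps every capability it stores in the unwind context sealed with that otype, unsealing only in __unw_resume() just before jumping to the restored context. The sketch below shows the seal/unseal round-trip the patch applies to a single register value, using only the __builtin_cheri_* intrinsics already used above; the helper names protect_for_context/release_from_context are hypothetical, and the snippet assumes a CHERI purecap compiler where uintcap_t is available from <stdint.h>.

#include <stdint.h>

// Hypothetical helpers mirroring the hardened-mode pattern used in
// stepWithDwarf()/restoreCalleeSavedRegisters() and in
// Registers_arm64::unsealCalleeSavedRegisters().
static uintcap_t protect_for_context(uintcap_t value, uintcap_t sealer) {
  // Only seal values that carry a valid tag and are not already sealed;
  // sentries and already-sealed capabilities are stored as-is.
  if (__builtin_cheri_tag_get((void *)sealer) &&
      __builtin_cheri_tag_get((void *)value) &&
      !__builtin_cheri_sealed_get((void *)value))
    return (uintcap_t)__builtin_cheri_seal((void *)value, (void *)sealer);
  return value;
}

static uintcap_t release_from_context(uintcap_t value, uintcap_t sealer) {
  // Unseal on the way out; if unsealing clears the tag, the value was not
  // sealed with this otype (e.g. a sentry), so hand back the original.
  if (__builtin_cheri_tag_get((void *)value) &&
      __builtin_cheri_sealed_get((void *)value)) {
    uintcap_t unsealed =
        (uintcap_t)__builtin_cheri_unseal((void *)value, (void *)sealer);
    if (__builtin_cheri_tag_get((void *)unsealed))
      return unsealed;
  }
  return value;
}

Under this sketch, anything a caller could recover from an unw_cursor_t while a step is in flight is an opaque sealed capability rather than a usable pointer into another compartment's stack, which is the invariant the _LIBUNWIND_SANDBOX_HARDENED changes above are enforcing.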