From f1bf7badbaaf3ce7782c6eb755fdf858d1013d8e Mon Sep 17 00:00:00 2001
From: Jonathan Grynspan
Date: Wed, 20 Oct 2021 18:31:13 -0400
Subject: [PATCH] [SE-0322] Temporary uninitialized buffers

Adds two new IRGen-level builtins (one for allocating, the other for
deallocating), a stdlib shim function for enhanced stack-promotion
heuristics, and the proposed public stdlib functions.
---
 include/swift/AST/Builtins.def                |  23 ++
 include/swift/AST/DiagnosticsIRGen.def        |   9 +
 lib/AST/Builtins.cpp                          |  17 ++
 lib/IRGen/GenOpaque.cpp                       |  14 +-
 lib/IRGen/IRGenFunction.h                     |   5 +-
 lib/IRGen/IRGenSIL.cpp                        | 138 ++++++++-
 lib/SIL/IR/OperandOwnership.cpp               |   2 +
 lib/SIL/IR/SILInstruction.cpp                 |  13 +
 lib/SIL/IR/ValueOwnership.cpp                 |   2 +
 lib/SIL/Utils/MemAccessUtils.cpp              |   2 +
 lib/SIL/Verifier/SILVerifier.cpp              |   1 +
 .../AccessEnforcementReleaseSinking.cpp       |   2 +
 stdlib/public/SwiftShims/RuntimeShims.h       |  32 +++
 stdlib/public/SwiftShims/Visibility.h         |   6 +
 stdlib/public/core/Builtin.swift              |  14 +
 stdlib/public/core/CMakeLists.txt             |   1 +
 stdlib/public/core/GroupInfo.json             |   1 +
 stdlib/public/core/TemporaryAllocation.swift  | 265 ++++++++++++++++++
 stdlib/public/stubs/Stubs.cpp                 | 100 +++++++
 test/IRGen/temporary_allocation/async.swift   |  13 +
 .../temporary_allocation/bad_constants.swift  |   9 +
 test/IRGen/temporary_allocation/codegen.swift |  97 +++++++
 .../negative_alignment.swift                  |   9 +
 .../temporary_allocation/negative_size.swift  |   9 +
 .../non_power2_alignment.swift                |   9 +
 test/stdlib/TemporaryAllocation.swift         | 124 ++++++++
 26 files changed, 906 insertions(+), 11 deletions(-)
 create mode 100644 stdlib/public/core/TemporaryAllocation.swift
 create mode 100644 test/IRGen/temporary_allocation/async.swift
 create mode 100644 test/IRGen/temporary_allocation/bad_constants.swift
 create mode 100644 test/IRGen/temporary_allocation/codegen.swift
 create mode 100644 test/IRGen/temporary_allocation/negative_alignment.swift
 create mode 100644 test/IRGen/temporary_allocation/negative_size.swift
 create mode 100644 test/IRGen/temporary_allocation/non_power2_alignment.swift
 create mode 100644 test/stdlib/TemporaryAllocation.swift

diff --git a/include/swift/AST/Builtins.def b/include/swift/AST/Builtins.def
index 638ece1880fca..eb43af3a51333 100644
--- a/include/swift/AST/Builtins.def
+++ b/include/swift/AST/Builtins.def
@@ -603,6 +603,29 @@ BUILTIN_MISC_OPERATION(AllocRaw, "allocRaw", "", Special)
 /// was allocated.
 BUILTIN_MISC_OPERATION(DeallocRaw, "deallocRaw", "", Special)
 
+/// StackAlloc has type (Int, Int, Int) -> Builtin.RawPointer
+///
+/// Parameters: capacity, stride, alignment
+///
+/// The resulting pointer comes from the stack (as in the non-standard C
+/// extension `alloca()`). It is at least as aligned as specified and is valid
+/// until the end of the calling scope.
+///
+/// The capacity and stride are multiplied together to get the byte count to
+/// use for the allocation.
+///
+/// The passed alignment must be a positive power of two. If the alignment
+/// value is not known at compile time, MaximumAlignment is assumed.
+BUILTIN_MISC_OPERATION(StackAlloc, "stackAlloc", "", Special)
+
+/// StackDealloc has type (Builtin.RawPointer) -> ()
+///
+/// Parameters: address
+///
+/// The range starting at `address`, previously allocated with
+/// Builtin.stackAlloc(), is deallocated from the stack.
+BUILTIN_MISC_OPERATION(StackDealloc, "stackDealloc", "", Special)
+
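+// A well-formed use pairs these two builtins in strict LIFO order within a
+// single scope; the SIL verifier rejects mismatched pairs. For example, a
+// sketch in stdlib-internal Swift (names illustrative):
+//
+//   let p = Builtin.stackAlloc(capacity._builtinWordValue,
+//                              stride._builtinWordValue,
+//                              alignment._builtinWordValue)
+//   // ... initialize, use, and deinitialize the memory at p ...
+//   Builtin.stackDealloc(p)
+
 /// Fence has type () -> ().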
BUILTIN_MISC_OPERATION(Fence, "fence", "", None) diff --git a/include/swift/AST/DiagnosticsIRGen.def b/include/swift/AST/DiagnosticsIRGen.def index bf1cff4ce4503..f4f9b068528d8 100644 --- a/include/swift/AST/DiagnosticsIRGen.def +++ b/include/swift/AST/DiagnosticsIRGen.def @@ -50,5 +50,14 @@ ERROR(alignment_more_than_maximum,none, "@_alignment cannot increase alignment above maximum alignment of %0", (unsigned)) +ERROR(temporary_allocation_size_negative,none, + "allocation capacity must be greater than or equal to zero", ()) +ERROR(temporary_allocation_size_overflow,none, + "allocation byte count too large", ()) +ERROR(temporary_allocation_alignment_not_positive,none, + "alignment value must be greater than zero", ()) +ERROR(temporary_allocation_alignment_not_power_of_2,none, + "alignment value must be a power of two", ()) + #define UNDEFINE_DIAGNOSTIC_MACROS #include "DefineDiagnosticMacros.h" diff --git a/lib/AST/Builtins.cpp b/lib/AST/Builtins.cpp index b7d9f105b2710..08fa8b9e8e9cf 100644 --- a/lib/AST/Builtins.cpp +++ b/lib/AST/Builtins.cpp @@ -1024,6 +1024,18 @@ static ValueDecl *getDeallocOperation(ASTContext &ctx, Identifier id) { _void); } +static ValueDecl *getStackAllocOperation(ASTContext &ctx, Identifier id) { + return getBuiltinFunction(ctx, id, _thin, + _parameters(_word, _word, _word), + _rawPointer); +} + +static ValueDecl *getStackDeallocOperation(ASTContext &ctx, Identifier id) { + return getBuiltinFunction(ctx, id, _thin, + _parameters(_rawPointer), + _void); +} + static ValueDecl *getFenceOperation(ASTContext &ctx, Identifier id) { return getBuiltinFunction(ctx, id, _thin, _parameters(), _void); } @@ -2635,6 +2647,11 @@ ValueDecl *swift::getBuiltinValueDecl(ASTContext &Context, Identifier Id) { case BuiltinValueKind::DeallocRaw: return getDeallocOperation(Context, Id); + case BuiltinValueKind::StackAlloc: + return getStackAllocOperation(Context, Id); + case BuiltinValueKind::StackDealloc: + return getStackDeallocOperation(Context, Id); + case BuiltinValueKind::CastToNativeObject: case BuiltinValueKind::UnsafeCastToNativeObject: case BuiltinValueKind::CastFromNativeObject: diff --git a/lib/IRGen/GenOpaque.cpp b/lib/IRGen/GenOpaque.cpp index 776f953c95a88..fb62ca43beda5 100644 --- a/lib/IRGen/GenOpaque.cpp +++ b/lib/IRGen/GenOpaque.cpp @@ -533,15 +533,16 @@ irgen::emitInitializeBufferWithCopyOfBufferCall(IRGenFunction &IGF, StackAddress IRGenFunction::emitDynamicAlloca(SILType T, const llvm::Twine &name) { llvm::Value *size = emitLoadOfSize(*this, T); - return emitDynamicAlloca(IGM.Int8Ty, size, Alignment(16), name); + return emitDynamicAlloca(IGM.Int8Ty, size, Alignment(16), true, name); } StackAddress IRGenFunction::emitDynamicAlloca(llvm::Type *eltTy, llvm::Value *arraySize, Alignment align, + bool allowTaskAlloc, const llvm::Twine &name) { // Async functions call task alloc. - if (isAsync()) { + if (allowTaskAlloc && isAsync()) { llvm::Value *byteCount; auto eltSize = IGM.DataLayout.getTypeAllocSize(eltTy); if (eltSize == 1) { @@ -556,6 +557,8 @@ StackAddress IRGenFunction::emitDynamicAlloca(llvm::Type *eltTy, return {address, address.getAddress()}; // In coroutines, call llvm.coro.alloca.alloc. } else if (isCoroutine()) { + // NOTE: llvm does not support dynamic allocas in coroutines. + // Compute the number of bytes to allocate. 
llvm::Value *byteCount; auto eltSize = IGM.DataLayout.getTypeAllocSize(eltTy); @@ -606,9 +609,10 @@ StackAddress IRGenFunction::emitDynamicAlloca(llvm::Type *eltTy, /// Deallocate dynamic alloca's memory if requested by restoring the stack /// location before the dynamic alloca's call. -void IRGenFunction::emitDeallocateDynamicAlloca(StackAddress address) { +void IRGenFunction::emitDeallocateDynamicAlloca(StackAddress address, + bool allowTaskDealloc) { // Async function use taskDealloc. - if (isAsync() && address.getAddress().isValid()) { + if (allowTaskDealloc && isAsync() && address.getAddress().isValid()) { emitTaskDealloc(Address(address.getExtraInfo(), address.getAlignment())); return; } @@ -617,6 +621,8 @@ void IRGenFunction::emitDeallocateDynamicAlloca(StackAddress address) { // for a partial_apply [stack] that did not need a context object on the // stack. else if (isCoroutine() && address.getAddress().isValid()) { + // NOTE: llvm does not support dynamic allocas in coroutines. + auto allocToken = address.getExtraInfo(); assert(allocToken && "dynamic alloca in coroutine without alloc token?"); auto freeFn = llvm::Intrinsic::getDeclaration( diff --git a/lib/IRGen/IRGenFunction.h b/lib/IRGen/IRGenFunction.h index a7a45224b6483..b42351ae2c4b6 100644 --- a/lib/IRGen/IRGenFunction.h +++ b/lib/IRGen/IRGenFunction.h @@ -222,9 +222,10 @@ class IRGenFunction { StackAddress emitDynamicAlloca(SILType type, const llvm::Twine &name = ""); StackAddress emitDynamicAlloca(llvm::Type *eltTy, llvm::Value *arraySize, - Alignment align, + Alignment align, bool allowTaskAlloc = true, const llvm::Twine &name = ""); - void emitDeallocateDynamicAlloca(StackAddress address); + void emitDeallocateDynamicAlloca(StackAddress address, + bool allowTaskDealloc = true); llvm::BasicBlock *createBasicBlock(const llvm::Twine &Name); const TypeInfo &getTypeInfoForUnlowered(Type subst); diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index e6b71bd3ac6eb..2374c35896f3b 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -18,6 +18,7 @@ #define DEBUG_TYPE "irgensil" #include "swift/AST/ASTContext.h" +#include "swift/AST/DiagnosticsIRGen.h" #include "swift/AST/IRGenOptions.h" #include "swift/AST/ParameterList.h" #include "swift/AST/Pattern.h" @@ -57,6 +58,7 @@ #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" #include "llvm/Support/SaveAndRestore.h" #include "llvm/Transforms/Utils/Local.h" @@ -436,6 +438,11 @@ class IRGenSILFunction : /// Calculates EstimatedStackSize. void estimateStackSize(); + inline bool isAddress(SILValue v) const { + SILType type = v->getType(); + return type.isAddress() || type.getASTType() == IGM.Context.TheRawPointerType; + } + void setLoweredValue(SILValue v, LoweredValue &&lv) { auto inserted = LoweredValues.insert({v, std::move(lv)}); assert(inserted.second && "already had lowered value for sil value?!"); @@ -444,31 +451,31 @@ class IRGenSILFunction : /// Create a new Address corresponding to the given SIL address value. 
    void setLoweredAddress(SILValue v, const Address &address) {
-      assert(v->getType().isAddress() && "address for non-address value?!");
+      assert(isAddress(v) && "address for non-address value?!");
       setLoweredValue(v, address);
     }
 
     void setLoweredStackAddress(SILValue v, const StackAddress &address) {
-      assert(v->getType().isAddress() && "address for non-address value?!");
+      assert(isAddress(v) && "address for non-address value?!");
       setLoweredValue(v, address);
     }
 
     void setLoweredDynamicallyEnforcedAddress(SILValue v,
                                               const Address &address,
                                               llvm::Value *scratch) {
-      assert(v->getType().isAddress() && "address for non-address value?!");
+      assert(isAddress(v) && "address for non-address value?!");
       setLoweredValue(v, DynamicallyEnforcedAddress{address, scratch});
     }
 
     void setContainerOfUnallocatedAddress(SILValue v,
                                           const Address &buffer) {
-      assert(v->getType().isAddress() && "address for non-address value?!");
+      assert(isAddress(v) && "address for non-address value?!");
       setLoweredValue(v,
         LoweredValue(buffer, LoweredValue::ContainerForUnallocatedAddress));
     }
 
     void overwriteAllocatedAddress(SILValue v, const Address &address) {
-      assert(v->getType().isAddress() && "address for non-address value?!");
+      assert(isAddress(v) && "address for non-address value?!");
       auto it = LoweredValues.find(v);
       assert(it != LoweredValues.end() && "no existing entry for overwrite?");
       assert(it->second.isUnallocatedAddressInBuffer() &&
@@ -1623,6 +1630,9 @@ void LoweredValue::getExplosion(IRGenFunction &IGF, SILType type,
                                 Explosion &ex) const {
   switch (kind) {
   case Kind::StackAddress:
+    ex.add(Storage.get<StackAddress>(kind).getAddressPointer());
+    return;
+
   case Kind::ContainedAddress:
   case Kind::DynamicallyEnforcedAddress:
   case Kind::CoroutineState:
@@ -2958,9 +2968,127 @@ static std::unique_ptr<CallEmission> getCallEmissionForLoweredValue(
   return callEmission;
 }
 
+/// Get the size passed to stackAlloc().
+static llvm::Value *getStackAllocationSize(IRGenSILFunction &IGF,
+                                           SILValue vCapacity,
+                                           SILValue vStride,
+                                           SourceLoc loc) {
+  auto &Diags = IGF.IGM.Context.Diags;
+
+  // Check for a negative capacity, which is invalid.
+  auto capacity = IGF.getLoweredSingletonExplosion(vCapacity);
+  Optional<int64_t> capacityValue;
+  if (auto capacityConst = dyn_cast<llvm::ConstantInt>(capacity)) {
+    capacityValue = capacityConst->getSExtValue();
+    if (*capacityValue < 0) {
+      Diags.diagnose(loc, diag::temporary_allocation_size_negative);
+    }
+  }
+
+  // Check for a negative stride, which should never occur because the caller
+  // should always be using MemoryLayout<T>.stride to produce this value.
+  auto stride = IGF.getLoweredSingletonExplosion(vStride);
+  Optional<int64_t> strideValue;
+  if (auto strideConst = dyn_cast<llvm::ConstantInt>(stride)) {
+    strideValue = strideConst->getSExtValue();
+    if (*strideValue < 0) {
+      llvm_unreachable("Builtin.stackAlloc() caller passed an invalid stride");
+    }
+  }
+
+  // Get the byte count (the product of capacity and stride).
+  llvm::Value *result = nullptr;
+  if (capacityValue && strideValue) {
+    int64_t byteCount = 0;
+    auto overflow = llvm::MulOverflow(*capacityValue, *strideValue, byteCount);
+    if (overflow) {
+      Diags.diagnose(loc, diag::temporary_allocation_size_overflow);
+    }
+    result = llvm::ConstantInt::get(IGF.IGM.SizeTy, byteCount);
+
+  } else {
+    // If either value is not known at compile-time, preconditions must be
+    // tested at runtime by Builtin.stackAlloc()'s caller. See
+    // _byteCountForTemporaryAllocation(of:capacity:).
+    result = IGF.Builder.CreateMul(capacity, stride);
+  }
+
+  // If the caller requests a zero-byte allocation, allocate one byte instead
+  // to ensure that the resulting pointer is valid and unique on the stack.
+  return IGF.Builder.CreateIntrinsicCall(llvm::Intrinsic::umax,
+    {IGF.IGM.SizeTy}, {llvm::ConstantInt::get(IGF.IGM.SizeTy, 1), result});
+}
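+
+// For example, a call that inlines down to `Builtin.stackAlloc(-1, 1, 1)`
+// reaches getStackAllocationSize() with a constant capacity of -1, so the
+// negative-capacity diagnostic above fires at compile time; with dynamic
+// operands, the equivalent checks are the stdlib's runtime preconditions.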
+
+/// Get the alignment passed to stackAlloc() as a compile-time constant.
+///
+/// If the specified alignment is not known at compile time or is not valid,
+/// the default maximum alignment is substituted.
+static Alignment getStackAllocationAlignment(IRGenSILFunction &IGF,
+                                             SILValue v,
+                                             SourceLoc loc) {
+  auto &Diags = IGF.IGM.Context.Diags;
+
+  // Check for a non-positive alignment, which is invalid.
+  auto align = IGF.getLoweredSingletonExplosion(v);
+  if (auto alignConst = dyn_cast<llvm::ConstantInt>(align)) {
+    auto alignValue = alignConst->getSExtValue();
+    if (alignValue <= 0) {
+      Diags.diagnose(loc, diag::temporary_allocation_alignment_not_positive);
+    } else if (!llvm::isPowerOf2_64(alignValue)) {
+      Diags.diagnose(loc, diag::temporary_allocation_alignment_not_power_of_2);
+    } else {
+      return Alignment(alignValue);
+    }
+  }
+
+  // If the alignment is not known at compile-time, preconditions must be
+  // tested at runtime by Builtin.stackAlloc()'s caller. See
+  // _isStackAllocationSafe(byteCount:alignment:).
+  return Alignment(MaximumAlignment);
+}
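+
+// For example, an alignment written as `Int.random(in: 0 ..< 16)` is not a
+// compile-time constant, so getStackAllocationAlignment() above returns
+// MaximumAlignment (16 bytes on current targets) and leaves rejecting invalid
+// values to the stdlib's runtime preconditions.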
+
+/// Emit a call to a stack allocation builtin (stackAlloc() or stackDealloc()).
+///
+/// Returns whether or not `i` was such a builtin (true if so, false if it was
+/// some other builtin).
+static bool emitStackAllocBuiltinCall(IRGenSILFunction &IGF,
+                                      swift::BuiltinInst *i) {
+  if (i->getBuiltinKind() == BuiltinValueKind::StackAlloc) {
+    // Stack-allocate a buffer with the specified size/alignment.
+    auto loc = i->getLoc().getSourceLoc();
+    auto size = getStackAllocationSize(
+      IGF, i->getOperand(0), i->getOperand(1), loc);
+    auto align = getStackAllocationAlignment(IGF, i->getOperand(2), loc);
+
+    auto stackAddress = IGF.emitDynamicAlloca(IGF.IGM.Int8Ty, size, align,
+                                              false, "temp_alloc");
+    IGF.setLoweredStackAddress(i, stackAddress);
+
+    return true;
+
+  } else if (i->getBuiltinKind() == BuiltinValueKind::StackDealloc) {
+    // Deallocate a stack address previously allocated with the StackAlloc
+    // builtin above.
+    auto address = i->getOperand(0);
+    auto stackAddress = IGF.getLoweredStackAddress(address);
+
+    if (stackAddress.getAddress().isValid()) {
+      IGF.emitDeallocateDynamicAlloca(stackAddress, false);
+    }
+
+    return true;
+  }
+
+  return false;
+}
+
 void IRGenSILFunction::visitBuiltinInst(swift::BuiltinInst *i) {
   const BuiltinInfo &builtin = getSILModule().getBuiltinInfo(i->getName());
 
+  if (emitStackAllocBuiltinCall(*this, i)) {
+    return;
+  }
+
   auto argValues = i->getArguments();
   Explosion args;
   SmallVector<SILType, 4> argTypes;
diff --git a/lib/SIL/IR/OperandOwnership.cpp b/lib/SIL/IR/OperandOwnership.cpp
index 4d5c2557a6037..8a9a9d387cb67 100644
--- a/lib/SIL/IR/OperandOwnership.cpp
+++ b/lib/SIL/IR/OperandOwnership.cpp
@@ -732,6 +732,8 @@ BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, SMulOver)
 BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, SRem)
 BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, GenericSRem)
 BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, SSubOver)
+BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, StackAlloc)
+BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, StackDealloc)
 BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, SToSCheckedTrunc)
 BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, SToUCheckedTrunc)
 BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, Expect)
diff --git a/lib/SIL/IR/SILInstruction.cpp b/lib/SIL/IR/SILInstruction.cpp
index fbe7b72d1c7ab..15968190b1902 100644
--- a/lib/SIL/IR/SILInstruction.cpp
+++ b/lib/SIL/IR/SILInstruction.cpp
@@ -1264,6 +1264,12 @@ bool SILInstruction::isAllocatingStack() const {
   if (auto *PA = dyn_cast<PartialApplyInst>(this))
     return PA->isOnStack();
 
+  if (auto *BI = dyn_cast<BuiltinInst>(this)) {
+    if (BI->getBuiltinKind() == BuiltinValueKind::StackAlloc) {
+      return true;
+    }
+  }
+
   return false;
 }
 
@@ -1275,6 +1281,13 @@ bool SILInstruction::isDeallocatingStack() const {
     if (DRI->canAllocOnStack())
       return true;
   }
+
+  if (auto *BI = dyn_cast<BuiltinInst>(this)) {
+    if (BI->getBuiltinKind() == BuiltinValueKind::StackDealloc) {
+      return true;
+    }
+  }
+
   return false;
 }
diff --git a/lib/SIL/IR/ValueOwnership.cpp b/lib/SIL/IR/ValueOwnership.cpp
index 1912b94313cf3..a71768ed4c692 100644
--- a/lib/SIL/IR/ValueOwnership.cpp
+++ b/lib/SIL/IR/ValueOwnership.cpp
@@ -487,6 +487,8 @@ CONSTANT_OWNERSHIP_BUILTIN(None, Alignof)
 CONSTANT_OWNERSHIP_BUILTIN(None, AllocRaw)
 CONSTANT_OWNERSHIP_BUILTIN(None, AssertConf)
 CONSTANT_OWNERSHIP_BUILTIN(None, UToSCheckedTrunc)
+CONSTANT_OWNERSHIP_BUILTIN(None, StackAlloc)
+CONSTANT_OWNERSHIP_BUILTIN(None, StackDealloc)
 CONSTANT_OWNERSHIP_BUILTIN(None, SToSCheckedTrunc)
 CONSTANT_OWNERSHIP_BUILTIN(None, SToUCheckedTrunc)
 CONSTANT_OWNERSHIP_BUILTIN(None, UToUCheckedTrunc)
diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp
index 1506728e92b3a..e2aaa2d383920 100644
--- a/lib/SIL/Utils/MemAccessUtils.cpp
+++ b/lib/SIL/Utils/MemAccessUtils.cpp
@@ -2154,6 +2154,8 @@ static void visitBuiltinAddress(BuiltinInst *builtin,
   case BuiltinValueKind::CondFailMessage:
   case BuiltinValueKind::AllocRaw:
   case BuiltinValueKind::DeallocRaw:
+  case BuiltinValueKind::StackAlloc:
+  case BuiltinValueKind::StackDealloc:
   case BuiltinValueKind::Fence:
   case BuiltinValueKind::StaticReport:
   case BuiltinValueKind::Once:
diff --git a/lib/SIL/Verifier/SILVerifier.cpp b/lib/SIL/Verifier/SILVerifier.cpp
index 78b4caa5e238d..7acbabf3ae374 100644
--- a/lib/SIL/Verifier/SILVerifier.cpp
+++ b/lib/SIL/Verifier/SILVerifier.cpp
@@ -5410,6 +5410,7 @@ class SILVerifier : public SILVerifierBase<SILVerifier> {
              "stack dealloc with empty stack");
     if (op != state.Stack.back()) {
       llvm::errs() << "Recent stack alloc: " << *state.Stack.back();
+
llvm::errs() << "Matching stack alloc: " << *op; require(op == state.Stack.back(), "stack dealloc does not match most recent stack alloc"); } diff --git a/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp b/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp index 371a702885f44..521e107a9feb7 100644 --- a/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp +++ b/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp @@ -148,6 +148,8 @@ static bool isBarrier(SILInstruction *inst) { case BuiltinValueKind::EndAsyncLetLifetime: case BuiltinValueKind::CreateTaskGroup: case BuiltinValueKind::DestroyTaskGroup: + case BuiltinValueKind::StackAlloc: + case BuiltinValueKind::StackDealloc: return false; // Handle some rare builtins that may be sensitive to object lifetime diff --git a/stdlib/public/SwiftShims/RuntimeShims.h b/stdlib/public/SwiftShims/RuntimeShims.h index b5ffb04c81958..f197dbcf7a942 100644 --- a/stdlib/public/SwiftShims/RuntimeShims.h +++ b/stdlib/public/SwiftShims/RuntimeShims.h @@ -18,6 +18,7 @@ #ifndef SWIFT_STDLIB_SHIMS_RUNTIMESHIMS_H #define SWIFT_STDLIB_SHIMS_RUNTIMESHIMS_H +#include "SwiftStdbool.h" #include "SwiftStddef.h" #include "SwiftStdint.h" #include "Visibility.h" @@ -81,6 +82,37 @@ __swift_size_t _swift_stdlib_getHardwareConcurrency(void); /// the Swift runtime and user binaries need to agree on this value. #define _swift_MinAllocationAlignment (__swift_size_t) 16 +/// Checks if the @em current thread's stack has room for an allocation with +/// the specified size and alignment. +/// +/// @param byteCount The size of the desired allocation in bytes. +/// @param alignment The alignment of the desired allocation in bytes. +/// +/// @returns Whether or not the desired allocation can be safely performed on +/// the current thread's stack. +/// +/// This function is used by +/// @c withUnsafeTemporaryAllocation(byteCount:alignment:_:). +SWIFT_RUNTIME_STDLIB_API SWIFT_WEAK_IMPORT +__swift_bool swift_stdlib_isStackAllocationSafe(__swift_size_t byteCount, + __swift_size_t alignment); + +/// Get the bounds of the current thread's stack. +/// +/// @param outBegin On successful return, the beginning (lower bound) of the +/// current thread's stack. +/// @param outEnd On successful return, the end (upper bound) of the current +/// thread's stack. +/// +/// @returns Whether or not the stack bounds could be read. Not all platforms +/// support reading these values. +/// +/// This function is used by the stdlib test suite when testing +/// @c withUnsafeTemporaryAllocation(byteCount:alignment:_:). +SWIFT_RUNTIME_STDLIB_SPI +__swift_bool _swift_stdlib_getCurrentStackBounds(__swift_uintptr_t *outBegin, + __swift_uintptr_t *outEnd); + #ifdef __cplusplus } // extern "C" #endif diff --git a/stdlib/public/SwiftShims/Visibility.h b/stdlib/public/SwiftShims/Visibility.h index 6ad87aa8ea180..9fd8b0453b733 100644 --- a/stdlib/public/SwiftShims/Visibility.h +++ b/stdlib/public/SwiftShims/Visibility.h @@ -103,6 +103,12 @@ #define SWIFT_ATTRIBUTE_UNAVAILABLE #endif +#if (__has_attribute(weak_import)) +#define SWIFT_WEAK_IMPORT __attribute__((weak_import)) +#else +#define SWIFT_WEAK_IMPORT +#endif + // Define the appropriate attributes for sharing symbols across // image (executable / shared-library) boundaries. 
 //
diff --git a/stdlib/public/core/Builtin.swift b/stdlib/public/core/Builtin.swift
index ddf55f3a6403e..09a4209fd91bf 100644
--- a/stdlib/public/core/Builtin.swift
+++ b/stdlib/public/core/Builtin.swift
@@ -736,6 +736,20 @@ func _isOptional<T>(_ type: T.Type) -> Bool {
   return Bool(Builtin.isOptional(type))
 }
 
+/// Test whether a value is computed (i.e., it is not a compile-time constant).
+///
+/// - Parameters:
+///   - value: The value to test.
+///
+/// - Returns: Whether or not `value` is computed (not known at compile time).
+///
+/// Optimizations performed at various stages during compilation may affect the
+/// result of this function.
+@_alwaysEmitIntoClient @inline(__always)
+internal func _isComputed(_ value: Int) -> Bool {
+  return !Bool(Builtin.int_is_constant_Word(value._builtinWordValue))
+}
+
 /// Extract an object reference from an Any known to contain an object.
 @inlinable
 internal func _unsafeDowncastToAnyObject(fromAny any: Any) -> AnyObject {
diff --git a/stdlib/public/core/CMakeLists.txt b/stdlib/public/core/CMakeLists.txt
index c7bf03eeca4f1..cecf748bebecb 100644
--- a/stdlib/public/core/CMakeLists.txt
+++ b/stdlib/public/core/CMakeLists.txt
@@ -171,6 +171,7 @@ set(SWIFTLIB_ESSENTIAL
   StringUTF8Validation.swift
   Substring.swift
   SwiftNativeNSArray.swift
+  TemporaryAllocation.swift
   ThreadLocalStorage.swift
   UIntBuffer.swift
   UnavailableStringAPIs.swift
diff --git a/stdlib/public/core/GroupInfo.json b/stdlib/public/core/GroupInfo.json
index 6c5bf916fde33..cdc44175c1d19 100644
--- a/stdlib/public/core/GroupInfo.json
+++ b/stdlib/public/core/GroupInfo.json
@@ -180,6 +180,7 @@
   ],
   "Pointer": [
     "Pointer.swift",
+    "TemporaryAllocation.swift",
     "UnsafePointer.swift",
     "UnsafeRawPointer.swift",
     "UnsafeBufferPointer.swift",
diff --git a/stdlib/public/core/TemporaryAllocation.swift b/stdlib/public/core/TemporaryAllocation.swift
new file mode 100644
index 0000000000000..660654d05436b
--- /dev/null
+++ b/stdlib/public/core/TemporaryAllocation.swift
@@ -0,0 +1,265 @@
+//===----------------------------------------------------------------------===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+import SwiftShims
+
+// MARK: Support functions
+
+/// Get the byte count required for an allocation with the specified element
+/// type and capacity.
+///
+/// - Parameters:
+///   - type: The type of the elements in the allocation.
+///   - capacity: The number of elements of type `type` to allocate space for.
+///     `capacity` must not be negative.
+///
+/// - Returns: The number of bytes to allocate: the product of `capacity` and
+///   the stride of `type`.
+@_alwaysEmitIntoClient @_transparent
+internal func _byteCountForTemporaryAllocation<T>(
+  of type: T.Type,
+  capacity: Int
+) -> Int {
+  // PRECONDITIONS: Negatively-sized buffers obviously cannot be allocated on
+  // the stack (or anywhere else).
+  //
+  // NOTE: This function only makes its precondition checks for non-constant
+  // inputs. If it makes them for constant inputs, it prevents the compiler
+  // from emitting equivalent compile-time diagnostics because the call to
+  // Builtin.stackAlloc() becomes unreachable.
+  if _isComputed(capacity) {
+    _precondition(capacity >= 0,
+      "Allocation capacity must be greater than or equal to zero")
+  }
+  let stride = MemoryLayout<T>.stride
+  let (byteCount, overflow) = capacity.multipliedReportingOverflow(by: stride)
+  if _isComputed(capacity) {
+    _precondition(!overflow, "Allocation byte count too large")
+  }
+  return byteCount
+}
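+
+// For example, a hypothetical `_byteCountForTemporaryAllocation(of: Int32.self,
+// capacity: 4)` call has a stride of 4 and returns 16; the same call with a
+// capacity of -1 traps here only when the capacity is not a compile-time
+// constant (constant inputs are instead diagnosed by IRGen).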
+
+/// Will an allocation of the specified size fit on the stack?
+///
+/// - Parameters:
+///   - byteCount: The number of bytes to temporarily allocate. `byteCount`
+///     must not be negative.
+///   - alignment: The alignment of the temporary allocation. `alignment` must
+///     be a whole power of 2.
+///
+/// - Returns: Whether or not there is sufficient space on the stack to
+///   allocate `byteCount` bytes of memory.
+@_alwaysEmitIntoClient @_transparent
+internal func _isStackAllocationSafe(byteCount: Int, alignment: Int) -> Bool {
+  // PRECONDITIONS: Non-positive alignments are nonsensical, as are
+  // non-power-of-two alignments.
+  if _isComputed(alignment) {
+    _precondition(alignment > 0, "Alignment value must be greater than zero")
+    _precondition(_isPowerOf2(alignment),
+      "Alignment value must be a power of two")
+  }
+
+  // If the alignment is larger than MaximumAlignment (the value that
+  // _minAllocationAlignment() returns), the allocation is always performed on
+  // the heap. There are two reasons why:
+  // 1. llvm's alloca instruction can take any power-of-two alignment value,
+  //    but will produce unsafe assembly when that value is very large (i.e. it
+  //    risks a stack overflow).
+  // 2. For non-constant values, we have no way to know what value to pass to
+  //    alloca and always pass MaximumAlignment. This may be incorrect if the
+  //    caller really wants a larger alignment.
+  if alignment > _minAllocationAlignment() {
+    return false
+  }
+
+  // Allocations smaller than this limit are reasonable to allocate on the
+  // stack without worrying about running out of space, and the compiler would
+  // emit such allocations on the stack anyway when they represent structures
+  // or stack-promoted objects.
+  if byteCount <= 1024 {
+    return true
+  }
+
+  // Finally, take a slow path through the standard library to see if the
+  // current environment can accept a larger stack allocation.
+  guard #available(macOS 9999, iOS 9999, watchOS 9999, tvOS 9999, *) else {
+    return false
+  }
+  return swift_stdlib_isStackAllocationSafe(byteCount, alignment)
+}
+
+/// Provides scoped access to a raw pointer with space for the specified number
+/// of elements of the specified type and with the specified alignment.
+///
+/// - Parameters:
+///   - type: The type of the elements in the buffer being temporarily
+///     allocated. For untyped buffers, use `Int8.self`.
+///   - capacity: The number of elements of type `type` to allocate space for.
+///     `capacity` must not be negative.
+///   - alignment: The alignment of the new, temporary region of allocated
+///     memory, in bytes. `alignment` must be a whole power of 2.
+///   - body: A closure to invoke and to which the allocated buffer pointer
+///     should be passed.
+///
+/// - Returns: Whatever is returned by `body`.
+///
+/// - Throws: Whatever is thrown by `body`.
+///
+/// This function encapsulates the various calls to builtins required by
+/// `withUnsafeTemporaryAllocation()`.
+@_alwaysEmitIntoClient @_transparent
+internal func _withUnsafeTemporaryAllocation<T, R>(
+  of type: T.Type,
+  capacity: Int,
+  alignment: Int,
+  _ body: (Builtin.RawPointer) throws -> R
+) rethrows -> R {
+  // How many bytes do we need to allocate?
+  let byteCount = _byteCountForTemporaryAllocation(of: type, capacity: capacity)
+
+  guard _isStackAllocationSafe(byteCount: byteCount, alignment: alignment) else {
+    // Fall back to the heap. This may still be optimizable if escape analysis
+    // shows that the allocated pointer does not escape.
+    let buffer = UnsafeMutableRawPointer.allocate(
+      byteCount: byteCount,
+      alignment: alignment
+    )
+    defer {
+      buffer.deallocate()
+    }
+    return try body(buffer._rawValue)
+  }
+
+  // This declaration must come BEFORE Builtin.stackAlloc() or
+  // Builtin.stackDealloc() will end up blowing it away (and the verifier will
+  // notice and complain).
+  let result: R
+
+  let stackAddress = Builtin.stackAlloc(
+    capacity._builtinWordValue,
+    MemoryLayout<T>.stride._builtinWordValue,
+    alignment._builtinWordValue
+  )
+
+  // The multiple calls to Builtin.stackDealloc() are because defer { } produces
+  // a child function at the SIL layer and that conflicts with the verifier's
+  // idea of a stack allocation's lifetime.
+  do {
+    result = try body(stackAddress)
+    Builtin.stackDealloc(stackAddress)
+    return result
+
+  } catch {
+    Builtin.stackDealloc(stackAddress)
+    throw error
+  }
+}
+
+// MARK: - Public interface
+
+/// Provides scoped access to a raw buffer pointer with the specified byte count
+/// and alignment.
+///
+/// - Parameters:
+///   - byteCount: The number of bytes to temporarily allocate. `byteCount` must
+///     not be negative.
+///   - alignment: The alignment of the new, temporary region of allocated
+///     memory, in bytes. `alignment` must be a whole power of 2.
+///   - body: A closure to invoke and to which the allocated buffer pointer
+///     should be passed.
+///
+/// - Returns: Whatever is returned by `body`.
+///
+/// - Throws: Whatever is thrown by `body`.
+///
+/// This function is useful for cheaply allocating raw storage for a brief
+/// duration. Storage may be allocated on the heap or on the stack, depending on
+/// the required size and alignment.
+///
+/// When `body` is called, the contents of the buffer pointer passed to it are
+/// in an unspecified, uninitialized state. `body` is responsible for
+/// initializing the buffer pointer before it is used _and_ for deinitializing
+/// it before returning, but deallocation is automatic.
+///
+/// The implementation may allocate a larger buffer pointer than is strictly
+/// necessary to contain `byteCount` bytes. The behavior of a program that
+/// attempts to access any such additional storage is undefined.
+///
+/// The buffer pointer passed to `body` (as well as any pointers to elements in
+/// the buffer) must not escape. It will be deallocated when `body` returns and
+/// cannot be used afterward.
+@_alwaysEmitIntoClient @_transparent
+public func withUnsafeTemporaryAllocation<R>(
+  byteCount: Int,
+  alignment: Int,
+  _ body: (UnsafeMutableRawBufferPointer) throws -> R
+) rethrows -> R {
+  return try _withUnsafeTemporaryAllocation(
+    of: Int8.self,
+    capacity: byteCount,
+    alignment: alignment
+  ) { pointer in
+    let buffer = UnsafeMutableRawBufferPointer(
+      start: .init(pointer),
+      count: byteCount
+    )
+    return try body(buffer)
+  }
+}
+
+/// Provides scoped access to a buffer pointer to memory of the specified type
+/// and with the specified capacity.
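+///
+/// For example (a sketch; the element type, capacity, and closure body are
+/// illustrative):
+///
+///     let sum = withUnsafeTemporaryAllocation(of: Int.self, capacity: 4) {
+///       buffer -> Int in
+///       buffer.initialize(repeating: 1)
+///       return buffer.reduce(0, +)
+///     }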
+///
+/// - Parameters:
+///   - type: The type of the elements in the buffer being temporarily
+///     allocated.
+///   - capacity: The capacity of the buffer pointer being temporarily
+///     allocated.
+///   - body: A closure to invoke and to which the allocated buffer pointer
+///     should be passed.
+///
+/// - Returns: Whatever is returned by `body`.
+///
+/// - Throws: Whatever is thrown by `body`.
+///
+/// This function is useful for cheaply allocating storage for a sequence of
+/// values for a brief duration. Storage may be allocated on the heap or on the
+/// stack, depending on the required size and alignment.
+///
+/// When `body` is called, the contents of the buffer pointer passed to it are
+/// in an unspecified, uninitialized state. `body` is responsible for
+/// initializing the buffer pointer before it is used _and_ for deinitializing
+/// it before returning, but deallocation is automatic.
+///
+/// The implementation may allocate a larger buffer pointer than is strictly
+/// necessary to contain `capacity` values of type `type`. The behavior of a
+/// program that attempts to access any such additional storage is undefined.
+///
+/// The buffer pointer passed to `body` (as well as any pointers to elements in
+/// the buffer) must not escape. It will be deallocated when `body` returns and
+/// cannot be used afterward.
+@_alwaysEmitIntoClient @_transparent
+public func withUnsafeTemporaryAllocation<T, R>(
+  of type: T.Type,
+  capacity: Int,
+  _ body: (UnsafeMutableBufferPointer<T>) throws -> R
+) rethrows -> R {
+  return try _withUnsafeTemporaryAllocation(
+    of: type,
+    capacity: capacity,
+    alignment: MemoryLayout<T>.alignment
+  ) { pointer in
+    Builtin.bindMemory(pointer, capacity._builtinWordValue, type)
+    let buffer = UnsafeMutableBufferPointer<T>(
+      start: .init(pointer),
+      count: capacity
+    )
+    return try body(buffer)
+  }
+}
diff --git a/stdlib/public/stubs/Stubs.cpp b/stdlib/public/stubs/Stubs.cpp
index 3d579d7be5481..48a06aff313df 100644
--- a/stdlib/public/stubs/Stubs.cpp
+++ b/stdlib/public/stubs/Stubs.cpp
@@ -58,6 +58,10 @@
 #include
 #endif
 
+#if defined(__FreeBSD__) || defined(__OpenBSD__)
+#include <pthread_np.h>
+#endif
+
 #include "swift/Runtime/Debug.h"
 #include "swift/Runtime/SwiftDtoa.h"
 #include "swift/Basic/Lazy.h"
@@ -455,3 +459,99 @@ size_t _swift_stdlib_getHardwareConcurrency() {
   return std::thread::hardware_concurrency();
 #endif
 }
+
+__swift_bool swift_stdlib_isStackAllocationSafe(__swift_size_t byteCount,
+                                                __swift_size_t alignment) {
+  // This function is not currently implemented. Future releases of Swift can
+  // implement heuristics in this function to allow for larger stack
+  // allocations if conditions are suitable. These heuristics need to be
+  // significantly cheaper than simply calling malloc().
+  //
+  // A possible implementation is provided below (#iffed out), but has not yet
+  // been measured for its performance characteristics. In particular, if the
+  // platform-specific functions we need to use end up calling malloc(), it's
+  // pointless to use them.
+  return false;
+
+#if 0
+  uintptr_t stackBegin = 0;
+  uintptr_t stackEnd = 0;
+  if (!_swift_stdlib_getCurrentStackBounds(&stackBegin, &stackEnd)) {
+    return false;
+  }
+
+  // Locate a value on the stack. The start of this function's stack frame is a
+  // good approximation.
+  uintptr_t stackAddress = (uintptr_t)__builtin_frame_address(0);
+  if (stackAddress < stackBegin || stackAddress >= stackEnd) {
+    // The stack range we got from the OS doesn't contain the stack address we
+    // just got. That may indicate that the current thread's stack has been
+    // moved (e.g. with sigaltstack()).
+    return false;
+  }
+
+  // How much space remains on the stack below that stack value?
+  uintptr_t stackRemaining = stackAddress - stackBegin;
+
+  // Make sure we leave some room at the end of the stack for other variables,
+  // allocations, etc. For a 1MB stack, we'll leave the last 64KB alone.
+  uintptr_t stackSafetyMargin = (stackEnd - stackBegin) >> 4;
+  if (stackRemaining < stackSafetyMargin) {
+    return false;
+  }
+
+  return stackRemaining >= byteCount;
+#endif
+}
+
+__swift_bool _swift_stdlib_getCurrentStackBounds(__swift_uintptr_t *outBegin,
+                                                 __swift_uintptr_t *outEnd) {
+#if defined(__APPLE__)
+  pthread_t thread = pthread_self();
+  // On Apple platforms, the stack grows down, and pthread_get_stackaddr_np()
+  // returns the address at which the stack starts growing: its numerically
+  // HIGHEST address. Report that value as the upper bound (outEnd) and
+  // subtract the stack's size from it to find the lower bound (outBegin).
+  void *end = pthread_get_stackaddr_np(thread);
+  if (!end) {
+    return false;
+  }
+  *outEnd = (uintptr_t)end;
+  *outBegin = *outEnd - pthread_get_stacksize_np(thread);
+  return true;
+
+#elif defined(_WIN32) && (_WIN32_WINNT >= 0x0602)
+  ULONG_PTR lowLimit = 0;
+  ULONG_PTR highLimit = 0;
+  GetCurrentThreadStackLimits(&lowLimit, &highLimit);
+  *outBegin = lowLimit;
+  *outEnd = highLimit;
+  return true;
+
+#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__ANDROID__) || defined(__linux__)
+  pthread_attr_t attr;
+
+#if defined(__FreeBSD__) || defined(__OpenBSD__)
+  if (0 != pthread_attr_init(&attr) || 0 != pthread_attr_get_np(pthread_self(), &attr)) {
+    return false;
+  }
+#else
+  if (0 != pthread_getattr_np(pthread_self(), &attr)) {
+    return false;
+  }
+#endif
+
+  void *begin = nullptr;
+  size_t size = 0;
+  bool success = (0 == pthread_attr_getstack(&attr, &begin, &size));
+
+  *outBegin = (uintptr_t)begin;
+  *outEnd = *outBegin + size;
+
+  pthread_attr_destroy(&attr);
+  return success;
+
+#else
+  // FIXME: implement on this platform
+  return false;
+#endif
+}
diff --git a/test/IRGen/temporary_allocation/async.swift b/test/IRGen/temporary_allocation/async.swift
new file mode 100644
index 0000000000000..d9453ff195236
--- /dev/null
+++ b/test/IRGen/temporary_allocation/async.swift
@@ -0,0 +1,13 @@
+// RUN: %target-swift-frontend -primary-file %s -O -emit-ir -disable-availability-checking | %FileCheck %s
+// REQUIRES: concurrency
+
+@_silgen_name("blackHole")
+func blackHole(_ value: UnsafeMutableRawPointer?) -> Void
+
+func f() async {
+  withUnsafeTemporaryAllocation(byteCount: 123, alignment: 1) { buffer in
+    blackHole(buffer.baseAddress)
+  }
+}
+// CHECK: alloca [123 x i8], align 1
+// CHECK-NOT: swift_task_alloc
diff --git a/test/IRGen/temporary_allocation/bad_constants.swift b/test/IRGen/temporary_allocation/bad_constants.swift
new file mode 100644
index 0000000000000..f4bbfbd3b0f4c
--- /dev/null
+++ b/test/IRGen/temporary_allocation/bad_constants.swift
@@ -0,0 +1,9 @@
+// RUN: not %target-swift-frontend -primary-file %s -O -emit-ir -o /dev/null 2>&1 | %FileCheck %s
+
+@_silgen_name("blackHole")
+func blackHole(_ value: UnsafeMutableRawPointer?) -> Void
+
+withUnsafeTemporaryAllocation(byteCount: 1, alignment: -1) { buffer in
+  blackHole(buffer.baseAddress)
+}
+// CHECK: error: alignment value must be greater than zero
diff --git a/test/IRGen/temporary_allocation/codegen.swift b/test/IRGen/temporary_allocation/codegen.swift
new file mode 100644
index 0000000000000..47b45d3da4dc1
--- /dev/null
+++ b/test/IRGen/temporary_allocation/codegen.swift
@@ -0,0 +1,97 @@
+// RUN: %target-swift-frontend -primary-file %s -O -emit-ir | %FileCheck %s --check-prefixes=CHECK,CHECK-%target-vendor
+
+@_silgen_name("blackHole")
+func blackHole(_ value: UnsafeMutableRawPointer?) -> Void
+
+// MARK: Pointer width
+do {
+  let ptr = UnsafeMutableRawPointer.allocate(byteCount: 1, alignment: 1)
+  blackHole(ptr)
+  ptr.deallocate()
+}
+// CHECK: ptrtoint i8* {{.*}} to [[WORD:i[0-9]+]]
+
+// MARK: Trivial Cases
+
+withUnsafeTemporaryAllocation(byteCount: 0, alignment: 1) { buffer in
+  blackHole(buffer.baseAddress)
+}
+// CHECK: [[ZERO_BYTE_PTR_RAW:%temp_alloc[0-9]*]] = alloca i8, align 1
+// CHECK: [[ZERO_BYTE_PTR:%[0-9]+]] = ptrtoint i8* [[ZERO_BYTE_PTR_RAW]] to [[WORD]]
+// CHECK: call swiftcc void @blackHole([[WORD]] [[ZERO_BYTE_PTR]])
+
+withUnsafeTemporaryAllocation(byteCount: 1, alignment: 1) { buffer in
+  blackHole(buffer.baseAddress)
+}
+// CHECK: [[ONE_BYTE_PTR_RAW:%temp_alloc[0-9]*]] = alloca i8, align 1
+// CHECK: [[ONE_BYTE_PTR:%[0-9]+]] = ptrtoint i8* [[ONE_BYTE_PTR_RAW]] to [[WORD]]
+// CHECK: call swiftcc void @blackHole([[WORD]] [[ONE_BYTE_PTR]])
+
+withUnsafeTemporaryAllocation(byteCount: 5, alignment: 1) { buffer in
+  blackHole(buffer.baseAddress)
+}
+// CHECK: [[FIVE_BYTE_PTR_RAW:%temp_alloc[0-9]*]] = alloca [5 x i8], align 1
+// CHECK: [[FIVE_BYTE_PTR:%[0-9]+]] = ptrtoint [5 x i8]* [[FIVE_BYTE_PTR_RAW]] to [[WORD]]
+// CHECK: call swiftcc void @blackHole([[WORD]] [[FIVE_BYTE_PTR]])
+
+withUnsafeTemporaryAllocation(byteCount: 1024, alignment: 8) { buffer in
+  blackHole(buffer.baseAddress)
+}
+// CHECK: [[ONE_KB_PTR_RAW:%temp_alloc[0-9]*]] = alloca [1024 x i8], align 8
+// CHECK: [[ONE_KB_PTR:%[0-9]+]] = ptrtoint [1024 x i8]* [[ONE_KB_PTR_RAW]] to [[WORD]]
+// CHECK: call swiftcc void @blackHole([[WORD]] [[ONE_KB_PTR]])
+
+// MARK: Alignment unknown at compile-time
+
+withUnsafeTemporaryAllocation(byteCount: 1024, alignment: Int.random(in: 0 ..< 16)) { buffer in
+  blackHole(buffer.baseAddress)
+}
+// CHECK: [[ONE_KB_RAND_PTR_RAW:%temp_alloc[0-9]*]] = alloca [1024 x i8], align 16
+// CHECK: [[ONE_KB_RAND_PTR:%[0-9]+]] = ptrtoint [1024 x i8]* [[ONE_KB_RAND_PTR_RAW]] to [[WORD]]
+// CHECK: call swiftcc void @blackHole([[WORD]] [[ONE_KB_RAND_PTR]])
+
+// MARK: Typed buffers
+
+withUnsafeTemporaryAllocation(of: Int32.self, capacity: 4) { buffer in
+  blackHole(buffer.baseAddress)
+}
+// CHECK: [[INT_PTR_RAW:%temp_alloc[0-9]*]] = alloca [16 x i8], align 4
+// CHECK: [[INT_PTR:%[0-9]+]] = ptrtoint [16 x i8]* [[INT_PTR_RAW]] to [[WORD]]
+// CHECK: call swiftcc void @blackHole([[WORD]] [[INT_PTR]])
+
+withUnsafeTemporaryAllocation(of: Void.self, capacity: 2) { buffer in
+  blackHole(buffer.baseAddress)
+}
+// CHECK: [[VOID_PTR_RAW:%temp_alloc[0-9]*]] = alloca [2 x i8], align 1
+// CHECK: [[VOID_PTR:%[0-9]+]] = ptrtoint [2 x i8]* [[VOID_PTR_RAW]] to [[WORD]]
+// CHECK: call swiftcc void @blackHole([[WORD]] [[VOID_PTR]])
+
+// MARK: Very large allocation
+
+// A large allocation size should produce an OS version check, a call to
+// swift_stdlib_isStackAllocationSafe(), and then a branch based on the result
+// to either stack-allocate or heap-allocate.
+withUnsafeTemporaryAllocation(byteCount: 0x0FFF_FFFF, alignment: 1) { buffer in + blackHole(buffer.baseAddress) +} +// CHECK-apple: [[IS_OS_OK:%[0-9]+]] = call swiftcc i1 @"$ss26_stdlib_isOSVersionAtLeastyBi1_Bw_BwBwtF" +// CHECK-apple: br i1 [[IS_OS_OK]], label %[[OS_OK_BR:[0-9]+]], label %[[UNSAFE_BR:[0-9]+]] + +// CHECK-apple: [[UNSAFE_BR]]: +// CHECK-unknown: [[UNSAFE_BR:[0-9]+]]: +// CHECK: [[HEAP_PTR_RAW:%[0-9]+]] = call noalias i8* @swift_slowAlloc([[WORD]] 268435455, [[WORD]] -1) +// CHECK: [[HEAP_PTR:%[0-9]+]] = ptrtoint i8* [[HEAP_PTR_RAW]] to [[WORD]] +// CHECK: call swiftcc void @blackHole([[WORD]] [[HEAP_PTR]]) +// CHECK: call void @swift_slowDealloc(i8* [[HEAP_PTR_RAW]], [[WORD]] -1, [[WORD]] -1) + +// CHECK-apple: [[OS_OK_BR]]: +// CHECK: [[IS_SAFE:%[0-9]+]] = call zeroext i1 @swift_stdlib_isStackAllocationSafe([[WORD]] 268435455, [[WORD]] 1) +// CHECK: br i1 [[IS_SAFE]], label %[[SAFE_BR:[0-9]+]], label %[[UNSAFE_BR]] + +// CHECK: [[SAFE_BR]]: +// CHECK: [[SPSAVE:%spsave[0-9]*]] = call i8* @llvm.stacksave() +// CHECK: [[STACK_PTR_RAW:%temp_alloc[0-9]*]] = alloca [268435455 x i8], align 1 +// CHECK: [[STACK_PTR:%[0-9]+]] = ptrtoint [268435455 x i8]* [[STACK_PTR_RAW]] to [[WORD]] +// CHECK: call swiftcc void @blackHole([[WORD]] [[STACK_PTR]]) +// CHECK: call void @llvm.stackrestore(i8* [[SPSAVE]]) + diff --git a/test/IRGen/temporary_allocation/negative_alignment.swift b/test/IRGen/temporary_allocation/negative_alignment.swift new file mode 100644 index 0000000000000..f4bbfbd3b0f4c --- /dev/null +++ b/test/IRGen/temporary_allocation/negative_alignment.swift @@ -0,0 +1,9 @@ +// RUN: not %target-swift-frontend -primary-file %s -O -emit-ir -o /dev/null 2>&1 | %FileCheck %s + +@_silgen_name("blackHole") +func blackHole(_ value: UnsafeMutableRawPointer?) -> Void + +withUnsafeTemporaryAllocation(byteCount: 1, alignment: -1) { buffer in + blackHole(buffer.baseAddress) +} +// CHECK: error: alignment value must be greater than zero diff --git a/test/IRGen/temporary_allocation/negative_size.swift b/test/IRGen/temporary_allocation/negative_size.swift new file mode 100644 index 0000000000000..8965eaf8627a8 --- /dev/null +++ b/test/IRGen/temporary_allocation/negative_size.swift @@ -0,0 +1,9 @@ +// RUN: not %target-swift-frontend -primary-file %s -O -emit-ir -o /dev/null 2>&1 | %FileCheck %s + +@_silgen_name("blackHole") +func blackHole(_ value: UnsafeMutableRawPointer?) -> Void + +withUnsafeTemporaryAllocation(byteCount: -1, alignment: 1) { buffer in + blackHole(buffer.baseAddress) +} +// CHECK: error: allocation capacity must be greater than or equal to zero diff --git a/test/IRGen/temporary_allocation/non_power2_alignment.swift b/test/IRGen/temporary_allocation/non_power2_alignment.swift new file mode 100644 index 0000000000000..1b2216d5757ed --- /dev/null +++ b/test/IRGen/temporary_allocation/non_power2_alignment.swift @@ -0,0 +1,9 @@ +// RUN: not %target-swift-frontend -primary-file %s -O -emit-ir -o /dev/null 2>&1 | %FileCheck %s + +@_silgen_name("blackHole") +func blackHole(_ value: UnsafeMutableRawPointer?) 
-> Void + +withUnsafeTemporaryAllocation(byteCount: 1, alignment: 3) { buffer in + blackHole(buffer.baseAddress) +} +// CHECK: error: alignment value must be a power of two diff --git a/test/stdlib/TemporaryAllocation.swift b/test/stdlib/TemporaryAllocation.swift new file mode 100644 index 0000000000000..0477492634e24 --- /dev/null +++ b/test/stdlib/TemporaryAllocation.swift @@ -0,0 +1,124 @@ +// RUN: %target-run-simple-swiftgyb +// REQUIRES: executable_test + +import StdlibUnittest +import SwiftShims + +var TemporaryAllocationTestSuite = TestSuite("TemporaryAllocation") + +func isStackAllocated(_ pointer: UnsafeRawPointer) -> Bool? { + var stackBegin: UInt = 0 + var stackEnd: UInt = 0 + if _swift_stdlib_getCurrentStackBounds(&stackBegin, &stackEnd) { + var pointerValue = UInt(bitPattern: pointer) + return pointerValue >= stackBegin && pointerValue < stackEnd + } + return nil +} + +func expectStackAllocated(_ pointer: UnsafeRawPointer) { + if let stackAllocated = isStackAllocated(pointer) { + expectTrue(stackAllocated) + } else { + // Could not read stack bounds. Skip. + } +} + +func expectNotStackAllocated(_ pointer: UnsafeRawPointer) { + if let stackAllocated = isStackAllocated(pointer) { + expectFalse(stackAllocated) + } else { + // Could not read stack bounds. Skip. + } +} + +// MARK: Untyped buffers + +TemporaryAllocationTestSuite.test("untypedAllocationOnStack") { + withUnsafeTemporaryAllocation(byteCount: 8, alignment: 1) { buffer in + expectStackAllocated(buffer.baseAddress!) + } +} + +TemporaryAllocationTestSuite.test("untypedAllocationOnHeap") { + // EXPECTATION: a very large allocated buffer is heap-allocated. (Note if + // swift_stdlib_isStackAllocationSafe() gets fleshed out, this test may need + // to be changed.) + withUnsafeTemporaryAllocation(byteCount: 100_000, alignment: 1) { buffer in + expectNotStackAllocated(buffer.baseAddress!) + } +} + +TemporaryAllocationTestSuite.test("untypedEmptyAllocationIsStackAllocated") { + withUnsafeTemporaryAllocation(byteCount: 0, alignment: 1) { buffer in + expectStackAllocated(buffer.baseAddress!) + } +} + +TemporaryAllocationTestSuite.test("crashOnNegativeByteCount") { + expectCrash { + let byteCount = Int.random(in: -2 ..< -1) + withUnsafeTemporaryAllocation(byteCount: byteCount, alignment: 1) { _ in } + } +} + +TemporaryAllocationTestSuite.test("crashOnNegativeAlignment") { + expectCrash { + let alignment = Int.random(in: -2 ..< -1) + withUnsafeTemporaryAllocation(byteCount: 16, alignment: alignment) { _ in } + } +} + +TemporaryAllocationTestSuite.test("untypedAllocationIsAligned") { + withUnsafeTemporaryAllocation(byteCount: 1, alignment: 8) { buffer in + let pointerBits = Int(bitPattern: buffer.baseAddress!) + let alignmentMask = 0b111 + expectEqual(pointerBits & alignmentMask, 0) + } +} + +// MARK: Typed buffers + +TemporaryAllocationTestSuite.test("typedAllocationOnStack") { + withUnsafeTemporaryAllocation(of: Int.self, capacity: 1) { buffer in + expectStackAllocated(buffer.baseAddress!) + } +} + +TemporaryAllocationTestSuite.test("typedAllocationOnHeap") { + // EXPECTATION: a very large allocated buffer is heap-allocated. (Note if + // swift_stdlib_isStackAllocationSafe() gets fleshed out, this test may need + // to be changed.) + withUnsafeTemporaryAllocation(of: Int.self, capacity: 100_000) { buffer in + expectNotStackAllocated(buffer.baseAddress!) 
+ } +} + +TemporaryAllocationTestSuite.test("typedEmptyAllocationIsStackAllocated") { + withUnsafeTemporaryAllocation(of: Int.self, capacity: 0) { buffer in + expectStackAllocated(buffer.baseAddress!) + } +} + +TemporaryAllocationTestSuite.test("voidAllocationIsStackAllocated") { + withUnsafeTemporaryAllocation(of: Void.self, capacity: 1) { buffer in + expectStackAllocated(buffer.baseAddress!) + } +} + +TemporaryAllocationTestSuite.test("crashOnNegativeValueCount") { + expectCrash { + let capacity = Int.random(in: -2 ..< -1) + withUnsafeTemporaryAllocation(of: Int.self, capacity: capacity) { _ in } + } +} + +TemporaryAllocationTestSuite.test("typedAllocationIsAligned") { + withUnsafeTemporaryAllocation(of: Int.self, capacity: 1) { buffer in + let pointerBits = Int(bitPattern: buffer.baseAddress!) + let alignmentMask = MemoryLayout.alignment - 1 + expectEqual(pointerBits & alignmentMask, 0) + } +} + +runAllTests()
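
Usage note: a minimal sketch of the two public entry points added by this
patch; the byte counts, element type, and closure bodies below are
illustrative only, not taken from the patch itself.

    // Untyped: 64 uninitialized bytes, aligned to at least 8 bytes. The
    // closure must initialize memory before reading it and deinitialize it
    // before returning; deallocation is automatic.
    let firstByte = withUnsafeTemporaryAllocation(byteCount: 64, alignment: 8) {
      buffer -> UInt8 in
      buffer.initializeMemory(as: UInt8.self, repeating: 0xFF)
      return buffer[0]
    }

    // Typed: capacity is counted in elements, and alignment is taken from
    // MemoryLayout<Int>.alignment. Int is trivial, so no per-element
    // deinitialization is needed before returning.
    let sum = withUnsafeTemporaryAllocation(of: Int.self, capacity: 16) {
      buffer -> Int in
      for i in buffer.indices {
        buffer.baseAddress!.advanced(by: i).initialize(to: i)
      }
      return buffer.reduce(0, +) // 0 + 1 + ... + 15 == 120
    }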