Skip to content
Permalink
Browse files

Make internal stdlib functions public, which are called from the stdlib tests.

And make sure that all those public identifiers are preceded with underscores.

I marked these public-modifiers with "// @testable" to document why they are public.
If some day we have a @testable attribute it should be used instead of those public-modifiers.

Again, this is needed for enabling dead internal function elimination in the stdlib.



Swift SVN r22657
  • Loading branch information...
eeckstein committed Oct 10, 2014
1 parent 3d80081 commit d0697f2ac1092f74548c4df348194a3ee9ea7cda
@@ -18,7 +18,8 @@
// UnsafeMutablePointer
@transparent
internal func _isDebugAssertConfiguration() -> Bool {
public // @testable
func _isDebugAssertConfiguration() -> Bool {
// The values for the assert_configuration call are:
// 0: Debug
// 1: Release
@@ -36,7 +37,8 @@ internal func _isReleaseAssertConfiguration() -> Bool {
}

@transparent
internal func _isFastAssertConfiguration() -> Bool {
public // @testable
func _isFastAssertConfiguration() -> Bool {
// The values for the assert_configuration call are:
// 0: Debug
// 1: Release
@@ -45,6 +47,7 @@ internal func _isFastAssertConfiguration() -> Bool {
}

@transparent
public // @testable
func _isStdlibInternalChecksEnabled() -> Bool {
#if INTERNAL_CHECKS_ENABLED
return true
@@ -64,7 +64,7 @@ def TypedUnaryIntrinsicFunctions():
% for T, CT, bits, ufunc in TypedUnaryIntrinsicFunctions():
@transparent
public func _${ufunc}(x: ${T}) -> ${T} {
return ${T}(Builtin.int_${ufunc}_FPIEEE${bits}(x.value))
return ${T}(_bits: Builtin.int_${ufunc}_FPIEEE${bits}(x.value))
}

% end
@@ -234,7 +234,8 @@ extension COpaquePointer {
public struct CVaListPointer {
var value: UnsafeMutablePointer<Void>

init(fromUnsafeMutablePointer from: UnsafeMutablePointer<Void>) {
public // @testable
init(_fromUnsafeMutablePointer from: UnsafeMutablePointer<Void>) {
value = from
}
}
@@ -10,10 +10,15 @@
//
//===----------------------------------------------------------------------===//
// Unfortunately it is required to make some types in this file public
// (@testable) just because _lazyConcatenate is called from the
// stdlib unit tests.
/// The `GeneratorType` used by `_ConcatenateSequenceView`,
/// `_ForwardConcatenateView`, and `_BidirectionalConcatenateView`.
/// Generates a sequence of the elements of the elements of its
/// argument.
public // @testable
struct _ConcatenateSequenceGenerator<
Outer: GeneratorType where Outer.Element : SequenceType
>: GeneratorType, SequenceType {
@@ -29,6 +34,7 @@ struct _ConcatenateSequenceGenerator<
/// Requires: `next()` has not been applied to a copy of `self`
/// since the copy was made, and no preceding call to `self.next()`
/// has returned `nil`.
public // @testable
mutating func next() -> Outer.Element.Generator.Element? {
do {
if _fastPath(_inner != nil) {
@@ -48,6 +54,7 @@ struct _ConcatenateSequenceGenerator<

/// `_ConcatenateSequenceGenerator` is also a `SequenceType`, so it
/// `generate`\ 's a copy of itself
public // @testable
func generate() -> _ConcatenateSequenceGenerator {
return self
}
@@ -110,6 +117,7 @@ func _concatenate<

/// A wrapper for a `${IndexProtocol}` for a collection of
/// collections, that can be used to index the inner elements.
public // @testable
struct ${Index}<
C: CollectionType
where C.Index : ${IndexProtocol},
@@ -148,6 +156,7 @@ struct ${Index}<
/// Returns the next consecutive value after `self`.
///
/// Requires: the next value is representable.
public // @testable
func successor() -> ${Index} {
return ${Index}.adjustForward(_data, _outer, _inner!.successor())
}
@@ -156,6 +165,7 @@ struct ${Index}<
/// Returns the previous consecutive value before `self`.
///
/// Requires: the previous value is representable.
public // @testable
func predecessor() -> ${Index} {

var outer = _outer
@@ -169,12 +179,14 @@ struct ${Index}<
% end
}

public // @testable
func == <I> (lhs: ${Index}<I>, rhs: ${Index}<I>) -> Bool {
return lhs._outer == rhs._outer && lhs._inner == rhs._inner
}

/// The lazy `CollectionType` returned by `lazyConcatenate(c)` where `c` is a
/// `CollectionType` having an `Index` conforming to `${IndexProtocol}`
public // @testable
struct ${View}<
C: CollectionType
where C.Index: ${IndexProtocol},
@@ -194,13 +206,15 @@ struct ${View}<
/// Return a *generator* over the elements of this *sequence*.
///
/// Complexity: O(1)
public // @testable
func generate() -> _ConcatenateSequenceGenerator<C.Generator> {
return _ConcatenateSequenceGenerator(_base.generate())
}

/// The position of the first element in a non-empty collection.
///
/// Identical to `endIndex` in an empty collection.
public // @testable
var startIndex: Index {
return ${Index}.adjustForward(_base, _base.startIndex, nil)
}
@@ -210,6 +224,7 @@ struct ${View}<
/// `endIndex` is not a valid argument to `subscript`, and is always
/// reachable from `startIndex` by zero or more applications of
/// `successor()`.
public // @testable
var endIndex: Index {
return ${Index}(_base, _base.endIndex, nil)
}
@@ -218,6 +233,7 @@ struct ${View}<
///
/// Requires: `position` is a valid position in `self` and
/// `position != endIndex`.
public // @testable
subscript(position: Index) -> C.Generator.Element.Generator.Element {
return _base[position._outer][position._inner!]
}
@@ -227,6 +243,7 @@ struct ${View}<

/// Return a collection that is a concatenation of the elements of
/// `source`\ 's elements
public // @testable
func _lazyConcatenate<
C: CollectionType
where C.Index: ${IndexProtocol},
@@ -133,7 +133,8 @@ public struct ${Self} {
}

@transparent
init(_ v: Builtin.FPIEEE${bits}) {
public // @testable
init(_bits v: Builtin.FPIEEE${bits}) {
value = v
}

@@ -156,7 +157,7 @@ extension ${Self} : FloatingPointType {
public typealias _BitsType = UInt${bits}

public static func _fromBitPattern(bits: _BitsType) -> ${Self} {
return ${Self}(Builtin.bitcast_Int${bits}_FPIEEE${bits}(bits.value))
return ${Self}(_bits: Builtin.bitcast_Int${bits}_FPIEEE${bits}(bits.value))
}

public func _toBitPattern() -> _BitsType {
@@ -308,12 +309,12 @@ extension ${Self} /* : FloatingPointType */ {
extension ${Self} : _BuiltinIntegerLiteralConvertible, IntegerLiteralConvertible {
public
init(_builtinIntegerLiteral value: Builtin.Int${builtinIntLiteralBits}){
self = ${Self}(Builtin.itofp_with_overflow_Int${builtinIntLiteralBits}_FPIEEE${bits}(value))
self = ${Self}(_bits: Builtin.itofp_with_overflow_Int${builtinIntLiteralBits}_FPIEEE${bits}(value))
}

/// Create an instance initialized to `value`.
public init(integerLiteral value: Int64) {
self = ${Self}(Builtin.uitofp_Int64_FPIEEE${bits}(value.value))
self = ${Self}(_bits: Builtin.uitofp_Int64_FPIEEE${bits}(value.value))
}
}

@@ -325,9 +326,9 @@ extension ${Self} : _BuiltinFloatLiteralConvertible {
public
init(_builtinFloatLiteral value: Builtin.FPIEEE${builtinFloatLiteralBits}) {
% if bits == builtinFloatLiteralBits:
self = ${Self}(value)
self = ${Self}(_bits: value)
% elif bits < builtinFloatLiteralBits:
self = ${Self}(Builtin.fptrunc_FPIEEE${builtinFloatLiteralBits}_FPIEEE${bits}(value))
self = ${Self}(_bits: Builtin.fptrunc_FPIEEE${builtinFloatLiteralBits}_FPIEEE${bits}(value))
% else:
// FIXME: This is actually losing precision <rdar://problem/14073102>.
self = ${Self}(Builtin.fpext_FPIEEE${builtinFloatLiteralBits}_FPIEEE${bits}(value))
@@ -402,7 +403,7 @@ extension ${Self} : AbsoluteValuable {
/// Returns the absolute value of `x`
@transparent
public static func abs(x: ${Self}) -> ${Self} {
return ${Self}(Builtin.int_fabs_FPIEEE${bits}(x.value))
return ${Self}(_bits: Builtin.int_fabs_FPIEEE${bits}(x.value))
}
}

@@ -413,7 +414,7 @@ public prefix func +(x: ${Self}) -> ${Self} {

@transparent
public prefix func -(x: ${Self}) -> ${Self} {
return ${Self}(Builtin.fneg_FPIEEE${bits}(x.value))
return ${Self}(_bits: Builtin.fneg_FPIEEE${bits}(x.value))
}

//===----------------------------------------------------------------------===//
@@ -500,7 +501,7 @@ extension ${Self} : Strideable {
% for op, name in ('+','fadd'), ('-','fsub'),('*','fmul'), ('/','fdiv'):
@transparent
public func ${op} (lhs: ${Self}, rhs: ${Self}) -> ${Self} {
return ${Self}(Builtin.${name}_FPIEEE${bits}(lhs.value, rhs.value))
return ${Self}(_bits: Builtin.${name}_FPIEEE${bits}(lhs.value, rhs.value))
}
% end

@@ -54,11 +54,9 @@ struct _HashingDetail {
// their inputs and just exhibit avalanche effect.
//
// TODO: This function is only public because it is used in the
// stdlib/HashingAvalanche.swift validation test. Check if there is another
// way to let the test access the function.
@transparent
public func _mixUInt32(value: UInt32) -> UInt32 {
public // @testable
func _mixUInt32(value: UInt32) -> UInt32 {
// Zero-extend to 64 bits, hash, select 32 bits from the hash.
//
// NOTE: this differs from LLVM's implementation, which selects the lower
@@ -70,15 +68,14 @@ public func _mixUInt32(value: UInt32) -> UInt32 {
}

@transparent
public // @testable
func _mixInt32(value: Int32) -> Int32 {
return Int32(bitPattern: _mixUInt32(UInt32(bitPattern: value)))
}

// TODO: This function is only public because it is used in the
// stdlib/HashingAvalanche.swift validation test. Check if there is another
// way to let the test access the function.
@transparent
public func _mixUInt64(value: UInt64) -> UInt64 {
public // @testable
func _mixUInt64(value: UInt64) -> UInt64 {
// Similar to hash_4to8_bytes but using a seed instead of length.
let seed: UInt64 = _HashingDetail.getExecutionSeed()
let low: UInt64 = value & 0xffff_ffff
@@ -87,11 +84,13 @@ public func _mixUInt64(value: UInt64) -> UInt64 {
}

@transparent
public // @testable
func _mixInt64(value: Int64) -> Int64 {
return Int64(bitPattern: _mixUInt64(UInt64(bitPattern: value)))
}

@transparent
public // @testable
func _mixUInt(value: UInt) -> UInt {
#if arch(i386) || arch(arm)
return UInt(_mixUInt32(UInt32(value)))
@@ -101,6 +100,7 @@ func _mixUInt(value: UInt) -> UInt {
}

@transparent
public // @testable
func _mixInt(value: Int) -> Int {
#if arch(i386) || arch(arm)
return Int(_mixInt32(Int32(value)))
@@ -126,6 +126,7 @@ func _mixInt(value: Int) -> Int {
/// hash value does not change anything fundamentally: collisions are still
/// possible, and it does not prevent malicious users from constructing data
/// sets that will exhibit pathological collisions.
public // @testable
func _squeezeHashValue(hashValue: Int, resultRange: Range<Int>) -> Int {
// Length of a Range<Int> does not fit into an Int, but fits into an UInt.
// An efficient way to compute the length is to rely on two's complement
@@ -147,6 +148,7 @@ func _squeezeHashValue(hashValue: Int, resultRange: Range<Int>) -> Int {
UInt(bitPattern: resultRange.startIndex) &+ unsignedResult)
}

public // @testable
func _squeezeHashValue(hashValue: Int, resultRange: Range<UInt>) -> UInt {
let mixedHashValue = UInt(bitPattern: _mixInt(hashValue))
let resultCardinality: UInt = resultRange.endIndex - resultRange.startIndex
@@ -15,12 +15,14 @@
// FIXME: Once we have an FFI interface, make these have proper function bodies
@transparent
public // @testable
func _countLeadingZeros(value: Int64) -> Int64 {
return Int64(Builtin.int_ctlz_Int64(value.value, false.value))
}

/// Returns if `x` is a power of 2.
@transparent
public // @testable
func _isPowerOf2(x: UInt) -> Bool {
if x == 0 {
return false
@@ -32,6 +34,7 @@ func _isPowerOf2(x: UInt) -> Bool {

/// Returns if `x` is a power of 2.
@transparent
public // @testable
func _isPowerOf2(x: Int) -> Bool {
if x <= 0 {
return false
@@ -142,6 +145,7 @@ public func _stdlib_demangleName(mangledName: String) -> String {
///
/// TODO: Implement version working on Int instead of Int64.
@transparent
public // @testable
func _floorLog2(x: Int64) -> Int {
_sanityCheck(x > 0, "_floorLog2 operates only on non-negative integers")
  // Note: use unchecked subtraction because this expression can not
@@ -231,7 +231,7 @@ public func ... <Pos : ForwardIndexType where Pos: Comparable> (
}

// FIXME: This doesn't work yet: <rdar://problem/17668465>
func ~= <I : ForwardIndexType where I: Comparable> (
public func ~= <I : ForwardIndexType where I: Comparable> (
pattern: Range<I>, value: I
) -> Bool {
// convert to an interval and check that.
@@ -56,6 +56,7 @@ func _stdlib_atomicCompareExchangeStrongPtrImpl(
/// compare-and-exchange instruction will operate on the writeback buffer, and
/// you will get a *race* while doing writeback into shared memory.
@transparent
public // @testable
func _stdlib_atomicCompareExchangeStrongPtr<T>(
#object: UnsafeMutablePointer<UnsafeMutablePointer<T>>,
#expected: UnsafeMutablePointer<UnsafeMutablePointer<T>>,
@@ -67,6 +68,7 @@ func _stdlib_atomicCompareExchangeStrongPtr<T>(
}

@transparent
public // @testable
func _stdlib_atomicInitializeARCRef(
#object: UnsafeMutablePointer<AnyObject?>,
#desired: AnyObject) -> Bool {
@@ -195,6 +197,7 @@ func _swift_stdlib_atomicLoadPtrImpl(
) -> COpaquePointer

@transparent
public // @testable
func _stdlib_atomicLoadARCRef(
#object: UnsafeMutablePointer<AnyObject?>
) -> AnyObject? {
@@ -353,6 +356,7 @@ func _uint64ToStringImpl(
bufferLength: UWord, value: UInt64, radix: Int64, uppercase: Bool
) -> UWord

public // @testable
func _uint64ToString(
value: UInt64, radix: Int64 = 10, uppercase: Bool = false
) -> String {
@@ -28,7 +28,7 @@ var _fastEnumerationStorageMutationsTarget: CUnsignedLong = 0

/// A dummy pointer to be used as `mutationsPtr` in fast enumeration
/// implementations.
internal
public // @testable
var _fastEnumerationStorageMutationsPtr: UnsafeMutablePointer<CUnsignedLong> {
return UnsafeMutablePointer(
Builtin.addressof(&_fastEnumerationStorageMutationsTarget))
@@ -134,13 +134,13 @@ public final class _NSContiguousString : _NSSwiftString {
_precondition(aRange.location + aRange.length <= Int(_core.count))

if _core.elementWidth == 2 {
UTF16.copy(
UTF16._copy(
_core.startUTF16 + aRange.location,
destination: UnsafeMutablePointer<UInt16>(buffer),
count: aRange.length)
}
else {
UTF16.copy(
UTF16._copy(
_core.startASCII + aRange.location,
destination: UnsafeMutablePointer<UInt16>(buffer),
count: aRange.length)
Oops, something went wrong.

0 comments on commit d0697f2

Please sign in to comment.
You can’t perform that action at this time.