From 1614b34b287a4a926ae6aa0c6e0e2e494c206599 Mon Sep 17 00:00:00 2001 From: closure Date: Sun, 16 Apr 2017 16:56:46 +0800 Subject: [PATCH] objc-709 --- debug-objc/main.m | 2 +- include/sys/reason.h | 26 +- runtime/Messengers.subproj/objc-msg-x86_64.s | 45 --- runtime/NSObject.h | 2 +- runtime/NSObject.mm | 103 +++-- runtime/hashtable2.mm | 4 +- runtime/objc-accessors.mm | 7 +- runtime/objc-class-old.mm | 2 +- runtime/objc-class.mm | 6 +- runtime/objc-errors.mm | 2 +- runtime/objc-exception.mm | 14 +- runtime/objc-initialize.mm | 2 +- runtime/objc-internal.h | 8 + runtime/objc-lockdebug.h | 22 ++ runtime/objc-lockdebug.mm | 382 ++++++++++++------- runtime/objc-locks-new.h | 38 ++ runtime/objc-locks-old.h | 40 ++ runtime/objc-locks.h | 64 ++++ runtime/objc-opt.mm | 16 + runtime/objc-os.h | 93 ++++- runtime/objc-os.mm | 222 ++++++++++- runtime/objc-private.h | 53 ++- runtime/objc-references.mm | 15 +- runtime/objc-runtime-new.h | 42 +- runtime/objc-runtime-new.mm | 128 +++++-- runtime/objc-runtime-old.mm | 4 +- runtime/objc-sync.mm | 4 +- runtime/objc.h | 37 +- runtime/runtime.h | 44 --- 29 files changed, 1050 insertions(+), 377 deletions(-) create mode 100644 runtime/objc-locks-new.h create mode 100644 runtime/objc-locks-old.h create mode 100644 runtime/objc-locks.h diff --git a/debug-objc/main.m b/debug-objc/main.m index 8169edb9..2f52cb38 100644 --- a/debug-objc/main.m +++ b/debug-objc/main.m @@ -1,4 +1,4 @@ -// + // main.m // debug-objc // diff --git a/include/sys/reason.h b/include/sys/reason.h index 13a49e3e..30ccb718 100644 --- a/include/sys/reason.h +++ b/include/sys/reason.h @@ -41,14 +41,14 @@ __BEGIN_DECLS #include typedef struct os_reason { - decl_lck_mtx_data(, osr_lock) - int osr_refcount; - uint32_t osr_namespace; - uint64_t osr_code; - uint64_t osr_flags; - uint32_t osr_bufsize; - struct kcdata_descriptor osr_kcd_descriptor; - char *osr_kcd_buf; + decl_lck_mtx_data(, osr_lock) + int osr_refcount; + uint32_t osr_namespace; + uint64_t osr_code; + uint64_t osr_flags; + uint32_t osr_bufsize; + struct kcdata_descriptor osr_kcd_descriptor; + char *osr_kcd_buf; } *os_reason_t; #define OS_REASON_NULL ((os_reason_t) 0) @@ -59,7 +59,7 @@ typedef struct os_reason { void os_reason_init(void); os_reason_t build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size, - user_addr_t reason_string, uint64_t reason_flags); + user_addr_t reason_string, uint64_t reason_flags); char *launchd_exit_reason_get_string_desc(os_reason_t exit_reason); #else /* XNU_KERNEL_PRIVATE */ @@ -122,9 +122,9 @@ void os_reason_free(os_reason_t cur_reason); * looses higher 32 bits of exit reason code. */ #define ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(code, osr_namespace) \ - (code) = (code) | (((osr_namespace) & ((uint64_t)UINT32_MAX)) << 32) +(code) = (code) | (((osr_namespace) & ((uint64_t)UINT32_MAX)) << 32) #define ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(code, osr_code) \ - (code) = (code) | ((osr_code) & ((uint64_t)UINT32_MAX)) +(code) = (code) | ((osr_code) & ((uint64_t)UINT32_MAX)) #ifndef KERNEL /* @@ -156,7 +156,7 @@ void abort_with_reason(uint32_t reason_namespace, uint64_t reason_code, const ch * Outputs: Does not return. 
*/ void abort_with_payload(uint32_t reason_namespace, uint64_t reason_code, void *payload, uint32_t payload_size, const char *reason_string, - uint64_t reason_flags) __attribute__((noreturn)); + uint64_t reason_flags) __attribute__((noreturn)); /* * terminate_with_reason: Used to terminate a specific process and pass along @@ -195,7 +195,7 @@ int terminate_with_reason(int pid, uint32_t reason_namespace, uint64_t reason_co * returns 0 otherwise */ int terminate_with_payload(int pid, uint32_t reason_namespace, uint64_t reason_code, void *payload, uint32_t payload_size, - const char *reason_string, uint64_t reason_flags); + const char *reason_string, uint64_t reason_flags); #endif /* KERNEL */ /* diff --git a/runtime/Messengers.subproj/objc-msg-x86_64.s b/runtime/Messengers.subproj/objc-msg-x86_64.s index 343b300a..86705557 100644 --- a/runtime/Messengers.subproj/objc-msg-x86_64.s +++ b/runtime/Messengers.subproj/objc-msg-x86_64.s @@ -1281,49 +1281,4 @@ LCacheMiss: .quad 0 .quad 0 - - // Workaround for Skype evil (rdar://19715989) - - .text - .align 4 - .private_extern _map_images - .private_extern _map_2_images - .private_extern _hax -_hax: - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop -_map_images: - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - jmp _map_2_images - #endif diff --git a/runtime/NSObject.h b/runtime/NSObject.h index f42b446e..7bd86113 100644 --- a/runtime/NSObject.h +++ b/runtime/NSObject.h @@ -18,7 +18,7 @@ @property (readonly) NSUInteger hash; @property (readonly) Class superclass; -- (Class)class OBJC_SWIFT_UNAVAILABLE("use 'anObject.dynamicType' instead"); +- (Class)class OBJC_SWIFT_UNAVAILABLE("use 'type(of: anObject)' instead"); - (instancetype)self; - (id)performSelector:(SEL)aSelector; diff --git a/runtime/NSObject.mm b/runtime/NSObject.mm index c7e7f431..7a7b3912 100644 --- a/runtime/NSObject.mm +++ b/runtime/NSObject.mm @@ -136,6 +136,10 @@ void _objc_setBadAllocHandler(id(*newHandler)(Class)) // don't want the table to act as a root for `leaks`. typedef objc::DenseMap,size_t,true> RefcountMap; +// Template parameters. +enum HaveOld { DontHaveOld = false, DoHaveOld = true }; +enum HaveNew { DontHaveNew = false, DoHaveNew = true }; + struct SideTable { spinlock_t slock; RefcountMap refcnts; @@ -151,46 +155,58 @@ void _objc_setBadAllocHandler(id(*newHandler)(Class)) void lock() { slock.lock(); } void unlock() { slock.unlock(); } + void forceReset() { slock.forceReset(); } // Address-ordered lock discipline for a pair of side tables. 
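// Illustrative sketch (not part of the patch): the address-ordered
// discipline named above, as a standalone template; the specializations
// that follow implement it for SideTable pairs. Two threads that need
// the same pair always acquire the lower address first, so the classic
// AB/BA deadlock cannot occur. Assumes only that Lock has lock().
#include <utility>

template <typename Lock>
static void lockPairOrdered(Lock *a, Lock *b)
{
    if (a == b) { a->lock(); return; }  // one lock: single acquisition
    if (a > b) std::swap(a, b);         // canonicalize: low address first
    a->lock();
    b->lock();
}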
- template + template static void lockTwo(SideTable *lock1, SideTable *lock2); - template + template static void unlockTwo(SideTable *lock1, SideTable *lock2); }; template<> -void SideTable::lockTwo(SideTable *lock1, SideTable *lock2) { +void SideTable::lockTwo + (SideTable *lock1, SideTable *lock2) +{ spinlock_t::lockTwo(&lock1->slock, &lock2->slock); } template<> -void SideTable::lockTwo(SideTable *lock1, SideTable *) { +void SideTable::lockTwo + (SideTable *lock1, SideTable *) +{ lock1->lock(); } template<> -void SideTable::lockTwo(SideTable *, SideTable *lock2) { +void SideTable::lockTwo + (SideTable *, SideTable *lock2) +{ lock2->lock(); } template<> -void SideTable::unlockTwo(SideTable *lock1, SideTable *lock2) { +void SideTable::unlockTwo + (SideTable *lock1, SideTable *lock2) +{ spinlock_t::unlockTwo(&lock1->slock, &lock2->slock); } template<> -void SideTable::unlockTwo(SideTable *lock1, SideTable *) { +void SideTable::unlockTwo + (SideTable *lock1, SideTable *) +{ lock1->unlock(); } template<> -void SideTable::unlockTwo(SideTable *, SideTable *lock2) { +void SideTable::unlockTwo + (SideTable *, SideTable *lock2) +{ lock2->unlock(); } - // We cannot use a C++ static initializer to initialize SideTables because @@ -211,6 +227,29 @@ static void SideTableInit() { // anonymous namespace }; +void SideTableLockAll() { + SideTables().lockAll(); +} + +void SideTableUnlockAll() { + SideTables().unlockAll(); +} + +void SideTableForceResetAll() { + SideTables().forceResetAll(); +} + +void SideTableDefineLockOrder() { + SideTables().defineLockOrder(); +} + +void SideTableLocksPrecedeLock(const void *newlock) { + SideTables().precedeLock(newlock); +} + +void SideTableLocksSucceedLock(const void *oldlock) { + SideTables().succeedLock(oldlock); +} // // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block} @@ -256,12 +295,16 @@ BOOL objc_should_deallocate(id object) { // If CrashIfDeallocating is true, the process is halted if newObj is // deallocating or newObj's class does not support weak references. // If CrashIfDeallocating is false, nil is stored instead. -template +enum CrashIfDeallocating { + DontCrashIfDeallocating = false, DoCrashIfDeallocating = true +}; +template static id storeWeak(id *location, objc_object *newObj) { - assert(HaveOld || HaveNew); - if (!HaveNew) assert(newObj == nil); + assert(haveOld || haveNew); + if (!haveNew) assert(newObj == nil); Class previouslyInitializedClass = nil; id oldObj; @@ -272,34 +315,34 @@ BOOL objc_should_deallocate(id object) { // Order by lock address to prevent lock ordering problems. // Retry if the old value changes underneath us. retry: - if (HaveOld) { + if (haveOld) { oldObj = *location; oldTable = &SideTables()[oldObj]; } else { oldTable = nil; } - if (HaveNew) { + if (haveNew) { newTable = &SideTables()[newObj]; } else { newTable = nil; } - SideTable::lockTwo(oldTable, newTable); + SideTable::lockTwo(oldTable, newTable); - if (HaveOld && *location != oldObj) { - SideTable::unlockTwo(oldTable, newTable); + if (haveOld && *location != oldObj) { + SideTable::unlockTwo(oldTable, newTable); goto retry; } // Prevent a deadlock between the weak reference machinery // and the +initialize machinery by ensuring that no // weakly-referenced object has an un-+initialized isa. 
- if (HaveNew && newObj) { + if (haveNew && newObj) { Class cls = newObj->getIsa(); if (cls != previouslyInitializedClass && !((objc_class *)cls)->isInitialized()) { - SideTable::unlockTwo(oldTable, newTable); + SideTable::unlockTwo(oldTable, newTable); _class_initialize(_class_getNonMetaClass(cls, (id)newObj)); // If this class is finished with +initialize then we're good. @@ -315,15 +358,15 @@ BOOL objc_should_deallocate(id object) { } // Clean up old value, if any. - if (HaveOld) { + if (haveOld) { weak_unregister_no_lock(&oldTable->weak_table, oldObj, location); } // Assign new value, if any. - if (HaveNew) { - newObj = (objc_object *)weak_register_no_lock(&newTable->weak_table, - (id)newObj, location, - CrashIfDeallocating); + if (haveNew) { + newObj = (objc_object *) + weak_register_no_lock(&newTable->weak_table, (id)newObj, location, + crashIfDeallocating); // weak_register_no_lock returns nil if weak store should be rejected // Set is-weakly-referenced bit in refcount table. @@ -338,7 +381,7 @@ BOOL objc_should_deallocate(id object) { // No new value. The storage is not changed. } - SideTable::unlockTwo(oldTable, newTable); + SideTable::unlockTwo(oldTable, newTable); return (id)newObj; } @@ -356,7 +399,7 @@ BOOL objc_should_deallocate(id object) { id objc_storeWeak(id *location, id newObj) { - return storeWeak + return storeWeak (location, (objc_object *)newObj); } @@ -374,7 +417,7 @@ BOOL objc_should_deallocate(id object) { id objc_storeWeakOrNil(id *location, id newObj) { - return storeWeak + return storeWeak (location, (objc_object *)newObj); } @@ -403,7 +446,7 @@ BOOL objc_should_deallocate(id object) { return nil; } - return storeWeak + return storeWeak (location, (objc_object*)newObj); } @@ -415,7 +458,7 @@ BOOL objc_should_deallocate(id object) { return nil; } - return storeWeak + return storeWeak (location, (objc_object*)newObj); } @@ -434,7 +477,7 @@ BOOL objc_should_deallocate(id object) { void objc_destroyWeak(id *location) { - (void)storeWeak + (void)storeWeak (location, nil); } diff --git a/runtime/hashtable2.mm b/runtime/hashtable2.mm index 238f3818..e869f1de 100644 --- a/runtime/hashtable2.mm +++ b/runtime/hashtable2.mm @@ -562,7 +562,7 @@ static int isEqualStrStructKey (const void *info, const void *data1, const void static char *z = NULL; static size_t zSize = 0; -static mutex_t uniquerLock; +mutex_t NXUniqueStringLock; static const char *CopyIntoReadOnly (const char *str) { size_t len = strlen (str) + 1; @@ -574,7 +574,7 @@ static int isEqualStrStructKey (const void *info, const void *data1, const void return result; } - mutex_locker_t lock(uniquerLock); + mutex_locker_t lock(NXUniqueStringLock); if (zSize < len) { zSize = CHUNK_SIZE *((len + CHUNK_SIZE - 1) / CHUNK_SIZE); /* not enough room, we try to allocate. If no room left, too bad */ diff --git a/runtime/objc-accessors.mm b/runtime/objc-accessors.mm index 25ec71b0..612abea3 100644 --- a/runtime/objc-accessors.mm +++ b/runtime/objc-accessors.mm @@ -39,10 +39,9 @@ @interface __NSMutableCopyable - (id)mutableCopyWithZone:(void *)zone; @end -// These locks must not be at function scope. 
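// Illustrative sketch (not part of the patch): the compile-time flag
// pattern storeWeak<> uses above. A named enum per bool parameter makes
// call sites read as words (DoHaveOld rather than a bare true), and each
// branch on the flag is constant-folded per instantiation. Names here
// are hypothetical.
enum HasInput { NoInput = false, WithInput = true };

template <HasInput hasInput>
static int process(int *in)
{
    if (hasInput) return *in + 1;  // dead code when instantiated NoInput
    return 0;
}

// process<WithInput>(&x) dereferences its argument;
// process<NoInput>(nullptr) never touches it, like storeWeak<DontHaveOld,...>.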
-static StripedMap PropertyLocks; -static StripedMap StructLocks; -static StripedMap CppObjectLocks; +StripedMap PropertyLocks; +StripedMap StructLocks; +StripedMap CppObjectLocks; #define MUTABLE_COPY 2 diff --git a/runtime/objc-class-old.mm b/runtime/objc-class-old.mm index f62bdeeb..2fe27d89 100644 --- a/runtime/objc-class-old.mm +++ b/runtime/objc-class-old.mm @@ -1504,7 +1504,7 @@ unsigned int method_getArgumentInfo(Method m, int arg, } -static spinlock_t impLock; +spinlock_t impLock; IMP method_setImplementation(Method m_gen, IMP imp) { diff --git a/runtime/objc-class.mm b/runtime/objc-class.mm index 60fff7f0..8baef38b 100644 --- a/runtime/objc-class.mm +++ b/runtime/objc-class.mm @@ -158,9 +158,9 @@ #include "objc-private.h" #include "objc-abi.h" -#include "objc-os.h" #include + /* overriding the default object allocation and error handling routines */ OBJC_EXPORT id (*_alloc)(Class, size_t); @@ -816,6 +816,9 @@ IMP class_getMethodImplementation_stret(Class cls, SEL sel) /*********************************************************************** * instrumentObjcMessageSends **********************************************************************/ +// Define this everywhere even if it isn't used to simplify fork() safety code. +spinlock_t objcMsgLogLock; + #if !SUPPORT_MESSAGE_LOGGING void instrumentObjcMessageSends(BOOL flag) @@ -826,7 +829,6 @@ void instrumentObjcMessageSends(BOOL flag) bool objcMsgLogEnabled = false; static int objcMsgLogFD = -1; -static spinlock_t objcMsgLogLock; bool logMessageSend(bool isClassMethod, const char *objectsClass, diff --git a/runtime/objc-errors.mm b/runtime/objc-errors.mm index 4c426b0b..6d65ca25 100644 --- a/runtime/objc-errors.mm +++ b/runtime/objc-errors.mm @@ -85,7 +85,7 @@ static bool isUTF8Continuation(char c) } // Add "message" to any forthcoming crash log. -static mutex_t crashlog_lock; +mutex_t crashlog_lock; static void _objc_crashlog(const char *message) { char *newmsg; diff --git a/runtime/objc-exception.mm b/runtime/objc-exception.mm index d6b1d834..d510d23b 100644 --- a/runtime/objc-exception.mm +++ b/runtime/objc-exception.mm @@ -1059,7 +1059,6 @@ static struct frame_range findHandler(void) struct alt_handler_list *next_DEBUGONLY; }; -static mutex_t DebugLock; static struct alt_handler_list *DebugLists; static uintptr_t DebugCounter; @@ -1080,7 +1079,7 @@ static struct frame_range findHandler(void) if (DebugAltHandlers) { // Save this list so the debug code can find it from other threads - mutex_locker_t lock(DebugLock); + mutex_locker_t lock(AltHandlerDebugLock); list->next_DEBUGONLY = DebugLists; DebugLists = list; } @@ -1095,7 +1094,7 @@ void _destroyAltHandlerList(struct alt_handler_list *list) if (list) { if (DebugAltHandlers) { // Detach from the list-of-lists. - mutex_locker_t lock(DebugLock); + mutex_locker_t lock(AltHandlerDebugLock); struct alt_handler_list **listp = &DebugLists; while (*listp && *listp != list) listp = &(*listp)->next_DEBUGONLY; if (*listp) *listp = (*listp)->next_DEBUGONLY; @@ -1160,7 +1159,7 @@ uintptr_t objc_addExceptionHandler(objc_exception_handler fn, void *context) if (DebugAltHandlers) { // Record backtrace in case this handler is misused later. 
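// Illustrative sketch (not part of the patch): the mutex_locker_t idiom
// used throughout the hunks above (for example in CopyIntoReadOnly), as
// a standalone RAII guard whose destructor guarantees the unlock on
// every exit path. Assumes a mutex type with lock()/unlock().
template <typename Mutex>
class scoped_locker {
    Mutex& m;
public:
    explicit scoped_locker(Mutex& mutex) : m(mutex) { m.lock(); }
    ~scoped_locker() { m.unlock(); }
    scoped_locker(const scoped_locker&) = delete;
    scoped_locker& operator=(const scoped_locker&) = delete;
};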
- mutex_locker_t lock(DebugLock); + mutex_locker_t lock(AltHandlerDebugLock); token = DebugCounter++; if (token == 0) token = DebugCounter++; @@ -1274,7 +1273,7 @@ void alt_handler_error(uintptr_t token) "or break in objc_alt_handler_error() to debug."); if (DebugAltHandlers) { - DebugLock.lock(); + AltHandlerDebugLock.lock(); // Search other threads' alt handler lists for this handler. struct alt_handler_list *list; @@ -1314,7 +1313,7 @@ void alt_handler_error(uintptr_t token) } } done: - DebugLock.unlock(); + AltHandlerDebugLock.unlock(); } @@ -1394,3 +1393,6 @@ void exception_init(void) // __OBJC2__ #endif + +// Define this everywhere even if it isn't used, to simplify fork() safety code +mutex_t AltHandlerDebugLock; diff --git a/runtime/objc-initialize.mm b/runtime/objc-initialize.mm index 0857305e..0f410b6f 100644 --- a/runtime/objc-initialize.mm +++ b/runtime/objc-initialize.mm @@ -99,7 +99,7 @@ /* classInitLock protects CLS_INITIALIZED and CLS_INITIALIZING, and * is signalled when any class is done initializing. * Threads that are waiting for a class to finish initializing wait on this. */ -static monitor_t classInitLock; +monitor_t classInitLock; /*********************************************************************** diff --git a/runtime/objc-internal.h b/runtime/objc-internal.h index 5bcb28c6..6a28c595 100644 --- a/runtime/objc-internal.h +++ b/runtime/objc-internal.h @@ -99,6 +99,14 @@ OBJC_EXPORT void _objc_init(void) OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); #endif +// fork() safety called by libSystem +OBJC_EXPORT void _objc_atfork_prepare(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT void _objc_atfork_parent(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT void _objc_atfork_child(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + // Return YES if GC is on and `object` is a GC allocation. 
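// Illustrative sketch (not part of the patch): the contract behind the
// three _objc_atfork_* exports declared above, shrunk to one
// hypothetical lock. prepare must leave every runtime lock held at the
// instant fork() snapshots memory, parent simply releases them, and the
// child reinitializes them because their owning threads no longer exist.
#include <pthread.h>

static pthread_mutex_t gDemoLock = PTHREAD_MUTEX_INITIALIZER;

static void atforkPrepare(void) { pthread_mutex_lock(&gDemoLock); }
static void atforkParent(void)  { pthread_mutex_unlock(&gDemoLock); }
static void atforkChild(void)
{
    // Unlocking would be undefined: the owner died with the parent.
    pthread_mutex_t fresh = PTHREAD_MUTEX_INITIALIZER;
    gDemoLock = fresh;
}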
OBJC_EXPORT BOOL objc_isAuto(id object) __OSX_DEPRECATED(10.4, 10.8, "it always returns NO") diff --git a/runtime/objc-lockdebug.h b/runtime/objc-lockdebug.h index 071064d2..211b7ebf 100644 --- a/runtime/objc-lockdebug.h +++ b/runtime/objc-lockdebug.h @@ -21,12 +21,26 @@ * @APPLE_LICENSE_HEADER_END@ */ +#if DEBUG +extern void lockdebug_assert_all_locks_locked(); +extern void lockdebug_assert_no_locks_locked(); +extern void lockdebug_setInForkPrepare(bool); +extern void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock); +#else +static inline void lockdebug_assert_all_locks_locked() { } +static inline void lockdebug_assert_no_locks_locked() { } +static inline void lockdebug_setInForkPrepare(bool) { } +static inline void lockdebug_lock_precedes_lock(const void *, const void *) { } +#endif + +extern void lockdebug_remember_mutex(mutex_tt *lock); extern void lockdebug_mutex_lock(mutex_tt *lock); extern void lockdebug_mutex_try_lock(mutex_tt *lock); extern void lockdebug_mutex_unlock(mutex_tt *lock); extern void lockdebug_mutex_assert_locked(mutex_tt *lock); extern void lockdebug_mutex_assert_unlocked(mutex_tt *lock); +static inline void lockdebug_remember_mutex(mutex_tt *lock) { } static inline void lockdebug_mutex_lock(mutex_tt *lock) { } static inline void lockdebug_mutex_try_lock(mutex_tt *lock) { } static inline void lockdebug_mutex_unlock(mutex_tt *lock) { } @@ -34,12 +48,14 @@ static inline void lockdebug_mutex_assert_locked(mutex_tt *lock) { } static inline void lockdebug_mutex_assert_unlocked(mutex_tt *lock) { } +extern void lockdebug_remember_monitor(monitor_tt *lock); extern void lockdebug_monitor_enter(monitor_tt *lock); extern void lockdebug_monitor_leave(monitor_tt *lock); extern void lockdebug_monitor_wait(monitor_tt *lock); extern void lockdebug_monitor_assert_locked(monitor_tt *lock); extern void lockdebug_monitor_assert_unlocked(monitor_tt *lock); +static inline void lockdebug_remember_monitor(monitor_tt *lock) { } static inline void lockdebug_monitor_enter(monitor_tt *lock) { } static inline void lockdebug_monitor_leave(monitor_tt *lock) { } static inline void lockdebug_monitor_wait(monitor_tt *lock) { } @@ -47,6 +63,8 @@ static inline void lockdebug_monitor_assert_locked(monitor_tt *lock) { } static inline void lockdebug_monitor_assert_unlocked(monitor_tt *lock) {} +extern void +lockdebug_remember_recursive_mutex(recursive_mutex_tt *lock); extern void lockdebug_recursive_mutex_lock(recursive_mutex_tt *lock); extern void @@ -56,6 +74,8 @@ lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt *lock); extern void lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt *lock); +static inline void +lockdebug_remember_recursive_mutex(recursive_mutex_tt *lock) { } static inline void lockdebug_recursive_mutex_lock(recursive_mutex_tt *lock) { } static inline void @@ -66,6 +86,7 @@ static inline void lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt *lock) { } +extern void lockdebug_remember_rwlock(rwlock_tt *lock); extern void lockdebug_rwlock_read(rwlock_tt *lock); extern void lockdebug_rwlock_try_read_success(rwlock_tt *lock); extern void lockdebug_rwlock_unlock_read(rwlock_tt *lock); @@ -77,6 +98,7 @@ extern void lockdebug_rwlock_assert_writing(rwlock_tt *lock); extern void lockdebug_rwlock_assert_locked(rwlock_tt *lock); extern void lockdebug_rwlock_assert_unlocked(rwlock_tt *lock); +static inline void lockdebug_remember_rwlock(rwlock_tt *) { } static inline void lockdebug_rwlock_read(rwlock_tt *) { } static inline void 
lockdebug_rwlock_try_read_success(rwlock_tt *) { } static inline void lockdebug_rwlock_unlock_read(rwlock_tt *) { } diff --git a/runtime/objc-lockdebug.mm b/runtime/objc-lockdebug.mm index ed940115..7b38de68 100644 --- a/runtime/objc-lockdebug.mm +++ b/runtime/objc-lockdebug.mm @@ -30,115 +30,224 @@ #if DEBUG && !TARGET_OS_WIN32 +#include + + +/*********************************************************************** +* Thread-local bool set during _objc_atfork_prepare(). +* That function is allowed to break some lock ordering rules. +**********************************************************************/ + +static tls_key_t fork_prepare_tls; + +void +lockdebug_setInForkPrepare(bool inForkPrepare) +{ + INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0); + tls_set(fork_prepare_tls, (void*)inForkPrepare); +} + +static bool +inForkPrepare() +{ + INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0); + return (bool)tls_get(fork_prepare_tls); +} + + + +/*********************************************************************** +* Lock order graph. +* "lock X precedes lock Y" means that X must be acquired first. +* This property is transitive. +**********************************************************************/ + +struct lockorder { + const void *l; + std::vector predecessors; +}; + +static std::unordered_map lockOrderList; + +static bool +lockPrecedesLock(const lockorder& oldlock, const lockorder& newlock) +{ + for (const auto *pre : newlock.predecessors) { + if (&oldlock == pre) return true; + if (lockPrecedesLock(oldlock, *pre)) return true; + } + return false; +} + +static bool +lockPrecedesLock(const void *oldlock, const void *newlock) +{ + auto oldorder = lockOrderList.find(oldlock); + auto neworder = lockOrderList.find(newlock); + if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) { + return false; + } + return lockPrecedesLock(oldorder->second, neworder->second); +} + +static bool +lockUnorderedWithLock(const void *oldlock, const void *newlock) +{ + auto oldorder = lockOrderList.find(oldlock); + auto neworder = lockOrderList.find(newlock); + if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) { + return true; + } + + if (lockPrecedesLock(oldorder->second, neworder->second) || + lockPrecedesLock(neworder->second, oldorder->second)) + { + return false; + } + + return true; +} + +void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock) +{ + if (lockPrecedesLock(newlock, oldlock)) { + _objc_fatal("contradiction in lock order declaration"); + } + + auto oldorder = lockOrderList.find(oldlock); + auto neworder = lockOrderList.find(newlock); + if (oldorder == lockOrderList.end()) { + lockOrderList[oldlock] = lockorder{oldlock, {}}; + oldorder = lockOrderList.find(oldlock); + } + if (neworder == lockOrderList.end()) { + lockOrderList[newlock] = lockorder{newlock, {}}; + neworder = lockOrderList.find(newlock); + } + + neworder->second.predecessors.push_back(&oldorder->second); +} + + /*********************************************************************** * Recording - per-thread list of mutexes and monitors held **********************************************************************/ -typedef struct { - void *l; // the lock itself - int k; // the kind of lock it is (MUTEX, MONITOR, etc) - int i; // the lock's nest count -} lockcount; +enum class lockkind { + MUTEX = 1, MONITOR = 2, RDLOCK = 3, WRLOCK = 4, RECURSIVE = 5 +}; + +#define MUTEX lockkind::MUTEX +#define MONITOR lockkind::MONITOR +#define RDLOCK lockkind::RDLOCK 
+#define WRLOCK lockkind::WRLOCK +#define RECURSIVE lockkind::RECURSIVE + +struct lockcount { + lockkind k; // the kind of lock it is (MUTEX, MONITOR, etc) + int i; // the lock's nest count +}; -#define MUTEX 1 -#define MONITOR 2 -#define RDLOCK 3 -#define WRLOCK 4 -#define RECURSIVE 5 +using objc_lock_list = std::unordered_map; -typedef struct _objc_lock_list { - int allocated; - int used; - lockcount list[0]; -} _objc_lock_list; +// Thread-local list of locks owned by a thread. +// Used by lock ownership checks. static tls_key_t lock_tls; +// Global list of all locks. +// Used by fork() safety check. +// This can't be a static struct because of C++ initialization order problems. +static objc_lock_list& AllLocks() { + static objc_lock_list *locks; + INIT_ONCE_PTR(locks, new objc_lock_list, (void)0); + return *locks; +} + + static void destroyLocks(void *value) { - _objc_lock_list *locks = (_objc_lock_list *)value; + auto locks = (objc_lock_list *)value; // fixme complain about any still-held locks? - if (locks) free(locks); + if (locks) delete locks; } -static struct _objc_lock_list * -getLocks(BOOL create) +static objc_lock_list& +ownedLocks() { - _objc_lock_list *locks; - // Use a dedicated tls key to prevent differences vs non-debug in // usage of objc's other tls keys (required for some unit tests). INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0); - locks = (_objc_lock_list *)tls_get(lock_tls); + auto locks = (objc_lock_list *)tls_get(lock_tls); if (!locks) { - if (!create) { - return NULL; - } else { - locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16); - locks->allocated = 16; - locks->used = 0; - tls_set(lock_tls, locks); - } - } - - if (locks->allocated == locks->used) { - if (!create) { - return locks; - } else { - _objc_lock_list *oldlocks = locks; - locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + 2 * oldlocks->used * sizeof(lockcount)); - locks->used = oldlocks->used; - locks->allocated = oldlocks->used * 2; - memcpy(locks->list, oldlocks->list, locks->used * sizeof(lockcount)); - tls_set(lock_tls, locks); - free(oldlocks); - } + locks = new objc_lock_list; + tls_set(lock_tls, locks); } - return locks; + return *locks; } -static BOOL -hasLock(_objc_lock_list *locks, void *lock, int kind) +static bool +hasLock(objc_lock_list& locks, const void *lock, lockkind kind) { - int i; - if (!locks) return NO; - - for (i = 0; i < locks->used; i++) { - if (locks->list[i].l == lock && locks->list[i].k == kind) return YES; - } - return NO; + auto iter = locks.find(lock); + if (iter != locks.end() && iter->second.k == kind) return true; + return false; } +static const char *sym(const void *lock) +{ + Dl_info info; + int ok = dladdr(lock, &info); + if (ok && info.dli_sname && info.dli_sname[0]) return info.dli_sname; + else return "??"; +} + static void -setLock(_objc_lock_list *locks, void *lock, int kind) +setLock(objc_lock_list& locks, const void *lock, lockkind kind) { - int i; - for (i = 0; i < locks->used; i++) { - if (locks->list[i].l == lock && locks->list[i].k == kind) { - locks->list[i].i++; - return; + // Check if we already own this lock. + auto iter = locks.find(lock); + if (iter != locks.end() && iter->second.k == kind) { + iter->second.i++; + return; + } + + // Newly-acquired lock. Verify lock ordering. + // Locks not in AllLocks are exempt (i.e. 
@synchronize locks) + if (&locks != &AllLocks() && AllLocks().find(lock) != AllLocks().end()) { + for (auto& oldlock : locks) { + if (lockPrecedesLock(lock, oldlock.first)) { + _objc_fatal("lock %p (%s) incorrectly acquired before %p (%s)", + oldlock.first, sym(oldlock.first), lock, sym(lock)); + } + if (!inForkPrepare() && + lockUnorderedWithLock(lock, oldlock.first)) + { + // _objc_atfork_prepare is allowed to acquire + // otherwise-unordered locks, but nothing else may. + _objc_fatal("lock %p (%s) acquired before %p (%s) " + "with no defined lock order", + oldlock.first, sym(oldlock.first), lock, sym(lock)); + } } } - locks->list[locks->used].l = lock; - locks->list[locks->used].i = 1; - locks->list[locks->used].k = kind; - locks->used++; + locks[lock] = lockcount{kind, 1}; } static void -clearLock(_objc_lock_list *locks, void *lock, int kind) -{ - int i; - for (i = 0; i < locks->used; i++) { - if (locks->list[i].l == lock && locks->list[i].k == kind) { - if (--locks->list[i].i == 0) { - locks->list[i].l = NULL; - locks->list[i] = locks->list[--locks->used]; +clearLock(objc_lock_list& locks, const void *lock, lockkind kind) +{ + auto iter = locks.find(lock); + if (iter != locks.end()) { + auto& l = iter->second; + if (l.k == kind) { + if (--l.i == 0) { + locks.erase(iter); } return; } @@ -149,49 +258,67 @@ /*********************************************************************** -* Mutex checking +* fork() safety checking **********************************************************************/ -#if !TARGET_OS_SIMULATOR -// Non-simulator platforms have lock debugging built into os_unfair_lock. - +void +lockdebug_remember_mutex(mutex_t *lock) +{ + setLock(AllLocks(), lock, MUTEX); +} void -lockdebug_mutex_lock(mutex_t *lock) +lockdebug_remember_recursive_mutex(recursive_mutex_t *lock) { - // empty + setLock(AllLocks(), lock, RECURSIVE); } void -lockdebug_mutex_unlock(mutex_t *lock) +lockdebug_remember_monitor(monitor_t *lock) { - // empty + setLock(AllLocks(), lock, MONITOR); } void -lockdebug_mutex_assert_locked(mutex_t *lock) +lockdebug_remember_rwlock(rwlock_t *lock) { - os_unfair_lock_assert_owner((os_unfair_lock *)lock); + setLock(AllLocks(), lock, WRLOCK); } void -lockdebug_mutex_assert_unlocked(mutex_t *lock) +lockdebug_assert_all_locks_locked() { - os_unfair_lock_assert_not_owner((os_unfair_lock *)lock); + auto& owned = ownedLocks(); + + for (const auto& l : AllLocks()) { + if (!hasLock(owned, l.first, l.second.k)) { + _objc_fatal("lock %p:%d is incorrectly not owned", + l.first, l.second.k); + } + } } +void +lockdebug_assert_no_locks_locked() +{ + auto& owned = ownedLocks(); -// !TARGET_OS_SIMULATOR -#else -// TARGET_OS_SIMULATOR + for (const auto& l : AllLocks()) { + if (hasLock(owned, l.first, l.second.k)) { + _objc_fatal("lock %p:%d is incorrectly owned", l.first, l.second.k); + } + } +} -// Simulator platforms have no built-in lock debugging in os_unfair_lock. 
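// Illustrative sketch (not part of the patch): the per-thread ownership
// table behind ownedLocks() above, assuming C++11 thread_local in place
// of the tls_create/tls_get machinery (the real code uses a dedicated
// tls key for the reason given in its comment).
#include <unordered_map>

struct Owned { int kind; int nest; };
static thread_local std::unordered_map<const void *, Owned> tOwned;

static void noteAcquire(const void *lock, int kind)
{
    auto it = tOwned.find(lock);
    if (it != tOwned.end() && it->second.kind == kind) { it->second.nest++; return; }
    tOwned[lock] = Owned{kind, 1};
}

static bool owns(const void *lock, int kind)
{
    auto it = tOwned.find(lock);
    return it != tOwned.end() && it->second.kind == kind;
}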
+/*********************************************************************** +* Mutex checking +**********************************************************************/ void lockdebug_mutex_lock(mutex_t *lock) { - _objc_lock_list *locks = getLocks(YES); + auto& locks = ownedLocks(); if (hasLock(locks, lock, MUTEX)) { _objc_fatal("deadlock: relocking mutex"); @@ -205,14 +332,14 @@ void lockdebug_mutex_try_lock_success(mutex_t *lock) { - _objc_lock_list *locks = getLocks(YES); + auto& locks = ownedLocks(); setLock(locks, lock, MUTEX); } void lockdebug_mutex_unlock(mutex_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, MUTEX)) { _objc_fatal("unlocking unowned mutex"); @@ -224,7 +351,7 @@ void lockdebug_mutex_assert_locked(mutex_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, MUTEX)) { _objc_fatal("mutex incorrectly not locked"); @@ -234,7 +361,7 @@ void lockdebug_mutex_assert_unlocked(mutex_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (hasLock(locks, lock, MUTEX)) { _objc_fatal("mutex incorrectly locked"); @@ -242,24 +369,21 @@ } -// TARGET_OS_SIMULATOR -#endif - /*********************************************************************** * Recursive mutex checking **********************************************************************/ void -lockdebug_recursive_mutex_lock(recursive_mutex_tt *lock) +lockdebug_recursive_mutex_lock(recursive_mutex_t *lock) { - _objc_lock_list *locks = getLocks(YES); + auto& locks = ownedLocks(); setLock(locks, lock, RECURSIVE); } void -lockdebug_recursive_mutex_unlock(recursive_mutex_tt *lock) +lockdebug_recursive_mutex_unlock(recursive_mutex_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, RECURSIVE)) { _objc_fatal("unlocking unowned recursive mutex"); @@ -269,9 +393,9 @@ void -lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt *lock) +lockdebug_recursive_mutex_assert_locked(recursive_mutex_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, RECURSIVE)) { _objc_fatal("recursive mutex incorrectly not locked"); @@ -279,9 +403,9 @@ } void -lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt *lock) +lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (hasLock(locks, lock, RECURSIVE)) { _objc_fatal("recursive mutex incorrectly locked"); @@ -296,7 +420,7 @@ void lockdebug_monitor_enter(monitor_t *lock) { - _objc_lock_list *locks = getLocks(YES); + auto& locks = ownedLocks(); if (hasLock(locks, lock, MONITOR)) { _objc_fatal("deadlock: relocking monitor"); @@ -307,7 +431,7 @@ void lockdebug_monitor_leave(monitor_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, MONITOR)) { _objc_fatal("unlocking unowned monitor"); @@ -318,7 +442,7 @@ void lockdebug_monitor_wait(monitor_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, MONITOR)) { _objc_fatal("waiting in unowned monitor"); @@ -329,7 +453,7 @@ void lockdebug_monitor_assert_locked(monitor_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, MONITOR)) { _objc_fatal("monitor incorrectly not locked"); @@ -339,7 +463,7 @@ void lockdebug_monitor_assert_unlocked(monitor_t *lock) { - 
_objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (hasLock(locks, lock, MONITOR)) { _objc_fatal("monitor incorrectly held"); @@ -352,9 +476,9 @@ **********************************************************************/ void -lockdebug_rwlock_read(rwlock_tt *lock) +lockdebug_rwlock_read(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(YES); + auto& locks = ownedLocks(); if (hasLock(locks, lock, RDLOCK)) { // Recursive rwlock read is bad (may deadlock vs pending writer) @@ -371,16 +495,16 @@ // try-read when already writing is OK (will fail) // try-read failure does nothing. void -lockdebug_rwlock_try_read_success(rwlock_tt *lock) +lockdebug_rwlock_try_read_success(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(YES); + auto& locks = ownedLocks(); setLock(locks, lock, RDLOCK); } void -lockdebug_rwlock_unlock_read(rwlock_tt *lock) +lockdebug_rwlock_unlock_read(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, RDLOCK)) { _objc_fatal("un-reading unowned rwlock"); @@ -390,9 +514,9 @@ void -lockdebug_rwlock_write(rwlock_tt *lock) +lockdebug_rwlock_write(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(YES); + auto& locks = ownedLocks(); if (hasLock(locks, lock, RDLOCK)) { // Lock promotion not allowed (may deadlock) @@ -409,16 +533,16 @@ // try-write when already writing is OK (will fail) // try-write failure does nothing. void -lockdebug_rwlock_try_write_success(rwlock_tt *lock) +lockdebug_rwlock_try_write_success(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(YES); + auto& locks = ownedLocks(); setLock(locks, lock, WRLOCK); } void -lockdebug_rwlock_unlock_write(rwlock_tt *lock) +lockdebug_rwlock_unlock_write(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, WRLOCK)) { _objc_fatal("un-writing unowned rwlock"); @@ -428,9 +552,9 @@ void -lockdebug_rwlock_assert_reading(rwlock_tt *lock) +lockdebug_rwlock_assert_reading(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, RDLOCK)) { _objc_fatal("rwlock incorrectly not reading"); @@ -438,9 +562,9 @@ } void -lockdebug_rwlock_assert_writing(rwlock_tt *lock) +lockdebug_rwlock_assert_writing(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, WRLOCK)) { _objc_fatal("rwlock incorrectly not writing"); @@ -448,9 +572,9 @@ } void -lockdebug_rwlock_assert_locked(rwlock_tt *lock) +lockdebug_rwlock_assert_locked(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) { _objc_fatal("rwlock incorrectly neither reading nor writing"); @@ -458,9 +582,9 @@ } void -lockdebug_rwlock_assert_unlocked(rwlock_tt *lock) +lockdebug_rwlock_assert_unlocked(rwlock_t *lock) { - _objc_lock_list *locks = getLocks(NO); + auto& locks = ownedLocks(); if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) { _objc_fatal("rwlock incorrectly not unlocked"); diff --git a/runtime/objc-locks-new.h b/runtime/objc-locks-new.h new file mode 100644 index 00000000..73e3dd0d --- /dev/null +++ b/runtime/objc-locks-new.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2017 Apple Inc. All Rights Reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-locks-new.h +* Declarations of all locks used in the runtime. +**********************************************************************/ + +#ifndef _OBJC_LOCKS_NEW_H +#define _OBJC_LOCKS_NEW_H + +// fork() safety requires careful tracking of all locks used in the runtime. +// Thou shalt not declare any locks outside this file. + +extern rwlock_t runtimeLock; +extern mutex_t DemangleCacheLock; + +#endif diff --git a/runtime/objc-locks-old.h b/runtime/objc-locks-old.h new file mode 100644 index 00000000..1926136b --- /dev/null +++ b/runtime/objc-locks-old.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2017 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-locks-old.h +* Declarations of all locks used in the runtime. +**********************************************************************/ + +#ifndef _OBJC_LOCKS_OLD_H +#define _OBJC_LOCKS_OLD_H + +// fork() safety requires careful tracking of all locks used in the runtime. +// Thou shalt not declare any locks outside this file. + +extern mutex_t classLock; +extern mutex_t methodListLock; +extern mutex_t NXUniqueStringLock; +extern spinlock_t impLock; + +#endif diff --git a/runtime/objc-locks.h b/runtime/objc-locks.h new file mode 100644 index 00000000..05bd22f5 --- /dev/null +++ b/runtime/objc-locks.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2017 Apple Inc. All Rights Reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-locks.h +* Declarations of all locks used in the runtime. +**********************************************************************/ + +#ifndef _OBJC_LOCKS_H +#define _OBJC_LOCKS_H + +// fork() safety requires careful tracking of all locks used in the runtime. +// Thou shalt not declare any locks outside this file. + +// Lock ordering is declared in _objc_fork_prepare() +// and is enforced by lockdebug. + +extern monitor_t classInitLock; +extern rwlock_t selLock; +extern mutex_t cacheUpdateLock; +extern recursive_mutex_t loadMethodLock; +extern mutex_t crashlog_lock; +extern spinlock_t objcMsgLogLock; +extern mutex_t AltHandlerDebugLock; +extern mutex_t AssociationsManagerLock; +extern StripedMap PropertyLocks; +extern StripedMap StructLocks; +extern StripedMap CppObjectLocks; + +// SideTable lock is buried awkwardly. Call a function to manipulate it. +extern void SideTableLockAll(); +extern void SideTableUnlockAll(); +extern void SideTableForceResetAll(); +extern void SideTableDefineLockOrder(); +extern void SideTableLocksPrecedeLock(const void *newlock); +extern void SideTableLocksSucceedLock(const void *oldlock); + +#if __OBJC2__ +#include "objc-locks-new.h" +#else +#include "objc-locks-old.h" +#endif + +#endif diff --git a/runtime/objc-opt.mm b/runtime/objc-opt.mm index 19533df3..45c13b3f 100644 --- a/runtime/objc-opt.mm +++ b/runtime/objc-opt.mm @@ -59,6 +59,11 @@ bool noMissingWeakSuperclasses(void) return nil; } +unsigned int getPreoptimizedClassUnreasonableCount() +{ + return 0; +} + Class getPreoptimizedClass(const char *name) { return nil; @@ -165,6 +170,17 @@ bool noMissingWeakSuperclasses(void) } +unsigned int getPreoptimizedClassUnreasonableCount() +{ + objc_clsopt_t *classes = opt ? opt->clsopt() : nil; + if (!classes) return 0; + + // This is an overestimate: each set of duplicates + // gets double-counted in `capacity` as well. + return classes->capacity + classes->duplicateCount(); +} + + Class getPreoptimizedClass(const char *name) { objc_clsopt_t *classes = opt ? 
opt->clsopt() : nil; diff --git a/runtime/objc-os.h b/runtime/objc-os.h index 9523de05..6d5e76bd 100644 --- a/runtime/objc-os.h +++ b/runtime/objc-os.h @@ -120,8 +120,6 @@ void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE; #define fastpath(x) (__builtin_expect(bool(x), 1)) #define slowpath(x) (__builtin_expect(bool(x), 0)) -#include - typedef OSSpinLock os_lock_handoff_s; #define OS_LOCK_HANDOFF_INIT OS_SPINLOCK_INIT @@ -137,7 +135,6 @@ ALWAYS_INLINE bool os_lock_trylock(volatile os_lock_handoff_s *lock) { return OSSpinLockTry(lock); } - static ALWAYS_INLINE uintptr_t addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout) { @@ -271,7 +268,7 @@ ClearExclusive(uintptr_t *dst __unused) __BEGIN_DECLS extern const char *CRSetCrashLogMessage(const char *msg); extern const char *CRGetCrashLogMessage(void); - extern const char *CRSetCrashLogMessage2(const char *msg); + extern const char *CRGetCrashLogMessage2(const char *msg); __END_DECLS #endif @@ -820,8 +817,14 @@ using monitor_t = monitor_tt; using rwlock_t = rwlock_tt; using recursive_mutex_t = recursive_mutex_tt; -#include "objc-lockdebug.h" +// Use fork_unsafe_lock to get a lock that isn't +// acquired and released around fork(). +// All fork-safe locks are checked in debug builds. +struct fork_unsafe_lock_t { }; +extern const fork_unsafe_lock_t fork_unsafe_lock; +extern "C" void os_unfair_lock_assert_owner(os_unfair_lock *); +extern "C" void os_unfair_lock_assert_not_owner(os_unfair_lock *); extern "C" void os_unfair_lock_unlock(os_unfair_lock *); extern "C" void os_unfair_lock_lock_with_options(os_unfair_lock *, uint32_t options); @@ -833,18 +836,21 @@ inline void os_unfair_lock_unlock_inline(os_unfair_lock *unfair_lock) { os_unfair_lock_unlock(unfair_lock); } -extern "C" void os_unfair_lock_assert_owner(os_unfair_lock *); -extern "C" void os_unfair_lock_assert_not_owner(os_unfair_lock *); - #ifndef OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION #define OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION 0x10000 #endif +#include "objc-lockdebug.h" + template class mutex_tt : nocopy_t { os_unfair_lock mLock; public: - mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) { } + mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) { + lockdebug_remember_mutex(this); + } + + mutex_tt(const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { } void lock() { lockdebug_mutex_lock(this); @@ -859,6 +865,13 @@ class mutex_tt : nocopy_t { os_unfair_lock_unlock_inline(&mLock); } + void forceReset() { + lockdebug_mutex_unlock(this); + + bzero(&mLock, sizeof(mLock)); + mLock = os_unfair_lock OS_UNFAIR_LOCK_INIT; + } + void assertLocked() { lockdebug_mutex_assert_locked(this); } @@ -871,7 +884,7 @@ class mutex_tt : nocopy_t { // Address-ordered lock discipline for a pair of locks. 
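// Illustrative sketch (not part of the patch): fork_unsafe_lock above is
// a tag type, an empty struct whose only job is to select a constructor
// overload that skips lockdebug_remember_*, so that particular lock is
// never swept up by the fork() machinery. Standalone miniature with
// hypothetical names:
struct untracked_t { };
extern const untracked_t untracked;

class TrackedLock {
public:
    TrackedLock()                  { /* register with the global registry */ }
    TrackedLock(const untracked_t) { /* deliberately left unregistered */ }
};

// TrackedLock normalLock;            // participates in atfork handling
// TrackedLock rogueLock(untracked);  // opted out, like fork_unsafe_lock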
static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) { - if (lock1 > lock2) { + if (lock1 < lock2) { lock1->lock(); lock2->lock(); } else { @@ -892,7 +905,13 @@ class recursive_mutex_tt : nocopy_t { pthread_mutex_t mLock; public: - recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) { } + recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) { + lockdebug_remember_recursive_mutex(this); + } + + recursive_mutex_tt(const fork_unsafe_lock_t unsafe) + : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) + { } void lock() { @@ -910,6 +929,14 @@ class recursive_mutex_tt : nocopy_t { if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err); } + void forceReset() + { + lockdebug_recursive_mutex_unlock(this); + + bzero(&mLock, sizeof(mLock)); + mLock = pthread_mutex_t PTHREAD_RECURSIVE_MUTEX_INITIALIZER; + } + bool tryUnlock() { int err = pthread_mutex_unlock(&mLock); @@ -941,7 +968,14 @@ class monitor_tt { public: monitor_tt() - : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) { } + : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) + { + lockdebug_remember_monitor(this); + } + + monitor_tt(const fork_unsafe_lock_t unsafe) + : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) + { } void enter() { @@ -979,6 +1013,16 @@ class monitor_tt { if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err); } + void forceReset() + { + lockdebug_monitor_leave(this); + + bzero(&mutex, sizeof(mutex)); + bzero(&cond, sizeof(cond)); + mutex = pthread_mutex_t PTHREAD_MUTEX_INITIALIZER; + cond = pthread_cond_t PTHREAD_COND_INITIALIZER; + } + void assertLocked() { lockdebug_monitor_assert_locked(this); @@ -1058,12 +1102,16 @@ static inline void qosEndOverride() { } template class rwlock_tt : nocopy_t { - pthread_rwlock_t mLock = PTHREAD_RWLOCK_INITIALIZER; + pthread_rwlock_t mLock; public: rwlock_tt() : mLock(PTHREAD_RWLOCK_INITIALIZER) { - pthread_rwlock_init(&mLock, nullptr); + lockdebug_remember_rwlock(this); } + + rwlock_tt(const fork_unsafe_lock_t unsafe) + : mLock(PTHREAD_RWLOCK_INITIALIZER) + { } void read() { @@ -1131,6 +1179,14 @@ class rwlock_tt : nocopy_t { } } + void forceReset() + { + lockdebug_rwlock_unlock_write(this); + + bzero(&mLock, sizeof(mLock)); + mLock = pthread_rwlock_t PTHREAD_RWLOCK_INITIALIZER; + } + void assertReading() { lockdebug_rwlock_assert_reading(this); @@ -1267,4 +1323,13 @@ ustrdupMaybeNil(const uint8_t *str) (unsigned char)(((uint32_t)(v))>>8), \ (unsigned char)(((uint32_t)(v))>>0) +// fork() safety requires careful tracking of all locks. +// Our custom lock types check this in debug builds. +// Disallow direct use of all other lock types. +typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE; +typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE; +typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE; +typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE; + + #endif diff --git a/runtime/objc-os.mm b/runtime/objc-os.mm index cb077e55..edf9a46e 100644 --- a/runtime/objc-os.mm +++ b/runtime/objc-os.mm @@ -34,6 +34,8 @@ #include "objc-runtime-old.h" #include "objcrt.h" +const fork_unsafe_lock_t fork_unsafe_lock; + int monitor_init(monitor_t *c) { // fixme error checking @@ -603,6 +605,224 @@ static void static_init() } +/*********************************************************************** +* _objc_atfork_prepare +* _objc_atfork_parent +* _objc_atfork_child +* Allow ObjC to be used between fork() and exec(). 
+* libc requires this because it has fork-safe functions that use os_objects. +* +* _objc_atfork_prepare() acquires all locks. +* _objc_atfork_parent() releases the locks again. +* _objc_atfork_child() forcibly resets the locks. +**********************************************************************/ + +// Declare lock ordering. +#if DEBUG +__attribute__((constructor)) +static void defineLockOrder() +{ + // Every lock precedes crashlog_lock + // on the assumption that fatal errors could be anywhere. + lockdebug_lock_precedes_lock(&loadMethodLock, &crashlog_lock); + lockdebug_lock_precedes_lock(&classInitLock, &crashlog_lock); +#if __OBJC2__ + lockdebug_lock_precedes_lock(&runtimeLock, &crashlog_lock); + lockdebug_lock_precedes_lock(&DemangleCacheLock, &crashlog_lock); +#else + lockdebug_lock_precedes_lock(&classLock, &crashlog_lock); + lockdebug_lock_precedes_lock(&methodListLock, &crashlog_lock); + lockdebug_lock_precedes_lock(&NXUniqueStringLock, &crashlog_lock); + lockdebug_lock_precedes_lock(&impLock, &crashlog_lock); +#endif + lockdebug_lock_precedes_lock(&selLock, &crashlog_lock); + lockdebug_lock_precedes_lock(&cacheUpdateLock, &crashlog_lock); + lockdebug_lock_precedes_lock(&objcMsgLogLock, &crashlog_lock); + lockdebug_lock_precedes_lock(&AltHandlerDebugLock, &crashlog_lock); + lockdebug_lock_precedes_lock(&AssociationsManagerLock, &crashlog_lock); + SideTableLocksPrecedeLock(&crashlog_lock); + PropertyLocks.precedeLock(&crashlog_lock); + StructLocks.precedeLock(&crashlog_lock); + CppObjectLocks.precedeLock(&crashlog_lock); + + // loadMethodLock precedes everything + // because it is held while +load methods run + lockdebug_lock_precedes_lock(&loadMethodLock, &classInitLock); +#if __OBJC2__ + lockdebug_lock_precedes_lock(&loadMethodLock, &runtimeLock); + lockdebug_lock_precedes_lock(&loadMethodLock, &DemangleCacheLock); +#else + lockdebug_lock_precedes_lock(&loadMethodLock, &methodListLock); + lockdebug_lock_precedes_lock(&loadMethodLock, &classLock); + lockdebug_lock_precedes_lock(&loadMethodLock, &NXUniqueStringLock); + lockdebug_lock_precedes_lock(&loadMethodLock, &impLock); +#endif + lockdebug_lock_precedes_lock(&loadMethodLock, &selLock); + lockdebug_lock_precedes_lock(&loadMethodLock, &cacheUpdateLock); + lockdebug_lock_precedes_lock(&loadMethodLock, &objcMsgLogLock); + lockdebug_lock_precedes_lock(&loadMethodLock, &AltHandlerDebugLock); + lockdebug_lock_precedes_lock(&loadMethodLock, &AssociationsManagerLock); + SideTableLocksSucceedLock(&loadMethodLock); + PropertyLocks.succeedLock(&loadMethodLock); + StructLocks.succeedLock(&loadMethodLock); + CppObjectLocks.succeedLock(&loadMethodLock); + + // PropertyLocks and CppObjectLocks precede everything + // because they are held while objc_retain() or C++ copy are called. + // (StructLocks do not precede everything because it calls memmove only.) 
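// Illustrative sketch (not part of the patch): the shape of the graph
// these lockdebug_lock_precedes_lock() calls build. Each edge
// "A precedes B" is recorded on B, and ordering is the transitive
// closure, checked by a depth-first walk (mirrors lockPrecedesLock in
// objc-lockdebug.mm).
#include <unordered_map>
#include <vector>

static std::unordered_map<const void *, std::vector<const void *>> gPreds;

static void declarePrecedes(const void *first, const void *later)
{
    gPreds[later].push_back(first);
}

static bool precedes(const void *a, const void *b)
{
    for (const void *p : gPreds[b])   // walk b's predecessors, depth-first
        if (p == a || precedes(a, p)) return true;
    return false;
}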
+ PropertyLocks.precedeLock(&classInitLock); + CppObjectLocks.precedeLock(&classInitLock); +#if __OBJC2__ + PropertyLocks.precedeLock(&runtimeLock); + CppObjectLocks.precedeLock(&runtimeLock); + PropertyLocks.precedeLock(&DemangleCacheLock); + CppObjectLocks.precedeLock(&DemangleCacheLock); +#else + PropertyLocks.precedeLock(&methodListLock); + CppObjectLocks.precedeLock(&methodListLock); + PropertyLocks.precedeLock(&classLock); + CppObjectLocks.precedeLock(&classLock); + PropertyLocks.precedeLock(&NXUniqueStringLock); + CppObjectLocks.precedeLock(&NXUniqueStringLock); + PropertyLocks.precedeLock(&impLock); + CppObjectLocks.precedeLock(&impLock); +#endif + PropertyLocks.precedeLock(&selLock); + CppObjectLocks.precedeLock(&selLock); + PropertyLocks.precedeLock(&cacheUpdateLock); + CppObjectLocks.precedeLock(&cacheUpdateLock); + PropertyLocks.precedeLock(&objcMsgLogLock); + CppObjectLocks.precedeLock(&objcMsgLogLock); + PropertyLocks.precedeLock(&AltHandlerDebugLock); + CppObjectLocks.precedeLock(&AltHandlerDebugLock); + PropertyLocks.precedeLock(&AssociationsManagerLock); + CppObjectLocks.precedeLock(&AssociationsManagerLock); + // fixme side table + +#if __OBJC2__ + lockdebug_lock_precedes_lock(&classInitLock, &runtimeLock); +#endif + +#if __OBJC2__ + // Runtime operations may occur inside SideTable locks + // (such as storeWeak calling getMethodImplementation) + SideTableLocksPrecedeLock(&runtimeLock); + // Some operations may occur inside runtimeLock. + lockdebug_lock_precedes_lock(&runtimeLock, &selLock); + lockdebug_lock_precedes_lock(&runtimeLock, &cacheUpdateLock); + lockdebug_lock_precedes_lock(&runtimeLock, &DemangleCacheLock); +#else + // Runtime operations may occur inside SideTable locks + // (such as storeWeak calling getMethodImplementation) + SideTableLocksPrecedeLock(&methodListLock); + // Method lookup and fixup. + lockdebug_lock_precedes_lock(&methodListLock, &classLock); + lockdebug_lock_precedes_lock(&methodListLock, &selLock); + lockdebug_lock_precedes_lock(&methodListLock, &cacheUpdateLock); + lockdebug_lock_precedes_lock(&methodListLock, &impLock); + lockdebug_lock_precedes_lock(&classLock, &selLock); + lockdebug_lock_precedes_lock(&classLock, &cacheUpdateLock); +#endif + + // Striped locks use address order internally. 
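// Illustrative sketch (not part of the patch): the plain-POSIX way to
// wire up the three phases that _objc_atfork_prepare/parent/child
// implement below; the runtime's real handlers are registered by
// libSystem rather than by the runtime itself. Handler names are
// hypothetical.
#include <pthread.h>

static void onPrepare(void) { /* acquire every lock, in declared order */ }
static void onParent(void)  { /* release them all */ }
static void onChild(void)   { /* forceReset them all: owners are gone */ }

__attribute__((constructor))
static void installForkHooks(void)
{
    pthread_atfork(onPrepare, onParent, onChild);
}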
+ SideTableDefineLockOrder(); + PropertyLocks.defineLockOrder(); + StructLocks.defineLockOrder(); + CppObjectLocks.defineLockOrder(); +} +// DEBUG +#endif + +void _objc_atfork_prepare() +{ + lockdebug_assert_no_locks_locked(); + lockdebug_setInForkPrepare(true); + + loadMethodLock.lock(); + PropertyLocks.lockAll(); + CppObjectLocks.lockAll(); + classInitLock.enter(); + SideTableLockAll(); +#if __OBJC2__ + runtimeLock.write(); + DemangleCacheLock.lock(); +#else + methodListLock.lock(); + classLock.lock(); + NXUniqueStringLock.lock(); + impLock.lock(); +#endif + selLock.write(); + cacheUpdateLock.lock(); + objcMsgLogLock.lock(); + AltHandlerDebugLock.lock(); + AssociationsManagerLock.lock(); + StructLocks.lockAll(); + crashlog_lock.lock(); + + lockdebug_assert_all_locks_locked(); + lockdebug_setInForkPrepare(false); +} + +void _objc_atfork_parent() +{ + lockdebug_assert_all_locks_locked(); + + CppObjectLocks.unlockAll(); + StructLocks.unlockAll(); + PropertyLocks.unlockAll(); + AssociationsManagerLock.unlock(); + AltHandlerDebugLock.unlock(); + objcMsgLogLock.unlock(); + crashlog_lock.unlock(); + loadMethodLock.unlock(); + cacheUpdateLock.unlock(); + selLock.unlockWrite(); + SideTableUnlockAll(); +#if __OBJC2__ + DemangleCacheLock.unlock(); + runtimeLock.unlockWrite(); +#else + impLock.unlock(); + NXUniqueStringLock.unlock(); + methodListLock.unlock(); + classLock.unlock(); +#endif + classInitLock.leave(); + + lockdebug_assert_no_locks_locked(); +} + +void _objc_atfork_child() +{ + lockdebug_assert_all_locks_locked(); + + CppObjectLocks.forceResetAll(); + StructLocks.forceResetAll(); + PropertyLocks.forceResetAll(); + AssociationsManagerLock.forceReset(); + AltHandlerDebugLock.forceReset(); + objcMsgLogLock.forceReset(); + crashlog_lock.forceReset(); + loadMethodLock.forceReset(); + cacheUpdateLock.forceReset(); + selLock.forceReset(); + SideTableForceResetAll(); +#if __OBJC2__ + DemangleCacheLock.forceReset(); + runtimeLock.forceReset(); +#else + impLock.forceReset(); + NXUniqueStringLock.forceReset(); + methodListLock.forceReset(); + classLock.forceReset(); +#endif + classInitLock.forceReset(); + + lockdebug_assert_no_locks_locked(); +} + + /*********************************************************************** * _objc_init * Bootstrap initialization. Registers our image notifier with dyld. 
@@ -622,7 +842,7 @@ void _objc_init(void)
     lock_init();
     exception_init();
 
-    _dyld_objc_notify_register(&map_2_images, load_images, unmap_image);
+    _dyld_objc_notify_register(&map_images, load_images, unmap_image);
 }
 
 
diff --git a/runtime/objc-private.h b/runtime/objc-private.h
index 07bf78ce..b34b591c 100644
--- a/runtime/objc-private.h
+++ b/runtime/objc-private.h
@@ -541,6 +541,7 @@ extern objc_selopt_t *preoptimizedSelectors(void);
 
 extern Protocol *getPreoptimizedProtocol(const char *name);
 
+extern unsigned getPreoptimizedClassUnreasonableCount();
 extern Class getPreoptimizedClass(const char *name);
 extern Class* copyPreoptimizedClasses(const char *name, int *outCount);
 
@@ -594,15 +595,6 @@ extern char *copyPropertyAttributeValue(const char *attrs, const char *name);
 /* locking */
 extern void lock_init(void);
 
-extern rwlock_t selLock;
-extern mutex_t cacheUpdateLock;
-extern recursive_mutex_t loadMethodLock;
-#if __OBJC2__
-extern rwlock_t runtimeLock;
-#else
-extern mutex_t classLock;
-extern mutex_t methodListLock;
-#endif
 
 class monitor_locker_t : nocopy_t {
     monitor_t& lock;
@@ -724,8 +716,8 @@ extern void layout_bitmap_print(layout_bitmap bits);
 // fixme runtime
 extern Class look_up_class(const char *aClassName, bool includeUnconnected, bool includeClassHandler);
 
-extern "C" void map_2_images(unsigned count, const char * const paths[],
-                             const struct mach_header * const mhdrs[]);
+extern "C" void map_images(unsigned count, const char * const paths[],
+                           const struct mach_header * const mhdrs[]);
 extern void map_images_nolock(unsigned count, const char * const paths[],
                               const struct mach_header * const mhdrs[]);
 extern void load_images(const char *path, const struct mach_header *mh);
@@ -894,6 +886,41 @@ class StripedMap {
         return const_cast<StripedMap<T>&>(*this)[p];
     }
 
+    // Shortcuts for StripedMaps of locks.
+    void lockAll() {
+        for (unsigned int i = 0; i < StripeCount; i++) {
+            array[i].value.lock();
+        }
+    }
+
+    void unlockAll() {
+        for (unsigned int i = 0; i < StripeCount; i++) {
+            array[i].value.unlock();
+        }
+    }
+
+    void forceResetAll() {
+        for (unsigned int i = 0; i < StripeCount; i++) {
+            array[i].value.forceReset();
+        }
+    }
+
+    void defineLockOrder() {
+        for (unsigned int i = 1; i < StripeCount; i++) {
+            lockdebug_lock_precedes_lock(&array[i-1].value, &array[i].value);
+        }
+    }
+
+    void precedeLock(const void *newlock) {
+        // assumes defineLockOrder is also called
+        lockdebug_lock_precedes_lock(&array[StripeCount-1].value, newlock);
+    }
+
+    void succeedLock(const void *oldlock) {
+        // assumes defineLockOrder is also called
+        lockdebug_lock_precedes_lock(oldlock, &array[0].value);
+    }
+
 #if DEBUG
     StripedMap() {
         // Verify alignment expectations.
@@ -1015,6 +1042,10 @@ static uint32_t ptr_hash(uint32_t key)
 */
 
 
+
+// Lock declarations
+#include "objc-locks.h"
+
 // Inlined parts of objc_object's implementation
 #include "objc-object.h"
 
diff --git a/runtime/objc-references.mm b/runtime/objc-references.mm
index 712b88ec..20bf8079 100644
--- a/runtime/objc-references.mm
+++ b/runtime/objc-references.mm
@@ -187,15 +187,17 @@ void construct(pointer p, const value_type& x) {
 using namespace objc_references_support;
 
 // class AssociationsManager manages a lock / hash table singleton pair.
-// Allocating an instance acquires the lock, and calling its assocations() method
-// lazily allocates it.
+// Allocating an instance acquires the lock, and calling its associations()
+// method lazily allocates the hash table.
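
The new StripedMap shortcuts lean on transitivity: defineLockOrder() chains stripe i-1 before stripe i, so precedeLock() needs only one edge from the last stripe (and succeedLock() one edge into the first) to order the whole family against an outside lock. A compressed sketch under those assumptions; MiniStriped is hypothetical and lock_precedes merely stands in for lockdebug_lock_precedes_lock:

    #include <mutex>

    // Stand-in for lockdebug_lock_precedes_lock(); a no-op here.
    static void lock_precedes(const void *earlier, const void *later) {
        (void)earlier; (void)later;
    }

    template <unsigned StripeCount>
    struct MiniStriped {
        std::mutex stripe[StripeCount];

        // Acquire in ascending index order so that lockAll() itself
        // obeys the declared stripe order.
        void lockAll() {
            for (unsigned i = 0; i < StripeCount; i++) stripe[i].lock();
        }
        void unlockAll() {
            for (unsigned i = 0; i < StripeCount; i++) stripe[i].unlock();
        }
        void defineLockOrder() {
            for (unsigned i = 1; i < StripeCount; i++)
                lock_precedes(&stripe[i-1], &stripe[i]);
        }
        // One edge from the last stripe orders every stripe before newlock.
        void precedeLock(const void *newlock) {
            lock_precedes(&stripe[StripeCount-1], newlock);
        }
        // One edge into the first stripe orders oldlock before every stripe.
        void succeedLock(const void *oldlock) {
            lock_precedes(oldlock, &stripe[0]);
        }
    };

The AssociationsManager change just below is the same fork-safety theme in miniature: the spinlock moves out of the class into a named global that the atfork handlers can lock, unlock, and force-reset.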
+
+spinlock_t AssociationsManagerLock;
 class AssociationsManager {
-    static spinlock_t _lock;
-    static AssociationsHashMap *_map; // associative references: object pointer -> PtrPtrHashMap.
+    // associative references: object pointer -> PtrPtrHashMap.
+    static AssociationsHashMap *_map;
 public:
-    AssociationsManager()   { _lock.lock(); }
-    ~AssociationsManager()  { _lock.unlock(); }
+    AssociationsManager()   { AssociationsManagerLock.lock(); }
+    ~AssociationsManager()  { AssociationsManagerLock.unlock(); }
 
     AssociationsHashMap &associations() {
         if (_map == NULL)
@@ -204,7 +206,6 @@ void construct(pointer p, const value_type& x) {
     }
 };
 
-spinlock_t AssociationsManager::_lock;
 AssociationsHashMap *AssociationsManager::_map = NULL;
 
 // expanded policy bits.
 
diff --git a/runtime/objc-runtime-new.h b/runtime/objc-runtime-new.h
index 7b7dbacc..dff7c109 100644
--- a/runtime/objc-runtime-new.h
+++ b/runtime/objc-runtime-new.h
@@ -901,7 +901,11 @@ struct class_data_bits_t {
     {
         assert(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));
         // Set during realization or construction only. No locking needed.
-        bits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
+        // Use a store-release fence because there may be concurrent
+        // readers of data and data's contents.
+        uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
+        atomic_thread_fence(memory_order_release);
+        bits = newBits;
     }
 
     bool hasDefaultRR() {
@@ -1351,12 +1355,16 @@ struct message_ref_t {
 extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
 
 static inline void
-foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
+foreach_realized_class_and_subclass_2(Class top, unsigned& count,
+                                      std::function<bool(Class)> code)
 {
     // runtimeLock.assertWriting();
     assert(top);
     Class cls = top;
     while (1) {
+        if (--count == 0) {
+            _objc_fatal("Memory corruption in class list.");
+        }
         if (!code(cls)) break;
         if (cls->data()->firstSubclass) {
@@ -1364,6 +1372,9 @@ foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
         } else {
             while (!cls->data()->nextSiblingClass && cls != top) {
                 cls = cls->superclass;
+                if (--count == 0) {
+                    _objc_fatal("Memory corruption in class list.");
+                }
             }
             if (cls == top) break;
             cls = cls->data()->nextSiblingClass;
@@ -1371,26 +1382,39 @@ foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
     }
 }
 
+extern Class firstRealizedClass();
+extern unsigned int unreasonableClassCount();
+
 // Enumerates a class and all of its realized subclasses.
 static inline void
-foreach_realized_class_and_subclass(Class top, void (^code)(Class))
+foreach_realized_class_and_subclass(Class top,
+                                    std::function<void(Class)> code)
 {
-    foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
-        code(cls); return true;
+    unsigned int count = unreasonableClassCount();
+
+    foreach_realized_class_and_subclass_2(top, count,
+                                          [&code](Class cls) -> bool
+    {
+        code(cls);
+        return true;
     });
 }
 
 // Enumerates all realized classes and metaclasses.
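
Switching from blocks to std::function lets the subclass walk take count by reference, so a cycle planted in the firstSubclass/nextSiblingClass links by memory corruption now dies with _objc_fatal instead of spinning forever. A self-contained sketch of the same bounded walk over a toy node type (Node and walk are illustrative, not runtime types):

    #include <cstdlib>
    #include <functional>

    struct Node {
        Node *firstSubclass = nullptr;
        Node *nextSiblingClass = nullptr;
        Node *superclass = nullptr;
    };

    // Mirrors foreach_realized_class_and_subclass_2: every step, including
    // backtracking, decrements the shared budget; exhausting it aborts.
    static void walk(Node *top, unsigned &count,
                     const std::function<bool(Node *)> &code)
    {
        Node *cls = top;
        while (1) {
            if (--count == 0) abort();  // "Memory corruption in class list."
            if (!code(cls)) break;
            if (cls->firstSubclass) {
                cls = cls->firstSubclass;
            } else {
                while (!cls->nextSiblingClass && cls != top) {
                    cls = cls->superclass;
                    if (--count == 0) abort();
                }
                if (cls == top) break;
                cls = cls->nextSiblingClass;
            }
        }
    }

The budget comes from unreasonableClassCount(), defined later in this patch as (named classes + preoptimized classes + 1) * 16: deliberately slack, since walks also touch metaclasses and backtrack. The metaclass enumerator that the comment above introduces threads one shared count through every root's walk, so the limit bounds the entire pass.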
-extern Class firstRealizedClass();
 static inline void
-foreach_realized_class_and_metaclass(void (^code)(Class))
+foreach_realized_class_and_metaclass(std::function<void(Class)> code)
 {
+    unsigned int count = unreasonableClassCount();
+
     for (Class top = firstRealizedClass();
          top != nil;
          top = top->data()->nextSiblingClass)
     {
-        foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
-            code(cls); return true;
+        foreach_realized_class_and_subclass_2(top, count,
+                                              [&code](Class cls) -> bool
+        {
+            code(cls);
+            return true;
         });
     }
 
diff --git a/runtime/objc-runtime-new.mm b/runtime/objc-runtime-new.mm
index 4e601932..de67e38d 100644
--- a/runtime/objc-runtime-new.mm
+++ b/runtime/objc-runtime-new.mm
@@ -1035,6 +1035,25 @@ static void removeNamedClass(Class cls, const char *name)
 }
 
 
+/***********************************************************************
+* unreasonableClassCount
+* Provides an upper bound for any iteration of classes,
+* to prevent spins when runtime metadata is corrupted.
+**********************************************************************/
+unsigned unreasonableClassCount()
+{
+    runtimeLock.assertLocked();
+
+    int base = NXCountMapTable(gdb_objc_realized_classes) +
+        getPreoptimizedClassUnreasonableCount();
+
+    // Provide lots of slack here. Some iterations touch metaclasses too.
+    // Some iterations backtrack (like realized class iteration).
+    // We don't need an efficient bound, merely one that prevents spins.
+    return (base + 1) * 16;
+}
+
+
 /***********************************************************************
 * futureNamedClasses
 * Returns the classname => future class map for unrealized future classes.
@@ -1992,8 +2011,8 @@ void _objc_flush_caches(Class cls)
 * Locking: write-locks runtimeLock
 **********************************************************************/
 void
-map_2_images(unsigned count, const char * const paths[],
-             const struct mach_header * const mhdrs[])
+map_images(unsigned count, const char * const paths[],
+           const struct mach_header * const mhdrs[])
 {
     rwlock_writer_t lock(runtimeLock);
     return map_images_nolock(count, paths, mhdrs);
@@ -4264,7 +4283,7 @@ void objc_registerProtocol(Protocol *proto_gen)
 * If realize=false, the class must already be realized or future.
 * Locking: If realize=true, runtimeLock must be held for writing by the caller.
 **********************************************************************/
-static mutex_t DemangleCacheLock;
+mutex_t DemangleCacheLock;
 static NXHashTable *DemangleCache;
 const char *
 objc_class::demangledName(bool realize)
@@ -4568,9 +4587,7 @@ IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls)
 IMP lookUpImpOrForward(Class cls, SEL sel, id inst,
                        bool initialize, bool cache, bool resolver)
 {
-    Class curClass;
     IMP imp = nil;
-    Method meth;
     bool triedResolver = NO;
 
     runtimeLock.assertUnlocked();
@@ -4581,25 +4598,43 @@ IMP lookUpImpOrForward(Class cls, SEL sel, id inst,
         if (imp) return imp;
     }
 
+    // runtimeLock is held during isRealized and isInitialized checking
+    // to prevent races against concurrent realization.
+
+    // runtimeLock is held during method search to make
+    // method-lookup + cache-fill atomic with respect to method addition.
+    // Otherwise, a category could be added but ignored indefinitely because
+    // the cache was re-filled with the old value after the cache flush on
+    // behalf of the category.
+
+    runtimeLock.read();
+
     if (!cls->isRealized()) {
-        rwlock_writer_t lock(runtimeLock);
+        // Drop the read-lock and acquire the write-lock.
+ // realizeClass() checks isRealized() again to prevent + // a race while the lock is down. + runtimeLock.unlockRead(); + runtimeLock.write(); + realizeClass(cls); + + runtimeLock.unlockWrite(); + runtimeLock.read(); } if (initialize && !cls->isInitialized()) { + runtimeLock.unlockRead(); _class_initialize (_class_getNonMetaClass(cls, inst)); + runtimeLock.read(); // If sel == initialize, _class_initialize will send +initialize and // then the messenger will send +initialize again after this // procedure finishes. Of course, if this is not being called // from the messenger then it won't happen. 2778172 } - // The lock is held to make method-lookup + cache-fill atomic - // with respect to method addition. Otherwise, a category could - // be added but ignored indefinitely because the cache was re-filled - // with the old value after the cache flush on behalf of the category. - retry: - runtimeLock.read(); + + retry: + runtimeLock.assertReading(); // Try this class's cache. @@ -4607,40 +4642,50 @@ IMP lookUpImpOrForward(Class cls, SEL sel, id inst, if (imp) goto done; // Try this class's method lists. - - meth = getMethodNoSuper_nolock(cls, sel); - if (meth) { - log_and_fill_cache(cls, meth->imp, sel, inst, cls); - imp = meth->imp; - goto done; + { + Method meth = getMethodNoSuper_nolock(cls, sel); + if (meth) { + log_and_fill_cache(cls, meth->imp, sel, inst, cls); + imp = meth->imp; + goto done; + } } // Try superclass caches and method lists. - - curClass = cls; - while ((curClass = curClass->superclass)) { - // Superclass cache. - imp = cache_getImp(curClass, sel); - if (imp) { - if (imp != (IMP)_objc_msgForward_impcache) { - // Found the method in a superclass. Cache it in this class. - log_and_fill_cache(cls, imp, sel, inst, curClass); - goto done; + { + unsigned attempts = unreasonableClassCount(); + for (Class curClass = cls; + curClass != nil; + curClass = curClass->superclass) + { + // Halt if there is a cycle in the superclass chain. + if (--attempts == 0) { + _objc_fatal("Memory corruption in class list."); } - else { - // Found a forward:: entry in a superclass. - // Stop searching, but don't cache yet; call method - // resolver for this class first. - break; + + // Superclass cache. + imp = cache_getImp(curClass, sel); + if (imp) { + if (imp != (IMP)_objc_msgForward_impcache) { + // Found the method in a superclass. Cache it in this class. + log_and_fill_cache(cls, imp, sel, inst, curClass); + goto done; + } + else { + // Found a forward:: entry in a superclass. + // Stop searching, but don't cache yet; call method + // resolver for this class first. + break; + } + } + + // Superclass method list. + Method meth = getMethodNoSuper_nolock(curClass, sel); + if (meth) { + log_and_fill_cache(cls, meth->imp, sel, inst, curClass); + imp = meth->imp; + goto done; } - } - - // Superclass method list. - meth = getMethodNoSuper_nolock(curClass, sel); - if (meth) { - log_and_fill_cache(cls, meth->imp, sel, inst, curClass); - imp = meth->imp; - goto done; } } @@ -4649,6 +4694,7 @@ IMP lookUpImpOrForward(Class cls, SEL sel, id inst, if (resolver && !triedResolver) { runtimeLock.unlockRead(); _class_resolveMethod(cls, sel, inst); + runtimeLock.read(); // Don't cache the result; we don't hold the lock so it may have // changed already. Re-do the search from scratch instead. 
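
lookUpImpOrForward now holds runtimeLock for reading, and each time it must drop the lock (to realize the class, run +initialize, or call the resolver) it reacquires the read lock and re-validates instead of trusting stale state. A plain rwlock has no atomic read-to-write upgrade, hence the unlock/write/re-check dance above; here is a sketch of that idiom using std::shared_mutex, which is illustrative and not the runtime's rwlock_t:

    #include <shared_mutex>

    static std::shared_mutex rwlock;
    static bool realized = false;   // stands in for cls->isRealized()

    static void realizeIfNeeded()
    {
        rwlock.lock_shared();
        if (!realized) {
            // No atomic upgrade exists: drop the read lock, take the
            // write lock, and re-check, because another thread may have
            // done the work while no lock was held. (realizeClass()
            // performs the same re-check internally.)
            rwlock.unlock_shared();
            rwlock.lock();
            if (!realized) realized = true;  // realizeClass(cls) here
            rwlock.unlock();
            rwlock.lock_shared();
        }
        // ... continue under the read lock ...
        rwlock.unlock_shared();
    }

The superclass loop gains the same corruption guard as the class-tree walk: an unreasonableClassCount() budget decremented per superclass, so a cycle in the superclass chain halts the process rather than hanging it. Setting triedResolver just below caps the resolver at one attempt per lookup.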
triedResolver = YES; diff --git a/runtime/objc-runtime-old.mm b/runtime/objc-runtime-old.mm index d6b393ee..7c95bd70 100644 --- a/runtime/objc-runtime-old.mm +++ b/runtime/objc-runtime-old.mm @@ -2157,8 +2157,8 @@ static inline bool _is_threaded() { * Calls ABI-agnostic code after taking ABI-specific locks. **********************************************************************/ void -map_2_images(unsigned count, const char * const paths[], - const struct mach_header * const mhdrs[]) +map_images(unsigned count, const char * const paths[], + const struct mach_header * const mhdrs[]) { recursive_mutex_locker_t lock(loadMethodLock); map_images_nolock(count, paths, mhdrs); diff --git a/runtime/objc-sync.mm b/runtime/objc-sync.mm index cb699818..daf743db 100644 --- a/runtime/objc-sync.mm +++ b/runtime/objc-sync.mm @@ -60,7 +60,7 @@ SyncData *data; spinlock_t lock; - SyncList() : data(nil) { } + SyncList() : data(nil), lock(fork_unsafe_lock) { } }; // Use multiple parallel lists to decrease contention among unrelated objects. @@ -235,7 +235,7 @@ void _destroySyncCache(struct SyncCache *cache) result = (SyncData*)calloc(sizeof(SyncData), 1); result->object = (objc_object *)object; result->threadCount = 1; - new (&result->mutex) recursive_mutex_t(); + new (&result->mutex) recursive_mutex_t(fork_unsafe_lock); result->nextData = *listp; *listp = result; diff --git a/runtime/objc.h b/runtime/objc.h index 7417ebc7..6cf776b4 100644 --- a/runtime/objc.h +++ b/runtime/objc.h @@ -56,19 +56,35 @@ typedef void (*IMP)(void /* id, SEL, ... */ ); typedef id (*IMP)(id, SEL, ...); #endif -#define OBJC_BOOL_DEFINED - /// Type to represent a boolean value. -#if (TARGET_OS_IPHONE && __LP64__) || TARGET_OS_WATCH -#define OBJC_BOOL_IS_BOOL 1 -typedef bool BOOL; + +#if defined(__OBJC_BOOL_IS_BOOL) + // Honor __OBJC_BOOL_IS_BOOL when available. +# if __OBJC_BOOL_IS_BOOL +# define OBJC_BOOL_IS_BOOL 1 +# else +# define OBJC_BOOL_IS_BOOL 0 +# endif #else -#define OBJC_BOOL_IS_CHAR 1 -typedef signed char BOOL; -// BOOL is explicitly signed so @encode(BOOL) == "c" rather than "C" -// even if -funsigned-char is used. + // __OBJC_BOOL_IS_BOOL not set. +# if TARGET_OS_OSX || (TARGET_OS_IOS && !__LP64__ && !__ARM_ARCH_7K) +# define OBJC_BOOL_IS_BOOL 0 +# else +# define OBJC_BOOL_IS_BOOL 1 +# endif #endif +#if OBJC_BOOL_IS_BOOL + typedef bool BOOL; +#else +# define OBJC_BOOL_IS_CHAR 1 + typedef signed char BOOL; + // BOOL is explicitly signed so @encode(BOOL) == "c" rather than "C" + // even if -funsigned-char is used. +#endif + +#define OBJC_BOOL_DEFINED + #if __has_feature(objc_bool) #define YES __objc_yes #define NO __objc_no @@ -164,7 +180,8 @@ OBJC_EXPORT const char *object_getClassName(id obj) * @note In a garbage-collected environment, the memory is scanned conservatively. */ OBJC_EXPORT void *object_getIndexedIvars(id obj) - OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARC_UNAVAILABLE; /** * Identifies a selector as being valid or invalid. diff --git a/runtime/runtime.h b/runtime/runtime.h index 38e74f12..8ed2f439 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -152,36 +152,6 @@ OBJC_EXPORT BOOL object_isClass(id obj) OBJC_AVAILABLE(10.10, 8.0, 9.0, 1.0); -/** - * Returns the class name of a given object. - * - * @param obj An Objective-C object. - * - * @return The name of the class of which \e obj is an instance. 
- */ -OBJC_EXPORT const char *object_getClassName(id obj) - OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); - -/** - * Returns a pointer to any extra bytes allocated with an instance given object. - * - * @param obj An Objective-C object. - * - * @return A pointer to any extra bytes allocated with \e obj. If \e obj was - * not allocated with any extra bytes, then dereferencing the returned pointer is undefined. - * - * @note This function returns a pointer to any extra bytes allocated with the instance - * (as specified by \c class_createInstance with extraBytes>0). This memory follows the - * object's ordinary ivars, but may not be adjacent to the last ivar. - * @note The returned pointer is guaranteed to be pointer-size aligned, even if the area following - * the object's last ivar is less aligned than that. Alignment greater than pointer-size is never - * guaranteed, even if the area following the object's last ivar is more aligned than that. - * @note In a garbage-collected environment, the memory is scanned conservatively. - */ -OBJC_EXPORT void *object_getIndexedIvars(id obj) - OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) - OBJC_ARC_UNAVAILABLE; - /** * Reads the value of an instance variable in an object. * @@ -1396,20 +1366,6 @@ OBJC_EXPORT const char **objc_copyClassNamesForImage(const char *image, OBJC_EXPORT const char *sel_getName(SEL sel) OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); -/** - * Registers a method name with the Objective-C runtime system. - * - * @param str A pointer to a C string. Pass the name of the method you wish to register. - * - * @return A pointer of type SEL specifying the selector for the named method. - * - * @note The implementation of this method is identical to the implementation of \c sel_registerName. - * @note Prior to OS X version 10.0, this method tried to find the selector mapped to the given name - * and returned \c NULL if the selector was not found. This was changed for safety, because it was - * observed that many of the callers of this function did not check the return value for \c NULL. - */ -OBJC_EXPORT SEL sel_getUid(const char *str) - OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); /** * Registers a method with the Objective-C runtime system, maps the method
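
The runtime.h deletions above appear to be de-duplication: the objc.h hunk earlier in this patch carries the surviving declaration of object_getIndexedIvars, now marked OBJC_ARC_UNAVAILABLE. The reworked BOOL selection in that same objc.h hunk prefers the compiler's __OBJC_BOOL_IS_BOOL and keeps the TARGET_OS fallback only for older compilers. A small compile-time probe of the outcome; it assumes a current Apple SDK where <objc/objc.h> defines OBJC_BOOL_IS_BOOL:

    #include <objc/objc.h>
    #include <type_traits>

    // 64-bit iOS/tvOS and watchOS get a true C++ bool; macOS and 32-bit
    // iOS keep signed char so @encode(BOOL) stays "c" for ABI stability.
    #if OBJC_BOOL_IS_BOOL
    static_assert(std::is_same<BOOL, bool>::value, "BOOL is bool here");
    #else
    static_assert(std::is_same<BOOL, signed char>::value,
                  "BOOL is signed char here");
    #endif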