Skip to content

Commit

Permalink
objc-709
Browse files Browse the repository at this point in the history
  • Loading branch information
RetVal committed Apr 16, 2017
1 parent cf60791 commit 1614b34
Show file tree
Hide file tree
Showing 29 changed files with 1,050 additions and 377 deletions.
2 changes: 1 addition & 1 deletion debug-objc/main.m
@@ -1,4 +1,4 @@
//

// main.m
// debug-objc
//
Expand Down
26 changes: 13 additions & 13 deletions include/sys/reason.h
Expand Up @@ -41,14 +41,14 @@ __BEGIN_DECLS
#include <kern/locks.h>

typedef struct os_reason {
decl_lck_mtx_data(, osr_lock)
int osr_refcount;
uint32_t osr_namespace;
uint64_t osr_code;
uint64_t osr_flags;
uint32_t osr_bufsize;
struct kcdata_descriptor osr_kcd_descriptor;
char *osr_kcd_buf;
decl_lck_mtx_data(, osr_lock)
int osr_refcount;
uint32_t osr_namespace;
uint64_t osr_code;
uint64_t osr_flags;
uint32_t osr_bufsize;
struct kcdata_descriptor osr_kcd_descriptor;
char *osr_kcd_buf;
} *os_reason_t;

#define OS_REASON_NULL ((os_reason_t) 0)
Expand All @@ -59,7 +59,7 @@ typedef struct os_reason {
void os_reason_init(void);

os_reason_t build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
user_addr_t reason_string, uint64_t reason_flags);
user_addr_t reason_string, uint64_t reason_flags);
char *launchd_exit_reason_get_string_desc(os_reason_t exit_reason);

#else /* XNU_KERNEL_PRIVATE */
Expand Down Expand Up @@ -122,9 +122,9 @@ void os_reason_free(os_reason_t cur_reason);
 * loses higher 32 bits of exit reason code.
*/
#define ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(code, osr_namespace) \
(code) = (code) | (((osr_namespace) & ((uint64_t)UINT32_MAX)) << 32)
(code) = (code) | (((osr_namespace) & ((uint64_t)UINT32_MAX)) << 32)
#define ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(code, osr_code) \
(code) = (code) | ((osr_code) & ((uint64_t)UINT32_MAX))
(code) = (code) | ((osr_code) & ((uint64_t)UINT32_MAX))

#ifndef KERNEL
/*
Expand Down Expand Up @@ -156,7 +156,7 @@ void abort_with_reason(uint32_t reason_namespace, uint64_t reason_code, const ch
* Outputs: Does not return.
*/
void abort_with_payload(uint32_t reason_namespace, uint64_t reason_code, void *payload, uint32_t payload_size, const char *reason_string,
uint64_t reason_flags) __attribute__((noreturn));
uint64_t reason_flags) __attribute__((noreturn));

/*
* terminate_with_reason: Used to terminate a specific process and pass along
Expand Down Expand Up @@ -195,7 +195,7 @@ int terminate_with_reason(int pid, uint32_t reason_namespace, uint64_t reason_co
* returns 0 otherwise
*/
int terminate_with_payload(int pid, uint32_t reason_namespace, uint64_t reason_code, void *payload, uint32_t payload_size,
const char *reason_string, uint64_t reason_flags);
const char *reason_string, uint64_t reason_flags);
#endif /* KERNEL */

/*
Expand Down
45 changes: 0 additions & 45 deletions runtime/Messengers.subproj/objc-msg-x86_64.s
Expand Up @@ -1281,49 +1281,4 @@ LCacheMiss:
.quad 0
.quad 0


// Workaround for Skype evil (rdar://19715989)

.text
.align 4
.private_extern _map_images
.private_extern _map_2_images
.private_extern _hax
_hax:
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
_map_images:
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
jmp _map_2_images

#endif
2 changes: 1 addition & 1 deletion runtime/NSObject.h
Expand Up @@ -18,7 +18,7 @@
@property (readonly) NSUInteger hash;

@property (readonly) Class superclass;
- (Class)class OBJC_SWIFT_UNAVAILABLE("use 'anObject.dynamicType' instead");
- (Class)class OBJC_SWIFT_UNAVAILABLE("use 'type(of: anObject)' instead");
- (instancetype)self;

- (id)performSelector:(SEL)aSelector;
Expand Down
103 changes: 73 additions & 30 deletions runtime/NSObject.mm
Expand Up @@ -136,6 +136,10 @@ void _objc_setBadAllocHandler(id(*newHandler)(Class))
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;

// Template parameters.
enum HaveOld { DontHaveOld = false, DoHaveOld = true };
enum HaveNew { DontHaveNew = false, DoHaveNew = true };

struct SideTable {
spinlock_t slock;
RefcountMap refcnts;
Expand All @@ -151,46 +155,58 @@ void _objc_setBadAllocHandler(id(*newHandler)(Class))

void lock() { slock.lock(); }
void unlock() { slock.unlock(); }
void forceReset() { slock.forceReset(); }

// Address-ordered lock discipline for a pair of side tables.

template<bool HaveOld, bool HaveNew>
template<HaveOld, HaveNew>
static void lockTwo(SideTable *lock1, SideTable *lock2);
template<bool HaveOld, bool HaveNew>
template<HaveOld, HaveNew>
static void unlockTwo(SideTable *lock1, SideTable *lock2);
};


template<>
void SideTable::lockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
void SideTable::lockTwo<DoHaveOld, DoHaveNew>
(SideTable *lock1, SideTable *lock2)
{
spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
}

template<>
void SideTable::lockTwo<true, false>(SideTable *lock1, SideTable *) {
void SideTable::lockTwo<DoHaveOld, DontHaveNew>
(SideTable *lock1, SideTable *)
{
lock1->lock();
}

template<>
void SideTable::lockTwo<false, true>(SideTable *, SideTable *lock2) {
void SideTable::lockTwo<DontHaveOld, DoHaveNew>
(SideTable *, SideTable *lock2)
{
lock2->lock();
}

template<>
void SideTable::unlockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
(SideTable *lock1, SideTable *lock2)
{
spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
}

template<>
void SideTable::unlockTwo<true, false>(SideTable *lock1, SideTable *) {
void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
(SideTable *lock1, SideTable *)
{
lock1->unlock();
}

template<>
void SideTable::unlockTwo<false, true>(SideTable *, SideTable *lock2) {
void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
(SideTable *, SideTable *lock2)
{
lock2->unlock();
}



// We cannot use a C++ static initializer to initialize SideTables because
Expand All @@ -211,6 +227,29 @@ static void SideTableInit() {
// anonymous namespace
};

void SideTableLockAll() {
SideTables().lockAll();
}

void SideTableUnlockAll() {
SideTables().unlockAll();
}

void SideTableForceResetAll() {
SideTables().forceResetAll();
}

void SideTableDefineLockOrder() {
SideTables().defineLockOrder();
}

void SideTableLocksPrecedeLock(const void *newlock) {
SideTables().precedeLock(newlock);
}

void SideTableLocksSucceedLock(const void *oldlock) {
SideTables().succeedLock(oldlock);
}

//
// The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
Expand Down Expand Up @@ -256,12 +295,16 @@ BOOL objc_should_deallocate(id object) {
// If CrashIfDeallocating is true, the process is halted if newObj is
// deallocating or newObj's class does not support weak references.
// If CrashIfDeallocating is false, nil is stored instead.
template <bool HaveOld, bool HaveNew, bool CrashIfDeallocating>
enum CrashIfDeallocating {
DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
};
template <HaveOld haveOld, HaveNew haveNew,
CrashIfDeallocating crashIfDeallocating>
static id
storeWeak(id *location, objc_object *newObj)
{
assert(HaveOld || HaveNew);
if (!HaveNew) assert(newObj == nil);
assert(haveOld || haveNew);
if (!haveNew) assert(newObj == nil);

Class previouslyInitializedClass = nil;
id oldObj;
Expand All @@ -272,34 +315,34 @@ BOOL objc_should_deallocate(id object) {
// Order by lock address to prevent lock ordering problems.
// Retry if the old value changes underneath us.
retry:
if (HaveOld) {
if (haveOld) {
oldObj = *location;
oldTable = &SideTables()[oldObj];
} else {
oldTable = nil;
}
if (HaveNew) {
if (haveNew) {
newTable = &SideTables()[newObj];
} else {
newTable = nil;
}

SideTable::lockTwo<HaveOld, HaveNew>(oldTable, newTable);
SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);

if (HaveOld && *location != oldObj) {
SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
if (haveOld && *location != oldObj) {
SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
goto retry;
}

// Prevent a deadlock between the weak reference machinery
// and the +initialize machinery by ensuring that no
// weakly-referenced object has an un-+initialized isa.
if (HaveNew && newObj) {
if (haveNew && newObj) {
Class cls = newObj->getIsa();
if (cls != previouslyInitializedClass &&
!((objc_class *)cls)->isInitialized())
{
SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
_class_initialize(_class_getNonMetaClass(cls, (id)newObj));

// If this class is finished with +initialize then we're good.
Expand All @@ -315,15 +358,15 @@ BOOL objc_should_deallocate(id object) {
}

// Clean up old value, if any.
if (HaveOld) {
if (haveOld) {
weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
}

// Assign new value, if any.
if (HaveNew) {
newObj = (objc_object *)weak_register_no_lock(&newTable->weak_table,
(id)newObj, location,
CrashIfDeallocating);
if (haveNew) {
newObj = (objc_object *)
weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
crashIfDeallocating);
// weak_register_no_lock returns nil if weak store should be rejected

// Set is-weakly-referenced bit in refcount table.
Expand All @@ -338,7 +381,7 @@ BOOL objc_should_deallocate(id object) {
// No new value. The storage is not changed.
}

SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);

return (id)newObj;
}
Expand All @@ -356,7 +399,7 @@ BOOL objc_should_deallocate(id object) {
id
objc_storeWeak(id *location, id newObj)
{
return storeWeak<true/*old*/, true/*new*/, true/*crash*/>
return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
(location, (objc_object *)newObj);
}

Expand All @@ -374,7 +417,7 @@ BOOL objc_should_deallocate(id object) {
id
objc_storeWeakOrNil(id *location, id newObj)
{
return storeWeak<true/*old*/, true/*new*/, false/*crash*/>
return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
(location, (objc_object *)newObj);
}

Expand Down Expand Up @@ -403,7 +446,7 @@ BOOL objc_should_deallocate(id object) {
return nil;
}

return storeWeak<false/*old*/, true/*new*/, true/*crash*/>
return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
(location, (objc_object*)newObj);
}

Expand All @@ -415,7 +458,7 @@ BOOL objc_should_deallocate(id object) {
return nil;
}

return storeWeak<false/*old*/, true/*new*/, false/*crash*/>
return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
(location, (objc_object*)newObj);
}

Expand All @@ -434,7 +477,7 @@ BOOL objc_should_deallocate(id object) {
void
objc_destroyWeak(id *location)
{
(void)storeWeak<true/*old*/, false/*new*/, false/*crash*/>
(void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
(location, nil);
}

Expand Down
4 changes: 2 additions & 2 deletions runtime/hashtable2.mm
Expand Up @@ -562,7 +562,7 @@ static int isEqualStrStructKey (const void *info, const void *data1, const void

static char *z = NULL;
static size_t zSize = 0;
static mutex_t uniquerLock;
mutex_t NXUniqueStringLock;

static const char *CopyIntoReadOnly (const char *str) {
size_t len = strlen (str) + 1;
Expand All @@ -574,7 +574,7 @@ static int isEqualStrStructKey (const void *info, const void *data1, const void
return result;
}

mutex_locker_t lock(uniquerLock);
mutex_locker_t lock(NXUniqueStringLock);
if (zSize < len) {
zSize = CHUNK_SIZE *((len + CHUNK_SIZE - 1) / CHUNK_SIZE);
/* not enough room, we try to allocate. If no room left, too bad */
Expand Down

0 comments on commit 1614b34

Please sign in to comment.