/*
* Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/

#include "private/gc_priv.h" /* For GC_compare_and_exchange, GC_memory_barrier */

#if defined(GC_LINUX_THREADS) || defined(GC_NETBSD_THREADS)

#include "private/specific.h"

static tse invalid_tse = {INVALID_QTID, 0, 0, INVALID_THREADID};
/* A thread-specific data entry which will never */
/* appear valid to a reader. Used to fill in empty */
/* cache entries to avoid a check for 0. */

int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *)) {
    int i;
    tsd * result = (tsd *)MALLOC_CLEAR(sizeof (tsd));

    /* A quick alignment check, since we need atomic stores */
      GC_ASSERT((unsigned long)(&invalid_tse.next) % sizeof(tse *) == 0);
    if (0 == result) return ENOMEM;
    pthread_mutex_init(&(result -> lock), NULL);
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
        result -> cache[i] = &invalid_tse;
    }
# ifdef GC_ASSERTIONS
      for (i = 0; i < TS_HASH_SIZE; ++i) {
        GC_ASSERT(result -> hash[i] == 0);
      }
# endif
    *key_ptr = result;
    return 0;
}
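
/* A usage sketch (not part of the library): roughly how a client, e.g.     */
/* the collector's thread-local allocation code, might drive this API.      */
/* The names my_key, my_destructor and my_data are hypothetical, and the    */
/* destructor argument is shown only because key_create accepts one; see    */
/* private/specific.h for the real declarations and the PREFIXED mapping.   */
#if 0
static tsd * my_key;

static void my_destructor(void * value) { /* hypothetical cleanup hook */ }

static int example_init(void) {
    /* Create the key once, before any thread uses it. */
    return PREFIXED(key_create)(&my_key, my_destructor);
}

static int example_thread_start(void * my_data) {
    /* Register this thread's value; readers then find it lock-free. */
    return PREFIXED(setspecific)(my_key, my_data);
}

static void example_thread_exit(void) {
    /* Drop the entry so a reused thread id cannot see stale data. */
    PREFIXED(remove_specific)(my_key);
}
#endif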

int PREFIXED(setspecific) (tsd * key, void * value) {
    pthread_t self = pthread_self();
    int hash_val = HASH(self);
    volatile tse * entry = (volatile tse *)MALLOC_CLEAR(sizeof (tse));
    
    GC_ASSERT(self != INVALID_THREADID);
    if (0 == entry) return ENOMEM;
    pthread_mutex_lock(&(key -> lock));
    /* Could easily check for an existing entry here. */
    entry -> next = key -> hash[hash_val];
    entry -> thread = self;
    entry -> value = value;
    GC_ASSERT(entry -> qtid == INVALID_QTID);
    /* There can only be one writer at a time, but this needs to be */
    /* atomic with respect to concurrent readers. */
    *(volatile tse **)(key -> hash + hash_val) = entry;
    pthread_mutex_unlock(&(key -> lock));
    return 0;
}

/* Remove thread-specific data for this thread. Should be called on */
/* thread exit. */
void PREFIXED(remove_specific) (tsd * key) {
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry;
    tse **link = key -> hash + hash_val;

    pthread_mutex_lock(&(key -> lock));
    entry = *link;
    while (entry != NULL && entry -> thread != self) {
        link = &(entry -> next);
        entry = *link;
    }
    if (entry != NULL) {
        /* Invalidate the qtid field, since qtids may be reused, and a  */
        /* later cache lookup could otherwise find this entry.          */
        entry -> qtid = INVALID_QTID;
        *link = entry -> next;
        /* Atomic! concurrent accesses still work.          */
        /* They must, since readers don't lock.             */
        /* We shouldn't need a volatile access here,        */
        /* since both this and the preceding write          */
        /* should become visible no later than              */
        /* the pthread_mutex_unlock() call.                 */
    }
    /* If we wanted to deallocate the entry, we'd first have to clear   */
    /* any cache entries pointing to it.  That probably requires        */
    /* additional synchronization, since we can't prevent a concurrent  */
    /* cache lookup, which might otherwise still be examining the       */
    /* deallocated memory.  This can only happen if the concurrent      */
    /* access is from another thread, and hence has missed the cache,   */
    /* but still...                                                     */

    /* With GC, we're done, since the pointers from the cache will */
    /* be overwritten, all local pointers to the entries will be */
    /* dropped, and the entry will then be reclaimed. */
    pthread_mutex_unlock(&(key -> lock));
}
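
/* Sketch only: the "clear any cache entries pointing to it" step that the  */
/* comment above alludes to, if entries were ever freed explicitly.  This   */
/* is deliberately not done here; the sketch assumes some additional        */
/* mechanism (e.g. waiting for readers to quiesce) that this file does not  */
/* provide, and clear_cache_references is a hypothetical name.              */
#if 0
static void clear_cache_references(tsd * key, tse * entry) {
    int i;

    for (i = 0; i < TS_CACHE_SIZE; ++i) {
        if (key -> cache[i] == entry) {
            /* Re-point the slot at the never-valid filler entry, just as   */
            /* key_create does for empty slots.                             */
            key -> cache[i] = &invalid_tse;
        }
    }
    /* Only once no reader can still hold a pointer to the entry could the  */
    /* entry itself actually be deallocated.                                 */
}
#endif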

/* Note that even the slow path doesn't lock. */
void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
                                   tse * volatile * cache_ptr) {
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry = key -> hash[hash_val];

    GC_ASSERT(qtid != INVALID_QTID);
    while (entry != NULL && entry -> thread != self) {
        entry = entry -> next;
    }
    if (entry == NULL) return NULL;
    /* Set cache_entry. */
    entry -> qtid = qtid;
        /* It's safe to do this asynchronously.  Either value   */
        /* is safe, though may produce spurious misses.         */
        /* We're replacing one qtid with another one for the    */
        /* same thread.                                          */
    *cache_ptr = entry;
        /* Again this is safe since pointer assignments are     */
        /* presumed atomic, and either pointer is valid.         */
    return entry -> value;
}
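
/* For context: slow_getspecific() is only the cache-miss path; the         */
/* cache-hit fast path lives in private/specific.h.  The sketch below       */
/* shows the shape it takes.  The qtid % TS_CACHE_SIZE hash is a stand-in   */
/* for whatever that header really uses; everything else (loading the slot  */
/* exactly once and the lone qtid comparison made safe by the invalid_tse   */
/* filler) follows the comments in this file.                               */
#if 0
static void * example_getspecific(tsd * key, unsigned long qtid) {
    tse * volatile * entry_ptr = key -> cache + (qtid % TS_CACHE_SIZE);
    tse * entry = *entry_ptr;   /* Load the cache slot exactly once. */

    if (entry -> qtid == qtid) {
        /* Cache hit: this entry was installed by the same thread under the */
        /* same quick thread id, so its value is the one we want.           */
        return entry -> value;
    }
    /* Miss (including a slot still holding &invalid_tse): fall back to the */
    /* hash table, which uses a locked writer and lock-free readers.        */
    return PREFIXED(slow_getspecific)(key, qtid, entry_ptr);
}
#endif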

#endif /* GC_LINUX_THREADS || GC_NETBSD_THREADS */