-------------------------------------------------------------------------------- -- Auto-annotated source: /home/njn/dev/rust2/build/x86_64-unknown-linux-gnu/stage0-rustc/x86_64-unknown-linux-gnu/release/build/tikv-jemalloc-sys-c536e82b571990b4/out/build/src/jemalloc.c -------------------------------------------------------------------------------- Ir -- line 201 ---------------------------------------- . static bool malloc_init_hard(void); . . /******************************************************************************/ . /* . * Begin miscellaneous support functions. . */ . . bool 4 ( 0.00%) malloc_initialized(void) { 13 ( 0.00%) return (malloc_init_state == malloc_init_initialized); 4 ( 0.00%) } . . JEMALLOC_ALWAYS_INLINE bool . malloc_init_a0(void) { 8 ( 0.00%) if (unlikely(malloc_init_state == malloc_init_uninitialized)) { . return malloc_init_hard_a0(); . } . return false; . } . . JEMALLOC_ALWAYS_INLINE bool . malloc_init(void) { 6 ( 0.00%) if (unlikely(!malloc_initialized()) && malloc_init_hard()) { . return true; . } . return false; . } . . /* . * The a0*() functions are used instead of i{d,}alloc() in situations that . * cannot tolerate TLS variable access. . */ . . static void * 56 ( 0.00%) a0ialloc(size_t size, bool zero, bool is_internal) { . if (unlikely(malloc_init_a0())) { . return NULL; . } . 4 ( 0.00%) return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, . is_internal, arena_get(TSDN_NULL, 0, true), true); 48 ( 0.00%) } . . static void 36 ( 0.00%) a0idalloc(void *ptr, bool is_internal) { . idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); 33 ( 0.00%) } . . void * . a0malloc(size_t size) { 16 ( 0.00%) return a0ialloc(size, false, true); . } . . void . a0dalloc(void *ptr) { 9 ( 0.00%) a0idalloc(ptr, true); . } . . /* . * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive . * situations that cannot tolerate TLS variable access (TLS allocation and very . * early internal data structure initialization). . */ . -- line 264 ---------------------------------------- -- line 289 ---------------------------------------- . if (unlikely(ptr == NULL)) { . return; . } . . a0idalloc(ptr, false); . } . . void 4 ( 0.00%) arena_set(unsigned ind, arena_t *arena) { . atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); 4 ( 0.00%) } . . static void . narenas_total_set(unsigned narenas) { . atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); . } . . static void . narenas_total_inc(void) { . atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); 2 ( 0.00%) } . . unsigned 1 ( 0.00%) narenas_total_get(void) { . return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); 1 ( 0.00%) } . . /* Create a new arena and insert it into the arenas array at index ind. */ . static arena_t * . arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { . arena_t *arena; . . assert(ind <= narenas_total_get()); 14 ( 0.00%) if (ind >= MALLOCX_ARENA_LIMIT) { . return NULL; . } 14 ( 0.00%) if (ind == narenas_total_get()) { . narenas_total_inc(); . } . . /* . * Another thread may have already initialized arenas[ind] if it's an . * auto arena. . */ . arena = arena_get(tsdn, ind, false); 14 ( 0.00%) if (arena != NULL) { . assert(arena_is_auto(arena)); . return arena; . } . . /* Actually initialize the arena. */ 17 ( 0.00%) arena = arena_new(tsdn, ind, extent_hooks); . 1 ( 0.00%) return arena; . } . . static void . arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { 14 ( 0.00%) if (ind == 0) { . return; . } . /* . 
* Avoid creating a new background thread just for the huge arena, which . * purges eagerly by default. . */ 24 ( 0.00%) if (have_background_thread && !arena_is_huge(ind)) { 30 ( 0.00%) if (background_thread_create(tsdn_tsd(tsdn), ind)) { . malloc_printf(": error in background thread " . "creation for arena %u. Abort.\n", ind); . abort(); . } . } . } . . arena_t * 12 ( 0.00%) arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { . arena_t *arena; . . malloc_mutex_lock(tsdn, &arenas_lock); . arena = arena_init_locked(tsdn, ind, extent_hooks); . malloc_mutex_unlock(tsdn, &arenas_lock); . . arena_new_create_background_thread(tsdn, ind); . . return arena; 9 ( 0.00%) } . . static void . arena_bind(tsd_t *tsd, unsigned ind, bool internal) { . arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); 32 ( 0.00%) arena_nthreads_inc(arena, internal); . 24 ( 0.00%) if (internal) { . tsd_iarena_set(tsd, arena); . } else { . tsd_arena_set(tsd, arena); . unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1, . ATOMIC_RELAXED); . tsd_binshards_t *bins = tsd_binshardsp_get(tsd); 80 ( 0.00%) for (unsigned i = 0; i < SC_NBINS; i++) { . assert(bin_infos[i].n_shards > 0 && . bin_infos[i].n_shards <= BIN_SHARDS_MAX); 576 ( 0.00%) bins->binshard[i] = shard % bin_infos[i].n_shards; . } . } . } . . void . arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { . arena_t *oldarena, *newarena; . -- line 399 ---------------------------------------- -- line 404 ---------------------------------------- . tsd_arena_set(tsd, newarena); . } . . static void . arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { . arena_t *arena; . . arena = arena_get(tsd_tsdn(tsd), ind, false); 12 ( 0.00%) arena_nthreads_dec(arena, internal); . . if (internal) { . tsd_iarena_set(tsd, NULL); . } else { . tsd_arena_set(tsd, NULL); . } . } . . arena_tdata_t * 781 ( 0.00%) arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { . arena_tdata_t *tdata, *arenas_tdata_old; . arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); . unsigned narenas_tdata_old, i; . unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); . unsigned narenas_actual = narenas_total_get(); . . /* . * Dissociate old tdata array (and set up for deallocation upon return) . * if it's too small. . */ 142 ( 0.00%) if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { . arenas_tdata_old = arenas_tdata; . narenas_tdata_old = narenas_tdata; . arenas_tdata = NULL; . narenas_tdata = 0; . tsd_arenas_tdata_set(tsd, arenas_tdata); . tsd_narenas_tdata_set(tsd, narenas_tdata); . } else { 71 ( 0.00%) arenas_tdata_old = NULL; 142 ( 0.00%) narenas_tdata_old = 0; . } . . /* Allocate tdata array if it's missing. */ . if (arenas_tdata == NULL) { . bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); 213 ( 0.00%) narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; . 284 ( 0.00%) if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { 8 ( 0.00%) *arenas_tdata_bypassp = true; 12 ( 0.00%) arenas_tdata = (arena_tdata_t *)a0malloc( . sizeof(arena_tdata_t) * narenas_tdata); 8 ( 0.00%) *arenas_tdata_bypassp = false; . } 8 ( 0.00%) if (arenas_tdata == NULL) { 71 ( 0.00%) tdata = NULL; . goto label_return; . } . assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); . tsd_arenas_tdata_set(tsd, arenas_tdata); . tsd_narenas_tdata_set(tsd, narenas_tdata); . } . . /* -- line 465 ---------------------------------------- -- line 466 ---------------------------------------- . * Copy to tdata array. It's possible that the actual number of arenas . 
* has increased since narenas_total_get() was called above, but that . * causes no correctness issues unless two threads concurrently execute . * the arenas.create mallctl, which we trust mallctl synchronization to . * prevent. . */ . . /* Copy/initialize tickers. */ 179 ( 0.00%) for (i = 0; i < narenas_actual; i++) { . if (i < narenas_tdata_old) { . ticker_copy(&arenas_tdata[i].decay_ticker, 340 ( 0.00%) &arenas_tdata_old[i].decay_ticker); . } else { . ticker_init(&arenas_tdata[i].decay_ticker, . DECAY_NTICKS_PER_UPDATE); . } . } 8 ( 0.00%) if (narenas_tdata > narenas_actual) { . memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) . * (narenas_tdata - narenas_actual)); . } . . /* Read the refreshed tdata array. */ 12 ( 0.00%) tdata = &arenas_tdata[ind]; . label_return: 142 ( 0.00%) if (arenas_tdata_old != NULL) { . a0dalloc(arenas_tdata_old); . } . return tdata; 639 ( 0.00%) } . . /* Slow path, called only by arena_choose(). */ . arena_t * 56 ( 0.00%) arena_choose_hard(tsd_t *tsd, bool internal) { 4 ( 0.00%) arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); . 12 ( 0.00%) if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { . unsigned choose = percpu_arena_choose(); . ret = arena_get(tsd_tsdn(tsd), choose, true); . assert(ret != NULL); . arena_bind(tsd, arena_ind_get(ret), false); . arena_bind(tsd, arena_ind_get(ret), true); . . return ret; . } . 12 ( 0.00%) if (narenas_auto > 1) { . unsigned i, j, choose[2], first_null; . bool is_new_arena[2]; . . /* . * Determine binding for both non-internal and internal . * allocation. . * . * choose[0]: For application allocation. . * choose[1]: For internal metadata allocation. . */ . . for (j = 0; j < 2; j++) { 4 ( 0.00%) choose[j] = 0; 8 ( 0.00%) is_new_arena[j] = false; . } . . first_null = narenas_auto; . malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); . assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); 1,363 ( 0.00%) for (i = 1; i < narenas_auto; i++) { 888 ( 0.00%) if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { . /* . * Choose the first arena that has the lowest . * number of threads assigned to it. . */ 12 ( 0.00%) for (j = 0; j < 2; j++) { 36 ( 0.00%) if (arena_nthreads_get(arena_get( . tsd_tsdn(tsd), i, false), !!j) < 12 ( 0.00%) arena_nthreads_get(arena_get( . tsd_tsdn(tsd), choose[j], false), . !!j)) { . choose[j] = i; . } . } 1,323 ( 0.00%) } else if (first_null == narenas_auto) { . /* . * Record the index of the first uninitialized . * arena, in case all extant arenas are in use. . * . * NB: It is possible for there to be . * discontinuities in terms of initialized . * versus uninitialized arenas, due to the . * "thread.arena" mallctl. . */ . first_null = i; . } . } . . for (j = 0; j < 2; j++) { 70 ( 0.00%) if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), 12 ( 0.00%) choose[j], false), !!j) == 0 || first_null == . narenas_auto) { . /* . * Use an unloaded arena, or the least loaded . * arena if all arenas are already initialized. . */ 4 ( 0.00%) if (!!j == internal) { . ret = arena_get(tsd_tsdn(tsd), . choose[j], false); . } . } else { . arena_t *arena; . . /* Initialize a new arena. */ 6 ( 0.00%) choose[j] = first_null; . arena = arena_init_locked(tsd_tsdn(tsd), . choose[j], . (extent_hooks_t *)&extent_hooks_default); 6 ( 0.00%) if (arena == NULL) { . malloc_mutex_unlock(tsd_tsdn(tsd), . &arenas_lock); . return NULL; . } 6 ( 0.00%) is_new_arena[j] = true; 24 ( 0.00%) if (!!j == internal) { . ret = arena; . } . } . arena_bind(tsd, choose[j], !!j); . } . malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); . . 
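		/*
		 * Background threads for any arenas created above are spawned
		 * only after arenas_lock has been dropped, as arena_init()
		 * also does; thread creation can allocate (pthread_create
		 * depends on malloc), so it is presumably kept off the locked
		 * path.
		 */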
for (j = 0; j < 2; j++) { 16 ( 0.00%) if (is_new_arena[j]) { . assert(choose[j] > 0); 6 ( 0.00%) arena_new_create_background_thread( . tsd_tsdn(tsd), choose[j]); . } . } . . } else { . ret = arena_get(tsd_tsdn(tsd), 0, false); . arena_bind(tsd, 0, false); . arena_bind(tsd, 0, true); . } . . return ret; 48 ( 0.00%) } . . void 18 ( 0.00%) iarena_cleanup(tsd_t *tsd) { . arena_t *iarena; . . iarena = tsd_iarena_get(tsd); 6 ( 0.00%) if (iarena != NULL) { . arena_unbind(tsd, arena_ind_get(iarena), true); . } 9 ( 0.00%) } . . void 18 ( 0.00%) arena_cleanup(tsd_t *tsd) { . arena_t *arena; . . arena = tsd_arena_get(tsd); 6 ( 0.00%) if (arena != NULL) { . arena_unbind(tsd, arena_ind_get(arena), false); . } 9 ( 0.00%) } . . void 3 ( 0.00%) arenas_tdata_cleanup(tsd_t *tsd) { . arena_tdata_t *arenas_tdata; . . /* Prevent tsd->arenas_tdata from being (re)created. */ 3 ( 0.00%) *tsd_arenas_tdata_bypassp_get(tsd) = true; . . arenas_tdata = tsd_arenas_tdata_get(tsd); 6 ( 0.00%) if (arenas_tdata != NULL) { . tsd_arenas_tdata_set(tsd, NULL); . a0dalloc(arenas_tdata); . } . } . . static void . stats_print_atexit(void) { . if (config_stats) { -- line 648 ---------------------------------------- -- line 706 ---------------------------------------- . /******************************************************************************/ . /* . * Begin initialization functions. . */ . . static char * . jemalloc_secure_getenv(const char *name) { . #ifdef JEMALLOC_HAVE_SECURE_GETENV 8 ( 0.00%) return secure_getenv(name); . #else . # ifdef JEMALLOC_HAVE_ISSETUGID . if (issetugid() != 0) { . return NULL; . } . # endif . return getenv(name); . #endif -- line 722 ---------------------------------------- -- line 740 ---------------------------------------- . */ . { . cpu_set_t set; . . pthread_getaffinity_np(pthread_self(), sizeof(set), &set); . result = CPU_COUNT(&set); . } . #else 2 ( 0.00%) result = sysconf(_SC_NPROCESSORS_ONLN); . #endif 3 ( 0.00%) return ((result == -1) ? 1 : (unsigned)result); . } . . static void . init_opt_stats_print_opts(const char *v, size_t vlen) { . size_t opts_len = strlen(opt_stats_print_opts); . assert(opts_len <= stats_print_tot_num_options); . . for (size_t i = 0; i < vlen; i++) { -- line 758 ---------------------------------------- -- line 823 ---------------------------------------- . malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, . char const **v_p, size_t *vlen_p) { . bool accept; . const char *opts = *opts_p; . . *k_p = opts; . . for (accept = false; !accept;) { 572 ( 0.00%) switch (*opts) { . case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': . case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': . case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': . case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': . case 'Y': case 'Z': . case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': . case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': . case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': . case 's': case 't': case 'u': case 'v': case 'w': case 'x': . case 'y': case 'z': . case '0': case '1': case '2': case '3': case '4': case '5': . case '6': case '7': case '8': case '9': . case '_': 56 ( 0.00%) opts++; . break; . case ':': 8 ( 0.00%) opts++; 16 ( 0.00%) *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; . *v_p = opts; . accept = true; . break; . case '\0': . if (opts != *opts_p) { . malloc_write(": Conf string ends " . "with key\n"); . } . return true; . default: . malloc_write(": Malformed conf string\n"); . 
return true; . } . } . 2 ( 0.00%) for (accept = false; !accept;) { 36 ( 0.00%) switch (*opts) { . case ',': 2 ( 0.00%) opts++; . /* . * Look ahead one character here, because the next time . * this function is called, it will assume that end of . * input has been cleanly reached if no input remains, . * but we have optimistically already consumed the . * comma if one exists. . */ 4 ( 0.00%) if (*opts == '\0') { . malloc_write(": Conf string ends " . "with comma\n"); . } 6 ( 0.00%) *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; . accept = true; . break; . case '\0': 6 ( 0.00%) *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; . accept = true; . break; . default: 4 ( 0.00%) opts++; . break; . } . } . . *opts_p = opts; . return false; . } . -- line 896 ---------------------------------------- -- line 917 ---------------------------------------- . } . . static void . malloc_slow_flag_init(void) { . /* . * Combine the runtime options into malloc_slow for fast path. Called . * after processing all the options. . */ 1 ( 0.00%) malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) 2 ( 0.00%) | (opt_junk_free ? flag_opt_junk_free : 0) 4 ( 0.00%) | (opt_zero ? flag_opt_zero : 0) 3 ( 0.00%) | (opt_utrace ? flag_opt_utrace : 0) 3 ( 0.00%) | (opt_xmalloc ? flag_opt_xmalloc : 0); . 1 ( 0.00%) malloc_slow = (malloc_slow_flags != 0); . } . . /* Number of sources for initializing malloc_conf */ . #define MALLOC_CONF_NSOURCES 4 . . static const char * . obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { . if (config_debug) { -- line 939 ---------------------------------------- -- line 942 ---------------------------------------- . * Each source should only be read once, to minimize # of . * syscalls on init. . */ . assert(read_source++ == which_source); . } . assert(which_source < MALLOC_CONF_NSOURCES); . . const char *ret; 20 ( 0.00%) switch (which_source) { . case 0: . ret = config_malloc_conf; . break; . case 1: 3 ( 0.00%) if (je_malloc_conf != NULL) { . /* Use options that were compiled into the program. */ . ret = je_malloc_conf; . } else { . /* No configuration specified. */ . ret = NULL; . } . break; . case 2: { . ssize_t linklen = 0; . #ifndef _WIN32 3 ( 0.00%) int saved_errno = errno; . const char *linkname = . # ifdef JEMALLOC_PREFIX . "/etc/"JEMALLOC_PREFIX"malloc.conf" . # else . "/etc/malloc.conf" . # endif . ; . . /* . * Try to use the contents of the "/etc/malloc.conf" symbolic . * link's name. . */ . #ifndef JEMALLOC_READLINKAT 4 ( 0.00%) linklen = readlink(linkname, buf, PATH_MAX); . #else . linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX); . #endif 2 ( 0.00%) if (linklen == -1) { . /* No configuration specified. */ . linklen = 0; . /* Restore errno. */ . set_errno(saved_errno); . } . #endif 1 ( 0.00%) buf[linklen] = '\0'; . ret = buf; 2 ( 0.00%) break; . } case 3: { . const char *envname = . #ifdef JEMALLOC_PREFIX . JEMALLOC_CPREFIX"MALLOC_CONF" . #else . "MALLOC_CONF" . #endif . ; -- line 1001 ---------------------------------------- -- line 1015 ---------------------------------------- . ret = NULL; . } . return ret; . } . . static void . malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], . bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES], 34 ( 0.00%) char buf[PATH_MAX + 1]) { . static const char *opts_explain[MALLOC_CONF_NSOURCES] = { . "string specified via --with-malloc-conf", . "string pointed to by the global variable malloc_conf", . "\"name\" of the file referenced by the symbolic link named " . 
"/etc/malloc.conf", . "value of the environment variable MALLOC_CONF" . }; . unsigned i; . const char *opts, *k, *v; . size_t klen, vlen; . 32 ( 0.00%) for (i = 0; i < MALLOC_CONF_NSOURCES; i++) { . /* Get runtime configuration. */ 24 ( 0.00%) if (initial_call) { 12 ( 0.00%) opts_cache[i] = obtain_malloc_conf(i, buf); . } 12 ( 0.00%) opts = opts_cache[i]; 12 ( 0.00%) if (!initial_call && opt_confirm_conf) { . malloc_printf( . ": malloc_conf #%u (%s): \"%s\"\n", . i + 1, opts_explain[i], opts != NULL ? opts : ""); . } 16 ( 0.00%) if (opts == NULL) { . continue; . } . 36 ( 0.00%) while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, . &vlen)) { . . #define CONF_ERROR(msg, k, klen, v, vlen) \ . if (!initial_call) { \ . malloc_conf_error( \ . msg, k, klen, v, vlen); \ . cur_opt_valid = false; \ . } -- line 1058 ---------------------------------------- -- line 1162 ---------------------------------------- . sizeof(o)-1; \ . strncpy(o, v, cpylen); \ . o[cpylen] = '\0'; \ . CONF_CONTINUE; \ . } . . bool cur_opt_valid = true; . 8 ( 0.00%) CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf") 8 ( 0.00%) if (initial_call) { . continue; . } . 4 ( 0.00%) CONF_HANDLE_BOOL(opt_abort, "abort") 4 ( 0.00%) CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") 16 ( 0.00%) if (strncmp("metadata_thp", k, klen) == 0) { . int i; . bool match = false; . for (i = 0; i < metadata_thp_mode_limit; i++) { . if (strncmp(metadata_thp_mode_names[i], . v, vlen) == 0) { . opt_metadata_thp = i; . match = true; . break; -- line 1185 ---------------------------------------- -- line 1186 ---------------------------------------- . } . } . if (!match) { . CONF_ERROR("Invalid conf value", . k, klen, v, vlen); . } . CONF_CONTINUE; . } 4 ( 0.00%) CONF_HANDLE_BOOL(opt_retain, "retain") 16 ( 0.00%) if (strncmp("dss", k, klen) == 0) { . int i; . bool match = false; . for (i = 0; i < dss_prec_limit; i++) { . if (strncmp(dss_prec_names[i], v, vlen) . == 0) { . if (extent_dss_prec_set(i)) { . CONF_ERROR( . "Error setting dss", -- line 1203 ---------------------------------------- -- line 1211 ---------------------------------------- . } . } . if (!match) { . CONF_ERROR("Invalid conf value", . k, klen, v, vlen); . } . CONF_CONTINUE; . } 4 ( 0.00%) CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, . UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, . false) 4 ( 0.00%) if (CONF_MATCH("bin_shards")) { . const char *bin_shards_segment_cur = v; . size_t vlen_left = vlen; . do { . size_t size_start; . size_t size_end; . size_t nshards; . bool err = malloc_conf_multi_sizes_next( . &bin_shards_segment_cur, &vlen_left, -- line 1230 ---------------------------------------- -- line 1236 ---------------------------------------- . "Invalid settings for " . "bin_shards", k, klen, v, . vlen); . break; . } . } while (vlen_left > 0); . CONF_CONTINUE; . } 35 ( 0.00%) CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, . "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < . QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : . SSIZE_MAX); 25 ( 0.00%) CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, . "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < . QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : . SSIZE_MAX); . CONF_HANDLE_BOOL(opt_stats_print, "stats_print") . if (CONF_MATCH("stats_print_opts")) { . init_opt_stats_print_opts(v, vlen); . CONF_CONTINUE; . } -- line 1256 ---------------------------------------- -- line 1424 ---------------------------------------- . #undef CONF_HANDLE_T_U . #undef CONF_HANDLE_UNSIGNED . #undef CONF_HANDLE_SIZE_T . #undef CONF_HANDLE_SSIZE_T . 
#undef CONF_HANDLE_CHAR_P . /* Re-enable diagnostic "-Wtype-limits" */ . JEMALLOC_DIAGNOSTIC_POP . } 12 ( 0.00%) if (opt_abort_conf && had_conf_error) { . malloc_abort_invalid_conf(); . } . } . atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); 22 ( 0.00%) } . . static void . malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { 3 ( 0.00%) const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL}; . char buf[PATH_MAX + 1]; . . /* The first call only set the confirm_conf option and opts_cache */ 7 ( 0.00%) malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf); 6 ( 0.00%) malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache, . NULL); . } . . #undef MALLOC_CONF_NSOURCES . . static bool . malloc_init_hard_needed(void) { 7 ( 0.00%) if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == . malloc_init_recursible)) { . /* . * Another thread initialized the allocator before this one . * acquired init_lock, or this thread is the initializing . * thread, and it is recursively allocating. . */ . return false; . } . #ifdef JEMALLOC_THREADED_INIT 2 ( 0.00%) if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { . /* Busy-wait until the initializing thread completes. */ . spin_t spinner = SPIN_INITIALIZER; . do { . malloc_mutex_unlock(TSDN_NULL, &init_lock); . spin_adaptive(&spinner); . malloc_mutex_lock(TSDN_NULL, &init_lock); . } while (!malloc_initialized()); . return false; . } . #endif . return true; . } . . static bool 13 ( 0.00%) malloc_init_hard_a0_locked() { 2 ( 0.00%) malloc_initializer = INITIALIZER; . . JEMALLOC_DIAGNOSTIC_PUSH . JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS 827 ( 0.00%) sc_data_t sc_data = {0}; . JEMALLOC_DIAGNOSTIC_POP . . /* . * Ordering here is somewhat tricky; we need sc_boot() first, since that . * determines what the size classes will be, and then . * malloc_conf_init(), since any slab size tweaking will need to be done . * before sz_boot and bin_boot, which assume that the values they read . * out of sc_data_global are final. . */ 2 ( 0.00%) sc_boot(&sc_data); . unsigned bin_shard_sizes[SC_NBINS]; 3 ( 0.00%) bin_shard_sizes_boot(bin_shard_sizes); . /* . * prof_boot0 only initializes opt_prof_prefix. We need to do it before . * we parse malloc_conf options, in case malloc_conf parsing overwrites . * it. . */ . if (config_prof) { . prof_boot0(); . } . malloc_conf_init(&sc_data, bin_shard_sizes); 2 ( 0.00%) sz_boot(&sc_data); 3 ( 0.00%) bin_boot(&sc_data, bin_shard_sizes); . 3 ( 0.00%) if (opt_stats_print) { . /* Print statistics at exit. */ . if (atexit(stats_print_atexit) != 0) { . malloc_write(": Error in atexit()\n"); . if (opt_abort) { . abort(); . } . } . } 3 ( 0.00%) if (pages_boot()) { . return true; . } 4 ( 0.00%) if (base_boot(TSDN_NULL)) { . return true; . } 3 ( 0.00%) if (extent_boot()) { . return true; . } 3 ( 0.00%) if (ctl_boot()) { . return true; . } . if (config_prof) { . prof_boot1(); . } 2 ( 0.00%) arena_boot(&sc_data); 4 ( 0.00%) if (tcache_boot(TSDN_NULL)) { . return true; . } 8 ( 0.00%) if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, . malloc_mutex_rank_exclusive)) { . return true; . } 2 ( 0.00%) hook_boot(); . /* . * Create enough scaffolding to allow recursive allocation in . * malloc_ncpus(). . */ 1 ( 0.00%) narenas_auto = 1; 1 ( 0.00%) manual_arena_base = narenas_auto + 1; . memset(arenas, 0, sizeof(arena_t *) * narenas_auto); . /* . * Initialize one arena here. The rest are lazily created in . * arena_choose_hard(). . 
*/ 6 ( 0.00%) if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) . == NULL) { . return true; . } . a0 = arena_get(TSDN_NULL, 0, false); 1 ( 0.00%) malloc_init_state = malloc_init_a0_initialized; . 1 ( 0.00%) return false; 10 ( 0.00%) } . . static bool . malloc_init_hard_a0(void) { . bool ret; . . malloc_mutex_lock(TSDN_NULL, &init_lock); . ret = malloc_init_hard_a0_locked(); . malloc_mutex_unlock(TSDN_NULL, &init_lock); . return ret; . } . . /* Initialize data structures which may trigger recursive allocation. */ . static bool . malloc_init_hard_recursible(void) { 1 ( 0.00%) malloc_init_state = malloc_init_recursible; . 1 ( 0.00%) ncpus = malloc_ncpus(); . . #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ . && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ . !defined(__native_client__)) . /* LinuxThreads' pthread_atfork() allocates. */ 6 ( 0.00%) if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, . jemalloc_postfork_child) != 0) { . malloc_write(": Error in pthread_atfork()\n"); . if (opt_abort) { . abort(); . } . return true; . } . #endif . 3 ( 0.00%) if (background_thread_boot0()) { . return true; . } . . return false; . } . . static unsigned . malloc_narenas_default(void) { . assert(ncpus > 0); . /* . * For SMP systems, create more than one arena per CPU by . * default. . */ 3 ( 0.00%) if (ncpus > 1) { 1 ( 0.00%) return ncpus << 2; . } else { . return 1; . } . } . . static percpu_arena_mode_t . percpu_arena_as_initialized(percpu_arena_mode_t mode) { . assert(!malloc_initialized()); . assert(mode <= percpu_arena_disabled); . . if (mode != percpu_arena_disabled) { 3 ( 0.00%) mode += percpu_arena_mode_enabled_base; . } . . return mode; . } . . static bool . malloc_init_narenas(void) { . assert(ncpus > 0); . 3 ( 0.00%) if (opt_percpu_arena != percpu_arena_disabled) { . if (!have_percpu_arena || malloc_getcpu() < 0) { . opt_percpu_arena = percpu_arena_disabled; . malloc_printf(": perCPU arena getcpu() not " . "available. Setting narenas to %u.\n", opt_narenas ? . opt_narenas : malloc_narenas_default()); . if (opt_abort) { . abort(); . } -- line 1639 ---------------------------------------- -- line 1672 ---------------------------------------- . * of affinity setting from numactl), reserving . * narenas this way provides a workaround for . * percpu_arena. . */ . opt_narenas = n; . } . } . } 3 ( 0.00%) if (opt_narenas == 0) { 1 ( 0.00%) opt_narenas = malloc_narenas_default(); . } . assert(opt_narenas > 0); . 1 ( 0.00%) narenas_auto = opt_narenas; . /* . * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). . */ 2 ( 0.00%) if (narenas_auto >= MALLOCX_ARENA_LIMIT) { . narenas_auto = MALLOCX_ARENA_LIMIT - 1; . malloc_printf(": Reducing narenas to limit (%d)\n", . narenas_auto); . } . narenas_total_set(narenas_auto); 3 ( 0.00%) if (arena_init_huge()) { . narenas_total_inc(); . } 1 ( 0.00%) manual_arena_base = narenas_total_get(); . . return false; . } . . static void . malloc_init_percpu(void) { 2 ( 0.00%) opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); . } . . static bool . malloc_init_hard_finish(void) { 4 ( 0.00%) if (malloc_mutex_boot()) { . return true; . } . 1 ( 0.00%) malloc_init_state = malloc_init_initialized; . malloc_slow_flag_init(); . . return false; . } . . static void . malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { . 
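	/*
	 * Cleanup helper used via the UNLOCK_RETURN macro in
	 * malloc_init_hard() below; when reentrancy_set is true it also
	 * undoes the pre_reentrancy() raised during initialization by
	 * calling post_reentrancy().
	 */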
malloc_mutex_assert_owner(tsdn, &init_lock); -- line 1722 ---------------------------------------- -- line 1725 ---------------------------------------- . assert(!tsdn_null(tsdn)); . tsd_t *tsd = tsdn_tsd(tsdn); . assert(tsd_reentrancy_level_get(tsd) > 0); . post_reentrancy(tsd); . } . } . . static bool 7 ( 0.00%) malloc_init_hard(void) { . tsd_t *tsd; . . #if defined(_WIN32) && _WIN32_WINNT < 0x0600 . _init_init_lock(); . #endif . malloc_mutex_lock(TSDN_NULL, &init_lock); . . #define UNLOCK_RETURN(tsdn, ret, reentrancy) \ . malloc_init_hard_cleanup(tsdn, reentrancy); \ . return ret; . . if (!malloc_init_hard_needed()) { . UNLOCK_RETURN(TSDN_NULL, false, false) . } . 4 ( 0.00%) if (malloc_init_state != malloc_init_a0_initialized && 3 ( 0.00%) malloc_init_hard_a0_locked()) { . UNLOCK_RETURN(TSDN_NULL, true, false) . } . . malloc_mutex_unlock(TSDN_NULL, &init_lock); . /* Recursive allocation relies on functional tsd. */ 2 ( 0.00%) tsd = malloc_tsd_boot0(); 2 ( 0.00%) if (tsd == NULL) { . return true; . } . if (malloc_init_hard_recursible()) { . return true; . } . . malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); . /* Set reentrancy level to 1 during init. */ . pre_reentrancy(tsd, NULL); . /* Initialize narenas before prof_boot2 (for allocation). */ 4 ( 0.00%) if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { . UNLOCK_RETURN(tsd_tsdn(tsd), true, true) . } . if (config_prof && prof_boot2(tsd)) { . UNLOCK_RETURN(tsd_tsdn(tsd), true, true) . } . . malloc_init_percpu(); . -- line 1776 ---------------------------------------- -- line 1777 ---------------------------------------- . if (malloc_init_hard_finish()) { . UNLOCK_RETURN(tsd_tsdn(tsd), true, true) . } . post_reentrancy(tsd); . malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); . . witness_assert_lockless(witness_tsd_tsdn( . tsd_witness_tsdp_get_unsafe(tsd))); 1 ( 0.00%) malloc_tsd_boot1(); . /* Update TSD after tsd_boot1. */ . tsd = tsd_fetch(); 4 ( 0.00%) if (opt_background_thread) { . assert(have_background_thread); . /* . * Need to finish init & unlock first before creating background . * threads (pthread_create depends on malloc). ctl_init (which . * sets isthreaded) needs to be called without holding any lock. . */ . background_thread_ctl_init(tsd_tsdn(tsd)); . if (background_thread_create(tsd, 0)) { . return true; . } . } . #undef UNLOCK_RETURN . return false; 8 ( 0.00%) } . . /* . * End initialization functions. . */ . /******************************************************************************/ . /* . * Begin allocation-path internal functions and data structures. . */ -- line 1810 ---------------------------------------- -- line 1931 ---------------------------------------- . . /* Fill in the arena. */ . if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { . /* . * In case of automatic arena management, we defer arena . * computation until as late as we can, hoping to fill the . * allocation out of the tcache. . */ 2 ( 0.00%) arena = NULL; . } else { . arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); . } . . if (unlikely(dopts->alignment != 0)) { . return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, . dopts->zero, tcache, arena); . } -- line 1947 ---------------------------------------- -- line 1998 ---------------------------------------- . assert(dopts->num_items == 1); . *size = dopts->item_size; . return false; . } . . /* A size_t with its high-half bits all set to 1. */ . static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); . 
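	/*
	 * Concrete example of the check below, for 64-bit size_t
	 * (high_bits == 0xffffffff00000000): if neither num_items nor
	 * item_size has any upper-half bit set, both are < 2^32, so their
	 * product is < 2^64 and the multiplication cannot wrap.  Only when
	 * one of them touches the upper half does the code need the
	 * division-based verification further down.
	 */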
3,896,250 ( 0.08%) *size = dopts->item_size * dopts->num_items; . 1,558,500 ( 0.03%) if (unlikely(*size == 0)) { . return (dopts->num_items != 0 && dopts->item_size != 0); . } . . /* . * We got a non-zero size, but we don't know if we overflowed to get . * there. To avoid having to do a divide, we'll be clever and note that . * if both A and B can be represented in N/2 bits, then their product . * can be represented in N bits (without the possibility of overflow). . */ 3,117,000 ( 0.06%) if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { . return false; . } 1,558,500 ( 0.03%) if (likely(*size / dopts->item_size == dopts->num_items)) { . return false; . } . return true; . } . . JEMALLOC_ALWAYS_INLINE int . imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { . /* Where the actual allocated memory will live. */ -- line 2029 ---------------------------------------- -- line 2055 ---------------------------------------- . || (dopts->alignment & (dopts->alignment - 1)) != 0)) { . goto label_invalid_alignment; . } . . /* This is the beginning of the "core" algorithm. */ . . if (dopts->alignment == 0) { . ind = sz_size2index(size); 1,610,764 ( 0.03%) if (unlikely(ind >= SC_NSIZES)) { . goto label_oom; . } . if (config_stats || (config_prof && opt_prof) || sopts->usize) { . usize = sz_index2size(ind); . dopts->usize = usize; . assert(usize > 0 && usize . <= SC_LARGE_MAXCLASS); . } -- line 2071 ---------------------------------------- -- line 2089 ---------------------------------------- . . check_entry_exit_locking(tsd_tsdn(tsd)); . . /* . * If we need to handle reentrancy, we can do it out of a . * known-initialized arena (i.e. arena 0). . */ . reentrancy_level = tsd_reentrancy_level_get(tsd); 2 ( 0.00%) if (sopts->slow && unlikely(reentrancy_level > 0)) { . /* . * We should never specify particular arenas or tcaches from . * within our internal allocations. . */ . assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || . dopts->tcache_ind == TCACHE_IND_NONE); . assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); . dopts->tcache_ind = TCACHE_IND_NONE; -- line 2105 ---------------------------------------- -- line 2143 ---------------------------------------- . /* . * If dopts->alignment > 0, then ind is still 0, but usize was . * computed in the previous if statement. Down the positive . * alignment path, imalloc_no_sample ignores ind and size . * (relying only on usize). . */ . allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, . ind); 52,078 ( 0.00%) if (unlikely(allocation == NULL)) { . goto label_oom; . } . } . . /* . * Allocation has been done at this point. We still have some . * post-allocation work to do though. . */ . assert(dopts->alignment == 0 . || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); . . if (config_stats) { . assert(usize == isalloc(tsd_tsdn(tsd), allocation)); 805,382 ( 0.02%) *tsd_thread_allocatedp_get(tsd) += usize; . } . . if (sopts->slow) { . UTRACE(0, size, allocation); . } . . /* Success! */ . check_entry_exit_locking(tsd_tsdn(tsd)); . *dopts->result = allocation; 12 ( 0.00%) return 0; . . label_oom: . if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { . malloc_write(sopts->oom_string); . abort(); . } . . if (sopts->slow) { -- line 2183 ---------------------------------------- -- line 2221 ---------------------------------------- . *dopts->result = NULL; . } . . return EINVAL; . } . . JEMALLOC_ALWAYS_INLINE bool . 
imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) { 3 ( 0.00%) if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { . if (config_xmalloc && unlikely(opt_xmalloc)) { . malloc_write(sopts->oom_string); . abort(); . } . UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); . set_errno(ENOMEM); . *dopts->result = NULL; . -- line 2237 ---------------------------------------- -- line 2246 ---------------------------------------- . imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { . if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { . return ENOMEM; . } . . /* We always need the tsd. Let's grab it right away. */ . tsd_t *tsd = tsd_fetch(); . assert(tsd); 8 ( 0.00%) if (likely(tsd_fast(tsd))) { . /* Fast and common path. */ . tsd_assert_fast(tsd); . sopts->slow = false; . return imalloc_body(sopts, dopts, tsd); . } else { . if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { . return ENOMEM; . } -- line 2262 ---------------------------------------- -- line 2263 ---------------------------------------- . . sopts->slow = true; . return imalloc_body(sopts, dopts, tsd); . } . } . . JEMALLOC_NOINLINE . void * 261,200 ( 0.01%) malloc_default(size_t size) { . void *ret; . static_opts_t sopts; . dynamic_opts_t dopts; . . LOG("core.malloc.entry", "size: %zu", size); . . static_opts_init(&sopts); . dynamic_opts_init(&dopts); -- line 2279 ---------------------------------------- -- line 2287 ---------------------------------------- . dopts.item_size = size; . . imalloc(&sopts, &dopts); . /* . * Note that this branch gets optimized away -- it immediately follows . * the check on tsd_fast that sets sopts.slow. . */ . if (sopts.slow) { 3 ( 0.00%) uintptr_t args[3] = {size}; 8 ( 0.00%) hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args); . } . . LOG("core.malloc.exit", "result: %p", ret); . . return ret; 313,440 ( 0.01%) } . . /******************************************************************************/ . /* . * Begin malloc(3)-compatible functions. . */ . . /* . * malloc() fastpath. -- line 2310 ---------------------------------------- -- line 2315 ---------------------------------------- . * registers. . * . * fastpath supports ticker and profiling, both of which will also . * tail-call to the slowpath if they fire. . */ . JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN . void JEMALLOC_NOTHROW * . JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 3,536,444 ( 0.07%) je_malloc(size_t size) { . LOG("core.malloc.entry", "size: %zu", size); . . if (tsd_get_allocates() && unlikely(!malloc_initialized())) { . return malloc_default(size); . } . . tsd_t *tsd = tsd_get(false); 14,145,776 ( 0.28%) if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) { . return malloc_default(size); . } . . tcache_t *tcache = tsd_tcachep_get(tsd); . . if (unlikely(ticker_trytick(&tcache->gc_ticker))) { . return malloc_default(size); . } -- line 2339 ---------------------------------------- -- line 2367 ---------------------------------------- . } . . cache_bin_t *bin = tcache_small_bin_get(tcache, ind); . bool tcache_success; . void* ret = cache_bin_alloc_easy(bin, &tcache_success); . . if (tcache_success) { . if (config_stats) { 3,510,324 ( 0.07%) *tsd_thread_allocatedp_get(tsd) += usize; 3,510,324 ( 0.07%) bin->tstats.nrequests++; . } . if (config_prof) { . tcache->prof_accumbytes += usize; . } . . LOG("core.malloc.exit", "result: %p", ret); . . /* Fastpath success */ . return ret; . } . 
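	/*
	 * Reached only when cache_bin_alloc_easy() reported failure (the
	 * cache bin for this size class is empty); fall back to the general
	 * slow path.
	 */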
26,120 ( 0.00%) return malloc_default(size); 3,510,324 ( 0.07%) } . . JEMALLOC_EXPORT int JEMALLOC_NOTHROW . JEMALLOC_ATTR(nonnull(1)) . je_posix_memalign(void **memptr, size_t alignment, size_t size) { . int ret; . static_opts_t sopts; . dynamic_opts_t dopts; . -- line 2397 ---------------------------------------- -- line 2466 ---------------------------------------- . LOG("core.aligned_alloc.exit", "result: %p", ret); . . return ret; . } . . JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN . void JEMALLOC_NOTHROW * . JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) 13,247,250 ( 0.26%) je_calloc(size_t num, size_t size) { . void *ret; . static_opts_t sopts; . dynamic_opts_t dopts; . . LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); . . static_opts_init(&sopts); . dynamic_opts_init(&dopts); -- line 2482 ---------------------------------------- -- line 2495 ---------------------------------------- . if (sopts.slow) { . uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size}; . hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args); . } . . LOG("core.calloc.exit", "result: %p", ret); . . return ret; 9,351,000 ( 0.19%) } . . static void * . irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, . prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { . void *p; . . if (tctx == NULL) { . return NULL; -- line 2511 ---------------------------------------- -- line 2574 ---------------------------------------- . size_t usize; . if (config_prof && opt_prof) { . usize = sz_index2size(alloc_ctx.szind); . prof_free(tsd, ptr, usize, &alloc_ctx); . } else if (config_stats) { . usize = sz_index2size(alloc_ctx.szind); . } . if (config_stats) { 87 ( 0.00%) *tsd_thread_deallocatedp_get(tsd) += usize; . } . . if (likely(!slow_path)) { . idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, . false); . } else { . idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, . true); -- line 2590 ---------------------------------------- -- line 2632 ---------------------------------------- . } else { . ctx = NULL; . } . . if (config_prof && opt_prof) { . prof_free(tsd, ptr, usize, ctx); . } . if (config_stats) { 23,425 ( 0.00%) *tsd_thread_deallocatedp_get(tsd) += usize; . } . . if (likely(!slow_path)) { . isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); . } else { . isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); . } . } . . JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN . void JEMALLOC_NOTHROW * . JEMALLOC_ALLOC_SIZE(2) 5,605,572 ( 0.11%) je_realloc(void *ptr, size_t arg_size) { . void *ret; . tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); . size_t usize JEMALLOC_CC_SILENCE_INIT(0); . size_t old_usize = 0; . size_t size = arg_size; . . LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); . 800,796 ( 0.02%) if (unlikely(size == 0)) { . if (ptr != NULL) { . /* realloc(ptr, 0) is equivalent to free(ptr). */ . UTRACE(ptr, 0, 0); . tcache_t *tcache; . tsd_t *tsd = tsd_fetch(); . if (tsd_reentrancy_level_get(tsd) == 0) { . tcache = tcache_get(tsd); . } else { -- line 2670 ---------------------------------------- -- line 2677 ---------------------------------------- . ifree(tsd, ptr, tcache, true); . . LOG("core.realloc.exit", "result: %p", NULL); . return NULL; . } . size = 1; . } . 800,796 ( 0.02%) if (likely(ptr != NULL)) { . assert(malloc_initialized() || IS_INITIALIZER); . tsd_t *tsd = tsd_fetch(); . . check_entry_exit_locking(tsd_tsdn(tsd)); . . 3,203,088 ( 0.06%) hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr, . 
(uintptr_t)arg_size, 0, 0}}; . . alloc_ctx_t alloc_ctx; . rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); . rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, . (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); . assert(alloc_ctx.szind != SC_NSIZES); . old_usize = sz_index2size(alloc_ctx.szind); -- line 2700 ---------------------------------------- -- line 2738 ---------------------------------------- . uintptr_t args[3] = {(uintptr_t)ptr, arg_size}; . hook_invoke_alloc(hook_alloc_realloc, ret, . (uintptr_t)ret, args); . } . . return ret; . } . 1,201,158 ( 0.02%) if (unlikely(ret == NULL)) { . if (config_xmalloc && unlikely(opt_xmalloc)) { . malloc_write(": Error in realloc(): " . "out of memory\n"); . abort(); . } . set_errno(ENOMEM); . } . if (config_stats && likely(ret != NULL)) { . tsd_t *tsd; . . assert(usize == isalloc(tsdn, ret)); . tsd = tsdn_tsd(tsdn); 1,201,158 ( 0.02%) *tsd_thread_allocatedp_get(tsd) += usize; . *tsd_thread_deallocatedp_get(tsd) += old_usize; . } . UTRACE(ptr, size, ret); . check_entry_exit_locking(tsdn); . . LOG("core.realloc.exit", "result: %p", ret); . return ret; 4,804,776 ( 0.10%) } . . JEMALLOC_NOINLINE . void 456 ( 0.00%) free_default(void *ptr) { . UTRACE(ptr, 0, 0); 76 ( 0.00%) if (likely(ptr != NULL)) { . /* . * We avoid setting up tsd fully (e.g. tcache, arena binding) . * based on only free() calls -- other activities trigger the . * minimal to full transition. This is because free() may . * happen during thread shutdown after tls deallocation: if a . * thread never had any malloc activities until then, a . * fully-setup tsd won't be destructed properly. . */ -- line 2781 ---------------------------------------- -- line 2795 ---------------------------------------- . tcache = NULL; . } . uintptr_t args_raw[3] = {(uintptr_t)ptr}; . hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw); . ifree(tsd, ptr, tcache, true); . } . check_entry_exit_locking(tsd_tsdn(tsd)); . } 370 ( 0.00%) } . . JEMALLOC_ALWAYS_INLINE . bool free_fastpath(void *ptr, size_t size, bool size_hint) { . tsd_t *tsd = tsd_get(false); 8,625,786 ( 0.17%) if (unlikely(!tsd || !tsd_fast(tsd))) { . return false; . } . . tcache_t *tcache = tsd_tcachep_get(tsd); . . alloc_ctx_t alloc_ctx; . /* . * If !config_cache_oblivious, we can check PAGE alignment to -- line 2816 ---------------------------------------- -- line 2820 ---------------------------------------- . */ . if (!size_hint || config_cache_oblivious) { . rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); . bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree, . rtree_ctx, (uintptr_t)ptr, . &alloc_ctx.szind, &alloc_ctx.slab); . . /* Note: profiled objects will have alloc_ctx.slab set */ 8,625,774 ( 0.17%) if (!res || !alloc_ctx.slab) { . return false; . } . assert(alloc_ctx.szind != SC_NSIZES); . } else { . /* . * Check for both sizes that are too large, and for sampled objects. . * Sampled objects are always page-aligned. The sampled object check . * will also check for null ptr. -- line 2836 ---------------------------------------- -- line 2848 ---------------------------------------- . cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind); . cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind]; . if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) { . return false; . } . . if (config_stats) { . size_t usize = sz_index2size(alloc_ctx.szind); 12,868,290 ( 0.26%) *tsd_thread_deallocatedp_get(tsd) += usize; . } . . return true; . } . . 
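/*
 * Caller-side sketch (illustrative, not part of this file) of the sized
 * deallocation that the size_hint path in free_fastpath() above serves:
 * je_sdallocx() below calls free_fastpath(ptr, size, true), and per the
 * jemalloc API the size passed to sdallocx() must lie between the original
 * request size and the usable size.  Assumes an unprefixed public API
 * (prefixed builds rename malloc/sdallocx accordingly).
 *
 *     #include <stdlib.h>
 *     #include <jemalloc/jemalloc.h>
 *
 *     void sized_free_demo(void) {
 *         void *p = malloc(48);
 *         if (p != NULL) {
 *             // Pass the request size back so the rtree lookup can
 *             // sometimes be skipped on the deallocation fast path.
 *             sdallocx(p, 48, 0);
 *         }
 *     }
 */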
JEMALLOC_EXPORT void JEMALLOC_NOTHROW 1,923 ( 0.00%) je_free(void *ptr) { . LOG("core.free.entry", "ptr: %p", ptr); . . if (!free_fastpath(ptr, 0, false)) { 38 ( 0.00%) free_default(ptr); . } . . LOG("core.free.exit", ""); 1,885 ( 0.00%) } . . /* . * End malloc(3)-compatible functions. . */ . /******************************************************************************/ . /* . * Begin non-standard override functions. . */ -- line 2879 ---------------------------------------- -- line 3530 ---------------------------------------- . LOG("core.dallocx.exit", ""); . } . . JEMALLOC_ALWAYS_INLINE size_t . inallocx(tsdn_t *tsdn, size_t size, int flags) { . check_entry_exit_locking(tsdn); . . size_t usize; 70,275 ( 0.00%) if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { . usize = sz_s2u(size); . } else { . usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); . } . check_entry_exit_locking(tsdn); . return usize; . } . . JEMALLOC_NOINLINE void 351,375 ( 0.01%) sdallocx_default(void *ptr, size_t size, int flags) { . assert(ptr != NULL); . assert(malloc_initialized() || IS_INITIALIZER); . . tsd_t *tsd = tsd_fetch(); . bool fast = tsd_fast(tsd); . size_t usize = inallocx(tsd_tsdn(tsd), size, flags); . assert(usize == isalloc(tsd_tsdn(tsd), ptr)); . check_entry_exit_locking(tsd_tsdn(tsd)); . . tcache_t *tcache; 70,275 ( 0.00%) if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { . /* Not allowed to be reentrant and specify a custom tcache. */ . assert(tsd_reentrancy_level_get(tsd) == 0); . if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { . tcache = NULL; . } else { . tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); . } . } else { 46,850 ( 0.00%) if (likely(fast)) { . tcache = tsd_tcachep_get(tsd); . assert(tcache == tcache_get(tsd)); . } else { . if (likely(tsd_reentrancy_level_get(tsd) == 0)) { . tcache = tcache_get(tsd); . } else { . tcache = NULL; . } -- line 3576 ---------------------------------------- -- line 3583 ---------------------------------------- . isfree(tsd, ptr, usize, tcache, false); . } else { . uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags}; . hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw); . isfree(tsd, ptr, usize, tcache, true); . } . check_entry_exit_locking(tsd_tsdn(tsd)); . 180,991 ( 0.00%) } . . JEMALLOC_EXPORT void JEMALLOC_NOTHROW 4,310,970 ( 0.09%) je_sdallocx(void *ptr, size_t size, int flags) { . LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, . size, flags); . 8,621,940 ( 0.17%) if (flags !=0 || !free_fastpath(ptr, size, true)) { 23,425 ( 0.00%) sdallocx_default(ptr, size, flags); . } . . LOG("core.sdallocx.exit", ""); 4,287,545 ( 0.09%) } . . void JEMALLOC_NOTHROW . je_sdallocx_noflags(void *ptr, size_t size) { . LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr, . size); . . if (!free_fastpath(ptr, size, true)) { . sdallocx_default(ptr, size, 0); -- line 3611 ---------------------------------------- -- line 3766 ---------------------------------------- . * the allocator isn't fully initialized at fork time. The following library . * constructor is a partial solution to this problem. It may still be possible . * to trigger the deadlock described above, but doing so would involve forking . * via a library constructor that runs before jemalloc's runs. . */ . #ifndef JEMALLOC_JET . JEMALLOC_ATTR(constructor) . static void 1 ( 0.00%) jemalloc_constructor(void) { . malloc_init(); 1 ( 0.00%) } . #endif . . #ifndef JEMALLOC_MUTEX_INIT_CB . void . jemalloc_prefork(void) . #else . JEMALLOC_EXPORT void . 
_malloc_prefork(void)
-- line 3784 ----------------------------------------