Reduce the overhead from malloc usable #384

Open · wants to merge 15 commits into base: unstable
@@ -70,6 +70,6 @@ void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
void je_sdallocx_noflags(void *ptr, size_t size);
void *malloc_default(size_t size);
void *malloc_default(size_t size, size_t *usize);

#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
@@ -255,15 +255,15 @@ malloc_initialized(void) {
* tail-call to the slowpath if they fire.
*/
JEMALLOC_ALWAYS_INLINE void *
imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t, size_t *), size_t *usable_size) {
LOG("core.malloc.entry", "size: %zu", size);
if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
return fallback_alloc(size);
return fallback_alloc(size, usable_size);
}

tsd_t *tsd = tsd_get(false);
if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) {
return fallback_alloc(size);
return fallback_alloc(size, usable_size);
}
/*
* The code below till the branch checking the next_event threshold may
@@ -307,7 +307,7 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
* 0) in a single branch.
*/
if (unlikely(allocated_after >= threshold)) {
return fallback_alloc(size);
return fallback_alloc(size, usable_size);
}
assert(tsd_fast(tsd));

@@ -326,15 +326,17 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
ret = cache_bin_alloc_easy(bin, &tcache_success);
if (tcache_success) {
fastpath_success_finish(tsd, allocated_after, bin, ret);
if (usable_size) *usable_size = usize;
return ret;
}
ret = cache_bin_alloc(bin, &tcache_success);
if (tcache_success) {
fastpath_success_finish(tsd, allocated_after, bin, ret);
if (usable_size) *usable_size = usize;
return ret;
}

return fallback_alloc(size);
return fallback_alloc(size, usable_size);
}

JEMALLOC_ALWAYS_INLINE int
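
Note: the fast path now takes a usable-size out-parameter and threads it through to the fallback allocator, filling it on a tcache hit. A minimal sketch of a wrapper built on the new signature, mirroring the je_malloc_internal() helper added in jemalloc.c below; my_fallback and my_alloc are illustrative names, not part of this change:

static void *my_fallback(size_t size, size_t *usize) {
    return malloc_default(size, usize);             /* slow path fills usize */
}

static void *my_alloc(size_t size, size_t *usize) {
    /* usize may be NULL; the fast path writes through it only when non-NULL. */
    return imalloc_fastpath(size, &my_fallback, usize);
}
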
3 changes: 3 additions & 0 deletions deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
@@ -151,3 +151,6 @@
/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
* function. */
#define JEMALLOC_FRAG_HINT

/* This version of Jemalloc, modified for Redis, has the *_with_usize() family of functions. */
#define JEMALLOC_ALLOC_WITH_USIZE
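
Note: the macro above lets embedding code detect the new allocation-with-usize exports at compile time. A hedged sketch of consumer-side feature gating; the function name, the plain malloc()/malloc_usable_size() fallback, and the assumption that the prototypes are visible without a symbol prefix are illustrative, not part of this change:

#include <stdlib.h>
#include <malloc.h>   /* malloc_usable_size() on glibc; assumption for the fallback path */

static void *alloc_and_size(size_t len, size_t *usize_out) {
#ifdef JEMALLOC_ALLOC_WITH_USIZE
    return malloc_with_usize(len, usize_out);        /* one call returns both */
#else
    void *p = malloc(len);
    if (p != NULL) {
        *usize_out = malloc_usable_size(p);           /* extra metadata lookup */
    }
    return p;
#endif
}
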
114 changes: 84 additions & 30 deletions deps/jemalloc/src/jemalloc.c
@@ -2697,7 +2697,7 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {

JEMALLOC_NOINLINE
void *
malloc_default(size_t size) {
malloc_default(size_t size, size_t *usize) {
void *ret;
static_opts_t sopts;
dynamic_opts_t dopts;
@@ -2731,6 +2731,7 @@ malloc_default(size_t size) {

LOG("core.malloc.exit", "result: %p", ret);

if (usize) *usize = dopts.usize;
return ret;
}

@@ -2739,11 +2740,15 @@ malloc_default(size_t size) {
* Begin malloc(3)-compatible functions.
*/

static inline void *je_malloc_internal(size_t size, size_t *usize) {
return imalloc_fastpath(size, &malloc_default, usize);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size) {
return imalloc_fastpath(size, &malloc_default);
return je_malloc_internal(size, NULL);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
@@ -2826,10 +2831,7 @@ je_aligned_alloc(size_t alignment, size_t size) {
return ret;
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size) {
static void *je_calloc_internal(size_t num, size_t size, size_t *usize) {
void *ret;
static_opts_t sopts;
dynamic_opts_t dopts;
@@ -2857,11 +2859,19 @@

LOG("core.calloc.exit", "result: %p", ret);

if (usize) *usize = dopts.usize;
return ret;
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size) {
return je_calloc_internal(num, size, NULL);
}

JEMALLOC_ALWAYS_INLINE void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path, size_t *usable) {
if (!slow_path) {
tsd_assert_fast(tsd);
}
@@ -2894,6 +2904,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
true);
}
thread_dalloc_event(tsd, usize);
if (usable) *usable = usize;
}

JEMALLOC_ALWAYS_INLINE bool
@@ -2993,7 +3004,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {

JEMALLOC_NOINLINE
void
free_default(void *ptr) {
free_default(void *ptr, size_t *usize) {
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
/*
@@ -3011,14 +3022,14 @@ free_default(void *ptr) {
tcache_t *tcache = tcache_get_from_ind(tsd,
TCACHE_IND_AUTOMATIC, /* slow */ false,
/* is_alloc */ false);
ifree(tsd, ptr, tcache, /* slow */ false);
ifree(tsd, ptr, tcache, /* slow */ false, usize);
} else {
tcache_t *tcache = tcache_get_from_ind(tsd,
TCACHE_IND_AUTOMATIC, /* slow */ true,
/* is_alloc */ false);
uintptr_t args_raw[3] = {(uintptr_t)ptr};
hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
ifree(tsd, ptr, tcache, /* slow */ true);
ifree(tsd, ptr, tcache, /* slow */ true, usize);
}

check_entry_exit_locking(tsd_tsdn(tsd));
@@ -3062,7 +3073,7 @@ free_fastpath_nonfast_aligned(void *ptr, bool check_prof) {

/* Returns whether or not the free attempt was successful. */
JEMALLOC_ALWAYS_INLINE
bool free_fastpath(void *ptr, size_t size, bool size_hint) {
bool free_fastpath(void *ptr, size_t size, bool size_hint, size_t *usable_size) {
tsd_t *tsd = tsd_get(false);
/* The branch gets optimized away unless tsd_get_allocates(). */
if (unlikely(tsd == NULL)) {
@@ -3131,6 +3142,7 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) {
bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
if (fail) {
/* See the comment in isfree. */
if (usable_size) *usable_size = usize;
return true;
}

@@ -3151,20 +3163,25 @@

*tsd_thread_deallocatedp_get(tsd) = deallocated_after;

if (usable_size) *usable_size = usize;
return true;
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr) {
static inline void je_free_internal(void *ptr, size_t *usize) {
LOG("core.free.entry", "ptr: %p", ptr);

if (!free_fastpath(ptr, 0, false)) {
free_default(ptr);
if (!free_fastpath(ptr, 0, false, usize)) {
free_default(ptr, usize);
}

LOG("core.free.exit", "");
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr) {
je_free_internal(ptr, NULL);
}

/*
* End malloc(3)-compatible functions.
*/
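
Note: ifree() and the free fast path now report the usable size of the block they release, so an accounting layer no longer needs a malloc_usable_size() lookup before every free. A hedged sketch of such a caller, built on the free_with_usize() export added at the end of this file; zfree_sketch and used_memory are illustrative names, not part of this change:

static size_t used_memory;            /* illustrative accounting counter */

static void zfree_sketch(void *ptr) {
    size_t usize = 0;                 /* stays 0 when ptr is NULL and nothing is freed */
    free_with_usize(ptr, &usize);
    used_memory -= usize;
}
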
@@ -3490,7 +3507,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
}

static void *
do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
do_rallocx(void *ptr, size_t size, int flags, bool is_realloc, size_t *old_usable_size, size_t *new_usable_size) {
void *p;
tsd_t *tsd;
size_t usize;
@@ -3555,6 +3572,8 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
junk_alloc_callback(excess_start, excess_len);
}

if (old_usable_size) *old_usable_size = old_usize;
if (new_usable_size) *new_usable_size = usize;
return p;
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -3573,13 +3592,13 @@ JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags) {
LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
size, flags);
void *ret = do_rallocx(ptr, size, flags, false);
void *ret = do_rallocx(ptr, size, flags, false, NULL, NULL);
LOG("core.rallocx.exit", "result: %p", ret);
return ret;
}

static void *
do_realloc_nonnull_zero(void *ptr) {
do_realloc_nonnull_zero(void *ptr, size_t *old_usize, size_t *new_usize) {
if (config_stats) {
atomic_fetch_add_zu(&zero_realloc_count, 1, ATOMIC_RELAXED);
}
@@ -3590,7 +3609,7 @@ do_realloc_nonnull_zero(void *ptr) {
* reduce the harm, and turn off the tcache while allocating, so
* that we'll get a true first fit.
*/
return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true);
return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true, old_usize, new_usize);
} else if (opt_zero_realloc_action == zero_realloc_action_free) {
UTRACE(ptr, 0, 0);
tsd_t *tsd = tsd_fetch();
@@ -3601,7 +3620,10 @@ do_realloc_nonnull_zero(void *ptr) {
/* is_alloc */ false);
uintptr_t args[3] = {(uintptr_t)ptr, 0};
hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
ifree(tsd, ptr, tcache, true);
size_t usize;
ifree(tsd, ptr, tcache, true, &usize);
if (old_usize) *old_usize = usize;
if (new_usize) *new_usize = 0;

check_entry_exit_locking(tsd_tsdn(tsd));
return NULL;
@@ -3617,18 +3639,15 @@ do_realloc_nonnull_zero(void *ptr) {
}
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size) {
static inline void *je_realloc_internal(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) {
LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);

if (likely(ptr != NULL && size != 0)) {
void *ret = do_rallocx(ptr, size, 0, true);
void *ret = do_rallocx(ptr, size, 0, true, old_usize, new_usize);
LOG("core.realloc.exit", "result: %p", ret);
return ret;
} else if (ptr != NULL && size == 0) {
void *ret = do_realloc_nonnull_zero(ptr);
void *ret = do_realloc_nonnull_zero(ptr, old_usize, new_usize);
LOG("core.realloc.exit", "result: %p", ret);
return ret;
} else {
@@ -3657,10 +3676,19 @@ je_realloc(void *ptr, size_t size) {
(uintptr_t)ret, args);
}
LOG("core.realloc.exit", "result: %p", ret);
if (old_usize) *old_usize = 0;
if (new_usize) *new_usize = dopts.usize;
return ret;
}
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size) {
return je_realloc_internal(ptr, size, NULL, NULL);
}
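
Note: the realloc path now reports both the old and the new usable size, so a caller can update its accounting from a single call instead of bracketing realloc() with two malloc_usable_size() lookups. A hedged sketch using the realloc_with_usize() export added at the end of this file; zrealloc_sketch is an illustrative name, and the out-parameters are only trusted on success, matching the code above:

static size_t used_memory;            /* same illustrative counter as in the free sketch */

static void *zrealloc_sketch(void *ptr, size_t len) {
    size_t old_usize = 0, new_usize = 0;
    void *p = realloc_with_usize(ptr, len, &old_usize, &new_usize);
    if (p == NULL && len != 0) {
        return NULL;                  /* allocation failed; counters untouched */
    }
    used_memory -= old_usize;         /* 0 when ptr was NULL */
    used_memory += new_usize;         /* 0 when the zero-size path freed ptr */
    return p;
}
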

JEMALLOC_ALWAYS_INLINE size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero) {
@@ -3883,11 +3911,11 @@ je_dallocx(void *ptr, int flags) {
UTRACE(ptr, 0, 0);
if (likely(fast)) {
tsd_assert_fast(tsd);
ifree(tsd, ptr, tcache, false);
ifree(tsd, ptr, tcache, false, NULL);
} else {
uintptr_t args_raw[3] = {(uintptr_t)ptr, flags};
hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw);
ifree(tsd, ptr, tcache, true);
ifree(tsd, ptr, tcache, true, NULL);
}
check_entry_exit_locking(tsd_tsdn(tsd));

@@ -3935,7 +3963,7 @@ je_sdallocx(void *ptr, size_t size, int flags) {
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
size, flags);

if (flags != 0 || !free_fastpath(ptr, size, true)) {
if (flags != 0 || !free_fastpath(ptr, size, true, NULL)) {
sdallocx_default(ptr, size, flags);
}

@@ -3947,7 +3975,7 @@ je_sdallocx_noflags(void *ptr, size_t size) {
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr,
size);

if (!free_fastpath(ptr, size, true)) {
if (!free_fastpath(ptr, size, true, NULL)) {
sdallocx_default(ptr, size, 0);
}

@@ -4483,3 +4511,29 @@ get_defrag_hint(void* ptr) {
assert(ptr != NULL);
return iget_defrag_hint(TSDN_NULL, ptr);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
malloc_with_usize(size_t size, size_t *usize) {
return je_malloc_internal(size, usize);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
calloc_with_usize(size_t num, size_t size, size_t *usize) {
return je_calloc_internal(num, size, usize);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
realloc_with_usize(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) {
return je_realloc_internal(ptr, size, old_usize, new_usize);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
free_with_usize(void *ptr, size_t *usize) {
je_free_internal(ptr, usize);
}
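
Note: these four exports are the point of the change; the caller gets the pointer and its usable size from one call instead of paying a second metadata lookup through malloc_usable_size() on every allocation. A hedged sketch of a zmalloc-style consumer; every name outside jemalloc is an assumption, not part of this change:

static size_t used_memory;             /* illustrative accounting counter */

static void *zmalloc_sketch(size_t len) {
    size_t usize = 0;
    void *p = malloc_with_usize(len, &usize);
    if (p == NULL) {
        return NULL;
    }
    used_memory += usize;              /* account the real usable size, not len */
    return p;
}

static void *zcalloc_sketch(size_t nmemb, size_t size) {
    size_t usize = 0;
    void *p = calloc_with_usize(nmemb, size, &usize);
    if (p == NULL) {
        return NULL;
    }
    used_memory += usize;
    return p;
}
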
6 changes: 3 additions & 3 deletions deps/jemalloc/src/jemalloc_cpp.cpp
@@ -94,8 +94,8 @@ handleOOM(std::size_t size, bool nothrow) {
template <bool IsNoExcept>
JEMALLOC_NOINLINE
static void *
fallback_impl(std::size_t size) noexcept(IsNoExcept) {
void *ptr = malloc_default(size);
fallback_impl(std::size_t size, std::size_t *usize) noexcept(IsNoExcept) {
void *ptr = malloc_default(size, NULL);
if (likely(ptr != nullptr)) {
return ptr;
}
@@ -106,7 +106,7 @@ template <bool IsNoExcept>
JEMALLOC_ALWAYS_INLINE
void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
return imalloc_fastpath(size, &fallback_impl<IsNoExcept>);
return imalloc_fastpath(size, &fallback_impl<IsNoExcept>, NULL);
}

void *