From 091992f3a1c4286e1d3c9e0a649a47b2be83871a Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Mon, 26 May 2025 10:56:11 +0800 Subject: [PATCH 01/14] Reduce the overhead from malloc usable --- .../internal/jemalloc_internal_externs.h | 2 +- .../internal/jemalloc_internal_inlines_c.h | 13 ++-- .../include/jemalloc/jemalloc_macros.h.in | 4 ++ deps/jemalloc/src/jemalloc.c | 66 +++++++++++++------ deps/jemalloc/src/jemalloc_cpp.cpp | 6 +- src/zmalloc.c | 30 +++++++-- src/zmalloc.h | 7 ++ tests/integration/corrupt-dump-fuzzer.tcl | 10 +-- 8 files changed, 100 insertions(+), 38 deletions(-) diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h index fc834c67373..0f179d25c7a 100644 --- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h @@ -70,6 +70,6 @@ void jemalloc_prefork(void); void jemalloc_postfork_parent(void); void jemalloc_postfork_child(void); void je_sdallocx_noflags(void *ptr, size_t size); -void *malloc_default(size_t size); +void *malloc_default(size_t size, size_t *usize); #endif /* JEMALLOC_INTERNAL_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h index 2cd7e7ce937..c97ccf80c3a 100644 --- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h @@ -255,15 +255,15 @@ malloc_initialized(void) { * tail-call to the slowpath if they fire. 
*/ JEMALLOC_ALWAYS_INLINE void * -imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) { +imalloc_fastpath(size_t size, size_t *usable_size, void *(fallback_alloc)(size_t, size_t *)) { LOG("core.malloc.entry", "size: %zu", size); if (tsd_get_allocates() && unlikely(!malloc_initialized())) { - return fallback_alloc(size); + return fallback_alloc(size, usable_size); } tsd_t *tsd = tsd_get(false); if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) { - return fallback_alloc(size); + return fallback_alloc(size, usable_size); } /* * The code below till the branch checking the next_event threshold may @@ -282,6 +282,7 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) { */ size_t usize; sz_size2index_usize_fastpath(size, &ind, &usize); + if (usable_size) *usable_size = usize; /* Fast path relies on size being a bin. */ assert(ind < SC_NBINS); assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) && @@ -307,7 +308,7 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) { * 0) in a single branch. 
*/ if (unlikely(allocated_after >= threshold)) { - return fallback_alloc(size); + return fallback_alloc(size, usable_size); } assert(tsd_fast(tsd)); @@ -325,16 +326,18 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) { */ ret = cache_bin_alloc_easy(bin, &tcache_success); if (tcache_success) { + if (usable_size) *usable_size = usize; fastpath_success_finish(tsd, allocated_after, bin, ret); return ret; } ret = cache_bin_alloc(bin, &tcache_success); if (tcache_success) { + if (usable_size) *usable_size = usize; fastpath_success_finish(tsd, allocated_after, bin, ret); return ret; } - return fallback_alloc(size); + return fallback_alloc(size, usable_size); } JEMALLOC_ALWAYS_INLINE int diff --git a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in index d04af34d937..47054e567d3 100644 --- a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in +++ b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in @@ -151,3 +151,7 @@ /* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint() * function. */ #define JEMALLOC_FRAG_HINT + +/* This version of Jemalloc, modified for Redis, has the je_malloc_usable() function and + * je_free_usable() function. 
*/ +#define JEMALLOC_USABLE_EXT diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index 83026093be4..a3d4d298aeb 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -2697,7 +2697,7 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { JEMALLOC_NOINLINE void * -malloc_default(size_t size) { +malloc_default(size_t size, size_t *usize) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; @@ -2731,6 +2731,7 @@ malloc_default(size_t size) { LOG("core.malloc.exit", "result: %p", ret); + if (usize) *usize = dopts.usize; return ret; } @@ -2743,7 +2744,7 @@ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { - return imalloc_fastpath(size, &malloc_default); + return imalloc_fastpath(size, NULL, &malloc_default); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW @@ -2861,7 +2862,7 @@ je_calloc(size_t num, size_t size) { } JEMALLOC_ALWAYS_INLINE void -ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path, size_t *usable) { if (!slow_path) { tsd_assert_fast(tsd); } @@ -2894,6 +2895,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { true); } thread_dalloc_event(tsd, usize); + if (usable) *usable = usize; } JEMALLOC_ALWAYS_INLINE bool @@ -2993,7 +2995,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { JEMALLOC_NOINLINE void -free_default(void *ptr) { +free_default(void *ptr, size_t *usable) { UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { /* @@ -3011,14 +3013,14 @@ free_default(void *ptr) { tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC, /* slow */ false, /* is_alloc */ false); - ifree(tsd, ptr, tcache, /* slow */ false); + ifree(tsd, ptr, tcache, /* slow */ false, usable); } else { tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC, /* slow */ true, /* is_alloc */ false); 
uintptr_t args_raw[3] = {(uintptr_t)ptr}; hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw); - ifree(tsd, ptr, tcache, /* slow */ true); + ifree(tsd, ptr, tcache, /* slow */ true, usable); } check_entry_exit_locking(tsd_tsdn(tsd)); @@ -3062,7 +3064,7 @@ free_fastpath_nonfast_aligned(void *ptr, bool check_prof) { /* Returns whether or not the free attempt was successful. */ JEMALLOC_ALWAYS_INLINE -bool free_fastpath(void *ptr, size_t size, bool size_hint) { +bool free_fastpath(void *ptr, size_t size, bool size_hint, size_t *usable) { tsd_t *tsd = tsd_get(false); /* The branch gets optimized away unless tsd_get_allocates(). */ if (unlikely(tsd == NULL)) { @@ -3116,6 +3118,7 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) { te_free_fastpath_ctx(tsd, &deallocated, &threshold); size_t usize = sz_index2size(alloc_ctx.szind); + if (usable) *usable = usize; uint64_t deallocated_after = deallocated + usize; /* * Check for events and tsd non-nominal (fast_threshold will be set to @@ -3158,8 +3161,8 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) { LOG("core.free.entry", "ptr: %p", ptr); - if (!free_fastpath(ptr, 0, false)) { - free_default(ptr); + if (!free_fastpath(ptr, 0, false, NULL)) { + free_default(ptr, NULL); } LOG("core.free.exit", ""); @@ -3490,7 +3493,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, } static void * -do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) { +do_rallocx(void *ptr, size_t size, int flags, bool is_realloc, size_t *old_usable_size, size_t *new_usable_size) { void *p; tsd_t *tsd; size_t usize; @@ -3555,6 +3558,8 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) { junk_alloc_callback(excess_start, excess_len); } + if (old_usable_size) *old_usable_size = old_usize; + if (new_usable_size) *new_usable_size = usize; return p; label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { @@ -3573,13 +3578,13 @@ JEMALLOC_ALLOC_SIZE(2) je_rallocx(void *ptr, size_t size, 
int flags) { LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, flags); - void *ret = do_rallocx(ptr, size, flags, false); + void *ret = do_rallocx(ptr, size, flags, false, NULL, NULL); LOG("core.rallocx.exit", "result: %p", ret); return ret; } static void * -do_realloc_nonnull_zero(void *ptr) { +do_realloc_nonnull_zero(void *ptr, size_t *old_usize, size_t *new_usize) { if (config_stats) { atomic_fetch_add_zu(&zero_realloc_count, 1, ATOMIC_RELAXED); } @@ -3590,7 +3595,7 @@ do_realloc_nonnull_zero(void *ptr) { * reduce the harm, and turn off the tcache while allocating, so * that we'll get a true first fit. */ - return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true); + return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true, old_usize, new_usize); } else if (opt_zero_realloc_action == zero_realloc_action_free) { UTRACE(ptr, 0, 0); tsd_t *tsd = tsd_fetch(); @@ -3601,7 +3606,10 @@ do_realloc_nonnull_zero(void *ptr) { /* is_alloc */ false); uintptr_t args[3] = {(uintptr_t)ptr, 0}; hook_invoke_dalloc(hook_dalloc_realloc, ptr, args); - ifree(tsd, ptr, tcache, true); + size_t usize; + ifree(tsd, ptr, tcache, true, &usize); + if (old_usize) *old_usize = usize; + if (new_usize) *new_usize = 0; check_entry_exit_locking(tsd_tsdn(tsd)); return NULL; @@ -3624,11 +3632,11 @@ je_realloc(void *ptr, size_t size) { LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); if (likely(ptr != NULL && size != 0)) { - void *ret = do_rallocx(ptr, size, 0, true); + void *ret = do_rallocx(ptr, size, 0, true, NULL, NULL); LOG("core.realloc.exit", "result: %p", ret); return ret; } else if (ptr != NULL && size == 0) { - void *ret = do_realloc_nonnull_zero(ptr); + void *ret = do_realloc_nonnull_zero(ptr, NULL, NULL); LOG("core.realloc.exit", "result: %p", ret); return ret; } else { @@ -3883,11 +3891,11 @@ je_dallocx(void *ptr, int flags) { UTRACE(ptr, 0, 0); if (likely(fast)) { tsd_assert_fast(tsd); - ifree(tsd, ptr, tcache, false); + ifree(tsd, ptr, tcache, false, NULL); } 
else { uintptr_t args_raw[3] = {(uintptr_t)ptr, flags}; hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw); - ifree(tsd, ptr, tcache, true); + ifree(tsd, ptr, tcache, true, NULL); } check_entry_exit_locking(tsd_tsdn(tsd)); @@ -3935,7 +3943,7 @@ je_sdallocx(void *ptr, size_t size, int flags) { LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, flags); - if (flags != 0 || !free_fastpath(ptr, size, true)) { + if (flags != 0 || !free_fastpath(ptr, size, true, NULL)) { sdallocx_default(ptr, size, flags); } @@ -3947,7 +3955,7 @@ je_sdallocx_noflags(void *ptr, size_t size) { LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr, size); - if (!free_fastpath(ptr, size, true)) { + if (!free_fastpath(ptr, size, true, NULL)) { sdallocx_default(ptr, size, 0); } @@ -4483,3 +4491,21 @@ get_defrag_hint(void* ptr) { assert(ptr != NULL); return iget_defrag_hint(TSDN_NULL, ptr); } + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +malloc_usable(size_t size, size_t *usize) { + return imalloc_fastpath(size, usize, &malloc_default); +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +free_usable(void *ptr, size_t *usable) { + LOG("core.free.entry", "ptr: %p", ptr); + + if (!free_fastpath(ptr, 0, false, usable)) { + free_default(ptr, usable); + } + + LOG("core.free.exit", ""); +} diff --git a/deps/jemalloc/src/jemalloc_cpp.cpp b/deps/jemalloc/src/jemalloc_cpp.cpp index 451655f1b5a..6b5f08aaec6 100644 --- a/deps/jemalloc/src/jemalloc_cpp.cpp +++ b/deps/jemalloc/src/jemalloc_cpp.cpp @@ -94,8 +94,8 @@ handleOOM(std::size_t size, bool nothrow) { template JEMALLOC_NOINLINE static void * -fallback_impl(std::size_t size) noexcept(IsNoExcept) { - void *ptr = malloc_default(size); +fallback_impl(std::size_t size, std::size_t *usize) noexcept(IsNoExcept) { + void *ptr = malloc_default(size, NULL); if (likely(ptr != nullptr)) { return ptr; } @@ -106,7 +106,7 @@ template 
JEMALLOC_ALWAYS_INLINE void * newImpl(std::size_t size) noexcept(IsNoExcept) { - return imalloc_fastpath(size, &fallback_impl); + return imalloc_fastpath(size, NULL, &fallback_impl); } void * diff --git a/src/zmalloc.c b/src/zmalloc.c index b2f57184fbf..31f4ed4bfd4 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -67,6 +67,12 @@ void zlibc_free(void *ptr) { #define mallocx(size,flags) je_mallocx(size,flags) #define rallocx(ptr,size,flags) je_rallocx(ptr,size,flags) #define dallocx(ptr,flags) je_dallocx(ptr,flags) +#if defined(HAVE_USABLE_EXT) +void *je_malloc_usable(size_t size, size_t *usize); +void je_free_usable(void *ptr, size_t *usize); +#define malloc_usable(size,usize) je_malloc_usable(size,usize) +#define free_usable(ptr,usize) je_free_usable(ptr,usize) +#endif #endif #define MAX_THREADS 16 /* Keep it a power of 2 so we can use '&' instead of '%'. */ @@ -119,10 +125,17 @@ void *extend_to_usable(void *ptr, size_t size) { static inline void *ztrymalloc_usable_internal(size_t size, size_t *usable) { /* Possible overflow, return NULL, so that the caller can panic or handle a failed allocation. 
*/ if (size >= SIZE_MAX/2) return NULL; +#ifdef HAVE_USABLE_EXT + void *ptr = malloc_usable(MALLOC_MIN_SIZE(size)+PREFIX_SIZE, &size); +#else void *ptr = malloc(MALLOC_MIN_SIZE(size)+PREFIX_SIZE); - +#endif if (!ptr) return NULL; -#ifdef HAVE_MALLOC_SIZE +#ifdef HAVE_USABLE_EXT + update_zmalloc_stat_alloc(size); + if (usable) *usable = size; + return ptr; +#elif HAVE_MALLOC_SIZE size = zmalloc_size(ptr); update_zmalloc_stat_alloc(size); if (usable) *usable = size; @@ -423,7 +436,12 @@ void zfree(void *ptr) { #endif if (ptr == NULL) return; -#ifdef HAVE_MALLOC_SIZE + +#ifdef HAVE_USABLE_EXT + size_t usize; + free_usable(ptr, &usize); + update_zmalloc_stat_free(usize); +#elif HAVE_MALLOC_SIZE update_zmalloc_stat_free(zmalloc_size(ptr)); free(ptr); #else @@ -442,7 +460,11 @@ void zfree_usable(void *ptr, size_t *usable) { #endif if (ptr == NULL) return; -#ifdef HAVE_MALLOC_SIZE + +#ifdef HAVE_USABLE_EXT + free_usable(ptr, usable); + update_zmalloc_stat_free(*usable); +#elif HAVE_MALLOC_SIZE update_zmalloc_stat_free(*usable = zmalloc_size(ptr)); free(ptr); #else diff --git a/src/zmalloc.h b/src/zmalloc.h index bbb74a0deb1..47bfdb35bd5 100644 --- a/src/zmalloc.h +++ b/src/zmalloc.h @@ -80,6 +80,13 @@ #define HAVE_DEFRAG #endif +/* We can enable the Redis defrag capabilities only if we are using Jemalloc + * and the version used is our special version modified for Redis having + * the ability to return usable size during allocation or deallocation. */ +#if defined(USE_JEMALLOC) && defined(JEMALLOC_USABLE_EXT) +#define HAVE_USABLE_EXT +#endif + /* 'noinline' attribute is intended to prevent the `-Wstringop-overread` warning * when using gcc-12 later with LTO enabled. It may be removed once the * bug[https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96503] is fixed. 
*/ diff --git a/tests/integration/corrupt-dump-fuzzer.tcl b/tests/integration/corrupt-dump-fuzzer.tcl index 5c7c9923ad6..60210d0582c 100644 --- a/tests/integration/corrupt-dump-fuzzer.tcl +++ b/tests/integration/corrupt-dump-fuzzer.tcl @@ -16,8 +16,8 @@ if { ! [ catch { proc generate_collections {suffix elements} { set rd [redis_deferring_client] set numcmd 7 - set has_vsets [server_has_command vadd] - if {$has_vsets} {incr numcmd} + # set has_vsets [server_has_command vadd] + # if {$has_vsets} {incr numcmd} for {set j 0} {$j < $elements} {incr j} { # add both string values and integers @@ -29,9 +29,9 @@ proc generate_collections {suffix elements} { $rd zadd zset$suffix $j $val $rd sadd set$suffix $val $rd xadd stream$suffix * item 1 value $val - if {$has_vsets} { - $rd vadd vset$suffix VALUES 3 1 1 1 $j - } + # if {$has_vsets} { + # $rd vadd vset$suffix VALUES 3 1 1 1 $j + # } } for {set j 0} {$j < $elements * $numcmd} {incr j} { $rd read ; # Discard replies From f15adb94f44f495d93a2bb349349e5ed9a57708a Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Mon, 26 May 2025 16:18:25 +0800 Subject: [PATCH 02/14] Add je_realloc_usable() --- deps/jemalloc/src/jemalloc.c | 47 ++++++++++++++++++++++++++++++++++++ src/zmalloc.c | 15 ++++++++++-- 2 files changed, 60 insertions(+), 2 deletions(-) diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index a3d4d298aeb..d16465dd596 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -4509,3 +4509,50 @@ free_usable(void *ptr, size_t *usable) { LOG("core.free.exit", ""); } + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +realloc_usable(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) { + LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); + + if (likely(ptr != NULL && size != 0)) { + void *ret = do_rallocx(ptr, size, 0, true, old_usize, new_usize); + LOG("core.realloc.exit", "result: %p", ret); + 
return ret; + } else if (ptr != NULL && size == 0) { + void *ret = do_realloc_nonnull_zero(ptr, old_usize, new_usize); + LOG("core.realloc.exit", "result: %p", ret); + return ret; + } else { + /* realloc(NULL, size) is equivalent to malloc(size). */ + void *ret; + + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = + ": Error in realloc(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + + imalloc(&sopts, &dopts); + if (sopts.slow) { + printf("11111111111\n"); + uintptr_t args[3] = {(uintptr_t)ptr, size}; + hook_invoke_alloc(hook_alloc_realloc, ret, + (uintptr_t)ret, args); + } + LOG("core.realloc.exit", "result: %p", ret); + if (old_usize) *old_usize = 0; + if (new_usize) *new_usize = dopts.usize; + return ret; + } +} diff --git a/src/zmalloc.c b/src/zmalloc.c index 31f4ed4bfd4..ad0aa077dc7 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -69,8 +69,10 @@ void zlibc_free(void *ptr) { #define dallocx(ptr,flags) je_dallocx(ptr,flags) #if defined(HAVE_USABLE_EXT) void *je_malloc_usable(size_t size, size_t *usize); +void *je_realloc_usable(void *ptr, size_t size, size_t *old_usize, size_t *new_usize); void je_free_usable(void *ptr, size_t *usize); #define malloc_usable(size,usize) je_malloc_usable(size,usize) +#define realloc_usable(ptr,size,old_usize,new_usize) je_realloc_usable(ptr,size,old_usize,new_usize) #define free_usable(ptr,usize) je_free_usable(ptr,usize) #endif #endif @@ -348,8 +350,17 @@ static inline void *ztryrealloc_usable_internal(void *ptr, size_t size, size_t * if (usable) *usable = 0; return NULL; } - -#ifdef HAVE_MALLOC_SIZE +#ifdef HAVE_USABLE_EXT + newptr = realloc_usable(ptr, size, &oldsize, &size); + if (newptr == NULL) { + if (usable) *usable = 0; + return NULL; + } + update_zmalloc_stat_free(oldsize); + update_zmalloc_stat_alloc(size); + if 
(usable) *usable = size; + return newptr; +#elif HAVE_MALLOC_SIZE oldsize = zmalloc_size(ptr); newptr = realloc(ptr,size); if (newptr == NULL) { From 2aea18207d7274a505a6e8dcad6d8cf7785e1579 Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Mon, 26 May 2025 19:58:46 +0800 Subject: [PATCH 03/14] Add je_calloc_usable() --- deps/jemalloc/src/jemalloc.c | 40 ++++++++++++++++++++++++++--- src/zmalloc.c | 50 +++++++++++++++++++++++++++++++++++- 2 files changed, 86 insertions(+), 4 deletions(-) diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index d16465dd596..552f30cf7a0 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -4545,14 +4545,48 @@ realloc_usable(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) { imalloc(&sopts, &dopts); if (sopts.slow) { - printf("11111111111\n"); uintptr_t args[3] = {(uintptr_t)ptr, size}; hook_invoke_alloc(hook_alloc_realloc, ret, (uintptr_t)ret, args); } LOG("core.realloc.exit", "result: %p", ret); - if (old_usize) *old_usize = 0; - if (new_usize) *new_usize = dopts.usize; + *old_usize = 0; + *new_usize = dopts.usize; return ret; } } + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) +calloc_usable(size_t num, size_t size, size_t *usize) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.may_overflow = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = ": Error in calloc(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = num; + dopts.item_size = size; + dopts.zero = true; + + imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size}; + hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args); + } + + LOG("core.calloc.exit", 
"result: %p", ret); + + *usize = dopts.usize; + return ret; +} diff --git a/src/zmalloc.c b/src/zmalloc.c index ad0aa077dc7..88837eca49e 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -69,9 +69,11 @@ void zlibc_free(void *ptr) { #define dallocx(ptr,flags) je_dallocx(ptr,flags) #if defined(HAVE_USABLE_EXT) void *je_malloc_usable(size_t size, size_t *usize); +void *je_calloc_usable(size_t num, size_t size, size_t *usize); void *je_realloc_usable(void *ptr, size_t size, size_t *old_usize, size_t *new_usize); void je_free_usable(void *ptr, size_t *usize); #define malloc_usable(size,usize) je_malloc_usable(size,usize) +#define calloc_usable(num,size,usize) je_calloc_usable(num,size,usize) #define realloc_usable(ptr,size,old_usize,new_usize) je_realloc_usable(ptr,size,old_usize,new_usize) #define free_usable(ptr,usize) je_free_usable(ptr,usize) #endif @@ -258,10 +260,18 @@ void zfree_no_tcache(void *ptr) { static inline void *ztrycalloc_usable_internal(size_t size, size_t *usable) { /* Possible overflow, return NULL, so that the caller can panic or handle a failed allocation. 
*/ if (size >= SIZE_MAX/2) return NULL; +#ifdef HAVE_USABLE_EXT + void *ptr = calloc_usable(1, MALLOC_MIN_SIZE(size)+PREFIX_SIZE, &size); +#else void *ptr = calloc(1, MALLOC_MIN_SIZE(size)+PREFIX_SIZE); +#endif if (ptr == NULL) return NULL; -#ifdef HAVE_MALLOC_SIZE +#ifdef HAVE_USABLE_EXT + update_zmalloc_stat_alloc(size); + if (usable) *usable = size; + return ptr; +#elif HAVE_MALLOC_SIZE size = zmalloc_size(ptr); update_zmalloc_stat_alloc(size); if (usable) *usable = size; @@ -1073,6 +1083,33 @@ size_t zmalloc_get_memory_size(void) { #define TEST(name) printf("test — %s\n", name); +#include + +/* Return the UNIX time in microseconds */ +static long long ustime(void) { + struct timeval tv; + long long ust; + + gettimeofday(&tv, NULL); + ust = ((long long)tv.tv_sec) * 1000000; + ust += tv.tv_usec; + return ust; +} + +/* Return the UNIX time in milliseconds */ +static long long mstime(void) { return ustime() / 1000; } + +void test(int sz) { + long long start = mstime(); + for (int i = 0; i < 2000000000; i++) { + // for (int i = 0; i < 500000000; i++) { + void *ptr = zmalloc(sz); + ptr = zrealloc(ptr, sz*2); + zfree(ptr); + } + printf("size: %d, during: %ld\n", sz, mstime() - start); +} + int zmalloc_test(int argc, char **argv, int flags) { void *ptr, *ptr2; @@ -1080,6 +1117,17 @@ int zmalloc_test(int argc, char **argv, int flags) { UNUSED(argv); UNUSED(flags); + test(8); + test(16); + test(32); + test(64); + test(128); + test(256); + test(512); + test(1024); + test(2048); + exit(0); + printf("Malloc prefix size: %d\n", (int) PREFIX_SIZE); TEST("Initial used memory is 0") { From 04a030a19ab97ff08a8b7839c182b83a84c38230 Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Mon, 26 May 2025 20:10:42 +0800 Subject: [PATCH 04/14] Make usable_size paramter at the end --- .../include/jemalloc/internal/jemalloc_internal_inlines_c.h | 2 +- deps/jemalloc/src/jemalloc.c | 4 ++-- deps/jemalloc/src/jemalloc_cpp.cpp | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h index c97ccf80c3a..81e42afb94a 100644 --- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h @@ -255,7 +255,7 @@ malloc_initialized(void) { * tail-call to the slowpath if they fire. */ JEMALLOC_ALWAYS_INLINE void * -imalloc_fastpath(size_t size, size_t *usable_size, void *(fallback_alloc)(size_t, size_t *)) { +imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t, size_t *), size_t *usable_size) { LOG("core.malloc.entry", "size: %zu", size); if (tsd_get_allocates() && unlikely(!malloc_initialized())) { return fallback_alloc(size, usable_size); diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index 552f30cf7a0..056e86e120c 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -2744,7 +2744,7 @@ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { - return imalloc_fastpath(size, NULL, &malloc_default); + return imalloc_fastpath(size, &malloc_default, NULL); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW @@ -4496,7 +4496,7 @@ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) malloc_usable(size_t size, size_t *usize) { - return imalloc_fastpath(size, usize, &malloc_default); + return imalloc_fastpath(size, &malloc_default, usize); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW diff --git a/deps/jemalloc/src/jemalloc_cpp.cpp b/deps/jemalloc/src/jemalloc_cpp.cpp index 6b5f08aaec6..aeff8c6f60d 100644 --- a/deps/jemalloc/src/jemalloc_cpp.cpp +++ b/deps/jemalloc/src/jemalloc_cpp.cpp @@ -106,7 +106,7 @@ template JEMALLOC_ALWAYS_INLINE void * newImpl(std::size_t size) noexcept(IsNoExcept) { - return imalloc_fastpath(size, 
NULL, &fallback_impl); + return imalloc_fastpath(size, &fallback_impl, NULL); } void * From 8f8d39902cb9683b871e144760b2fdd69f6455dc Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Mon, 26 May 2025 20:11:03 +0800 Subject: [PATCH 05/14] Revert temp tests --- src/zmalloc.c | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/src/zmalloc.c b/src/zmalloc.c index 88837eca49e..dc03bb867a2 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -1083,33 +1083,6 @@ size_t zmalloc_get_memory_size(void) { #define TEST(name) printf("test — %s\n", name); -#include - -/* Return the UNIX time in microseconds */ -static long long ustime(void) { - struct timeval tv; - long long ust; - - gettimeofday(&tv, NULL); - ust = ((long long)tv.tv_sec) * 1000000; - ust += tv.tv_usec; - return ust; -} - -/* Return the UNIX time in milliseconds */ -static long long mstime(void) { return ustime() / 1000; } - -void test(int sz) { - long long start = mstime(); - for (int i = 0; i < 2000000000; i++) { - // for (int i = 0; i < 500000000; i++) { - void *ptr = zmalloc(sz); - ptr = zrealloc(ptr, sz*2); - zfree(ptr); - } - printf("size: %d, during: %ld\n", sz, mstime() - start); -} - int zmalloc_test(int argc, char **argv, int flags) { void *ptr, *ptr2; @@ -1117,17 +1090,6 @@ int zmalloc_test(int argc, char **argv, int flags) { UNUSED(argv); UNUSED(flags); - test(8); - test(16); - test(32); - test(64); - test(128); - test(256); - test(512); - test(1024); - test(2048); - exit(0); - printf("Malloc prefix size: %d\n", (int) PREFIX_SIZE); TEST("Initial used memory is 0") { From 40d4934db5be198aa221de12412ff1f6d2b22878 Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Tue, 27 May 2025 10:21:02 +0800 Subject: [PATCH 06/14] Refine name --- deps/jemalloc/src/jemalloc.c | 12 ++++++------ src/zmalloc.c | 26 +++++++++++++------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index 
056e86e120c..779a7b8f053 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -4495,16 +4495,16 @@ get_defrag_hint(void* ptr) { JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -malloc_usable(size_t size, size_t *usize) { +malloc_with_usize(size_t size, size_t *usize) { return imalloc_fastpath(size, &malloc_default, usize); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW -free_usable(void *ptr, size_t *usable) { +free_with_usize(void *ptr, size_t *usize) { LOG("core.free.entry", "ptr: %p", ptr); - if (!free_fastpath(ptr, 0, false, usable)) { - free_default(ptr, usable); + if (!free_fastpath(ptr, 0, false, usize)) { + free_default(ptr, usize); } LOG("core.free.exit", ""); @@ -4513,7 +4513,7 @@ free_usable(void *ptr, size_t *usable) { JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) -realloc_usable(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) { +realloc_with_usize(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) { LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); if (likely(ptr != NULL && size != 0)) { @@ -4559,7 +4559,7 @@ realloc_usable(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) { JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) -calloc_usable(size_t num, size_t size, size_t *usize) { +calloc_with_usize(size_t num, size_t size, size_t *usize) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; diff --git a/src/zmalloc.c b/src/zmalloc.c index dc03bb867a2..fb34e738209 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -68,14 +68,14 @@ void zlibc_free(void *ptr) { #define rallocx(ptr,size,flags) je_rallocx(ptr,size,flags) #define dallocx(ptr,flags) je_dallocx(ptr,flags) #if defined(HAVE_USABLE_EXT) -void *je_malloc_usable(size_t size, size_t *usize); -void 
*je_calloc_usable(size_t num, size_t size, size_t *usize); -void *je_realloc_usable(void *ptr, size_t size, size_t *old_usize, size_t *new_usize); -void je_free_usable(void *ptr, size_t *usize); -#define malloc_usable(size,usize) je_malloc_usable(size,usize) -#define calloc_usable(num,size,usize) je_calloc_usable(num,size,usize) -#define realloc_usable(ptr,size,old_usize,new_usize) je_realloc_usable(ptr,size,old_usize,new_usize) -#define free_usable(ptr,usize) je_free_usable(ptr,usize) +void *je_malloc_with_usize(size_t size, size_t *usize); +void *je_calloc_with_usize(size_t num, size_t size, size_t *usize); +void *je_realloc_with_usize(void *ptr, size_t size, size_t *old_usize, size_t *new_usize); +void je_free_with_usize(void *ptr, size_t *usize); +#define malloc_with_usize(size,usize) je_malloc_with_usize(size,usize) +#define calloc_with_usize(num,size,usize) je_calloc_with_usize(num,size,usize) +#define realloc_with_usize(ptr,size,old_usize,new_usize) je_realloc_with_usize(ptr,size,old_usize,new_usize) +#define free_with_usize(ptr,usize) je_free_with_usize(ptr,usize) #endif #endif @@ -130,7 +130,7 @@ static inline void *ztrymalloc_usable_internal(size_t size, size_t *usable) { /* Possible overflow, return NULL, so that the caller can panic or handle a failed allocation. */ if (size >= SIZE_MAX/2) return NULL; #ifdef HAVE_USABLE_EXT - void *ptr = malloc_usable(MALLOC_MIN_SIZE(size)+PREFIX_SIZE, &size); + void *ptr = malloc_with_usize(MALLOC_MIN_SIZE(size)+PREFIX_SIZE, &size); #else void *ptr = malloc(MALLOC_MIN_SIZE(size)+PREFIX_SIZE); #endif @@ -261,7 +261,7 @@ static inline void *ztrycalloc_usable_internal(size_t size, size_t *usable) { /* Possible overflow, return NULL, so that the caller can panic or handle a failed allocation. 
*/ if (size >= SIZE_MAX/2) return NULL; #ifdef HAVE_USABLE_EXT - void *ptr = calloc_usable(1, MALLOC_MIN_SIZE(size)+PREFIX_SIZE, &size); + void *ptr = calloc_with_usize(1, MALLOC_MIN_SIZE(size)+PREFIX_SIZE, &size); #else void *ptr = calloc(1, MALLOC_MIN_SIZE(size)+PREFIX_SIZE); #endif @@ -361,7 +361,7 @@ static inline void *ztryrealloc_usable_internal(void *ptr, size_t size, size_t * return NULL; } #ifdef HAVE_USABLE_EXT - newptr = realloc_usable(ptr, size, &oldsize, &size); + newptr = realloc_with_usize(ptr, size, &oldsize, &size); if (newptr == NULL) { if (usable) *usable = 0; return NULL; @@ -460,7 +460,7 @@ void zfree(void *ptr) { #ifdef HAVE_USABLE_EXT size_t usize; - free_usable(ptr, &usize); + free_with_usize(ptr, &usize); update_zmalloc_stat_free(usize); #elif HAVE_MALLOC_SIZE update_zmalloc_stat_free(zmalloc_size(ptr)); @@ -483,7 +483,7 @@ void zfree_usable(void *ptr, size_t *usable) { if (ptr == NULL) return; #ifdef HAVE_USABLE_EXT - free_usable(ptr, usable); + free_with_usize(ptr, usable); update_zmalloc_stat_free(*usable); #elif HAVE_MALLOC_SIZE update_zmalloc_stat_free(*usable = zmalloc_size(ptr)); From 1de941ca5ab809c6642213791fdf397bea279155 Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Tue, 27 May 2025 10:46:05 +0800 Subject: [PATCH 07/14] Refine code --- .../jemalloc/internal/jemalloc_internal_inlines_c.h | 5 ++--- deps/jemalloc/src/jemalloc.c | 11 ++++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h index 81e42afb94a..620d097f631 100644 --- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h @@ -282,7 +282,6 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t, size_t *), size_t * */ size_t usize; sz_size2index_usize_fastpath(size, &ind, &usize); - if (usable_size) *usable_size = 
usize; /* Fast path relies on size being a bin. */ assert(ind < SC_NBINS); assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) && @@ -326,14 +325,14 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t, size_t *), size_t * */ ret = cache_bin_alloc_easy(bin, &tcache_success); if (tcache_success) { - if (usable_size) *usable_size = usize; fastpath_success_finish(tsd, allocated_after, bin, ret); + if (usable_size) *usable_size = usize; return ret; } ret = cache_bin_alloc(bin, &tcache_success); if (tcache_success) { - if (usable_size) *usable_size = usize; fastpath_success_finish(tsd, allocated_after, bin, ret); + if (usable_size) *usable_size = usize; return ret; } diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index 779a7b8f053..76779796248 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -2995,7 +2995,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { JEMALLOC_NOINLINE void -free_default(void *ptr, size_t *usable) { +free_default(void *ptr, size_t *usize) { UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { /* @@ -3013,14 +3013,14 @@ free_default(void *ptr, size_t *usable) { tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC, /* slow */ false, /* is_alloc */ false); - ifree(tsd, ptr, tcache, /* slow */ false, usable); + ifree(tsd, ptr, tcache, /* slow */ false, usize); } else { tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC, /* slow */ true, /* is_alloc */ false); uintptr_t args_raw[3] = {(uintptr_t)ptr}; hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw); - ifree(tsd, ptr, tcache, /* slow */ true, usable); + ifree(tsd, ptr, tcache, /* slow */ true, usize); } check_entry_exit_locking(tsd_tsdn(tsd)); @@ -3064,7 +3064,7 @@ free_fastpath_nonfast_aligned(void *ptr, bool check_prof) { /* Returns whether or not the free attempt was successful. 
*/ JEMALLOC_ALWAYS_INLINE -bool free_fastpath(void *ptr, size_t size, bool size_hint, size_t *usable) { +bool free_fastpath(void *ptr, size_t size, bool size_hint, size_t *usable_size) { tsd_t *tsd = tsd_get(false); /* The branch gets optimized away unless tsd_get_allocates(). */ if (unlikely(tsd == NULL)) { @@ -3118,7 +3118,6 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint, size_t *usable) { te_free_fastpath_ctx(tsd, &deallocated, &threshold); size_t usize = sz_index2size(alloc_ctx.szind); - if (usable) *usable = usize; uint64_t deallocated_after = deallocated + usize; /* * Check for events and tsd non-nominal (fast_threshold will be set to @@ -3134,6 +3133,7 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint, size_t *usable) { bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx); if (fail) { /* See the comment in isfree. */ + if (usable_size) *usable_size = usize; return true; } @@ -3154,6 +3154,7 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint, size_t *usable) { *tsd_thread_deallocatedp_get(tsd) = deallocated_after; + if (usable_size) *usable_size = usize; return true; } From a638a2245379b3723317a3d85735e2fb702a0d91 Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Tue, 27 May 2025 10:49:12 +0800 Subject: [PATCH 08/14] Refine code --- src/zmalloc.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/zmalloc.c b/src/zmalloc.c index fb34e738209..11c73416185 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -451,22 +451,18 @@ size_t zmalloc_usable_size(void *ptr) { #endif void zfree(void *ptr) { -#ifndef HAVE_MALLOC_SIZE - void *realptr; - size_t oldsize; -#endif - if (ptr == NULL) return; #ifdef HAVE_USABLE_EXT - size_t usize; - free_with_usize(ptr, &usize); - update_zmalloc_stat_free(usize); + size_t oldsize; + free_with_usize(ptr, &oldsize); + update_zmalloc_stat_free(oldsize); #elif HAVE_MALLOC_SIZE update_zmalloc_stat_free(zmalloc_size(ptr)); free(ptr); #else - realptr = 
(char*)ptr-PREFIX_SIZE; + size_t oldsize; + void *realptr = (char*)ptr-PREFIX_SIZE; oldsize = *((size_t*)realptr); update_zmalloc_stat_free(oldsize+PREFIX_SIZE); free(realptr); From 69d173522c4c57359617d65c3dfe73eb004d5cc6 Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Tue, 27 May 2025 17:02:28 +0800 Subject: [PATCH 09/14] Refine code --- .../include/jemalloc/jemalloc_macros.h.in | 5 +- deps/jemalloc/src/jemalloc.c | 72 +++++++++---------- src/zmalloc.c | 16 ++--- src/zmalloc.h | 4 +- 4 files changed, 48 insertions(+), 49 deletions(-) diff --git a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in index 47054e567d3..8d81a75c9b7 100644 --- a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in +++ b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in @@ -152,6 +152,5 @@ * function. */ #define JEMALLOC_FRAG_HINT -/* This version of Jemalloc, modified for Redis, has the je_malloc_usable() function and - * je_free_usable() function. */ -#define JEMALLOC_USABLE_EXT +/* This version of Jemalloc, modified for Redis, has the je_*_usable() family functions. 
*/ +#define JEMALLOC_ALLOC_WITH_USIZE diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index 76779796248..a8052bb0d42 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -4500,15 +4500,39 @@ malloc_with_usize(size_t size, size_t *usize) { return imalloc_fastpath(size, &malloc_default, usize); } -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -free_with_usize(void *ptr, size_t *usize) { - LOG("core.free.entry", "ptr: %p", ptr); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) +calloc_with_usize(size_t num, size_t size, size_t *usize) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; - if (!free_fastpath(ptr, 0, false, usize)) { - free_default(ptr, usize); + LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.may_overflow = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = ": Error in calloc(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = num; + dopts.item_size = size; + dopts.zero = true; + + imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size}; + hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args); } - LOG("core.free.exit", ""); + LOG("core.calloc.exit", "result: %p", ret); + + *usize = dopts.usize; + return ret; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN @@ -4557,37 +4581,13 @@ realloc_with_usize(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) } } -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) -calloc_with_usize(size_t num, size_t size, size_t *usize) { - void *ret; - static_opts_t sopts; - dynamic_opts_t dopts; - - LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); - - 
static_opts_init(&sopts); - dynamic_opts_init(&dopts); - - sopts.may_overflow = true; - sopts.null_out_result_on_error = true; - sopts.set_errno_on_error = true; - sopts.oom_string = ": Error in calloc(): out of memory\n"; - - dopts.result = &ret; - dopts.num_items = num; - dopts.item_size = size; - dopts.zero = true; +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +free_with_usize(void *ptr, size_t *usize) { + LOG("core.free.entry", "ptr: %p", ptr); - imalloc(&sopts, &dopts); - if (sopts.slow) { - uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size}; - hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args); + if (!free_fastpath(ptr, 0, false, usize)) { + free_default(ptr, usize); } - LOG("core.calloc.exit", "result: %p", ret); - - *usize = dopts.usize; - return ret; + LOG("core.free.exit", ""); } diff --git a/src/zmalloc.c b/src/zmalloc.c index 11c73416185..39386c0dbea 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -67,7 +67,7 @@ void zlibc_free(void *ptr) { #define mallocx(size,flags) je_mallocx(size,flags) #define rallocx(ptr,size,flags) je_rallocx(ptr,size,flags) #define dallocx(ptr,flags) je_dallocx(ptr,flags) -#if defined(HAVE_USABLE_EXT) +#if defined(HAVE_ALLOC_WITH_USIZE) void *je_malloc_with_usize(size_t size, size_t *usize); void *je_calloc_with_usize(size_t num, size_t size, size_t *usize); void *je_realloc_with_usize(void *ptr, size_t size, size_t *old_usize, size_t *new_usize); @@ -129,13 +129,13 @@ void *extend_to_usable(void *ptr, size_t size) { static inline void *ztrymalloc_usable_internal(size_t size, size_t *usable) { /* Possible overflow, return NULL, so that the caller can panic or handle a failed allocation. 
*/ if (size >= SIZE_MAX/2) return NULL; -#ifdef HAVE_USABLE_EXT +#ifdef HAVE_ALLOC_WITH_USIZE void *ptr = malloc_with_usize(MALLOC_MIN_SIZE(size)+PREFIX_SIZE, &size); #else void *ptr = malloc(MALLOC_MIN_SIZE(size)+PREFIX_SIZE); #endif if (!ptr) return NULL; -#ifdef HAVE_USABLE_EXT +#ifdef HAVE_ALLOC_WITH_USIZE update_zmalloc_stat_alloc(size); if (usable) *usable = size; return ptr; @@ -260,14 +260,14 @@ void zfree_no_tcache(void *ptr) { static inline void *ztrycalloc_usable_internal(size_t size, size_t *usable) { /* Possible overflow, return NULL, so that the caller can panic or handle a failed allocation. */ if (size >= SIZE_MAX/2) return NULL; -#ifdef HAVE_USABLE_EXT +#ifdef HAVE_ALLOC_WITH_USIZE void *ptr = calloc_with_usize(1, MALLOC_MIN_SIZE(size)+PREFIX_SIZE, &size); #else void *ptr = calloc(1, MALLOC_MIN_SIZE(size)+PREFIX_SIZE); #endif if (ptr == NULL) return NULL; -#ifdef HAVE_USABLE_EXT +#ifdef HAVE_ALLOC_WITH_USIZE update_zmalloc_stat_alloc(size); if (usable) *usable = size; return ptr; @@ -360,7 +360,7 @@ static inline void *ztryrealloc_usable_internal(void *ptr, size_t size, size_t * if (usable) *usable = 0; return NULL; } -#ifdef HAVE_USABLE_EXT +#ifdef HAVE_ALLOC_WITH_USIZE newptr = realloc_with_usize(ptr, size, &oldsize, &size); if (newptr == NULL) { if (usable) *usable = 0; @@ -453,7 +453,7 @@ size_t zmalloc_usable_size(void *ptr) { void zfree(void *ptr) { if (ptr == NULL) return; -#ifdef HAVE_USABLE_EXT +#ifdef HAVE_ALLOC_WITH_USIZE size_t oldsize; free_with_usize(ptr, &oldsize); update_zmalloc_stat_free(oldsize); @@ -478,7 +478,7 @@ void zfree_usable(void *ptr, size_t *usable) { if (ptr == NULL) return; -#ifdef HAVE_USABLE_EXT +#ifdef HAVE_ALLOC_WITH_USIZE free_with_usize(ptr, usable); update_zmalloc_stat_free(*usable); #elif HAVE_MALLOC_SIZE diff --git a/src/zmalloc.h b/src/zmalloc.h index 47bfdb35bd5..9ad2f55a818 100644 --- a/src/zmalloc.h +++ b/src/zmalloc.h @@ -83,8 +83,8 @@ /* We can enable the Redis defrag capabilities only if we are using 
Jemalloc * and the version used is our special version modified for Redis having * the ability to return usable size during allocation or deallocation. */ -#if defined(USE_JEMALLOC) && defined(JEMALLOC_USABLE_EXT) -#define HAVE_USABLE_EXT +#if defined(USE_JEMALLOC) && defined(JEMALLOC_ALLOC_WITH_USIZE) +#define HAVE_ALLOC_WITH_USIZE #endif /* 'noinline' attribute is intended to prevent the `-Wstringop-overread` warning From 164407e72da23d9ad65c1e7f828896550cc6a49f Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Wed, 28 May 2025 17:48:12 +0800 Subject: [PATCH 10/14] Extract the same code and add comment Co-authored-by: oranagra --- deps/jemalloc/src/jemalloc.c | 130 ++++++++++------------------------- src/zmalloc.h | 2 +- 2 files changed, 39 insertions(+), 93 deletions(-) diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index a8052bb0d42..b44c4ce00d6 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -2740,11 +2740,15 @@ malloc_default(size_t size, size_t *usize) { * Begin malloc(3)-compatible functions. 
*/ +static inline void *je_malloc_internal(size_t size, size_t *usize) { + return imalloc_fastpath(size, &malloc_default, usize); +} + JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { - return imalloc_fastpath(size, &malloc_default, NULL); + return je_malloc_internal(size, NULL); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW @@ -2827,10 +2831,7 @@ je_aligned_alloc(size_t alignment, size_t size) { return ret; } -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) -je_calloc(size_t num, size_t size) { +static void *je_calloc_internal(size_t num, size_t size, size_t *usize) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; @@ -2858,9 +2859,17 @@ je_calloc(size_t num, size_t size) { LOG("core.calloc.exit", "result: %p", ret); + *usize = dopts.usize; return ret; } +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) +je_calloc(size_t num, size_t size) { + return je_calloc_internal(num, size, NULL);; +} + JEMALLOC_ALWAYS_INLINE void ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path, size_t *usable) { if (!slow_path) { @@ -3158,17 +3167,21 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint, size_t *usable_size) return true; } -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_free(void *ptr) { +static inline void je_free_internal(void *ptr, size_t *usize) { LOG("core.free.entry", "ptr: %p", ptr); - if (!free_fastpath(ptr, 0, false, NULL)) { - free_default(ptr, NULL); + if (!free_fastpath(ptr, 0, false, usize)) { + free_default(ptr, usize); } LOG("core.free.exit", ""); } +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_free(void *ptr) { + je_free_internal(ptr, NULL); +} + /* * End malloc(3)-compatible functions. 
*/ @@ -3626,18 +3639,15 @@ do_realloc_nonnull_zero(void *ptr, size_t *old_usize, size_t *new_usize) { } } -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ALLOC_SIZE(2) -je_realloc(void *ptr, size_t size) { +static inline void *je_realloc_internal(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) { LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); if (likely(ptr != NULL && size != 0)) { - void *ret = do_rallocx(ptr, size, 0, true, NULL, NULL); + void *ret = do_rallocx(ptr, size, 0, true, old_usize, new_usize); LOG("core.realloc.exit", "result: %p", ret); return ret; } else if (ptr != NULL && size == 0) { - void *ret = do_realloc_nonnull_zero(ptr, NULL, NULL); + void *ret = do_realloc_nonnull_zero(ptr, old_usize, new_usize); LOG("core.realloc.exit", "result: %p", ret); return ret; } else { @@ -3666,10 +3676,19 @@ je_realloc(void *ptr, size_t size) { (uintptr_t)ret, args); } LOG("core.realloc.exit", "result: %p", ret); + *old_usize = 0; + *new_usize = dopts.usize; return ret; } } +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_realloc(void *ptr, size_t size) { + return je_realloc_internal(ptr, size, NULL, NULL); +} + JEMALLOC_ALWAYS_INLINE size_t ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { @@ -4497,97 +4516,24 @@ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) malloc_with_usize(size_t size, size_t *usize) { - return imalloc_fastpath(size, &malloc_default, usize); + return je_malloc_internal(size, usize); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) calloc_with_usize(size_t num, size_t size, size_t *usize) { - void *ret; - static_opts_t sopts; - dynamic_opts_t dopts; - - 
LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); - - static_opts_init(&sopts); - dynamic_opts_init(&dopts); - - sopts.may_overflow = true; - sopts.null_out_result_on_error = true; - sopts.set_errno_on_error = true; - sopts.oom_string = ": Error in calloc(): out of memory\n"; - - dopts.result = &ret; - dopts.num_items = num; - dopts.item_size = size; - dopts.zero = true; - - imalloc(&sopts, &dopts); - if (sopts.slow) { - uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size}; - hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args); - } - - LOG("core.calloc.exit", "result: %p", ret); - - *usize = dopts.usize; - return ret; + return je_calloc_internal(num, size, usize); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) realloc_with_usize(void *ptr, size_t size, size_t *old_usize, size_t *new_usize) { - LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); - - if (likely(ptr != NULL && size != 0)) { - void *ret = do_rallocx(ptr, size, 0, true, old_usize, new_usize); - LOG("core.realloc.exit", "result: %p", ret); - return ret; - } else if (ptr != NULL && size == 0) { - void *ret = do_realloc_nonnull_zero(ptr, old_usize, new_usize); - LOG("core.realloc.exit", "result: %p", ret); - return ret; - } else { - /* realloc(NULL, size) is equivalent to malloc(size). 
*/ - void *ret; - - static_opts_t sopts; - dynamic_opts_t dopts; - - static_opts_init(&sopts); - dynamic_opts_init(&dopts); - - sopts.null_out_result_on_error = true; - sopts.set_errno_on_error = true; - sopts.oom_string = - ": Error in realloc(): out of memory\n"; - - dopts.result = &ret; - dopts.num_items = 1; - dopts.item_size = size; - - imalloc(&sopts, &dopts); - if (sopts.slow) { - uintptr_t args[3] = {(uintptr_t)ptr, size}; - hook_invoke_alloc(hook_alloc_realloc, ret, - (uintptr_t)ret, args); - } - LOG("core.realloc.exit", "result: %p", ret); - *old_usize = 0; - *new_usize = dopts.usize; - return ret; - } + return je_realloc_internal(ptr, size, old_usize, new_usize); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW free_with_usize(void *ptr, size_t *usize) { - LOG("core.free.entry", "ptr: %p", ptr); - - if (!free_fastpath(ptr, 0, false, usize)) { - free_default(ptr, usize); - } - - LOG("core.free.exit", ""); + je_free_internal(ptr, usize); } diff --git a/src/zmalloc.h b/src/zmalloc.h index 9ad2f55a818..0704467fd48 100644 --- a/src/zmalloc.h +++ b/src/zmalloc.h @@ -80,7 +80,7 @@ #define HAVE_DEFRAG #endif -/* We can enable the Redis defrag capabilities only if we are using Jemalloc +/* We can enable allocation with usable size capabilities only if we are using Jemalloc * and the version used is our special version modified for Redis having * the ability to return usable size during allocation or deallocation. 
*/ #if defined(USE_JEMALLOC) && defined(JEMALLOC_ALLOC_WITH_USIZE) From 7c6c6b471ee35cb862ba529fb7ed2d0c9a2290a3 Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Thu, 29 May 2025 10:28:23 +0800 Subject: [PATCH 11/14] Fix the NULL check for usize in je_calloc_internal() --- deps/jemalloc/src/jemalloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index b44c4ce00d6..784f317407b 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -2859,7 +2859,7 @@ static void *je_calloc_internal(size_t num, size_t size, size_t *usize) { LOG("core.calloc.exit", "result: %p", ret); - *usize = dopts.usize; + if (usize) *usize = dopts.usize; return ret; } From 225e52dd6ac820895a01225562f07154a8ef6b65 Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Thu, 29 May 2025 10:38:18 +0800 Subject: [PATCH 12/14] Fix the NULL check for old_usize and new_usize in je_realloc_internal() --- deps/jemalloc/src/jemalloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index 784f317407b..2e54d4a2321 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -3676,8 +3676,8 @@ static inline void *je_realloc_internal(void *ptr, size_t size, size_t *old_usiz (uintptr_t)ret, args); } LOG("core.realloc.exit", "result: %p", ret); - *old_usize = 0; - *new_usize = dopts.usize; + if (old_usize) *old_usize = 0; + if (new_usize) *new_usize = dopts.usize; return ret; } } From a23cd0471df28c955743b3f18871ec310b8f6fee Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Thu, 29 May 2025 12:43:15 +0800 Subject: [PATCH 13/14] Update deps/jemalloc/src/jemalloc.c Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- deps/jemalloc/src/jemalloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index
2e54d4a2321..9a115f8d9a3 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -2867,7 +2867,7 @@ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) je_calloc(size_t num, size_t size) { - return je_calloc_internal(num, size, NULL);; + return je_calloc_internal(num, size, NULL); } JEMALLOC_ALWAYS_INLINE void From d459ef1fad038f7ec75c8c7583134abf9b0ece88 Mon Sep 17 00:00:00 2001 From: "debing.sun" Date: Wed, 11 Jun 2025 20:54:48 +0800 Subject: [PATCH 14/14] Revert fuzzer test --- tests/integration/corrupt-dump-fuzzer.tcl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/corrupt-dump-fuzzer.tcl b/tests/integration/corrupt-dump-fuzzer.tcl index 60210d0582c..5c7c9923ad6 100644 --- a/tests/integration/corrupt-dump-fuzzer.tcl +++ b/tests/integration/corrupt-dump-fuzzer.tcl @@ -16,8 +16,8 @@ if { ! [ catch { proc generate_collections {suffix elements} { set rd [redis_deferring_client] set numcmd 7 - # set has_vsets [server_has_command vadd] - # if {$has_vsets} {incr numcmd} + set has_vsets [server_has_command vadd] + if {$has_vsets} {incr numcmd} for {set j 0} {$j < $elements} {incr j} { # add both string values and integers @@ -29,9 +29,9 @@ proc generate_collections {suffix elements} { $rd zadd zset$suffix $j $val $rd sadd set$suffix $val $rd xadd stream$suffix * item 1 value $val - # if {$has_vsets} { - # $rd vadd vset$suffix VALUES 3 1 1 1 $j - # } + if {$has_vsets} { + $rd vadd vset$suffix VALUES 3 1 1 1 $j + } } for {set j 0} {$j < $elements * $numcmd} {incr j} { $rd read ; # Discard replies