From eedb2ae5c26a733612ccb4b8ee80600f8fbef699 Mon Sep 17 00:00:00 2001
From: Lukasz Dorau
Date: Wed, 7 Feb 2024 16:09:55 +0100
Subject: [PATCH 1/2] Make it possible to allocate more than initial pool size

Make it possible to allocate more than the initial pool size
of the linear allocator.

Signed-off-by: Lukasz Dorau
---
 src/base_alloc/base_alloc_linear.c | 32 +++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)

diff --git a/src/base_alloc/base_alloc_linear.c b/src/base_alloc/base_alloc_linear.c
index 99a9a7704..a6f8c2f5b 100644
--- a/src/base_alloc/base_alloc_linear.c
+++ b/src/base_alloc/base_alloc_linear.c
@@ -23,7 +23,7 @@ typedef struct umf_ba_next_linear_pool_t umf_ba_next_linear_pool_t;
 
 // metadata is set and used only in the main (the first) pool
 typedef struct umf_ba_main_linear_pool_meta_t {
-    size_t pool_size; // size of each pool (argument of each ba_os_alloc() call)
+    size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
     os_mutex_t lock;
     char *data_ptr;
     size_t size_left;
@@ -52,6 +52,8 @@ struct umf_ba_next_linear_pool_t {
     // to be freed in umf_ba_linear_destroy())
     umf_ba_next_linear_pool_t *next_pool;
 
+    size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
+
     // data area of all pools except of the main (the first one) starts here
     char data[];
 };
@@ -70,8 +72,8 @@ static void ba_debug_checks(umf_ba_linear_pool_t *pool) {
 #endif /* NDEBUG */
 
 umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size) {
-    size_t metadata_size = sizeof(umf_ba_main_linear_pool_meta_t);
-    pool_size = pool_size + metadata_size;
+    pool_size += sizeof(umf_ba_next_linear_pool_t *) +
+                 sizeof(umf_ba_main_linear_pool_meta_t);
     if (pool_size < MINIMUM_LINEAR_POOL_SIZE) {
         pool_size = MINIMUM_LINEAR_POOL_SIZE;
     }
@@ -110,16 +112,29 @@ void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
     size_t aligned_size = align_size(size, MEMORY_ALIGNMENT);
     util_mutex_lock(&pool->metadata.lock);
     if (pool->metadata.size_left < aligned_size) {
+        size_t pool_size = pool->metadata.pool_size;
+        size_t usable_size =
+            pool_size - offsetof(umf_ba_next_linear_pool_t, data);
+        if (usable_size < aligned_size) {
+            pool_size += aligned_size - usable_size;
+            pool_size = align_size(pool_size, ba_os_get_page_size());
+        }
+
+        assert(pool_size - offsetof(umf_ba_next_linear_pool_t, data) >=
+               aligned_size);
+
         umf_ba_next_linear_pool_t *new_pool =
-            (umf_ba_next_linear_pool_t *)ba_os_alloc(pool->metadata.pool_size);
+            (umf_ba_next_linear_pool_t *)ba_os_alloc(pool_size);
         if (!new_pool) {
             util_mutex_unlock(&pool->metadata.lock);
             return NULL;
         }
 
+        new_pool->pool_size = pool_size;
+
         void *data_ptr = &new_pool->data;
-        size_t size_left = pool->metadata.pool_size -
-                           offsetof(umf_ba_next_linear_pool_t, data);
+        size_t size_left =
+            new_pool->pool_size - offsetof(umf_ba_next_linear_pool_t, data);
         align_ptr_size(&data_ptr, &size_left, MEMORY_ALIGNMENT);
 
         pool->metadata.data_ptr = data_ptr;
@@ -148,15 +163,14 @@ void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
 #ifndef NDEBUG
     ba_debug_checks(pool);
 #endif /* NDEBUG */
-    size_t size = pool->metadata.pool_size;
     umf_ba_next_linear_pool_t *current_pool;
     umf_ba_next_linear_pool_t *next_pool = pool->next_pool;
     while (next_pool) {
         current_pool = next_pool;
         next_pool = next_pool->next_pool;
-        ba_os_free(current_pool, size);
+        ba_os_free(current_pool, current_pool->pool_size);
     }
 
     util_mutex_destroy_not_free(&pool->metadata.lock);
-    ba_os_free(pool, size);
+    ba_os_free(pool, pool->metadata.pool_size);
 }
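A minimal usage sketch of the behavior this patch enables (not part of the
patch; it assumes only the umf_ba_linear_* API declared in
src/base_alloc/base_alloc_linear.h, and the 4096-byte initial size is an
arbitrary example):

#include <assert.h>
#include <string.h>

#include "base_alloc_linear.h"

int main(void) {
    // create a pool with a small initial size (umf_ba_linear_create()
    // adds room for metadata and enforces MINIMUM_LINEAR_POOL_SIZE)
    umf_ba_linear_pool_t *pool = umf_ba_linear_create(4096);
    assert(pool);

    // request far more than the initial pool size; with this patch the
    // next pool is sized to fit the request (rounded up to the OS page
    // size) instead of being allocated with the fixed initial pool_size
    size_t big_size = 1024 * 1024;
    void *big = umf_ba_linear_alloc(pool, big_size);
    assert(big);
    memset(big, 0, big_size); // the whole region is usable

    // each pool now records its own size, so destroy frees each one
    // with the size it was actually allocated with
    umf_ba_linear_destroy(pool);
    return 0;
}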
From 3e27c1e5ef52b8658068d75574a36d7f9ad47a3e Mon Sep 17 00:00:00 2001
From: Lukasz Dorau
Date: Wed, 7 Feb 2024 18:12:57 +0100
Subject: [PATCH 2/2] Add umf_ba_linear_pool_contains_pointer

Add umf_ba_linear_pool_contains_pointer(). It returns:
- 0 if ptr does not belong to the pool or
- size (> 0) of the memory region from ptr
  to the end of the pool if ptr belongs to the pool.

It will be useful to implement realloc() in the proxy library.

Signed-off-by: Lukasz Dorau
---
 src/base_alloc/base_alloc_linear.c | 30 ++++++++++++++++++++++++++++++
 src/base_alloc/base_alloc_linear.h |  2 ++
 2 files changed, 32 insertions(+)

diff --git a/src/base_alloc/base_alloc_linear.c b/src/base_alloc/base_alloc_linear.c
index a6f8c2f5b..fa5dd3a3c 100644
--- a/src/base_alloc/base_alloc_linear.c
+++ b/src/base_alloc/base_alloc_linear.c
@@ -174,3 +174,33 @@ void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
     util_mutex_destroy_not_free(&pool->metadata.lock);
     ba_os_free(pool, pool->metadata.pool_size);
 }
+
+// umf_ba_linear_pool_contains_pointer() returns:
+// - 0 if ptr does not belong to the pool or
+// - size (> 0) of the memory region from ptr
+//   to the end of the pool if ptr belongs to the pool
+size_t umf_ba_linear_pool_contains_pointer(umf_ba_linear_pool_t *pool,
+                                           void *ptr) {
+    util_mutex_lock(&pool->metadata.lock);
+    char *cptr = (char *)ptr;
+    if (cptr >= pool->data &&
+        cptr < ((char *)(pool)) + pool->metadata.pool_size) {
+        size_t size = ((char *)(pool)) + pool->metadata.pool_size - cptr;
+        util_mutex_unlock(&pool->metadata.lock);
+        return size;
+    }
+
+    umf_ba_next_linear_pool_t *next_pool = pool->next_pool;
+    while (next_pool) {
+        if (cptr >= next_pool->data &&
+            cptr < ((char *)(next_pool)) + next_pool->pool_size) {
+            size_t size = ((char *)(next_pool)) + next_pool->pool_size - cptr;
+            util_mutex_unlock(&pool->metadata.lock);
+            return size;
+        }
+        next_pool = next_pool->next_pool;
+    }
+
+    util_mutex_unlock(&pool->metadata.lock);
+    return 0;
+}
diff --git a/src/base_alloc/base_alloc_linear.h b/src/base_alloc/base_alloc_linear.h
index 710bc560d..ccb408b53 100644
--- a/src/base_alloc/base_alloc_linear.h
+++ b/src/base_alloc/base_alloc_linear.h
@@ -26,6 +26,8 @@ typedef struct umf_ba_linear_pool umf_ba_linear_pool_t;
 umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size);
 void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size);
 void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool);
+size_t umf_ba_linear_pool_contains_pointer(umf_ba_linear_pool_t *pool,
+                                           void *ptr);
 
 #ifdef __cplusplus
 }
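A sketch of how this query could back realloc() in the proxy library, as the
commit message anticipates (not part of the patch; ba_linear_realloc() is a
hypothetical helper, and since the linear allocator does not track
per-allocation sizes, the returned region size only bounds how many bytes
starting at ptr can be copied safely):

#include <string.h>

#include "base_alloc_linear.h"

// hypothetical helper, sketched for illustration only
static void *ba_linear_realloc(umf_ba_linear_pool_t *pool, void *ptr,
                               size_t new_size) {
    // 0 means ptr was not allocated from this pool
    size_t max_size = umf_ba_linear_pool_contains_pointer(pool, ptr);
    if (max_size == 0) {
        return NULL;
    }

    void *new_ptr = umf_ba_linear_alloc(pool, new_size);
    if (new_ptr) {
        // copy at most the bytes known to lie inside the pool
        memcpy(new_ptr, ptr, new_size < max_size ? new_size : max_size);
    }

    // a linear allocator cannot free individual allocations, so the old
    // region is simply abandoned until umf_ba_linear_destroy()
    return new_ptr;
}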