From c12dcb1a3dbb95c2c6102549d84db3bf8e053d32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Thu, 11 Sep 2025 12:09:36 +0200 Subject: [PATCH 01/11] add defaults to providers --- include/umf/memory_provider_ops.h | 2 +- src/CMakeLists.txt | 1 + src/ctl/ctl_defaults.c | 146 +++++++++++++++++++++++++++++ src/ctl/ctl_defaults.h | 51 ++++++++++ src/libumf.c | 1 + src/memory_pool.c | 149 +++--------------------------- src/memory_provider.c | 34 +++++++ src/memory_provider_internal.h | 1 + src/provider/provider_tracking.c | 5 + test/common/provider_trace.c | 5 + test/ctl/ctl_api.cpp | 4 +- 11 files changed, 257 insertions(+), 142 deletions(-) create mode 100644 src/ctl/ctl_defaults.c create mode 100644 src/ctl/ctl_defaults.h diff --git a/include/umf/memory_provider_ops.h b/include/umf/memory_provider_ops.h index a520ed8891..430afc09d5 100644 --- a/include/umf/memory_provider_ops.h +++ b/include/umf/memory_provider_ops.h @@ -126,7 +126,7 @@ typedef struct umf_memory_provider_ops_t { /// \details /// * Implementations *must* return a literal null-terminated string. /// - /// * Implementations *must* return default pool name when NULL is provided, + /// * Implementations *must* return default provider name when NULL is provided, /// otherwise the pool's name is returned. /// /// * The returned name should not exceed 64 characters and may contain diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 2948393091..45046fdc6d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -38,6 +38,7 @@ set(UMF_LIBS umf_utils umf_ba umf_coarse $) set(UMF_SOURCES ctl/ctl.c + ctl/ctl_defaults.c libumf.c ipc.c ipc_cache.c diff --git a/src/ctl/ctl_defaults.c b/src/ctl/ctl_defaults.c new file mode 100644 index 0000000000..3d087d51bf --- /dev/null +++ b/src/ctl/ctl_defaults.c @@ -0,0 +1,146 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "ctl_defaults.h" + +#include + +#include "base_alloc_global.h" +#include "utils_concurrency.h" +#include "utils_log.h" +#include "utlist.h" + +static umf_result_t default_ctl_helper(ctl_ext_ctl_fn fn, void *ctl, + const char *name, void *arg, size_t size, + ...) 
{ + va_list empty_args; + va_start(empty_args, size); + umf_result_t ret = fn(ctl, CTL_QUERY_PROGRAMMATIC, name, arg, size, + CTL_QUERY_WRITE, empty_args); + va_end(empty_args); + return ret; +} + +umf_result_t ctl_default_subtree(ctl_default_entry_t **list, utils_mutex_t *mtx, + umf_ctl_query_source_t source, void *arg, + size_t size, const char *extra_name, + umf_ctl_query_type_t queryType) { + (void)source; + if (strstr(extra_name, "{}") != NULL) { + LOG_ERR("%s, default setting do not support wildcard parameters {}", + extra_name); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + + utils_mutex_lock(mtx); + + ctl_default_entry_t *entry = NULL; + LL_FOREACH(*list, entry) { + if (strcmp(entry->name, extra_name) == 0) { + break; + } + } + + if (queryType == CTL_QUERY_WRITE) { + bool is_new_entry = false; + if (!entry) { + entry = umf_ba_global_alloc(sizeof(*entry)); + if (!entry) { + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + entry->name = NULL; + entry->value = NULL; + entry->next = NULL; + is_new_entry = true; + } + + char *new_name = umf_ba_global_strdup(extra_name); + if (!new_name) { + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + + if (entry->name) { + umf_ba_global_free(entry->name); + } + entry->name = new_name; + + void *new_value = NULL; + if (size > 0) { + new_value = umf_ba_global_alloc(size); + if (!new_value) { + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + memcpy(new_value, arg, size); + } + if (entry->value) { + umf_ba_global_free(entry->value); + } + entry->value = new_value; + entry->value_size = size; + entry->source = source; + + if (is_new_entry) { + LL_APPEND(*list, entry); + } + } else if (queryType == CTL_QUERY_READ) { + if (!entry) { + LOG_WARN("Wrong path name: %s", extra_name); + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (entry->value_size > size) { + LOG_ERR("Provided buffer size %zu is smaller than field size %zu", + size, entry->value_size); + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + memcpy(arg, entry->value, entry->value_size); + } + + utils_mutex_unlock(mtx); + return UMF_RESULT_SUCCESS; +} + +void ctl_default_apply(ctl_default_entry_t *list, const char *pname, + ctl_ext_ctl_fn ext_ctl, void *priv) { + if (!pname || !ext_ctl) { + return; + } + + size_t pname_len = strlen(pname); + ctl_default_entry_t *it = NULL; + LL_FOREACH(list, it) { + if (strlen(it->name) > pname_len + 1 && + strncmp(it->name, pname, pname_len) == 0 && + it->name[pname_len] == '.') { + const char *ctl_name = it->name + pname_len + 1; + default_ctl_helper(ext_ctl, priv, ctl_name, it->value, + it->value_size); + } + } +} + +void ctl_default_destroy(ctl_default_entry_t **list, utils_mutex_t *mtx) { + utils_mutex_lock(mtx); + ctl_default_entry_t *entry = NULL, *tmp = NULL; + LL_FOREACH_SAFE(*list, entry, tmp) { + LL_DELETE(*list, entry); + if (entry->name) { + umf_ba_global_free(entry->name); + } + if (entry->value) { + umf_ba_global_free(entry->value); + } + umf_ba_global_free(entry); + } + utils_mutex_unlock(mtx); +} diff --git a/src/ctl/ctl_defaults.h b/src/ctl/ctl_defaults.h new file mode 100644 index 0000000000..2e57175d60 --- /dev/null +++ b/src/ctl/ctl_defaults.h @@ -0,0 +1,51 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef UMF_CTL_DEFAULTS_H +#define UMF_CTL_DEFAULTS_H 1 + +#include +#include + +#include + +#include "ctl_internal.h" +#include "utils_concurrency.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ctl_default_entry_t { + char *name; + void *value; + size_t value_size; + umf_ctl_query_source_t source; + struct ctl_default_entry_t *next; +} ctl_default_entry_t; + +umf_result_t ctl_default_subtree(ctl_default_entry_t **list, utils_mutex_t *mtx, + umf_ctl_query_source_t source, void *arg, + size_t size, const char *extra_name, + umf_ctl_query_type_t queryType); + +typedef umf_result_t (*ctl_ext_ctl_fn)(void *obj, umf_ctl_query_source_t source, + const char *name, void *arg, size_t size, + umf_ctl_query_type_t queryType, + va_list args); + +void ctl_default_apply(ctl_default_entry_t *list, const char *pname, + ctl_ext_ctl_fn ext_ctl, void *priv); + +void ctl_default_destroy(ctl_default_entry_t **list, utils_mutex_t *mtx); + +#ifdef __cplusplus +} +#endif + +#endif /* UMF_CTL_DEFAULTS_H */ diff --git a/src/libumf.c b/src/libumf.c index 33604ebb31..b11a55cc9e 100644 --- a/src/libumf.c +++ b/src/libumf.c @@ -139,6 +139,7 @@ umf_result_t umfTearDown(void) { umfMemoryTrackerDestroy(t); LOG_DEBUG("UMF tracker destroyed"); + umfProviderCtlDefaultsDestroy(); umfPoolCtlDefaultsDestroy(); umf_ba_destroy_global(); diff --git a/src/memory_pool.c b/src/memory_pool.c index cd3f15522e..83069fa50d 100644 --- a/src/memory_pool.c +++ b/src/memory_pool.c @@ -17,6 +17,7 @@ #include "base_alloc_global.h" #include "ctl/ctl_internal.h" +#include "ctl/ctl_defaults.h" #include "libumf.h" #include "memory_pool_internal.h" #include "memory_provider_internal.h" @@ -38,17 +39,8 @@ static bool uthash_oom = false; #include "uthash.h" -typedef struct ctl_default_entry_t { - char *name; - void *value; - size_t value_size; - umf_ctl_query_source_t source; - struct ctl_default_entry_t *next; -} ctl_default_entry_t; - -static ctl_default_entry_t *ctl_default_list = NULL; - -utils_mutex_t ctl_mtx; +static ctl_default_entry_t *pool_default_list = NULL; +static utils_mutex_t pool_default_mtx; static UTIL_ONCE_FLAG mem_pool_ctl_initialized = UTIL_ONCE_FLAG_INIT; static struct ctl umf_pool_ctl_root; @@ -187,95 +179,12 @@ static umf_result_t CTL_SUBTREE_HANDLER(default)( void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, umf_ctl_index_utlist_t *indexes, const char *extra_name, umf_ctl_query_type_t queryType, va_list args) { - (void)indexes, (void)source, (void)ctx, (void)args; + (void)indexes; + (void)ctx; + (void)args; utils_init_once(&mem_pool_ctl_initialized, pool_ctl_init); - - if (strstr(extra_name, "{}") != NULL) { - // We might implement it in future - it requires store copy of va_list - // in defaults entries array, which according to C standard is possible, - // but quite insane. 
- LOG_ERR("%s, default setting do not support wildcard parameters {}", - extra_name); - return UMF_RESULT_ERROR_NOT_SUPPORTED; - } - - utils_mutex_lock(&ctl_mtx); - - ctl_default_entry_t *entry = NULL; - LL_FOREACH(ctl_default_list, entry) { - if (strcmp(entry->name, extra_name) == 0) { - break; - } - } - - if (queryType == CTL_QUERY_WRITE) { - bool is_new_entry = false; - if (entry == NULL) { - entry = umf_ba_global_alloc(sizeof(*entry)); - if (entry == NULL) { - utils_mutex_unlock(&ctl_mtx); - return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; - } - - entry->name = NULL; - entry->value = NULL; - entry->next = NULL; - is_new_entry = true; - } - - size_t name_len = strlen(extra_name) + 1; - char *new_name = umf_ba_global_alloc(name_len); - if (new_name == NULL) { - utils_mutex_unlock(&ctl_mtx); - return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; - } - - memcpy(new_name, extra_name, name_len); - if (entry->name) { - umf_ba_global_free(entry->name); - } - entry->name = new_name; - - void *new_value = NULL; - if (size > 0) { - new_value = umf_ba_global_alloc(size); - if (new_value == NULL) { - utils_mutex_unlock(&ctl_mtx); - return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; - } - memcpy(new_value, arg, size); - } - - if (entry->value) { - umf_ba_global_free(entry->value); - } - - entry->value = new_value; - entry->value_size = size; - entry->source = source; - - if (is_new_entry) { - LL_APPEND(ctl_default_list, entry); - } - } else if (queryType == CTL_QUERY_READ) { - if (entry == NULL) { - LOG_WARN("Wrong path name: %s", extra_name); - utils_mutex_unlock(&ctl_mtx); - return UMF_RESULT_ERROR_INVALID_ARGUMENT; - } - - if (entry->value_size > size) { - LOG_ERR("Provided buffer size %zu is smaller than field size %zu", - size, entry->value_size); - utils_mutex_unlock(&ctl_mtx); - return UMF_RESULT_ERROR_INVALID_ARGUMENT; - } - memcpy(arg, entry->value, entry->value_size); - } - - utils_mutex_unlock(&ctl_mtx); - - return UMF_RESULT_SUCCESS; + return ctl_default_subtree(&pool_default_list, &pool_default_mtx, source, + arg, size, extra_name, queryType); } static umf_result_t @@ -438,7 +347,7 @@ umf_ctl_node_t CTL_NODE(pool)[] = {CTL_CHILD_WITH_ARG(by_handle), CTL_LEAF_SUBTREE(default), CTL_NODE_END}; static void pool_ctl_init(void) { - utils_mutex_init(&ctl_mtx); + utils_mutex_init(&pool_default_mtx); CTL_REGISTER_MODULE(&umf_pool_ctl_root, stats); } @@ -468,17 +377,6 @@ static const umf_pool_create_flags_t UMF_POOL_CREATE_FLAG_ALL = UMF_POOL_CREATE_FLAG_OWN_PROVIDER | UMF_POOL_CREATE_FLAG_DISABLE_TRACKING; // windows do not allow to use uninitialized va_list so this function help us to initialize it. -static umf_result_t default_ctl_helper(const umf_memory_pool_ops_t *ops, - void *ctl, const char *name, void *arg, - size_t size, ...) 
{ - va_list empty_args; - va_start(empty_args, size); - umf_result_t ret = ops->ext_ctl(ctl, CTL_QUERY_PROGRAMMATIC, name, arg, - size, CTL_QUERY_WRITE, empty_args); - va_end(empty_args); - return ret; -} - static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, umf_memory_provider_handle_t provider, const void *params, @@ -567,17 +465,7 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, } assert(pname != NULL); - size_t pname_len = strlen(pname); - ctl_default_entry_t *it = NULL; - LL_FOREACH(ctl_default_list, it) { - if (strlen(it->name) > pname_len + 1 && - strncmp(it->name, pname, pname_len) == 0 && - it->name[pname_len] == '.') { - const char *ctl_name = it->name + pname_len + 1; - default_ctl_helper(ops, pool->pool_priv, ctl_name, it->value, - it->value_size); - } - } + ctl_default_apply(pool_default_list, pname, ops->ext_ctl, pool->pool_priv); *hPool = pool; pools_by_name_add(pool); @@ -803,20 +691,5 @@ umf_result_t umfPoolTrimMemory(umf_memory_pool_handle_t hPool, void umfPoolCtlDefaultsDestroy(void) { utils_init_once(&mem_pool_ctl_initialized, pool_ctl_init); - - utils_mutex_lock(&ctl_mtx); - - ctl_default_entry_t *entry = NULL, *tmp = NULL; - LL_FOREACH_SAFE(ctl_default_list, entry, tmp) { - LL_DELETE(ctl_default_list, entry); - if (entry->name) { - umf_ba_global_free(entry->name); - } - if (entry->value) { - umf_ba_global_free(entry->value); - } - umf_ba_global_free(entry); - } - - utils_mutex_unlock(&ctl_mtx); + ctl_default_destroy(&pool_default_list, &pool_default_mtx); } diff --git a/src/memory_provider.c b/src/memory_provider.c index 324fa751bd..183355b80c 100644 --- a/src/memory_provider.c +++ b/src/memory_provider.c @@ -19,6 +19,7 @@ #include "base_alloc.h" #include "base_alloc_global.h" #include "ctl/ctl_internal.h" +#include "ctl/ctl_defaults.h" #include "libumf.h" #include "memory_provider_internal.h" #include "utils_assert.h" @@ -45,7 +46,28 @@ static umf_ctl_node_t CTL_NODE(by_handle)[] = { static const struct ctl_argument CTL_ARG(by_handle) = CTL_ARG_PTR; +static ctl_default_entry_t *provider_default_list = NULL; +static utils_mutex_t provider_default_mtx; +static UTIL_ONCE_FLAG mem_provider_ctl_initialized = UTIL_ONCE_FLAG_INIT; + +static void provider_ctl_init(void) { + utils_mutex_init(&provider_default_mtx); +} + +static umf_result_t CTL_SUBTREE_HANDLER(default)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes, const char *extra_name, + umf_ctl_query_type_t queryType, va_list args) { + (void)ctx; + (void)indexes; + (void)args; + utils_init_once(&mem_provider_ctl_initialized, provider_ctl_init); + return ctl_default_subtree(&provider_default_list, &provider_default_mtx, + source, arg, size, extra_name, queryType); +} + umf_ctl_node_t CTL_NODE(provider)[] = {CTL_CHILD_WITH_ARG(by_handle), + CTL_LEAF_SUBTREE(default), CTL_NODE_END}; static umf_result_t umfDefaultPurgeLazy(void *provider, void *ptr, @@ -310,6 +332,13 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops, provider->provider_priv = provider_priv; + utils_init_once(&mem_provider_ctl_initialized, provider_ctl_init); + const char *pname = NULL; + if (provider->ops.get_name(NULL, &pname) == UMF_RESULT_SUCCESS && pname) { + ctl_default_apply(provider_default_list, pname, provider->ops.ext_ctl, + provider->provider_priv); + } + *hProvider = provider; const char *provider_name = NULL; @@ -606,3 +635,8 @@ umf_result_t umfMemoryProviderGetAllocationPropertiesSize( 
checkErrorAndSetLastProvider(res, hProvider); return res; } + +void umfProviderCtlDefaultsDestroy(void) { + utils_init_once(&mem_provider_ctl_initialized, provider_ctl_init); + ctl_default_destroy(&provider_default_list, &provider_default_mtx); +} diff --git a/src/memory_provider_internal.h b/src/memory_provider_internal.h index 65ba5d41c7..b486377acd 100644 --- a/src/memory_provider_internal.h +++ b/src/memory_provider_internal.h @@ -28,6 +28,7 @@ typedef struct umf_memory_provider_t { void *umfMemoryProviderGetPriv(umf_memory_provider_handle_t hProvider); umf_memory_provider_handle_t *umfGetLastFailedMemoryProviderPtr(void); +void umfProviderCtlDefaultsDestroy(void); extern umf_ctl_node_t CTL_NODE(provider)[]; diff --git a/src/provider/provider_tracking.c b/src/provider/provider_tracking.c index cf76d2be74..10bb48e4cc 100644 --- a/src/provider/provider_tracking.c +++ b/src/provider/provider_tracking.c @@ -1060,6 +1060,11 @@ static umf_result_t trackingPurgeForce(void *provider, void *ptr, size_t size) { static umf_result_t trackingName(void *provider, const char **name) { umf_tracking_memory_provider_t *p = (umf_tracking_memory_provider_t *)provider; + // if ops->get_name is called with null provider it must return default provider name + if (!p) { + *name = "tracking"; + return UMF_RESULT_SUCCESS; + } return umfMemoryProviderGetName(p->hUpstream, name); } diff --git a/test/common/provider_trace.c b/test/common/provider_trace.c index 403870511a..c780197c95 100644 --- a/test/common/provider_trace.c +++ b/test/common/provider_trace.c @@ -100,6 +100,11 @@ static umf_result_t traceName(void *provider, const char **name) { umf_provider_trace_params_t *traceProvider = (umf_provider_trace_params_t *)provider; + // if ops->get_name is called with null provider it must return default provider name + if (!provider) { + *name = "trace"; + return UMF_RESULT_SUCCESS; + } traceProvider->trace_handler(traceProvider->trace_context, "name"); return umfMemoryProviderGetName(traceProvider->hUpstreamProvider, name); } diff --git a/test/ctl/ctl_api.cpp b/test/ctl/ctl_api.cpp index a3050645d7..9a4303b04b 100644 --- a/test/ctl/ctl_api.cpp +++ b/test/ctl/ctl_api.cpp @@ -348,12 +348,11 @@ TEST_F(CtlTest, DISABLED_ctlExecInvalidSize) { UMF_RESULT_ERROR_INVALID_ARGUMENT); } -#ifdef PROVIDER_DEFAULTS_NOT_IMPLEMENTED_YET TEST_F(CtlTest, ctlDefaultMultithreadedProvider) { std::vector threads; std::atomic totalRecords = 0; const char *predefined_value = "xyzzyx"; - std::string name_prefix = "umf.provider.default.some_pool."; + std::string name_prefix = "umf.provider.default.some_provider."; for (int i = 0; i < 8; i++) { threads.emplace_back( [i, &totalRecords, &predefined_value, &name_prefix]() { @@ -377,7 +376,6 @@ TEST_F(CtlTest, ctlDefaultMultithreadedProvider) { ASSERT_EQ(std::string(output), std::string(predefined_value)); } } -#endif TEST_F(test, ctl_logger_basic_rw) { bool ts_set = true; From 8208bf90f53692c63a27e108df2346bc2dd3db3a Mon Sep 17 00:00:00 2001 From: Krzysztof Filipek Date: Thu, 17 Jul 2025 15:44:19 +0200 Subject: [PATCH 02/11] add post-initialize function to pools and providers Split between initialize and post-initialize function is necessary for properly handling CTL defaults. 
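The new phase is exposed as a runnable CTL leaf named "post_initialize", so the
core library can apply stored "umf.pool.default.*" / "umf.provider.default.*"
entries between the two calls. A rough sketch of the resulting pool creation
sequence (provider creation mirrors it; names as introduced by this patch):

    /* phase 1: allocate internal state and stash the params */
    ret = pool->ops.initialize(pool->provider, params, &pool->pool_priv);

    /* apply any defaults registered under umf.pool.default.<pool_name>.* */
    ctl_default_apply(pool_default_list, pname, ops->ext_ctl, pool->pool_priv);

    /* phase 2: finish construction with the defaults already in place */
    ops->ext_ctl(pool->pool_priv, CTL_QUERY_PROGRAMMATIC, "post_initialize",
                 NULL, 0, CTL_QUERY_RUNNABLE, args);

Implementations that do not provide a "post_initialize" leaf report
UMF_RESULT_ERROR_INVALID_ARGUMENT, which the caller maps to "not supported" and
ignores, so existing pools and providers keep working unchanged.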
--- .github/workflows/reusable_compatibility.yml | 24 ++++- include/umf/memory_pool.h | 2 +- include/umf/memory_pool_ops.h | 1 + include/umf/memory_provider_ops.h | 1 + src/memory_pool.c | 28 ++++- src/memory_provider.c | 22 ++++ src/pool/pool_disjoint.c | 31 +++++- src/pool/pool_jemalloc.c | 105 +++++++++++++++---- src/pool/pool_scalable.c | 74 +++++++++---- src/provider/provider_cuda.c | 55 +++++++--- src/provider/provider_devdax_memory.c | 3 +- src/provider/provider_file_memory.c | 44 ++++++-- src/provider/provider_fixed_memory.c | 46 ++++++-- src/provider/provider_os_memory.c | 1 - src/provider/provider_tracking.c | 1 - test/pools/disjoint_pool.cpp | 18 ++++ 16 files changed, 371 insertions(+), 85 deletions(-) diff --git a/.github/workflows/reusable_compatibility.yml b/.github/workflows/reusable_compatibility.yml index 2db924bf23..79c02f4f98 100644 --- a/.github/workflows/reusable_compatibility.yml +++ b/.github/workflows/reusable_compatibility.yml @@ -102,8 +102,16 @@ jobs: UMF_LOG: level:warning;flush:debug;output:stderr;pid:no LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ run: | - ctest --verbose -E "test_memoryProvider" + ctest --verbose -E "test_memoryProvider|test_disjoint_pool" + + - name: Run disabled tests individually with latest UMF libs (warnings enabled) + working-directory: ${{github.workspace}}/tag_version/build + env: + UMF_LOG: level:warning;flush:debug;output:stderr;pid:no + LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ + run: | test/test_memoryProvider --gtest_filter="-*Trace" + test/test_disjoint_pool --gtest_filter="-test.internals" # Browse all folders in the examples directory, build them using the # latest UMF version, and run them, excluding those in the exclude list. @@ -228,10 +236,12 @@ jobs: env: UMF_LOG: level:warning;flush:debug;output:stderr;pid:no run: | + $env:UMF_LOG="level:warning;flush:debug;output:stderr;pid:no" cp ${{github.workspace}}/latest_version/build/bin/Debug/umf.dll ${{github.workspace}}/tag_version/build/bin/Debug/umf.dll - ctest -C Debug --verbose -E "test_memoryProvider" + ctest -C Debug --verbose -E "test_memoryProvider|test_disjoint_pool" $env:Path = "${{github.workspace}}/tag_version/build/bin/Debug;${{env.VCPKG_BIN_PATH}};$env:Path" test/Debug/test_memoryProvider.exe --gtest_filter="-*Trace" + test/Debug/test_disjoint_pool.exe --gtest_filter="-test.internals" # Browse all folders in the examples directory, build them using the # latest UMF version, and run them, excluding those in the exclude list. @@ -373,8 +383,16 @@ jobs: UMF_LOG: level:warning;flush:debug;output:stderr;pid:no LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ run: | - ctest --verbose -E "test_memoryProvider" + ctest --verbose -E "test_memoryProvider|test_disjoint_pool" + + - name: Run disabled tests individually with latest UMF libs (warnings enabled) + working-directory: ${{github.workspace}}/tag_version/build + env: + UMF_LOG: level:warning;flush:debug;output:stderr;pid:no + LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ + run: | test/test_memoryProvider --gtest_filter="-*Trace" + test/test_disjoint_pool --gtest_filter="-test.internals" # Browse all folders in the examples directory, build them using the # latest UMF version, and run them, excluding those in the exclude list. 
diff --git a/include/umf/memory_pool.h b/include/umf/memory_pool.h index f467840515..e1ed9fbbad 100644 --- a/include/umf/memory_pool.h +++ b/include/umf/memory_pool.h @@ -43,7 +43,7 @@ typedef enum umf_pool_create_flag_t { /// @brief Type for combinations of pool creation flags typedef uint32_t umf_pool_create_flags_t; -/// +/// @anchor umfPoolCreate /// @brief Creates new memory pool. /// @param ops instance of umf_memory_pool_ops_t /// @param provider memory provider that will be used for coarse-grain allocations. diff --git a/include/umf/memory_pool_ops.h b/include/umf/memory_pool_ops.h index c9628c77e0..e13ed9ca94 100644 --- a/include/umf/memory_pool_ops.h +++ b/include/umf/memory_pool_ops.h @@ -191,6 +191,7 @@ typedef struct umf_memory_pool_ops_t { /// failure. /// umf_result_t (*ext_trim_memory)(void *pool, size_t minBytesToKeep); + } umf_memory_pool_ops_t; #ifdef __cplusplus diff --git a/include/umf/memory_provider_ops.h b/include/umf/memory_provider_ops.h index 430afc09d5..d7df5f8236 100644 --- a/include/umf/memory_provider_ops.h +++ b/include/umf/memory_provider_ops.h @@ -307,6 +307,7 @@ typedef struct umf_memory_provider_ops_t { void *provider, const void *ptr, umf_memory_property_id_t memory_property_id, void *property_value); + /// /// @brief Retrieve size of the provider-specific properties of the memory /// allocation. /// \details diff --git a/src/memory_pool.c b/src/memory_pool.c index 83069fa50d..9260398895 100644 --- a/src/memory_pool.c +++ b/src/memory_pool.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -372,6 +373,20 @@ static umf_result_t umfDefaultTrimMemory(void *provider, return UMF_RESULT_ERROR_NOT_SUPPORTED; } +static umf_result_t umfPoolPostInitialize(const umf_memory_pool_ops_t *ops, + void *pool_priv, ...) 
{ + va_list args; + va_start(args, pool_priv); + umf_result_t ret = ops->ext_ctl(pool_priv, CTL_QUERY_PROGRAMMATIC, + "post_initialize", NULL, 0, + CTL_QUERY_RUNNABLE, args); + va_end(args); + if (ret == UMF_RESULT_ERROR_INVALID_ARGUMENT) { + ret = UMF_RESULT_ERROR_NOT_SUPPORTED; + } + return ret; +} + // logical sum (OR) of all umf_pool_create_flags_t flags static const umf_pool_create_flags_t UMF_POOL_CREATE_FLAG_ALL = UMF_POOL_CREATE_FLAG_OWN_PROVIDER | UMF_POOL_CREATE_FLAG_DISABLE_TRACKING; @@ -393,7 +408,6 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, } umf_result_t ret = UMF_RESULT_SUCCESS; - umf_memory_pool_ops_t compatible_ops; if (ops->version != UMF_POOL_OPS_VERSION_CURRENT) { LOG_WARN("Memory Pool ops version \"%d\" is different than the current " @@ -402,8 +416,8 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, // Create a new ops compatible structure with the current version memset(&compatible_ops, 0, sizeof(compatible_ops)); - if (UMF_MINOR_VERSION(ops->version) == 0) { - LOG_INFO("Detected 1.0 version of Memory Pool ops, " + if (ops->version < UMF_MAKE_VERSION(1, 1)) { + LOG_INFO("Detected 1.0 version or below of Memory Pool ops, " "upgrading to current version"); memcpy(&compatible_ops, ops, offsetof(umf_memory_pool_ops_t, ext_trim_memory)); @@ -451,7 +465,7 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, goto err_lock_init; } - ret = ops->initialize(pool->provider, params, &pool->pool_priv); + ret = pool->ops.initialize(pool->provider, params, &pool->pool_priv); if (ret != UMF_RESULT_SUCCESS) { goto err_pool_init; } @@ -467,6 +481,12 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, ctl_default_apply(pool_default_list, pname, ops->ext_ctl, pool->pool_priv); + ret = umfPoolPostInitialize(ops, pool->pool_priv); + if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_NOT_SUPPORTED) { + LOG_ERR("Failed to post-initialize pool"); + goto err_pool_init; + } + *hPool = pool; pools_by_name_add(pool); diff --git a/src/memory_provider.c b/src/memory_provider.c index 183355b80c..c147cc3a96 100644 --- a/src/memory_provider.c +++ b/src/memory_provider.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -205,6 +206,7 @@ void assignOpsExtDefaults(umf_memory_provider_ops_t *ops) { ops->ext_get_allocation_properties_size = umfDefaultGetAllocationPropertiesSize; } + } void assignOpsIpcDefaults(umf_memory_provider_ops_t *ops) { @@ -229,6 +231,20 @@ void assignOpsIpcDefaults(umf_memory_provider_ops_t *ops) { } } +static umf_result_t umfProviderPostInitialize(umf_memory_provider_ops_t *ops, + void *provider_priv, ...) 
{ + va_list args; + va_start(args, provider_priv); + umf_result_t ret = ops->ext_ctl(provider_priv, CTL_QUERY_PROGRAMMATIC, + "post_initialize", NULL, 0, + CTL_QUERY_RUNNABLE, args); + va_end(args); + if (ret == UMF_RESULT_ERROR_INVALID_ARGUMENT) { + ret = UMF_RESULT_ERROR_NOT_SUPPORTED; + } + return ret; +} + #define CHECK_OP(ops, fn) \ if (!(ops)->fn) { \ LOG_ERR("missing function pointer: %s\n", #fn); \ @@ -338,6 +354,12 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops, ctl_default_apply(provider_default_list, pname, provider->ops.ext_ctl, provider->provider_priv); } + ret = umfProviderPostInitialize(&provider->ops, provider_priv); + if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_NOT_SUPPORTED) { + LOG_ERR("Failed to post-initialize provider"); + umf_ba_global_free(provider); + return ret; + } *hProvider = provider; diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c index b9a37d98e8..22055554f0 100644 --- a/src/pool/pool_disjoint.c +++ b/src/pool/pool_disjoint.c @@ -33,6 +33,17 @@ static char *DEFAULT_NAME = "disjoint"; struct ctl disjoint_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +umf_result_t disjoint_pool_post_initialize(void *ppPool); +static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return disjoint_pool_post_initialize(ctx); +} + // Disable name ctl for 1.0 release #if 0 static umf_result_t CTL_READ_HANDLER(name)(void *ctx, @@ -326,6 +337,13 @@ static void initialize_disjoint_ctl(void) { // TODO: this is hack. Need some way to register module as node with argument disjoint_ctl_root.root[disjoint_ctl_root.first_free - 1].arg = &CTL_ARG(buckets); + disjoint_ctl_root.root[disjoint_ctl_root.first_free++] = + (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = + CTL_RUNNABLE_HANDLER(post_initialize), + }; } umf_result_t disjoint_pool_ctl(void *hPool, @@ -930,6 +948,14 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, disjoint_pool->provider = provider; disjoint_pool->params = *dp_params; + *ppPool = (void *)disjoint_pool; + + return UMF_RESULT_SUCCESS; +} + +umf_result_t disjoint_pool_post_initialize(void *ppPool) { + disjoint_pool_t *disjoint_pool = (disjoint_pool_t *)ppPool; + disjoint_pool->known_slabs = critnib_new(free_slab, NULL); if (disjoint_pool->known_slabs == NULL) { goto err_free_disjoint_pool; @@ -988,13 +1014,11 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, } umf_result_t ret = umfMemoryProviderGetMinPageSize( - provider, NULL, &disjoint_pool->provider_min_page_size); + disjoint_pool->provider, NULL, &disjoint_pool->provider_min_page_size); if (ret != UMF_RESULT_SUCCESS) { disjoint_pool->provider_min_page_size = 0; } - *ppPool = (void *)disjoint_pool; - return UMF_RESULT_SUCCESS; err_free_buckets: @@ -1013,7 +1037,6 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, err_free_disjoint_pool: umf_ba_global_free(disjoint_pool); - return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } diff --git a/src/pool/pool_jemalloc.c b/src/pool/pool_jemalloc.c index 6882e2dd6f..9090904572 100644 --- a/src/pool/pool_jemalloc.c +++ b/src/pool/pool_jemalloc.c @@ -11,6 +11,7 @@ #include #include +#include "ctl/ctl_internal.h" #include "base_alloc_global.h" #include "memory_provider_internal.h" #include 
"provider_tracking.h" @@ -68,15 +69,19 @@ typedef struct umf_jemalloc_pool_params_t { typedef struct jemalloc_memory_pool_t { umf_memory_provider_handle_t provider; + umf_jemalloc_pool_params_t params; size_t n_arenas; char name[64]; - unsigned int arena_index[]; + unsigned int *arena_index; } jemalloc_memory_pool_t; static __TLS umf_result_t TLS_last_allocation_error; static jemalloc_memory_pool_t *pool_by_arena_index[MALLCTL_ARENAS_ALL]; +struct ctl jemalloc_ctl_root; +static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; + static jemalloc_memory_pool_t *get_pool_by_arena_index(unsigned arena_ind) { // there is no way to obtain MALLOCX_ARENA_MAX from jemalloc // so this checks if arena_ind does not exceed assumed range @@ -483,11 +488,35 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, return ret; } + jemalloc_memory_pool_t *pool = umf_ba_global_alloc(sizeof(*pool)); + if (!pool) { + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + memset(pool, 0, sizeof(*pool)); + + pool->provider = provider; + if (params) { + pool->params = *(const umf_jemalloc_pool_params_t *)params; + } else { + // Set default values + memset(&pool->params, 0, sizeof(pool->params)); + strncpy(pool->params.name, DEFAULT_NAME, sizeof(pool->params.name) - 1); + } + + *out_pool = pool; + + return UMF_RESULT_SUCCESS; +} + +static umf_result_t op_post_initialize(void *pool) { + assert(pool); + extent_hooks_t *pHooks = &arena_extent_hooks; size_t unsigned_size = sizeof(unsigned); int n_arenas_set_from_params = 0; + jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool; int err; - const umf_jemalloc_pool_params_t *jemalloc_params = params; + const umf_jemalloc_pool_params_t *jemalloc_params = &je_pool->params; size_t n_arenas = 0; if (jemalloc_params) { @@ -497,32 +526,34 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, if (n_arenas == 0) { n_arenas = utils_get_num_cores() * 4; - if (n_arenas > MALLOCX_ARENA_MAX) { - n_arenas = MALLOCX_ARENA_MAX; - } + n_arenas = utils_min(n_arenas, (size_t)MALLOCX_ARENA_MAX); } if (n_arenas > MALLOCX_ARENA_MAX) { LOG_ERR("Number of arenas %zu exceeds the limit (%i).", n_arenas, MALLOCX_ARENA_MAX); + umf_ba_global_free(je_pool); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } - jemalloc_memory_pool_t *pool = umf_ba_global_alloc( - sizeof(*pool) + n_arenas * sizeof(*pool->arena_index)); - if (!pool) { + je_pool->arena_index = + umf_ba_global_alloc(n_arenas * sizeof(*je_pool->arena_index)); + if (!je_pool->arena_index) { + LOG_ERR("Could not allocate memory for arena indices."); + umf_ba_global_free(je_pool); return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } - memset(pool, 0, sizeof(*pool) + n_arenas * sizeof(*pool->arena_index)); + + memset(je_pool->arena_index, 0, n_arenas * sizeof(*je_pool->arena_index)); + const char *pool_name = DEFAULT_NAME; if (jemalloc_params) { pool_name = jemalloc_params->name; } - snprintf(pool->name, sizeof(pool->name), "%s", pool_name); + snprintf(je_pool->name, sizeof(je_pool->name), "%s", pool_name); - pool->provider = provider; - pool->n_arenas = n_arenas; + je_pool->n_arenas = n_arenas; size_t num_created = 0; for (size_t i = 0; i < n_arenas; i++) { @@ -547,13 +578,13 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, break; } - pool->arena_index[num_created++] = arena_index; + je_pool->arena_index[num_created++] = arena_index; if (arena_index >= MALLOCX_ARENA_MAX) { LOG_ERR("Number of arenas exceeds the limit."); goto err_cleanup; } - pool_by_arena_index[arena_index] = 
pool; + pool_by_arena_index[arena_index] = je_pool; // Setup extent_hooks for the newly created arena. char cmd[64]; @@ -564,9 +595,8 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, goto err_cleanup; } } - *out_pool = (umf_memory_pool_handle_t)pool; - VALGRIND_DO_CREATE_MEMPOOL(pool, 0, 0); + VALGRIND_DO_CREATE_MEMPOOL(je_pool, 0, 0); return UMF_RESULT_SUCCESS; @@ -574,14 +604,49 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, // Destroy any arenas that were successfully created. for (size_t i = 0; i < num_created; i++) { char cmd[64]; - unsigned arena = pool->arena_index[i]; + unsigned arena = je_pool->arena_index[i]; snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena); (void)je_mallctl(cmd, NULL, 0, NULL, 0); } - umf_ba_global_free(pool); + if (je_pool->arena_index) { + umf_ba_global_free(je_pool->arena_index); + je_pool->arena_index = NULL; + } + umf_ba_global_free(je_pool); return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC; } +static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return op_post_initialize(ctx); +} + +static void initialize_jemalloc_ctl(void) { + jemalloc_ctl_root.root[jemalloc_ctl_root.first_free++] = + (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; +} + +static umf_result_t op_ctl(void *pool, umf_ctl_query_source_t operationType, + const char *name, void *arg, size_t size, + umf_ctl_query_type_t queryType, va_list args) { + (void)operationType; + (void)arg; + (void)size; + (void)args; + utils_init_once(&ctl_initialized, initialize_jemalloc_ctl); + return ctl_query(&jemalloc_ctl_root, pool, CTL_QUERY_PROGRAMMATIC, name, + queryType, arg, size, args); +} + static umf_result_t op_finalize(void *pool) { assert(pool); umf_result_t ret = UMF_RESULT_SUCCESS; @@ -595,6 +660,9 @@ static umf_result_t op_finalize(void *pool) { ret = UMF_RESULT_ERROR_UNKNOWN; } } + if (je_pool->arena_index) { + umf_ba_global_free(je_pool->arena_index); + } umf_ba_global_free(je_pool); VALGRIND_DO_DESTROY_MEMPOOL(pool); @@ -665,6 +733,7 @@ static umf_memory_pool_ops_t UMF_JEMALLOC_POOL_OPS = { .free = op_free, .get_last_allocation_error = op_get_last_allocation_error, .get_name = op_get_name, + .ext_ctl = op_ctl, .ext_trim_memory = op_trim_memory, }; diff --git a/src/pool/pool_scalable.c b/src/pool/pool_scalable.c index 72afce2672..99b4fa1a2d 100644 --- a/src/pool/pool_scalable.c +++ b/src/pool/pool_scalable.c @@ -71,6 +71,7 @@ typedef struct tbb_callbacks_t { typedef struct tbb_memory_pool_t { umf_memory_provider_handle_t mem_provider; + umf_scalable_pool_params_t params; void *tbb_pool; char name[64]; } tbb_memory_pool_t; @@ -291,6 +292,33 @@ umfScalablePoolParamsSetName(umf_scalable_pool_params_handle_t hParams, static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, const void *params, void **pool) { + tbb_memory_pool_t *pool_data = + umf_ba_global_alloc(sizeof(tbb_memory_pool_t)); + if (!pool_data) { + LOG_ERR("cannot allocate memory for metadata"); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + + memset(pool_data, 0, sizeof(*pool_data)); + pool_data->mem_provider = provider; + + if (params) { + pool_data->params = *(const umf_scalable_pool_params_t *)params; + } else { + // Set default values + memset(&pool_data->params, 0, sizeof(pool_data->params)); + 
pool_data->params.granularity = DEFAULT_GRANULARITY; + pool_data->params.keep_all_memory = false; + strncpy(pool_data->params.name, DEFAULT_NAME, + sizeof(pool_data->params.name) - 1); + } + + *pool = (void *)pool_data; + + return UMF_RESULT_SUCCESS; +} + +static umf_result_t tbb_pool_post_initialize(void *pool) { tbb_mem_pool_policy_t policy = {.pAlloc = tbb_raw_alloc_wrapper, .pFree = tbb_raw_free_wrapper, .granularity = DEFAULT_GRANULARITY, @@ -299,22 +327,16 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, .keep_all_memory = false, .reserved = 0}; - const char *pool_name = DEFAULT_NAME; - // If params is provided, override defaults - if (params) { - const umf_scalable_pool_params_t *scalable_params = params; - policy.granularity = scalable_params->granularity; - policy.keep_all_memory = scalable_params->keep_all_memory; - pool_name = scalable_params->name; - } + assert(pool); + tbb_memory_pool_t *pool_data = (tbb_memory_pool_t *)pool; + + const umf_scalable_pool_params_t *scalable_params = &pool_data->params; + const char *pool_name = scalable_params->name; + + // Use stored params + policy.granularity = scalable_params->granularity; + policy.keep_all_memory = scalable_params->keep_all_memory; - tbb_memory_pool_t *pool_data = - umf_ba_global_alloc(sizeof(tbb_memory_pool_t)); - if (!pool_data) { - LOG_ERR("cannot allocate memory for metadata"); - return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; - } - memset(pool_data, 0, sizeof(*pool_data)); snprintf(pool_data->name, sizeof(pool_data->name), "%s", pool_name); umf_result_t res = UMF_RESULT_SUCCESS; @@ -325,7 +347,6 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, goto err_tbb_init; } - pool_data->mem_provider = provider; ret = tbb_callbacks.pool_create_v1((intptr_t)pool_data, &policy, &(pool_data->tbb_pool)); if (ret != 0 /* TBBMALLOC_OK */) { @@ -333,8 +354,6 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, goto err_tbb_init; } - *pool = (void *)pool_data; - return res; err_tbb_init: @@ -450,7 +469,24 @@ static umf_result_t tbb_get_last_allocation_error(void *pool) { return TLS_last_allocation_error; } -static void initialize_pool_ctl(void) {} +static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return tbb_pool_post_initialize(ctx); +} + +static void initialize_pool_ctl(void) { + pool_scallable_ctl_root.root[pool_scallable_ctl_root.first_free++] = + (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; +} static umf_result_t pool_ctl(void *hPool, umf_ctl_query_source_t operationType, const char *name, void *arg, size_t size, diff --git a/src/provider/provider_cuda.c b/src/provider/provider_cuda.c index b6c15d2d1b..ec8037f1ec 100644 --- a/src/provider/provider_cuda.c +++ b/src/provider/provider_cuda.c @@ -109,6 +109,17 @@ static bool Init_cu_global_state_failed; struct ctl cu_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +static umf_result_t cu_memory_provider_post_initialize(void *provider); +static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return cu_memory_provider_post_initialize(ctx); +} 
+ // forward decl needed for alloc static umf_result_t cu_memory_provider_free(void *provider, void *ptr, size_t bytes); @@ -148,6 +159,12 @@ static umf_result_t cu2umf_result(CUresult result) { static void initialize_cu_ctl(void) { CTL_REGISTER_MODULE(&cu_memory_ctl_root, stats); + cu_memory_ctl_root.root[cu_memory_ctl_root.first_free++] = + (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } static void init_cu_global_state(void) { @@ -375,13 +392,32 @@ static umf_result_t cu_memory_provider_initialize(const void *params, snprintf(cu_provider->name, sizeof(cu_provider->name), "%s", cu_params->name); + cu_provider->context = cu_params->cuda_context_handle; + cu_provider->device = cu_params->cuda_device_handle; + cu_provider->memory_type = cu_params->memory_type; + cu_provider->alloc_flags = cu_params->alloc_flags; + + *provider = cu_provider; + + return UMF_RESULT_SUCCESS; +} + +static umf_result_t cu_memory_provider_finalize(void *provider) { + umf_ba_global_free(provider); + return UMF_RESULT_SUCCESS; +} + +static umf_result_t cu_memory_provider_post_initialize(void *provider) { + cu_memory_provider_t *cu_provider = (cu_memory_provider_t *)provider; + + assert(provider); // CUDA alloc functions doesn't allow to provide user alignment - get the // minimum one from the driver size_t min_alignment = 0; CUmemAllocationProp allocProps = {0}; allocProps.location.type = CU_MEM_LOCATION_TYPE_DEVICE; allocProps.type = CU_MEM_ALLOCATION_TYPE_PINNED; - allocProps.location.id = cu_params->cuda_device_handle; + allocProps.location.id = cu_provider->device; CUresult cu_result = g_cu_ops.cuMemGetAllocationGranularity( &min_alignment, &allocProps, CU_MEM_ALLOC_GRANULARITY_MINIMUM); if (cu_result != CUDA_SUCCESS) { @@ -389,29 +425,16 @@ static umf_result_t cu_memory_provider_initialize(const void *params, return cu2umf_result(cu_result); } - cu_provider->context = cu_params->cuda_context_handle; - cu_provider->device = cu_params->cuda_device_handle; - cu_provider->memory_type = cu_params->memory_type; cu_provider->min_alignment = min_alignment; // If the memory type is shared (CUDA managed), the allocation flags must // be set. NOTE: we do not check here if the flags are valid - // this will be done by CUDA runtime. 
- if (cu_params->memory_type == UMF_MEMORY_TYPE_SHARED && - cu_params->alloc_flags == 0) { + if (cu_provider->memory_type == UMF_MEMORY_TYPE_SHARED && + cu_provider->alloc_flags == 0) { // the default setting is CU_MEM_ATTACH_GLOBAL cu_provider->alloc_flags = CU_MEM_ATTACH_GLOBAL; - } else { - cu_provider->alloc_flags = cu_params->alloc_flags; } - - *provider = cu_provider; - - return UMF_RESULT_SUCCESS; -} - -static umf_result_t cu_memory_provider_finalize(void *provider) { - umf_ba_global_free(provider); return UMF_RESULT_SUCCESS; } diff --git a/src/provider/provider_devdax_memory.c b/src/provider/provider_devdax_memory.c index 7ddf3c72a8..6addea3502 100644 --- a/src/provider/provider_devdax_memory.c +++ b/src/provider/provider_devdax_memory.c @@ -608,7 +608,8 @@ static umf_memory_provider_ops_t UMF_DEVDAX_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = devdax_put_ipc_handle, .ext_open_ipc_handle = devdax_open_ipc_handle, .ext_close_ipc_handle = devdax_close_ipc_handle, - .ext_ctl = devdax_ctl}; + .ext_ctl = devdax_ctl, +}; const umf_memory_provider_ops_t *umfDevDaxMemoryProviderOps(void) { return &UMF_DEVDAX_MEMORY_PROVIDER_OPS; diff --git a/src/provider/provider_file_memory.c b/src/provider/provider_file_memory.c index bff4034b2f..830740f206 100644 --- a/src/provider/provider_file_memory.c +++ b/src/provider/provider_file_memory.c @@ -160,6 +160,17 @@ static __TLS file_last_native_error_t TLS_last_native_error; struct ctl file_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +static umf_result_t file_post_initialize(void *provider); +static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return file_post_initialize(ctx); +} + static const char *Native_error_str[] = { [_UMF_FILE_RESULT_SUCCESS] = "success", [_UMF_FILE_RESULT_ERROR_ALLOC_FAILED] = "memory allocation failed", @@ -175,6 +186,12 @@ static void file_store_last_native_error(int32_t native_error, static void initialize_file_ctl(void) { CTL_REGISTER_MODULE(&file_memory_ctl_root, stats); + file_memory_ctl_root.root[file_memory_ctl_root.first_free++] = + (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } static umf_result_t @@ -295,6 +312,22 @@ static umf_result_t file_initialize(const void *params, void **provider) { file_provider->coarse = coarse; + *provider = file_provider; + return UMF_RESULT_SUCCESS; + +err_close_fd: + utils_close_fd(file_provider->fd); +err_free_file_provider: + umf_ba_global_free(file_provider); + return ret; +} + +static umf_result_t file_post_initialize(void *provider) { + umf_result_t ret = UMF_RESULT_SUCCESS; + file_memory_provider_t *file_provider = provider; + + assert(provider); + if (utils_mutex_init(&file_provider->lock) == NULL) { LOG_ERR("lock init failed"); ret = UMF_RESULT_ERROR_UNKNOWN; @@ -315,8 +348,6 @@ static umf_result_t file_initialize(const void *params, void **provider) { goto err_delete_fd_offset_map; } - *provider = file_provider; - return UMF_RESULT_SUCCESS; err_delete_fd_offset_map: @@ -325,9 +356,9 @@ static umf_result_t file_initialize(const void *params, void **provider) { utils_mutex_destroy_not_free(&file_provider->lock); err_coarse_delete: coarse_delete(file_provider->coarse); -err_close_fd: - utils_close_fd(file_provider->fd); -err_free_file_provider: + if (utils_close_fd(file_provider->fd)) { + 
LOG_PERR("closing file descriptor %d failed", file_provider->fd); + } umf_ba_global_free(file_provider); return ret; } @@ -935,7 +966,8 @@ static umf_memory_provider_ops_t UMF_FILE_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = file_put_ipc_handle, .ext_open_ipc_handle = file_open_ipc_handle, .ext_close_ipc_handle = file_close_ipc_handle, - .ext_ctl = file_ctl}; + .ext_ctl = file_ctl, +}; const umf_memory_provider_ops_t *umfFileMemoryProviderOps(void) { return &UMF_FILE_MEMORY_PROVIDER_OPS; diff --git a/src/provider/provider_fixed_memory.c b/src/provider/provider_fixed_memory.c index d761a4024d..a38c981c64 100644 --- a/src/provider/provider_fixed_memory.c +++ b/src/provider/provider_fixed_memory.c @@ -65,8 +65,25 @@ static __TLS fixed_last_native_error_t TLS_last_native_error; struct ctl fixed_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +static umf_result_t fixed_post_initialize(void *provider); +static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return fixed_post_initialize(ctx); +} + static void initialize_fixed_ctl(void) { CTL_REGISTER_MODULE(&fixed_memory_ctl_root, stats); + fixed_memory_ctl_root.root[fixed_memory_ctl_root.first_free++] = + (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } static const char *Native_error_str[] = { @@ -141,20 +158,10 @@ static umf_result_t fixed_initialize(const void *params, void **provider) { fixed_provider->base = in_params->ptr; fixed_provider->size = in_params->size; - // add the entire memory as a single block - ret = coarse_add_memory_fixed(coarse, fixed_provider->base, - fixed_provider->size); - if (ret != UMF_RESULT_SUCCESS) { - LOG_ERR("adding memory block failed"); - goto err_coarse_delete; - } - *provider = fixed_provider; return UMF_RESULT_SUCCESS; -err_coarse_delete: - coarse_delete(fixed_provider->coarse); err_free_fixed_provider: umf_ba_global_free(fixed_provider); return ret; @@ -297,6 +304,22 @@ static umf_result_t fixed_free(void *provider, void *ptr, size_t size) { return ret; } +static umf_result_t fixed_post_initialize(void *provider) { + fixed_memory_provider_t *fixed_provider = + (fixed_memory_provider_t *)provider; + assert(provider); + + umf_result_t ret = coarse_add_memory_fixed( + fixed_provider->coarse, fixed_provider->base, fixed_provider->size); + if (ret != UMF_RESULT_SUCCESS) { + LOG_ERR("adding memory block failed"); + coarse_delete(fixed_provider->coarse); + umf_ba_global_free(fixed_provider); + return ret; + } + return UMF_RESULT_SUCCESS; +} + static umf_result_t fixed_ctl(void *provider, umf_ctl_query_source_t operationType, const char *name, void *arg, size_t size, @@ -325,7 +348,8 @@ static umf_memory_provider_ops_t UMF_FIXED_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = NULL, .ext_open_ipc_handle = NULL, .ext_close_ipc_handle = NULL, - .ext_ctl = fixed_ctl}; + .ext_ctl = fixed_ctl, +}; const umf_memory_provider_ops_t *umfFixedMemoryProviderOps(void) { return &UMF_FIXED_MEMORY_PROVIDER_OPS; diff --git a/src/provider/provider_os_memory.c b/src/provider/provider_os_memory.c index 6bbb36ad25..edaf315dc4 100644 --- a/src/provider/provider_os_memory.c +++ b/src/provider/provider_os_memory.c @@ -611,7 +611,6 @@ static umf_result_t os_initialize(const void *params, void **provider) { } *provider = os_provider; - return 
UMF_RESULT_SUCCESS; err_destroy_bitmaps: diff --git a/src/provider/provider_tracking.c b/src/provider/provider_tracking.c index 10bb48e4cc..6461429405 100644 --- a/src/provider/provider_tracking.c +++ b/src/provider/provider_tracking.c @@ -1360,7 +1360,6 @@ umf_memory_provider_ops_t UMF_TRACKING_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = trackingPutIpcHandle, .ext_open_ipc_handle = trackingOpenIpcHandle, .ext_close_ipc_handle = trackingCloseIpcHandle, - .ext_ctl = NULL, .ext_get_allocation_properties = trackingGetAllocationProperties, .ext_get_allocation_properties_size = trackingGetAllocationPropertiesSize, }; diff --git a/test/pools/disjoint_pool.cpp b/test/pools/disjoint_pool.cpp index c638bfc3e2..049ce277e3 100644 --- a/test/pools/disjoint_pool.cpp +++ b/test/pools/disjoint_pool.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include "pool.hpp" #include "pool/pool_disjoint_internal.h" @@ -19,6 +20,8 @@ using umf_test::test; using namespace umf_test; +static void get_test_va_list(va_list *a, ...) { va_start(*a, a); } + TEST_F(test, internals) { static umf_result_t expectedResult = UMF_RESULT_SUCCESS; struct memory_provider : public umf_test::provider_base_t { @@ -65,6 +68,13 @@ TEST_F(test, internals) { disjoint_pool_t *pool; umf_result_t res = ops->initialize(provider_handle, params, (void **)&pool); EXPECT_EQ(res, UMF_RESULT_SUCCESS); + va_list empty_args; + get_test_va_list(&empty_args); + res = ops->ext_ctl((void *)pool, CTL_QUERY_PROGRAMMATIC, + "post_initialize", nullptr, 0, CTL_QUERY_RUNNABLE, + empty_args); + va_end(empty_args); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); EXPECT_NE(pool, nullptr); EXPECT_EQ(pool->provider_min_page_size, (size_t)1024); @@ -315,6 +325,14 @@ TEST_F(test, disjointPoolTrim) { EXPECT_EQ(res, UMF_RESULT_SUCCESS); EXPECT_NE(pool, nullptr); + va_list empty_args; + get_test_va_list(&empty_args); + res = ops->ext_ctl((void *)pool, CTL_QUERY_PROGRAMMATIC, + "post_initialize", nullptr, 0, CTL_QUERY_RUNNABLE, + empty_args); + va_end(empty_args); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + // do 4 allocs, then free all of them size_t size = 64; void *ptrs[4] = {0}; From f602262ca2a744cd55ae754c3eda6543dbd502ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Thu, 11 Sep 2025 15:17:56 +0200 Subject: [PATCH 03/11] Initial documentation --- docs/config/ctl.rst | 1345 +++++++++++++++++++++++++ docs/config/examples.rst | 30 + examples/CMakeLists.txt | 36 + examples/README.md | 11 + examples/ctl/CMakeLists.txt | 81 ++ examples/ctl/ctl_example.c | 310 ++++++ examples/ctl/ctl_statistics_example.c | 292 ++++++ include/umf/memory_pool.h | 1 - src/ctl/ctl_defaults.c | 7 +- src/memory_provider.c | 4 +- 10 files changed, 2111 insertions(+), 6 deletions(-) create mode 100644 docs/config/ctl.rst create mode 100644 examples/ctl/CMakeLists.txt create mode 100644 examples/ctl/ctl_example.c create mode 100644 examples/ctl/ctl_statistics_example.c diff --git a/docs/config/ctl.rst b/docs/config/ctl.rst new file mode 100644 index 0000000000..7ef7fa9548 --- /dev/null +++ b/docs/config/ctl.rst @@ -0,0 +1,1345 @@ +================ +Introduction +================ + +UMF's CTL is a mechanism for advanced configuration and control of UMF pools +and providers. It allows programmatic access to provider- or pool-specific +configuration options, statistics and auxiliary APIs. CTL entries can also be +set through environment variables or a configuration file, allowing adjustment +of UMF behavior without modifying the program. 
+ +Main concepts +============= + +The core concept is a *path*. A path is a string of nodes separated by periods. +You can imagine nodes as directories where the last element is a file that can +be read, written or executed (similar to ``sysfs`` but with periods instead of +slashes). Example path ``umf.logger.level`` controls the log level. You can +access it with:: + + int level; + umf_result_t ret = umfCtlGet("umf.logger.level", &level, sizeof(level)); + +To change the level programmatically use:: + + int level = LOG_WARNING; + umf_result_t ret = umfCtlSet("umf.logger.level", &level, sizeof(level)); + +Accessing pool or provider paths is slightly more involved. For example:: + + size_t alloc_count; + umf_memory_pool_handle_t hPool = createPool(); + umf_result_t ret = umfCtlGet("umf.pool.by_handle.{}.stats.alloc_count", + &alloc_count, sizeof(alloc_count), hPool); + +The ``umf.pool.by_handle`` prefix selects a pool addressed by its handle. +Every ``{}`` in the path is replaced with an extra argument passed to the CTL +function. Alternative addressing methods are described below. + +Pool / Provider addressing +========================== + +Two addressing schemes are provided: ``by_handle`` and ``by_name``. Each pool +and provider has a unique handle and an optional user-defined name that can be +queried with ``umfMemoryProviderGetName()`` or ``umfMemoryPoolGetName()``. +When using ``by_name`` the name appears in the path, e.g.:: + + umfCtlGet("umf.pool.by_name.myPool.stats.alloc_count", + &alloc_count, sizeof(alloc_count)); + +If multiple pools share a name, read operations must disambiguate the target by +appending an index after the name:: + + umfCtlGet("umf.pool.by_name.myPool.0.stats.alloc_count", + &alloc_count, sizeof(alloc_count)); + +The number of pools with a given name can be obtained with the ``count`` node. + +Wildcards +========= + +A ``{}`` in the path acts as a wildcard and is replaced with successive +arguments of ``umfCtlGet``, ``umfCtlSet`` or ``umfCtlExec``. Wildcards can +replace any node, not only handles. For example:: + + size_t pool_count; + const char *name = "myPool"; + umfCtlGet("umf.pool.by_name.{}.count", &pool_count, sizeof(pool_count), + name); + for (size_t i = 0; i < pool_count; i++) { + umfCtlGet("umf.pool.by_name.{}.{}.stats.alloc_count", &alloc_count, + sizeof(alloc_count), name, i); + } + +Ensure that the types of wildcard arguments match the expected node types. + +Default addressing +================== + +``umf.provider.default`` and ``umf.pool.default`` store default values applied +to providers or pools created after the defaults are set. For example:: + + const char *name = "custom"; + umfCtlSet("umf.pool.default.disjoint.name", (void *)name, strlen(name)+1); + +Every subsequently created disjoint pool will use ``custom`` as its name unless +overridden by explicit parameters. Defaults may be supplied programmatically or +via configuration and are saved internally and applied during initalization of +a matching provider or pool. + +Environment variables +===================== + +CTL entries may also be specified in the ``UMF_CONF`` environment variable or +a configuration file specified in the ``UMF_CONF_FILE``. +Multiple entries are separated with semicolons, e.g.:: + + UMF_CONF="umf.logger.output=stdout;umf.logger.level=0" + +CTL options available through environment variables are limited—you can only +target default nodes when addressing pools. 
This means that configuration +strings can influence values consumed during pool creation but cannot alter +runtime-only parameters. + +============ +CTL nodes +============ + +Unless noted otherwise, provider and pool nodes accept either ``by_handle`` or +``by_name`` addressing. Replace the ``{provider}``, ``{pool}`` or ``{id}`` +placeholder with the wildcard argument supplied to :c:func:`umfCtlGet`, +:c:func:`umfCtlSet` or :c:func:`umfCtlExec`. + +Logger nodes +------------ + +.. py:function:: umf.logger.timestamp(enabled) + + :param enabled: Receives (or provides) ``0`` when timestamps are disabled and + ``1`` when they are emitted. + :type enabled: ``int *`` + + **Access:** read-write. + **Default addressing:** not supported. + **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. + + Toggle timestamp prefixes in future log records. The flag is treated as a + boolean value and only affects messages emitted after the change. + +.. py:function:: umf.logger.pid(enabled) + + :param enabled: Receives or supplies ``0`` to omit the process identifier and + ``1`` to include it in every message header. + :type enabled: ``int *`` + + **Access:** read-write. + **Default addressing:** not supported. + **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. + + Controls whether each log line is annotated with the current process id. + Setting non-boolean values results in coercion to zero/non-zero; the change + applies to subsequent messages only. + +.. py:function:: umf.logger.level(level) + + :param level: Receives or supplies the minimum severity that will be written. + :type level: ``utils_log_level_t *`` (``LOG_DEBUG`` .. ``LOG_FATAL``) + + **Access:** read-write. + **Default addressing:** not supported. + **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. + + Sets the filtering threshold for the logger. Records below the configured + level are dropped. Writes that fall outside the enumerated range are + rejected. + +.. py:function:: umf.logger.flush_level(level) + + :param level: Receives or supplies the severity at which the logger forces a + flush of the output stream. + :type level: ``utils_log_level_t *`` (``LOG_DEBUG`` .. ``LOG_FATAL``) + + **Access:** read-write. + **Default addressing:** not supported. + **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. + + Adjusts when buffered log data is synchronously flushed. Writes outside the + valid severity range fail, and lowering the level can incur additional flush + overhead for future messages. + +.. py:function:: umf.logger.output(path) + + :param path: Receives the currently selected sink on reads. On writes, pass + ``"stdout"`` or ``"stderr"`` to redirect to standard streams, a + NUL-terminated file path to append to a file, or ``NULL`` to disable + logging altogether. + :type path: ``char *`` when reading, ``const char *`` when writing + + **Access:** read-write. + **Default addressing:** not supported. + **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. + + Controls the destination for log messages. The logger closes any previously + opened file when switching targets. Providing a path longer than 256 bytes or + pointing to a file that cannot be opened causes the write to fail. + +Provider nodes +-------------- + +The following entries are available for providers that register CTL support +through ``umf.provider``. ``{provider}`` accepts either a handle or a name with +an optional numeric disambiguator. 
None of these nodes can be set via default +addressing and they are not configurable through environment variables. + +.. py:function:: umf.provider.by_handle.stats.allocated_memory(provider, bytes) + + **CTL path:** ``umf.provider.by_handle.{provider}.stats.allocated_memory`` + (or ``umf.provider.by_name.{provider}.stats.allocated_memory``). + + :param provider: Handle (or name-based selector) of the provider being + queried. When using ``by_name`` addressing, append ``.{index}`` if the + name is not unique. + :type provider: ``umf_memory_provider_handle_t`` when using handles, or + ``const char *`` for names. + :param bytes: Receives the total number of bytes currently outstanding. + :type bytes: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Returns the amount of memory the provider has allocated but not yet freed. + The counter updates atomically as the provider serves requests and is not + resettable. + +.. py:function:: umf.provider.by_handle.stats.peak_memory(provider, bytes) + + **CTL path:** ``umf.provider.by_handle.{provider}.stats.peak_memory`` (or + ``umf.provider.by_name.{provider}.stats.peak_memory``). + + :param provider: Handle or name-based selector of the provider being + queried. Disambiguate duplicate names with an index when using + ``by_name``. + :type provider: ``umf_memory_provider_handle_t`` when using handles, or + ``const char *`` for names. + :param bytes: Receives the highest observed outstanding allocation size since + the last reset. + :type bytes: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Reports the historical maximum allocation footprint of the provider. + Combine with :py:func:`umf.provider.by_handle.stats.peak_memory.reset` to + discard stale peaks when desired. + +.. py:function:: umf.provider.by_handle.stats.peak_memory.reset(provider) + + **CTL path:** ``umf.provider.by_handle.{provider}.stats.peak_memory.reset`` + (or ``umf.provider.by_name.{provider}.stats.peak_memory.reset``). + + :param provider: Handle or name-based selector of the provider being + updated. + :type provider: ``umf_memory_provider_handle_t`` when using handles, or + ``const char *`` for names. + + **Access:** execute (via :c:func:`umfCtlExec`). + **Default addressing:** not supported. + **Environment:** not supported. + + Resets the peak allocation counter to the provider's current outstanding + usage. The operation does not affect other statistics and can be invoked at + any time. + +.. py:function:: umf.provider.by_handle.stats.reset(provider) + + **CTL path:** ``umf.provider.by_handle.{provider}.stats.reset`` (or + ``umf.provider.by_name.{provider}.stats.reset``). + + :param provider: Handle or name-based selector of the provider being + updated. + :type provider: ``umf_memory_provider_handle_t`` when using handles, or + ``const char *`` for names. + + **Access:** execute (via :c:func:`umfCtlExec`). + **Default addressing:** not supported. + **Environment:** not supported. + + Clears all provider statistics, including the peak counter. Use this to start + a new measurement interval; the call has no effect on in-flight allocations. + +.. py:function:: umf.provider.by_handle.params.ipc_enabled(provider, enabled) + + **CTL path:** ``umf.provider.by_handle.{provider}.params.ipc_enabled`` (or + ``umf.provider.by_name.{provider}.params.ipc_enabled``). + + :param provider: Handle or name-based selector of the provider being + queried. 
+ :type provider: ``umf_memory_provider_handle_t`` when using handles, or + ``const char *`` for names. + :param enabled: Receives ``0`` when inter-process sharing is disabled and a + non-zero value when it is active. + :type enabled: ``int *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Indicates whether the OS memory provider has been initialized with IPC + support. The value is fixed at provider creation time and cannot be modified + afterwards. + +Disjoint pool parameter nodes +----------------------------- + +The following entries apply to disjoint pools. They can be addressed via +``umf.pool.default.disjoint`` to set defaults for future pools. Environment +configuration may only set these defaults because there is no way to provide a +runtime handle through ``UMF_CONF``. + +.. py:function:: umf.pool.by_handle.params.slab_min_size(pool, bytes) + + **CTL path:** ``umf.pool.by_handle.{pool}.params.slab_min_size`` (or + ``umf.pool.by_name.{pool}.params.slab_min_size``). + + :param pool: Handle or name-based selector of the disjoint pool. Append + ``.{index}`` after the name when multiple pools share it. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bytes: Receives or supplies the minimum slab size requested from the + provider. + :type bytes: ``size_t *`` + + **Access:** read-write. + **Default addressing:** ``umf.pool.default.disjoint.params.slab_min_size``. + **Environment:** defaults configurable via ``UMF_CONF`` or + ``UMF_CONF_FILE``. + + Governs how much memory the pool grabs in each slab. Lower values reduce + per-allocation slack while higher values amortize provider overhead. Writes + are accepted only before the pool completes its ``post_initialize`` phase. + +.. py:function:: umf.pool.by_handle.params.max_poolable_size(pool, bytes) + + **CTL path:** ``umf.pool.by_handle.{pool}.params.max_poolable_size`` (or + ``umf.pool.by_name.{pool}.params.max_poolable_size``). + + :param pool: Handle or name-based selector of the disjoint pool. Append + ``.{index}`` when disambiguating duplicate names. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bytes: Receives or supplies the largest allocation size that is still + cached by the pool. + :type bytes: ``size_t *`` + + **Access:** read-write. + **Default addressing:** ``umf.pool.default.disjoint.params.max_poolable_size``. + **Environment:** defaults configurable via ``UMF_CONF`` or + ``UMF_CONF_FILE``. + + Sets the cut-off for pooling allocations. Requests larger than this value are + delegated directly to the provider. Updates must occur before + ``post_initialize`` completes. + +.. py:function:: umf.pool.by_handle.params.capacity(pool, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.params.capacity`` (or + ``umf.pool.by_name.{pool}.params.capacity``). + + :param pool: Handle or name-based selector of the disjoint pool. Append + ``.{index}`` when necessary to disambiguate names. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param count: Receives or supplies the maximum number of slabs each bucket + may retain. + :type count: ``size_t *`` + + **Access:** read-write. + **Default addressing:** ``umf.pool.default.disjoint.params.capacity``. + **Environment:** defaults configurable via ``UMF_CONF`` or + ``UMF_CONF_FILE``. + + Caps the pool's cached slabs per bucket to limit memory retention. 
Shrinking + the capacity may cause future frees to return slabs to the provider. Writes + are rejected after ``post_initialize``. + +.. py:function:: umf.pool.by_handle.params.min_bucket_size(pool, bytes) + + **CTL path:** ``umf.pool.by_handle.{pool}.params.min_bucket_size`` (or + ``umf.pool.by_name.{pool}.params.min_bucket_size``). + + :param pool: Handle or name-based selector of the disjoint pool. Append + ``.{index}`` to the name when needed. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bytes: Receives or supplies the minimal allocation size a bucket may + serve. + :type bytes: ``size_t *`` + + **Access:** read-write. + **Default addressing:** ``umf.pool.default.disjoint.params.min_bucket_size``. + **Environment:** defaults configurable via ``UMF_CONF`` or + ``UMF_CONF_FILE``. + + Controls the smallest chunk size kept in the pool, which in turn affects the + number of buckets. Writes are validated for size correctness and disallowed + after ``post_initialize``. + +.. py:function:: umf.pool.by_handle.params.pool_trace(pool, level) + + **CTL path:** ``umf.pool.by_handle.{pool}.params.pool_trace`` (or + ``umf.pool.by_name.{pool}.params.pool_trace``). + + :param pool: Handle or name-based selector of the disjoint pool. Append + ``.{index}`` for ambiguous names. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param level: Receives or supplies a non-zero value to enable tracing of + counters, or ``0`` to disable it. + :type level: ``int *`` + + **Access:** read-write. + **Default addressing:** ``umf.pool.default.disjoint.params.pool_trace``. + **Environment:** defaults configurable via ``UMF_CONF`` or + ``UMF_CONF_FILE``. + + Enables collection of per-bucket and aggregated allocation counters. Tracing + must be activated before ``post_initialize``; attempting to change it later + fails with ``UMF_RESULT_ERROR_NOT_SUPPORTED``. + +Disjoint pool statistics +------------------------ + +Statistics are read-only and cannot be set through defaults or environment +variables. Aggregate counters that rely on tracing require +``params.pool_trace`` to be non-zero. + +.. py:function:: umf.pool.by_handle.stats.used_memory(pool, bytes) + + **CTL path:** ``umf.pool.by_handle.{pool}.stats.used_memory`` (or + ``umf.pool.by_name.{pool}.stats.used_memory``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bytes: Receives the amount of memory that is presently allocated by + the pool's clients. + :type bytes: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Reports the memory currently in use across all slabs, regardless of tracing + status. + +.. py:function:: umf.pool.by_handle.stats.reserved_memory(pool, bytes) + + **CTL path:** ``umf.pool.by_handle.{pool}.stats.reserved_memory`` (or + ``umf.pool.by_name.{pool}.stats.reserved_memory``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bytes: Receives the total number of bytes reserved in slabs that the + pool owns. + :type bytes: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Returns the total memory reserved by the pool, including free capacity held + in slabs. 
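+
+For example, both counters can be read with the usual CTL calls, where
+``hPool`` stands for a handle to an existing disjoint pool::
+
+   size_t used, reserved;
+   umfCtlGet("umf.pool.by_handle.{}.stats.used_memory", &used, sizeof(used),
+             hPool);
+   umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", &reserved,
+             sizeof(reserved), hPool);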
+ +.. py:function:: umf.pool.by_handle.stats.alloc_num(pool, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.stats.alloc_num`` (or + ``umf.pool.by_name.{pool}.stats.alloc_num``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param count: Receives the number of allocations the pool has issued. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Counts every allocation handed out by the pool since the + pool was created. + +.. py:function:: umf.pool.by_handle.stats.alloc_pool_num(pool, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.stats.alloc_pool_num`` (or + ``umf.pool.by_name.{pool}.stats.alloc_pool_num``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param count: Receives the number of allocations served directly from cached + slabs. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Counts how many allocations were fulfilled from cached + memory without visiting the provider. + +.. py:function:: umf.pool.by_handle.stats.free_num(pool, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.stats.free_num`` (or + ``umf.pool.by_name.{pool}.stats.free_num``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param count: Receives the total number of frees processed by the pool. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Tracks the number of frees observed by the pool since its + creation. + +.. py:function:: umf.pool.by_handle.stats.curr_slabs_in_use(pool, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.stats.curr_slabs_in_use`` (or + ``umf.pool.by_name.{pool}.stats.curr_slabs_in_use``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param count: Receives the current number of slabs actively serving + allocations. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Returns the number of slabs that currently have live + allocations. + +.. py:function:: umf.pool.by_handle.stats.curr_slabs_in_pool(pool, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.stats.curr_slabs_in_pool`` (or + ``umf.pool.by_name.{pool}.stats.curr_slabs_in_pool``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param count: Receives how many slabs are cached and ready for reuse. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Reports the slabs retained in the pool for future reuse. + +.. py:function:: umf.pool.by_handle.stats.max_slabs_in_use(pool, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.stats.max_slabs_in_use`` (or + ``umf.pool.by_name.{pool}.stats.max_slabs_in_use``). 
+ + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param count: Receives the historical maximum of simultaneously used slabs. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Provides the peak number of slabs that were in use at the + same time. + +.. py:function:: umf.pool.by_handle.stats.max_slabs_in_pool(pool, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.stats.max_slabs_in_pool`` (or + ``umf.pool.by_name.{pool}.stats.max_slabs_in_pool``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param count: Receives the largest number of slabs retained in the cache. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Returns the highest number of slabs ever retained in the + cache simultaneously. + +Disjoint pool bucket inspection +------------------------------- + +Bucket-specific nodes take an additional ``{id}`` placeholder that must be +supplied as a ``size_t`` argument. Environment configuration cannot target +these entries. + +.. py:function:: umf.pool.by_handle.buckets.count(pool, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.buckets.count`` (or + ``umf.pool.by_name.{pool}.buckets.count``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param count: Receives the number of distinct bucket sizes. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Returns the total number of bucket sizes maintained by the pool. Call the + node without a bucket index; providing one results in + ``UMF_RESULT_ERROR_INVALID_ARGUMENT``. + +.. py:function:: umf.pool.by_handle.buckets.size(pool, bucket, bytes) + + **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.size`` (or + ``umf.pool.by_name.{pool}.buckets.{id}.size``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bucket: Zero-based bucket index. + :type bucket: ``size_t`` + :param bytes: Receives the allocation size that the bucket serves. + :type bytes: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Reports the allocation size serviced by the selected bucket. This value is + available even when tracing is disabled. + +.. py:function:: umf.pool.by_handle.buckets.stats.alloc_num(pool, bucket, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.alloc_num`` (or + ``umf.pool.by_name.{pool}.buckets.{id}.stats.alloc_num``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bucket: Zero-based bucket index. + :type bucket: ``size_t`` + :param count: Receives the number of allocations performed by this bucket. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. 
Counts every allocation that passed through the specified + bucket. + +.. py:function:: umf.pool.by_handle.buckets.stats.alloc_pool_num(pool, bucket, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.alloc_pool_num`` + (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.alloc_pool_num``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bucket: Zero-based bucket index. + :type bucket: ``size_t`` + :param count: Receives the number of allocations satisfied from cached slabs + in this bucket. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Counts how many allocations were served entirely from the + bucket's cached slabs. + +.. py:function:: umf.pool.by_handle.buckets.stats.free_num(pool, bucket, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.free_num`` (or + ``umf.pool.by_name.{pool}.buckets.{id}.stats.free_num``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bucket: Zero-based bucket index. + :type bucket: ``size_t`` + :param count: Receives the number of frees recorded for this bucket. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Tracks the number of frees observed for the bucket. + +.. py:function:: umf.pool.by_handle.buckets.stats.curr_slabs_in_use(pool, bucket, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.curr_slabs_in_use`` + (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.curr_slabs_in_use``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bucket: Zero-based bucket index. + :type bucket: ``size_t`` + :param count: Receives how many slabs for this bucket currently serve + allocations. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Returns the current slab utilization for the bucket. + +.. py:function:: umf.pool.by_handle.buckets.stats.curr_slabs_in_pool(pool, bucket, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.curr_slabs_in_pool`` + (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.curr_slabs_in_pool``). + + :param pool: Handle or name-based selector of the disjoint pool. + :type pool: ``umf_memory_pool_handle_t`` when using handles, or + ``const char *`` for names. + :param bucket: Zero-based bucket index. + :type bucket: ``size_t`` + :param count: Receives the number of slabs cached and immediately available + for this bucket. + :type count: ``size_t *`` + + **Access:** read-only. + **Default addressing:** not supported. + **Environment:** not supported. + + Requires tracing. Reports cached slabs that the bucket can reuse without a + provider call. + +.. py:function:: umf.pool.by_handle.buckets.stats.max_slabs_in_use(pool, bucket, count) + + **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.max_slabs_in_use`` + (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.max_slabs_in_use``). + + :param pool: Handle or name-based selector of the disjoint pool. 
+   :type pool: ``umf_memory_pool_handle_t`` when using handles, or
+      ``const char *`` for names.
+   :param bucket: Zero-based bucket index.
+   :type bucket: ``size_t``
+   :param count: Receives the peak number of slabs in use for this bucket.
+   :type count: ``size_t *``
+
+   **Access:** read-only.
+   **Default addressing:** not supported.
+   **Environment:** not supported.
+
+   Requires tracing. Provides the historical maximum of slabs simultaneously in
+   use for the bucket.
+
+.. py:function:: umf.pool.by_handle.buckets.stats.max_slabs_in_pool(pool, bucket, count)
+
+   **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.max_slabs_in_pool``
+   (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.max_slabs_in_pool``).
+
+   :param pool: Handle or name-based selector of the disjoint pool.
+   :type pool: ``umf_memory_pool_handle_t`` when using handles, or
+      ``const char *`` for names.
+   :param bucket: Zero-based bucket index.
+   :type bucket: ``size_t``
+   :param count: Receives the largest number of slabs retained in the bucket's
+      cache.
+   :type count: ``size_t *``
+
+   **Access:** read-only.
+   **Default addressing:** not supported.
+   **Environment:** not supported.
+
+   Requires tracing. Returns the maximum number of slabs cached for later use by
+   the bucket.
+
+Reading this reference
+----------------------
+
+Parameter annotations describe the values stored in the node rather than the
+pointer types passed to ``umfCtlGet``/``umfCtlSet``/``umfCtlExec``. The
+**Access** field indicates whether the node can be read, written, or executed.
+The **Defaults / Env** field notes whether the entry can be controlled through
+defaults written under ``umf.provider.default.`` or
+``umf.pool.default.`` and via ``UMF_CONF``/``UMF_CONF_FILE``. Nodes that do
+not accept either configuration source are marked as not supported.
+
+Logger nodes
+================
+
+.. py:function:: umf.logger.timestamp(enabled)
+
+   :param enabled: Receives or supplies ``0`` when timestamps are disabled and
+      ``1`` when they are emitted.
+   :type enabled: ``int``
+
+   **Access:** read-write.
+   **Defaults / Env:** supported.
+
+   Toggle timestamp prefixes in future log records. Logging starts with
+   timestamps disabled, and the flag affects only messages emitted after the
+   change.
+
+.. py:function:: umf.logger.pid(enabled)
+
+   :param enabled: Receives or supplies ``0`` to omit the process identifier and
+      ``1`` to include it in every message header.
+   :type enabled: ``int``
+
+   **Access:** read-write.
+   **Defaults / Env:** supported.
+
+   Controls whether each log line is annotated with the current process id.
+   Logging omits the pid by default. Setting non-boolean values results in
+   coercion to zero/non-zero; the change applies to subsequent messages only.
+
+.. py:function:: umf.logger.level(level)
+
+   :param level: Receives or supplies the minimum severity that will be written.
+   :type level: ``int`` (``0`` .. ``4``)
+
+   **Access:** read-write.
+   **Defaults / Env:** supported.
+
+   Sets the filtering threshold for the logger. Records below the configured
+   level are dropped. Writes that fall outside the enumerated range are
+   rejected. 0 means debug logs, 1 means info logs, 2 means warnings, 3 means
+   errors, and 4 means fatal logs. Until an output is selected the logger
+   ignores the level because logging is disabled.
+
+.. py:function:: umf.logger.flush_level(level)
+
+   :param level: Receives or supplies the severity at which the logger forces a
+      flush of the output stream.
+ :type level: ``int`` (``0`` .. ``4``) + + **Access:** read-write. + **Defaults / Env:** supported. + + Adjusts when buffered log data is synchronously flushed. Writes outside the + valid severity range fail, and lowering the level can incur additional flush + overhead for future messages. With logging disabled no flushing occurs. + +.. py:function:: umf.logger.output(path) + + :param path: Receives the currently selected sink on reads. On writes, pass + ``"stdout"`` or ``"stderr"`` to redirect to standard streams, a + NUL-terminated file path to append to a file, or ``NULL`` to disable + logging altogether. + :type path: ``char *`` when reading, ``const char *`` when writing + + **Access:** read-write. + **Defaults / Env:** supported. + + Controls the destination for log messages. The logger closes any previously + opened file when switching targets. Providing a path longer than 256 bytes or + pointing to a file that cannot be opened causes the write to fail. Special + values ``"stdout"`` and ``"stderr"`` redirect output to the corresponding + streams. Passing ``NULL`` disables logging entirely, which is also the + initial state until a path is provided. + +Provider nodes +================ + +Provider entries are organized beneath ``umf.provider``. Use +``umf.provider.by_handle.{provider}`` with a +:type:`umf_memory_provider_handle_t` argument to reach a specific provider. +Providers can also be addressed by name through ``umf.provider.by_name.{provider}``; +append ``.{index}`` to address specyfic provider when multiple providers share the same label. +Defaults for future providers live under ``umf.provider.default.{provider_name}``, +where ``{provider_name}`` matches the canonical provider identifier (``OS``, +``FILE``, ``DEVDAX``, ``FIXED``, ``CUDA`` or ``LEVEL_ZERO``). Values written to +the default tree are saved until a matching provider is created and applied +during provider initialization. Defaults can be supplied programmatically or +through configuration strings. The entries below list only the suffix of each +node; prefix them with the appropriate ``umf.provider`` path. + +Common provider statistics +-------------------------- + +.. py:function:: .stats.allocated_memory(bytes) + + Accessible through both ``umf.provider.by_handle.{provider}`` and + ``umf.provider.by_name.{name}``. Supply the provider handle or name (with an + optional ``.{index}`` suffix for duplicates) as the first wildcard argument. + + :param bytes: Receives the total number of bytes currently outstanding. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Returns the amount of memory the provider has allocated but not yet freed. + The counter updates atomically as the provider serves requests and is not + resettable. + +.. py:function:: .stats.peak_memory(bytes) + + Available via ``umf.provider.by_handle.{provider}`` or + ``umf.provider.by_name.{name}``. Pass the provider selector as the first + wildcard argument. + + :param bytes: Receives the highest observed outstanding allocation size since + the last reset. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Reports the historical maximum allocation footprint of the provider. Combine + with :py:func:`.stats.peak_memory.reset()` to discard stale peaks when + desired. + +.. 
py:function:: .stats.peak_memory.reset() + + Invoke through ``umf.provider.by_handle.{provider}`` or + ``umf.provider.by_name.{name}`` after supplying the provider selector as the + first wildcard argument. + + **Access:** execute. + **Defaults / Env:** not supported. + + Resets the peak allocation counter to the provider's current outstanding + usage. The operation does not affect other statistics and can be invoked at + any time. + +OS memory provider (``OS``) +--------------------------- + +The OS provider supports the common statistics nodes described above and adds +the following parameter entry. + +.. py:function:: .params.ipc_enabled(enabled) + + :param enabled: Receives ``0`` when inter-process sharing is disabled and a + non-zero value when it is active. + :type enabled: ``int`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Indicates whether the OS memory provider has been initialized with IPC + support. The value is fixed at provider creation time and cannot be modified + afterwards. + +Fixed memory provider (``FIXED``) +----------------------------------- + +The fixed-size allocation provider currently exposes only the common statistics +nodes. + +DevDAX memory provider (``DEVDAX``) +------------------------------------- + +The DevDAX provider exposes the common statistics nodes described earlier. + +File memory provider (``FILE``) +----------------------------------- + +The file-backed provider exposes the common statistics nodes. + +CUDA memory provider (``CUDA``) +----------------------------------- + +The CUDA provider currently exposes only the common statistics nodes. + +Level Zero memory provider (``LEVEL_ZERO``) +----------------------------------------------- + +The Level Zero provider implements the same statistics nodes as the other providers. + +Pool nodes +========== + +Pool entries mirror the provider layout. ``umf.pool.by_handle.{pool}`` accepts a +:type:`umf_memory_pool_handle_t`, while ``umf.pool.by_name.{pool}`` addresses +pools by name with an optional ``.{index}`` suffix when names are reused. +Defaults for future pools reside under ``umf.pool.default.{pool}``, where +canonical names include ``disjoint``, ``scalable`` and ``jemalloc``. Defaults +can be written via ``umf.pool.default.`` either programmatically or +through configuration strings. The entries below list only the suffix of each +node; prefix them with the appropriate ``umf.pool`` path. + +Common pool statistics +-------------------------- + +.. py:function:: .stats.alloc_count(count) + + :param count: Receives the number of live allocations tracked by the pool. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Counts the allocations currently outstanding according to the pool's public + allocation API. The value increments on successful allocations and + decrements when memory is released. + +Disjoint pool (``disjoint``) +-------------------------------- + +.. py:function:: .params.slab_min_size(bytes) + + :param bytes: Receives or supplies the minimum slab size requested from the + provider. + :type bytes: ``size_t`` + + **Access:** read-write. (write is only avaiable through defaults) + **Defaults / Env:** supported. + + Governs how much memory the pool grabs in each slab. Lower values reduce + per-allocation slack while higher values amortize provider overhead. Writes + are accepted only before the pool completes its ``post_initialize`` phase. + +.. 
py:function:: .params.max_poolable_size(bytes) + + :param bytes: Receives or supplies the largest allocation size that is still + cached by the pool. + :type bytes: ``size_t`` + + **Access:** read-write. (write is only avaiable through defaults) + **Defaults / Env:** supported. + + Sets the cut-off for pooling allocations. Requests larger than this value are + delegated directly to the provider. Updates must occur before + ``post_initialize`` completes. + +.. py:function:: .params.capacity(count) + + :param count: Receives or supplies the maximum number of slabs each bucket + may retain. + :type count: ``size_t`` + + **Access:** read-write. (write is only avaiable through defaults) + **Defaults / Env:** supported. + + Caps the pool's cached slabs per bucket to limit memory retention. Shrinking + the capacity may cause future frees to return slabs to the provider. Writes + are rejected after ``post_initialize``. + +.. py:function:: .params.min_bucket_size(bytes) + + :param bytes: Receives or supplies the minimal allocation size a bucket may + serve. + :type bytes: ``size_t`` + + **Access:** read-write. (write is only avaiable through defaults) + **Defaults / Env:** supported. + + Controls the smallest chunk size kept in the pool, which in turn affects the + number of buckets. Writes are validated for size correctness and disallowed + after ``post_initialize``. + +.. py:function:: .params.pool_trace(level) + + :param level: Receives or supplies the tracing level for the pool. + :type level: ``int`` (``0`` disables tracing) + + **Access:** read-write. (write is only avaiable through defaults) + **Defaults / Env:** supported. + + Controls the disjoint pool's tracing features. ``0`` disables tracing. + ``1`` records slab usage totals exposed through the ``.stats.curr_slabs_*`` + and ``.stats.max_slabs_*`` nodes. ``2`` additionally tracks allocation and + free counters and prints a usage summary when the pool is destroyed. Values + greater than ``2`` also emit debug logs for every allocation and free. + Tracing must be activated before ``post_initialize``; attempting to change it + later fails with ``UMF_RESULT_ERROR_NOT_SUPPORTED``. + +.. py:function:: .stats.used_memory(bytes) + + Available under ``umf.pool.by_handle.disjoint`` and + ``umf.pool.by_name.disjoint``. Provide the pool selector as the first wildcard + argument. + + :param bytes: Receives the amount of memory that is presently allocated by + the pool's clients. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Reports the memory currently in use across all slabs by active allocations. + Available even when ``pool_trace`` is disabled. + +.. py:function:: .stats.reserved_memory(bytes) + + :param bytes: Receives the total number of bytes reserved in slabs that the + pool owns. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Returns the total slab capacity reserved by the pool, including cached free + space. Available even when ``pool_trace`` is disabled. + +.. py:function:: .stats.alloc_num(count) + + :param count: Receives the number of allocations the pool has issued. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Counts every + allocation handed out by the pool since it was created. + +.. py:function:: .stats.alloc_pool_num(count) + + :param count: Receives the number of allocations served directly from cached + slabs. 
+ :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Counts + allocations served from cached slabs without visiting the provider. + +.. py:function:: .stats.free_num(count) + + :param count: Receives the total number of frees processed by the pool. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Tracks the + number of frees observed by the pool since its creation. + +.. py:function:: .stats.curr_slabs_in_use(count) + + :param count: Receives the current number of slabs actively serving + allocations. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Returns the number of + slabs that currently have live allocations. + +.. py:function:: .stats.curr_slabs_in_pool(count) + + :param count: Receives how many slabs are cached and ready for reuse. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Reports the slabs + retained in the pool for future reuse. + +.. py:function:: .stats.max_slabs_in_use(count) + + :param count: Receives the historical maximum of simultaneously used slabs. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Provides the peak + number of slabs that were in use at the same time. + +.. py:function:: .stats.max_slabs_in_pool(count) + + :param count: Receives the largest number of slabs retained in the cache. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Returns the highest + number of slabs ever retained in the cache simultaneously. + +.. py:function:: .buckets.count(count) + + :param count: Receives the number of distinct bucket sizes. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Returns the total number of buckets in the pool. + +.. py:function:: .buckets.{id}.size(bytes) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param bytes: Receives the allocation size that the bucket serves. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Reports the allocation size serviced by the selected bucket. This value is + available even when tracing is disabled. + +.. py:function:: .buckets.{id}.stats.alloc_num(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the number of allocations performed by this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Counts every + allocation that passed through the specified bucket. + +.. py:function:: .buckets.{id}.stats.alloc_pool_num(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the number of allocations satisfied from cached slabs + in this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. 
+ + Requires tracing with ``pool_trace`` set to ``2`` or higher. Counts how many + allocations were served entirely from the bucket's cached slabs. + +.. py:function:: .buckets.{id}.stats.free_num(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the number of frees recorded for this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Tracks the + number of frees observed for the bucket. + +.. py:function:: .buckets.{id}.stats.curr_slabs_in_use(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives how many slabs for this bucket currently serve + allocations. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Returns the current + slab utilization for the bucket. + +.. py:function:: .buckets.{id}.stats.curr_slabs_in_pool(count) + + Available through ``umf.pool.by_handle.disjoint`` and + ``umf.pool.by_name.disjoint``. Provide the pool selector and bucket index as + the first two wildcard arguments. ``{id}`` denotes a bucket index of type + ``size_t``. Valid indices range from ``0`` to ``.buckets.count - 1``. + + :param count: Receives the number of slabs cached and immediately available + for this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Reports cached slabs + that the bucket can reuse without a provider call. + +.. py:function:: .buckets.{id}.stats.max_slabs_in_use(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the peak number of slabs in use for this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Provides the + historical maximum of slabs simultaneously in use for the bucket. + +.. py:function:: .buckets.{id}.stats.max_slabs_in_pool(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the largest number of slabs retained in the bucket's + cache. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Returns the maximum + number of slabs cached for later use by the bucket. + +Scalable pool (``scalable``) +------------------------------ + +The scalable pool currently exposes only the common statistics nodes. + +Jemalloc pool (``jemalloc``) +-------------------------------- + +The jemalloc-backed pool currently exposes only the common statistics nodes. + +================================================ +Adding CTL support to custom providers and pools +================================================ + +The :file:`examples/ctl/ctl_example.c` source demonstrates how a minimal +provider can expose configuration entries, statistics and runnables through the +CTL API. To add similar support to your own provider or pool you must implement +an ``ext_ctl`` callback – parse incoming CTL paths and handle +`CTL_QUERY_READ``, ``CTL_QUERY_WRITE`` and ``CTL_QUERY_RUNNABLE`` requests. 
+The callback receives a ``umf_ctl_query_source_t`` indicating whether the +query came from the application or a configuration source. Programmatic +calls pass typed binary data, while configuration sources deliver strings +that must be parsed. Wildcards (``{}``) may appear in paths and are supplied +as additional arguments. +new entries. + +During initialization UMF will execute ``post_initialize`` on the callback after +applying any queued defaults, allowing the provider or pool to finalize its +state before it is used by the application. The example converts wildcarded +paths into ``printf``-style format strings with ``%s`` and uses ``vsnprintf`` to +resolve the extra arguments. It also shows a helper that accepts integers from +either source, printing the final values from ``post_initialize``. + +Building and running the example: + +.. code-block:: bash + + cmake -B build + cmake --build build + ./build/examples/umf_example_ctl + +An optional modulus can be supplied via the environment: + +.. code-block:: bash + + UMF_CONF="umf.provider.default.ctl.m=10" ./build/examples/umf_example_ctl diff --git a/docs/config/examples.rst b/docs/config/examples.rst index a09638da92..6595b63e78 100644 --- a/docs/config/examples.rst +++ b/docs/config/examples.rst @@ -147,6 +147,35 @@ in the UMF repository. TODO +CTL statistics example +============================================================================== + +You can find the full example code in the `examples/ctl/ctl_statistics_example.c`_ file +in the UMF repository. + +The sample configures an OS memory provider and a disjoint pool, reuses the +provider's canonical ``OS`` selector obtained at runtime, assigns a custom pool +name, and then mixes ``by_handle`` and ``by_name`` selectors to explore CTL +statistics. Wildcard nodes are used to choose provider counters, build a +four-segment ``{}.{}`` chain for the named pool, reset the peak tracker, and +drill into per-bucket disjoint pool telemetry. The program prints hints on ``stderr`` +explaining which tracing level is necessary when a statistic is unavailable. + +Build and run the example with:: + + cmake -B build + cmake --build build + ./build/examples/umf_example_ctl_statistics + +Detailed disjoint pool counters are disabled unless tracing is configured +before pool creation. Enable them through the environment:: + + UMF_CONF="umf.pool.default.disjoint.params.pool_trace=2" ./build/examples/umf_example_ctl_statistics + +Tracing level ``1`` enables slab usage counters, level ``2`` adds allocation +and free statistics, and level ``3`` additionally emits verbose log messages +from the pool implementation. + IPC example with Level Zero Memory Provider ============================================================================== The full code of the example is in the `examples/ipc_level_zero/ipc_level_zero.c`_ file in the UMF repository. @@ -231,6 +260,7 @@ the :any:`umfCloseIPCHandle` function is called. .. _examples/cuda_shared_memory/cuda_shared_memory.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/cuda_shared_memory/cuda_shared_memory.c .. _examples/ipc_level_zero/ipc_level_zero.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/ipc_level_zero/ipc_level_zero.c .. _examples/custom_file_provider/custom_file_provider.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/custom_file_provider/custom_file_provider.c +.. 
_examples/ctl/ctl_statistics_example.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/ctl/ctl_statistics_example.c .. _examples/memspace: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/memspace/ .. _README: https://github.com/oneapi-src/unified-memory-framework/blob/main/README.md#memory-pool-managers .. _umf/ipc.h: https://github.com/oneapi-src/unified-memory-framework/blob/main/include/umf/ipc.h diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 91f47901cb..49cbddb257 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -273,6 +273,42 @@ if(LINUX) WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() + set(EXAMPLE_NAME umf_example_ctl) + + add_umf_executable( + NAME ${EXAMPLE_NAME} + SRCS ctl/ctl_example.c + LIBS umf ${UMF_HWLOC_NAME}) + + target_include_directories( + ${EXAMPLE_NAME} PRIVATE ${UMF_CMAKE_SOURCE_DIR}/src/utils + ${UMF_CMAKE_SOURCE_DIR}/include) + + target_link_directories(${EXAMPLE_NAME} PRIVATE ${LIBHWLOC_LIBRARY_DIRS}) + + add_test( + NAME ${EXAMPLE_NAME} + COMMAND ${EXAMPLE_NAME} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + + set(EXAMPLE_NAME umf_example_ctl_statistics) + + add_umf_executable( + NAME ${EXAMPLE_NAME} + SRCS ctl/ctl_statistics_example.c + LIBS umf ${UMF_HWLOC_NAME}) + + target_include_directories( + ${EXAMPLE_NAME} PRIVATE ${UMF_CMAKE_SOURCE_DIR}/src/utils + ${UMF_CMAKE_SOURCE_DIR}/include) + + target_link_directories(${EXAMPLE_NAME} PRIVATE ${LIBHWLOC_LIBRARY_DIRS}) + + add_test( + NAME ${EXAMPLE_NAME} + COMMAND ${EXAMPLE_NAME} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + if(UMF_POOL_JEMALLOC_ENABLED) set(EXAMPLE_NAME umf_example_dram_and_fsdax) diff --git a/examples/README.md b/examples/README.md index 70d114a63a..92da754344 100644 --- a/examples/README.md +++ b/examples/README.md @@ -66,3 +66,14 @@ processes: a producer and a consumer that communicate in the following way - Producer puts the IPC handle - Consumer shuts down - Producer shuts down + +## CTL example + +This example demonstrates how to add CTL support to a custom memory +provider. It sets variables ``a`` and ``b`` through CTL, plus it allows +for the modulus ``m`` loaded from the environment or a configuration file. +Addition and subtraction operations return results modulo ``m`` and the +result ``c`` can be retrieved using the CTL API. For example, to set the +modulus through an environment variable run:: + + UMF_CONF="umf.provider.default.ctl.m=10" ./umf_example_ctl diff --git a/examples/ctl/CMakeLists.txt b/examples/ctl/CMakeLists.txt new file mode 100644 index 0000000000..9e92a7aa22 --- /dev/null +++ b/examples/ctl/CMakeLists.txt @@ -0,0 +1,81 @@ +#Copyright(C) 2024 Intel Corporation +#Under the Apache License v2.0 with LLVM Exceptions.See LICENSE.TXT. 
+#SPDX - License - Identifier : Apache - 2.0 WITH LLVM - exception + +cmake_minimum_required(VERSION 3.14.0 FATAL_ERROR) +project(umf_example_ctl LANGUAGES C) +enable_testing() + +set(UMF_EXAMPLE_DIR "${CMAKE_SOURCE_DIR}/..") +list(APPEND CMAKE_MODULE_PATH "${UMF_EXAMPLE_DIR}/cmake") +message(STATUS "CMAKE_MODULE_PATH=${CMAKE_" "MODULE_PATH}") + +find_package(PkgConfig) +pkg_check_modules(LIBUMF libumf) +if(NOT LIBUMF_FOUND) + find_package(LIBUMF REQUIRED libumf) +endif() + +pkg_check_modules(LIBHWLOC hwloc >= 2.3.0) +if(NOT LIBHWLOC_FOUND) + find_package(LIBHWLOC 2.3.0 REQUIRED hwloc) +endif() + +# build the example +set(EXAMPLE_NAME umf_example_ctl) +add_executable(${ EXAMPLE_NAME} ctl_example.c) +target_include_directories(${ EXAMPLE_NAME} PRIVATE ${ LIBUMF_INCLUDE_DIRS}) +target_link_directories( + ${ + EXAMPLE_NAME} + PRIVATE + ${ + LIBHWLOC_LIBRARY_DIRS}) +target_link_libraries(${ EXAMPLE_NAME} PRIVATE ${LIBUMF_LIBRARIES} hwloc) + +add_test( + NAME ${EXAMPLE_NAME} + COMMAND ${ EXAMPLE_NAME} + WORKING_DIRECTORY ${ CMAKE_CURRENT_BINARY_DIR}) + +set_tests_properties(${EXAMPLE_NAME} PROPERTIES LABELS "example-standalone") + +if(LINUX) + # set LD_LIBRARY_PATH + set_property( + TEST ${ EXAMPLE_NAME} + PROPERTY ENVIRONMENT_MODIFICATION + "LD_LIBRARY_PATH=path_list_append:" + "${LIBUMF_LIBRARY_DIRS};LD_" + "LIBRARY_PATH=path_list_append:${" + "LIBHWLOC_LIBRARY_DIRS}") +endif() + +set(EXAMPLE_NAME umf_example_ctl_statistics) +add_executable(${ EXAMPLE_NAME} ctl_statistics_example.c) +target_include_directories(${ EXAMPLE_NAME} PRIVATE ${ LIBUMF_INCLUDE_DIRS}) +target_link_directories( + ${ + EXAMPLE_NAME} + PRIVATE + ${ + LIBHWLOC_LIBRARY_DIRS}) +target_link_libraries(${ EXAMPLE_NAME} PRIVATE ${LIBUMF_LIBRARIES} hwloc) + +add_test( + NAME ${EXAMPLE_NAME} + COMMAND ${ EXAMPLE_NAME} + WORKING_DIRECTORY ${ CMAKE_CURRENT_BINARY_DIR}) + +set_tests_properties(${EXAMPLE_NAME} PROPERTIES LABELS "example-standalone") + +if(LINUX) + # set LD_LIBRARY_PATH + set_property( + TEST ${ EXAMPLE_NAME} + PROPERTY ENVIRONMENT_MODIFICATION + "LD_LIBRARY_PATH=path_list_append:" + "${LIBUMF_LIBRARY_DIRS};LD_" + "LIBRARY_PATH=path_list_append:${" + "LIBHWLOC_LIBRARY_DIRS}") +endif() diff --git a/examples/ctl/ctl_example.c b/examples/ctl/ctl_example.c new file mode 100644 index 0000000000..4d1e47eace --- /dev/null +++ b/examples/ctl/ctl_example.c @@ -0,0 +1,310 @@ +#define _GNU_SOURCE 1 +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +// Minimal memory provider demonstrating CTL integration + +// Provider state exposed via CTL +typedef struct ctl_provider_t { + int a; + int b; + int c; + int m; // modulus value, optional +} ctl_provider_t; + +static umf_result_t ctl_init(const void *params, void **provider) { + (void)params; + ctl_provider_t *p = calloc(1, sizeof(*p)); + if (!p) { + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + *provider = p; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_finalize(void *provider) { + free(provider); + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_alloc(void *provider, size_t size, size_t alignment, + void **ptr) { + (void)provider; + (void)alignment; + *ptr = malloc(size); + if (*ptr == NULL) { + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_free(void *provider, void *ptr, size_t size) { + (void)provider; + (void)size; + free(ptr); + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_get_last_native_error(void *provider, + const char **ppMessage, 
+ int32_t *pError) { + (void)provider; + if (ppMessage) { + *ppMessage = NULL; + } + if (pError) { + *pError = 0; + } + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_get_recommended_page_size(void *provider, size_t size, + size_t *pageSize) { + (void)provider; + (void)size; + *pageSize = 4096; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_get_min_page_size(void *provider, const void *ptr, + size_t *pageSize) { + (void)provider; + (void)ptr; + *pageSize = 4096; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_get_name(void *provider, const char **name) { + (void)provider; + if (name) { + *name = "ctl"; + } + return UMF_RESULT_SUCCESS; +} + +// Wildcards (`{}`) become extra args; convert them to `%s` for `vsnprintf`. +static void replace_braces_with_percent_s(const char *name, char *fmt, + size_t fmt_size) { + size_t i = 0, j = 0; + while (name[i] != '\0' && j < fmt_size - 1) { + if (name[i] == '{' && name[i + 1] == '}' && j < fmt_size - 2) { + fmt[j++] = '%'; + fmt[j++] = 's'; + i += 2; + } else { + fmt[j++] = name[i++]; + } + } + fmt[j] = '\0'; +} + +// Parse an integer from programmatic (binary) or configuration (string) input. +static umf_result_t parse_int(void *arg, size_t size, + umf_ctl_query_source_t source, int *out) { + if (!arg || !out) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (source == CTL_QUERY_PROGRAMMATIC) { + if (size != sizeof(int)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *out = *(int *)arg; + return UMF_RESULT_SUCCESS; + } else if (source == CTL_QUERY_CONFIG_INPUT) { + char *buf = malloc(size + 1); + if (!buf) { + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + memcpy(buf, arg, size); + buf[size] = '\0'; + *out = (int)strtol(buf, NULL, 10); + free(buf); + return UMF_RESULT_SUCCESS; + } + + return UMF_RESULT_ERROR_INVALID_ARGUMENT; +} + +// CTL callback interpreting provider-specific paths and actions +static umf_result_t ctl_ctl(void *provider, umf_ctl_query_source_t source, + const char *name, void *arg, size_t size, + umf_ctl_query_type_t queryType, va_list args) { + ctl_provider_t *p = (ctl_provider_t *)provider; + + char fmt[128]; + char formatted[128]; + replace_braces_with_percent_s(name, fmt, sizeof(fmt)); + va_list args_copy; + va_copy(args_copy, args); + vsnprintf(formatted, sizeof(formatted), fmt, args_copy); + va_end(args_copy); + + if (queryType == CTL_QUERY_RUNNABLE && + strcmp(formatted, "post_initialize") == 0) { + // Called once defaults have been loaded + printf("post_initialize: a=%d b=%d c=%d m=%d\n", p->a, p->b, p->c, + p->m); + return UMF_RESULT_SUCCESS; + } + + if (queryType == CTL_QUERY_WRITE && strcmp(formatted, "a") == 0) { + int val = 0; + umf_result_t ret = parse_int(arg, size, source, &val); + if (ret != UMF_RESULT_SUCCESS) { + return ret; + } + p->a = val; + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_WRITE && strcmp(formatted, "b") == 0) { + int val = 0; + umf_result_t ret = parse_int(arg, size, source, &val); + if (ret != UMF_RESULT_SUCCESS) { + return ret; + } + p->b = val; + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_WRITE && strcmp(formatted, "m") == 0) { + int val = 0; + umf_result_t ret = parse_int(arg, size, source, &val); + if (ret != UMF_RESULT_SUCCESS) { + return ret; + } + p->m = val; + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_RUNNABLE && strcmp(formatted, "addition") == 0) { + if (p->m) { + p->c = (p->a + p->b) % p->m; + } else { + p->c = p->a + p->b; + } + if (arg && size == sizeof(int)) { + *(int *)arg = p->c; + } 
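+        /* The sum is also kept in p->c, so it can be read back later through
+         * the "c" read node. */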
+ return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_RUNNABLE && + strcmp(formatted, "substraction") == 0) { + if (p->m) { + p->c = (p->a - p->b) % p->m; + } else { + p->c = p->a - p->b; + } + if (arg && size == sizeof(int)) { + *(int *)arg = p->c; + } + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_READ && strcmp(formatted, "c") == 0) { + if (arg == NULL || size != sizeof(int)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(int *)arg = p->c; + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_READ && strcmp(formatted, "m") == 0) { + if (arg == NULL || size != sizeof(int)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(int *)arg = p->m; + return UMF_RESULT_SUCCESS; + } + + return UMF_RESULT_ERROR_INVALID_CTL_PATH; +} + +static umf_memory_provider_ops_t ctl_ops = { + .version = UMF_PROVIDER_OPS_VERSION_CURRENT, + .initialize = ctl_init, + .finalize = ctl_finalize, + .alloc = ctl_alloc, + .free = ctl_free, + .get_last_native_error = ctl_get_last_native_error, + .get_recommended_page_size = ctl_get_recommended_page_size, + .get_min_page_size = ctl_get_min_page_size, + .get_name = ctl_get_name, + .ext_ctl = ctl_ctl, // register CTL handler +}; + +int main(void) { + umf_result_t res; + umf_memory_provider_handle_t provider; + + // Create provider instance and wire in CTL callbacks + res = umfMemoryProviderCreate(&ctl_ops, NULL, &provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to create a memory provider!\n"); + return -1; + } + printf("ctl provider created at %p\n", (void *)provider); + // Defaults are now applied and `post_initialize` has run + + int a = 10; + int b = 7; + // Set variables via CTL; `{}` is replaced by the provider handle + res = umfCtlSet("umf.provider.by_handle.{}.a", &a, sizeof(a), provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to set a!\n"); + goto out; + } + res = umfCtlSet("umf.provider.by_handle.{}.b", &b, sizeof(b), provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to set b!\n"); + goto out; + } + int m = 0; + // Read optional modulus from config or environment you can use {} to replace any node + res = + umfCtlGet("umf.provider.by_handle.{}.{}", &m, sizeof(m), provider, "c"); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to get m!\n"); + goto out; + } + printf("using modulus m=%d\n", m); + + int result = 0; + + // Execute addition and fetch the result + res = umfCtlExec("umf.provider.by_handle.{}.addition", NULL, 0, provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to execute addition!\n"); + goto out; + } + res = umfCtlGet("umf.provider.by_handle.{}.c", &result, sizeof(result), + provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to get c!\n"); + goto out; + } + printf("addition result: %d\n", result); + + // Execute subtraction and fetch the result + res = + umfCtlExec("umf.provider.by_handle.{}.substraction", NULL, 0, provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to execute substraction!\n"); + goto out; + } + res = umfCtlGet("umf.provider.by_handle.{}.c", &result, sizeof(result), + provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to get c!\n"); + goto out; + } + printf("substraction result: %d\n", result); + +out: + umfMemoryProviderDestroy(provider); + return 0; +} diff --git a/examples/ctl/ctl_statistics_example.c b/examples/ctl/ctl_statistics_example.c new file mode 100644 index 0000000000..246d1d913b --- /dev/null +++ 
b/examples/ctl/ctl_statistics_example.c @@ -0,0 +1,292 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#include "umf/base.h" +#include +#include + +#include +#include +#include +#include +#include + +static void print_provider_stats(const char *stage, + umf_memory_provider_handle_t provider, + const char *provider_name) { + size_t outstanding = 0; + size_t peak = 0; + + umf_result_t res = + umfCtlGet("umf.provider.by_handle.{}.stats.allocated_memory", + &outstanding, sizeof(outstanding), provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, + "%s: failed to read provider allocated memory (error %d)\n", + stage, (int)res); + return; + } + + /* you can also pass any nodes through va args by using {} */ + res = umfCtlGet("umf.provider.by_handle.{}.stats.{}", &peak, sizeof(peak), + provider, "peak_memory"); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "%s: failed to read provider peak memory (error %d)\n", + stage, (int)res); + return; + } + + printf("%s (%s): outstanding=%zu bytes, peak=%zu bytes\n", stage, + provider_name, outstanding, peak); +} + +/* Provide tutorial guidance when disjoint pool counters require tracing. */ +static bool report_pool_stat_failure(const char *label, + int required_trace_level, + umf_result_t res) { + if (res == UMF_RESULT_SUCCESS) { + return false; + } + + if (res == UMF_RESULT_ERROR_NOT_SUPPORTED) { + int hint_level = required_trace_level; + const char *reason = "verbose logging"; + + if (hint_level <= 1) { + hint_level = 1; + reason = "slab statistics"; + } else if (hint_level == 2) { + reason = "allocation counters"; + } else if (hint_level < 3) { + hint_level = 2; + reason = "allocation counters"; + } else { + hint_level = 3; + } + + fprintf( + stderr, + "Cannot read %s because disjoint pool tracing level %d is " + "required. " + "This example do not set pool_trace so you can enable it through " + "env variable.\n" + "Set UMF_CONF=\"umf.pool.default.disjoint.params.pool_trace=%d\" " + "before running to enable %s%s.\n", + label, hint_level, hint_level, reason, + hint_level < 3 ? " (level 3 also enables verbose logging)" : ""); + } else { + fprintf(stderr, "Failed to read %s (error %d)\n", label, (int)res); + } + + return true; +} + +static void print_pool_stat_by_handle(const char *label, + umf_memory_pool_handle_t pool, + const char *stat_node, + int required_trace_level) { + size_t value = 0; + /* Surround the {} placeholder with literal segments so CTL resolves + * whichever pool handle the allocator hands back. */ + umf_result_t res = + umfCtlGet("umf.pool.by_handle.{}.stats.{}", &value, sizeof(value), + pool, stat_node); + if (report_pool_stat_failure(label, required_trace_level, res)) { + return; + } + + printf("%s: %zu\n", label, value); +} + +static void print_pool_bucket_stat_by_name(const char *label, + const char *pool_name, + size_t bucket_index, + const char *stat_node, + int required_trace_level) { + size_t value = 0; + /* Anchor the pool label with by_name while {} wildcards cover the ordinal + * and statistic nodes to highlight mixed selectors. 
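     *
     * Illustrative expansion (hypothetical values): with pool_name
     * "ctl_stats_pool", bucket_index 3 and stat_node "alloc_num", the query
     * below resolves to
     *
     *     umf.pool.by_name.ctl_stats_pool.buckets.3.stats.alloc_num
     *
     * i.e. the same thing as spelling the whole path out literally with no
     * {} placeholders.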
*/ + umf_result_t res = + umfCtlGet("umf.pool.by_name.{}.buckets.{}.stats.{}", &value, + sizeof(value), pool_name, bucket_index, stat_node); + + if (report_pool_stat_failure(label, required_trace_level, res)) { + return; + } + + printf("%s: %zu\n", label, value); +} + +#define pool_name "ctl_stats_pool" +int main(void) { + const size_t provider_allocation_size = 64 * 1024; + const size_t pool_allocation_size = 4096; + const char *provider_name = NULL; + void *pool_memory = NULL; + umf_result_t res = UMF_RESULT_SUCCESS; + + const umf_memory_provider_ops_t *provider_ops = umfOsMemoryProviderOps(); + umf_os_memory_provider_params_handle_t os_params = NULL; + umf_memory_provider_handle_t provider = NULL; + umf_disjoint_pool_params_handle_t disjoint_params = NULL; + umf_memory_pool_handle_t pool = NULL; + + res = umfOsMemoryProviderParamsCreate(&os_params); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, + "Failed to create OS memory provider params (error %d)\n", + (int)res); + return -1; + } + + res = umfMemoryProviderCreate(provider_ops, os_params, &provider); + umfOsMemoryProviderParamsDestroy(os_params); + os_params = NULL; + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to create OS memory provider (error %d)\n", + (int)res); + return -1; + } + + res = umfMemoryProviderGetName(provider, &provider_name); + if (res != UMF_RESULT_SUCCESS || provider_name == NULL) { + provider_name = "OS"; + } + + print_provider_stats("Provider stats before allocation", provider, + provider_name); + + void *provider_memory = NULL; + res = umfMemoryProviderAlloc(provider, provider_allocation_size, 0, + &provider_memory); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Provider allocation failed (error %d)\n", (int)res); + goto cleanup; + } + + print_provider_stats("Provider stats after allocation", provider, + provider_name); + + res = umfMemoryProviderFree(provider, provider_memory, + provider_allocation_size); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Provider free failed (error %d)\n", (int)res); + goto cleanup; + } + provider_memory = NULL; + + print_provider_stats("Provider stats after free", provider, provider_name); + + res = umfCtlExec("umf.provider.by_handle.{}.stats.peak_memory.reset", NULL, + 0, provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to reset provider peak memory (error %d)\n", + (int)res); + goto cleanup; + } + + print_provider_stats("Provider stats after peak reset", provider, + provider_name); + + const umf_memory_pool_ops_t *pool_ops = umfDisjointPoolOps(); + res = umfDisjointPoolParamsCreate(&disjoint_params); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to create disjoint pool params (error %d)\n", + (int)res); + goto cleanup; + } + + /* set name of the pool so we can easly ref it by using name */ + res = umfDisjointPoolParamsSetName(disjoint_params, pool_name); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to name disjoint pool (error %d)\n", (int)res); + goto cleanup; + } + + res = umfPoolCreate(pool_ops, provider, disjoint_params, 0, &pool); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to create disjoint pool (error %d)\n", + (int)res); + goto cleanup; + } + + pool_memory = umfPoolMalloc(pool, pool_allocation_size); + if (pool_memory == NULL) { + fprintf(stderr, "Disjoint pool allocation failed\n"); + goto cleanup; + } + + print_pool_stat_by_handle("Disjoint pool used_memory", pool, "used_memory", + 0); + print_pool_stat_by_handle("Disjoint pool curr_slabs_in_use", pool, + 
"curr_slabs_in_use", 1); + print_pool_stat_by_handle("Disjoint pool alloc_num", pool, "alloc_num", 2); + + size_t pool_name_count = 0; + + res = umfCtlGet("umf.pool.by_name.{}.count", &pool_name_count, + sizeof(pool_name_count), pool_name); + + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to read pool count (error %d)\n", (int)res); + goto cleanup; + } + printf("There is %zu pools with name %s\n", pool_name_count, pool_name); + size_t bucket_count = 0; + + // you can put pool_name directly in ctl string without {} if you want + res = umfCtlGet("umf.pool.by_name." pool_name ".buckets.count", + &bucket_count, sizeof(bucket_count)); + + for (size_t bucket = 0; bucket < bucket_count; bucket++) { + size_t bucket_size = 0; + // after pool name you can add pool index if there are multiple pools with + // the same name, if there is only one it is safe to omit it + // but still you can provide it + res = umfCtlGet("umf.pool.by_name.{}.0.buckets.{}.size", &bucket_size, + sizeof(bucket_size), pool_name, bucket); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to read bucket size (error %d)\n", + (int)res); + goto cleanup; + } + + if (bucket_size == pool_allocation_size) { + printf("Disjoint pool bucket[%zu] size: %zu bytes\n", bucket, + bucket_size); + print_pool_bucket_stat_by_name("Disjoint pool bucket alloc_num", + pool_name, bucket, "alloc_num", 2); + print_pool_bucket_stat_by_name( + "Disjoint pool bucket curr_slabs_in_use", pool_name, bucket, + "curr_slabs_in_use", 1); + goto cleanup; + } + } + +cleanup: + if (pool_memory) { + umfFree(pool_memory); + } + + if (pool) { + umfPoolDestroy(pool); + } + if (disjoint_params) { + umfDisjointPoolParamsDestroy(disjoint_params); + } + if (provider_memory) { + umfMemoryProviderFree(provider, provider_memory, + provider_allocation_size); + } + if (provider) { + umfMemoryProviderDestroy(provider); + } + + return 0; +} diff --git a/include/umf/memory_pool.h b/include/umf/memory_pool.h index e1ed9fbbad..5662684fa7 100644 --- a/include/umf/memory_pool.h +++ b/include/umf/memory_pool.h @@ -43,7 +43,6 @@ typedef enum umf_pool_create_flag_t { /// @brief Type for combinations of pool creation flags typedef uint32_t umf_pool_create_flags_t; -/// @anchor umfPoolCreate /// @brief Creates new memory pool. /// @param ops instance of umf_memory_pool_ops_t /// @param provider memory provider that will be used for coarse-grain allocations. diff --git a/src/ctl/ctl_defaults.c b/src/ctl/ctl_defaults.c index 3d087d51bf..c8c8027641 100644 --- a/src/ctl/ctl_defaults.c +++ b/src/ctl/ctl_defaults.c @@ -16,12 +16,13 @@ #include "utlist.h" static umf_result_t default_ctl_helper(ctl_ext_ctl_fn fn, void *ctl, + umf_ctl_query_source_t source, const char *name, void *arg, size_t size, ...) 
{ va_list empty_args; va_start(empty_args, size); - umf_result_t ret = fn(ctl, CTL_QUERY_PROGRAMMATIC, name, arg, size, - CTL_QUERY_WRITE, empty_args); + umf_result_t ret = + fn(ctl, source, name, arg, size, CTL_QUERY_WRITE, empty_args); va_end(empty_args); return ret; } @@ -123,7 +124,7 @@ void ctl_default_apply(ctl_default_entry_t *list, const char *pname, strncmp(it->name, pname, pname_len) == 0 && it->name[pname_len] == '.') { const char *ctl_name = it->name + pname_len + 1; - default_ctl_helper(ext_ctl, priv, ctl_name, it->value, + default_ctl_helper(ext_ctl, priv, it->source, ctl_name, it->value, it->value_size); } } diff --git a/src/memory_provider.c b/src/memory_provider.c index c147cc3a96..3e50343ec6 100644 --- a/src/memory_provider.c +++ b/src/memory_provider.c @@ -34,8 +34,8 @@ static umf_result_t CTL_SUBTREE_HANDLER(CTL_NONAME, by_handle)( umf_memory_provider_handle_t hProvider = *(umf_memory_provider_handle_t *)indexes->arg; - hProvider->ops.ext_ctl(hProvider->provider_priv, /*unused*/ 0, extra_name, - arg, size, queryType, args); + hProvider->ops.ext_ctl(hProvider->provider_priv, source, extra_name, arg, + size, queryType, args); return UMF_RESULT_SUCCESS; } From 1bdfdf0f7344b274c307e5f5a441432d07edc24c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Fri, 12 Sep 2025 16:39:18 +0200 Subject: [PATCH 04/11] add new error to indicate unknown/unsuported path in ctl --- include/umf/base.h | 4 +++- include/umf/memory_pool_ops.h | 3 +++ include/umf/memory_provider_ops.h | 3 +++ src/ctl/ctl.c | 4 ++-- src/ctl/ctl_defaults.c | 2 +- src/memory_pool.c | 25 ++++++++++++------------- src/memory_provider.c | 27 +++++++++++---------------- src/pool/pool_scalable.c | 6 +++--- test/common/pool.hpp | 2 +- test/common/provider.hpp | 2 +- test/ctl/ctl_unittest.cpp | 6 +++--- test/pools/disjoint_pool_ctl.cpp | 8 ++++---- test/utils/cpp_helpers.hpp | 2 +- 13 files changed, 48 insertions(+), 46 deletions(-) diff --git a/include/umf/base.h b/include/umf/base.h index 096934b184..11d7b723b4 100644 --- a/include/umf/base.h +++ b/include/umf/base.h @@ -48,7 +48,9 @@ typedef enum umf_result_t { UMF_RESULT_ERROR_DEPENDENCY_UNAVAILABLE = 7, ///< External required dependency is unavailable or missing UMF_RESULT_ERROR_OUT_OF_RESOURCES = 8, ///< Out of internal resources - UMF_RESULT_ERROR_UNKNOWN = 0x7ffffffe ///< Unknown error + UMF_RESULT_ERROR_INVALID_CTL_PATH = + 9, ///< CTL path is not supported or not found + UMF_RESULT_ERROR_UNKNOWN = 0x7ffffffe ///< Unknown error } umf_result_t; /// @brief Handle to the memory properties structure diff --git a/include/umf/memory_pool_ops.h b/include/umf/memory_pool_ops.h index e13ed9ca94..8d5dd46fec 100644 --- a/include/umf/memory_pool_ops.h +++ b/include/umf/memory_pool_ops.h @@ -166,6 +166,9 @@ typedef struct umf_memory_pool_ops_t { /// @param args variable arguments for the operation. /// /// @return umf_result_t result of the control operation. + /// Implementations must return + /// UMF_RESULT_ERROR_INVALID_CTL_PATH if the given path is not + /// supported. /// umf_result_t (*ext_ctl)(void *hPool, umf_ctl_query_source_t source, const char *name, void *arg, size_t size, diff --git a/include/umf/memory_provider_ops.h b/include/umf/memory_provider_ops.h index d7df5f8236..80fb28860d 100644 --- a/include/umf/memory_provider_ops.h +++ b/include/umf/memory_provider_ops.h @@ -283,6 +283,9 @@ typedef struct umf_memory_provider_ops_t { /// @param args variable arguments for the operation. /// /// @return umf_result_t result of the control operation. 
+ /// Implementations must return + /// UMF_RESULT_ERROR_INVALID_CTL_PATH if the given path is not + /// supported. /// umf_result_t (*ext_ctl)(void *provider, umf_ctl_query_source_t source, const char *name, void *arg, size_t size, diff --git a/src/ctl/ctl.c b/src/ctl/ctl.c index 3dc8999371..ac9f294ed1 100644 --- a/src/ctl/ctl.c +++ b/src/ctl/ctl.c @@ -492,7 +492,7 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, // if the appropriate node (leaf or subtree) is not found, then return error if (n == NULL || (n->type != CTL_NODE_LEAF && n->type != CTL_NODE_SUBTREE)) { - ret.value = UMF_RESULT_ERROR_INVALID_ARGUMENT; + ret.value = UMF_RESULT_ERROR_INVALID_CTL_PATH; goto out; } @@ -554,7 +554,7 @@ umf_result_t ctl_query(struct ctl *ctl, void *ctx, va_end(args_copy); - return ret.is_valid ? ret.value : UMF_RESULT_ERROR_INVALID_ARGUMENT; + return ret.is_valid ? ret.value : UMF_RESULT_ERROR_INVALID_CTL_PATH; } /* diff --git a/src/ctl/ctl_defaults.c b/src/ctl/ctl_defaults.c index c8c8027641..3290cbe8db 100644 --- a/src/ctl/ctl_defaults.c +++ b/src/ctl/ctl_defaults.c @@ -95,7 +95,7 @@ umf_result_t ctl_default_subtree(ctl_default_entry_t **list, utils_mutex_t *mtx, if (!entry) { LOG_WARN("Wrong path name: %s", extra_name); utils_mutex_unlock(mtx); - return UMF_RESULT_ERROR_INVALID_ARGUMENT; + return UMF_RESULT_ERROR_INVALID_CTL_PATH; } if (entry->value_size > size) { diff --git a/src/memory_pool.c b/src/memory_pool.c index 9260398895..a4663e58d6 100644 --- a/src/memory_pool.c +++ b/src/memory_pool.c @@ -8,17 +8,17 @@ */ #include +#include #include #include -#include #include #include #include #include "base_alloc_global.h" -#include "ctl/ctl_internal.h" #include "ctl/ctl_defaults.h" +#include "ctl/ctl_internal.h" #include "libumf.h" #include "memory_pool_internal.h" #include "memory_provider_internal.h" @@ -167,7 +167,7 @@ static umf_result_t CTL_SUBTREE_HANDLER(CTL_NONAME, by_handle)( queryType, arg, size, args2); va_end(args2); - if (ret == UMF_RESULT_ERROR_INVALID_ARGUMENT) { + if (ret == UMF_RESULT_ERROR_INVALID_CTL_PATH) { // Node was not found in pool_ctl_root, try to query the specific pool ret = hPool->ops.ext_ctl(hPool->pool_priv, source, extra_name, arg, size, queryType, args); @@ -323,7 +323,7 @@ static umf_result_t CTL_SUBTREE_HANDLER(CTL_NONAME, by_name)( extra_name, queryType, arg, size, args2); va_end(args2); - if (r == UMF_RESULT_ERROR_INVALID_ARGUMENT) { + if (r == UMF_RESULT_ERROR_INVALID_CTL_PATH) { va_copy(args2, args); r = it->pool->ops.ext_ctl(it->pool->pool_priv, source, extra_name, arg, size, queryType, args2); @@ -363,7 +363,8 @@ umfDefaultCtlPoolHandle(void *hPool, umf_ctl_query_source_t operationType, (void)size; (void)queryType; (void)args; - return UMF_RESULT_ERROR_NOT_SUPPORTED; + // if given path is not supported implementation should return UMF_RESULT_ERROR_INVALID_CTL_PATH + return UMF_RESULT_ERROR_INVALID_CTL_PATH; } static umf_result_t umfDefaultTrimMemory(void *provider, @@ -377,13 +378,11 @@ static umf_result_t umfPoolPostInitialize(const umf_memory_pool_ops_t *ops, void *pool_priv, ...) 
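/*
 * Once a pool is constructed and any matching "umf.pool.default.<name>.*"
 * entries have been pushed through ext_ctl, UMF issues a single
 * CTL_QUERY_RUNNABLE query named "post_initialize" so the pool can finish any
 * setup that depends on the final parameter values. A minimal pool-side
 * handler (a sketch, not a required shape) looks like:
 *
 *     if (queryType == CTL_QUERY_RUNNABLE &&
 *         strcmp(name, "post_initialize") == 0) {
 *         // finish lazy initialization here
 *         return UMF_RESULT_SUCCESS;
 *     }
 *     return UMF_RESULT_ERROR_INVALID_CTL_PATH; // path not recognized
 *
 * which is why UMF_RESULT_ERROR_INVALID_CTL_PATH from this call is not
 * treated as an error by umfPoolCreateInternal below.
 */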
{ va_list args; va_start(args, pool_priv); - umf_result_t ret = ops->ext_ctl(pool_priv, CTL_QUERY_PROGRAMMATIC, - "post_initialize", NULL, 0, - CTL_QUERY_RUNNABLE, args); + umf_result_t ret = + ops->ext_ctl(pool_priv, CTL_QUERY_PROGRAMMATIC, "post_initialize", NULL, + 0, CTL_QUERY_RUNNABLE, args); va_end(args); - if (ret == UMF_RESULT_ERROR_INVALID_ARGUMENT) { - ret = UMF_RESULT_ERROR_NOT_SUPPORTED; - } + return ret; } @@ -481,8 +480,8 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, ctl_default_apply(pool_default_list, pname, ops->ext_ctl, pool->pool_priv); - ret = umfPoolPostInitialize(ops, pool->pool_priv); - if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_NOT_SUPPORTED) { + ret = umfPoolPostInitialize(&pool->ops, pool->pool_priv); + if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_INVALID_CTL_PATH) { LOG_ERR("Failed to post-initialize pool"); goto err_pool_init; } diff --git a/src/memory_provider.c b/src/memory_provider.c index 3e50343ec6..f02830ac1a 100644 --- a/src/memory_provider.c +++ b/src/memory_provider.c @@ -8,19 +8,19 @@ */ #include +#include #include #include #include #include -#include #include #include #include "base_alloc.h" #include "base_alloc_global.h" -#include "ctl/ctl_internal.h" #include "ctl/ctl_defaults.h" +#include "ctl/ctl_internal.h" #include "libumf.h" #include "memory_provider_internal.h" #include "utils_assert.h" @@ -51,9 +51,7 @@ static ctl_default_entry_t *provider_default_list = NULL; static utils_mutex_t provider_default_mtx; static UTIL_ONCE_FLAG mem_provider_ctl_initialized = UTIL_ONCE_FLAG_INIT; -static void provider_ctl_init(void) { - utils_mutex_init(&provider_default_mtx); -} +static void provider_ctl_init(void) { utils_mutex_init(&provider_default_mtx); } static umf_result_t CTL_SUBTREE_HANDLER(default)( void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, @@ -68,8 +66,7 @@ static umf_result_t CTL_SUBTREE_HANDLER(default)( } umf_ctl_node_t CTL_NODE(provider)[] = {CTL_CHILD_WITH_ARG(by_handle), - CTL_LEAF_SUBTREE(default), - CTL_NODE_END}; + CTL_LEAF_SUBTREE(default), CTL_NODE_END}; static umf_result_t umfDefaultPurgeLazy(void *provider, void *ptr, size_t size) { @@ -155,7 +152,8 @@ umfDefaultCtlHandle(void *provider, umf_ctl_query_source_t operationType, (void)size; (void)queryType; (void)args; - return UMF_RESULT_ERROR_NOT_SUPPORTED; + // if given path is not supported implementation should return UMF_RESULT_ERROR_INVALID_CTL_PATH + return UMF_RESULT_ERROR_INVALID_CTL_PATH; } static umf_result_t @@ -206,7 +204,6 @@ void assignOpsExtDefaults(umf_memory_provider_ops_t *ops) { ops->ext_get_allocation_properties_size = umfDefaultGetAllocationPropertiesSize; } - } void assignOpsIpcDefaults(umf_memory_provider_ops_t *ops) { @@ -235,13 +232,11 @@ static umf_result_t umfProviderPostInitialize(umf_memory_provider_ops_t *ops, void *provider_priv, ...) 
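/*
 * Provider-side counterpart of the pool hook above, invoked from
 * umfMemoryProviderCreate(). For orientation, an application-level query such
 * as
 *
 *     umfCtlExec("umf.provider.by_handle.{}.stats.peak_memory.reset",
 *                NULL, 0, provider);
 *
 * travels: global CTL tree -> "provider.by_handle" subtree handler ->
 * provider->ops.ext_ctl(priv, source, "stats.peak_memory.reset", ...), with
 * the original query source now forwarded unchanged so handlers can keep
 * telling string-typed config input (UMF_CONF / UMF_CONF_FILE,
 * CTL_QUERY_CONFIG_INPUT) apart from binary umfCtlSet() payloads
 * (CTL_QUERY_PROGRAMMATIC). The "post_initialize" query issued here is just a
 * special case of that same ext_ctl path.
 */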
{ va_list args; va_start(args, provider_priv); - umf_result_t ret = ops->ext_ctl(provider_priv, CTL_QUERY_PROGRAMMATIC, - "post_initialize", NULL, 0, - CTL_QUERY_RUNNABLE, args); + umf_result_t ret = + ops->ext_ctl(provider_priv, CTL_QUERY_PROGRAMMATIC, "post_initialize", + NULL, 0, CTL_QUERY_RUNNABLE, args); va_end(args); - if (ret == UMF_RESULT_ERROR_INVALID_ARGUMENT) { - ret = UMF_RESULT_ERROR_NOT_SUPPORTED; - } + return ret; } @@ -355,7 +350,7 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops, provider->provider_priv); } ret = umfProviderPostInitialize(&provider->ops, provider_priv); - if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_NOT_SUPPORTED) { + if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_INVALID_CTL_PATH) { LOG_ERR("Failed to post-initialize provider"); umf_ba_global_free(provider); return ret; diff --git a/src/pool/pool_scalable.c b/src/pool/pool_scalable.c index 99b4fa1a2d..9dd7acf883 100644 --- a/src/pool/pool_scalable.c +++ b/src/pool/pool_scalable.c @@ -492,10 +492,10 @@ static umf_result_t pool_ctl(void *hPool, umf_ctl_query_source_t operationType, const char *name, void *arg, size_t size, umf_ctl_query_type_t query_type, va_list args) { (void)operationType; // unused - umf_memory_pool_handle_t pool_provider = (umf_memory_pool_handle_t)hPool; + utils_init_once(&ctl_initialized, initialize_pool_ctl); - return ctl_query(&pool_scallable_ctl_root, pool_provider->pool_priv, - CTL_QUERY_PROGRAMMATIC, name, query_type, arg, size, args); + return ctl_query(&pool_scallable_ctl_root, hPool, CTL_QUERY_PROGRAMMATIC, + name, query_type, arg, size, args); } static umf_result_t scalable_get_name(void *pool, const char **name) { diff --git a/test/common/pool.hpp b/test/common/pool.hpp index 711472ebc2..30abd08cad 100644 --- a/test/common/pool.hpp +++ b/test/common/pool.hpp @@ -156,7 +156,7 @@ typedef struct pool_base_t { } umf_result_t ext_ctl(umf_ctl_query_source_t, const char *, void *, size_t, umf_ctl_query_type_t, va_list) noexcept { - return UMF_RESULT_ERROR_UNKNOWN; + return UMF_RESULT_ERROR_INVALID_CTL_PATH; } umf_result_t ext_trim_memory(size_t) noexcept { return UMF_RESULT_ERROR_UNKNOWN; diff --git a/test/common/provider.hpp b/test/common/provider.hpp index b46c923058..947392559c 100644 --- a/test/common/provider.hpp +++ b/test/common/provider.hpp @@ -137,7 +137,7 @@ typedef struct provider_base_t { [[maybe_unused]] size_t size, [[maybe_unused]] umf_ctl_query_type_t queryType, [[maybe_unused]] va_list args) noexcept { - return UMF_RESULT_ERROR_UNKNOWN; + return UMF_RESULT_ERROR_INVALID_CTL_PATH; } umf_result_t ext_get_allocation_properties( diff --git a/test/ctl/ctl_unittest.cpp b/test/ctl/ctl_unittest.cpp index d4b0004eea..7ae2a12148 100644 --- a/test/ctl/ctl_unittest.cpp +++ b/test/ctl/ctl_unittest.cpp @@ -179,18 +179,18 @@ TEST_F(test, ctl_debug_node_arg_invalid) { "debug.arg_test.42", CTL_QUERY_READ, &arg, sizeof(arg), empty_args); - ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); ret = ctl_query(ctl_handler, NULL, CTL_QUERY_PROGRAMMATIC, "debug.arg_test.arg_value", CTL_QUERY_READ, &arg, sizeof(arg), empty_args); - ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); ret = ctl_query(ctl_handler, NULL, CTL_QUERY_PROGRAMMATIC, "debug.arg_test.wrong_type.arg_value", CTL_QUERY_READ, &arg, sizeof(arg), empty_args); - ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); 
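    // With this change the two error codes split cleanly: a path that cannot
    // be resolved to a CTL node (as in the three queries above) reports
    // UMF_RESULT_ERROR_INVALID_CTL_PATH, while a leaf that does resolve but
    // is handed an unusable argument keeps returning
    // UMF_RESULT_ERROR_INVALID_ARGUMENT. Illustrative sketch using a leaf
    // added later in this series (hypothetical pool handle):
    //
    //     size_t v = 0;
    //     umfCtlGet("umf.pool.by_handle.{}.params.capacity", &v, 1, pool);
    //     // resolves, but size != sizeof(size_t)
    //     //   -> UMF_RESULT_ERROR_INVALID_ARGUMENT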
va_end(empty_args); } diff --git a/test/pools/disjoint_pool_ctl.cpp b/test/pools/disjoint_pool_ctl.cpp index 37181ea4d3..125c464f87 100644 --- a/test/pools/disjoint_pool_ctl.cpp +++ b/test/pools/disjoint_pool_ctl.cpp @@ -643,7 +643,7 @@ TEST_F(test, disjointCtlBucketStatsInvalid) { ret = umfCtlGet("umf.pool.by_handle.{}.stats.buckets.count", &arg, 1, poolWrapper.get()); - EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); ret = umfCtlGet("umf.pool.by_handle.{}.stats.buckets.1.alloc_num", NULL, sizeof(arg), poolWrapper.get()); @@ -651,17 +651,17 @@ TEST_F(test, disjointCtlBucketStatsInvalid) { ret = umfCtlGet("umf.pool.by_handle.{}.stats.1.alloc_num", &arg, 1, poolWrapper.get()); - EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); // no bucket id ret = umfCtlGet("umf.pool.by_handle.{}.stats.buckets.alloc_num", &arg, sizeof(arg), poolWrapper.get()); - EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); // bucked id + count ret = umfCtlGet("umf.pool.by_handle.{}.stats.buckets.1.count", &arg, sizeof(arg), poolWrapper.get()); - EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); // Clean up ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); diff --git a/test/utils/cpp_helpers.hpp b/test/utils/cpp_helpers.hpp index 69a01a39fe..dc784728cf 100644 --- a/test/utils/cpp_helpers.hpp +++ b/test/utils/cpp_helpers.hpp @@ -87,7 +87,7 @@ template umf_memory_pool_ops_t poolOpsBase() { UMF_ASSIGN_OP(ops, T, malloc_usable_size, UMF_RESULT_ERROR_UNKNOWN); UMF_ASSIGN_OP(ops, T, free, UMF_RESULT_ERROR_UNKNOWN); UMF_ASSIGN_OP(ops, T, get_last_allocation_error, UMF_RESULT_ERROR_UNKNOWN); - UMF_ASSIGN_OP(ops, T, ext_ctl, UMF_RESULT_ERROR_UNKNOWN); + UMF_ASSIGN_OP(ops, T, ext_ctl, UMF_RESULT_ERROR_INVALID_CTL_PATH); UMF_ASSIGN_OP(ops, T, ext_trim_memory, UMF_RESULT_ERROR_UNKNOWN); return ops; } From 68991d46fa86dbbc00ca2ad9970ce434ccc155c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Mon, 15 Sep 2025 18:11:29 +0200 Subject: [PATCH 05/11] add params override for disjointpool --- docs/config/ctl.rst | 3 +- src/pool/pool_disjoint.c | 257 ++++++++++++++++++++++++++-- src/pool/pool_disjoint_internal.h | 6 + test/CMakeLists.txt | 2 + test/ctl/ctl_env_app.cpp | 91 ++++++++++ test/ctl/ctl_env_disjoint_pool.cfg | 7 + test/ctl/ctl_env_driver.cpp | 19 +++ test/pools/disjoint_pool_ctl.cpp | 266 +++++++++++++++++++++++++++++ 8 files changed, 637 insertions(+), 14 deletions(-) create mode 100644 test/ctl/ctl_env_disjoint_pool.cfg diff --git a/docs/config/ctl.rst b/docs/config/ctl.rst index 7ef7fa9548..4840f6eb2f 100644 --- a/docs/config/ctl.rst +++ b/docs/config/ctl.rst @@ -113,8 +113,9 @@ placeholder with the wildcard argument supplied to :c:func:`umfCtlGet`, Logger nodes ------------ -.. py:function:: umf.logger.timestamp(enabled) +.. py:function:: umf.logger.timestamp(enabled) + :param enabled: Receives (or provides) ``0`` when timestamps are disabled and ``1`` when they are emitted. 
:type enabled: ``int *`` diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c index 22055554f0..1b1441efda 100644 --- a/src/pool/pool_disjoint.c +++ b/src/pool/pool_disjoint.c @@ -7,7 +7,6 @@ #include #include -#include #include #include #include @@ -29,14 +28,23 @@ static char *DEFAULT_NAME = "disjoint"; +enum { + DP_OVERRIDE_SLAB_MIN_SIZE = 1 << 0, + DP_OVERRIDE_MAX_POOLABLE_SIZE = 1 << 1, + DP_OVERRIDE_CAPACITY = 1 << 2, + DP_OVERRIDE_MIN_BUCKET_SIZE = 1 << 3, + DP_OVERRIDE_POOL_TRACE = 1 << 4, +}; + /* Disjoint pool CTL implementation */ struct ctl disjoint_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; umf_result_t disjoint_pool_post_initialize(void *ppPool); -static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( - void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, - umf_ctl_index_utlist_t *indexes) { +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { (void)source; (void)arg; (void)size; @@ -86,6 +94,204 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx, return UMF_RESULT_SUCCESS; } #endif + +static const struct ctl_argument + CTL_ARG(slab_min_size) = CTL_ARG_UNSIGNED_LONG_LONG; +static const struct ctl_argument + CTL_ARG(max_poolable_size) = CTL_ARG_UNSIGNED_LONG_LONG; +static const struct ctl_argument CTL_ARG(capacity) = CTL_ARG_UNSIGNED_LONG_LONG; +static const struct ctl_argument + CTL_ARG(min_bucket_size) = CTL_ARG_UNSIGNED_LONG_LONG; +static const struct ctl_argument CTL_ARG(pool_trace) = CTL_ARG_INT; + +static umf_result_t +CTL_READ_HANDLER(slab_min_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(size_t *)arg = pool->params.slab_min_size; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(slab_min_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + size_t value = *(size_t *)arg; + umf_result_t ret = + umfDisjointPoolParamsSetSlabMinSize(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_SLAB_MIN_SIZE; + } + return ret; +} + +static umf_result_t +CTL_READ_HANDLER(max_poolable_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(size_t *)arg = pool->params.max_poolable_size; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(max_poolable_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = 
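        /*
         * Every params.* write handler in this file follows the same pattern:
         * the write is refused once post_initialize has run (the pool was
         * already built from these values), the new value is validated through
         * the matching umfDisjointPoolParamsSet*() setter, and an override bit
         * is recorded so post_initialize can log what was changed. Typical
         * usage (illustrative):
         *
         *     size_t cap = 4;
         *     // before the pool exists: becomes the default for new pools
         *     umfCtlSet("umf.pool.default.disjoint.params.capacity",
         *               &cap, sizeof(cap));
         *     // on an already-created pool: rejected with
         *     // UMF_RESULT_ERROR_NOT_SUPPORTED
         *     umfCtlSet("umf.pool.by_handle.{}.params.capacity",
         *               &cap, sizeof(cap), pool);
         */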
(disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + size_t value = *(size_t *)arg; + umf_result_t ret = + umfDisjointPoolParamsSetMaxPoolableSize(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_MAX_POOLABLE_SIZE; + } + return ret; +} + +static umf_result_t +CTL_READ_HANDLER(capacity)(void *ctx, umf_ctl_query_source_t source, void *arg, + size_t size, umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(size_t *)arg = pool->params.capacity; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(capacity)(void *ctx, umf_ctl_query_source_t source, void *arg, + size_t size, umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + size_t value = *(size_t *)arg; + umf_result_t ret = + umfDisjointPoolParamsSetCapacity(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_CAPACITY; + } + return ret; +} + +static umf_result_t +CTL_READ_HANDLER(min_bucket_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(size_t *)arg = pool->params.min_bucket_size; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(min_bucket_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + size_t value = *(size_t *)arg; + umf_result_t ret = + umfDisjointPoolParamsSetMinBucketSize(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_MIN_BUCKET_SIZE; + } + return ret; +} + +static umf_result_t +CTL_READ_HANDLER(pool_trace)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(int)) { + LOG_ERR("arg is NULL or size is not sizeof(int)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(int *)arg = pool->params.pool_trace; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(pool_trace)(void *ctx, 
umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(int)) { + LOG_ERR("arg is NULL or size is not sizeof(int)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + int value = *(int *)arg; + umf_result_t ret = + umfDisjointPoolParamsSetTrace(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_POOL_TRACE; + } + return ret; +} + +static const umf_ctl_node_t CTL_NODE(params)[] = { + CTL_LEAF_RW(slab_min_size), CTL_LEAF_RW(max_poolable_size), + CTL_LEAF_RW(capacity), CTL_LEAF_RW(min_bucket_size), + CTL_LEAF_RW(pool_trace), CTL_NODE_END, +}; static umf_result_t CTL_READ_HANDLER(used_memory)(void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, @@ -332,28 +538,26 @@ static const struct ctl_argument CTL_ARG(buckets) = { CTL_ARG_PARSER_END}}; static void initialize_disjoint_ctl(void) { + CTL_REGISTER_MODULE(&disjoint_ctl_root, params); CTL_REGISTER_MODULE(&disjoint_ctl_root, stats); CTL_REGISTER_MODULE(&disjoint_ctl_root, buckets); // TODO: this is hack. Need some way to register module as node with argument disjoint_ctl_root.root[disjoint_ctl_root.first_free - 1].arg = &CTL_ARG(buckets); - disjoint_ctl_root.root[disjoint_ctl_root.first_free++] = - (umf_ctl_node_t){ - .name = "post_initialize", - .type = CTL_NODE_LEAF, - .runnable_cb = - CTL_RUNNABLE_HANDLER(post_initialize), - }; + disjoint_ctl_root.root[disjoint_ctl_root.first_free++] = (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } umf_result_t disjoint_pool_ctl(void *hPool, umf_ctl_query_source_t operationType, const char *name, void *arg, size_t size, umf_ctl_query_type_t queryType, va_list args) { - (void)operationType; utils_init_once(&ctl_initialized, initialize_disjoint_ctl); - return ctl_query(&disjoint_ctl_root, hPool, CTL_QUERY_PROGRAMMATIC, name, + return ctl_query(&disjoint_ctl_root, hPool, operationType, name, queryType, arg, size, args); } @@ -947,6 +1151,8 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, disjoint_pool->provider = provider; disjoint_pool->params = *dp_params; + disjoint_pool->post_initialized = false; + disjoint_pool->params_overridden = 0; *ppPool = (void *)disjoint_pool; @@ -956,6 +1162,31 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, umf_result_t disjoint_pool_post_initialize(void *ppPool) { disjoint_pool_t *disjoint_pool = (disjoint_pool_t *)ppPool; + disjoint_pool->post_initialized = true; + + if (disjoint_pool->params_overridden) { + if (disjoint_pool->params_overridden & DP_OVERRIDE_SLAB_MIN_SIZE) { + LOG_INFO("CTL override: slab_min_size=%zu", + disjoint_pool->params.slab_min_size); + } + if (disjoint_pool->params_overridden & DP_OVERRIDE_MAX_POOLABLE_SIZE) { + LOG_INFO("CTL override: max_poolable_size=%zu", + disjoint_pool->params.max_poolable_size); + } + if (disjoint_pool->params_overridden & DP_OVERRIDE_CAPACITY) { + LOG_INFO("CTL override: capacity=%zu", + disjoint_pool->params.capacity); + } + if (disjoint_pool->params_overridden & DP_OVERRIDE_MIN_BUCKET_SIZE) { + LOG_INFO("CTL override: min_bucket_size=%zu", + disjoint_pool->params.min_bucket_size); + } + if (disjoint_pool->params_overridden & 
DP_OVERRIDE_POOL_TRACE) { + LOG_INFO("CTL override: pool_trace=%d", + disjoint_pool->params.pool_trace); + } + } + disjoint_pool->known_slabs = critnib_new(free_slab, NULL); if (disjoint_pool->known_slabs == NULL) { goto err_free_disjoint_pool; diff --git a/src/pool/pool_disjoint_internal.h b/src/pool/pool_disjoint_internal.h index e52a0f4969..4d934479e0 100644 --- a/src/pool/pool_disjoint_internal.h +++ b/src/pool/pool_disjoint_internal.h @@ -159,6 +159,12 @@ typedef struct disjoint_pool_t { // Coarse-grain allocation min alignment size_t provider_min_page_size; + + // true after post_initialize was successfully called + bool post_initialized; + + // bitmask of parameters overridden via CTL + unsigned params_overridden; } disjoint_pool_t; static inline void slab_set_chunk_bit(slab_t *slab, size_t index, bool value) { diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 8862b7b88d..fae28faba5 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -237,6 +237,8 @@ file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/ctl/ctl_env_config1.cfg DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/ctl) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/ctl/ctl_env_config2.cfg DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/ctl) +file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/ctl/ctl_env_disjoint_pool.cfg + DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/ctl) add_umf_test( NAME ctl_env_driver diff --git a/test/ctl/ctl_env_app.cpp b/test/ctl/ctl_env_app.cpp index d40af3b272..1d6b3c463b 100644 --- a/test/ctl/ctl_env_app.cpp +++ b/test/ctl/ctl_env_app.cpp @@ -14,6 +14,8 @@ #include #include +#include +#include static int test_env_defaults(int argc, char **argv) { char buf[64] = {0}; @@ -67,6 +69,91 @@ static int test_logger(int argc, char **argv) { return 0; } +static int test_disjoint_pool(int argc, char **argv) { + if (argc % 2 != 0) { + std::cerr << "expected even number of arguments" << std::endl; + std::cerr << "Usage: disjoint_pool param value [param value]..." 
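        /*
         * This helper is exercised by ctl_env_driver.cpp: the driver exports
         * UMF_CONF (or UMF_CONF_FILE) with umf.pool.default.disjoint.params.*
         * entries, spawns this program, and passes the expected values as
         * name/value pairs. The child then creates a disjoint pool and reads
         * every parameter back through "umf.pool.by_handle.{}.params.<name>"
         * to confirm that the environment defaults were applied. Roughly
         * (binary name illustrative):
         *
         *     UMF_CONF="umf.pool.default.disjoint.params.capacity=4" \
         *         ./ctl_env_app disjoint_pool capacity 4
         */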
+ << std::endl; + return 1; + } + + if (umfInit() != UMF_RESULT_SUCCESS) { + std::cerr << "umfInit failed" << std::endl; + return 1; + } + + int ret = 1; + umf_os_memory_provider_params_handle_t os_params = nullptr; + umf_memory_provider_handle_t provider = nullptr; + umf_memory_pool_handle_t pool = nullptr; + + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_params)) { + return 0; + } + + if (umfMemoryProviderCreate(umfOsMemoryProviderOps(), os_params, &provider) != + UMF_RESULT_SUCCESS) { + std::cerr << "Failed to create provider" << std::endl; + goto out; + } + + if (umfPoolCreate(umfDisjointPoolOps(), provider, nullptr, 0, &pool) != + UMF_RESULT_SUCCESS) { + std::cerr << "Failed to create disjoint pool" << std::endl; + goto out; + } + + for (int i = 0; i < argc; i += 2) { + const char *name = argv[i]; + const char *value = argv[i + 1]; + char path[128]; + snprintf(path, sizeof(path), + "umf.pool.by_handle.{}.params.%s", name); + + if (strcmp(name, "pool_trace") == 0) { + int got = 0; + if (umfCtlGet(path, &got, sizeof(got), pool) != + UMF_RESULT_SUCCESS) { + std::cerr << "Failed to get " << name << std::endl; + goto out; + } + if (got != atoi(value)) { + std::cerr << "Expected " << name << " to be " << value + << ", but got " << got << std::endl; + goto out; + } + } else { + size_t got = 0; + if (umfCtlGet(path, &got, sizeof(got), pool) != + UMF_RESULT_SUCCESS) { + std::cerr << "Failed to get " << name << std::endl; + goto out; + } + if (got != strtoull(value, nullptr, 10)) { + std::cerr << "Expected " << name << " to be " << value + << ", but got " << got << std::endl; + goto out; + } + } + } + + ret = 0; + +out: + if (pool) { + umfPoolDestroy(pool); + } + if (provider) { + umfMemoryProviderDestroy(provider); + } + if (os_params) { + umfOsMemoryProviderParamsDestroy(os_params); + } + + return ret; +} + int main(int argc, char **argv) { if (argc < 2) { std::cerr << "Usage: " << argv[0] << " args..." @@ -83,5 +170,9 @@ int main(int argc, char **argv) { if (strcmp(test_name, "logger") == 0) { return test_logger(argc, argv); } + + if (strcmp(test_name, "disjoint_pool") == 0) { + return test_disjoint_pool(argc, argv); + } return 1; } diff --git a/test/ctl/ctl_env_disjoint_pool.cfg b/test/ctl/ctl_env_disjoint_pool.cfg new file mode 100644 index 0000000000..7fd2635cd9 --- /dev/null +++ b/test/ctl/ctl_env_disjoint_pool.cfg @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
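# Illustrative note: this file is consumed through the UMF_CONF_FILE
# environment variable; its semicolon-separated key=value entries are
# equivalent to packing the same string into UMF_CONF directly, e.g.
#   UMF_CONF_FILE=/path/to/ctl_env_disjoint_pool.cfg ./your_app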
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +umf.pool.default.disjoint.params.slab_min_size=65536; +umf.pool.default.disjoint.params.capacity=4; +umf.pool.default.disjoint.params.min_bucket_size=8; +umf.pool.default.disjoint.params.pool_trace=0; diff --git a/test/ctl/ctl_env_driver.cpp b/test/ctl/ctl_env_driver.cpp index 3bde34a75b..9b5521dec6 100644 --- a/test/ctl/ctl_env_driver.cpp +++ b/test/ctl/ctl_env_driver.cpp @@ -126,3 +126,22 @@ TEST_F(test, ctl_env_logger) { run_case({{"UMF_CONF", "umf.logger.output=stdout;umf.logger.level=0"}}, {"logger", "stdout", "0"}); } + +TEST_F(test, ctl_env_disjoint_pool_env) { + run_case( + {{"UMF_CONF", + "umf.pool.default.disjoint.params.slab_min_size=65536;" + "umf.pool.default.disjoint.params.capacity=4;" + "umf.pool.default.disjoint.params.min_bucket_size=8;" + "umf.pool.default.disjoint.params.pool_trace=0"}}, + {"disjoint_pool", "slab_min_size", "65536", "capacity", "4", + "min_bucket_size", "8", "pool_trace", "0"}); +} + +TEST_F(test, ctl_env_disjoint_pool_file) { + std::string cfg = CTL_CONF_FILE_DIR "/ctl_env_disjoint_pool.cfg"; + run_case( + {{"UMF_CONF_FILE", cfg}}, + {"disjoint_pool", "slab_min_size", "65536", "capacity", "4", + "min_bucket_size", "8", "pool_trace", "0"}); +} diff --git a/test/pools/disjoint_pool_ctl.cpp b/test/pools/disjoint_pool_ctl.cpp index 125c464f87..a5badabd9e 100644 --- a/test/pools/disjoint_pool_ctl.cpp +++ b/test/pools/disjoint_pool_ctl.cpp @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exceptiongi #include +#include #include #include #include @@ -10,11 +11,14 @@ #include #include +#include #include +#include #include "base.hpp" #include "utils_assert.h" #include "utils_log.h" +#include "ctl/ctl_internal.h" using umf_test::test; using namespace umf_test; @@ -302,6 +306,129 @@ TEST_F(test, disjointCtlReservedMemory) { ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); } +TEST_F(test, disjointCtlGetParams) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + + const size_t slab_min_size = 32 * 1024; + const size_t max_poolable_size = 512 * 1024; + const size_t capacity = 7; + const size_t min_bucket_size = 16; + const int pool_trace = 1; + + ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size)); + ASSERT_SUCCESS( + umfDisjointPoolParamsSetMaxPoolableSize(params, max_poolable_size)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, capacity)); + ASSERT_SUCCESS( + umfDisjointPoolParamsSetMinBucketSize(params, min_bucket_size)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetTrace(params, pool_trace)); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + size_t got_size = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.slab_min_size", + &got_size, sizeof(got_size), + poolWrapper.get())); + EXPECT_EQ(got_size, slab_min_size); + + ASSERT_SUCCESS(umfCtlGet( + "umf.pool.by_handle.{}.params.max_poolable_size", &got_size, + sizeof(got_size), poolWrapper.get())); + EXPECT_EQ(got_size, max_poolable_size); + + 
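    // Note: unlike the disjoint pool stats counters (which may require a
    // non-zero pool_trace level to be populated), the params.* leaves are
    // plain reads of the stored configuration, so these round-trips should
    // succeed for any disjoint pool; {} resolves to the handle passed as the
    // trailing vararg (poolWrapper.get()).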
ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.capacity", + &got_size, sizeof(got_size), + poolWrapper.get())); + EXPECT_EQ(got_size, capacity); + + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &got_size, sizeof(got_size), + poolWrapper.get())); + EXPECT_EQ(got_size, min_bucket_size); + + int got_trace = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.pool_trace", + &got_trace, sizeof(got_trace), + poolWrapper.get())); + EXPECT_EQ(got_trace, pool_trace); + + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + +TEST_F(test, disjointCtlDefaultsOverride) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + size_t default_capacity = 4; + size_t default_min_bucket = 8; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &default_capacity, sizeof(default_capacity))); + ASSERT_SUCCESS(umfCtlSet( + "umf.pool.default.disjoint.params.min_bucket_size", + &default_min_bucket, sizeof(default_min_bucket))); + + size_t override_capacity = 2; + size_t override_min_bucket = 32; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &override_capacity, sizeof(override_capacity))); + ASSERT_SUCCESS(umfCtlSet( + "umf.pool.default.disjoint.params.min_bucket_size", + &override_min_bucket, sizeof(override_min_bucket))); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 1)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetMinBucketSize(params, 64)); + + { + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + size_t value = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.capacity", + &value, sizeof(value), poolWrapper.get())); + EXPECT_EQ(value, override_capacity); + ASSERT_SUCCESS(umfCtlGet( + "umf.pool.by_handle.{}.params.min_bucket_size", &value, + sizeof(value), poolWrapper.get())); + EXPECT_EQ(value, override_min_bucket); + } + + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &default_capacity, sizeof(default_capacity))); + ASSERT_SUCCESS(umfCtlSet( + "umf.pool.default.disjoint.params.min_bucket_size", + &default_min_bucket, sizeof(default_min_bucket))); + + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + TEST_F(test, disjointCtlMemoryMetricsConsistency) { umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; if (UMF_RESULT_ERROR_NOT_SUPPORTED == @@ -667,3 +794,142 @@ TEST_F(test, disjointCtlBucketStatsInvalid) { ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); } + +TEST_F(test, disjointCtlParams) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper 
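        // The defaults store keeps one value per path, so the second
        // umfCtlSet() on each umf.pool.default.disjoint.params.* path below
        // replaces the first; the pool created afterwards is therefore
        // expected to report the override_* values even though the params
        // handle it was built from sets capacity/min_bucket_size differently.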
providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + + // slab_min_size + size_t new_slab_min_size = 128 * 1024; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.slab_min_size", + &new_slab_min_size, sizeof(new_slab_min_size))); + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + { + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + size_t value = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.slab_min_size", + &value, sizeof(value), poolWrapper.get())); + EXPECT_EQ(value, new_slab_min_size); + size_t other = new_slab_min_size + 1024; + umf_result_t ret = umfCtlSet( + "umf.pool.by_handle.{}.params.slab_min_size", &other, sizeof(other), + poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); + } + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + size_t default_slab_min_size = 64 * 1024; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.slab_min_size", + &default_slab_min_size, + sizeof(default_slab_min_size))); + + // max_poolable_size + size_t new_max_poolable = 1 * MB; + ASSERT_SUCCESS(umfCtlSet( + "umf.pool.default.disjoint.params.max_poolable_size", &new_max_poolable, + sizeof(new_max_poolable))); + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + { + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + size_t value = 0; + ASSERT_SUCCESS(umfCtlGet( + "umf.pool.by_handle.{}.params.max_poolable_size", &value, + sizeof(value), poolWrapper.get())); + EXPECT_EQ(value, new_max_poolable); + size_t other = new_max_poolable * 2; + umf_result_t ret = umfCtlSet( + "umf.pool.by_handle.{}.params.max_poolable_size", &other, + sizeof(other), poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); + } + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + size_t default_max_poolable = 2 * MB; + ASSERT_SUCCESS(umfCtlSet( + "umf.pool.default.disjoint.params.max_poolable_size", &default_max_poolable, + sizeof(default_max_poolable))); + + // capacity + size_t new_capacity = 8; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &new_capacity, sizeof(new_capacity))); + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + { + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + size_t value = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.capacity", + &value, sizeof(value), poolWrapper.get())); + EXPECT_EQ(value, new_capacity); + size_t other = 16; + umf_result_t ret = umfCtlSet("umf.pool.by_handle.{}.params.capacity", + &other, sizeof(other), + poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); + } + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + size_t default_capacity = 4; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &default_capacity, sizeof(default_capacity))); + + // min_bucket_size + size_t new_min_bucket = 16; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &new_min_bucket, sizeof(new_min_bucket))); + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + { + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + size_t value = 0; + ASSERT_SUCCESS(umfCtlGet( + "umf.pool.by_handle.{}.params.min_bucket_size", &value, + sizeof(value), poolWrapper.get())); + EXPECT_EQ(value, new_min_bucket); + size_t other = 32; + 
umf_result_t ret = umfCtlSet( + "umf.pool.by_handle.{}.params.min_bucket_size", &other, + sizeof(other), poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); + } + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + size_t default_min_bucket = 8; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &default_min_bucket, sizeof(default_min_bucket))); + + // pool_trace + int new_trace = 3; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.pool_trace", + &new_trace, sizeof(new_trace))); + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + { + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + int value = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.pool_trace", + &value, sizeof(value), poolWrapper.get())); + EXPECT_EQ(value, new_trace); + int other = 1; + umf_result_t ret = umfCtlSet("umf.pool.by_handle.{}.params.pool_trace", + &other, sizeof(other), + poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); + } + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + int default_trace = 0; + ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.pool_trace", + &default_trace, sizeof(default_trace))); + + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + From f8cc704589ff0c33c5447a0fff95f0444ed5c683 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Tue, 16 Sep 2025 16:45:46 +0200 Subject: [PATCH 06/11] use fork for default tests defaults changes global state, so using forks will isolate tests --- src/provider/provider_ctl_stats_impl.h | 2 +- test/common/fork_helpers.hpp | 77 +++++ test/ctl/ctl_api.cpp | 176 ++++++----- test/pools/disjoint_pool_ctl.cpp | 409 ++++++++++++++----------- 4 files changed, 402 insertions(+), 262 deletions(-) create mode 100644 test/common/fork_helpers.hpp diff --git a/src/provider/provider_ctl_stats_impl.h b/src/provider/provider_ctl_stats_impl.h index 6f1fdf910b..72ffa93fb4 100644 --- a/src/provider/provider_ctl_stats_impl.h +++ b/src/provider/provider_ctl_stats_impl.h @@ -74,7 +74,7 @@ static const umf_ctl_node_t CTL_NODE(peak_memory)[] = {CTL_LEAF_RUNNABLE(reset), static const umf_ctl_node_t CTL_NODE(stats)[] = { CTL_LEAF_RO(allocated_memory), CTL_LEAF_RO(peak_memory), - CTL_CHILD(peak_memory), CTL_LEAF_RUNNABLE(reset), CTL_NODE_END}; + CTL_CHILD(peak_memory), CTL_NODE_END}; static inline void provider_ctl_stats_alloc(CTL_PROVIDER_TYPE *provider, size_t size) { diff --git a/test/common/fork_helpers.hpp b/test/common/fork_helpers.hpp new file mode 100644 index 0000000000..42f50de6d1 --- /dev/null +++ b/test/common/fork_helpers.hpp @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#pragma once + +#include "umf.h" +#include + +#include +#include + +#ifndef _WIN32 +#include +#include +#include +#endif + +namespace umf_test { + +constexpr int ForkedTestSuccess = 0; +constexpr int ForkedTestFailure = 1; +constexpr int ForkedTestSkip = 77; + +template void run_in_fork(Func &&func) { +#ifndef _WIN32 + static_assert(std::is_invocable_r_v, + "run_in_fork requires a void-returning callable"); + + pid_t pid = fork(); + ASSERT_NE(pid, -1) << "fork failed"; + + if (pid == 0) { + std::forward(func)(); + + auto *unit = ::testing::UnitTest::GetInstance(); + const ::testing::TestInfo *info = + unit ? 
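            /*
             * Child-side exit protocol: the forked child runs the test body,
             * maps any GTest skip/failure recorded on the current TestResult
             * to ForkedTestSkip (77) or ForkedTestFailure (1), and otherwise
             * exits with ForkedTestSuccess (0). umfTearDown() is called
             * explicitly because _exit() bypasses destructors and atexit
             * handlers; the parent turns 77 back into GTEST_SKIP() and any
             * other non-zero code into a failure. Typical usage
             * (illustrative):
             *
             *     TEST_F(test, mutatesGlobalCtlDefaults) {
             *         umf_test::run_in_fork([] {
             *             // umf.*.default.* changes made here stay in the
             *             // child process and never leak into other tests
             *         });
             *     }
             */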
unit->current_test_info() : nullptr; + const ::testing::TestResult *result = info ? info->result() : nullptr; + + if (result != nullptr) { + if (result->Skipped()) { + _exit(ForkedTestSkip); + } + if (result->Failed()) { + _exit(ForkedTestFailure); + } + } + umfTearDown(); // exit not call destructor so we need to call it manualy + _exit(ForkedTestSuccess); + } + + int status = 0; + ASSERT_EQ(waitpid(pid, &status, 0), pid) << "waitpid failed"; + + if (!WIFEXITED(status)) { + FAIL() << "Forked test terminated abnormally."; + } + + int exit_code = WEXITSTATUS(status); + if (exit_code == ForkedTestSkip) { + GTEST_SKIP() << "Forked test body requested skip."; + } + + ASSERT_EQ(exit_code, ForkedTestSuccess) + << "Forked test exited with code " << exit_code; +#else + (void)func; + GTEST_SKIP() << "Fork-based tests are not supported on Windows."; +#endif +} + +} // namespace umf_test + diff --git a/test/ctl/ctl_api.cpp b/test/ctl/ctl_api.cpp index 9a4303b04b..a3961297e5 100644 --- a/test/ctl/ctl_api.cpp +++ b/test/ctl/ctl_api.cpp @@ -28,6 +28,7 @@ #include #include "../common/base.hpp" +#include "../common/fork_helpers.hpp" #include "gtest/gtest.h" using namespace umf_test; @@ -184,20 +185,21 @@ class CtlTest : public ::testing::Test { private: }; -/* Case: default settings - * This test sets a default value and then retrieves it */ +// setting default modyfies global state - +// tests doing so should run in fork to ensure correct test isolation TEST_F(CtlTest, ctlDefault) { - const char *arg = "default_name"; - - auto res = umfCtlSet("umf.pool.default.some_pool.some_path", (void *)arg, - strlen(arg)); - ASSERT_EQ(res, UMF_RESULT_SUCCESS); - - char output[64] = {1}; - res = umfCtlGet("umf.pool.default.some_pool.some_path", (void *)output, - sizeof(output)); - ASSERT_EQ(res, UMF_RESULT_SUCCESS); - ASSERT_STREQ(output, arg); + umf_test::run_in_fork([] { + const char *arg = "default_name"; + ASSERT_EQ(umfCtlSet("umf.pool.default.some_pool.some_path", + (void *)arg, strlen(arg)), + UMF_RESULT_SUCCESS); + + char output[64] = {1}; + ASSERT_EQ(umfCtlGet("umf.pool.default.some_pool.some_path", + (void *)output, sizeof(output)), + UMF_RESULT_SUCCESS); + ASSERT_STREQ(output, arg); + }); } /* Case: umfCtlSet negative test */ @@ -234,58 +236,64 @@ TEST_F(CtlTest, ctlGetInvalid) { /* Case: multi-threaded test for pool defaults * This test sets a default value in multiple threads and then retrieves it */ TEST_F(CtlTest, ctlDefaultPoolMultithreaded) { - const size_t max_size = 10; - const size_t num_threads = 8; - std::vector threads; - std::atomic totalRecords = 0; - const char *predefined_value = "xyzzyx"; - std::string name_prefix = "umf.pool.default.some_pool."; - for (size_t i = 0; i < num_threads; i++) { - threads.emplace_back([i, &totalRecords, &predefined_value, &name_prefix, - max_size = max_size]() { - for (size_t j = 0; j < max_size; j++) { - std::string name = name_prefix + std::to_string(i * 10 + j); - umfCtlSet(name.c_str(), (void *)predefined_value, - strlen(predefined_value)); - std::atomic_fetch_add(&totalRecords, 1UL); - } - }); - } - for (auto &thread : threads) { - thread.join(); - } + umf_test::run_in_fork([] { + const size_t max_size = 10; + const size_t num_threads = 8; + std::vector threads; + std::atomic totalRecords = 0; + const char *predefined_value = "xyzzyx"; + std::string name_prefix = "umf.pool.default.some_pool."; + for (size_t i = 0; i < num_threads; i++) { + threads.emplace_back([i, &totalRecords, &predefined_value, + &name_prefix, max_size = max_size]() { + for (size_t j = 0; 
j < max_size; j++) { + std::string name = + name_prefix + std::to_string(i * 10 + j); + umfCtlSet(name.c_str(), (void *)predefined_value, + strlen(predefined_value)); + std::atomic_fetch_add(&totalRecords, 1UL); + } + }); + } + for (auto &thread : threads) { + thread.join(); + } - // Check if all threads set the value correctly - // and retrieve it - ASSERT_EQ(totalRecords.load(), num_threads * max_size); + ASSERT_EQ(totalRecords.load(), num_threads * max_size); - char output[100] = {0}; - for (size_t i = 0; i < totalRecords.load(); i++) { - std::string name = name_prefix + std::to_string(i); - auto status = umfCtlGet(name.c_str(), (void *)output, sizeof(output)); - ASSERT_EQ(status, UMF_RESULT_SUCCESS); - ASSERT_EQ(std::string(output), std::string(predefined_value)); - } + char output[100] = {0}; + for (size_t i = 0; i < totalRecords.load(); i++) { + std::string name = name_prefix + std::to_string(i); + umf_result_t status = + umfCtlGet(name.c_str(), (void *)output, sizeof(output)); + ASSERT_EQ(status, UMF_RESULT_SUCCESS); + ASSERT_STREQ(output, predefined_value); + } + }); } /* Case: overwriting an existing value for pool defaults * This test sets a default value and then overwrites it with a new value */ TEST_F(CtlTest, ctlDefaultPoolOverwrite) { - constexpr int max_size = 10; - std::vector values; - const std::string name = "umf.pool.default.some_pool"; - - for (int i = 0; i < max_size; i++) { - values.push_back("value_" + std::to_string(i)); - umfCtlSet(name.c_str(), (void *)values.back().c_str(), - values.back().size()); - } + umf_test::run_in_fork([] { + constexpr int max_size = 10; + std::vector values; + const std::string name = "umf.pool.default.some_pool"; + + for (int i = 0; i < max_size; i++) { + values.push_back("value_" + std::to_string(i)); + umf_result_t set_status = + umfCtlSet(name.c_str(), (void *)values.back().c_str(), + values.back().size()); + ASSERT_EQ(set_status, UMF_RESULT_SUCCESS); + } - char output[100] = {0}; - umf_result_t status = - umfCtlGet(name.c_str(), (void *)output, sizeof(output)); - ASSERT_EQ(status, UMF_RESULT_SUCCESS); - ASSERT_EQ(std::string(output), values.back()); + char output[100] = {0}; + umf_result_t status = + umfCtlGet(name.c_str(), (void *)output, sizeof(output)); + ASSERT_EQ(status, UMF_RESULT_SUCCESS); + ASSERT_STREQ(output, values.back().c_str()); + }); } TEST_F(CtlTest, DISABLED_ctlNameValidation) { @@ -349,32 +357,36 @@ TEST_F(CtlTest, DISABLED_ctlExecInvalidSize) { } TEST_F(CtlTest, ctlDefaultMultithreadedProvider) { - std::vector threads; - std::atomic totalRecords = 0; - const char *predefined_value = "xyzzyx"; - std::string name_prefix = "umf.provider.default.some_provider."; - for (int i = 0; i < 8; i++) { - threads.emplace_back( - [i, &totalRecords, &predefined_value, &name_prefix]() { - for (int j = 0; j < 10; j++) { - std::string name = name_prefix + std::to_string(i * 10 + j); - umfCtlSet(name.c_str(), (void *)predefined_value, - strlen(predefined_value)); - std::atomic_fetch_add(&totalRecords, 1); - } - }); - } - for (auto &thread : threads) { - thread.join(); - } + umf_test::run_in_fork([] { + std::vector threads; + std::atomic totalRecords = 0; + const char *predefined_value = "xyzzyx"; + std::string name_prefix = "umf.provider.default.some_provider."; + for (int i = 0; i < 8; i++) { + threads.emplace_back( + [i, &totalRecords, &predefined_value, &name_prefix]() { + for (int j = 0; j < 10; j++) { + std::string name = + name_prefix + std::to_string(i * 10 + j); + umfCtlSet(name.c_str(), (void *)predefined_value, + 
strlen(predefined_value)); + std::atomic_fetch_add(&totalRecords, 1); + } + }); + } + for (auto &thread : threads) { + thread.join(); + } - char output[100] = {0}; - for (size_t i = 0; i < totalRecords.load(); i++) { - std::string name = name_prefix + std::to_string(i); - auto status = umfCtlGet(name.c_str(), (void *)output, sizeof(output)); - ASSERT_EQ(status, UMF_RESULT_SUCCESS); - ASSERT_EQ(std::string(output), std::string(predefined_value)); - } + char output[100] = {0}; + for (size_t i = 0; i < totalRecords.load(); i++) { + std::string name = name_prefix + std::to_string(i); + umf_result_t status = + umfCtlGet(name.c_str(), (void *)output, sizeof(output)); + ASSERT_EQ(status, UMF_RESULT_SUCCESS); + ASSERT_STREQ(output, predefined_value); + } + }); } TEST_F(test, ctl_logger_basic_rw) { diff --git a/test/pools/disjoint_pool_ctl.cpp b/test/pools/disjoint_pool_ctl.cpp index a5badabd9e..04b9216967 100644 --- a/test/pools/disjoint_pool_ctl.cpp +++ b/test/pools/disjoint_pool_ctl.cpp @@ -11,11 +11,13 @@ #include #include +#include #include +#include #include -#include #include "base.hpp" +#include "common/fork_helpers.hpp" #include "utils_assert.h" #include "utils_log.h" #include "ctl/ctl_internal.h" @@ -371,62 +373,79 @@ TEST_F(test, disjointCtlGetParams) { } TEST_F(test, disjointCtlDefaultsOverride) { - umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; - if (UMF_RESULT_ERROR_NOT_SUPPORTED == - umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } - - ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), - os_memory_provider_params); - if (providerWrapper.get() == NULL) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } + umf_test::run_in_fork([] { + umf_os_memory_provider_params_handle_t raw_os_params = nullptr; + umf_result_t res = + umfOsMemoryProviderParamsCreate(&raw_os_params); + if (res == UMF_RESULT_ERROR_NOT_SUPPORTED) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + ASSERT_EQ(res, UMF_RESULT_SUCCESS); - size_t default_capacity = 4; - size_t default_min_bucket = 8; - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", - &default_capacity, sizeof(default_capacity))); - ASSERT_SUCCESS(umfCtlSet( - "umf.pool.default.disjoint.params.min_bucket_size", - &default_min_bucket, sizeof(default_min_bucket))); + std::unique_ptr + os_params(raw_os_params, &umfOsMemoryProviderParamsDestroy); - size_t override_capacity = 2; - size_t override_min_bucket = 32; - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", - &override_capacity, sizeof(override_capacity))); - ASSERT_SUCCESS(umfCtlSet( - "umf.pool.default.disjoint.params.min_bucket_size", - &override_min_bucket, sizeof(override_min_bucket))); + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_params.get()); + if (providerWrapper.get() == nullptr) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } - umf_disjoint_pool_params_handle_t params = nullptr; - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 1)); - ASSERT_SUCCESS(umfDisjointPoolParamsSetMinBucketSize(params, 64)); + size_t default_capacity = 4; + size_t default_min_bucket = 8; + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &default_capacity, sizeof(default_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ( + umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &default_min_bucket, sizeof(default_min_bucket)), 
+ UMF_RESULT_SUCCESS); + + size_t override_capacity = 2; + size_t override_min_bucket = 32; + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &override_capacity, sizeof(override_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ( + umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &override_min_bucket, sizeof(override_min_bucket)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t raw_params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(&raw_params), + UMF_RESULT_SUCCESS); + std::unique_ptr + params(raw_params, &umfDisjointPoolParamsDestroy); + + ASSERT_EQ(umfDisjointPoolParamsSetCapacity(params.get(), 1), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfDisjointPoolParamsSetMinBucketSize(params.get(), 64), + UMF_RESULT_SUCCESS); - { PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); + params.get()); + ASSERT_NE(poolWrapper.get(), nullptr); size_t value = 0; - ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.capacity", - &value, sizeof(value), poolWrapper.get())); + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &value, + sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); EXPECT_EQ(value, override_capacity); - ASSERT_SUCCESS(umfCtlGet( - "umf.pool.by_handle.{}.params.min_bucket_size", &value, - sizeof(value), poolWrapper.get())); + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); EXPECT_EQ(value, override_min_bucket); - } - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", - &default_capacity, sizeof(default_capacity))); - ASSERT_SUCCESS(umfCtlSet( - "umf.pool.default.disjoint.params.min_bucket_size", - &default_min_bucket, sizeof(default_min_bucket))); - - ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &default_capacity, sizeof(default_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ( + umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &default_min_bucket, sizeof(default_min_bucket)), + UMF_RESULT_SUCCESS); + }); } TEST_F(test, disjointCtlMemoryMetricsConsistency) { @@ -796,140 +815,172 @@ TEST_F(test, disjointCtlBucketStatsInvalid) { } TEST_F(test, disjointCtlParams) { - umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; - if (UMF_RESULT_ERROR_NOT_SUPPORTED == - umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } + umf_test::run_in_fork([] { + umf_os_memory_provider_params_handle_t raw_os_params = nullptr; + umf_result_t res = umfOsMemoryProviderParamsCreate(&raw_os_params); + if (res == UMF_RESULT_ERROR_NOT_SUPPORTED) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + ASSERT_EQ(res, UMF_RESULT_SUCCESS); - ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), - os_memory_provider_params); - if (providerWrapper.get() == NULL) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } + std::unique_ptr + os_params(raw_os_params, &umfOsMemoryProviderParamsDestroy); - umf_disjoint_pool_params_handle_t params = nullptr; + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_params.get()); + if (providerWrapper.get() == nullptr) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } - // slab_min_size - size_t new_slab_min_size = 128 * 1024; - 
ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.slab_min_size", - &new_slab_min_size, sizeof(new_slab_min_size))); - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - { - PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); - size_t value = 0; - ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.slab_min_size", - &value, sizeof(value), poolWrapper.get())); - EXPECT_EQ(value, new_slab_min_size); - size_t other = new_slab_min_size + 1024; - umf_result_t ret = umfCtlSet( - "umf.pool.by_handle.{}.params.slab_min_size", &other, sizeof(other), - poolWrapper.get()); - EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); - } - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - size_t default_slab_min_size = 64 * 1024; - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.slab_min_size", - &default_slab_min_size, - sizeof(default_slab_min_size))); - - // max_poolable_size - size_t new_max_poolable = 1 * MB; - ASSERT_SUCCESS(umfCtlSet( - "umf.pool.default.disjoint.params.max_poolable_size", &new_max_poolable, - sizeof(new_max_poolable))); - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - { - PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); - size_t value = 0; - ASSERT_SUCCESS(umfCtlGet( - "umf.pool.by_handle.{}.params.max_poolable_size", &value, - sizeof(value), poolWrapper.get())); - EXPECT_EQ(value, new_max_poolable); - size_t other = new_max_poolable * 2; - umf_result_t ret = umfCtlSet( - "umf.pool.by_handle.{}.params.max_poolable_size", &other, - sizeof(other), poolWrapper.get()); - EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); - } - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - size_t default_max_poolable = 2 * MB; - ASSERT_SUCCESS(umfCtlSet( - "umf.pool.default.disjoint.params.max_poolable_size", &default_max_poolable, - sizeof(default_max_poolable))); - - // capacity - size_t new_capacity = 8; - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", - &new_capacity, sizeof(new_capacity))); - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - { - PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); - size_t value = 0; - ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.capacity", - &value, sizeof(value), poolWrapper.get())); - EXPECT_EQ(value, new_capacity); - size_t other = 16; - umf_result_t ret = umfCtlSet("umf.pool.by_handle.{}.params.capacity", - &other, sizeof(other), - poolWrapper.get()); - EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); - } - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - size_t default_capacity = 4; - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.capacity", - &default_capacity, sizeof(default_capacity))); - - // min_bucket_size - size_t new_min_bucket = 16; - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", - &new_min_bucket, sizeof(new_min_bucket))); - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - { - PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); - size_t value = 0; - ASSERT_SUCCESS(umfCtlGet( - "umf.pool.by_handle.{}.params.min_bucket_size", &value, - sizeof(value), poolWrapper.get())); - EXPECT_EQ(value, new_min_bucket); - size_t other = 32; - umf_result_t ret = umfCtlSet( - "umf.pool.by_handle.{}.params.min_bucket_size", &other, - sizeof(other), poolWrapper.get()); - EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); - } - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - size_t default_min_bucket = 8; - 
ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", - &default_min_bucket, sizeof(default_min_bucket))); - - // pool_trace - int new_trace = 3; - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.pool_trace", - &new_trace, sizeof(new_trace))); - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - { - PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); - int value = 0; - ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.pool_trace", - &value, sizeof(value), poolWrapper.get())); - EXPECT_EQ(value, new_trace); - int other = 1; - umf_result_t ret = umfCtlSet("umf.pool.by_handle.{}.params.pool_trace", - &other, sizeof(other), - poolWrapper.get()); - EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); - } - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - int default_trace = 0; - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.params.pool_trace", - &default_trace, sizeof(default_trace))); + // slab_min_size + { + size_t new_slab_min_size = 128 * 1024; + ASSERT_EQ( + umfCtlSet("umf.pool.default.disjoint.params.slab_min_size", + &new_slab_min_size, sizeof(new_slab_min_size)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); + + size_t value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.slab_min_size", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_slab_min_size); + + size_t other = new_slab_min_size + 1024; + EXPECT_EQ(umfCtlSet("umf.pool.by_handle.{}.params.slab_min_size", + &other, sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } - ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); -} + // max_poolable_size + { + size_t new_max_poolable = 1 * MB; + ASSERT_EQ( + umfCtlSet("umf.pool.default.disjoint.params.max_poolable_size", + &new_max_poolable, sizeof(new_max_poolable)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); + + size_t value = 0; + ASSERT_EQ( + umfCtlGet("umf.pool.by_handle.{}.params.max_poolable_size", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_max_poolable); + + size_t other = new_max_poolable * 2; + EXPECT_EQ( + umfCtlSet("umf.pool.by_handle.{}.params.max_poolable_size", + &other, sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } + + // capacity + { + size_t new_capacity = 8; + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &new_capacity, sizeof(new_capacity)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); + + size_t value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &value, + sizeof(value), poolWrapper.get()), + 
UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_capacity); + + size_t other = 16; + EXPECT_EQ(umfCtlSet("umf.pool.by_handle.{}.params.capacity", &other, + sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } + // min_bucket_size + { + size_t new_min_bucket = 16; + ASSERT_EQ( + umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &new_min_bucket, sizeof(new_min_bucket)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); + + size_t value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_min_bucket); + + size_t other = 32; + EXPECT_EQ(umfCtlSet("umf.pool.by_handle.{}.params.min_bucket_size", + &other, sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } + + // pool_trace + { + int new_trace = 3; + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.pool_trace", + &new_trace, sizeof(new_trace)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); + + int value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.pool_trace", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_trace); + + int other = 1; + EXPECT_EQ(umfCtlSet("umf.pool.by_handle.{}.params.pool_trace", + &other, sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } + }); +} From 55d4f0a07c655912afbe66c38bdd4e03dbd3bf51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Wed, 17 Sep 2025 18:01:51 +0200 Subject: [PATCH 07/11] rename name to path in ctl --- include/umf/experimental/ctl.h | 16 +++++++------- src/ctl/ctl.c | 38 +++++++++++++++++----------------- src/ctl/ctl_internal.h | 2 +- src/libumf.c | 18 ++++++++-------- 4 files changed, 37 insertions(+), 37 deletions(-) diff --git a/include/umf/experimental/ctl.h b/include/umf/experimental/ctl.h index 6af3532342..0f7ab860ae 100644 --- a/include/umf/experimental/ctl.h +++ b/include/umf/experimental/ctl.h @@ -17,34 +17,34 @@ extern "C" { #endif /// -/// @brief Get value of a specified attribute at the given name. -/// @param name name of an attribute to be retrieved +/// @brief Get value of a specified attribute at the given path. +/// @param path path of an attribute to be retrieved /// @param arg [out] pointer to the variable where the value will be stored /// @param size size of the value, depends on the context /// @param ... additional arguments that can be passed to the callback /// @return UMF_RESULT_SUCCESS on success or UMF_RESULT_ERROR_UNKNOWN on failure. /// -umf_result_t umfCtlGet(const char *name, void *arg, size_t size, ...); +umf_result_t umfCtlGet(const char *path, void *arg, size_t size, ...); /// -/// @brief Set value of a specified attribute at the given name. -/// @param name name of an attribute to be set +/// @brief Set value of a specified attribute at the given path. 
+/// @brief Set value of a specified attribute at the given path.
+/// @param path path of an attribute to be set /// @param arg [in] pointer to the value that will be set /// @param size [in] size of the value, depends on the context /// @param ... additional arguments that can be passed to the callback /// @return UMF_RESULT_SUCCESS on success or UMF_RESULT_ERROR_UNKNOWN on failure. /// -umf_result_t umfCtlSet(const char *name, void *arg, size_t size, ...); +umf_result_t umfCtlSet(const char *path, void *arg, size_t size, ...); /// /// @brief Execute callback related with the specified attribute. -/// @param name name of an attribute to be executed +/// @param path path of an attribute to be executed /// @param arg [in/out] pointer to the value, can be used as an input or output /// @param size [in] size of the value, depends on the context /// @param ... additional arguments that can be passed to the callback /// @return UMF_RESULT_SUCCESS on success or UMF_RESULT_ERROR_UNKNOWN on failure. /// -umf_result_t umfCtlExec(const char *name, void *arg, size_t size, ...); +umf_result_t umfCtlExec(const char *path, void *arg, size_t size, ...); #ifdef __cplusplus } diff --git a/src/ctl/ctl.c b/src/ctl/ctl.c index ac9f294ed1..2edca576a0 100644 --- a/src/ctl/ctl.c +++ b/src/ctl/ctl.c @@ -298,26 +298,26 @@ ctl_exec_query_subtree(void *ctx, const umf_ctl_node_t *n, * ctl_find_and_execulte_node -- (internal) searches for a matching entry point in the * provided nodes * - * Name offset is used to return the offset of the name in the query string. + * Path offset is used to return the offset of the path in the query string. * The caller is responsible for freeing all of the allocated indexes, * regardless of the return value. */ static optional_umf_result_t ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, - umf_ctl_query_source_t source, const char *name, + umf_ctl_query_source_t source, const char *path, umf_ctl_query_type_t type, void *arg, size_t size, va_list args) { assert(nodes != NULL); - assert(name != NULL); + assert(path != NULL); const umf_ctl_node_t *n = NULL; optional_umf_result_t ret; - size_t name_offset = 0; + size_t path_offset = 0; ret.is_valid = true; ret.value = UMF_RESULT_SUCCESS; char *sptr = NULL; - char *parse_str = Strdup(name); + char *parse_str = Strdup(path); if (parse_str == NULL) { ret.is_valid = false; return ret; @@ -336,7 +336,7 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, */ while (node_name != NULL) { char *next_node = strtok_r(NULL, CTL_QUERY_NODE_SEPARATOR, &sptr); - name_offset = node_name - parse_str; + path_offset = node_name - parse_str; if (n != NULL && n->type == CTL_NODE_SUBTREE) { // if a subtree occurs, the subtree handler should be called break; @@ -500,7 +500,7 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, // if the node is a subtree, then we need to call the subtree handler ret.value = ctl_exec_query_subtree(ctx, n, source, arg, size, indexes->next, - name + name_offset, type, args); + path + path_offset, type, args); } else { switch (type) { case CTL_QUERY_READ: @@ -530,14 +530,14 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, } /* - * ctl_query -- (internal) parses the name and calls the appropriate methods + * ctl_query -- (internal) parses the path and calls the appropriate methods * from the ctl tree */ umf_result_t ctl_query(struct ctl *ctl, void *ctx, - umf_ctl_query_source_t source, const char *name, + umf_ctl_query_source_t source, const char *path, umf_ctl_query_type_t type, void *arg, size_t size, va_list args) { - if 
(name == NULL) { + if (path == NULL) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -545,10 +545,10 @@ umf_result_t ctl_query(struct ctl *ctl, void *ctx, va_copy(args_copy, args); optional_umf_result_t ret = ctl_find_and_execute_node( - CTL_NODE(global), ctx, source, name, type, arg, size, args_copy); + CTL_NODE(global), ctx, source, path, type, arg, size, args_copy); if (ret.is_valid == false && ctl) { - ret = ctl_find_and_execute_node(ctl->root, ctx, source, name, type, arg, + ret = ctl_find_and_execute_node(ctl->root, ctx, source, path, type, arg, size, args); } @@ -573,16 +573,16 @@ void ctl_register_module_node(struct ctl *c, const char *name, /* * ctl_parse_query -- (internal) splits an entire query string - * into name and value + * into path and value */ -static int ctl_parse_query(char *qbuf, char **name, char **value) { +static int ctl_parse_query(char *qbuf, char **path, char **value) { if (qbuf == NULL) { return -1; } char *sptr = NULL; - *name = strtok_r(qbuf, CTL_NAME_VALUE_SEPARATOR, &sptr); - if (*name == NULL) { + *path = strtok_r(qbuf, CTL_NAME_VALUE_SEPARATOR, &sptr); + if (*path == NULL) { return -1; } @@ -608,20 +608,20 @@ static umf_result_t ctl_load_config_helper(struct ctl *ctl, void *ctx, char *buf, ...) { umf_result_t ret = UMF_RESULT_SUCCESS; char *sptr = NULL; /* for internal use of strtok */ - char *name; + char *path; char *value; char *qbuf = strtok_r(buf, CTL_STRING_QUERY_SEPARATOR, &sptr); va_list empty_args; va_start(empty_args, buf); while (qbuf != NULL) { - int parse_res = ctl_parse_query(qbuf, &name, &value); + int parse_res = ctl_parse_query(qbuf, &path, &value); if (parse_res != 0) { ret = UMF_RESULT_ERROR_INVALID_ARGUMENT; goto end; } // we do not need to copy va_list before call as we know that for query_config_input // ctl_query will not call va_arg on it. Ref 7.15/3 of C99 standard - ret = ctl_query(ctl, ctx, CTL_QUERY_CONFIG_INPUT, name, CTL_QUERY_WRITE, + ret = ctl_query(ctl, ctx, CTL_QUERY_CONFIG_INPUT, path, CTL_QUERY_WRITE, value, strlen(value) + 1, empty_args); if (ret != UMF_RESULT_SUCCESS && ctx != NULL) { diff --git a/src/ctl/ctl_internal.h b/src/ctl/ctl_internal.h index a043b01c40..aa159199b0 100644 --- a/src/ctl/ctl_internal.h +++ b/src/ctl/ctl_internal.h @@ -198,7 +198,7 @@ int ctl_arg_string(const void *arg, void *dest, size_t dest_size); #define CTL_NODE(name, ...) ctl_node_##__VA_ARGS__##_##name umf_result_t ctl_query(struct ctl *ctl, void *ctx, - umf_ctl_query_source_t source, const char *name, + umf_ctl_query_source_t source, const char *path, umf_ctl_query_type_t type, void *arg, size_t size, va_list args); diff --git a/src/libumf.c b/src/libumf.c index b11a55cc9e..ab6be8704a 100644 --- a/src/libumf.c +++ b/src/libumf.c @@ -157,41 +157,41 @@ umf_result_t umfTearDown(void) { int umfGetCurrentVersion(void) { return UMF_VERSION_CURRENT; } -umf_result_t umfCtlGet(const char *name, void *arg, size_t size, ...) { +umf_result_t umfCtlGet(const char *path, void *arg, size_t size, ...) { libumfInit(); // ctx can be NULL when getting defaults - if (name == NULL || arg == NULL || size == 0) { + if (path == NULL || arg == NULL || size == 0) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } va_list args; va_start(args, size); - umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, name, + umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, path, CTL_QUERY_READ, arg, size, args); va_end(args); return ret; } -umf_result_t umfCtlSet(const char *name, void *arg, size_t size, ...) 
{ +umf_result_t umfCtlSet(const char *path, void *arg, size_t size, ...) { libumfInit(); // ctx can be NULL when setting defaults - if (name == NULL || arg == NULL || size == 0) { + if (path == NULL || arg == NULL || size == 0) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } va_list args; va_start(args, size); - umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, name, + umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, path, CTL_QUERY_WRITE, arg, size, args); va_end(args); return ret; } -umf_result_t umfCtlExec(const char *name, void *arg, size_t size, ...) { +umf_result_t umfCtlExec(const char *path, void *arg, size_t size, ...) { libumfInit(); // arg can be NULL when executing a command // ctx can be NULL when executing defaults // size can depends on the arg - if (name == NULL) { + if (path == NULL) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -201,7 +201,7 @@ umf_result_t umfCtlExec(const char *name, void *arg, size_t size, ...) { va_list args; va_start(args, size); - umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, name, + umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, path, CTL_QUERY_RUNNABLE, arg, size, args); va_end(args); return ret; From db6df687b84371e17e8b8db31a0c8bbc0851ae9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Wed, 17 Sep 2025 18:41:32 +0200 Subject: [PATCH 08/11] remove tests for changing pool name thru ctl --- examples/CMakeLists.txt | 2 +- examples/ctl/CMakeLists.txt | 6 +- examples/ctl/ctl_example.c | 9 ++ examples/ctl/ctl_statistics_example.c | 5 +- src/ctl/ctl_defaults.h | 2 +- src/pool/pool_disjoint.c | 10 +- src/pool/pool_jemalloc.c | 20 +-- src/pool/pool_scalable.c | 7 +- src/provider/provider_cuda.c | 18 +-- src/provider/provider_file_memory.c | 7 +- src/provider/provider_fixed_memory.c | 7 +- test/common/fork_helpers.hpp | 1 - test/ctl/ctl_api.cpp | 212 ++------------------------ test/ctl/ctl_env_app.cpp | 7 +- test/ctl/ctl_env_driver.cpp | 16 +- test/pools/disjoint_pool.cpp | 12 +- test/pools/disjoint_pool_ctl.cpp | 117 +++----------- 17 files changed, 98 insertions(+), 360 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 49cbddb257..1a4da085f5 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -300,7 +300,7 @@ if(LINUX) target_include_directories( ${EXAMPLE_NAME} PRIVATE ${UMF_CMAKE_SOURCE_DIR}/src/utils - ${UMF_CMAKE_SOURCE_DIR}/include) + ${UMF_CMAKE_SOURCE_DIR}/include) target_link_directories(${EXAMPLE_NAME} PRIVATE ${LIBHWLOC_LIBRARY_DIRS}) diff --git a/examples/ctl/CMakeLists.txt b/examples/ctl/CMakeLists.txt index 9e92a7aa22..2f78ef4e1d 100644 --- a/examples/ctl/CMakeLists.txt +++ b/examples/ctl/CMakeLists.txt @@ -1,6 +1,6 @@ -#Copyright(C) 2024 Intel Corporation -#Under the Apache License v2.0 with LLVM Exceptions.See LICENSE.TXT. -#SPDX - License - Identifier : Apache - 2.0 WITH LLVM - exception +# Copyright (C) 2025 Intel Corporation +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception cmake_minimum_required(VERSION 3.14.0 FATAL_ERROR) project(umf_example_ctl LANGUAGES C) diff --git a/examples/ctl/ctl_example.c b/examples/ctl/ctl_example.c index 4d1e47eace..8d1ed9b5b0 100644 --- a/examples/ctl/ctl_example.c +++ b/examples/ctl/ctl_example.c @@ -1,3 +1,12 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + #define _GNU_SOURCE 1 #include #include diff --git a/examples/ctl/ctl_statistics_example.c b/examples/ctl/ctl_statistics_example.c index 246d1d913b..da0874bed9 100644 --- a/examples/ctl/ctl_statistics_example.c +++ b/examples/ctl/ctl_statistics_example.c @@ -94,9 +94,8 @@ static void print_pool_stat_by_handle(const char *label, size_t value = 0; /* Surround the {} placeholder with literal segments so CTL resolves * whichever pool handle the allocator hands back. */ - umf_result_t res = - umfCtlGet("umf.pool.by_handle.{}.stats.{}", &value, sizeof(value), - pool, stat_node); + umf_result_t res = umfCtlGet("umf.pool.by_handle.{}.stats.{}", &value, + sizeof(value), pool, stat_node); if (report_pool_stat_failure(label, required_trace_level, res)) { return; } diff --git a/src/ctl/ctl_defaults.h b/src/ctl/ctl_defaults.h index 2e57175d60..1da73692ee 100644 --- a/src/ctl/ctl_defaults.h +++ b/src/ctl/ctl_defaults.h @@ -9,8 +9,8 @@ #ifndef UMF_CTL_DEFAULTS_H #define UMF_CTL_DEFAULTS_H 1 -#include #include +#include #include diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c index 1b1441efda..3fd5becb02 100644 --- a/src/pool/pool_disjoint.c +++ b/src/pool/pool_disjoint.c @@ -205,8 +205,7 @@ CTL_WRITE_HANDLER(capacity)(void *ctx, umf_ctl_query_source_t source, void *arg, return UMF_RESULT_ERROR_INVALID_ARGUMENT; } size_t value = *(size_t *)arg; - umf_result_t ret = - umfDisjointPoolParamsSetCapacity(&pool->params, value); + umf_result_t ret = umfDisjointPoolParamsSetCapacity(&pool->params, value); if (ret == UMF_RESULT_SUCCESS) { pool->params_overridden |= DP_OVERRIDE_CAPACITY; } @@ -279,8 +278,7 @@ CTL_WRITE_HANDLER(pool_trace)(void *ctx, umf_ctl_query_source_t source, return UMF_RESULT_ERROR_INVALID_ARGUMENT; } int value = *(int *)arg; - umf_result_t ret = - umfDisjointPoolParamsSetTrace(&pool->params, value); + umf_result_t ret = umfDisjointPoolParamsSetTrace(&pool->params, value); if (ret == UMF_RESULT_SUCCESS) { pool->params_overridden |= DP_OVERRIDE_POOL_TRACE; } @@ -557,8 +555,8 @@ umf_result_t disjoint_pool_ctl(void *hPool, umf_ctl_query_type_t queryType, va_list args) { utils_init_once(&ctl_initialized, initialize_disjoint_ctl); - return ctl_query(&disjoint_ctl_root, hPool, operationType, name, - queryType, arg, size, args); + return ctl_query(&disjoint_ctl_root, hPool, operationType, name, queryType, + arg, size, args); } // Temporary solution for disabling memory poisoning. 
This is needed because diff --git a/src/pool/pool_jemalloc.c b/src/pool/pool_jemalloc.c index 9090904572..50a245e05d 100644 --- a/src/pool/pool_jemalloc.c +++ b/src/pool/pool_jemalloc.c @@ -11,8 +11,8 @@ #include #include -#include "ctl/ctl_internal.h" #include "base_alloc_global.h" +#include "ctl/ctl_internal.h" #include "memory_provider_internal.h" #include "provider_tracking.h" #include "utils_common.h" @@ -616,9 +616,10 @@ static umf_result_t op_post_initialize(void *pool) { return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC; } -static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( - void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, - umf_ctl_index_utlist_t *indexes) { +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { (void)source; (void)arg; (void)size; @@ -627,12 +628,11 @@ static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( } static void initialize_jemalloc_ctl(void) { - jemalloc_ctl_root.root[jemalloc_ctl_root.first_free++] = - (umf_ctl_node_t){ - .name = "post_initialize", - .type = CTL_NODE_LEAF, - .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), - }; + jemalloc_ctl_root.root[jemalloc_ctl_root.first_free++] = (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } static umf_result_t op_ctl(void *pool, umf_ctl_query_source_t operationType, diff --git a/src/pool/pool_scalable.c b/src/pool/pool_scalable.c index 9dd7acf883..b5211615b8 100644 --- a/src/pool/pool_scalable.c +++ b/src/pool/pool_scalable.c @@ -469,9 +469,10 @@ static umf_result_t tbb_get_last_allocation_error(void *pool) { return TLS_last_allocation_error; } -static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( - void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, - umf_ctl_index_utlist_t *indexes) { +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { (void)source; (void)arg; (void)size; diff --git a/src/provider/provider_cuda.c b/src/provider/provider_cuda.c index ec8037f1ec..a67f04b255 100644 --- a/src/provider/provider_cuda.c +++ b/src/provider/provider_cuda.c @@ -110,9 +110,10 @@ struct ctl cu_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; static umf_result_t cu_memory_provider_post_initialize(void *provider); -static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( - void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, - umf_ctl_index_utlist_t *indexes) { +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { (void)source; (void)arg; (void)size; @@ -159,12 +160,11 @@ static umf_result_t cu2umf_result(CUresult result) { static void initialize_cu_ctl(void) { CTL_REGISTER_MODULE(&cu_memory_ctl_root, stats); - cu_memory_ctl_root.root[cu_memory_ctl_root.first_free++] = - (umf_ctl_node_t){ - .name = "post_initialize", - .type = CTL_NODE_LEAF, - .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), - }; + cu_memory_ctl_root.root[cu_memory_ctl_root.first_free++] = (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } static void init_cu_global_state(void) { diff --git a/src/provider/provider_file_memory.c 
b/src/provider/provider_file_memory.c index 830740f206..d666a2fd71 100644 --- a/src/provider/provider_file_memory.c +++ b/src/provider/provider_file_memory.c @@ -161,9 +161,10 @@ struct ctl file_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; static umf_result_t file_post_initialize(void *provider); -static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( - void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, - umf_ctl_index_utlist_t *indexes) { +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { (void)source; (void)arg; (void)size; diff --git a/src/provider/provider_fixed_memory.c b/src/provider/provider_fixed_memory.c index a38c981c64..4e8f22e4fb 100644 --- a/src/provider/provider_fixed_memory.c +++ b/src/provider/provider_fixed_memory.c @@ -66,9 +66,10 @@ struct ctl fixed_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; static umf_result_t fixed_post_initialize(void *provider); -static umf_result_t CTL_RUNNABLE_HANDLER(post_initialize)( - void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, - umf_ctl_index_utlist_t *indexes) { +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { (void)source; (void)arg; (void)size; diff --git a/test/common/fork_helpers.hpp b/test/common/fork_helpers.hpp index 42f50de6d1..70d52cecab 100644 --- a/test/common/fork_helpers.hpp +++ b/test/common/fork_helpers.hpp @@ -74,4 +74,3 @@ template void run_in_fork(Func &&func) { } } // namespace umf_test - diff --git a/test/ctl/ctl_api.cpp b/test/ctl/ctl_api.cpp index a3961297e5..9f11fe8ace 100644 --- a/test/ctl/ctl_api.cpp +++ b/test/ctl/ctl_api.cpp @@ -9,12 +9,8 @@ #include #include -#include -#include // For std::ref -#include #include #include -#include #include #include @@ -23,8 +19,6 @@ #include #include #include -#include -#include #include #include "../common/base.hpp" @@ -56,124 +50,6 @@ TEST_F(test, ctl_by_handle_os_provider) { umfMemoryProviderDestroy(hProvider); } -class Pool { - public: - Pool() : provider(NULL), pool(NULL) {} - - int instantiatePool(const umf_memory_pool_ops_t *pool_ops, - const void *pool_params, - umf_pool_create_flags_t flags = 0) { - freeResources(); - provider = create_memory_provider(); - if (provider == NULL) { - return -1; // Provider not supported - } - int ret = umfPoolCreate(pool_ops, provider, pool_params, flags, &pool); - if (ret != UMF_RESULT_SUCCESS) { - umfMemoryProviderDestroy(provider); - provider = NULL; - return -2; // Failed to create memory pool - } - return 0; // Success - } - - // Template specialization for different types of reference value - template T getReferenceValue() { - if constexpr (std::is_arithmetic_v) { - return 0xBAD; - } else if constexpr (std::is_same_v) { - return "0xBAD"; - } - } - - template - void validateQuery(umf_result_t (*ctlApiFunction)(const char *name, - void *arg, size_t, ...), - const char *name, T expectedValue, - umf_result_t expected) { - T value = getReferenceValue(); - umf_result_t ret; - char ret_buf[256] = {0}; - if constexpr (std::is_same_v) { - strncpy(ret_buf, value.c_str(), sizeof(ret_buf) - 1); - ret_buf[sizeof(ret_buf) - 1] = '\0'; // Ensure null-termination - ret = ctlApiFunction(name, (void *)ret_buf, sizeof(ret_buf), pool); - } else if constexpr (std::is_arithmetic_v) { - std::string value_str = 
std::to_string(value); - strncpy(ret_buf, value_str.c_str(), sizeof(ret_buf) - 1); - ret_buf[sizeof(ret_buf) - 1] = '\0'; // Ensure null-termination - ret = ctlApiFunction(name, (void *)ret_buf, sizeof(ret_buf), pool); - } else { - ret = ctlApiFunction(name, &value, sizeof(value), pool); - } - - ASSERT_EQ(ret, expected); - if (ret == UMF_RESULT_SUCCESS) { - ASSERT_EQ(ret_buf, expectedValue); - } - } - - template - void executeQuery(umf_result_t (*ctlApiFunction)(const char *name, - void *arg, size_t, ...), - const char *name, T value) { - size_t value_len; - if constexpr (std::is_arithmetic_v) { - value_len = sizeof(value); - } else if constexpr (std::is_same_v) { - value_len = strlen(value.c_str()); - } else if constexpr (std::is_same_v) { - value_len = strlen(value); - } else { - throw std::runtime_error("Unsupported type for value"); - } - umf_result_t ret = ctlApiFunction(name, (void *)value, value_len); - ASSERT_EQ(ret, UMF_RESULT_SUCCESS); - } - - void freeResources() { - if (pool) { - umfPoolDestroy(pool); - pool = NULL; - } - if (provider) { - umfMemoryProviderDestroy(provider); - provider = NULL; - } - if (data) { - free(data); - data = nullptr; - } - } - - umf_memory_provider_handle_t provider; - umf_memory_pool_handle_t pool; - void *data = nullptr; - - private: - // Create a memory provider - umf_memory_provider_handle_t create_memory_provider() { - const umf_memory_provider_ops_t *provider_ops = - umfFixedMemoryProviderOps(); - umf_fixed_memory_provider_params_handle_t params = NULL; - - data = malloc(1024 * 1024); - int ret = - umfFixedMemoryProviderParamsCreate(data, 1024 * 1024, ¶ms); - if (ret != UMF_RESULT_SUCCESS) { - return 0; - } - - ret = umfMemoryProviderCreate(provider_ops, params, &provider); - umfFixedMemoryProviderParamsDestroy(params); - if (ret != UMF_RESULT_SUCCESS) { - return 0; - } - - return provider; - } -}; - class CtlTest : public ::testing::Test { public: CtlTest() {} @@ -190,13 +66,13 @@ class CtlTest : public ::testing::Test { TEST_F(CtlTest, ctlDefault) { umf_test::run_in_fork([] { const char *arg = "default_name"; - ASSERT_EQ(umfCtlSet("umf.pool.default.some_pool.some_path", - (void *)arg, strlen(arg)), + ASSERT_EQ(umfCtlSet("umf.pool.default.some_pool.some_path", (void *)arg, + strlen(arg)), UMF_RESULT_SUCCESS); char output[64] = {1}; ASSERT_EQ(umfCtlGet("umf.pool.default.some_pool.some_path", - (void *)output, sizeof(output)), + (void *)output, sizeof(output)), UMF_RESULT_SUCCESS); ASSERT_STREQ(output, arg); }); @@ -247,8 +123,7 @@ TEST_F(CtlTest, ctlDefaultPoolMultithreaded) { threads.emplace_back([i, &totalRecords, &predefined_value, &name_prefix, max_size = max_size]() { for (size_t j = 0; j < max_size; j++) { - std::string name = - name_prefix + std::to_string(i * 10 + j); + std::string name = name_prefix + std::to_string(i * 10 + j); umfCtlSet(name.c_str(), (void *)predefined_value, strlen(predefined_value)); std::atomic_fetch_add(&totalRecords, 1UL); @@ -296,66 +171,6 @@ TEST_F(CtlTest, ctlDefaultPoolOverwrite) { }); } -TEST_F(CtlTest, DISABLED_ctlNameValidation) { - std::string name = "umf.pool.default.disjoint.name"; - std::string value = "new_disjoint_pool_name"; - umf_disjoint_pool_params_handle_t params = NULL; - - Pool p; - try { - p.executeQuery(umfCtlSet, name.c_str(), value.c_str()); - umf_result_t res = umfDisjointPoolParamsCreate(¶ms); - ASSERT_EQ(res, UMF_RESULT_SUCCESS); - - auto ret = p.instantiatePool(umfDisjointPoolOps(), params); - ASSERT_EQ(ret, 0); - - p.validateQuery(umfCtlGet, "umf.pool.by_handle.{}.disjoint.name", - 
std::move(value), UMF_RESULT_SUCCESS); - } catch (...) { - GTEST_FAIL() << "Unknown exception!"; - } - umfDisjointPoolParamsDestroy(params); - p.freeResources(); -} - -TEST_F(CtlTest, DISABLED_ctlSizeValidation) { - std::string name = "umf.pool.default.disjoint.name"; - std::string value = "1234567890"; - umf_disjoint_pool_params_handle_t params = NULL; - - Pool p; - try { - p.executeQuery(umfCtlSet, name.c_str(), value.c_str()); - umf_result_t res = umfDisjointPoolParamsCreate(¶ms); - ASSERT_EQ(res, UMF_RESULT_SUCCESS); - - auto ret = p.instantiatePool(umfDisjointPoolOps(), params); - ASSERT_EQ(ret, 0); - - char output[100] = {0}; - umfCtlGet("umf.pool.default.disjoint.name", output, sizeof(output)); - ASSERT_EQ(std::string(output), value); - - memset(output, 0, sizeof(output)); - umfCtlGet("umf.pool.default.disjoint.name", output, value.size() / 2); - auto half_value = value.substr(0, value.size() / 2); - ASSERT_EQ(half_value, std::string(output)); - } catch (...) { - GTEST_FAIL() << "Unknown exception!"; - } - umfDisjointPoolParamsDestroy(params); - p.freeResources(); -} - -TEST_F(CtlTest, DISABLED_ctlExecInvalidSize) { - std::string name = "umf.pool.default.disjoint.name"; - ASSERT_EQ(umfCtlSet(name.c_str(), (void *)"test_value", 0), - UMF_RESULT_ERROR_INVALID_ARGUMENT); - ASSERT_EQ(umfCtlSet(name.c_str(), NULL, 10), - UMF_RESULT_ERROR_INVALID_ARGUMENT); -} - TEST_F(CtlTest, ctlDefaultMultithreadedProvider) { umf_test::run_in_fork([] { std::vector threads; @@ -363,16 +178,15 @@ TEST_F(CtlTest, ctlDefaultMultithreadedProvider) { const char *predefined_value = "xyzzyx"; std::string name_prefix = "umf.provider.default.some_provider."; for (int i = 0; i < 8; i++) { - threads.emplace_back( - [i, &totalRecords, &predefined_value, &name_prefix]() { - for (int j = 0; j < 10; j++) { - std::string name = - name_prefix + std::to_string(i * 10 + j); - umfCtlSet(name.c_str(), (void *)predefined_value, - strlen(predefined_value)); - std::atomic_fetch_add(&totalRecords, 1); - } - }); + threads.emplace_back([i, &totalRecords, &predefined_value, + &name_prefix]() { + for (int j = 0; j < 10; j++) { + std::string name = name_prefix + std::to_string(i * 10 + j); + umfCtlSet(name.c_str(), (void *)predefined_value, + strlen(predefined_value)); + std::atomic_fetch_add(&totalRecords, 1); + } + }); } for (auto &thread : threads) { thread.join(); diff --git a/test/ctl/ctl_env_app.cpp b/test/ctl/ctl_env_app.cpp index 1d6b3c463b..bb068e361d 100644 --- a/test/ctl/ctl_env_app.cpp +++ b/test/ctl/ctl_env_app.cpp @@ -92,8 +92,8 @@ static int test_disjoint_pool(int argc, char **argv) { return 0; } - if (umfMemoryProviderCreate(umfOsMemoryProviderOps(), os_params, &provider) != - UMF_RESULT_SUCCESS) { + if (umfMemoryProviderCreate(umfOsMemoryProviderOps(), os_params, + &provider) != UMF_RESULT_SUCCESS) { std::cerr << "Failed to create provider" << std::endl; goto out; } @@ -108,8 +108,7 @@ static int test_disjoint_pool(int argc, char **argv) { const char *name = argv[i]; const char *value = argv[i + 1]; char path[128]; - snprintf(path, sizeof(path), - "umf.pool.by_handle.{}.params.%s", name); + snprintf(path, sizeof(path), "umf.pool.by_handle.{}.params.%s", name); if (strcmp(name, "pool_trace") == 0) { int got = 0; diff --git a/test/ctl/ctl_env_driver.cpp b/test/ctl/ctl_env_driver.cpp index 9b5521dec6..63a805abe5 100644 --- a/test/ctl/ctl_env_driver.cpp +++ b/test/ctl/ctl_env_driver.cpp @@ -129,19 +129,17 @@ TEST_F(test, ctl_env_logger) { TEST_F(test, ctl_env_disjoint_pool_env) { run_case( - {{"UMF_CONF", - 
"umf.pool.default.disjoint.params.slab_min_size=65536;" - "umf.pool.default.disjoint.params.capacity=4;" - "umf.pool.default.disjoint.params.min_bucket_size=8;" - "umf.pool.default.disjoint.params.pool_trace=0"}}, + {{"UMF_CONF", "umf.pool.default.disjoint.params.slab_min_size=65536;" + "umf.pool.default.disjoint.params.capacity=4;" + "umf.pool.default.disjoint.params.min_bucket_size=8;" + "umf.pool.default.disjoint.params.pool_trace=0"}}, {"disjoint_pool", "slab_min_size", "65536", "capacity", "4", "min_bucket_size", "8", "pool_trace", "0"}); } TEST_F(test, ctl_env_disjoint_pool_file) { std::string cfg = CTL_CONF_FILE_DIR "/ctl_env_disjoint_pool.cfg"; - run_case( - {{"UMF_CONF_FILE", cfg}}, - {"disjoint_pool", "slab_min_size", "65536", "capacity", "4", - "min_bucket_size", "8", "pool_trace", "0"}); + run_case({{"UMF_CONF_FILE", cfg}}, + {"disjoint_pool", "slab_min_size", "65536", "capacity", "4", + "min_bucket_size", "8", "pool_trace", "0"}); } diff --git a/test/pools/disjoint_pool.cpp b/test/pools/disjoint_pool.cpp index 049ce277e3..065b457de0 100644 --- a/test/pools/disjoint_pool.cpp +++ b/test/pools/disjoint_pool.cpp @@ -4,10 +4,10 @@ #include +#include #include #include #include -#include #include "pool.hpp" #include "pool/pool_disjoint_internal.h" @@ -70,9 +70,8 @@ TEST_F(test, internals) { EXPECT_EQ(res, UMF_RESULT_SUCCESS); va_list empty_args; get_test_va_list(&empty_args); - res = ops->ext_ctl((void *)pool, CTL_QUERY_PROGRAMMATIC, - "post_initialize", nullptr, 0, CTL_QUERY_RUNNABLE, - empty_args); + res = ops->ext_ctl((void *)pool, CTL_QUERY_PROGRAMMATIC, "post_initialize", + nullptr, 0, CTL_QUERY_RUNNABLE, empty_args); va_end(empty_args); EXPECT_EQ(res, UMF_RESULT_SUCCESS); EXPECT_NE(pool, nullptr); @@ -327,9 +326,8 @@ TEST_F(test, disjointPoolTrim) { va_list empty_args; get_test_va_list(&empty_args); - res = ops->ext_ctl((void *)pool, CTL_QUERY_PROGRAMMATIC, - "post_initialize", nullptr, 0, CTL_QUERY_RUNNABLE, - empty_args); + res = ops->ext_ctl((void *)pool, CTL_QUERY_PROGRAMMATIC, "post_initialize", + nullptr, 0, CTL_QUERY_RUNNABLE, empty_args); va_end(empty_args); EXPECT_EQ(res, UMF_RESULT_SUCCESS); diff --git a/test/pools/disjoint_pool_ctl.cpp b/test/pools/disjoint_pool_ctl.cpp index 04b9216967..d9a5ce99b6 100644 --- a/test/pools/disjoint_pool_ctl.cpp +++ b/test/pools/disjoint_pool_ctl.cpp @@ -11,16 +11,14 @@ #include #include -#include -#include #include #include #include "base.hpp" #include "common/fork_helpers.hpp" +#include "ctl/ctl_internal.h" #include "utils_assert.h" #include "utils_log.h" -#include "ctl/ctl_internal.h" using umf_test::test; using namespace umf_test; @@ -95,73 +93,6 @@ class ProviderWrapper { void *m_params; }; -TEST_F(test, DISABLED_disjointCtlName) { - umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; - if (UMF_RESULT_ERROR_NOT_SUPPORTED == - umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } - - ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), - os_memory_provider_params); - if (providerWrapper.get() == NULL) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } - - // Set default name - const char *val = "disjoint_new_name"; - ASSERT_SUCCESS( - umfCtlSet("umf.pool.default.disjoint.name", (void *)val, strlen(val))); - - umf_disjoint_pool_params_handle_t params = nullptr; - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); - - // Check that the default 
name is correctly set - const char *name = NULL; - ASSERT_SUCCESS(umfPoolGetName(poolWrapper.get(), &name)); - ASSERT_STREQ(name, val); - - // Clean up - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); -} - -TEST_F(test, DISABLED_disjointCtlChangeNameTwice) { - umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; - if (UMF_RESULT_ERROR_NOT_SUPPORTED == - umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } - ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), - os_memory_provider_params); - if (providerWrapper.get() == NULL) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } - // Set default name - const char *val = "disjoint_new_name"; - const char *val2 = "another_name"; - ASSERT_SUCCESS( - umfCtlSet("umf.pool.default.disjoint.name", (void *)val, strlen(val))); - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.name", (void *)val2, - strlen(val2))); - - umf_disjoint_pool_params_handle_t params = nullptr; - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); - - // Check that the default name is correctly set - const char *name = NULL; - ASSERT_SUCCESS(umfPoolGetName(poolWrapper.get(), &name)); - ASSERT_STREQ(name, val2); - - // Clean up - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); -} - TEST_F(test, disjointCtlUsedMemory) { umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; if (UMF_RESULT_ERROR_NOT_SUPPORTED == @@ -343,29 +274,24 @@ TEST_F(test, disjointCtlGetParams) { size_t got_size = 0; ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.slab_min_size", - &got_size, sizeof(got_size), - poolWrapper.get())); + &got_size, sizeof(got_size), poolWrapper.get())); EXPECT_EQ(got_size, slab_min_size); - ASSERT_SUCCESS(umfCtlGet( - "umf.pool.by_handle.{}.params.max_poolable_size", &got_size, - sizeof(got_size), poolWrapper.get())); + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.max_poolable_size", + &got_size, sizeof(got_size), poolWrapper.get())); EXPECT_EQ(got_size, max_poolable_size); - ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.capacity", - &got_size, sizeof(got_size), - poolWrapper.get())); + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &got_size, + sizeof(got_size), poolWrapper.get())); EXPECT_EQ(got_size, capacity); ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", - &got_size, sizeof(got_size), - poolWrapper.get())); + &got_size, sizeof(got_size), poolWrapper.get())); EXPECT_EQ(got_size, min_bucket_size); int got_trace = 0; ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.pool_trace", - &got_trace, sizeof(got_trace), - poolWrapper.get())); + &got_trace, sizeof(got_trace), poolWrapper.get())); EXPECT_EQ(got_trace, pool_trace); ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); @@ -375,8 +301,7 @@ TEST_F(test, disjointCtlGetParams) { TEST_F(test, disjointCtlDefaultsOverride) { umf_test::run_in_fork([] { umf_os_memory_provider_params_handle_t raw_os_params = nullptr; - umf_result_t res = - umfOsMemoryProviderParamsCreate(&raw_os_params); + umf_result_t res = umfOsMemoryProviderParamsCreate(&raw_os_params); if (res == UMF_RESULT_ERROR_NOT_SUPPORTED) { GTEST_SKIP() << "OS memory provider is not supported!"; } @@ -397,24 
+322,21 @@ TEST_F(test, disjointCtlDefaultsOverride) { ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", &default_capacity, sizeof(default_capacity)), UMF_RESULT_SUCCESS); - ASSERT_EQ( - umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", - &default_min_bucket, sizeof(default_min_bucket)), - UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &default_min_bucket, sizeof(default_min_bucket)), + UMF_RESULT_SUCCESS); size_t override_capacity = 2; size_t override_min_bucket = 32; ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", &override_capacity, sizeof(override_capacity)), UMF_RESULT_SUCCESS); - ASSERT_EQ( - umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", - &override_min_bucket, sizeof(override_min_bucket)), - UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &override_min_bucket, sizeof(override_min_bucket)), + UMF_RESULT_SUCCESS); umf_disjoint_pool_params_handle_t raw_params = nullptr; - ASSERT_EQ(umfDisjointPoolParamsCreate(&raw_params), - UMF_RESULT_SUCCESS); + ASSERT_EQ(umfDisjointPoolParamsCreate(&raw_params), UMF_RESULT_SUCCESS); std::unique_ptr params(raw_params, &umfDisjointPoolParamsDestroy); @@ -441,10 +363,9 @@ TEST_F(test, disjointCtlDefaultsOverride) { ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", &default_capacity, sizeof(default_capacity)), UMF_RESULT_SUCCESS); - ASSERT_EQ( - umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", - &default_min_bucket, sizeof(default_min_bucket)), - UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &default_min_bucket, sizeof(default_min_bucket)), + UMF_RESULT_SUCCESS); }); } From eff4dbd000aa11194168c683064e6fd935ba28e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Thu, 18 Sep 2025 12:04:07 +0200 Subject: [PATCH 09/11] use actual pool name instead of default name --- docs/config/ctl.rst | 63 +++++++------- docs/config/index.rst | 4 +- docs/config/spelling_exceptions.txt | 3 + examples/ctl/ctl_example.c | 8 +- examples/ctl/ctl_statistics_example.c | 2 +- src/memory_pool.c | 11 +-- src/memory_provider.c | 23 +++--- test/CMakeLists.txt | 2 +- test/ctl/ctl_api.cpp | 115 +++++++++++++++++++++++++- test/pools/disjoint_pool_ctl.cpp | 92 +++++++++++++++++++++ test/utils/cpp_helpers.hpp | 1 + 11 files changed, 267 insertions(+), 57 deletions(-) diff --git a/docs/config/ctl.rst b/docs/config/ctl.rst index 4840f6eb2f..9988b640c8 100644 --- a/docs/config/ctl.rst +++ b/docs/config/ctl.rst @@ -37,7 +37,7 @@ Every ``{}`` in the path is replaced with an extra argument passed to the CTL function. Alternative addressing methods are described below. Pool / Provider addressing -========================== +============================ Two addressing schemes are provided: ``by_handle`` and ``by_name``. Each pool and provider has a unique handle and an optional user-defined name that can be @@ -56,7 +56,7 @@ appending an index after the name:: The number of pools with a given name can be obtained with the ``count`` node. Wildcards -========= +=========== A ``{}`` in the path acts as a wildcard and is replaced with successive arguments of ``umfCtlGet``, ``umfCtlSet`` or ``umfCtlExec``. Wildcards can @@ -74,18 +74,21 @@ replace any node, not only handles. For example:: Ensure that the types of wildcard arguments match the expected node types. 
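As an aside for readers of this series (an illustrative sketch, not part of the patch): a minimal C consumer of such a wildcard path. The helper name and the include paths other than ``umf/experimental/ctl.h`` are assumptions; the ``umf.pool.by_handle.{}.params.capacity`` path is the one exercised by the disjoint pool tests elsewhere in the series::

    #include <stdio.h>

    #include <umf/experimental/ctl.h>
    #include <umf/memory_pool.h>

    /* Read one disjoint pool parameter through the by_handle wildcard.
     * The single {} in the path is filled from the trailing argument. */
    static void print_capacity(umf_memory_pool_handle_t pool) {
        size_t capacity = 0;
        if (umfCtlGet("umf.pool.by_handle.{}.params.capacity", &capacity,
                      sizeof(capacity), pool) == UMF_RESULT_SUCCESS) {
            printf("capacity: %zu\n", capacity);
        }
    }

Writes through the same ``by_handle`` path are rejected with ``UMF_RESULT_ERROR_NOT_SUPPORTED`` once the pool exists, which is what the reworked ``disjointCtlParams`` test asserts.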
Default addressing -================== +=================== ``umf.provider.default`` and ``umf.pool.default`` store default values applied to providers or pools created after the defaults are set. For example:: - const char *name = "custom"; - umfCtlSet("umf.pool.default.disjoint.name", (void *)name, strlen(name)+1); + size_t capacity = 16; + umfCtlSet("umf.pool.default.disjoint.params.capacity", &capacity, + sizeof(capacity)); -Every subsequently created disjoint pool will use ``custom`` as its name unless -overridden by explicit parameters. Defaults may be supplied programmatically or -via configuration and are saved internally and applied during initalization of -a matching provider or pool. +Every subsequently created disjoint pool will use ``16`` as its starting +capacity overriding it's creation parameters. Defaults are keyed by the +name returned from the provider or pool ``get_name`` callback, so if pool/provider +has custom name it must be addressed explicitly. Defaults may be supplied programmatically +or via environment variable and are saved internally and applied during initialization of a +matching provider or pool. Environment variables ===================== @@ -771,7 +774,7 @@ these entries. >>>>>>> da1363dd (better documentation) Reading this reference ----------------------- +======================= Parameter annotations describe the values stored in the node rather than the pointer types passed to ``umfCtlGet``/``umfCtlSet``/``umfCtlExec``. The @@ -841,7 +844,7 @@ Logger nodes :param path: Receives the currently selected sink on reads. On writes, pass ``"stdout"`` or ``"stderr"`` to redirect to standard streams, a - NUL-terminated file path to append to a file, or ``NULL`` to disable + NULL-terminated file path to append to a file, or ``NULL`` to disable logging altogether. :type path: ``char *`` when reading, ``const char *`` when writing @@ -862,14 +865,14 @@ Provider entries are organized beneath ``umf.provider``. Use ``umf.provider.by_handle.{provider}`` with a :type:`umf_memory_provider_handle_t` argument to reach a specific provider. Providers can also be addressed by name through ``umf.provider.by_name.{provider}``; -append ``.{index}`` to address specyfic provider when multiple providers share the same label. -Defaults for future providers live under ``umf.provider.default.{provider_name}``, -where ``{provider_name}`` matches the canonical provider identifier (``OS``, -``FILE``, ``DEVDAX``, ``FIXED``, ``CUDA`` or ``LEVEL_ZERO``). Values written to -the default tree are saved until a matching provider is created and applied -during provider initialization. Defaults can be supplied programmatically or -through configuration strings. The entries below list only the suffix of each -node; prefix them with the appropriate ``umf.provider`` path. +append ``.{index}`` to address specific provider when multiple providers share the same label. +Defaults for future providers reside under ``umf.provider.default.{provider}`` and track the +name returned by each provider's ``get_name`` implementation. Providers have their +default names (``OS``, ``FILE``, ``DEVDAX``, ``FIXED``, ``CUDA`` or ``LEVEL_ZERO``), +unless their name was changed during creation, those renamed providers must be addressed explicitly. +Defaults can be written via ``umf.provider.default.`` either programmatically or through +configuration strings. The entries below list only the suffix of each node; +prefix them with the appropriate ``umf.provider`` path. 
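A short, hedged sketch of the naming rule for provider defaults. The ``params.value`` leaf is purely illustrative (real providers expose their own CTL entries) and ``my_provider`` stands for whatever string a renamed provider returns from ``get_name``::

    int v = 1;
    /* stored under the canonical provider name, here "OS" */
    umfCtlSet("umf.provider.default.OS.params.value", &v, sizeof(v));
    /* a provider reporting a custom name must be addressed by that name */
    umfCtlSet("umf.provider.default.my_provider.params.value", &v, sizeof(v));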
Common provider statistics -------------------------- @@ -971,11 +974,13 @@ Pool nodes Pool entries mirror the provider layout. ``umf.pool.by_handle.{pool}`` accepts a :type:`umf_memory_pool_handle_t`, while ``umf.pool.by_name.{pool}`` addresses pools by name with an optional ``.{index}`` suffix when names are reused. -Defaults for future pools reside under ``umf.pool.default.{pool}``, where -canonical names include ``disjoint``, ``scalable`` and ``jemalloc``. Defaults -can be written via ``umf.pool.default.`` either programmatically or -through configuration strings. The entries below list only the suffix of each -node; prefix them with the appropriate ``umf.pool`` path. +Defaults for future pools reside under ``umf.pool.default.{pool}`` and track the +name returned by each pool's ``get_name`` implementation. Pools that keep their +default names (``disjoint``, ``scalable`` and ``jemalloc``) continue to match +those entries, while renamed pools must be addressed explicitly. Defaults can be +written via ``umf.pool.default.`` either programmatically or through +configuration strings. The entries below list only the suffix of each node; +prefix them with the appropriate ``umf.pool`` path. Common pool statistics -------------------------- @@ -1001,7 +1006,7 @@ Disjoint pool (``disjoint``) provider. :type bytes: ``size_t`` - **Access:** read-write. (write is only avaiable through defaults) + **Access:** read-write. (write is only available through defaults) **Defaults / Env:** supported. Governs how much memory the pool grabs in each slab. Lower values reduce @@ -1014,7 +1019,7 @@ Disjoint pool (``disjoint``) cached by the pool. :type bytes: ``size_t`` - **Access:** read-write. (write is only avaiable through defaults) + **Access:** read-write. (write is only available through defaults) **Defaults / Env:** supported. Sets the cut-off for pooling allocations. Requests larger than this value are @@ -1027,7 +1032,7 @@ Disjoint pool (``disjoint``) may retain. :type count: ``size_t`` - **Access:** read-write. (write is only avaiable through defaults) + **Access:** read-write. (write is only available through defaults) **Defaults / Env:** supported. Caps the pool's cached slabs per bucket to limit memory retention. Shrinking @@ -1040,7 +1045,7 @@ Disjoint pool (``disjoint``) serve. :type bytes: ``size_t`` - **Access:** read-write. (write is only avaiable through defaults) + **Access:** read-write. (write is only available through defaults) **Defaults / Env:** supported. Controls the smallest chunk size kept in the pool, which in turn affects the @@ -1052,7 +1057,7 @@ Disjoint pool (``disjoint``) :param level: Receives or supplies the tracing level for the pool. :type level: ``int`` (``0`` disables tracing) - **Access:** read-write. (write is only avaiable through defaults) + **Access:** read-write. (write is only available through defaults) **Defaults / Env:** supported. Controls the disjoint pool's tracing features. ``0`` disables tracing. diff --git a/docs/config/index.rst b/docs/config/index.rst index 3bd20828ff..4447dcd74b 100644 --- a/docs/config/index.rst +++ b/docs/config/index.rst @@ -1,4 +1,4 @@ -.. Copyright 2023 Intel Corporation +.. 
Copyright 2023-2025 Intel Corporation Intel Unified Memory Framework documentation Intel Unified Memory Framework documentation @@ -10,4 +10,6 @@ Intel Unified Memory Framework documentation introduction.rst examples.rst api.rst + ctl.rst glossary.rst + diff --git a/docs/config/spelling_exceptions.txt b/docs/config/spelling_exceptions.txt index 3385a22160..f329b7c6f3 100644 --- a/docs/config/spelling_exceptions.txt +++ b/docs/config/spelling_exceptions.txt @@ -50,11 +50,13 @@ partList pid poolable preallocated +programmatically propertyId providential providerIpcData ptr realloc +runnables Scalable scalable stdout @@ -82,5 +84,6 @@ umfMemspaceUserFilter umfMemspaceMemtargetAdd unfreed usm +wildcarded zA ze diff --git a/examples/ctl/ctl_example.c b/examples/ctl/ctl_example.c index 8d1ed9b5b0..252acf3571 100644 --- a/examples/ctl/ctl_example.c +++ b/examples/ctl/ctl_example.c @@ -204,7 +204,7 @@ static umf_result_t ctl_ctl(void *provider, umf_ctl_query_source_t source, return UMF_RESULT_SUCCESS; } if (queryType == CTL_QUERY_RUNNABLE && - strcmp(formatted, "substraction") == 0) { + strcmp(formatted, "subtraction") == 0) { if (p->m) { p->c = (p->a - p->b) % p->m; } else { @@ -300,9 +300,9 @@ int main(void) { // Execute subtraction and fetch the result res = - umfCtlExec("umf.provider.by_handle.{}.substraction", NULL, 0, provider); + umfCtlExec("umf.provider.by_handle.{}.subtraction", NULL, 0, provider); if (res != UMF_RESULT_SUCCESS) { - fprintf(stderr, "Failed to execute substraction!\n"); + fprintf(stderr, "Failed to execute subtraction!\n"); goto out; } res = umfCtlGet("umf.provider.by_handle.{}.c", &result, sizeof(result), @@ -311,7 +311,7 @@ int main(void) { fprintf(stderr, "Failed to get c!\n"); goto out; } - printf("substraction result: %d\n", result); + printf("subtraction result: %d\n", result); out: umfMemoryProviderDestroy(provider); diff --git a/examples/ctl/ctl_statistics_example.c b/examples/ctl/ctl_statistics_example.c index da0874bed9..5ddce02966 100644 --- a/examples/ctl/ctl_statistics_example.c +++ b/examples/ctl/ctl_statistics_example.c @@ -201,7 +201,7 @@ int main(void) { goto cleanup; } - /* set name of the pool so we can easly ref it by using name */ + /* set name of the pool so we can easily ref it by using name */ res = umfDisjointPoolParamsSetName(disjoint_params, pool_name); if (res != UMF_RESULT_SUCCESS) { fprintf(stderr, "Failed to name disjoint pool (error %d)\n", (int)res); diff --git a/src/memory_pool.c b/src/memory_pool.c index a4663e58d6..2161d5d45c 100644 --- a/src/memory_pool.c +++ b/src/memory_pool.c @@ -469,15 +469,14 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, goto err_pool_init; } - // Set default property "name" to pool if exists const char *pname = NULL; - ret = ops->get_name(NULL, &pname); + ret = ops->get_name(pool->pool_priv, &pname); if (ret != UMF_RESULT_SUCCESS) { LOG_ERR("Failed to get pool name"); goto err_pool_init; } assert(pname != NULL); - + utils_warn_invalid_name("Memory pool", pname); ctl_default_apply(pool_default_list, pname, ops->ext_ctl, pool->pool_priv); ret = umfPoolPostInitialize(&pool->ops, pool->pool_priv); @@ -489,12 +488,6 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, *hPool = pool; pools_by_name_add(pool); - const char *pool_name = NULL; - if (ops->get_name(pool->pool_priv, &pool_name) == UMF_RESULT_SUCCESS && - pool_name) { - utils_warn_invalid_name("Memory pool", pool_name); - } - LOG_INFO("Memory pool created: %p", (void *)pool); return UMF_RESULT_SUCCESS; 
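In effect, defaults are matched against the name reported by the newly created pool itself, so a pool given a custom name picks up entries stored under that name. A condensed, hedged sketch of the flow, mirroring the ``disjointCtlDefaultsCustomName`` test added later in this series (``provider`` is assumed to be an existing provider handle; error checks omitted):

    size_t capacity = 11;
    umfCtlSet("umf.pool.default.my_pool.params.capacity", &capacity,
              sizeof(capacity));

    umf_disjoint_pool_params_handle_t params = NULL;
    umfDisjointPoolParamsCreate(&params);
    umfDisjointPoolParamsSetName(params, "my_pool");

    umf_memory_pool_handle_t pool = NULL;
    /* the "my_pool" default entry is found and applied during creation */
    umfPoolCreate(umfDisjointPoolOps(), provider, params, 0, &pool);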
diff --git a/src/memory_provider.c b/src/memory_provider.c index f02830ac1a..2685ec5316 100644 --- a/src/memory_provider.c +++ b/src/memory_provider.c @@ -345,10 +345,20 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops, utils_init_once(&mem_provider_ctl_initialized, provider_ctl_init); const char *pname = NULL; - if (provider->ops.get_name(NULL, &pname) == UMF_RESULT_SUCCESS && pname) { - ctl_default_apply(provider_default_list, pname, provider->ops.ext_ctl, - provider->provider_priv); + + ret = provider->ops.get_name(provider->provider_priv, &pname); + if (ret != UMF_RESULT_SUCCESS) { + LOG_ERR("Failed to get pool name"); + umf_ba_global_free(provider); + return ret; } + + assert(pname != NULL); + utils_warn_invalid_name("Memory provider", pname); + + ctl_default_apply(provider_default_list, pname, provider->ops.ext_ctl, + provider->provider_priv); + ret = umfProviderPostInitialize(&provider->ops, provider_priv); if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_INVALID_CTL_PATH) { LOG_ERR("Failed to post-initialize provider"); @@ -358,13 +368,6 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops, *hProvider = provider; - const char *provider_name = NULL; - if (provider->ops.get_name(provider->provider_priv, &provider_name) == - UMF_RESULT_SUCCESS && - provider_name) { - utils_warn_invalid_name("Memory provider", provider_name); - } - return UMF_RESULT_SUCCESS; } diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index fae28faba5..c19719faab 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -214,7 +214,7 @@ add_umf_test( add_umf_test( NAME ctl_api SRCS ctl/ctl_api.cpp - LIBS ${UMF_UTILS_FOR_TEST}) + LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) add_umf_test( NAME utils_common diff --git a/test/ctl/ctl_api.cpp b/test/ctl/ctl_api.cpp index 9f11fe8ace..47280d3acf 100644 --- a/test/ctl/ctl_api.cpp +++ b/test/ctl/ctl_api.cpp @@ -23,6 +23,7 @@ #include "../common/base.hpp" #include "../common/fork_helpers.hpp" +#include "../common/provider.hpp" #include "gtest/gtest.h" using namespace umf_test; @@ -61,7 +62,7 @@ class CtlTest : public ::testing::Test { private: }; -// setting default modyfies global state - +// setting default modifies global state - // tests doing so should run in fork to ensure correct test isolation TEST_F(CtlTest, ctlDefault) { umf_test::run_in_fork([] { @@ -147,6 +148,116 @@ TEST_F(CtlTest, ctlDefaultPoolMultithreaded) { }); } +struct ctl_provider_params { + const char *name; + int initial_value; +}; + +class ctl_provider : public umf_test::provider_base_t { + public: + ctl_provider() : name_ptr_(kDefaultName), stored_value_(0) {} + + umf_result_t initialize(const ctl_provider_params *params) noexcept { + if (!params) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + stored_value_ = params->initial_value; + if (params->name) { + name_storage_ = params->name; + name_ptr_ = name_storage_.c_str(); + } else { + name_ptr_ = kDefaultName; + } + + return UMF_RESULT_SUCCESS; + } + + umf_result_t get_name(const char **name) noexcept { + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *name = name_ptr_; + return UMF_RESULT_SUCCESS; + } + + umf_result_t ext_ctl(umf_ctl_query_source_t, const char *path, void *arg, + size_t size, umf_ctl_query_type_t queryType, + va_list) noexcept { + if (std::strcmp(path, "params.value") != 0) { + return UMF_RESULT_ERROR_INVALID_CTL_PATH; + } + + if (!arg || size != sizeof(int)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (queryType == 
CTL_QUERY_WRITE) { + stored_value_ = *static_cast(arg); + return UMF_RESULT_SUCCESS; + } + + if (queryType == CTL_QUERY_READ) { + *static_cast(arg) = stored_value_; + return UMF_RESULT_SUCCESS; + } + + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + private: + static constexpr const char *kDefaultName = "mock"; + std::string name_storage_; + const char *name_ptr_; + int stored_value_; +}; + +TEST_F(CtlTest, ctlProviderDefaultsCustomName) { + umf_test::run_in_fork([] { + static auto provider_ops = + umf_test::providerMakeCOps(); + + int canonical_default = 21; + ASSERT_EQ(umfCtlSet("umf.provider.default.mock.params.value", + &canonical_default, sizeof(canonical_default)), + UMF_RESULT_SUCCESS); + + const std::string custom_name = "custom_provider"; + int custom_default = 37; + const std::string custom_path = + "umf.provider.default." + custom_name + ".params.value"; + ASSERT_EQ(umfCtlSet(custom_path.c_str(), &custom_default, + sizeof(custom_default)), + UMF_RESULT_SUCCESS); + + ctl_provider_params custom_params{custom_name.c_str(), 0}; + umf_memory_provider_handle_t custom_handle = nullptr; + ASSERT_EQ(umfMemoryProviderCreate(&provider_ops, &custom_params, + &custom_handle), + UMF_RESULT_SUCCESS); + + int value = 0; + ASSERT_EQ(umfCtlGet("umf.provider.by_handle.{}.params.value", &value, + sizeof(value), custom_handle), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, custom_default); + ASSERT_EQ(umfMemoryProviderDestroy(custom_handle), UMF_RESULT_SUCCESS); + + ctl_provider_params canonical_params{nullptr, 7}; + umf_memory_provider_handle_t canonical_handle = nullptr; + ASSERT_EQ(umfMemoryProviderCreate(&provider_ops, &canonical_params, + &canonical_handle), + UMF_RESULT_SUCCESS); + + ASSERT_EQ(umfCtlGet("umf.provider.by_handle.{}.params.value", &value, + sizeof(value), canonical_handle), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, canonical_default); + ASSERT_EQ(umfMemoryProviderDestroy(canonical_handle), + UMF_RESULT_SUCCESS); + }); +} + /* Case: overwriting an existing value for pool defaults * This test sets a default value and then overwrites it with a new value */ TEST_F(CtlTest, ctlDefaultPoolOverwrite) { @@ -184,7 +295,7 @@ TEST_F(CtlTest, ctlDefaultMultithreadedProvider) { std::string name = name_prefix + std::to_string(i * 10 + j); umfCtlSet(name.c_str(), (void *)predefined_value, strlen(predefined_value)); - std::atomic_fetch_add(&totalRecords, 1); + std::atomic_fetch_add(&totalRecords, (size_t)1); } }); } diff --git a/test/pools/disjoint_pool_ctl.cpp b/test/pools/disjoint_pool_ctl.cpp index d9a5ce99b6..31bc975e3d 100644 --- a/test/pools/disjoint_pool_ctl.cpp +++ b/test/pools/disjoint_pool_ctl.cpp @@ -12,6 +12,7 @@ #include #include +#include #include #include "base.hpp" @@ -369,6 +370,97 @@ TEST_F(test, disjointCtlDefaultsOverride) { }); } +TEST_F(test, disjointCtlDefaultsCustomName) { + umf_test::run_in_fork([] { + umf_os_memory_provider_params_handle_t raw_os_params = nullptr; + umf_result_t res = umfOsMemoryProviderParamsCreate(&raw_os_params); + if (res == UMF_RESULT_ERROR_NOT_SUPPORTED) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + + std::unique_ptr + os_params(raw_os_params, &umfOsMemoryProviderParamsDestroy); + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_params.get()); + if (providerWrapper.get() == nullptr) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + const std::string custom_name = "custom_disjoint_pool"; + + size_t canonical_capacity = 9; + size_t canonical_min_bucket = 32; + 
ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &canonical_capacity, sizeof(canonical_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &canonical_min_bucket, + sizeof(canonical_min_bucket)), + UMF_RESULT_SUCCESS); + + size_t custom_capacity = 11; + size_t custom_min_bucket = 64; + const std::string custom_capacity_path = + "umf.pool.default." + custom_name + ".params.capacity"; + const std::string custom_min_bucket_path = + "umf.pool.default." + custom_name + ".params.min_bucket_size"; + ASSERT_EQ(umfCtlSet(custom_capacity_path.c_str(), &custom_capacity, + sizeof(custom_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet(custom_min_bucket_path.c_str(), &custom_min_bucket, + sizeof(custom_min_bucket)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t raw_custom_params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(&raw_custom_params), + UMF_RESULT_SUCCESS); + std::unique_ptr + custom_params(raw_custom_params, &umfDisjointPoolParamsDestroy); + + ASSERT_EQ(umfDisjointPoolParamsSetName(custom_params.get(), + custom_name.c_str()), + UMF_RESULT_SUCCESS); + + PoolWrapper customPool(providerWrapper.get(), umfDisjointPoolOps(), + custom_params.get()); + ASSERT_NE(customPool.get(), nullptr); + + size_t value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &value, + sizeof(value), customPool.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, custom_capacity); + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &value, sizeof(value), customPool.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, custom_min_bucket); + + umf_disjoint_pool_params_handle_t raw_default_params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(&raw_default_params), + UMF_RESULT_SUCCESS); + std::unique_ptr + default_params(raw_default_params, &umfDisjointPoolParamsDestroy); + + PoolWrapper defaultPool(providerWrapper.get(), umfDisjointPoolOps(), + default_params.get()); + ASSERT_NE(defaultPool.get(), nullptr); + + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &value, + sizeof(value), defaultPool.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, canonical_capacity); + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &value, sizeof(value), defaultPool.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, canonical_min_bucket); + }); +} + TEST_F(test, disjointCtlMemoryMetricsConsistency) { umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; if (UMF_RESULT_ERROR_NOT_SUPPORTED == diff --git a/test/utils/cpp_helpers.hpp b/test/utils/cpp_helpers.hpp index dc784728cf..1f8ffb4e7a 100644 --- a/test/utils/cpp_helpers.hpp +++ b/test/utils/cpp_helpers.hpp @@ -118,6 +118,7 @@ template constexpr umf_memory_provider_ops_t providerOpsBase() { UMF_RESULT_ERROR_UNKNOWN); UMF_ASSIGN_OP(ops, T, ext_get_allocation_properties_size, UMF_RESULT_ERROR_UNKNOWN); + UMF_ASSIGN_OP(ops, T, ext_ctl, UMF_RESULT_ERROR_INVALID_CTL_PATH); return ops; } } // namespace detail From 15e7433a929137e216faf9c3de2123def6e3f54d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Thu, 18 Sep 2025 16:58:21 +0200 Subject: [PATCH 10/11] Documentation improvements --- docs/config/ctl.rst | 670 +------------------------------------------- 1 file changed, 8 insertions(+), 662 deletions(-) diff --git a/docs/config/ctl.rst b/docs/config/ctl.rst index 9988b640c8..e6533392e1 100644 --- a/docs/config/ctl.rst +++ b/docs/config/ctl.rst @@ -108,670 +108,15 @@ runtime-only 
parameters. CTL nodes ============ -Unless noted otherwise, provider and pool nodes accept either ``by_handle`` or -``by_name`` addressing. Replace the ``{provider}``, ``{pool}`` or ``{id}`` -placeholder with the wildcard argument supplied to :c:func:`umfCtlGet`, -:c:func:`umfCtlSet` or :c:func:`umfCtlExec`. +The CTL hierarchy is rooted at ``umf``. The next component selects one of the +major subsystems: -Logger nodes ------------- - - -.. py:function:: umf.logger.timestamp(enabled) - - :param enabled: Receives (or provides) ``0`` when timestamps are disabled and - ``1`` when they are emitted. - :type enabled: ``int *`` - - **Access:** read-write. - **Default addressing:** not supported. - **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. - - Toggle timestamp prefixes in future log records. The flag is treated as a - boolean value and only affects messages emitted after the change. - -.. py:function:: umf.logger.pid(enabled) - - :param enabled: Receives or supplies ``0`` to omit the process identifier and - ``1`` to include it in every message header. - :type enabled: ``int *`` - - **Access:** read-write. - **Default addressing:** not supported. - **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. - - Controls whether each log line is annotated with the current process id. - Setting non-boolean values results in coercion to zero/non-zero; the change - applies to subsequent messages only. - -.. py:function:: umf.logger.level(level) - - :param level: Receives or supplies the minimum severity that will be written. - :type level: ``utils_log_level_t *`` (``LOG_DEBUG`` .. ``LOG_FATAL``) - - **Access:** read-write. - **Default addressing:** not supported. - **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. - - Sets the filtering threshold for the logger. Records below the configured - level are dropped. Writes that fall outside the enumerated range are - rejected. - -.. py:function:: umf.logger.flush_level(level) - - :param level: Receives or supplies the severity at which the logger forces a - flush of the output stream. - :type level: ``utils_log_level_t *`` (``LOG_DEBUG`` .. ``LOG_FATAL``) - - **Access:** read-write. - **Default addressing:** not supported. - **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. - - Adjusts when buffered log data is synchronously flushed. Writes outside the - valid severity range fail, and lowering the level can incur additional flush - overhead for future messages. - -.. py:function:: umf.logger.output(path) - - :param path: Receives the currently selected sink on reads. On writes, pass - ``"stdout"`` or ``"stderr"`` to redirect to standard streams, a - NUL-terminated file path to append to a file, or ``NULL`` to disable - logging altogether. - :type path: ``char *`` when reading, ``const char *`` when writing - - **Access:** read-write. - **Default addressing:** not supported. - **Environment:** configurable via ``UMF_CONF`` or ``UMF_CONF_FILE`` entries. - - Controls the destination for log messages. The logger closes any previously - opened file when switching targets. Providing a path longer than 256 bytes or - pointing to a file that cannot be opened causes the write to fail. - -Provider nodes --------------- - -The following entries are available for providers that register CTL support -through ``umf.provider``. ``{provider}`` accepts either a handle or a name with -an optional numeric disambiguator. 
None of these nodes can be set via default -addressing and they are not configurable through environment variables. - -.. py:function:: umf.provider.by_handle.stats.allocated_memory(provider, bytes) - - **CTL path:** ``umf.provider.by_handle.{provider}.stats.allocated_memory`` - (or ``umf.provider.by_name.{provider}.stats.allocated_memory``). - - :param provider: Handle (or name-based selector) of the provider being - queried. When using ``by_name`` addressing, append ``.{index}`` if the - name is not unique. - :type provider: ``umf_memory_provider_handle_t`` when using handles, or - ``const char *`` for names. - :param bytes: Receives the total number of bytes currently outstanding. - :type bytes: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Returns the amount of memory the provider has allocated but not yet freed. - The counter updates atomically as the provider serves requests and is not - resettable. - -.. py:function:: umf.provider.by_handle.stats.peak_memory(provider, bytes) - - **CTL path:** ``umf.provider.by_handle.{provider}.stats.peak_memory`` (or - ``umf.provider.by_name.{provider}.stats.peak_memory``). - - :param provider: Handle or name-based selector of the provider being - queried. Disambiguate duplicate names with an index when using - ``by_name``. - :type provider: ``umf_memory_provider_handle_t`` when using handles, or - ``const char *`` for names. - :param bytes: Receives the highest observed outstanding allocation size since - the last reset. - :type bytes: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Reports the historical maximum allocation footprint of the provider. - Combine with :py:func:`umf.provider.by_handle.stats.peak_memory.reset` to - discard stale peaks when desired. - -.. py:function:: umf.provider.by_handle.stats.peak_memory.reset(provider) - - **CTL path:** ``umf.provider.by_handle.{provider}.stats.peak_memory.reset`` - (or ``umf.provider.by_name.{provider}.stats.peak_memory.reset``). - - :param provider: Handle or name-based selector of the provider being - updated. - :type provider: ``umf_memory_provider_handle_t`` when using handles, or - ``const char *`` for names. +* ``umf.logger`` – logging configuration and diagnostics. +* ``umf.provider`` – provider-specific parameters, statistics and commands. +* ``umf.pool`` – pool-specific parameters, statistics and inspection helpers. - **Access:** execute (via :c:func:`umfCtlExec`). - **Default addressing:** not supported. - **Environment:** not supported. - - Resets the peak allocation counter to the provider's current outstanding - usage. The operation does not affect other statistics and can be invoked at - any time. - -.. py:function:: umf.provider.by_handle.stats.reset(provider) - - **CTL path:** ``umf.provider.by_handle.{provider}.stats.reset`` (or - ``umf.provider.by_name.{provider}.stats.reset``). - - :param provider: Handle or name-based selector of the provider being - updated. - :type provider: ``umf_memory_provider_handle_t`` when using handles, or - ``const char *`` for names. - - **Access:** execute (via :c:func:`umfCtlExec`). - **Default addressing:** not supported. - **Environment:** not supported. - - Clears all provider statistics, including the peak counter. Use this to start - a new measurement interval; the call has no effect on in-flight allocations. - -.. 
py:function:: umf.provider.by_handle.params.ipc_enabled(provider, enabled) - - **CTL path:** ``umf.provider.by_handle.{provider}.params.ipc_enabled`` (or - ``umf.provider.by_name.{provider}.params.ipc_enabled``). - - :param provider: Handle or name-based selector of the provider being - queried. - :type provider: ``umf_memory_provider_handle_t`` when using handles, or - ``const char *`` for names. - :param enabled: Receives ``0`` when inter-process sharing is disabled and a - non-zero value when it is active. - :type enabled: ``int *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Indicates whether the OS memory provider has been initialized with IPC - support. The value is fixed at provider creation time and cannot be modified - afterwards. - -Disjoint pool parameter nodes ------------------------------ - -The following entries apply to disjoint pools. They can be addressed via -``umf.pool.default.disjoint`` to set defaults for future pools. Environment -configuration may only set these defaults because there is no way to provide a -runtime handle through ``UMF_CONF``. - -.. py:function:: umf.pool.by_handle.params.slab_min_size(pool, bytes) - - **CTL path:** ``umf.pool.by_handle.{pool}.params.slab_min_size`` (or - ``umf.pool.by_name.{pool}.params.slab_min_size``). - - :param pool: Handle or name-based selector of the disjoint pool. Append - ``.{index}`` after the name when multiple pools share it. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bytes: Receives or supplies the minimum slab size requested from the - provider. - :type bytes: ``size_t *`` - - **Access:** read-write. - **Default addressing:** ``umf.pool.default.disjoint.params.slab_min_size``. - **Environment:** defaults configurable via ``UMF_CONF`` or - ``UMF_CONF_FILE``. - - Governs how much memory the pool grabs in each slab. Lower values reduce - per-allocation slack while higher values amortize provider overhead. Writes - are accepted only before the pool completes its ``post_initialize`` phase. - -.. py:function:: umf.pool.by_handle.params.max_poolable_size(pool, bytes) - - **CTL path:** ``umf.pool.by_handle.{pool}.params.max_poolable_size`` (or - ``umf.pool.by_name.{pool}.params.max_poolable_size``). - - :param pool: Handle or name-based selector of the disjoint pool. Append - ``.{index}`` when disambiguating duplicate names. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bytes: Receives or supplies the largest allocation size that is still - cached by the pool. - :type bytes: ``size_t *`` - - **Access:** read-write. - **Default addressing:** ``umf.pool.default.disjoint.params.max_poolable_size``. - **Environment:** defaults configurable via ``UMF_CONF`` or - ``UMF_CONF_FILE``. - - Sets the cut-off for pooling allocations. Requests larger than this value are - delegated directly to the provider. Updates must occur before - ``post_initialize`` completes. - -.. py:function:: umf.pool.by_handle.params.capacity(pool, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.params.capacity`` (or - ``umf.pool.by_name.{pool}.params.capacity``). - - :param pool: Handle or name-based selector of the disjoint pool. Append - ``.{index}`` when necessary to disambiguate names. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param count: Receives or supplies the maximum number of slabs each bucket - may retain. 
- :type count: ``size_t *`` - - **Access:** read-write. - **Default addressing:** ``umf.pool.default.disjoint.params.capacity``. - **Environment:** defaults configurable via ``UMF_CONF`` or - ``UMF_CONF_FILE``. - - Caps the pool's cached slabs per bucket to limit memory retention. Shrinking - the capacity may cause future frees to return slabs to the provider. Writes - are rejected after ``post_initialize``. - -.. py:function:: umf.pool.by_handle.params.min_bucket_size(pool, bytes) - - **CTL path:** ``umf.pool.by_handle.{pool}.params.min_bucket_size`` (or - ``umf.pool.by_name.{pool}.params.min_bucket_size``). - - :param pool: Handle or name-based selector of the disjoint pool. Append - ``.{index}`` to the name when needed. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bytes: Receives or supplies the minimal allocation size a bucket may - serve. - :type bytes: ``size_t *`` - - **Access:** read-write. - **Default addressing:** ``umf.pool.default.disjoint.params.min_bucket_size``. - **Environment:** defaults configurable via ``UMF_CONF`` or - ``UMF_CONF_FILE``. - - Controls the smallest chunk size kept in the pool, which in turn affects the - number of buckets. Writes are validated for size correctness and disallowed - after ``post_initialize``. - -.. py:function:: umf.pool.by_handle.params.pool_trace(pool, level) - - **CTL path:** ``umf.pool.by_handle.{pool}.params.pool_trace`` (or - ``umf.pool.by_name.{pool}.params.pool_trace``). - - :param pool: Handle or name-based selector of the disjoint pool. Append - ``.{index}`` for ambiguous names. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param level: Receives or supplies a non-zero value to enable tracing of - counters, or ``0`` to disable it. - :type level: ``int *`` - - **Access:** read-write. - **Default addressing:** ``umf.pool.default.disjoint.params.pool_trace``. - **Environment:** defaults configurable via ``UMF_CONF`` or - ``UMF_CONF_FILE``. - - Enables collection of per-bucket and aggregated allocation counters. Tracing - must be activated before ``post_initialize``; attempting to change it later - fails with ``UMF_RESULT_ERROR_NOT_SUPPORTED``. - -Disjoint pool statistics ------------------------- - -Statistics are read-only and cannot be set through defaults or environment -variables. Aggregate counters that rely on tracing require -``params.pool_trace`` to be non-zero. - -.. py:function:: umf.pool.by_handle.stats.used_memory(pool, bytes) - - **CTL path:** ``umf.pool.by_handle.{pool}.stats.used_memory`` (or - ``umf.pool.by_name.{pool}.stats.used_memory``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bytes: Receives the amount of memory that is presently allocated by - the pool's clients. - :type bytes: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Reports the memory currently in use across all slabs, regardless of tracing - status. - -.. py:function:: umf.pool.by_handle.stats.reserved_memory(pool, bytes) - - **CTL path:** ``umf.pool.by_handle.{pool}.stats.reserved_memory`` (or - ``umf.pool.by_name.{pool}.stats.reserved_memory``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. 
- :param bytes: Receives the total number of bytes reserved in slabs that the - pool owns. - :type bytes: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Returns the total memory reserved by the pool, including free capacity held - in slabs. - -.. py:function:: umf.pool.by_handle.stats.alloc_num(pool, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.stats.alloc_num`` (or - ``umf.pool.by_name.{pool}.stats.alloc_num``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param count: Receives the number of allocations the pool has issued. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Counts every allocation handed out by the pool since the - pool was created. - -.. py:function:: umf.pool.by_handle.stats.alloc_pool_num(pool, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.stats.alloc_pool_num`` (or - ``umf.pool.by_name.{pool}.stats.alloc_pool_num``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param count: Receives the number of allocations served directly from cached - slabs. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Counts how many allocations were fulfilled from cached - memory without visiting the provider. - -.. py:function:: umf.pool.by_handle.stats.free_num(pool, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.stats.free_num`` (or - ``umf.pool.by_name.{pool}.stats.free_num``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param count: Receives the total number of frees processed by the pool. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Tracks the number of frees observed by the pool since its - creation. - -.. py:function:: umf.pool.by_handle.stats.curr_slabs_in_use(pool, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.stats.curr_slabs_in_use`` (or - ``umf.pool.by_name.{pool}.stats.curr_slabs_in_use``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param count: Receives the current number of slabs actively serving - allocations. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Returns the number of slabs that currently have live - allocations. - -.. py:function:: umf.pool.by_handle.stats.curr_slabs_in_pool(pool, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.stats.curr_slabs_in_pool`` (or - ``umf.pool.by_name.{pool}.stats.curr_slabs_in_pool``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param count: Receives how many slabs are cached and ready for reuse. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. 
- **Environment:** not supported. - - Requires tracing. Reports the slabs retained in the pool for future reuse. - -.. py:function:: umf.pool.by_handle.stats.max_slabs_in_use(pool, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.stats.max_slabs_in_use`` (or - ``umf.pool.by_name.{pool}.stats.max_slabs_in_use``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param count: Receives the historical maximum of simultaneously used slabs. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Provides the peak number of slabs that were in use at the - same time. - -.. py:function:: umf.pool.by_handle.stats.max_slabs_in_pool(pool, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.stats.max_slabs_in_pool`` (or - ``umf.pool.by_name.{pool}.stats.max_slabs_in_pool``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param count: Receives the largest number of slabs retained in the cache. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Returns the highest number of slabs ever retained in the - cache simultaneously. - -Disjoint pool bucket inspection -------------------------------- - -Bucket-specific nodes take an additional ``{id}`` placeholder that must be -supplied as a ``size_t`` argument. Environment configuration cannot target -these entries. - -.. py:function:: umf.pool.by_handle.buckets.count(pool, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.buckets.count`` (or - ``umf.pool.by_name.{pool}.buckets.count``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param count: Receives the number of distinct bucket sizes. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Returns the total number of bucket sizes maintained by the pool. Call the - node without a bucket index; providing one results in - ``UMF_RESULT_ERROR_INVALID_ARGUMENT``. - -.. py:function:: umf.pool.by_handle.buckets.size(pool, bucket, bytes) - - **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.size`` (or - ``umf.pool.by_name.{pool}.buckets.{id}.size``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bucket: Zero-based bucket index. - :type bucket: ``size_t`` - :param bytes: Receives the allocation size that the bucket serves. - :type bytes: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Reports the allocation size serviced by the selected bucket. This value is - available even when tracing is disabled. - -.. py:function:: umf.pool.by_handle.buckets.stats.alloc_num(pool, bucket, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.alloc_num`` (or - ``umf.pool.by_name.{pool}.buckets.{id}.stats.alloc_num``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. 
- :param bucket: Zero-based bucket index. - :type bucket: ``size_t`` - :param count: Receives the number of allocations performed by this bucket. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Counts every allocation that passed through the specified - bucket. - -.. py:function:: umf.pool.by_handle.buckets.stats.alloc_pool_num(pool, bucket, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.alloc_pool_num`` - (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.alloc_pool_num``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bucket: Zero-based bucket index. - :type bucket: ``size_t`` - :param count: Receives the number of allocations satisfied from cached slabs - in this bucket. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Counts how many allocations were served entirely from the - bucket's cached slabs. - -.. py:function:: umf.pool.by_handle.buckets.stats.free_num(pool, bucket, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.free_num`` (or - ``umf.pool.by_name.{pool}.buckets.{id}.stats.free_num``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bucket: Zero-based bucket index. - :type bucket: ``size_t`` - :param count: Receives the number of frees recorded for this bucket. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Tracks the number of frees observed for the bucket. - -.. py:function:: umf.pool.by_handle.buckets.stats.curr_slabs_in_use(pool, bucket, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.curr_slabs_in_use`` - (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.curr_slabs_in_use``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bucket: Zero-based bucket index. - :type bucket: ``size_t`` - :param count: Receives how many slabs for this bucket currently serve - allocations. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Returns the current slab utilization for the bucket. - -.. py:function:: umf.pool.by_handle.buckets.stats.curr_slabs_in_pool(pool, bucket, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.curr_slabs_in_pool`` - (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.curr_slabs_in_pool``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bucket: Zero-based bucket index. - :type bucket: ``size_t`` - :param count: Receives the number of slabs cached and immediately available - for this bucket. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Reports cached slabs that the bucket can reuse without a - provider call. - -.. 
py:function:: umf.pool.by_handle.buckets.stats.max_slabs_in_use(pool, bucket, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.max_slabs_in_use`` - (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.max_slabs_in_use``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bucket: Zero-based bucket index. - :type bucket: ``size_t`` - :param count: Receives the peak number of slabs in use for this bucket. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Provides the historical maximum of slabs simultaneously in - use for the bucket. - -.. py:function:: umf.pool.by_handle.buckets.stats.max_slabs_in_pool(pool, bucket, count) - - **CTL path:** ``umf.pool.by_handle.{pool}.buckets.{id}.stats.max_slabs_in_pool`` - (or ``umf.pool.by_name.{pool}.buckets.{id}.stats.max_slabs_in_pool``). - - :param pool: Handle or name-based selector of the disjoint pool. - :type pool: ``umf_memory_pool_handle_t`` when using handles, or - ``const char *`` for names. - :param bucket: Zero-based bucket index. - :type bucket: ``size_t`` - :param count: Receives the largest number of slabs retained in the bucket's - cache. - :type count: ``size_t *`` - - **Access:** read-only. - **Default addressing:** not supported. - **Environment:** not supported. - - Requires tracing. Returns the maximum number of slabs cached for later use by - the bucket. ->>>>>>> da1363dd (better documentation) +Within each subsystem the path continues with an addressing scheme followed by +the module or leaf of interest. Reading this reference ======================= @@ -1303,6 +648,7 @@ Disjoint pool (``disjoint``) Requires tracing with ``pool_trace`` of at least ``1``. Returns the maximum number of slabs cached for later use by the bucket. + Scalable pool (``scalable``) ------------------------------ From e2ba1417ca5e63c9bb2f98150a110c9e8cab30a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Plewa?= Date: Fri, 19 Sep 2025 14:37:15 +0200 Subject: [PATCH 11/11] rename examples and mark ctl as experimental in those --- README.md | 3 ++ docs/config/ctl.rst | 18 +++++---- docs/config/examples.rst | 37 ++++++++++++++++--- examples/CMakeLists.txt | 4 +- examples/README.md | 21 ++++++++++- examples/ctl/CMakeLists.txt | 32 ++++++++-------- .../ctl/{ctl_statistics_example.c => ctl.c} | 10 ++--- examples/ctl/{ctl_example.c => custom_ctl.c} | 11 +++--- src/ctl/ctl_defaults.c | 5 +-- src/pool/pool_disjoint.c | 17 +++++---- test/common/fork_helpers.hpp | 2 +- 11 files changed, 104 insertions(+), 56 deletions(-) rename examples/ctl/{ctl_statistics_example.c => ctl.c} (98%) rename examples/ctl/{ctl_example.c => custom_ctl.c} (98%) diff --git a/README.md b/README.md index e73b8b7279..308e29abea 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,9 @@ documentation, which includes the code of the [basic example](https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/basic/basic.c). There are also more advanced examples that allocate USM memory from the [Level Zero device](examples/level_zero_shared_memory/level_zero_shared_memory.c) using the Level Zero API and UMF Level Zero memory provider and [CUDA device](examples/cuda_shared_memory/cuda_shared_memory.c) using the CUDA API and UMF CUDA memory provider. 
+UMF's experimental CTL API is showcased in the [CTL example](examples/ctl/ctl.c), +which explores provider and pool statistics, and in the [custom CTL example](examples/ctl/custom_ctl.c), which wires CTL support into a custom memory provider. These examples rely on experimental headers which may change in future releases. + ## Build ### Requirements diff --git a/docs/config/ctl.rst b/docs/config/ctl.rst index e6533392e1..b62602d5ae 100644 --- a/docs/config/ctl.rst +++ b/docs/config/ctl.rst @@ -8,6 +8,9 @@ configuration options, statistics and auxiliary APIs. CTL entries can also be set through environment variables or a configuration file, allowing adjustment of UMF behavior without modifying the program. +.. note:: + The CTL API is experimental and may change in future releases. + Main concepts ============= @@ -84,7 +87,7 @@ to providers or pools created after the defaults are set. For example:: sizeof(capacity)); Every subsequently created disjoint pool will use ``16`` as its starting -capacity overriding it's creation parameters. Defaults are keyed by the +capacity overriding its creation parameters. Defaults are keyed by the name returned from the provider or pool ``get_name`` callback, so if pool/provider has custom name it must be addressed explicitly. Defaults may be supplied programmatically or via environment variable and are saved internally and applied during initialization of a @@ -99,7 +102,7 @@ Multiple entries are separated with semicolons, e.g.:: UMF_CONF="umf.logger.output=stdout;umf.logger.level=0" -CTL options available through environment variables are limited—you can only +CTL options available through environment variables are limited — you can only target default nodes when addressing pools. This means that configuration strings can influence values consumed during pool creation but cannot alter runtime-only parameters. @@ -118,7 +121,7 @@ major subsystems: Within each subsystem the path continues with an addressing scheme followed by the module or leaf of interest. -Reading this reference +Reading below sections ======================= Parameter annotations describe the values stored in the node rather than the @@ -211,8 +214,8 @@ Provider entries are organized beneath ``umf.provider``. Use :type:`umf_memory_provider_handle_t` argument to reach a specific provider. Providers can also be addressed by name through ``umf.provider.by_name.{provider}``; append ``.{index}`` to address specific provider when multiple providers share the same label. -Defaults for future providers reside under ``umf.provider.default.{provider}`` and track the -name returned by each provider's ``get_name`` implementation. Providers have their +Defaults for future providers reside under ``umf.provider.default.{provider}`` where ``{provider}`` is +a name returned by each provider's ``get_name`` implementation. Providers have their default names (``OS``, ``FILE``, ``DEVDAX``, ``FIXED``, ``CUDA`` or ``LEVEL_ZERO``), unless their name was changed during creation, those renamed providers must be addressed explicitly. Defaults can be written via ``umf.provider.default.`` either programmatically or through @@ -663,17 +666,16 @@ The jemalloc-backed pool currently exposes only the common statistics nodes. 
Adding CTL support to custom providers and pools ================================================ -The :file:`examples/ctl/ctl_example.c` source demonstrates how a minimal +The :file:`examples/ctl/custom_ctl.c` source demonstrates how a minimal provider can expose configuration entries, statistics and runnables through the CTL API. To add similar support to your own provider or pool you must implement an ``ext_ctl`` callback – parse incoming CTL paths and handle -`CTL_QUERY_READ``, ``CTL_QUERY_WRITE`` and ``CTL_QUERY_RUNNABLE`` requests. +``CTL_QUERY_READ``, ``CTL_QUERY_WRITE`` and ``CTL_QUERY_RUNNABLE`` requests. The callback receives a ``umf_ctl_query_source_t`` indicating whether the query came from the application or a configuration source. Programmatic calls pass typed binary data, while configuration sources deliver strings that must be parsed. Wildcards (``{}``) may appear in paths and are supplied as additional arguments. -new entries. During initialization UMF will execute ``post_initialize`` on the callback after applying any queued defaults, allowing the provider or pool to finalize its diff --git a/docs/config/examples.rst b/docs/config/examples.rst index 6595b63e78..28b6ab02e7 100644 --- a/docs/config/examples.rst +++ b/docs/config/examples.rst @@ -147,19 +147,23 @@ in the UMF repository. TODO -CTL statistics example +CTL example ============================================================================== -You can find the full example code in the `examples/ctl/ctl_statistics_example.c`_ file -in the UMF repository. +.. note:: + The CTL API is experimental and may change in future releases. + +You can find the full example code in the `examples/ctl/ctl.c`_ file in the +UMF repository. The sample configures an OS memory provider and a disjoint pool, reuses the provider's canonical ``OS`` selector obtained at runtime, assigns a custom pool name, and then mixes ``by_handle`` and ``by_name`` selectors to explore CTL statistics. Wildcard nodes are used to choose provider counters, build a four-segment ``{}.{}`` chain for the named pool, reset the peak tracker, and -drill into per-bucket disjoint pool telemetry. The program prints hints on ``stderr`` -explaining which tracing level is necessary when a statistic is unavailable. +drill into per-bucket disjoint pool telemetry. The program prints hints on +``stderr`` explaining which tracing level is necessary when a statistic is +unavailable. Build and run the example with:: @@ -176,6 +180,26 @@ Tracing level ``1`` enables slab usage counters, level ``2`` adds allocation and free statistics, and level ``3`` additionally emits verbose log messages from the pool implementation. +Custom CTL example +============================================================================== + +You can find the full example code in the `examples/ctl/custom_ctl.c`_ file in +the UMF repository. The program implements a minimal memory provider with CTL +hooks that accept configuration values, execute runnables, and expose provider +state through the experimental API. It highlights converting wildcard segments +to ``printf``-style format strings and reading integers supplied via +configuration defaults. 
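For orientation, a hedged sketch of the kind of ``ext_ctl`` callback both examples build on. The parameter order follows the mock provider added in the new CTL test, while ``my_provider_t`` and the ``params.value`` leaf are illustrative; consult ``umf/memory_provider_ops.h`` for the authoritative signature::

    static umf_result_t my_ext_ctl(void *priv, umf_ctl_query_source_t source,
                                   const char *name, void *arg, size_t size,
                                   umf_ctl_query_type_t queryType, va_list args) {
        (void)source;
        (void)args;
        my_provider_t *p = (my_provider_t *)priv; /* provider-private state */

        if (strcmp(name, "params.value") != 0) {
            return UMF_RESULT_ERROR_INVALID_CTL_PATH;
        }
        if (!arg || size != sizeof(int)) {
            return UMF_RESULT_ERROR_INVALID_ARGUMENT;
        }
        if (queryType == CTL_QUERY_WRITE) {
            p->value = *(int *)arg;
            return UMF_RESULT_SUCCESS;
        }
        if (queryType == CTL_QUERY_READ) {
            *(int *)arg = p->value;
            return UMF_RESULT_SUCCESS;
        }
        return UMF_RESULT_ERROR_NOT_SUPPORTED;
    }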
+ +Build and run the example with:: + + cmake -B build + cmake --build build + ./build/examples/umf_example_ctl + +Optionally supply a modulus via configuration defaults:: + + UMF_CONF="umf.provider.default.ctl.m=10" ./build/examples/umf_example_ctl + IPC example with Level Zero Memory Provider ============================================================================== The full code of the example is in the `examples/ipc_level_zero/ipc_level_zero.c`_ file in the UMF repository. @@ -260,7 +284,8 @@ the :any:`umfCloseIPCHandle` function is called. .. _examples/cuda_shared_memory/cuda_shared_memory.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/cuda_shared_memory/cuda_shared_memory.c .. _examples/ipc_level_zero/ipc_level_zero.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/ipc_level_zero/ipc_level_zero.c .. _examples/custom_file_provider/custom_file_provider.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/custom_file_provider/custom_file_provider.c -.. _examples/ctl/ctl_statistics_example.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/ctl/ctl_statistics_example.c +.. _examples/ctl/ctl.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/ctl/ctl.c +.. _examples/ctl/custom_ctl.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/ctl/custom_ctl.c .. _examples/memspace: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/memspace/ .. _README: https://github.com/oneapi-src/unified-memory-framework/blob/main/README.md#memory-pool-managers .. _umf/ipc.h: https://github.com/oneapi-src/unified-memory-framework/blob/main/include/umf/ipc.h diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 1a4da085f5..c79e950ed2 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -277,7 +277,7 @@ if(LINUX) add_umf_executable( NAME ${EXAMPLE_NAME} - SRCS ctl/ctl_example.c + SRCS ctl/custom_ctl.c LIBS umf ${UMF_HWLOC_NAME}) target_include_directories( @@ -295,7 +295,7 @@ if(LINUX) add_umf_executable( NAME ${EXAMPLE_NAME} - SRCS ctl/ctl_statistics_example.c + SRCS ctl/ctl.c LIBS umf ${UMF_HWLOC_NAME}) target_include_directories( diff --git a/examples/README.md b/examples/README.md index 92da754344..4f16e19067 100644 --- a/examples/README.md +++ b/examples/README.md @@ -69,11 +69,28 @@ processes: a producer and a consumer that communicate in the following way ## CTL example +> **Note**: The CTL API is experimental and may change in future releases. + +This example configures an OS memory provider and disjoint pool, then queries +statistics through CTL using both ``by_handle`` and ``by_name`` selectors. It +demonstrates wildcard nodes to mix selectors, reset peak counters, and read +disjoint-pool bucket telemetry. Run it with: + + ./umf_example_ctl_statistics + +Tracing for detailed disjoint pool counters can be enabled through: + + UMF_CONF="umf.pool.default.disjoint.params.pool_trace=2" ./umf_example_ctl_statistics + +## Custom CTL example + +> **Note**: The CTL API is experimental and may change in future releases. + This example demonstrates how to add CTL support to a custom memory provider. It sets variables ``a`` and ``b`` through CTL, plus it allows -for the modulus ``m`` loaded from the environment or a configuration file. +for the modulus ``m`` to be loaded from the environment or a configuration file. 
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 1a4da085f5..c79e950ed2 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -277,7 +277,7 @@ if(LINUX)
 
     add_umf_executable(
         NAME ${EXAMPLE_NAME}
-        SRCS ctl/ctl_example.c
+        SRCS ctl/custom_ctl.c
         LIBS umf ${UMF_HWLOC_NAME})
 
     target_include_directories(
@@ -295,7 +295,7 @@ if(LINUX)
 
     add_umf_executable(
         NAME ${EXAMPLE_NAME}
-        SRCS ctl/ctl_statistics_example.c
+        SRCS ctl/ctl.c
         LIBS umf ${UMF_HWLOC_NAME})
 
     target_include_directories(
diff --git a/examples/README.md b/examples/README.md
index 92da754344..4f16e19067 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -69,11 +69,28 @@ processes: a producer and a consumer that communicate in the following way
 
 ## CTL example
 
+> **Note**: The CTL API is experimental and may change in future releases.
+
+This example configures an OS memory provider and disjoint pool, then queries
+statistics through CTL using both ``by_handle`` and ``by_name`` selectors. It
+demonstrates wildcard nodes to mix selectors, reset peak counters, and read
+disjoint-pool bucket telemetry. Run it with:
+
+    ./umf_example_ctl_statistics
+
+Tracing for detailed disjoint pool counters can be enabled through:
+
+    UMF_CONF="umf.pool.default.disjoint.params.pool_trace=2" ./umf_example_ctl_statistics
+
+## Custom CTL example
+
+> **Note**: The CTL API is experimental and may change in future releases.
+
 This example demonstrates how to add CTL support to a custom memory
 provider. It sets variables ``a`` and ``b`` through CTL, plus it allows
-for the modulus ``m`` loaded from the environment or a configuration file.
+for the modulus ``m`` to be loaded from the environment or a configuration file.
 Addition and subtraction operations return results modulo ``m`` and the result
 ``c`` can be retrieved using the CTL API. For example, to set the
-modulus through an environment variable run::
+modulus through an environment variable run:
 
     UMF_CONF="umf.provider.default.ctl.m=10" ./umf_example_ctl
diff --git a/examples/ctl/CMakeLists.txt b/examples/ctl/CMakeLists.txt
index 2f78ef4e1d..26fee9e83d 100644
--- a/examples/ctl/CMakeLists.txt
+++ b/examples/ctl/CMakeLists.txt
@@ -23,59 +23,59 @@ endif()
 
 # build the example
 set(EXAMPLE_NAME umf_example_ctl)
 
-add_executable(${ EXAMPLE_NAME} ctl_example.c)
-target_include_directories(${ EXAMPLE_NAME} PRIVATE ${ LIBUMF_INCLUDE_DIRS})
+add_executable(${EXAMPLE_NAME} custom_ctl.c)
+target_include_directories(${EXAMPLE_NAME} PRIVATE ${LIBUMF_INCLUDE_DIRS})
 target_link_directories( ${ EXAMPLE_NAME} PRIVATE ${ LIBHWLOC_LIBRARY_DIRS})
-target_link_libraries(${ EXAMPLE_NAME} PRIVATE ${LIBUMF_LIBRARIES} hwloc)
+target_link_libraries(${EXAMPLE_NAME} PRIVATE ${LIBUMF_LIBRARIES} hwloc)
 
 add_test(
     NAME ${EXAMPLE_NAME}
-    COMMAND ${ EXAMPLE_NAME}
-    WORKING_DIRECTORY ${ CMAKE_CURRENT_BINARY_DIR})
+    COMMAND ${EXAMPLE_NAME}
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
 
 set_tests_properties(${EXAMPLE_NAME} PROPERTIES LABELS "example-standalone")
 
 if(LINUX)
     # set LD_LIBRARY_PATH
     set_property(
-        TEST ${ EXAMPLE_NAME}
+        TEST ${EXAMPLE_NAME}
         PROPERTY
             ENVIRONMENT_MODIFICATION
             "LD_LIBRARY_PATH=path_list_append:"
-            "${LIBUMF_LIBRARY_DIRS};LD_"
-            "LIBRARY_PATH=path_list_append:${"
+            "${LIBUMF_LIBRARY_DIRS};"
+            "LD_LIBRARY_PATH=path_list_append:${"
             "LIBHWLOC_LIBRARY_DIRS}")
 endif()
 
 set(EXAMPLE_NAME umf_example_ctl_statistics)
 
-add_executable(${ EXAMPLE_NAME} ctl_statistics_example.c)
-target_include_directories(${ EXAMPLE_NAME} PRIVATE ${ LIBUMF_INCLUDE_DIRS})
+add_executable(${EXAMPLE_NAME} ctl.c)
+target_include_directories(${EXAMPLE_NAME} PRIVATE ${LIBUMF_INCLUDE_DIRS})
 target_link_directories( ${ EXAMPLE_NAME} PRIVATE ${ LIBHWLOC_LIBRARY_DIRS})
-target_link_libraries(${ EXAMPLE_NAME} PRIVATE ${LIBUMF_LIBRARIES} hwloc)
+target_link_libraries(${EXAMPLE_NAME} PRIVATE ${LIBUMF_LIBRARIES} hwloc)
 
 add_test(
     NAME ${EXAMPLE_NAME}
-    COMMAND ${ EXAMPLE_NAME}
-    WORKING_DIRECTORY ${ CMAKE_CURRENT_BINARY_DIR})
+    COMMAND ${EXAMPLE_NAME}
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
 
 set_tests_properties(${EXAMPLE_NAME} PROPERTIES LABELS "example-standalone")
 
 if(LINUX)
     # set LD_LIBRARY_PATH
     set_property(
-        TEST ${ EXAMPLE_NAME}
+        TEST ${EXAMPLE_NAME}
         PROPERTY
             ENVIRONMENT_MODIFICATION
             "LD_LIBRARY_PATH=path_list_append:"
-            "${LIBUMF_LIBRARY_DIRS};LD_"
-            "LIBRARY_PATH=path_list_append:${"
+            "${LIBUMF_LIBRARY_DIRS};"
+            "LD_LIBRARY_PATH=path_list_append:${"
             "LIBHWLOC_LIBRARY_DIRS}")
 endif()
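The modulus handling mentioned in the README comes down to branching on the query source inside the provider's write path: programmatic callers hand over a typed ``int``, while ``UMF_CONF`` or a configuration file delivers the digits as a string. A sketch of that branch, with the struct name and the ``CTL_QUERY_PROGRAMMATIC`` constant treated as assumptions rather than the example's actual code:

    // Illustrative helper; not taken from custom_ctl.c.
    #include <stdlib.h>

    #include <umf/base.h>

    typedef struct ctl_state_t {
        int m; // modulus, optional
    } ctl_state_t;

    static umf_result_t write_modulus(ctl_state_t *state,
                                      umf_ctl_query_source_t source,
                                      const void *arg, size_t size) {
        if (source == CTL_QUERY_PROGRAMMATIC) {
            // Typed binary payload from the application.
            if (size != sizeof(int)) {
                return UMF_RESULT_ERROR_INVALID_ARGUMENT;
            }
            state->m = *(const int *)arg;
        } else {
            // Configuration defaults such as umf.provider.default.ctl.m=10
            // arrive as strings and must be parsed.
            state->m = (int)strtol((const char *)arg, NULL, 10);
        }
        return UMF_RESULT_SUCCESS;
    }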
diff --git a/examples/ctl/ctl_statistics_example.c b/examples/ctl/ctl.c
similarity index 98%
rename from examples/ctl/ctl_statistics_example.c
rename to examples/ctl/ctl.c
index 5ddce02966..ce8c435583 100644
--- a/examples/ctl/ctl_statistics_example.c
+++ b/examples/ctl/ctl.c
@@ -7,11 +7,14 @@
  *
  */
 
-#include "umf/base.h"
 #include
 #include
 
+// This example relies on the experimental CTL API, which may change without
+// notice.
 #include
+
+#include
 #include
 #include
 #include
@@ -153,10 +156,7 @@ int main(void) {
         return -1;
     }
 
-    res = umfMemoryProviderGetName(provider, &provider_name);
-    if (res != UMF_RESULT_SUCCESS || provider_name == NULL) {
-        provider_name = "OS";
-    }
+    umfMemoryProviderGetName(provider, &provider_name);
 
     print_provider_stats("Provider stats before allocation", provider,
                          provider_name);
diff --git a/examples/ctl/ctl_example.c b/examples/ctl/custom_ctl.c
similarity index 98%
rename from examples/ctl/ctl_example.c
rename to examples/ctl/custom_ctl.c
index 252acf3571..83f5509940 100644
--- a/examples/ctl/ctl_example.c
+++ b/examples/ctl/custom_ctl.c
@@ -13,9 +13,13 @@
 #include
 #include
 
+// This example relies on the experimental CTL API, which may change without
+// notice.
+#include
+
 #include
 #include
-#include
+
 #include
 #include
 
@@ -23,10 +27,7 @@
 
 // Provider state exposed via CTL
 typedef struct ctl_provider_t {
-    int a;
-    int b;
-    int c;
-    int m; // modulus value, optional
+    int a, b, c, m;
 } ctl_provider_t;
 
 static umf_result_t ctl_init(const void *params, void **provider) {
diff --git a/src/ctl/ctl_defaults.c b/src/ctl/ctl_defaults.c
index 3290cbe8db..57799d30d2 100644
--- a/src/ctl/ctl_defaults.c
+++ b/src/ctl/ctl_defaults.c
@@ -6,11 +6,10 @@
  * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  */
 
-#include "ctl_defaults.h"
-
 #include
 
 #include "base_alloc_global.h"
+#include "ctl_defaults.h"
 #include "utils_concurrency.h"
 #include "utils_log.h"
 #include "utlist.h"
@@ -33,7 +32,7 @@ umf_result_t ctl_default_subtree(ctl_default_entry_t **list, utils_mutex_t *mtx,
                                  umf_ctl_query_type_t queryType) {
     (void)source;
     if (strstr(extra_name, "{}") != NULL) {
-        LOG_ERR("%s, default setting do not support wildcard parameters {}",
+        LOG_ERR("%s, default setting does not support wildcard parameters {}",
                 extra_name);
         return UMF_RESULT_ERROR_NOT_SUPPORTED;
     }
diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c
index 3fd5becb02..a7be19fbce 100644
--- a/src/pool/pool_disjoint.c
+++ b/src/pool/pool_disjoint.c
@@ -28,14 +28,6 @@
 
 static char *DEFAULT_NAME = "disjoint";
 
-enum {
-    DP_OVERRIDE_SLAB_MIN_SIZE = 1 << 0,
-    DP_OVERRIDE_MAX_POOLABLE_SIZE = 1 << 1,
-    DP_OVERRIDE_CAPACITY = 1 << 2,
-    DP_OVERRIDE_MIN_BUCKET_SIZE = 1 << 3,
-    DP_OVERRIDE_POOL_TRACE = 1 << 4,
-};
-
 /* Disjoint pool CTL implementation */
 struct ctl disjoint_ctl_root;
 static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT;
@@ -118,6 +110,15 @@ CTL_READ_HANDLER(slab_min_size)(void *ctx, umf_ctl_query_source_t source,
     return UMF_RESULT_SUCCESS;
 }
 
+// indicates that a param was overridden by CTL
+enum {
+    DP_OVERRIDE_SLAB_MIN_SIZE = 1 << 0,
+    DP_OVERRIDE_MAX_POOLABLE_SIZE = 1 << 1,
+    DP_OVERRIDE_CAPACITY = 1 << 2,
+    DP_OVERRIDE_MIN_BUCKET_SIZE = 1 << 3,
+    DP_OVERRIDE_POOL_TRACE = 1 << 4,
+};
+
 static umf_result_t
 CTL_WRITE_HANDLER(slab_min_size)(void *ctx, umf_ctl_query_source_t source,
                                  void *arg, size_t size,
diff --git a/test/common/fork_helpers.hpp b/test/common/fork_helpers.hpp
index 70d52cecab..3887f97034 100644
--- a/test/common/fork_helpers.hpp
+++ b/test/common/fork_helpers.hpp
@@ -49,7 +49,7 @@ template <typename Func> void run_in_fork(Func &&func) {
             _exit(ForkedTestFailure);
         }
     }
-    umfTearDown(); // exit not call destructor so we need to call it manualy
+    umfTearDown(); // _exit() does not run destructors, so tear down manually
    _exit(ForkedTestSuccess);
 }
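The relocated ``DP_OVERRIDE_*`` flags implement a small pattern worth calling out: each CTL write records which parameter it touched, so pool creation can prefer CTL-supplied values over the params struct only where a write actually happened. A generic sketch of that pattern, using illustrative names rather than the disjoint pool's internals:

    #include <stddef.h>
    #include <stdint.h>

    enum {
        OVERRIDE_SLAB_MIN_SIZE = 1 << 0,
        OVERRIDE_POOL_TRACE = 1 << 1,
    };

    typedef struct pool_config_t {
        size_t slab_min_size;
        int pool_trace;
        uint32_t overrides; // bits set by CTL write handlers
    } pool_config_t;

    // A CTL write handler stores the value and marks it as overridden.
    static void ctl_set_slab_min_size(pool_config_t *cfg, size_t value) {
        cfg->slab_min_size = value;
        cfg->overrides |= OVERRIDE_SLAB_MIN_SIZE;
    }

    // At pool creation, a CTL-written value wins over the params struct only
    // when its override bit is set.
    static size_t effective_slab_min_size(const pool_config_t *cfg,
                                          size_t param_value) {
        return (cfg->overrides & OVERRIDE_SLAB_MIN_SIZE) ? cfg->slab_min_size
                                                         : param_value;
    }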