Skip to content

Commit 10b7d79

Browse files
committed
Add a per-pool allocation counter exposed through the CTL interface (umf.pool.by_handle.stats.alloc_count)
1 parent 3a4a335 commit 10b7d79

File tree

4 files changed

+207
-9
lines changed

4 files changed

+207
-9
lines changed

src/memory_pool.c

Lines changed: 78 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,9 @@ static UTIL_ONCE_FLAG mem_pool_ctl_initialized = UTIL_ONCE_FLAG_INIT;
3333
char CTL_DEFAULT_ENTRIES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
3434
char CTL_DEFAULT_VALUES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
3535

36-
void ctl_init(void) { utils_mutex_init(&ctl_mtx); }
36+
struct ctl umf_pool_ctl_root;
37+
38+
void ctl_init(void);
3739

3840
static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
3941
umf_ctl_query_source_t source,
@@ -43,9 +45,15 @@ static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
4345
umf_ctl_query_type_t queryType) {
4446
(void)indexes, (void)source;
4547
umf_memory_pool_handle_t hPool = (umf_memory_pool_handle_t)ctx;
48+
int ret = ctl_query(&umf_pool_ctl_root, hPool, source, extra_name,
49+
queryType, arg, size);
50+
if (ret == -1 &&
51+
errno == EINVAL) { // node was not found in pool_ctl_root, try to
52+
// query the specific pool directly
53+
hPool->ops.ext_ctl(hPool->pool_priv, source, extra_name, arg, size,
54+
queryType);
55+
}
4656

47-
hPool->ops.ext_ctl(hPool->pool_priv, /*unused*/ 0, extra_name, arg, size,
48-
queryType);
4957
return 0;
5058
}
5159

@@ -96,9 +104,38 @@ static int CTL_SUBTREE_HANDLER(default)(void *ctx,
96104
return 0;
97105
}
98106

107+
static int CTL_READ_HANDLER(alloc_count)(void *ctx,
108+
umf_ctl_query_source_t source,
109+
void *arg, size_t size,
110+
umf_ctl_index_utlist_t *indexes,
111+
const char *extra_name,
112+
umf_ctl_query_type_t query_type) {
113+
/* suppress unused-parameter errors */
114+
(void)source, (void)size, (void)indexes, (void)extra_name, (void)query_type;
115+
116+
size_t *arg_out = arg;
117+
if (ctx == NULL || arg_out == NULL) {
118+
return UMF_RESULT_ERROR_INVALID_ARGUMENT;
119+
}
120+
121+
assert(size == sizeof(size_t));
122+
123+
umf_memory_pool_handle_t pool = (umf_memory_pool_handle_t)ctx;
124+
utils_atomic_load_acquire_size_t(&pool->stats.alloc_count, arg_out);
125+
return 0;
126+
}
127+
128+
static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(alloc_count),
129+
CTL_NODE_END};
130+
99131
umf_ctl_node_t CTL_NODE(pool)[] = {CTL_LEAF_SUBTREE2(by_handle, by_handle_pool),
100132
CTL_LEAF_SUBTREE(default), CTL_NODE_END};
101133

134+
// One-time CTL initialization: create the global CTL mutex and register the
// per-pool "stats" subtree under the pool CTL root.
void ctl_init(void) {
    utils_mutex_init(&ctl_mtx);
    CTL_REGISTER_MODULE(&umf_pool_ctl_root, stats);
}
138+
102139
static umf_result_t umfDefaultCtlPoolHandle(void *hPool, int operationType,
103140
const char *name, void *arg,
104141
size_t size,
@@ -160,6 +197,7 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
160197
pool->flags = flags;
161198
pool->ops = *ops;
162199
pool->tag = NULL;
200+
memset(&pool->stats, 0, sizeof(pool->stats));
163201

164202
if (NULL == pool->ops.ext_ctl) {
165203
pool->ops.ext_ctl = umfDefaultCtlPoolHandle;
@@ -285,23 +323,47 @@ umf_result_t umfPoolCreate(const umf_memory_pool_ops_t *ops,
285323

286324
// Allocates `size` bytes from the pool. Returns NULL on failure; every
// successful allocation bumps the pool's outstanding-allocation counter.
void *umfPoolMalloc(umf_memory_pool_handle_t hPool, size_t size) {
    UMF_CHECK((hPool != NULL), NULL);

    void *ptr = hPool->ops.malloc(hPool->pool_priv, size);
    if (ptr != NULL) {
        // Count only allocations that actually succeeded.
        utils_atomic_increment_size_t(&hPool->stats.alloc_count);
    }
    return ptr;
}
290334

291335
// Allocates `size` bytes aligned to `alignment` from the pool. Returns NULL
// on failure; successful allocations bump the outstanding-allocation counter.
void *umfPoolAlignedMalloc(umf_memory_pool_handle_t hPool, size_t size,
                           size_t alignment) {
    UMF_CHECK((hPool != NULL), NULL);

    void *ptr = hPool->ops.aligned_malloc(hPool->pool_priv, size, alignment);
    if (ptr != NULL) {
        // Count only allocations that actually succeeded.
        utils_atomic_increment_size_t(&hPool->stats.alloc_count);
    }
    return ptr;
}
296346

297347
// Allocates a zero-initialized array of `num` elements of `size` bytes from
// the pool. Returns NULL on failure; successful allocations bump the
// outstanding-allocation counter.
void *umfPoolCalloc(umf_memory_pool_handle_t hPool, size_t num, size_t size) {
    UMF_CHECK((hPool != NULL), NULL);

    void *ptr = hPool->ops.calloc(hPool->pool_priv, num, size);
    if (ptr != NULL) {
        // Count only allocations that actually succeeded.
        utils_atomic_increment_size_t(&hPool->stats.alloc_count);
    }
    return ptr;
}
301357

302358
// Resizes `ptr` to `size` bytes within the pool, keeping the
// outstanding-allocation counter in sync with realloc's two special cases.
void *umfPoolRealloc(umf_memory_pool_handle_t hPool, void *ptr, size_t size) {
    UMF_CHECK((hPool != NULL), NULL);

    void *new_ptr = hPool->ops.realloc(hPool->pool_priv, ptr, size);
    if (ptr == NULL && new_ptr != NULL) {
        // realloc(NULL, size) acted as malloc(size): one more allocation.
        utils_atomic_increment_size_t(&hPool->stats.alloc_count);
    } else if (ptr != NULL && size == 0 && new_ptr == NULL) {
        // realloc(ptr, 0) acted as free(ptr): one less allocation.
        utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
    }
    // A plain resize (ptr != NULL, size != 0) leaves the counter unchanged.
    return new_ptr;
}
306368

307369
size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
@@ -312,7 +374,15 @@ size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
312374

313375
// Returns `ptr` to the pool. On success, a non-NULL `ptr` decrements the
// outstanding-allocation counter; freeing NULL never changes the counter.
umf_result_t umfPoolFree(umf_memory_pool_handle_t hPool, void *ptr) {
    UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);

    umf_result_t result = hPool->ops.free(hPool->pool_priv, ptr);
    if (result == UMF_RESULT_SUCCESS && ptr != NULL) {
        utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
    }
    return result;
}
317387

318388
umf_result_t umfPoolGetLastAllocationError(umf_memory_pool_handle_t hPool) {

src/memory_pool_internal.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,10 @@ extern "C" {
2424
#include "base_alloc.h"
2525
#include "utils_concurrency.h"
2626

27+
// Per-pool runtime statistics, updated atomically by the allocation entry
// points (umfPoolMalloc/Calloc/AlignedMalloc/Realloc/Free).
typedef struct umf_pool_stats {
    // Number of currently outstanding allocations: incremented on each
    // successful allocation, decremented on each successful free.
    size_t alloc_count;
} umf_pool_stats_t;
30+
2731
typedef struct umf_memory_pool_t {
2832
void *pool_priv;
2933
umf_pool_create_flags_t flags;
@@ -33,6 +37,8 @@ typedef struct umf_memory_pool_t {
3337

3438
utils_mutex_t lock;
3539
void *tag;
40+
// Memory pool statistics
41+
umf_pool_stats_t stats;
3642

3743
// ops should be the last due to possible change size in the future
3844
umf_memory_pool_ops_t ops;

test/common/pool.hpp

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,11 @@ bool isCallocSupported(umf_memory_pool_handle_t hPool) {
8383
return supported;
8484
}
8585

86-
bool isAlignedAllocSupported(umf_memory_pool_handle_t hPool) {
86+
bool isAlignedAllocSupported([[maybe_unused]] umf_memory_pool_handle_t hPool) {
87+
#ifdef _WIN32
88+
// On Windows, aligned allocation is not supported
89+
return false;
90+
#else
8791
static constexpr size_t allocSize = 8;
8892
static constexpr size_t alignment = 8;
8993
auto *ptr = umfPoolAlignedMalloc(hPool, allocSize, alignment);
@@ -97,6 +101,7 @@ bool isAlignedAllocSupported(umf_memory_pool_handle_t hPool) {
97101
} else {
98102
throw std::runtime_error("AlignedMalloc failed with unexpected error");
99103
}
104+
#endif
100105
}
101106

102107
typedef struct pool_base_t {

test/poolFixtures.hpp

Lines changed: 117 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
#include <array>
99
#include <cstring>
1010
#include <functional>
11+
#include <list>
1112
#include <random>
1213
#include <string>
1314
#include <thread>
@@ -687,4 +688,120 @@ TEST_P(umfPoolTest, pool_from_ptr_half_size_success) {
687688
#endif /* !_WIN32 */
688689
}
689690

691+
// Verify that the CTL entry "umf.pool.by_handle.stats.alloc_count" tracks the
// number of outstanding allocations through every allocation entry point:
// malloc, realloc (in its malloc/free disguises), calloc and aligned malloc.
TEST_P(umfPoolTest, ctl_stat_alloc_count) {
    umf_memory_pool_handle_t hPool = pool.get();
    const size_t allocSize = 4096;
    const size_t maxAllocs = 10;
    std::list<void *> allocations;
    size_t count = 0;

    // A fresh pool must report a zero counter.
    umf_result_t status = umfCtlGet("umf.pool.by_handle.stats.alloc_count",
                                    hPool, &count, sizeof(count));
    ASSERT_EQ(status, UMF_RESULT_SUCCESS);
    ASSERT_EQ(count, 0);

    // malloc: each successful allocation increments the counter by one.
    for (size_t n = 1; n <= maxAllocs; n++) {
        void *p = umfPoolMalloc(hPool, allocSize);
        ASSERT_NE(p, nullptr);
        status = umfCtlGet("umf.pool.by_handle.stats.alloc_count", hPool,
                           &count, sizeof(count));
        ASSERT_EQ(status, UMF_RESULT_SUCCESS);
        ASSERT_EQ(count, n);
        allocations.push_back(p);
    }

    // Freeing everything must bring the counter back to zero.
    for (void *p : allocations) {
        umf_result_t free_status = umfPoolFree(hPool, p);
        ASSERT_EQ(free_status, UMF_RESULT_SUCCESS);
    }
    allocations.clear();
    status = umfCtlGet("umf.pool.by_handle.stats.alloc_count", hPool, &count,
                       sizeof(count));
    ASSERT_EQ(status, UMF_RESULT_SUCCESS);
    ASSERT_EQ(count, 0);

    if (umf_test::isReallocSupported(hPool)) {
        // Alternate malloc with realloc(NULL, size); both count as a new
        // allocation.
        for (size_t n = 1; n <= maxAllocs; n++) {
            void *p = (n % 2 == 0)
                          ? umfPoolMalloc(hPool, allocSize)
                          : umfPoolRealloc(hPool, nullptr, allocSize);
            ASSERT_NE(p, nullptr);
            status = umfCtlGet("umf.pool.by_handle.stats.alloc_count", hPool,
                               &count, sizeof(count));
            ASSERT_EQ(status, UMF_RESULT_SUCCESS);
            ASSERT_EQ(count, n);
            allocations.push_back(p);
        }

        // Growing an existing block is not a new allocation: the counter must
        // stay put.
        for (auto &p : allocations) {
            p = umfPoolRealloc(hPool, p, allocSize * 2);
            ASSERT_NE(p, nullptr);
        }
        status = umfCtlGet("umf.pool.by_handle.stats.alloc_count", hPool,
                           &count, sizeof(count));
        ASSERT_EQ(status, UMF_RESULT_SUCCESS);
        ASSERT_EQ(count, maxAllocs);

        // Release half via realloc(ptr, 0) and half via umfPoolFree(); both
        // paths must decrement the counter.
        size_t remaining = allocations.size();
        for (auto &p : allocations) {
            if (remaining-- % 2 == 0) {
                p = umfPoolRealloc(hPool, p, 0);
                ASSERT_EQ(p, nullptr);
            } else {
                status = umfPoolFree(hPool, p);
                ASSERT_EQ(status, UMF_RESULT_SUCCESS);
            }
        }
        allocations.clear();
        status = umfCtlGet("umf.pool.by_handle.stats.alloc_count", hPool,
                           &count, sizeof(count));
        ASSERT_EQ(status, UMF_RESULT_SUCCESS);
        ASSERT_EQ(count, 0);
    }

    if (umf_test::isCallocSupported(hPool)) {
        // calloc behaves exactly like malloc as far as the counter goes.
        for (size_t n = 1; n <= maxAllocs; n++) {
            void *p = umfPoolCalloc(hPool, 1, allocSize);
            ASSERT_NE(p, nullptr);
            status = umfCtlGet("umf.pool.by_handle.stats.alloc_count", hPool,
                               &count, sizeof(count));
            ASSERT_EQ(status, UMF_RESULT_SUCCESS);
            ASSERT_EQ(count, n);
            allocations.push_back(p);
        }

        for (void *p : allocations) {
            umf_result_t free_status = umfPoolFree(hPool, p);
            ASSERT_EQ(free_status, UMF_RESULT_SUCCESS);
        }
        allocations.clear();
        status = umfCtlGet("umf.pool.by_handle.stats.alloc_count", hPool,
                           &count, sizeof(count));
        ASSERT_EQ(status, UMF_RESULT_SUCCESS);
        ASSERT_EQ(count, 0);
    }

    if (umf_test::isAlignedAllocSupported(hPool)) {
        // Aligned allocations are counted exactly like plain ones.
        for (size_t n = 1; n <= maxAllocs; n++) {
            void *p = umfPoolAlignedMalloc(hPool, allocSize, 4096);
            ASSERT_NE(p, nullptr);
            status = umfCtlGet("umf.pool.by_handle.stats.alloc_count", hPool,
                               &count, sizeof(count));
            ASSERT_EQ(status, UMF_RESULT_SUCCESS);
            ASSERT_EQ(count, n);
            allocations.push_back(p);
        }

        for (void *p : allocations) {
            umf_result_t free_status = umfPoolFree(hPool, p);
            ASSERT_EQ(free_status, UMF_RESULT_SUCCESS);
        }
        allocations.clear();
        status = umfCtlGet("umf.pool.by_handle.stats.alloc_count", hPool,
                           &count, sizeof(count));
        ASSERT_EQ(status, UMF_RESULT_SUCCESS);
        ASSERT_EQ(count, 0);
    }
}
690807
#endif /* UMF_TEST_POOL_FIXTURES_HPP */

0 commit comments

Comments
 (0)