diff --git a/ChangeLog.md b/ChangeLog.md index 69124a14c5a03..1944971e59a98 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -22,6 +22,7 @@ See docs/process.md for more on how version tagging works. ----------------------- - The `ENVIRONMENT` setting will now be automatically updated to include `worker` if multi-threading is enabled. (#24525) +- mimalloc was updated to 2.2.4. (#24542) 4.0.10 - 06/07/25 ----------------- diff --git a/system/lib/mimalloc/LICENSE b/system/lib/mimalloc/LICENSE index 670b668a0c928..53315ebee557a 100644 --- a/system/lib/mimalloc/LICENSE +++ b/system/lib/mimalloc/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen +Copyright (c) 2018-2025 Microsoft Corporation, Daan Leijen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/system/lib/mimalloc/README.emscripten b/system/lib/mimalloc/README.emscripten index 92ffdea281d27..5b7fef2ebf235 100644 --- a/system/lib/mimalloc/README.emscripten +++ b/system/lib/mimalloc/README.emscripten @@ -1,5 +1,5 @@ -This contains mimalloc 8c532c32c3c96e5ba1f2283e032f69ead8add00f (v2.1.7) with +This contains mimalloc fbd8b99c2b828428947d70fdc046bb55609be93e (v2.2.4) with Emscripten-specific changes. Origin: https://github.com/microsoft/mimalloc diff --git a/system/lib/mimalloc/include/mimalloc-stats.h b/system/lib/mimalloc/include/mimalloc-stats.h new file mode 100644 index 0000000000000..44c4886f88a0c --- /dev/null +++ b/system/lib/mimalloc/include/mimalloc-stats.h @@ -0,0 +1,103 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_STATS_H +#define MIMALLOC_STATS_H + +#include <mimalloc.h> +#include <stdint.h> + +#define MI_STAT_VERSION 1 // increased on every backward incompatible change + +// count allocation over time +typedef struct mi_stat_count_s { + int64_t total; // total allocated + int64_t peak; // peak allocation + int64_t current; // current allocation +} mi_stat_count_t; + +// counters only increase +typedef struct mi_stat_counter_s { + int64_t total; // total count +} mi_stat_counter_t; + +#define MI_STAT_FIELDS() \ + MI_STAT_COUNT(pages) /* count of mimalloc pages */ \ + MI_STAT_COUNT(reserved) /* reserved memory bytes */ \ + MI_STAT_COUNT(committed) /* committed bytes */ \ + MI_STAT_COUNT(reset) /* reset bytes */ \ + MI_STAT_COUNT(purged) /* purged bytes */ \ + MI_STAT_COUNT(page_committed) /* committed memory inside pages */ \ + MI_STAT_COUNT(pages_abandoned) /* abandoned pages count */ \ + MI_STAT_COUNT(threads) /* number of threads */ \ + MI_STAT_COUNT(malloc_normal) /* allocated bytes <= MI_LARGE_OBJ_SIZE_MAX */ \ + MI_STAT_COUNT(malloc_huge) /* allocated bytes in huge pages */ \ + MI_STAT_COUNT(malloc_requested) /* malloc requested bytes */ \ + \ + MI_STAT_COUNTER(mmap_calls) \ + MI_STAT_COUNTER(commit_calls) \ + MI_STAT_COUNTER(reset_calls) \ + MI_STAT_COUNTER(purge_calls) \ + MI_STAT_COUNTER(arena_count) /* number of memory arena's */ \ + MI_STAT_COUNTER(malloc_normal_count) /* number of blocks <= MI_LARGE_OBJ_SIZE_MAX */ \ + MI_STAT_COUNTER(malloc_huge_count) /* number of huge blocks */ \ + MI_STAT_COUNTER(malloc_guarded_count) /* number of allocations with guard pages */ \ + \ + /* internal statistics */ \ + MI_STAT_COUNTER(arena_rollback_count) \ + MI_STAT_COUNTER(arena_purges) \ + MI_STAT_COUNTER(pages_extended) /* number of page extensions */ \ + MI_STAT_COUNTER(pages_retire) /* number of pages that are retired */ \ + MI_STAT_COUNTER(page_searches) /* searches for a fresh page */ \ + /* only on v1 and v2 */ \ + MI_STAT_COUNT(segments) \ + MI_STAT_COUNT(segments_abandoned) \ + MI_STAT_COUNT(segments_cache) \ + MI_STAT_COUNT(_segments_reserved) \ + /* only on v3 */ \ + MI_STAT_COUNTER(pages_reclaim_on_alloc) \ + MI_STAT_COUNTER(pages_reclaim_on_free) \ + MI_STAT_COUNTER(pages_reabandon_full) \ + MI_STAT_COUNTER(pages_unabandon_busy_wait) \ + + +// Define the statistics structure +#define MI_BIN_HUGE (73U) // see types.h +#define MI_STAT_COUNT(stat) mi_stat_count_t stat; +#define MI_STAT_COUNTER(stat) mi_stat_counter_t stat; + +typedef struct mi_stats_s +{ + int version; + + MI_STAT_FIELDS() + + // future extension + mi_stat_count_t _stat_reserved[4]; + mi_stat_counter_t _stat_counter_reserved[4]; + + // size segregated statistics + mi_stat_count_t malloc_bins[MI_BIN_HUGE+1]; // allocation per size bin + mi_stat_count_t page_bins[MI_BIN_HUGE+1]; // pages allocated per size bin +} mi_stats_t; + +#undef MI_STAT_COUNT +#undef MI_STAT_COUNTER + +// Exported definitions +#ifdef __cplusplus +extern "C" { +#endif + +mi_decl_export void mi_stats_get( size_t stats_size, mi_stats_t* stats ) mi_attr_noexcept; +mi_decl_export char* mi_stats_get_json( size_t buf_size, char* buf ) mi_attr_noexcept; // use mi_free to free the result if the input buf == NULL + +#ifdef __cplusplus +} +#endif + +#endif // MIMALLOC_STATS_H diff --git a/system/lib/mimalloc/include/mimalloc.h b/system/lib/mimalloc/include/mimalloc.h index c41bcc8039190..f887278a98879 100644 --- a/system/lib/mimalloc/include/mimalloc.h +++ 
b/system/lib/mimalloc/include/mimalloc.h @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file #ifndef MIMALLOC_H #define MIMALLOC_H -#define MI_MALLOC_VERSION 217 // major + 2 digits minor +#define MI_MALLOC_VERSION 224 // major + 2 digits minor // ------------------------------------------------------ // Compiler specific attributes @@ -154,16 +154,21 @@ mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; - -mi_decl_export void mi_process_init(void) mi_attr_noexcept; -mi_decl_export void mi_thread_init(void) mi_attr_noexcept; -mi_decl_export void mi_thread_done(void) mi_attr_noexcept; mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; +mi_decl_export void mi_options_print(void) mi_attr_noexcept; mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept; + +// Generally do not use the following as these are usually called automatically +mi_decl_export void mi_process_init(void) mi_attr_noexcept; +mi_decl_export void mi_cdecl mi_process_done(void) mi_attr_noexcept; +mi_decl_export void mi_thread_init(void) mi_attr_noexcept; +mi_decl_export void mi_thread_done(void) mi_attr_noexcept; + + // ------------------------------------------------------------------------------------- // Aligned allocation // Note that `alignment` always follows `size` for consistency with unaligned @@ -259,23 +264,25 @@ typedef struct mi_heap_area_s { size_t used; // number of allocated blocks size_t block_size; // size in bytes of each block size_t full_block_size; // size in bytes of a full block including padding and metadata. 
+ int heap_tag; // heap tag associated with this area } mi_heap_area_t; typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg); -mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg); +mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg); // Experimental mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept; mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept; -mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; -mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; -mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; -mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept; +mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept; -mi_decl_export void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept; +mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept; +mi_decl_export void mi_arenas_print(void) mi_attr_noexcept; // Experimental: heaps associated with specific memory arena's typedef int mi_arena_id_t; @@ -289,8 +296,36 @@ mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_co mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id); #endif + +// Experimental: allow sub-processes whose memory areas stay separated (and no reclamation between them) +// Used for example for separate interpreters in one process. +typedef void* mi_subproc_id_t; +mi_decl_export mi_subproc_id_t mi_subproc_main(void); +mi_decl_export mi_subproc_id_t mi_subproc_new(void); +mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc); +mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet) + +// Experimental: visit abandoned heap areas (that are not owned by a specific heap) +mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg); + +// Experimental: objects followed by a guard page. +// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object. +// A seed of 0 uses a random start point. Only objects within the size bound are eligible for guard pages. 
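A minimal usage sketch for the guarded-object API declared just below (`mi_heap_guarded_set_sample_rate` / `mi_heap_guarded_set_size_bound`); it only has an effect in an `MI_GUARDED` build, and the size bounds and sample rate here are illustrative values, not recommendations:

```c
#include <mimalloc.h>

// Sketch (MI_GUARDED build assumed): guard roughly 1 in 100 allocations
// between 16 bytes and 1 MiB on the default heap.
static void enable_guarded_sampling(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_heap_guarded_set_size_bound(heap, 16, 1024*1024);
  mi_heap_guarded_set_sample_rate(heap, 100, 0);  // seed 0 = random start point
}
```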
+mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed); +mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max); + +// Experimental: communicate that the thread is part of a threadpool +mi_decl_export void mi_thread_set_in_threadpool(void) mi_attr_noexcept; + +// Experimental: create a new heap with a specified heap tag. Set `allow_destroy` to false to allow the thread +// to reclaim abandoned memory (with a compatible heap_tag and arena_id) but in that case `mi_heap_destroy` will +// fall back to `mi_heap_delete`. +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id); + // deprecated -mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; +mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept; + // ------------------------------------------------------ @@ -332,7 +367,7 @@ typedef enum mi_option_e { mi_option_deprecated_segment_cache, mi_option_deprecated_page_reset, mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination - mi_option_deprecated_segment_reset, + mi_option_deprecated_segment_reset, mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand) mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10) mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes. @@ -348,6 +383,14 @@ typedef enum mi_option_e { mi_option_abandoned_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1) mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's) mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. 
(only on windows) + mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0) + mi_option_guarded_min, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects (=0) + mi_option_guarded_max, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects (=0) + mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0) + mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000) + mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0) + mi_option_target_segments_per_thread, // experimental (=0) + mi_option_generic_collect, // collect heaps every N (=10000) generic allocation calls _mi_option_last, // legacy option names mi_option_large_os_pages = mi_option_allow_large_os_pages, diff --git a/system/lib/mimalloc/include/mimalloc/atomic.h b/system/lib/mimalloc/include/mimalloc/atomic.h index d5333dd90f7ca..e8bac316b3a6f 100644 --- a/system/lib/mimalloc/include/mimalloc/atomic.h +++ b/system/lib/mimalloc/include/mimalloc/atomic.h @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2023 Microsoft Research, Daan Leijen +Copyright (c) 2018-2024 Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -8,10 +8,21 @@ terms of the MIT license. A copy of the license can be found in the file #ifndef MIMALLOC_ATOMIC_H #define MIMALLOC_ATOMIC_H +// include windows.h or pthreads.h +#if defined(_WIN32) +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include <windows.h> +#elif !defined(__wasi__) && (!defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)) +#define MI_USE_PTHREADS +#include <pthread.h> +#endif + // -------------------------------------------------------------------------------------------- // Atomics // We need to be portable between C, C++, and MSVC. -// We base the primitives on the C/C++ atomics and create a mimimal wrapper for MSVC in C compilation mode. +// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode. // This is why we try to use only `uintptr_t` and `*` as atomic types. // To gain better insight in the range of used atomics, we use explicitly named memory order operations // instead of passing the memory order as a parameter. @@ -20,33 +31,33 @@ terms of the MIT license. 
A copy of the license can be found in the file #if defined(__cplusplus) // Use C++ atomics #include -#define _Atomic(tp) std::atomic -#define mi_atomic(name) std::atomic_##name -#define mi_memory_order(name) std::memory_order_##name -#if (__cplusplus >= 202002L) // c++20, see issue #571 -#define MI_ATOMIC_VAR_INIT(x) x +#define _Atomic(tp) std::atomic +#define mi_atomic(name) std::atomic_##name +#define mi_memory_order(name) std::memory_order_##name +#if (__cplusplus >= 202002L) // c++20, see issue #571 + #define MI_ATOMIC_VAR_INIT(x) x #elif !defined(ATOMIC_VAR_INIT) -#define MI_ATOMIC_VAR_INIT(x) x + #define MI_ATOMIC_VAR_INIT(x) x #else - #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) #endif #elif defined(_MSC_VER) // Use MSVC C wrapper for C11 atomics -#define _Atomic(tp) tp -#define MI_ATOMIC_VAR_INIT(x) x -#define mi_atomic(name) mi_atomic_##name -#define mi_memory_order(name) mi_memory_order_##name +#define _Atomic(tp) tp +#define MI_ATOMIC_VAR_INIT(x) x +#define mi_atomic(name) mi_atomic_##name +#define mi_memory_order(name) mi_memory_order_##name #else // Use C11 atomics #include -#define mi_atomic(name) atomic_##name -#define mi_memory_order(name) memory_order_##name +#define mi_atomic(name) atomic_##name +#define mi_memory_order(name) memory_order_##name #if (__STDC_VERSION__ >= 201710L) // c17, see issue #735 - #define MI_ATOMIC_VAR_INIT(x) x + #define MI_ATOMIC_VAR_INIT(x) x #elif !defined(ATOMIC_VAR_INIT) - #define MI_ATOMIC_VAR_INIT(x) x + #define MI_ATOMIC_VAR_INIT(x) x #else - #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) #endif #endif @@ -61,6 +72,7 @@ terms of the MIT license. A copy of the license can be found in the file #define mi_atomic_load_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) #define mi_atomic_store_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) #define mi_atomic_store_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_exchange_relaxed(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(relaxed)) #define mi_atomic_exchange_release(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(release)) #define mi_atomic_exchange_acq_rel(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel)) #define mi_atomic_cas_weak_release(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed)) @@ -99,6 +111,8 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub); #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des) #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des) #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des) +#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel(p,exp,(tp*)des) +#define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,(tp*)x) #define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x) #define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x) #else @@ -107,6 +121,8 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub); #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des) #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des) #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) 
mi_atomic_cas_strong_release(p,exp,des) +#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel(p,exp,des) +#define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,x) #define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,x) #define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x) #endif @@ -115,6 +131,12 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub); static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) { return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed)); } +static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) { + const int64_t add = mi_atomic_load_relaxed((_Atomic(int64_t)*)padd); + if (add != 0) { + mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed)); + } +} static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) { int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p); while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, ¤t, x)) { /* nothing */ }; @@ -133,10 +155,6 @@ static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) { #elif defined(_MSC_VER) // Legacy MSVC plain C compilation wrapper that uses Interlocked operations to model C11 atomics. -#ifndef WIN32_LEAN_AND_MEAN -#define WIN32_LEAN_AND_MEAN -#endif -#include #include #ifdef _WIN64 typedef LONG64 msc_intptr_t; @@ -250,6 +268,13 @@ static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int6 return current; #endif } +static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) { + const int64_t add = *padd; + if (add != 0) { + mi_atomic_addi64_relaxed((volatile _Atomic(int64_t)*)p, add); + } +} + static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) { int64_t current; do { @@ -280,6 +305,8 @@ static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_exchange_ptr_relaxed(tp,p,x) (tp*)mi_atomic_exchange_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)x) #define mi_atomic_exchange_ptr_release(tp,p,x) (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x) #define mi_atomic_exchange_ptr_acq_rel(tp,p,x) (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x) @@ -302,11 +329,16 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) { return (intptr_t)mi_atomic_addi(p, -sub); } + +// ---------------------------------------------------------------------- +// Once and Guard +// ---------------------------------------------------------------------- + typedef _Atomic(uintptr_t) mi_atomic_once_t; // Returns true only on the first invocation static inline bool mi_atomic_once( mi_atomic_once_t* once ) { - if (mi_atomic_load_relaxed(once) != 0) return false; // quick test + if 
(mi_atomic_load_relaxed(once) != 0) return false; // quick test uintptr_t expected = 0; return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1 } @@ -322,17 +354,16 @@ typedef _Atomic(uintptr_t) mi_atomic_guard_t; +// ---------------------------------------------------------------------- // Yield +// ---------------------------------------------------------------------- + #if defined(__cplusplus) #include static inline void mi_atomic_yield(void) { std::this_thread::yield(); } #elif defined(_WIN32) -#ifndef WIN32_LEAN_AND_MEAN -#define WIN32_LEAN_AND_MEAN -#endif -#include static inline void mi_atomic_yield(void) { YieldProcessor(); } @@ -342,8 +373,9 @@ static inline void mi_atomic_yield(void) { _mm_pause(); } #elif (defined(__GNUC__) || defined(__clang__)) && \ - (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \ - defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) || defined(__POWERPC__) + (defined(__x86_64__) || defined(__i386__) || \ + defined(__aarch64__) || defined(__arm__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)) #if defined(__x86_64__) || defined(__i386__) static inline void mi_atomic_yield(void) { __asm__ volatile ("pause" ::: "memory"); @@ -352,10 +384,16 @@ static inline void mi_atomic_yield(void) { static inline void mi_atomic_yield(void) { __asm__ volatile("wfe"); } -#elif (defined(__arm__) && __ARM_ARCH__ >= 7) +#elif defined(__arm__) +#if __ARM_ARCH >= 7 static inline void mi_atomic_yield(void) { __asm__ volatile("yield" ::: "memory"); } +#else +static inline void mi_atomic_yield(void) { + __asm__ volatile ("nop" ::: "memory"); +} +#endif #elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__) #ifdef __APPLE__ static inline void mi_atomic_yield(void) { @@ -366,10 +404,6 @@ static inline void mi_atomic_yield(void) { __asm__ __volatile__ ("or 27,27,27" ::: "memory"); } #endif -#elif defined(__armel__) || defined(__ARMEL__) -static inline void mi_atomic_yield(void) { - __asm__ volatile ("nop" ::: "memory"); -} #endif #elif defined(__sun) // Fallback for other archs @@ -390,4 +424,134 @@ static inline void mi_atomic_yield(void) { #endif +// ---------------------------------------------------------------------- +// Locks +// These do not have to be recursive and should be light-weight +// in-process only locks. Only used for reserving arena's and to +// maintain the abandoned list. 
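The new internal lock abstraction below exposes `mi_lock_init`/`mi_lock_acquire`/`mi_lock_release` plus a `mi_lock()` statement macro. A sketch of the intended usage pattern, assuming the in-tree internal header; `counter_lock` and `protected_counter` are hypothetical names, not part of mimalloc:

```c
#include "mimalloc/atomic.h"   // internal header; sketch assumes an in-tree build

static mi_lock_t counter_lock;   // initialized once via mi_lock_init(&counter_lock)
static size_t    protected_counter;

static void bump_counter(void) {
  mi_lock(&counter_lock) {       // acquires on entry, releases when the block exits
    protected_counter++;
  }
}
```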
+// ---------------------------------------------------------------------- +#if _MSC_VER +#pragma warning(disable:26110) // unlock with holding lock +#endif + +#define mi_lock(lock) for(bool _go = (mi_lock_acquire(lock),true); _go; (mi_lock_release(lock), _go=false) ) + +#if defined(_WIN32) + +#if 1 +#define mi_lock_t SRWLOCK // slim reader-writer lock + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + return TryAcquireSRWLockExclusive(lock); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + AcquireSRWLockExclusive(lock); +} +static inline void mi_lock_release(mi_lock_t* lock) { + ReleaseSRWLockExclusive(lock); +} +static inline void mi_lock_init(mi_lock_t* lock) { + InitializeSRWLock(lock); +} +static inline void mi_lock_done(mi_lock_t* lock) { + (void)(lock); +} + +#else +#define mi_lock_t CRITICAL_SECTION + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + return TryEnterCriticalSection(lock); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + EnterCriticalSection(lock); +} +static inline void mi_lock_release(mi_lock_t* lock) { + LeaveCriticalSection(lock); +} +static inline void mi_lock_init(mi_lock_t* lock) { + InitializeCriticalSection(lock); +} +static inline void mi_lock_done(mi_lock_t* lock) { + DeleteCriticalSection(lock); +} + +#endif + +#elif defined(MI_USE_PTHREADS) + +void _mi_error_message(int err, const char* fmt, ...); + +#define mi_lock_t pthread_mutex_t + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + return (pthread_mutex_trylock(lock) == 0); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + const int err = pthread_mutex_lock(lock); + if (err != 0) { + _mi_error_message(err, "internal error: lock cannot be acquired\n"); + } +} +static inline void mi_lock_release(mi_lock_t* lock) { + pthread_mutex_unlock(lock); +} +static inline void mi_lock_init(mi_lock_t* lock) { + pthread_mutex_init(lock, NULL); +} +static inline void mi_lock_done(mi_lock_t* lock) { + pthread_mutex_destroy(lock); +} + +#elif defined(__cplusplus) + +#include +#define mi_lock_t std::mutex + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + return lock->try_lock(); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + lock->lock(); +} +static inline void mi_lock_release(mi_lock_t* lock) { + lock->unlock(); +} +static inline void mi_lock_init(mi_lock_t* lock) { + (void)(lock); +} +static inline void mi_lock_done(mi_lock_t* lock) { + (void)(lock); +} + +#else + +// fall back to poor man's locks. +// this should only be the case in a single-threaded environment (like __wasi__) + +#define mi_lock_t _Atomic(uintptr_t) + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + uintptr_t expected = 0; + return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + for (int i = 0; i < 1000; i++) { // for at most 1000 tries? 
+ if (mi_lock_try_acquire(lock)) return; + mi_atomic_yield(); + } +} +static inline void mi_lock_release(mi_lock_t* lock) { + mi_atomic_store_release(lock, (uintptr_t)0); +} +static inline void mi_lock_init(mi_lock_t* lock) { + mi_lock_release(lock); +} +static inline void mi_lock_done(mi_lock_t* lock) { + (void)(lock); +} + +#endif + + #endif // __MIMALLOC_ATOMIC_H diff --git a/system/lib/mimalloc/include/mimalloc/internal.h b/system/lib/mimalloc/include/mimalloc/internal.h index 6c6e5ed04f1db..ca5be9304a8ba 100644 --- a/system/lib/mimalloc/include/mimalloc/internal.h +++ b/system/lib/mimalloc/include/mimalloc/internal.h @@ -8,195 +8,274 @@ terms of the MIT license. A copy of the license can be found in the file #ifndef MIMALLOC_INTERNAL_H #define MIMALLOC_INTERNAL_H - // -------------------------------------------------------------------------- -// This file contains the interal API's of mimalloc and various utility +// This file contains the internal API's of mimalloc and various utility // functions and macros. // -------------------------------------------------------------------------- #include "types.h" #include "track.h" + +// -------------------------------------------------------------------------- +// Compiler defines +// -------------------------------------------------------------------------- + #if (MI_DEBUG>0) #define mi_trace_message(...) _mi_trace_message(__VA_ARGS__) #else #define mi_trace_message(...) #endif -#define MI_CACHE_LINE 64 +#define mi_decl_cache_align mi_decl_align(64) + #if defined(_MSC_VER) #pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths) #pragma warning(disable:26812) // unscoped enum warning #define mi_decl_noinline __declspec(noinline) #define mi_decl_thread __declspec(thread) -#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE)) +#define mi_decl_align(a) __declspec(align(a)) +#define mi_decl_noreturn __declspec(noreturn) #define mi_decl_weak +#define mi_decl_hidden +#define mi_decl_cold #elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc #define mi_decl_noinline __attribute__((noinline)) #define mi_decl_thread __thread -#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE))) +#define mi_decl_align(a) __attribute__((aligned(a))) +#define mi_decl_noreturn __attribute__((noreturn)) #define mi_decl_weak __attribute__((weak)) +#define mi_decl_hidden __attribute__((visibility("hidden"))) +#if (__GNUC__ >= 4) || defined(__clang__) +#define mi_decl_cold __attribute__((cold)) +#else +#define mi_decl_cold +#endif +#elif __cplusplus >= 201103L // c++11 +#define mi_decl_noinline +#define mi_decl_thread thread_local +#define mi_decl_align(a) alignas(a) +#define mi_decl_noreturn [[noreturn]] +#define mi_decl_weak +#define mi_decl_hidden +#define mi_decl_cold #else #define mi_decl_noinline #define mi_decl_thread __thread // hope for the best :-) -#define mi_decl_cache_align +#define mi_decl_align(a) +#define mi_decl_noreturn #define mi_decl_weak +#define mi_decl_hidden +#define mi_decl_cold #endif -#if defined(__EMSCRIPTEN__) && !defined(__wasi__) -#define __wasi__ +#if defined(__GNUC__) || defined(__clang__) +#define mi_unlikely(x) (__builtin_expect(!!(x),false)) +#define mi_likely(x) (__builtin_expect(!!(x),true)) +#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#define mi_unlikely(x) (x) [[unlikely]] +#define mi_likely(x) (x) [[likely]] +#else +#define mi_unlikely(x) (x) +#define mi_likely(x) (x) 
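Note the call-site style these hint macros require: because the C++20 variant expands to `(x) [[likely]]`, the condition is written without an extra pair of parentheses, as in `if mi_likely(cond) { ... }` (used later in `mi_heap_malloc_use_guarded`). A small illustrative sketch, not taken from the diff:

```c
// Under C++20 this expands to `if (n <= 1024) [[likely]] { ... }`,
// and to `if (__builtin_expect(!!(n <= 1024),true)) { ... }` on GCC/clang.
static inline bool is_small(size_t n) {
  if mi_likely(n <= 1024) {
    return true;
  }
  return false;
}
```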
+#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 #endif #if defined(__cplusplus) -#define mi_decl_externc extern "C" +#define mi_decl_externc extern "C" #else #define mi_decl_externc #endif -// pthreads -#if !defined(_WIN32) && !defined(__wasi__) -#define MI_USE_PTHREADS -#include +#if defined(__EMSCRIPTEN__) && !defined(__wasi__) +#define __wasi__ #endif + +// -------------------------------------------------------------------------- +// Internal functions +// -------------------------------------------------------------------------- + +// "libc.c" +#include +int _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args); +int _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...); +char _mi_toupper(char c); +int _mi_strnicmp(const char* s, const char* t, size_t n); +void _mi_strlcpy(char* dest, const char* src, size_t dest_size); +void _mi_strlcat(char* dest, const char* src, size_t dest_size); +size_t _mi_strlen(const char* s); +size_t _mi_strnlen(const char* s, size_t max_len); +bool _mi_getenv(const char* name, char* result, size_t result_size); + // "options.c" -void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message); -void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...); -void _mi_warning_message(const char* fmt, ...); -void _mi_verbose_message(const char* fmt, ...); -void _mi_trace_message(const char* fmt, ...); -void _mi_options_init(void); -void _mi_error_message(int err, const char* fmt, ...); +void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message); +void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...); +void _mi_message(const char* fmt, ...); +void _mi_warning_message(const char* fmt, ...); +void _mi_verbose_message(const char* fmt, ...); +void _mi_trace_message(const char* fmt, ...); +void _mi_options_init(void); +long _mi_option_get_fast(mi_option_t option); +void _mi_error_message(int err, const char* fmt, ...); // random.c -void _mi_random_init(mi_random_ctx_t* ctx); -void _mi_random_init_weak(mi_random_ctx_t* ctx); -void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx); -void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx); -uintptr_t _mi_random_next(mi_random_ctx_t* ctx); -uintptr_t _mi_heap_random_next(mi_heap_t* heap); -uintptr_t _mi_os_random_weak(uintptr_t extra_seed); +void _mi_random_init(mi_random_ctx_t* ctx); +void _mi_random_init_weak(mi_random_ctx_t* ctx); +void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx); +void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx); +uintptr_t _mi_random_next(mi_random_ctx_t* ctx); +uintptr_t _mi_heap_random_next(mi_heap_t* heap); +uintptr_t _mi_os_random_weak(uintptr_t extra_seed); static inline uintptr_t _mi_random_shuffle(uintptr_t x); // init.c -extern mi_decl_cache_align mi_stats_t _mi_stats_main; -extern mi_decl_cache_align const mi_page_t _mi_page_empty; -bool _mi_is_main_thread(void); -size_t _mi_current_thread_count(void); -bool _mi_preloading(void); // true while the C runtime is not initialized yet +extern mi_decl_hidden mi_decl_cache_align mi_stats_t _mi_stats_main; +extern mi_decl_hidden mi_decl_cache_align const mi_page_t _mi_page_empty; +void _mi_auto_process_init(void); +void mi_cdecl _mi_auto_process_done(void) mi_attr_noexcept; +bool _mi_is_redirected(void); +bool _mi_allocator_init(const char** message); +void _mi_allocator_done(void); +bool _mi_is_main_thread(void); +size_t _mi_current_thread_count(void); +bool _mi_preloading(void); // true while the 
C runtime is not initialized yet +void _mi_thread_done(mi_heap_t* heap); +void _mi_thread_data_collect(void); +void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap); mi_threadid_t _mi_thread_id(void) mi_attr_noexcept; mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap -void _mi_thread_done(mi_heap_t* heap); -void _mi_thread_data_collect(void); -void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap); +mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id); +void _mi_heap_guarded_init(mi_heap_t* heap); // os.c -void _mi_os_init(void); // called from process init -void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats); -void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats); -void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats); - -size_t _mi_os_page_size(void); -size_t _mi_os_good_alloc_size(size_t size); -bool _mi_os_has_overcommit(void); -bool _mi_os_has_virtual_reserve(void); - -bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats); -bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats); -bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats); -bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); -bool _mi_os_protect(void* addr, size_t size); -bool _mi_os_unprotect(void* addr, size_t size); -bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats); -bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats); - -void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats); -void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats); - -void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size); -bool _mi_os_use_large_page(size_t size, size_t alignment); -size_t _mi_os_large_page_size(void); - -void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid); +void _mi_os_init(void); // called from process init +void* _mi_os_alloc(size_t size, mi_memid_t* memid); +void* _mi_os_zalloc(size_t size, mi_memid_t* memid); +void _mi_os_free(void* p, size_t size, mi_memid_t memid); +void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid); + +size_t _mi_os_page_size(void); +size_t _mi_os_good_alloc_size(size_t size); +bool _mi_os_has_overcommit(void); +bool _mi_os_has_virtual_reserve(void); + +bool _mi_os_reset(void* addr, size_t size); +bool _mi_os_decommit(void* addr, size_t size); +bool _mi_os_unprotect(void* addr, size_t size); +bool _mi_os_purge(void* p, size_t size); +bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size); +void _mi_os_reuse(void* p, size_t size); +mi_decl_nodiscard bool _mi_os_commit(void* p, size_t size, bool* is_zero); +mi_decl_nodiscard bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size); +bool _mi_os_protect(void* addr, size_t size); + +void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid); +void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid); + +void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size); +bool _mi_os_use_large_page(size_t size, size_t alignment); +size_t _mi_os_large_page_size(void); +void* 
_mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid); + +int _mi_os_numa_node_count(void); +int _mi_os_numa_node(void); // arena.c mi_arena_id_t _mi_arena_id_none(void); -void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats); -void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld); -void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld); -bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id); -bool _mi_arena_contains(const void* p); -void _mi_arenas_collect(bool force_purge, mi_stats_t* stats); -void _mi_arena_unsafe_destroy_all(mi_stats_t* stats); - -bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment); -void _mi_arena_segment_mark_abandoned(mi_segment_t* segment); -size_t _mi_arena_segment_abandoned_count(void); - -typedef struct mi_arena_field_cursor_s { // abstract - mi_arena_id_t start; - int count; - size_t bitmap_idx; +void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid); +void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid); +void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid); +bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id); +bool _mi_arena_contains(const void* p); +void _mi_arenas_collect(bool force_purge); +void _mi_arena_unsafe_destroy_all(void); + +bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment); +void _mi_arena_segment_mark_abandoned(mi_segment_t* segment); + +void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid); +void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size); + +typedef struct mi_arena_field_cursor_s { // abstract struct + size_t os_list_count; // max entries to visit in the OS abandoned list + size_t start; // start arena idx (may need to be wrapped) + size_t end; // end arena idx (exclusive, may need to be wrapped) + size_t bitmap_idx; // current bit idx for an arena + mi_subproc_t* subproc; // only visit blocks in this sub-process + bool visit_all; // ensure all abandoned blocks are seen (blocking) + bool hold_visit_lock; // if the subproc->abandoned_os_visit_lock is held } mi_arena_field_cursor_t; -void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current); +void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_subproc_t* subproc, bool visit_all, mi_arena_field_cursor_t* current); mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous); +void _mi_arena_field_cursor_done(mi_arena_field_cursor_t* current); // "segment-map.c" -void _mi_segment_map_allocated_at(const mi_segment_t* segment); -void _mi_segment_map_freed_at(const mi_segment_t* segment); +void _mi_segment_map_allocated_at(const mi_segment_t* segment); +void _mi_segment_map_freed_at(const mi_segment_t* segment); +void _mi_segment_map_unsafe_destroy(void); // "segment.c" -mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld); +mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* 
tld); void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld); void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld); bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld); -void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld); +void _mi_segment_collect(mi_segment_t* segment, bool force); #if MI_HUGE_PAGE_ABANDON -void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); +void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); #else -void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); +void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); #endif uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld); -void _mi_abandoned_await_readers(void); void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld); bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment); +bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg); // "page.c" -void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc; +void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc; -void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks -void _mi_page_unfull(mi_page_t* page); -void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page -void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread... -void _mi_heap_delayed_free_all(mi_heap_t* heap); -bool _mi_heap_delayed_free_partial(mi_heap_t* heap); -void _mi_heap_collect_retired(mi_heap_t* heap, bool force); +void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks +void _mi_page_unfull(mi_page_t* page); +void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page +void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread... 
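The reworked `mi_arena_field_cursor_t` above gains a sub-process scope and an explicit `_done` call. A sketch of the iteration pattern implied by those declarations (internal API; `heap` and `subproc` stand for whatever the caller already holds):

```c
// Sketch (internal API): walk abandoned segments visible to one sub-process.
static void visit_abandoned_segments(mi_heap_t* heap, mi_subproc_t* subproc) {
  mi_arena_field_cursor_t cursor;
  _mi_arena_field_cursor_init(heap, subproc, /*visit_all*/ true, &cursor);
  mi_segment_t* segment;
  while ((segment = _mi_arena_segment_clear_abandoned_next(&cursor)) != NULL) {
    // ... reclaim or inspect `segment` ...
  }
  _mi_arena_field_cursor_done(&cursor);  // releases the abandoned-OS visit lock if it was taken
}
```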
+void _mi_page_force_abandon(mi_page_t* page); -void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); -bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); -size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append); -void _mi_deferred_free(mi_heap_t* heap, bool force); +void _mi_heap_delayed_free_all(mi_heap_t* heap); +bool _mi_heap_delayed_free_partial(mi_heap_t* heap); +void _mi_heap_collect_retired(mi_heap_t* heap, bool force); -void _mi_page_free_collect(mi_page_t* page,bool force); -void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments +void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); +bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); +size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append); +void _mi_deferred_free(mi_heap_t* heap, bool force); -size_t _mi_bin_size(uint8_t bin); // for stats -uint8_t _mi_bin(size_t size); // for stats +void _mi_page_free_collect(mi_page_t* page,bool force); +void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments + +size_t _mi_page_bin(const mi_page_t* page); // for stats +size_t _mi_bin_size(size_t bin); // for stats +size_t _mi_bin(size_t size); // for stats // "heap.c" -void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag); -void _mi_heap_destroy_pages(mi_heap_t* heap); -void _mi_heap_collect_abandon(mi_heap_t* heap); -void _mi_heap_set_default_direct(mi_heap_t* heap); -bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid); -void _mi_heap_unsafe_destroy_all(void); -mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag); +void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag); +void _mi_heap_destroy_pages(mi_heap_t* heap); +void _mi_heap_collect_abandon(mi_heap_t* heap); +void _mi_heap_set_default_direct(mi_heap_t* heap); +bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid); +void _mi_heap_unsafe_destroy_all(mi_heap_t* heap); +mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag); +void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page); +bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg); // "stats.c" -void _mi_stats_done(mi_stats_t* stats); +void _mi_stats_done(mi_stats_t* stats); +void _mi_stats_merge_thread(mi_tld_t* tld); mi_msecs_t _mi_clock_now(void); mi_msecs_t _mi_clock_end(mi_msecs_t start); mi_msecs_t _mi_clock_start(void); @@ -213,43 +292,11 @@ bool _mi_free_delayed_block(mi_block_t* block); void _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size); -// "libc.c" -#include -void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args); -void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...); -char _mi_toupper(char c); -int _mi_strnicmp(const char* s, const char* t, size_t n); -void _mi_strlcpy(char* dest, const char* src, size_t dest_size); -void _mi_strlcat(char* dest, const char* src, size_t dest_size); -size_t _mi_strlen(const char* s); -size_t _mi_strnlen(const char* s, size_t max_len); -bool _mi_getenv(const char* name, char* result, size_t result_size); - #if 
MI_DEBUG>1 bool _mi_page_is_valid(mi_page_t* page); #endif -// ------------------------------------------------------ -// Branches -// ------------------------------------------------------ - -#if defined(__GNUC__) || defined(__clang__) -#define mi_unlikely(x) (__builtin_expect(!!(x),false)) -#define mi_likely(x) (__builtin_expect(!!(x),true)) -#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) -#define mi_unlikely(x) (x) [[unlikely]] -#define mi_likely(x) (x) [[likely]] -#else -#define mi_unlikely(x) (x) -#define mi_likely(x) (x) -#endif - -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif - - /* ----------------------------------------------------------- Error codes passed to `_mi_fatal_error` All are recoverable but EFAULT is a serious error and aborts by default in secure mode. @@ -274,6 +321,32 @@ bool _mi_page_is_valid(mi_page_t* page); #endif +// ------------------------------------------------------ +// Assertions +// ------------------------------------------------------ + +#if (MI_DEBUG) +// use our own assertion to print without memory allocation +mi_decl_noreturn mi_decl_cold void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func) mi_attr_noexcept; +#define mi_assert(expr) ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__)) +#else +#define mi_assert(x) +#endif + +#if (MI_DEBUG>1) +#define mi_assert_internal mi_assert +#else +#define mi_assert_internal(x) +#endif + +#if (MI_DEBUG>2) +#define mi_assert_expensive mi_assert +#else +#define mi_assert_expensive(x) +#endif + + + /* ----------------------------------------------------------- Inlined definitions ----------------------------------------------------------- */ @@ -291,7 +364,7 @@ bool _mi_page_is_valid(mi_page_t* page); #define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x) #define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x) #define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x) - +#define MI_INIT74(x) MI_INIT64(x),MI_INIT8(x),x(),x() #include // initialize a local variable to zero; use memset as compilers optimize constant sized memset's @@ -349,6 +422,14 @@ static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) { return (divider == 0 ? size : ((size + divider - 1) / divider)); } + +// clamp an integer +static inline size_t _mi_clamp(size_t sz, size_t min, size_t max) { + if (sz < min) return min; + else if (sz > max) return max; + else return sz; +} + // Is memory zero initialized? 
static inline bool mi_mem_is_zero(const void* p, size_t size) { for (size_t i = 0; i < size; i++) { @@ -410,7 +491,7 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot Heap functions ------------------------------------------------------------------------------------------- */ -extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap +extern mi_decl_hidden const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap static inline bool mi_heap_is_backing(const mi_heap_t* heap) { return (heap->tld->heap_backing == heap); @@ -418,11 +499,11 @@ static inline bool mi_heap_is_backing(const mi_heap_t* heap) { static inline bool mi_heap_is_initialized(mi_heap_t* heap) { mi_assert_internal(heap != NULL); - return (heap != &_mi_heap_empty); + return (heap != NULL && heap != &_mi_heap_empty); } static inline uintptr_t _mi_ptr_cookie(const void* p) { - extern mi_heap_t _mi_heap_main; + extern mi_decl_hidden mi_heap_t _mi_heap_main; mi_assert_internal(_mi_heap_main.cookie != 0); return ((uintptr_t)p ^ _mi_heap_main.cookie); } @@ -589,7 +670,7 @@ static inline bool mi_page_immediate_available(const mi_page_t* page) { } // is more than 7/8th of a page in use? -static inline bool mi_page_mostly_used(const mi_page_t* page) { +static inline bool mi_page_is_mostly_used(const mi_page_t* page) { if (page==NULL) return true; uint16_t frac = page->reserved / 8U; return (page->reserved - page->used <= frac); @@ -620,6 +701,39 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) { page->flags.x.has_aligned = has_aligned; } +/* ------------------------------------------------------------------- + Guarded objects +------------------------------------------------------------------- */ +#if MI_GUARDED +static inline bool mi_block_ptr_is_guarded(const mi_block_t* block, const void* p) { + const ptrdiff_t offset = (uint8_t*)p - (uint8_t*)block; + return (offset >= (ptrdiff_t)(sizeof(mi_block_t)) && block->next == MI_BLOCK_TAG_GUARDED); +} + +static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) { + // this code is written to result in fast assembly as it is on the hot path for allocation + const size_t count = heap->guarded_sample_count - 1; // if the rate was 0, this will underflow and count for a long time.. 
+ if mi_likely(count != 0) { + // no sample + heap->guarded_sample_count = count; + return false; + } + else if (size >= heap->guarded_size_min && size <= heap->guarded_size_max) { + // use guarded allocation + heap->guarded_sample_count = heap->guarded_sample_rate; // reset + return (heap->guarded_sample_rate != 0); + } + else { + // failed size criteria, rewind count (but don't write to an empty heap) + if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; } + return false; + } +} + +mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; + +#endif + /* ------------------------------------------------------------------- Encoding/Decoding the free list next pointers @@ -679,6 +793,16 @@ static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const return mi_rotl(x ^ keys[1], keys[0]) + keys[0]; } +static inline uint32_t mi_ptr_encode_canary(const void* null, const void* p, const uintptr_t* keys) { + const uint32_t x = (uint32_t)(mi_ptr_encode(null,p,keys)); + // make the lowest byte 0 to prevent spurious read overflows which could be a security issue (issue #951) + #ifdef MI_BIG_ENDIAN + return (x & 0x00FFFFFF); + #else + return (x & 0xFFFFFF00); + #endif +} + static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) { mi_track_mem_defined(block,sizeof(mi_block_t)); mi_block_t* next; @@ -788,8 +912,10 @@ static inline mi_memid_t _mi_memid_none(void) { return _mi_memid_create(MI_MEM_NONE); } -static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) { +static inline mi_memid_t _mi_memid_create_os(void* base, size_t size, bool committed, bool is_zero, bool is_large) { mi_memid_t memid = _mi_memid_create(MI_MEM_OS); + memid.mem.os.base = base; + memid.mem.os.size = size; memid.initially_committed = committed; memid.initially_zero = is_zero; memid.is_pinned = is_large; @@ -803,7 +929,7 @@ static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool static inline uintptr_t _mi_random_shuffle(uintptr_t x) { if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros -#if (MI_INTPTR_SIZE==8) +#if (MI_INTPTR_SIZE>=8) // by Sebastiano Vigna, see: x ^= x >> 30; x *= 0xbf58476d1ce4e5b9UL; @@ -821,24 +947,6 @@ static inline uintptr_t _mi_random_shuffle(uintptr_t x) { return x; } -// ------------------------------------------------------------------- -// Optimize numa node access for the common case (= one node) -// ------------------------------------------------------------------- - -int _mi_os_numa_node_get(mi_os_tld_t* tld); -size_t _mi_os_numa_node_count_get(void); - -extern _Atomic(size_t) _mi_numa_node_count; -static inline int _mi_os_numa_node(mi_os_tld_t* tld) { - if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; } - else return _mi_os_numa_node_get(tld); -} -static inline size_t _mi_os_numa_node_count(void) { - const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count); - if mi_likely(count > 0) { return count; } - else return _mi_os_numa_node_count_get(); -} - // ----------------------------------------------------------------------- @@ -849,21 +957,21 @@ static inline size_t _mi_os_numa_node_count(void) { #include // LONG_MAX #define MI_HAVE_FAST_BITSCAN -static inline size_t mi_clz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; -#if (INTPTR_MAX == LONG_MAX) - return __builtin_clzl(x); -#else - return __builtin_clzll(x); -#endif +static inline size_t 
mi_clz(size_t x) { + if (x==0) return MI_SIZE_BITS; + #if (SIZE_MAX == ULONG_MAX) + return __builtin_clzl(x); + #else + return __builtin_clzll(x); + #endif } -static inline size_t mi_ctz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; -#if (INTPTR_MAX == LONG_MAX) - return __builtin_ctzl(x); -#else - return __builtin_ctzll(x); -#endif +static inline size_t mi_ctz(size_t x) { + if (x==0) return MI_SIZE_BITS; + #if (SIZE_MAX == ULONG_MAX) + return __builtin_ctzl(x); + #else + return __builtin_ctzll(x); + #endif } #elif defined(_MSC_VER) @@ -871,38 +979,40 @@ static inline size_t mi_ctz(uintptr_t x) { #include // LONG_MAX #include // BitScanReverse64 #define MI_HAVE_FAST_BITSCAN -static inline size_t mi_clz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; +static inline size_t mi_clz(size_t x) { + if (x==0) return MI_SIZE_BITS; unsigned long idx; -#if (INTPTR_MAX == LONG_MAX) - _BitScanReverse(&idx, x); -#else - _BitScanReverse64(&idx, x); -#endif - return ((MI_INTPTR_BITS - 1) - idx); + #if (SIZE_MAX == ULONG_MAX) + _BitScanReverse(&idx, x); + #else + _BitScanReverse64(&idx, x); + #endif + return ((MI_SIZE_BITS - 1) - (size_t)idx); } -static inline size_t mi_ctz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; +static inline size_t mi_ctz(size_t x) { + if (x==0) return MI_SIZE_BITS; unsigned long idx; -#if (INTPTR_MAX == LONG_MAX) - _BitScanForward(&idx, x); -#else - _BitScanForward64(&idx, x); -#endif - return idx; + #if (SIZE_MAX == ULONG_MAX) + _BitScanForward(&idx, x); + #else + _BitScanForward64(&idx, x); + #endif + return (size_t)idx; } #else -static inline size_t mi_ctz32(uint32_t x) { + +static inline size_t mi_ctz_generic32(uint32_t x) { // de Bruijn multiplication, see - static const unsigned char debruijn[32] = { + static const uint8_t debruijn[32] = { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 }; if (x==0) return 32; - return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27]; + return debruijn[(uint32_t)((x & -(int32_t)x) * (uint32_t)(0x077CB531U)) >> 27]; } -static inline size_t mi_clz32(uint32_t x) { + +static inline size_t mi_clz_generic32(uint32_t x) { // de Bruijn multiplication, see static const uint8_t debruijn[32] = { 31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1, @@ -914,37 +1024,61 @@ static inline size_t mi_clz32(uint32_t x) { x |= x >> 4; x |= x >> 8; x |= x >> 16; - return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27]; + return debruijn[(uint32_t)(x * (uint32_t)(0x07C4ACDDU)) >> 27]; } -static inline size_t mi_clz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; -#if (MI_INTPTR_BITS <= 32) - return mi_clz32((uint32_t)x); -#else - size_t count = mi_clz32((uint32_t)(x >> 32)); - if (count < 32) return count; - return (32 + mi_clz32((uint32_t)x)); -#endif +static inline size_t mi_ctz(size_t x) { + if (x==0) return MI_SIZE_BITS; + #if (MI_SIZE_BITS <= 32) + return mi_ctz_generic32((uint32_t)x); + #else + const uint32_t lo = (uint32_t)x; + if (lo != 0) { + return mi_ctz_generic32(lo); + } + else { + return (32 + mi_ctz_generic32((uint32_t)(x>>32))); + } + #endif } -static inline size_t mi_ctz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; -#if (MI_INTPTR_BITS <= 32) - return mi_ctz32((uint32_t)x); -#else - size_t count = mi_ctz32((uint32_t)x); - if (count < 32) return count; - return (32 + mi_ctz32((uint32_t)(x>>32))); -#endif + +static inline size_t mi_clz(size_t x) { + if (x==0) return MI_SIZE_BITS; + #if (MI_SIZE_BITS <= 32) + return mi_clz_generic32((uint32_t)x); + #else + 
const uint32_t hi = (uint32_t)(x>>32); + if (hi != 0) { + return mi_clz_generic32(hi); + } + else { + return 32 + mi_clz_generic32((uint32_t)x); + } + #endif } #endif -// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero) -static inline size_t mi_bsr(uintptr_t x) { - return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x)); +// "bit scan reverse": Return index of the highest bit (or MI_SIZE_BITS if `x` is zero) +static inline size_t mi_bsr(size_t x) { + return (x==0 ? MI_SIZE_BITS : MI_SIZE_BITS - 1 - mi_clz(x)); } +size_t _mi_popcount_generic(size_t x); + +static inline size_t mi_popcount(size_t x) { + if (x<=1) return x; + if (x==SIZE_MAX) return MI_SIZE_BITS; + #if defined(__GNUC__) + #if (SIZE_MAX == ULONG_MAX) + return __builtin_popcountl(x); + #else + return __builtin_popcountll(x); + #endif + #else + return _mi_popcount_generic(x); + #endif +} // --------------------------------------------------------------------------------- // Provide our own `_mi_memcpy` for potential performance optimizations. @@ -956,9 +1090,10 @@ static inline size_t mi_bsr(uintptr_t x) { #if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) #include -extern bool _mi_cpu_has_fsrm; +extern mi_decl_hidden bool _mi_cpu_has_fsrm; +extern mi_decl_hidden bool _mi_cpu_has_erms; static inline void _mi_memcpy(void* dst, const void* src, size_t n) { - if (_mi_cpu_has_fsrm) { + if ((_mi_cpu_has_fsrm && n <= 128) || (_mi_cpu_has_erms && n > 128)) { __movsb((unsigned char*)dst, (const unsigned char*)src, n); } else { @@ -966,7 +1101,7 @@ static inline void _mi_memcpy(void* dst, const void* src, size_t n) { } } static inline void _mi_memzero(void* dst, size_t n) { - if (_mi_cpu_has_fsrm) { + if ((_mi_cpu_has_fsrm && n <= 128) || (_mi_cpu_has_erms && n > 128)) { __stosb((unsigned char*)dst, 0, n); } else { diff --git a/system/lib/mimalloc/include/mimalloc/prim.h b/system/lib/mimalloc/include/mimalloc/prim.h index 3f4574ddd9270..1087d9b8dad30 100644 --- a/system/lib/mimalloc/include/mimalloc/prim.h +++ b/system/lib/mimalloc/include/mimalloc/prim.h @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -22,12 +22,14 @@ terms of the MIT license. A copy of the license can be found in the file // OS memory configuration typedef struct mi_os_mem_config_s { - size_t page_size; // default to 4KiB - size_t large_page_size; // 0 if not supported, usually 2MiB (4MiB on Windows) - size_t alloc_granularity; // smallest allocation size (usually 4KiB, on Windows 64KiB) - bool has_overcommit; // can we reserve more memory than can be actually committed? - bool has_partial_free; // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc) - bool has_virtual_reserve; // supports virtual address space reservation? 
(if true we can reserve virtual address space without using commit or physical memory) + size_t page_size; // default to 4KiB + size_t large_page_size; // 0 if not supported, usually 2MiB (4MiB on Windows) + size_t alloc_granularity; // smallest allocation size (usually 4KiB, on Windows 64KiB) + size_t physical_memory_in_kib; // physical memory size in KiB + size_t virtual_address_bits; // usually 48 or 56 bits on 64-bit systems. (used to determine secure randomization) + bool has_overcommit; // can we reserve more memory than can be actually committed? + bool has_partial_free; // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc) + bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory) } mi_os_mem_config_t; // Initialize @@ -41,9 +43,10 @@ int _mi_prim_free(void* addr, size_t size ); // If `commit` is false, the virtual memory range only needs to be reserved (with no access) // which will later be committed explicitly using `_mi_prim_commit`. // `is_zero` is set to true if the memory was zero initialized (as on most OS's) +// The `hint_addr` address is either `NULL` or a preferred allocation address but can be ignored. // pre: !commit => !allow_large // try_alignment >= _mi_os_page_size() and a power of 2 -int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr); +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr); // Commit memory. Returns error code or 0 on success. // For example, on Linux this would make the memory PROT_READ|PROT_WRITE. @@ -56,10 +59,15 @@ int _mi_prim_commit(void* addr, size_t size, bool* is_zero); // pre: needs_recommit != NULL int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit); -// Reset memory. The range keeps being accessible but the content might be reset. +// Reset memory. The range keeps being accessible but the content might be reset to zero at any moment. // Returns error code or 0 on success. int _mi_prim_reset(void* addr, size_t size); +// Reuse memory. This is called for memory that is already committed but +// may have been reset (`_mi_prim_reset`) or decommitted (`_mi_prim_decommit`) where `needs_recommit` was false. +// Returns error code or 0 on success. On most platforms this is a no-op. +int _mi_prim_reuse(void* addr, size_t size); + // Protect memory. Returns error code or 0 on success. int _mi_prim_protect(void* addr, size_t size, bool protect); @@ -116,13 +124,10 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap); //------------------------------------------------------------------- -// Thread id: `_mi_prim_thread_id()` -// -// Getting the thread id should be performant as it is called in the -// fast path of `_mi_free` and we specialize for various platforms as -// inlined definitions. Regular code should call `init.c:_mi_thread_id()`. -// We only require _mi_prim_thread_id() to return a unique id -// for each thread (unequal to zero). +// Access to TLS (thread local storage) slots. +// We need fast access to both a unique thread id (in `free.c:mi_free`) and +// to a thread-local heap pointer (in `alloc.c:mi_malloc`). +// To achieve this we use specialized code for various platforms. 
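+// (editor annotation, not upstream code) depending on the platform this resolves to a raw
+// TLS slot read (glibc, Bionic, FreeBSD/OpenBSD, Apple), a fixed TEB offset on Windows,
+// `__builtin_thread_pointer` where it is reliable, or a pthread key as the portable fallback.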
//------------------------------------------------------------------- // On some libc + platform combinations we can directly access a thread-local storage (TLS) slot. @@ -134,14 +139,14 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap); // but unfortunately we can not detect support reliably (see issue #883) // We also use it on Apple OS as we use a TLS slot for the default heap there. #if defined(__GNUC__) && ( \ - (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ + (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \ || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__) || defined(__POWERPC__))) \ - || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ + || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \ || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ ) -#define MI_HAS_TLS_SLOT +#define MI_HAS_TLS_SLOT 1 static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept { void* res; @@ -202,13 +207,58 @@ static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexce #endif } +#elif _WIN32 && MI_WIN_USE_FIXED_TLS && !defined(MI_WIN_USE_FLS) + +// On windows we can store the thread-local heap at a fixed TLS slot to avoid +// thread-local initialization checks in the fast path. +// We allocate a user TLS slot at process initialization (see `windows/prim.c`) +// and store the offset `_mi_win_tls_offset`. +#define MI_HAS_TLS_SLOT 1 // 2 = we can reliably initialize the slot (saving a test on each malloc) + +extern mi_decl_hidden size_t _mi_win_tls_offset; + +#if MI_WIN_USE_FIXED_TLS > 1 +#define MI_TLS_SLOT (MI_WIN_USE_FIXED_TLS) +#elif MI_SIZE_SIZE == 4 +#define MI_TLS_SLOT (0x0E10 + _mi_win_tls_offset) // User TLS slots +#else +#define MI_TLS_SLOT (0x1480 + _mi_win_tls_offset) // User TLS slots +#endif + +static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept { + #if (_M_X64 || _M_AMD64) && !defined(_M_ARM64EC) + return (void*)__readgsqword((unsigned long)slot); // direct load at offset from gs + #elif _M_IX86 && !defined(_M_ARM64EC) + return (void*)__readfsdword((unsigned long)slot); // direct load at offset from fs + #else + return ((void**)NtCurrentTeb())[slot / sizeof(void*)]; + #endif +} +static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept { + ((void**)NtCurrentTeb())[slot / sizeof(void*)] = value; +} + #endif + + +//------------------------------------------------------------------- +// Get a fast unique thread id. +// +// Getting the thread id should be performant as it is called in the +// fast path of `_mi_free` and we specialize for various platforms as +// inlined definitions. Regular code should call `init.c:_mi_thread_id()`. +// We only require _mi_prim_thread_id() to return a unique id +// for each thread (unequal to zero). +//------------------------------------------------------------------- + + // Do we have __builtin_thread_pointer? 
This would be the preferred way to get a unique thread id // but unfortunately, it seems we cannot test for this reliably at this time (see issue #883) // Nevertheless, it seems needed on older graviton platforms (see issue #851). // For now, we only enable this for specific platforms. #if !defined(__APPLE__) /* on apple (M1) the wrong register is read (tpidr_el0 instead of tpidrro_el0) so fall back to TLS slot assembly ()*/ \ + && !defined(__CYGWIN__) \ && !defined(MI_LIBC_MUSL) \ && (!defined(__clang_major__) || __clang_major__ >= 14) /* older clang versions emit bad code; fall back to using the TLS slot () */ #if (defined(__GNUC__) && (__GNUC__ >= 7) && defined(__aarch64__)) /* aarch64 for older gcc versions (issue #851) */ \ @@ -221,8 +271,8 @@ static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexce // defined in `init.c`; do not use these directly -extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from -extern bool _mi_process_is_initialized; // has mi_process_init been called? +extern mi_decl_hidden mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from +extern mi_decl_hidden bool _mi_process_is_initialized; // has mi_process_init been called? static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept; @@ -235,10 +285,6 @@ static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { #elif defined(_WIN32) -#ifndef WIN32_LEAN_AND_MEAN -#define WIN32_LEAN_AND_MEAN -#endif -#include static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { // Windows: works on Intel and ARM in both 32- and 64-bit return (uintptr_t)NtCurrentTeb(); @@ -251,7 +297,7 @@ static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { return (uintptr_t)__builtin_thread_pointer(); } -#elif defined(MI_HAS_TLS_SLOT) +#elif MI_HAS_TLS_SLOT static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { #if defined(__BIONIC__) @@ -278,7 +324,8 @@ static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { /* ---------------------------------------------------------------------------------------- -The thread local default heap: `_mi_prim_get_default_heap()` +Get the thread local default heap: `_mi_prim_get_default_heap()` + This is inlined here as it is on the fast path for allocation functions. On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a @@ -315,19 +362,21 @@ static inline mi_heap_t* mi_prim_get_default_heap(void); #endif -#if defined(MI_TLS_SLOT) +#if MI_TLS_SLOT # if !defined(MI_HAS_TLS_SLOT) # error "trying to use a TLS slot for the default heap, but the mi_prim_tls_slot primitives are not defined" # endif static inline mi_heap_t* mi_prim_get_default_heap(void) { mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT); + #if MI_HAS_TLS_SLOT == 1 // check if the TLS slot is initialized if mi_unlikely(heap == NULL) { #ifdef __GNUC__ __asm(""); // prevent conditional load of the address of _mi_heap_empty #endif heap = (mi_heap_t*)&_mi_heap_empty; } + #endif return heap; } @@ -351,7 +400,7 @@ static inline mi_heap_t* mi_prim_get_default_heap(void) { #elif defined(MI_TLS_PTHREAD) -extern pthread_key_t _mi_heap_default_key; +extern mi_decl_hidden pthread_key_t _mi_heap_default_key; static inline mi_heap_t* mi_prim_get_default_heap(void) { mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key)); return (mi_unlikely(heap == NULL) ? 
(mi_heap_t*)&_mi_heap_empty : heap); @@ -369,5 +418,4 @@ static inline mi_heap_t* mi_prim_get_default_heap(void) { #endif // mi_prim_get_default_heap() - #endif // MIMALLOC_PRIM_H diff --git a/system/lib/mimalloc/include/mimalloc/track.h b/system/lib/mimalloc/include/mimalloc/track.h index a659d94044670..4b5709e2b5411 100644 --- a/system/lib/mimalloc/include/mimalloc/track.h +++ b/system/lib/mimalloc/include/mimalloc/track.h @@ -34,7 +34,7 @@ The corresponding `mi_track_free` still uses the block start pointer and origina The `mi_track_resize` is currently unused but could be called on reallocations within a block. `mi_track_init` is called at program start. -The following macros are for tools like asan and valgrind to track whether memory is +The following macros are for tools like asan and valgrind to track whether memory is defined, undefined, or not accessible at all: #define mi_track_mem_defined(p,size) @@ -82,10 +82,6 @@ defined, undefined, or not accessible at all: #define MI_TRACK_HEAP_DESTROY 1 #define MI_TRACK_TOOL "ETW" -#ifndef WIN32_LEAN_AND_MEAN -#define WIN32_LEAN_AND_MEAN -#endif -#include #include "../src/prim/windows/etw.h" #define mi_track_init() EventRegistermicrosoft_windows_mimalloc(); @@ -96,7 +92,7 @@ defined, undefined, or not accessible at all: // no tracking #define MI_TRACK_ENABLED 0 -#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_HEAP_DESTROY 0 #define MI_TRACK_TOOL "none" #define mi_track_malloc_size(p,reqsize,size,zero) diff --git a/system/lib/mimalloc/include/mimalloc/types.h b/system/lib/mimalloc/include/mimalloc/types.h index 2fdde904bbdb3..a15d9cba4658c 100644 --- a/system/lib/mimalloc/include/mimalloc/types.h +++ b/system/lib/mimalloc/include/mimalloc/types.h @@ -22,6 +22,7 @@ terms of the MIT license. A copy of the license can be found in the file // -------------------------------------------------------------------------- +#include #include // ptrdiff_t #include // uintptr_t, uint16_t, etc #include "atomic.h" // _Atomic @@ -66,13 +67,20 @@ terms of the MIT license. A copy of the license can be found in the file // #define MI_DEBUG 2 // + internal assertion checks // #define MI_DEBUG 3 // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON) #if !defined(MI_DEBUG) -#if !defined(NDEBUG) || defined(_DEBUG) -#define MI_DEBUG 2 -#else +#if defined(MI_BUILD_RELEASE) || defined(NDEBUG) #define MI_DEBUG 0 +#else +#define MI_DEBUG 2 #endif #endif +// Use guard pages behind objects of a certain size (set by the MIMALLOC_DEBUG_GUARDED_MIN/MAX options) +// Padding should be disabled when using guard pages +// #define MI_GUARDED 1 +#if defined(MI_GUARDED) +#define MI_PADDING 0 +#endif + // Reserve extra padding at the end of each block to be more resilient against heap block overflows. // The padding can detect buffer overflow on free. 
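// (editor annotation, not upstream code) padding appends a small canary after each block that
// is checked when the block is freed, while MI_GUARDED (above) places an OS guard page behind
// sampled objects instead; this is why MI_PADDING is forced to 0 when MI_GUARDED is defined.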
#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW)) @@ -190,14 +198,16 @@ typedef int32_t mi_ssize_t; #define MI_SMALL_PAGE_SIZE (MI_ZU(1)<= 655360) #error "mimalloc internal: define more bins" @@ -237,13 +247,20 @@ typedef struct mi_block_s { mi_encoded_t next; } mi_block_t; +#if MI_GUARDED +// we always align guarded pointers in a block at an offset +// the block `next` field is then used as a tag to distinguish regular offset aligned blocks from guarded ones +#define MI_BLOCK_TAG_ALIGNED ((mi_encoded_t)(0)) +#define MI_BLOCK_TAG_GUARDED (~MI_BLOCK_TAG_ALIGNED) +#endif + // The delayed flags are used for efficient multi-threaded free-ing typedef enum mi_delayed_e { MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list - MI_NEVER_DELAYED_FREE = 3 // sticky: used for abondoned pages without a owning heap; this only resets on page reclaim + MI_NEVER_DELAYED_FREE = 3 // sticky: used for abandoned pages without a owning heap; this only resets on page reclaim } mi_delayed_t; @@ -260,7 +277,7 @@ typedef union mi_page_flags_s { #else // under thread sanitizer, use a byte for each flag to suppress warning, issue #130 typedef union mi_page_flags_s { - uint16_t full_aligned; + uint32_t full_aligned; struct { uint8_t in_full; uint8_t has_aligned; @@ -319,7 +336,7 @@ typedef struct mi_page_s { mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) uint16_t used; // number of blocks in use (including blocks in `thread_free`) uint8_t block_size_shift; // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`) - uint8_t heap_tag; // tag of the owning heap, used for separated heaps by object type + uint8_t heap_tag; // tag of the owning heap, used to separate heaps by object type // padding size_t block_size; // size available in each block (always `>0`) uint8_t* page_start; // start of the page area containing the blocks @@ -408,7 +425,7 @@ static inline bool mi_memkind_is_os(mi_memkind_t memkind) { typedef struct mi_memid_os_info { void* base; // actual base address of the block (used for offset aligned allocations) - size_t alignment; // alignment at allocation + size_t size; // full allocation size } mi_memid_os_info_t; typedef struct mi_memid_arena_info { @@ -430,7 +447,7 @@ typedef struct mi_memid_s { // ----------------------------------------------------------------------------------------- -// Segments are large allocated memory blocks (8mb on 64 bit) from arenas or the OS. +// Segments are large allocated memory blocks (32mb on 64 bit) from arenas or the OS. // // Inside segments we allocated fixed size mimalloc pages (`mi_page_t`) that contain blocks. // The start of a segment is this structure with a fixed number of slice entries (`slices`) @@ -442,12 +459,16 @@ typedef struct mi_memid_s { // For slices, the `block_size` field is repurposed to signify if a slice is used (`1`) or not (`0`). // Small and medium pages use a fixed amount of slices to reduce slice fragmentation, while // large and huge pages span a variable amount of slices. 
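// (editor annotation, not upstream code) `mi_subproc_t` is forward-declared below because each
// segment now records the sub-process it belongs to (the `subproc` field); sub-processes do not
// reclaim or visit segments owned by other sub-processes.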
+ +typedef struct mi_subproc_s mi_subproc_t; + typedef struct mi_segment_s { // constant fields mi_memid_t memid; // memory id for arena/OS allocation bool allow_decommit; // can we decommmit the memory bool allow_purge; // can we purge the memory (reset or decommit) size_t segment_size; + mi_subproc_t* subproc; // segment belongs to sub process // segment fields mi_msecs_t purge_expire; // purge slices in the `purge_mask` after this time @@ -457,12 +478,16 @@ typedef struct mi_segment_s { // from here is zero initialized struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`) bool was_reclaimed; // true if it was reclaimed (used to limit on-free reclamation) + bool dont_free; // can be temporarily true to ensure the segment is not freed size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`) size_t abandoned_visits; // count how often this segment is visited during abondoned reclamation (to force reclaim if it takes too long) size_t used; // count of pages in use uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie` + struct mi_segment_s* abandoned_os_next; // only used for abandoned segments outside arena's, and only if `mi_option_visit_abandoned` is enabled + struct mi_segment_s* abandoned_os_prev; + size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT` size_t segment_info_slices; // initial count of slices that we are using for segment info and possible guard pages. @@ -537,14 +562,75 @@ struct mi_heap_s { size_t page_count; // total number of pages in the `pages` queues. size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues) size_t page_retired_max; // largest retired index into the `pages` array. + long generic_count; // how often is `_mi_malloc_generic` called? + long generic_collect_count; // how often is `_mi_malloc_generic` called without collecting? mi_heap_t* next; // list of heaps per thread bool no_reclaim; // `true` if this heap should not reclaim abandoned pages uint8_t tag; // custom tag, can be used for separating heaps based on the object types + #if MI_GUARDED + size_t guarded_size_min; // minimal size for guarded objects + size_t guarded_size_max; // maximal size for guarded objects + size_t guarded_sample_rate; // sample rate (set to 0 to disable guarded pages) + size_t guarded_sample_count; // current sample count (counting down to 0) + #endif mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size. mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin") }; +// ------------------------------------------------------ +// Sub processes do not reclaim or visit segments +// from other sub processes. These are essentially the +// static variables of a process. 
+// ------------------------------------------------------ + +struct mi_subproc_s { + _Atomic(size_t) abandoned_count; // count of abandoned segments for this sub-process + _Atomic(size_t) abandoned_os_list_count; // count of abandoned segments in the os-list + mi_lock_t abandoned_os_lock; // lock for the abandoned os segment list (outside of arena's) (this lock protect list operations) + mi_lock_t abandoned_os_visit_lock; // ensure only one thread per subproc visits the abandoned os list + mi_segment_t* abandoned_os_list; // doubly-linked list of abandoned segments outside of arena's (in OS allocated memory) + mi_segment_t* abandoned_os_list_tail; // the tail-end of the list + mi_memid_t memid; // provenance of this memory block +}; + + +// ------------------------------------------------------ +// Thread Local data +// ------------------------------------------------------ + +// A "span" is is an available range of slices. The span queues keep +// track of slice spans of at most the given `slice_count` (but more than the previous size class). +typedef struct mi_span_queue_s { + mi_slice_t* first; + mi_slice_t* last; + size_t slice_count; +} mi_span_queue_t; + +#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT) + +// Segments thread local data +typedef struct mi_segments_tld_s { + mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments + size_t count; // current number of segments; + size_t peak_count; // peak number of segments + size_t current_size; // current size of all segments + size_t peak_size; // peak size of all segments + size_t reclaim_count;// number of reclaimed (abandoned) segments + mi_subproc_t* subproc; // sub-process this thread belongs to. + mi_stats_t* stats; // points to tld stats +} mi_segments_tld_t; + +// Thread local data +struct mi_tld_s { + unsigned long long heartbeat; // monotonic heartbeat count + bool recurse; // true if deferred was called; used to prevent infinite recursion. + mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted) + mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates) + mi_segments_tld_t segments; // segment tld + mi_stats_t stats; // statistics +}; + // ------------------------------------------------------ // Debug @@ -560,30 +646,10 @@ struct mi_heap_s { #define MI_DEBUG_PADDING (0xDE) #endif -#if (MI_DEBUG) -// use our own assertion to print without memory allocation -void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func ); -#define mi_assert(expr) ((expr) ? 
(void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__)) -#else -#define mi_assert(x) -#endif - -#if (MI_DEBUG>1) -#define mi_assert_internal mi_assert -#else -#define mi_assert_internal(x) -#endif - -#if (MI_DEBUG>2) -#define mi_assert_expensive mi_assert -#else -#define mi_assert_expensive(x) -#endif // ------------------------------------------------------ // Statistics // ------------------------------------------------------ - #ifndef MI_STAT #if (MI_DEBUG>0) #define MI_STAT 2 @@ -592,114 +658,28 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line #endif #endif -typedef struct mi_stat_count_s { - int64_t allocated; - int64_t freed; - int64_t peak; - int64_t current; -} mi_stat_count_t; - -typedef struct mi_stat_counter_s { - int64_t total; - int64_t count; -} mi_stat_counter_t; - -typedef struct mi_stats_s { - mi_stat_count_t segments; - mi_stat_count_t pages; - mi_stat_count_t reserved; - mi_stat_count_t committed; - mi_stat_count_t reset; - mi_stat_count_t purged; - mi_stat_count_t page_committed; - mi_stat_count_t segments_abandoned; - mi_stat_count_t pages_abandoned; - mi_stat_count_t threads; - mi_stat_count_t normal; - mi_stat_count_t huge; - mi_stat_count_t large; - mi_stat_count_t malloc; - mi_stat_count_t segments_cache; - mi_stat_counter_t pages_extended; - mi_stat_counter_t mmap_calls; - mi_stat_counter_t commit_calls; - mi_stat_counter_t reset_calls; - mi_stat_counter_t purge_calls; - mi_stat_counter_t page_no_retire; - mi_stat_counter_t searches; - mi_stat_counter_t normal_count; - mi_stat_counter_t huge_count; - mi_stat_counter_t large_count; - mi_stat_counter_t arena_count; - mi_stat_counter_t arena_crossover_count; - mi_stat_counter_t arena_rollback_count; -#if MI_STAT>1 - mi_stat_count_t normal_bins[MI_BIN_HUGE+1]; -#endif -} mi_stats_t; - - +// add to stat keeping track of the peak void _mi_stat_increase(mi_stat_count_t* stat, size_t amount); void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount); +void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount); +// counters can just be increased void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount); #if (MI_STAT) #define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount) #define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount) +#define mi_stat_adjust_decrease(stat,amount) _mi_stat_adjust_decrease( &(stat), amount) #define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount) #else -#define mi_stat_increase(stat,amount) (void)0 -#define mi_stat_decrease(stat,amount) (void)0 -#define mi_stat_counter_increase(stat,amount) (void)0 +#define mi_stat_increase(stat,amount) ((void)0) +#define mi_stat_decrease(stat,amount) ((void)0) +#define mi_stat_adjust_decrease(stat,amount) ((void)0) +#define mi_stat_counter_increase(stat,amount) ((void)0) #endif #define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount) #define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount) #define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount) - - -// ------------------------------------------------------ -// Thread Local data -// ------------------------------------------------------ - -// A "span" is is an available range of slices. The span queues keep -// track of slice spans of at most the given `slice_count` (but more than the previous size class). 
-typedef struct mi_span_queue_s { - mi_slice_t* first; - mi_slice_t* last; - size_t slice_count; -} mi_span_queue_t; - -#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT) - -// OS thread local data -typedef struct mi_os_tld_s { - size_t region_idx; // start point for next allocation - mi_stats_t* stats; // points to tld stats -} mi_os_tld_t; - - -// Segments thread local data -typedef struct mi_segments_tld_s { - mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments - size_t count; // current number of segments; - size_t peak_count; // peak number of segments - size_t current_size; // current size of all segments - size_t peak_size; // peak size of all segments - size_t reclaim_count;// number of reclaimed (abandoned) segments - mi_stats_t* stats; // points to tld stats - mi_os_tld_t* os; // points to os stats -} mi_segments_tld_t; - -// Thread local data -struct mi_tld_s { - unsigned long long heartbeat; // monotonic heartbeat count - bool recurse; // true if deferred was called; used to prevent infinite recursion. - mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted) - mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates) - mi_segments_tld_t segments; // segment tld - mi_os_tld_t os; // os tld - mi_stats_t stats; // statistics -}; +#define mi_heap_stat_adjust_decrease(heap,stat,amount) mi_stat_adjust_decrease( (heap)->tld->stats.stat, amount) #endif diff --git a/system/lib/mimalloc/readme.md b/system/lib/mimalloc/readme.md index a0296b43c35aa..71aaf7a24e839 100644 --- a/system/lib/mimalloc/readme.md +++ b/system/lib/mimalloc/readme.md @@ -12,17 +12,19 @@ is a general purpose allocator with excellent [performance](#performance) charac Initially developed by Daan Leijen for the runtime systems of the [Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages. -Latest release tag: `v2.1.7` (2024-05-21). -Latest v1 tag: `v1.8.7` (2024-05-21). +Latest release : `v3.1.4` (beta) (2025-06-09). +Latest v2 release: `v2.2.4` (2025-06-09). +Latest v1 release: `v1.9.4` (2024-06-09). mimalloc is a drop-in replacement for `malloc` and can be used in other programs without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as: ``` > LD_PRELOAD=/usr/lib/libmimalloc.so myprogram ``` -It also includes a robust way to override the default allocator in [Windows](#override_on_windows). Notable aspects of the design include: +It also includes a way to dynamically override the default allocator in [Windows](#override_on_windows). +Notable aspects of the design include: -- __small and consistent__: the library is about 8k LOC using simple and +- __small and consistent__: the library is about 10k LOC using simple and consistent data structures. This makes it very suitable to integrate and adapt in other projects. For runtime systems it provides hooks for a monotonic _heartbeat_ and deferred freeing (for @@ -70,35 +72,52 @@ Enjoy! ### Branches -* `master`: latest stable release (based on `dev-slice`). -* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's. -* `dev-slice`: development branch for mimalloc v2. This branch is downstream of `dev` (and is essentially equal to `dev` except for -`src/segment.c`) +* `main`: latest stable release (still based on `dev2`). +* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's. 
+* `dev2`: development branch for mimalloc v2. This branch is downstream of `dev` + (and is essentially equal to `dev` except for `src/segment.c`). Uses larger sliced segments to manage + mimalloc pages that can reduce fragmentation. +* `dev3`: development branch for mimalloc v3 beta. This branch is downstream of `dev`. This version + simplifies the lock-free ownership of previous versions, and improves sharing of memory between + threads. On certain large workloads this version may use (much) less memory. ### Releases -Note: the `v2.x` version has a different algorithm for managing internal mimalloc pages (as slices) that tends to use reduce -memory usage - and fragmentation compared to mimalloc `v1.x` (especially for large workloads). Should otherwise have similar performance - (see [below](#performance)); please report if you observe any significant performance regression. - +* 2025-06-09, `v1.9.4`, `v2.2.4`, `v3.1.4` (beta) : Some important bug fixes, including a case where OS memory + was not always fully released. Improved v3 performance, build on XBox, fix build on Android, support interpose + for older macOS versions, use MADV_FREE_REUSABLE on macOS, always check commit success, better support for Windows + fixed TLS offset, etc. +* 2025-03-28, `v1.9.3`, `v2.2.3`, `v3.0.3` (beta) : Various small bug and build fixes, including: + fix arm32 pre v7 builds, fix mingw build, get runtime statistics, improve statistic commit counts, + fix execution on non BMI1 x64 systems. +* 2025-03-06, `v1.9.2`, `v2.2.2`, `v3.0.2-beta`: Various small bug and build fixes. + Add `mi_options_print`, `mi_arenas_print`, and the experimental `mi_stat_get` and `mi_stat_get_json`. + Add `mi_thread_set_in_threadpool` and `mi_heap_set_numa_affinity` (v3 only). Add vcpkg portfile. + Upgrade mimalloc-redirect to v1.3.2. `MI_OPT_ARCH` is off by default now but still assumes armv8.1-a on arm64 + for fast atomic operations. Add QNX support. +* 2025-01-03, `v1.8.9`, `v2.1.9`, `v3.0.1-alpha`: Interim release. Support Windows arm64. New [guarded](#guarded) build that can place OS + guard pages behind objects to catch buffer overflows as they occur. + Many small fixes: build on Windows arm64, cygwin, riscV, and dragonfly; fix Windows static library initialization to account for + thread local destructors (in Rust/C++); macOS tag change; macOS TLS slot fix; improve stats; + consistent `mimalloc.dll` on Windows (instead of `mimalloc-override.dll`); fix mimalloc-redirect on Win11 H2; + add 0-byte to canary; upstream CPython fixes; reduce .bss size; allow fixed TLS slot on Windows for improved performance. * 2024-05-21, `v1.8.7`, `v2.1.7`: Fix build issues on less common platforms. Started upstreaming patches from the CPython [integration](https://github.com/python/cpython/issues/113141#issuecomment-2119255217). Upstream `vcpkg` patches. * 2024-05-13, `v1.8.6`, `v2.1.6`: Fix build errors on various (older) platforms. Refactored aligned allocation. * 2024-04-22, `v1.8.4`, `v2.1.4`: Fixes various bugs and build issues. Add `MI_LIBC_MUSL` cmake flag for musl builds. Free-ing code is refactored into a separate module (`free.c`). Mimalloc page info is simplified with the block size - directly available (and new `block_size_shift` to improve aligned block free-ing). + directly available (and new `block_size_shift` to improve aligned block free-ing). 
New approach to collection of abandoned segments: When a thread terminates the segments it owns are abandoned (containing still live objects) and these can be - reclaimed by other threads. We no longer use a list of abandoned segments but this is now done using bitmaps in arena's + reclaimed by other threads. We no longer use a list of abandoned segments but this is now done using bitmaps in arena's which is more concurrent (and more aggressive). Abandoned memory can now also be reclaimed if a thread frees an object in an abandoned page (which can be disabled using `mi_option_abandoned_reclaim_on_free`). The option `mi_option_max_segment_reclaim` gives a maximum percentage of abandoned segments that can be reclaimed per try (=10%). -* 2023-04-24, `v1.8.2`, `v2.1.2`: Fixes build issues on freeBSD, musl, and C17 (UE 5.1.1). Reduce code size/complexity +* 2023-04-24, `v1.8.2`, `v2.1.2`: Fixes build issues on freeBSD, musl, and C17 (UE 5.1.1). Reduce code size/complexity by removing regions and segment-cache's and only use arenas with improved memory purging -- this may improve memory usage as well for larger services. Renamed options for consistency. Improved Valgrind and ASAN checking. - + * 2023-04-03, `v1.8.1`, `v2.1.1`: Fixes build issues on some platforms. * 2023-03-29, `v1.8.0`, `v2.1.0`: Improved support dynamic overriding on Windows 11. Improved tracing precision @@ -106,14 +125,14 @@ memory usage abstraction layer to make it easier to port and separate platform dependent code (in `src/prim`). Fixed C++ STL compilation on older Microsoft C++ compilers, and various small bug fixes. * 2022-12-23, `v1.7.9`, `v2.0.9`: Supports building with [asan](#asan) and improved [Valgrind](#valgrind) support. - Support arbitrary large alignments (in particular for `std::pmr` pools). - Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev). - Heap walks now visit all object (including huge objects). Support Windows nano server containers (by Johannes Schindelin,@dscho). + Support arbitrary large alignments (in particular for `std::pmr` pools). + Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev). + Heap walks now visit all object (including huge objects). Support Windows nano server containers (by Johannes Schindelin,@dscho). Various small bug fixes. * 2022-11-03, `v1.7.7`, `v2.0.7`: Initial support for [Valgrind](#valgrind) for leak testing and heap block overflow detection. Initial - support for attaching heaps to a speficic memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`, . + support for attaching heaps to a specific memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`, . 
* 2022-04-14, `v1.7.6`, `v2.0.6`: fix fallback path for aligned OS allocation on Windows, improve Windows aligned allocation even when compiling with older SDK's, fix dynamic overriding on macOS Monterey, fix MSVC C++ dynamic overriding, fix @@ -160,13 +179,13 @@ mimalloc is used in various large scale low-latency services and programs, for e ## Windows Open `ide/vs2022/mimalloc.sln` in Visual Studio 2022 and build. -The `mimalloc` project builds a static library (in `out/msvc-x64`), while the -`mimalloc-override` project builds a DLL for overriding malloc +The `mimalloc-lib` project builds a static library (in `out/msvc-x64`), while the +`mimalloc-override-dll` project builds a DLL for overriding malloc in the entire program. -## macOS, Linux, BSD, etc. +## Linux, macOS, BSD, etc. -We use [`cmake`](https://cmake.org)1 as the build system: +We use [`cmake`](https://cmake.org) as the build system: ``` > mkdir -p out/release @@ -189,32 +208,58 @@ maintains detailed statistics as: > cmake -DCMAKE_BUILD_TYPE=Debug ../.. > make ``` + This will name the shared library as `libmimalloc-debug.so`. -Finally, you can build a _secure_ version that uses guard pages, encrypted -free lists, etc., as: +Finally, you can build a _secure_ version that uses guard pages, encrypted free lists, etc., as: + ``` > mkdir -p out/secure > cd out/secure > cmake -DMI_SECURE=ON ../.. > make ``` + This will name the shared library as `libmimalloc-secure.so`. -Use `ccmake`2 instead of `cmake` -to see and customize all the available build options. +Use `cmake ../.. -LH` to see all the available build options. + +The examples use the default compiler. If you like to use another, use: + +``` +> CC=clang CXX=clang++ cmake ../.. +``` + +## Cmake with Visual Studio + +You can also use cmake on Windows. Open a Visual Studio 2022 development prompt +and invoke `cmake` with the right [generator](https://cmake.org/cmake/help/latest/generator/Visual%20Studio%2017%202022.html) +and architecture, like: + +``` +> cmake ..\.. -G "Visual Studio 17 2022" -A x64 -DMI_OVERRIDE=ON +``` + +The cmake build type is specified when actually building, for example: + +``` +> cmake --build . --config=Release +``` + +You can also install the [LLVM toolset](https://learn.microsoft.com/en-us/cpp/build/clang-support-msbuild?view=msvc-170#install-1) +on Windows to build with the `clang-cl` compiler directly: -Notes: -1. Install CMake: `sudo apt-get install cmake` -2. Install CCMake: `sudo apt-get install cmake-curses-gui` +``` +> cmake ../.. -G "Visual Studio 17 2022" -T ClangCl +``` -## Single source +## Single Source You can also directly build the single `src/static.c` file as part of your project without needing `cmake` at all. Make sure to also add the mimalloc `include` directory to the include path. -# Using the library +# Using the Library The preferred usage is including ``, linking with the shared- or static library, and using the `mi_malloc` API exclusively for allocation. For example, @@ -226,7 +271,7 @@ mimalloc uses only safe OS calls (`mmap` and `VirtualAlloc`) and can co-exist with other allocators linked to the same program. If you use `cmake`, you can simply use: ``` -find_package(mimalloc 1.4 REQUIRED) +find_package(mimalloc 1.8 REQUIRED) ``` in your `CMakeLists.txt` to find a locally installed mimalloc. Then use either: ``` @@ -240,7 +285,7 @@ to link with the static library. See `test\CMakeLists.txt` for an example. For best performance in C++ programs, it is also recommended to override the global `new` and `delete` operators. 
For convenience, mimalloc provides -[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. +[`mimalloc-new-delete.h`](include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator` interface. @@ -295,14 +340,14 @@ You can set further options either programmatically (using [`mi_option_set`](htt Advanced options: -- `MIMALLOC_ARENA_EAGER_COMMIT=2`: turns on eager commit for the large arenas (usually 1GiB) from which mimalloc - allocates segments and pages. Set this to 2 (default) to - only enable this on overcommit systems (e.g. Linux). Set this to 1 to enable explicitly on other systems - as well (like Windows or macOS) which may improve performance (as the whole arena is committed at once). - Note that eager commit only increases the commit but not the actual the peak resident set +- `MIMALLOC_ARENA_EAGER_COMMIT=2`: turns on eager commit for the large arenas (usually 1GiB) from which mimalloc + allocates segments and pages. Set this to 2 (default) to + only enable this on overcommit systems (e.g. Linux). Set this to 1 to enable explicitly on other systems + as well (like Windows or macOS) which may improve performance (as the whole arena is committed at once). + Note that eager commit only increases the commit but not the actual the peak resident set (rss) so it is generally ok to enable this. -- `MIMALLOC_PURGE_DELAY=N`: the delay in `N` milli-seconds (by default `10`) after which mimalloc will purge - OS pages that are not in use. This signals to the OS that the underlying physical memory can be reused which +- `MIMALLOC_PURGE_DELAY=N`: the delay in `N` milli-seconds (by default `10`) after which mimalloc will purge + OS pages that are not in use. This signals to the OS that the underlying physical memory can be reused which can reduce memory fragmentation especially in long running (server) programs. Setting `N` to `0` purges immediately when a page becomes unused which can improve memory usage but also decreases performance. Setting `N` to a higher value like `100` can improve performance (sometimes by a lot) at the cost of potentially using more memory at times. @@ -310,7 +355,7 @@ Advanced options: - `MIMALLOC_PURGE_DECOMMITS=1`: By default "purging" memory means unused memory is decommitted (`MEM_DECOMMIT` on Windows, `MADV_DONTNEED` (which decresease rss immediately) on `mmap` systems). Set this to 0 to instead "reset" unused memory on a purge (`MEM_RESET` on Windows, generally `MADV_FREE` (which does not decrease rss immediately) on `mmap` systems). - Mimalloc generally does not "free" OS memory but only "purges" OS memory, in other words, it tries to keep virtual + Mimalloc generally does not "free" OS memory but only "purges" OS memory, in other words, it tries to keep virtual address ranges and decommits within those ranges (to make the underlying physical memory available to other processes). Further options for large workloads and services: @@ -319,15 +364,16 @@ Further options for large workloads and services: at runtime. Setting `N` to 1 may avoid problems in some virtual environments. 
Also, setting it to a lower number than the actual NUMA nodes is fine and will only cause threads to potentially allocate more memory across actual NUMA nodes (but this can happen in any case as NUMA local allocation is always a best effort but not guaranteed). -- `MIMALLOC_ALLOW_LARGE_OS_PAGES=1`: use large OS pages (2 or 4MiB) when available; for some workloads this can significantly - improve performance. When this option is disabled, it also disables transparent huge pages (THP) for the process - (on Linux and Android). Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs +- `MIMALLOC_ALLOW_LARGE_OS_PAGES=0`: Set to 1 to use large OS pages (2 or 4MiB) when available; for some workloads this can significantly + improve performance. When this option is disabled (default), it also disables transparent huge pages (THP) for the process + (on Linux and Android). On Linux the default setting is 2 -- this enables the use of large pages through THP only. + Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs to explicitly give permissions for large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that - can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead whenever possible). + can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead whenever possible). - `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where `N` is the number of 1GiB _huge_ OS pages. This reserves the huge pages at startup and sometimes this can give a large (latency) performance improvement on big workloads. - Usually it is better to not use `MIMALLOC_ALLOW_LARGE_OS_PAGES=1` in combination with this setting. Just like large + Usually it is better to not use `MIMALLOC_ALLOW_LARGE_OS_PAGES=1` in combination with this setting. Just like large OS pages, use with care as reserving contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at startup only once). @@ -367,13 +413,39 @@ As always, evaluate with care as part of an overall security strategy as all of ## Debug Mode -When _mimalloc_ is built using debug mode, various checks are done at runtime to catch development errors. +When _mimalloc_ is built using debug mode, (`-DCMAKE_BUILD_TYPE=Debug`), +various checks are done at runtime to catch development errors. - Statistics are maintained in detail for each object size. They can be shown using `MIMALLOC_SHOW_STATS=1` at runtime. - All objects have padding at the end to detect (byte precise) heap block overflows. - Double free's, and freeing invalid heap pointers are detected. - Corrupted free-lists and some forms of use-after-free are detected. +## Guarded Mode + +_mimalloc_ can be build in guarded mode using the `-DMI_GUARDED=ON` flags in `cmake`. +This enables placing OS guard pages behind certain object allocations to catch buffer overflows as they occur. +This can be invaluable to catch buffer-overflow bugs in large programs. However, it also means that any object +allocated with a guard page takes at least 8 KiB memory for the guard page and its alignment. As such, allocating +a guard page for every allocation may be too expensive both in terms of memory, and in terms of performance with +many system calls. 
Therefore, there are various environment variables (and options) to tune this: + +- `MIMALLOC_GUARDED_SAMPLE_RATE=N`: Set the sample rate to `N` (by default 4000). This mode places a guard page + behind every `N` suitable object allocations (per thread). Since the performance in guarded mode without placing + guard pages is close to release mode, this can be used to enable guard pages even in production to catch latent + buffer overflow bugs. Set the sample rate to `1` to guard every object, and to `0` to place no guard pages at all. + +- `MIMALLOC_GUARDED_SAMPLE_SEED=N`: Start sampling at `N` (by default random). Can be used to reproduce a buffer + overflow if needed. + +- `MIMALLOC_GUARDED_MIN=N`, `MIMALLOC_GUARDED_MAX=N`: Minimal and maximal _rounded_ object sizes for which a guard + page is considered (`0` and `1GiB` respectively). If you suspect a buffer overflow occurs with an object of size + 141, set the minimum and maximum to `148` and the sample rate to `1` to have all of those guarded. + +- `MIMALLOC_GUARDED_PRECISE=1`: If we have an object of size 13, we would usually place it an aligned 16 bytes in + front of the guard page. Using `MIMALLOC_GUARDED_PRECISE` places it exactly 13 bytes before a page so that even + a 1 byte overflow is detected. This violates the C/C++ minimal alignment guarantees though so use with care. + # Overriding Standard Malloc @@ -417,41 +489,44 @@ the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-i ### Dynamic Override on Windows -Dynamically overriding on mimalloc on Windows -is robust and has the particular advantage to be able to redirect all malloc/free calls that go through -the (dynamic) C runtime allocator, including those from other DLL's or libraries. -As it intercepts all allocation calls on a low level, it can be used reliably -on large programs that include other 3rd party components. -There are four requirements to make the overriding work robustly: +We use a separate redirection DLL to override mimalloc on Windows +such that we redirect all malloc/free calls that go through the (dynamic) C runtime allocator, +including those from other DLL's or libraries. As it intercepts all allocation calls on a low level, +it can be used on large programs that include other 3rd party components. +There are four requirements to make the overriding work well: 1. Use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). -2. Link your program explicitly with `mimalloc-override.dll` library. - To ensure the `mimalloc-override.dll` is loaded at run-time it is easiest to insert some - call to the mimalloc API in the `main` function, like `mi_version()` - (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project - for an example on how to use this. -3. The [`mimalloc-redirect.dll`](bin) (or `mimalloc-redirect32.dll`) must be put - in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency of that DLL). - The redirection DLL ensures that all calls to the C runtime malloc API get redirected to - mimalloc functions (which reside in `mimalloc-override.dll`). -4. Ensure the `mimalloc-override.dll` comes as early as possible in the import + +2. Link your program explicitly with the `mimalloc.dll.lib` export library for the `mimalloc.dll`. + (which must be compiled with `-DMI_OVERRIDE=ON`, which is the default though). 
+ To ensure the `mimalloc.dll` is actually loaded at run-time it is easiest + to insert some call to the mimalloc API in the `main` function, like `mi_version()` + (or use the `/include:mi_version` switch on the linker command, or + similarly, `#pragma comment(linker, "/include:mi_version")` in some source file). + See the `mimalloc-test-override` project for an example on how to use this. + +3. The `mimalloc-redirect.dll` must be put in the same directory as the main + `mimalloc.dll` at runtime (as it is a dependency of that DLL). + The redirection DLL ensures that all calls to the C runtime malloc API get + redirected to mimalloc functions (which reside in `mimalloc.dll`). + +4. Ensure the `mimalloc.dll` comes as early as possible in the import list of the final executable (so it can intercept all potential allocations). + You can use `minject -l ` to check this if needed. -For best performance on Windows with C++, it -is also recommended to also override the `new`/`delete` operations (by including -[`mimalloc-new-delete.h`](include/mimalloc-new-delete.h) +For best performance on Windows with C++, it is also recommended to also override +the `new`/`delete` operations (by including [`mimalloc-new-delete.h`](include/mimalloc-new-delete.h) a single(!) source file in your project). The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic -overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected. +overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully +redirected. + +For different platforms than x64, you may need a specific [redirection dll](bin). +Furthermore, we cannot always re-link an executable or ensure `mimalloc.dll` comes +first in the import table. In such cases the [`minject`](bin) tool can be used +to patch the executable's import tables. -We cannot always re-link an executable with `mimalloc-override.dll`, and similarly, we cannot always -ensure the the DLL comes first in the import table of the final executable. -In many cases though we can patch existing executables without any recompilation -if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `mimalloc-override.dll` -into the import table (and put `mimalloc-redirect.dll` in the same folder) -Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388) or -the [`minject`](bin) program. ## Static override @@ -462,6 +537,7 @@ an object file instead of a library file as linkers give preference to that over archives to resolve symbols. To ensure that the standard malloc interface resolves to the _mimalloc_ library, link it as the first object file. For example: + ``` > gcc -o myprogram mimalloc.o myfile1.c ... ``` @@ -469,16 +545,17 @@ object file. For example: Another way to override statically that works on all platforms, is to link statically to mimalloc (as shown in the introduction) and include a header file in each source file that re-defines `malloc` etc. to `mi_malloc`. -This is provided by [`mimalloc-override.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-override.h). This only works reliably though if all sources are +This is provided by [`mimalloc-override.h`](include/mimalloc-override.h). This only works +reliably though if all sources are under your control or otherwise mixing of pointers from different heaps may occur! 
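
As a minimal sketch of this static-override approach (assuming the mimalloc `include` directory is on the include path and the mimalloc library or object file is linked first, as described above), a source file only needs to include the override header and can keep using the standard allocation calls:

```
// sketch: with mimalloc-override.h included, the standard calls below are re-defined
// to their mi_ equivalents (malloc -> mi_malloc, free -> mi_free, and so on).
#include <mimalloc-override.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  char* msg = (char*)malloc(32);        // resolves to mi_malloc(32)
  if (msg == NULL) return 1;
  strcpy(msg, "allocated by mimalloc");
  printf("%s\n", msg);
  free(msg);                            // resolves to mi_free(msg)
  return 0;
}
```

As noted above, this only stays safe when all allocating sources are under your control, so that pointers from differently overridden translation units are never mixed.
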
# Tools Generally, we recommend using the standard allocator with memory tracking tools, but mimalloc -can also be build to support the [address sanitizer][asan] or the excellent [Valgrind] tool. +can also be build to support the [address sanitizer][asan] or the excellent [Valgrind] tool. Moreover, it can be build to support Windows event tracing ([ETW]). -This has a small performance overhead but does allow detecting memory leaks and byte-precise +This has a small performance overhead but does allow detecting memory leaks and byte-precise buffer overflows directly on final executables. See also the `test/test-wrong.c` file to test with various tools. ## Valgrind @@ -505,9 +582,13 @@ you also need to tell `valgrind` to not intercept those calls itself, and use: By setting the `MIMALLOC_SHOW_STATS` environment variable you can check that mimalloc is indeed used and not the standard allocator. Even though the [Valgrind option][valgrind-soname] -is called `--soname-synonyms`, this also -works when overriding with a static library or object file. Unfortunately, it is not possible to -dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`. +is called `--soname-synonyms`, this also works when overriding with a static library or object file. +To dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`, use: + +``` +> valgrind --trace-children=yes --soname-synonyms=somalloc=*mimalloc* /usr/bin/env LD_PRELOAD=/usr/lib/libmimalloc.so -- +``` + See also the `test/test-wrong.c` file to test with `valgrind`. Valgrind support is in its initial development -- please report any issues. @@ -523,7 +604,7 @@ To build with the address sanitizer, use the `-DMI_TRACK_ASAN=ON` cmake option: > cmake ../.. -DMI_TRACK_ASAN=ON ``` -This can also be combined with secure mode or debug mode. +This can also be combined with secure mode or debug mode. You can then run your programs as:' ``` @@ -531,7 +612,7 @@ You can then run your programs as:' ``` When you link a program with an address sanitizer build of mimalloc, you should -generally compile that program too with the address sanitizer enabled. +generally compile that program too with the address sanitizer enabled. For example, assuming you build mimalloc in `out/debug`: ``` @@ -540,23 +621,23 @@ clang -g -o test-wrong -Iinclude test/test-wrong.c out/debug/libmimalloc-asan-de Since the address sanitizer redirects the standard allocation functions, on some platforms (macOSX for example) it is required to compile mimalloc with `-DMI_OVERRIDE=OFF`. -Adress sanitizer support is in its initial development -- please report any issues. +Address sanitizer support is in its initial development -- please report any issues. [asan]: https://github.com/google/sanitizers/wiki/AddressSanitizer ## ETW Event tracing for Windows ([ETW]) provides a high performance way to capture all allocations though -mimalloc and analyze them later. To build with ETW support, use the `-DMI_TRACK_ETW=ON` cmake option. +mimalloc and analyze them later. To build with ETW support, use the `-DMI_TRACK_ETW=ON` cmake option. -You can then capture an allocation trace using the Windows performance recorder (WPR), using the +You can then capture an allocation trace using the Windows performance recorder (WPR), using the `src/prim/windows/etw-mimalloc.wprp` profile. 
In an admin prompt, you can use: ``` > wpr -start src\prim\windows\etw-mimalloc.wprp -filemode > > wpr -stop .etl -``` -and then open `.etl` in the Windows Performance Analyzer (WPA), or +``` +and then open `.etl` in the Windows Performance Analyzer (WPA), or use a tool like [TraceControl] that is specialized for analyzing mimalloc traces. [ETW]: https://learn.microsoft.com/en-us/windows-hardware/test/wpt/event-tracing-for-windows diff --git a/system/lib/mimalloc/src/alloc-aligned.c b/system/lib/mimalloc/src/alloc-aligned.c index ba629ef30a4c2..3d3202eb57497 100644 --- a/system/lib/mimalloc/src/alloc-aligned.c +++ b/system/lib/mimalloc/src/alloc-aligned.c @@ -24,6 +24,33 @@ static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) { return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0); } +#if MI_GUARDED +static mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi_heap_t* heap, size_t size, size_t alignment, bool zero) mi_attr_noexcept { + // use over allocation for guarded blocksl + mi_assert_internal(alignment > 0 && alignment < MI_BLOCK_ALIGNMENT_MAX); + const size_t oversize = size + alignment - 1; + void* base = _mi_heap_malloc_guarded(heap, oversize, zero); + void* p = mi_align_up_ptr(base, alignment); + mi_track_align(base, p, (uint8_t*)p - (uint8_t*)base, size); + mi_assert_internal(mi_usable_size(p) >= size); + mi_assert_internal(_mi_is_aligned(p, alignment)); + return p; +} + +static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) { + const size_t rate = heap->guarded_sample_rate; + // only write if `rate!=0` so we don't write to the constant `_mi_heap_empty` + if (rate != 0) { heap->guarded_sample_rate = 0; } + void* p = _mi_heap_malloc_zero(heap, size, zero); + if (rate != 0) { heap->guarded_sample_rate = rate; } + return p; +} +#else +static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) { + return _mi_heap_malloc_zero(heap, size, zero); +} +#endif + // Fallback aligned allocation that over-allocates -- split out for better codegen static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept { @@ -38,22 +65,24 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down) if mi_unlikely(offset != 0) { // todo: cannot support offset alignment for very large alignments yet - #if MI_DEBUG > 0 +#if MI_DEBUG > 0 _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset); - #endif +#endif return NULL; } oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size); + // note: no guarded as alignment > 0 p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block // zero afterwards as only the area from the aligned_p may be committed! if (p == NULL) return NULL; } else { // otherwise over-allocate - oversize = size + alignment - 1; - p = _mi_heap_malloc_zero(heap, oversize, zero); + oversize = (size < MI_MAX_ALIGN_SIZE ? 
MI_MAX_ALIGN_SIZE : size) + alignment - 1; // adjust for size <= 16; with size 0 and aligment 64k, we would allocate a 64k block and pointing just beyond that. + p = mi_heap_malloc_zero_no_guarded(heap, oversize, zero); if (p == NULL) return NULL; } + mi_page_t* page = _mi_ptr_page(p); // .. and align within the allocation const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)` @@ -62,17 +91,27 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t mi_assert_internal(adjust < alignment); void* aligned_p = (void*)((uintptr_t)p + adjust); if (aligned_p != p) { - mi_page_t* page = _mi_ptr_page(p); mi_page_set_has_aligned(page, true); + #if MI_GUARDED + // set tag to aligned so mi_usable_size works with guard pages + if (adjust >= sizeof(mi_block_t)) { + mi_block_t* const block = (mi_block_t*)p; + block->next = MI_BLOCK_TAG_ALIGNED; + } + #endif _mi_padding_shrink(page, (mi_block_t*)p, adjust + size); } // todo: expand padding if overallocated ? - mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size); - mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p)); + mi_assert_internal(mi_page_usable_block_size(page) >= adjust + size); mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0); mi_assert_internal(mi_usable_size(aligned_p)>=size); mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust); + #if MI_DEBUG > 1 + mi_page_t* const apage = _mi_ptr_page(aligned_p); + void* unalign_p = _mi_page_ptr_unalign(apage, aligned_p); + mi_assert_internal(p == unalign_p); + #endif // now zero the block if needed if (alignment > MI_BLOCK_ALIGNMENT_MAX) { @@ -85,6 +124,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t if (p != aligned_p) { mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p)); + #if MI_GUARDED + mi_track_mem_defined(p, sizeof(mi_block_t)); + #endif } return aligned_p; } @@ -94,27 +136,27 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t* { mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment)); // we don't allocate more than MI_MAX_ALLOC_SIZE (see ) - if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) { + if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) { #if MI_DEBUG > 0 _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment); #endif return NULL; } - + // use regular allocation if it is guaranteed to fit the alignment constraints. // this is important to try as the fast path in `mi_heap_malloc_zero_aligned` only works when there exist // a page with the right block size, and if we always use the over-alloc fallback that would never happen. if (offset == 0 && mi_malloc_is_naturally_aligned(size,alignment)) { - void* p = _mi_heap_malloc_zero(heap, size, zero); + void* p = mi_heap_malloc_zero_no_guarded(heap, size, zero); mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0); - const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0; + const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0; if mi_likely(is_aligned_or_null) { return p; } else { // this should never happen if the `mi_malloc_is_naturally_aligned` check is correct.. 
mi_assert(false); - mi_free(p); + mi_free(p); } } @@ -122,6 +164,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t* return mi_heap_malloc_zero_aligned_at_overalloc(heap,size,alignment,offset,zero); } + // Primitive aligned allocation static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept { @@ -132,19 +175,22 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t #endif return NULL; } - + + #if MI_GUARDED + if (offset==0 && alignment < MI_BLOCK_ALIGNMENT_MAX && mi_heap_malloc_use_guarded(heap,size)) { + return mi_heap_malloc_guarded_aligned(heap, size, alignment, zero); + } + #endif + // try first if there happens to be a small block available with just the right alignment if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) { const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)` - const size_t padsize = size + MI_PADDING_SIZE; + const size_t padsize = size + MI_PADDING_SIZE; mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize); if mi_likely(page->free != NULL) { const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0; if mi_likely(is_aligned) { - #if MI_STAT>1 - mi_heap_stat_increase(heap, malloc, size); - #endif void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen mi_assert_internal(p != NULL); mi_assert_internal(((uintptr_t)p + offset) % alignment == 0); @@ -310,3 +356,5 @@ mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment); } + + diff --git a/system/lib/mimalloc/src/alloc-override.c b/system/lib/mimalloc/src/alloc-override.c index ded7a101de9f2..8d2efcd7ce3ef 100644 --- a/system/lib/mimalloc/src/alloc-override.c +++ b/system/lib/mimalloc/src/alloc-override.c @@ -72,24 +72,20 @@ typedef void* mi_nothrow_t; #define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun } #define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun) - __attribute__((used)) static struct mi_interpose_s _mi_interposes[] __attribute__((section("__DATA, __interpose"))) = + #define MI_INTERPOSE_DECLS(name) __attribute__((used)) static struct mi_interpose_s name[] __attribute__((section("__DATA, __interpose"))) + + MI_INTERPOSE_DECLS(_mi_interposes) = { MI_INTERPOSE_MI(malloc), MI_INTERPOSE_MI(calloc), MI_INTERPOSE_MI(realloc), MI_INTERPOSE_MI(strdup), - #if defined(MAC_OS_X_VERSION_10_7) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7 - MI_INTERPOSE_MI(strndup), - #endif MI_INTERPOSE_MI(realpath), MI_INTERPOSE_MI(posix_memalign), MI_INTERPOSE_MI(reallocf), MI_INTERPOSE_MI(valloc), MI_INTERPOSE_FUN(malloc_size,mi_malloc_size_checked), MI_INTERPOSE_MI(malloc_good_size), - #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15 - MI_INTERPOSE_MI(aligned_alloc), - #endif #ifdef MI_OSX_ZONE // we interpose malloc_default_zone in alloc-override-osx.c so we can use mi_free safely MI_INTERPOSE_MI(free), @@ -100,6 +96,12 @@ typedef void* mi_nothrow_t; MI_INTERPOSE_FUN(vfree,mi_cfree), #endif }; + MI_INTERPOSE_DECLS(_mi_interposes_10_7) __OSX_AVAILABLE(10.7) = { + 
MI_INTERPOSE_MI(strndup), + }; + MI_INTERPOSE_DECLS(_mi_interposes_10_15) __OSX_AVAILABLE(10.15) = { + MI_INTERPOSE_MI(aligned_alloc), + }; #ifdef __cplusplus extern "C" { @@ -249,7 +251,7 @@ extern "C" { // Forward Posix/Unix calls as well void* reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize) size_t malloc_size(const void* p) MI_FORWARD1(mi_usable_size,p) - #if !defined(__ANDROID__) && !defined(__FreeBSD__) + #if !defined(__ANDROID__) && !defined(__FreeBSD__) && !defined(__DragonFly__) size_t malloc_usable_size(void *p) MI_FORWARD1(mi_usable_size,p) #else size_t malloc_usable_size(const void *p) MI_FORWARD1(mi_usable_size,p) @@ -304,8 +306,8 @@ mi_decl_weak int reallocarr(void* p, size_t count, size_t size) { return mi_r void* emscripten_builtin_calloc(size_t nmemb, size_t size) MI_FORWARD2(mi_calloc, nmemb, size) #endif -#elif defined(__GLIBC__) && defined(__linux__) - // forward __libc interface (needed for glibc-based Linux distributions) +#elif defined(__linux__) + // forward __libc interface (needed for glibc-based and musl-based Linux distributions) void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc,size) void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc,count,size) void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc,p,size) diff --git a/system/lib/mimalloc/src/alloc.c b/system/lib/mimalloc/src/alloc.c index 86aaae757bddf..0fed5e754c191 100644 --- a/system/lib/mimalloc/src/alloc.c +++ b/system/lib/mimalloc/src/alloc.c @@ -28,20 +28,26 @@ terms of the MIT license. A copy of the license can be found in the file // Fast allocation in a page: just pop from the free list. // Fall back to generic allocation only if the list is empty. // Note: in release mode the (inlined) routine is about 7 instructions with a single test. -extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept +extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept { + mi_assert_internal(size >= MI_PADDING_SIZE); mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size); + + // check the free list mi_block_t* const block = page->free; if mi_unlikely(block == NULL) { return _mi_malloc_generic(heap, size, zero, 0); } mi_assert_internal(block != NULL && _mi_ptr_page(block) == page); + // pop from the free list page->free = mi_block_next(page, block); page->used++; mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); + mi_assert_internal(page->block_size < MI_MAX_ALIGN_SIZE || _mi_is_aligned(block, MI_MAX_ALIGN_SIZE)); + #if MI_DEBUG>3 - if (page->free_is_zero) { + if (page->free_is_zero && size > sizeof(*block)) { mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block))); } #endif @@ -54,7 +60,10 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_ // zero the block? 
note: we need to zero the full block size (issue #63) if mi_unlikely(zero) { mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic) + mi_assert_internal(!mi_page_is_huge(page)); + #if MI_PADDING mi_assert_internal(page->block_size >= MI_PADDING_SIZE); + #endif if (page->free_is_zero) { block->next = 0; mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE); @@ -75,11 +84,12 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_ #if (MI_STAT>0) const size_t bsize = mi_page_usable_block_size(page); if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { - mi_heap_stat_increase(heap, normal, bsize); - mi_heap_stat_counter_increase(heap, normal_count, 1); + mi_heap_stat_increase(heap, malloc_normal, bsize); + mi_heap_stat_counter_increase(heap, malloc_normal_count, 1); #if (MI_STAT>1) const size_t bin = _mi_bin(bsize); - mi_heap_stat_increase(heap, normal_bins[bin], 1); + mi_heap_stat_increase(heap, malloc_bins[bin], 1); + mi_heap_stat_increase(heap, malloc_requested, size - MI_PADDING_SIZE); #endif } #endif @@ -91,7 +101,7 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_ mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta)); #endif mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess - padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys)); + padding->canary = mi_ptr_encode_canary(page,block,page->keys); padding->delta = (uint32_t)(delta); #if MI_PADDING_CHECK if (!mi_page_is_huge(page)) { @@ -113,27 +123,31 @@ extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t siz return _mi_page_malloc_zero(heap,page,size,true); } +#if MI_GUARDED +mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; +#endif + static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { mi_assert(heap != NULL); + mi_assert(size <= MI_SMALL_SIZE_MAX); #if MI_DEBUG const uintptr_t tid = _mi_thread_id(); mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local #endif - mi_assert(size <= MI_SMALL_SIZE_MAX); - #if (MI_PADDING) + #if (MI_PADDING || MI_GUARDED) if (size == 0) { size = sizeof(void*); } #endif + #if MI_GUARDED + if (mi_heap_malloc_use_guarded(heap,size)) { + return _mi_heap_malloc_guarded(heap, size, zero); + } + #endif + // get page in constant time, and allocate from it mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE); - void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero); + void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero); mi_track_malloc(p,size,zero); - #if MI_STAT>1 - if (p != NULL) { - if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } - mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); - } - #endif #if MI_DEBUG>3 if (p != NULL && zero) { mi_assert_expensive(mi_mem_is_zero(p, size)); @@ -153,21 +167,23 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si // The main allocation function extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept { + // fast path for small objects if mi_likely(size <= MI_SMALL_SIZE_MAX) { mi_assert_internal(huge_alignment == 0); return mi_heap_malloc_small_zero(heap, 
size, zero); } + #if MI_GUARDED + else if (huge_alignment==0 && mi_heap_malloc_use_guarded(heap,size)) { + return _mi_heap_malloc_guarded(heap, size, zero); + } + #endif else { + // regular allocation mi_assert(heap!=NULL); mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic mi_track_malloc(p,size,zero); - #if MI_STAT>1 - if (p != NULL) { - if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } - mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); - } - #endif + #if MI_DEBUG>3 if (p != NULL && zero) { mi_assert_expensive(mi_mem_is_zero(p, size)); @@ -362,7 +378,7 @@ mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_ #ifndef PATH_MAX #define PATH_MAX MAX_PATH #endif -#include + mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { // todo: use GetFullPathNameW to allow longer file names char buf[PATH_MAX]; @@ -530,7 +546,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, si } mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) { - return mi_heap_alloc_new_n(mi_prim_get_default_heap(), size, count); + return mi_heap_alloc_new_n(mi_prim_get_default_heap(), count, size); } @@ -577,6 +593,83 @@ mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) { } } +#if MI_GUARDED +// We always allocate a guarded allocation at an offset (`mi_page_has_aligned` will be true). +// We then set the first word of the block to `0` for regular offset aligned allocations (in `alloc-aligned.c`) +// and the first word to `~0` for guarded allocations to have a correct `mi_usable_size` + +static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) { + // TODO: we can still make padding work by moving it out of the guard page area + mi_page_t* const page = _mi_ptr_page(block); + mi_page_set_has_aligned(page, true); + block->next = MI_BLOCK_TAG_GUARDED; + + // set guard page at the end of the block + mi_segment_t* const segment = _mi_page_segment(page); + const size_t block_size = mi_page_block_size(page); // must use `block_size` to match `mi_free_local` + const size_t os_page_size = _mi_os_page_size(); + mi_assert_internal(block_size >= obj_size + os_page_size + sizeof(mi_block_t)); + if (block_size < obj_size + os_page_size + sizeof(mi_block_t)) { + // should never happen + mi_free(block); + return NULL; + } + uint8_t* guard_page = (uint8_t*)block + block_size - os_page_size; + mi_assert_internal(_mi_is_aligned(guard_page, os_page_size)); + if (segment->allow_decommit && _mi_is_aligned(guard_page, os_page_size)) { + _mi_os_protect(guard_page, os_page_size); + } + else { + _mi_warning_message("unable to set a guard page behind an object due to pinned memory (large OS pages?) 
(object %p of size %zu)\n", block, block_size); + } + + // align pointer just in front of the guard page + size_t offset = block_size - os_page_size - obj_size; + mi_assert_internal(offset > sizeof(mi_block_t)); + if (offset > MI_BLOCK_ALIGNMENT_MAX) { + // give up to place it right in front of the guard page if the offset is too large for unalignment + offset = MI_BLOCK_ALIGNMENT_MAX; + } + void* p = (uint8_t*)block + offset; + mi_track_align(block, p, offset, obj_size); + mi_track_mem_defined(block, sizeof(mi_block_t)); + return p; +} + +mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept +{ + #if defined(MI_PADDING_SIZE) + mi_assert(MI_PADDING_SIZE==0); + #endif + // allocate multiple of page size ending in a guard page + // ensure minimal alignment requirement? + const size_t os_page_size = _mi_os_page_size(); + const size_t obj_size = (mi_option_is_enabled(mi_option_guarded_precise) ? size : _mi_align_up(size, MI_MAX_ALIGN_SIZE)); + const size_t bsize = _mi_align_up(_mi_align_up(obj_size, MI_MAX_ALIGN_SIZE) + sizeof(mi_block_t), MI_MAX_ALIGN_SIZE); + const size_t req_size = _mi_align_up(bsize + os_page_size, os_page_size); + mi_block_t* const block = (mi_block_t*)_mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */); + if (block==NULL) return NULL; + void* const p = mi_block_ptr_set_guarded(block, obj_size); + + // stats + mi_track_malloc(p, size, zero); + if (p != NULL) { + if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } + #if MI_STAT>1 + mi_heap_stat_adjust_decrease(heap, malloc_requested, req_size); + mi_heap_stat_increase(heap, malloc_requested, size); + #endif + _mi_stat_counter_increase(&heap->tld->stats.malloc_guarded_count, 1); + } + #if MI_DEBUG>3 + if (p != NULL && zero) { + mi_assert_expensive(mi_mem_is_zero(p, size)); + } + #endif + return p; +} +#endif + // ------------------------------------------------------ // ensure explicit external inline definitions are emitted! // ------------------------------------------------------ @@ -584,6 +677,7 @@ mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) { #ifdef __cplusplus void* _mi_externs[] = { (void*)&_mi_page_malloc, + (void*)&_mi_page_malloc_zero, (void*)&_mi_heap_malloc_zero, (void*)&_mi_heap_malloc_zero_ex, (void*)&mi_malloc, diff --git a/system/lib/mimalloc/src/arena-abandon.c b/system/lib/mimalloc/src/arena-abandon.c new file mode 100644 index 0000000000000..460c80fc22782 --- /dev/null +++ b/system/lib/mimalloc/src/arena-abandon.c @@ -0,0 +1,346 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +#if !defined(MI_IN_ARENA_C) +#error "this file should be included from 'arena.c' (so mi_arena_t is visible)" +// add includes help an IDE +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "bitmap.h" +#endif + +// Minimal exports for arena-abandoned. 
+size_t mi_arena_id_index(mi_arena_id_t id); +mi_arena_t* mi_arena_from_index(size_t idx); +size_t mi_arena_get_count(void); +void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex); +bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index); + +/* ----------------------------------------------------------- + Abandoned blocks/segments: + + _mi_arena_segment_clear_abandoned + _mi_arena_segment_mark_abandoned + + This is used to atomically abandon/reclaim segments + (and crosses the arena API but it is convenient to have here). + + Abandoned segments still have live blocks; they get reclaimed + when a thread frees a block in it, or when a thread needs a fresh + segment. + + Abandoned segments are atomically marked in the `block_abandoned` + bitmap of arenas. Any segments allocated outside arenas are put + in the sub-process `abandoned_os_list`. This list is accessed + using locks but this should be uncommon and generally uncontended. + Reclaim and visiting either scan through the `block_abandoned` + bitmaps of the arena's, or visit the `abandoned_os_list` + + A potentially nicer design is to use arena's for everything + and perhaps have virtual arena's to map OS allocated memory + but this would lack the "density" of our current arena's. TBC. +----------------------------------------------------------- */ + + +// reclaim a specific OS abandoned segment; `true` on success. +// sets the thread_id. +static bool mi_arena_segment_os_clear_abandoned(mi_segment_t* segment, bool take_lock) { + mi_assert(segment->memid.memkind != MI_MEM_ARENA); + // not in an arena, remove from list of abandoned os segments + mi_subproc_t* const subproc = segment->subproc; + if (take_lock && !mi_lock_try_acquire(&subproc->abandoned_os_lock)) { + return false; // failed to acquire the lock, we just give up + } + // remove atomically from the abandoned os list (if possible!) + bool reclaimed = false; + mi_segment_t* const next = segment->abandoned_os_next; + mi_segment_t* const prev = segment->abandoned_os_prev; + if (next != NULL || prev != NULL || subproc->abandoned_os_list == segment) { + #if MI_DEBUG>3 + // find ourselves in the abandoned list (and check the count) + bool found = false; + size_t count = 0; + for (mi_segment_t* current = subproc->abandoned_os_list; current != NULL; current = current->abandoned_os_next) { + if (current == segment) { found = true; } + count++; + } + mi_assert_internal(found); + mi_assert_internal(count == mi_atomic_load_relaxed(&subproc->abandoned_os_list_count)); + #endif + // remove (atomically) from the list and reclaim + if (prev != NULL) { prev->abandoned_os_next = next; } + else { subproc->abandoned_os_list = next; } + if (next != NULL) { next->abandoned_os_prev = prev; } + else { subproc->abandoned_os_list_tail = prev; } + segment->abandoned_os_next = NULL; + segment->abandoned_os_prev = NULL; + mi_atomic_decrement_relaxed(&subproc->abandoned_count); + mi_atomic_decrement_relaxed(&subproc->abandoned_os_list_count); + if (take_lock) { // don't reset the thread_id when iterating + mi_atomic_store_release(&segment->thread_id, _mi_thread_id()); + } + reclaimed = true; + } + if (take_lock) { mi_lock_release(&segment->subproc->abandoned_os_lock); } + return reclaimed; +} + +// reclaim a specific abandoned segment; `true` on success. +// sets the thread_id. 
+bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment) { + if mi_unlikely(segment->memid.memkind != MI_MEM_ARENA) { + return mi_arena_segment_os_clear_abandoned(segment, true /* take lock */); + } + // arena segment: use the blocks_abandoned bitmap. + size_t arena_idx; + size_t bitmap_idx; + mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx); + mi_arena_t* arena = mi_arena_from_index(arena_idx); + mi_assert_internal(arena != NULL); + // reclaim atomically + bool was_marked = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx); + if (was_marked) { + mi_assert_internal(mi_atomic_load_acquire(&segment->thread_id) == 0); + mi_atomic_decrement_relaxed(&segment->subproc->abandoned_count); + mi_atomic_store_release(&segment->thread_id, _mi_thread_id()); + } + // mi_assert_internal(was_marked); + mi_assert_internal(!was_marked || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); + //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx)); + return was_marked; +} + + +// mark a specific OS segment as abandoned +static void mi_arena_segment_os_mark_abandoned(mi_segment_t* segment) { + mi_assert(segment->memid.memkind != MI_MEM_ARENA); + // not in an arena; we use a list of abandoned segments + mi_subproc_t* const subproc = segment->subproc; + mi_lock(&subproc->abandoned_os_lock) { + // push on the tail of the list (important for the visitor) + mi_segment_t* prev = subproc->abandoned_os_list_tail; + mi_assert_internal(prev == NULL || prev->abandoned_os_next == NULL); + mi_assert_internal(segment->abandoned_os_prev == NULL); + mi_assert_internal(segment->abandoned_os_next == NULL); + if (prev != NULL) { prev->abandoned_os_next = segment; } + else { subproc->abandoned_os_list = segment; } + subproc->abandoned_os_list_tail = segment; + segment->abandoned_os_prev = prev; + segment->abandoned_os_next = NULL; + mi_atomic_increment_relaxed(&subproc->abandoned_os_list_count); + mi_atomic_increment_relaxed(&subproc->abandoned_count); + // and release the lock + } + return; +} + +// mark a specific segment as abandoned +// clears the thread_id. +void _mi_arena_segment_mark_abandoned(mi_segment_t* segment) +{ + mi_assert_internal(segment->used == segment->abandoned); + mi_atomic_store_release(&segment->thread_id, (uintptr_t)0); // mark as abandoned for multi-thread free's + if mi_unlikely(segment->memid.memkind != MI_MEM_ARENA) { + mi_arena_segment_os_mark_abandoned(segment); + return; + } + // segment is in an arena, mark it in the arena `blocks_abandoned` bitmap + size_t arena_idx; + size_t bitmap_idx; + mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx); + mi_arena_t* arena = mi_arena_from_index(arena_idx); + mi_assert_internal(arena != NULL); + // set abandonment atomically + mi_subproc_t* const subproc = segment->subproc; // don't access the segment after setting it abandoned + const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL); + if (was_unmarked) { mi_atomic_increment_relaxed(&subproc->abandoned_count); } + mi_assert_internal(was_unmarked); + mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); +} + + +/* ----------------------------------------------------------- + Iterate through the abandoned blocks/segments using a cursor. + This is used for reclaiming and abandoned block visiting. 
+----------------------------------------------------------- */ + +// start a cursor at a randomized arena +void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_subproc_t* subproc, bool visit_all, mi_arena_field_cursor_t* current) { + mi_assert_internal(heap == NULL || heap->tld->segments.subproc == subproc); + current->bitmap_idx = 0; + current->subproc = subproc; + current->visit_all = visit_all; + current->hold_visit_lock = false; + const size_t abandoned_count = mi_atomic_load_relaxed(&subproc->abandoned_count); + const size_t abandoned_list_count = mi_atomic_load_relaxed(&subproc->abandoned_os_list_count); + const size_t max_arena = mi_arena_get_count(); + if (heap != NULL && heap->arena_id != _mi_arena_id_none()) { + // for a heap that is bound to one arena, only visit that arena + current->start = mi_arena_id_index(heap->arena_id); + current->end = current->start + 1; + current->os_list_count = 0; + } + else { + // otherwise visit all starting at a random location + if (abandoned_count > abandoned_list_count && max_arena > 0) { + current->start = (heap == NULL || max_arena == 0 ? 0 : (mi_arena_id_t)(_mi_heap_random_next(heap) % max_arena)); + current->end = current->start + max_arena; + } + else { + current->start = 0; + current->end = 0; + } + current->os_list_count = abandoned_list_count; // max entries to visit in the os abandoned list + } + mi_assert_internal(current->start <= max_arena); +} + +void _mi_arena_field_cursor_done(mi_arena_field_cursor_t* current) { + if (current->hold_visit_lock) { + mi_lock_release(¤t->subproc->abandoned_os_visit_lock); + current->hold_visit_lock = false; + } +} + +static mi_segment_t* mi_arena_segment_clear_abandoned_at(mi_arena_t* arena, mi_subproc_t* subproc, mi_bitmap_index_t bitmap_idx) { + // try to reclaim an abandoned segment in the arena atomically + if (!_mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx)) return NULL; + mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); + mi_segment_t* segment = (mi_segment_t*)mi_arena_block_start(arena, bitmap_idx); + mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0); + // check that the segment belongs to our sub-process + // note: this is the reason we need the `abandoned_visit` lock in the case abandoned visiting is enabled. + // without the lock an abandoned visit may otherwise fail to visit all abandoned segments in the sub-process. + // for regular reclaim it is fine to miss one sometimes so without abandoned visiting we don't need the `abandoned_visit` lock. + if (segment->subproc != subproc) { + // it is from another sub-process, re-mark it and continue searching + const bool was_zero = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL); + mi_assert_internal(was_zero); MI_UNUSED(was_zero); + return NULL; + } + else { + // success, we unabandoned a segment in our sub-process + mi_atomic_decrement_relaxed(&subproc->abandoned_count); + return segment; + } +} + +static mi_segment_t* mi_arena_segment_clear_abandoned_next_field(mi_arena_field_cursor_t* previous) { + const size_t max_arena = mi_arena_get_count(); + size_t field_idx = mi_bitmap_index_field(previous->bitmap_idx); + size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx); + // visit arena's (from the previous cursor) + for (; previous->start < previous->end; previous->start++, field_idx = 0, bit_idx = 0) { + // index wraps around + size_t arena_idx = (previous->start >= max_arena ? 
previous->start % max_arena : previous->start); + mi_arena_t* arena = mi_arena_from_index(arena_idx); + if (arena != NULL) { + bool has_lock = false; + // visit the abandoned fields (starting at previous_idx) + for (; field_idx < arena->field_count; field_idx++, bit_idx = 0) { + size_t field = mi_atomic_load_relaxed(&arena->blocks_abandoned[field_idx]); + if mi_unlikely(field != 0) { // skip zero fields quickly + // we only take the arena lock if there are actually abandoned segments present + if (!has_lock && mi_option_is_enabled(mi_option_visit_abandoned)) { + has_lock = (previous->visit_all ? (mi_lock_acquire(&arena->abandoned_visit_lock),true) : mi_lock_try_acquire(&arena->abandoned_visit_lock)); + if (!has_lock) { + if (previous->visit_all) { + _mi_error_message(EFAULT, "internal error: failed to visit all abandoned segments due to failure to acquire the visitor lock"); + } + // skip to next arena + break; + } + } + mi_assert_internal(has_lock || !mi_option_is_enabled(mi_option_visit_abandoned)); + // visit each set bit in the field (todo: maybe use `ctz` here?) + for (; bit_idx < MI_BITMAP_FIELD_BITS; bit_idx++) { + // pre-check if the bit is set + size_t mask = ((size_t)1 << bit_idx); + if mi_unlikely((field & mask) == mask) { + mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx); + mi_segment_t* const segment = mi_arena_segment_clear_abandoned_at(arena, previous->subproc, bitmap_idx); + if (segment != NULL) { + //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx)); + if (has_lock) { mi_lock_release(&arena->abandoned_visit_lock); } + previous->bitmap_idx = mi_bitmap_index_create_ex(field_idx, bit_idx + 1); // start at next one for the next iteration + return segment; + } + } + } + } + } + if (has_lock) { mi_lock_release(&arena->abandoned_visit_lock); } + } + } + return NULL; +} + +static mi_segment_t* mi_arena_segment_clear_abandoned_next_list(mi_arena_field_cursor_t* previous) { + // go through the abandoned_os_list + // we only allow one thread per sub-process to do to visit guarded by the `abandoned_os_visit_lock`. + // The lock is released when the cursor is released. + if (!previous->hold_visit_lock) { + previous->hold_visit_lock = (previous->visit_all ? 
(mi_lock_acquire(&previous->subproc->abandoned_os_visit_lock),true) + : mi_lock_try_acquire(&previous->subproc->abandoned_os_visit_lock)); + if (!previous->hold_visit_lock) { + if (previous->visit_all) { + _mi_error_message(EFAULT, "internal error: failed to visit all abandoned segments due to failure to acquire the OS visitor lock"); + } + return NULL; // we cannot get the lock, give up + } + } + // One list entry at a time + while (previous->os_list_count > 0) { + previous->os_list_count--; + mi_lock_acquire(&previous->subproc->abandoned_os_lock); // this could contend with concurrent OS block abandonment and reclaim from `free` + mi_segment_t* segment = previous->subproc->abandoned_os_list; + // pop from head of the list, a subsequent mark will push at the end (and thus we iterate through os_list_count entries) + if (segment == NULL || mi_arena_segment_os_clear_abandoned(segment, false /* we already have the lock */)) { + mi_lock_release(&previous->subproc->abandoned_os_lock); + return segment; + } + // already abandoned, try again + mi_lock_release(&previous->subproc->abandoned_os_lock); + } + // done + mi_assert_internal(previous->os_list_count == 0); + return NULL; +} + + +// reclaim abandoned segments +// this does not set the thread id (so it appears as still abandoned) +mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous) { + if (previous->start < previous->end) { + // walk the arena + mi_segment_t* segment = mi_arena_segment_clear_abandoned_next_field(previous); + if (segment != NULL) { return segment; } + } + // no entries in the arena's anymore, walk the abandoned OS list + mi_assert_internal(previous->start == previous->end); + return mi_arena_segment_clear_abandoned_next_list(previous); +} + + +bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) { + // (unfortunately) the visit_abandoned option must be enabled from the start. + // This is to avoid taking locks if abandoned list visiting is not required (as for most programs) + if (!mi_option_is_enabled(mi_option_visit_abandoned)) { + _mi_error_message(EFAULT, "internal error: can only visit abandoned blocks when MIMALLOC_VISIT_ABANDONED=ON"); + return false; + } + mi_arena_field_cursor_t current; + _mi_arena_field_cursor_init(NULL, _mi_subproc_from_id(subproc_id), true /* visit all (blocking) */, ¤t); + mi_segment_t* segment; + bool ok = true; + while (ok && (segment = _mi_arena_segment_clear_abandoned_next(¤t)) != NULL) { + ok = _mi_segment_visit_blocks(segment, heap_tag, visit_blocks, visitor, arg); + _mi_arena_segment_mark_abandoned(segment); + } + _mi_arena_field_cursor_done(¤t); + return ok; +} diff --git a/system/lib/mimalloc/src/arena.c b/system/lib/mimalloc/src/arena.c index 648ee844fedae..e97ca885fed86 100644 --- a/system/lib/mimalloc/src/arena.c +++ b/system/lib/mimalloc/src/arena.c @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2019-2023, Microsoft Research, Daan Leijen +Copyright (c) 2019-2024, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -11,69 +11,68 @@ large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB). In contrast to the rest of mimalloc, the arenas are shared between threads and need to be accessed using atomic operations. 
-Arenas are used to for huge OS page (1GiB) reservations or for reserving +Arenas are also used to for huge OS page (1GiB) reservations or for reserving OS memory upfront which can be improve performance or is sometimes needed on embedded devices. We can also employ this with WASI or `sbrk` systems to reserve large arenas upfront and be able to reuse the memory more effectively. The arena allocation needs to be thread safe and we use an atomic bitmap to allocate. -----------------------------------------------------------------------------*/ + #include "mimalloc.h" #include "mimalloc/internal.h" #include "mimalloc/atomic.h" +#include "bitmap.h" -#include // memset -#include // ENOMEM - -#include "bitmap.h" // atomic bitmap /* ----------------------------------------------------------- Arena allocation ----------------------------------------------------------- */ -// Block info: bit 0 contains the `in_use` bit, the upper bits the -// size in count of arena blocks. -typedef uintptr_t mi_block_info_t; -#define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 64MiB (must be at least MI_SEGMENT_ALIGN) -#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 32MiB -#define MI_MAX_ARENAS (112) // not more than 126 (since we use 7 bits in the memid and an arena index + 1) - // A memory arena descriptor typedef struct mi_arena_s { - mi_arena_id_t id; // arena id; 0 for non-specific - mi_memid_t memid; // memid of the memory area - _Atomic(uint8_t*) start; // the start of the memory area - size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`) - size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`) - size_t meta_size; // size of the arena structure itself (including its bitmaps) - mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation) - int numa_node; // associated NUMA node - bool exclusive; // only allow allocations if specifically for this arena - bool is_large; // memory area consists of large- or huge OS pages (always committed) - _Atomic(size_t) search_idx; // optimization to start the search for free blocks - _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be decommitted from `blocks_decommit`. - mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero? - mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted) - mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted) - mi_bitmap_field_t* blocks_abandoned; // blocks that start with an abandoned segment. 
(This crosses API's but it is convenient to have here) - mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`) + mi_arena_id_t id; // arena id; 0 for non-specific + mi_memid_t memid; // memid of the memory area + _Atomic(uint8_t*) start; // the start of the memory area + size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`) + size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`) + size_t meta_size; // size of the arena structure itself (including its bitmaps) + mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation) + int numa_node; // associated NUMA node + bool exclusive; // only allow allocations if specifically for this arena + bool is_large; // memory area consists of large- or huge OS pages (always committed) + mi_lock_t abandoned_visit_lock; // lock is only used when abandoned segments are being visited + _Atomic(size_t) search_idx; // optimization to start the search for free blocks + _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be purged from `blocks_purge`. + + mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero? + mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted) + mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted) + mi_bitmap_field_t* blocks_abandoned; // blocks that start with an abandoned segment. (This crosses API's but it is convenient to have here) + mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`) // do not add further fields here as the dirty, committed, purged, and abandoned bitmaps follow the inuse bitmap fields. } mi_arena_t; +#define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 64MiB (must be at least MI_SEGMENT_ALIGN) +#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 32MiB +#define MI_MAX_ARENAS (132) // Limited as the reservation exponentially increases (and takes up .bss) + // The available arenas static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS]; static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0 +static mi_decl_cache_align _Atomic(int64_t) mi_arenas_purge_expire; // set if there exist purgeable arenas - -//static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept; +#define MI_IN_ARENA_C +#include "arena-abandon.c" +#undef MI_IN_ARENA_C /* ----------------------------------------------------------- Arena id's id = arena_index + 1 ----------------------------------------------------------- */ -static size_t mi_arena_id_index(mi_arena_id_t id) { +size_t mi_arena_id_index(mi_arena_id_t id) { return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1); } @@ -104,6 +103,16 @@ bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) { return (memid.memkind == MI_MEM_OS); } +size_t mi_arena_get_count(void) { + return mi_atomic_load_relaxed(&mi_arena_count); +} + +mi_arena_t* mi_arena_from_index(size_t idx) { + mi_assert_internal(idx < mi_arena_get_count()); + return mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[idx]); +} + + /* ----------------------------------------------------------- Arena allocations get a (currently) 16-bit memory id where the lower 8 bits are the arena id, and the upper bits the block index. 
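(As an aside to the comment above, the sketch below illustrates the general idea of packing an arena id into the low bits of a memory id and the block index into the upper bits. It is only an illustration of the encoding scheme and not mimalloc code; the actual `mi_memid_t` carries these values in dedicated struct fields, as `mi_memid_create_arena` and `mi_arena_memid_indices` show.)

```
// Illustrative sketch only (not mimalloc code): encode/decode an arena id
// (lower 8 bits) and a block index (upper bits) in a single integer.
#include <assert.h>
#include <stdint.h>

typedef uint32_t demo_memid_t;

static demo_memid_t demo_memid_create(uint8_t arena_id, uint32_t block_index) {
  return (demo_memid_t)arena_id | (block_index << 8);
}

static void demo_memid_indices(demo_memid_t memid, uint8_t* arena_id, uint32_t* block_index) {
  *arena_id    = (uint8_t)(memid & 0xFF);
  *block_index = memid >> 8;
}

int main(void) {
  const demo_memid_t id = demo_memid_create(3, 42);
  uint8_t arena_id; uint32_t block_index;
  demo_memid_indices(id, &arena_id, &block_index);
  assert(arena_id == 3 && block_index == 42);
  return 0;
}
```
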
@@ -129,7 +138,7 @@ static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_ return memid; } -static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) { +bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) { mi_assert_internal(memid.memkind == MI_MEM_ARENA); *arena_index = mi_arena_id_index(memid.mem.arena.id); *bitmap_index = memid.mem.arena.block_index; @@ -140,10 +149,10 @@ static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bit /* ----------------------------------------------------------- Special static area for mimalloc internal structures - to avoid OS calls (for example, for the arena metadata) + to avoid OS calls (for example, for the arena metadata (~= 256b)) ----------------------------------------------------------- */ -#define MI_ARENA_STATIC_MAX (MI_INTPTR_SIZE*MI_KiB) // 8 KiB on 64-bit +#define MI_ARENA_STATIC_MAX ((MI_INTPTR_SIZE/2)*MI_KiB) // 4 KiB on 64-bit static mi_decl_cache_align uint8_t mi_arena_static[MI_ARENA_STATIC_MAX]; // must be cache aligned, see issue #895 static mi_decl_cache_align _Atomic(size_t) mi_arena_static_top; @@ -175,7 +184,7 @@ static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* m return p; } -static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) { +void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) { *memid = _mi_memid_none(); // try static @@ -183,27 +192,22 @@ static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* st if (p != NULL) return p; // or fall back to the OS - p = _mi_os_alloc(size, memid, stats); + p = _mi_os_zalloc(size, memid); if (p == NULL) return NULL; - // zero the OS memory if needed - if (!memid->initially_zero) { - _mi_memzero_aligned(p, size); - memid->initially_zero = true; - } return p; } -static void mi_arena_meta_free(void* p, mi_memid_t memid, size_t size, mi_stats_t* stats) { +void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size) { if (mi_memkind_is_os(memid.memkind)) { - _mi_os_free(p, size, memid, stats); + _mi_os_free(p, size, memid); } else { mi_assert(memid.memkind == MI_MEM_STATIC); } } -static void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) { +void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) { return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex))); } @@ -213,10 +217,10 @@ static void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) { ----------------------------------------------------------- */ // claim the `blocks_inuse` bits -static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) +static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx) { size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter - if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx, stats)) { + if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) { mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around return true; }; @@ -229,13 +233,13 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index 
----------------------------------------------------------- */ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount, - bool commit, mi_memid_t* memid, mi_os_tld_t* tld) + bool commit, mi_memid_t* memid) { MI_UNUSED(arena_index); mi_assert_internal(mi_arena_id_index(arena->id) == arena_index); mi_bitmap_index_t bitmap_index; - if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index, tld->stats)) return NULL; + if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL; // claimed it! void* p = mi_arena_block_start(arena, bitmap_index); @@ -250,7 +254,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar // set the dirty bits (todo: no need for an atomic op here?) if (arena->memid.initially_zero && arena->blocks_dirty != NULL) { - memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL); + memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL, NULL); } // set commit state @@ -261,39 +265,54 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar else if (commit) { // commit requested, but the range may not be committed as a whole: ensure it is committed now memid->initially_committed = true; + const size_t commit_size = mi_arena_block_size(needed_bcount); bool any_uncommitted; - _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted); + size_t already_committed = 0; + _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted, &already_committed); if (any_uncommitted) { + mi_assert_internal(already_committed < needed_bcount); + const size_t stat_commit_size = commit_size - mi_arena_block_size(already_committed); bool commit_zero = false; - if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) { + if (!_mi_os_commit_ex(p, commit_size, &commit_zero, stat_commit_size)) { memid->initially_committed = false; } else { if (commit_zero) { memid->initially_zero = true; } } } + else { + // all are already committed: signal that we are reusing memory in case it was purged before + _mi_os_reuse( p, commit_size ); + } } else { // no need to commit, but check if already fully committed - memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index); + size_t already_committed = 0; + memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &already_committed); + if (!memid->initially_committed && already_committed > 0) { + // partially committed: as it will be committed at some time, adjust the stats and pretend the range is fully uncommitted. 
+ mi_assert_internal(already_committed < needed_bcount); + _mi_stat_decrease(&_mi_stats_main.committed, mi_arena_block_size(already_committed)); + _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index); + } } return p; } -// allocate in a speficic arena +// allocate in a specific arena static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment, - bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld ) + bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid ) { MI_UNUSED_RELEASE(alignment); - mi_assert_internal(alignment <= MI_SEGMENT_ALIGN); + mi_assert(alignment <= MI_SEGMENT_ALIGN); const size_t bcount = mi_block_count_of_size(size); const size_t arena_index = mi_arena_id_index(arena_id); mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count)); mi_assert_internal(size <= mi_arena_block_size(bcount)); // Check arena suitability - mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]); + mi_arena_t* arena = mi_arena_from_index(arena_index); if (arena == NULL) return NULL; if (!allow_large && arena->is_large) return NULL; if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL; @@ -304,7 +323,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no } // try to allocate - void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld); + void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid); mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment)); return p; } @@ -313,7 +332,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no // allocate from an arena with fallback to the OS static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment, bool commit, bool allow_large, - mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld ) + mi_arena_id_t req_arena_id, mi_memid_t* memid ) { MI_UNUSED(alignment); mi_assert_internal(alignment <= MI_SEGMENT_ALIGN); @@ -323,21 +342,21 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz if (req_arena_id != _mi_arena_id_none()) { // try a specific arena if requested if (mi_arena_id_index(req_arena_id) < max_arena) { - void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid); if (p != NULL) return p; } } else { // try numa affine allocation for (size_t i = 0; i < max_arena; i++) { - void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid); if (p != NULL) return p; } // try from another numa node instead.. 
if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arena's have been tried already for (size_t i = 0; i < max_arena; i++) { - void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid); if (p != NULL) return p; } } @@ -346,10 +365,9 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz } // try to reserve a fresh arena space -static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id) +static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t *arena_id) { if (_mi_preloading()) return false; // use OS only while pre loading - if (req_arena_id != _mi_arena_id_none()) return false; const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count); if (arena_count > (MI_MAX_ARENAS - 4)) return false; @@ -361,8 +379,14 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for WASM for example) } arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE); + arena_reserve = _mi_align_up(arena_reserve, MI_SEGMENT_SIZE); if (arena_count >= 8 && arena_count <= 128) { - arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve; // scale up the arena sizes exponentially + // scale up the arena sizes exponentially every 8 entries (128 entries get to 589TiB) + const size_t multiplier = (size_t)1 << _mi_clamp(arena_count/8, 0, 16 ); + size_t reserve = 0; + if (!mi_mul_overflow(multiplier, arena_reserve, &reserve)) { + arena_reserve = reserve; + } } if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size @@ -376,27 +400,28 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, - mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld) + mi_arena_id_t req_arena_id, mi_memid_t* memid) { - mi_assert_internal(memid != NULL && tld != NULL); + mi_assert_internal(memid != NULL); mi_assert_internal(size > 0); *memid = _mi_memid_none(); - const int numa_node = _mi_os_numa_node(tld); // current numa node + const int numa_node = _mi_os_numa_node(); // current numa node // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data) - if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) || req_arena_id != _mi_arena_id_none()) { // is arena allocation allowed? - if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) { - void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (!mi_option_is_enabled(mi_option_disallow_arena_alloc)) { // is arena allocation allowed? 
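/* [editor's note -- illustrative sketch, not part of the upstream patch]
   The `mi_arena_reserve` hunk above now doubles the reserve size for every 8
   existing arenas (clamped to 2^16) and keeps the unscaled size if the
   multiplication would overflow. A minimal standalone version of that policy;
   `demo_clamp` and the manual overflow check stand in for mimalloc's
   `_mi_clamp` and `mi_mul_overflow` helpers (assumptions, not library code). */
#include <stddef.h>
#include <stdint.h>

static size_t demo_clamp(size_t x, size_t lo, size_t hi) {
  return (x < lo ? lo : (x > hi ? hi : x));
}

// Scale a base reserve size: x2 for every 8 existing arenas, capped at 2^16.
static size_t demo_scaled_reserve(size_t base_reserve, size_t arena_count) {
  if (arena_count < 8 || arena_count > 128) return base_reserve;
  const size_t multiplier = (size_t)1 << demo_clamp(arena_count / 8, 0, 16);
  if (base_reserve > SIZE_MAX / multiplier) {
    return base_reserve;  // would overflow: keep the unscaled size
  }
  return base_reserve * multiplier;
}
// e.g. with a 1 GiB base reserve, 64 arenas give a multiplier of 2^8 = 256.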
+ if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) + { + void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid); if (p != NULL) return p; // otherwise, try to first eagerly reserve a new arena if (req_arena_id == _mi_arena_id_none()) { mi_arena_id_t arena_id = 0; - if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) { + if (mi_arena_reserve(size, allow_large, &arena_id)) { // and try allocate in there mi_assert_internal(req_arena_id == _mi_arena_id_none()); - p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid); if (p != NULL) return p; } } @@ -411,16 +436,16 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset // finally, fall back to the OS if (align_offset > 0) { - return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats); + return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid); } else { - return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats); + return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid); } } -void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld) +void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid) { - return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld); + return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid); } @@ -446,25 +471,26 @@ static long mi_arena_purge_delay(void) { // reset or decommit in an arena and update the committed/decommit bitmaps // assumes we own the area (i.e. blocks_in_use is claimed by us) -static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) { +static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) { mi_assert_internal(arena->blocks_committed != NULL); mi_assert_internal(arena->blocks_purge != NULL); mi_assert_internal(!arena->memid.is_pinned); const size_t size = mi_arena_block_size(blocks); void* const p = mi_arena_block_start(arena, bitmap_idx); bool needs_recommit; - if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) { + size_t already_committed = 0; + if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx, &already_committed)) { // all blocks are committed, we can purge freely - needs_recommit = _mi_os_purge(p, size, stats); + mi_assert_internal(already_committed == blocks); + needs_recommit = _mi_os_purge(p, size); } else { // some blocks are not committed -- this can happen when a partially committed block is freed // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge - // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory), - // and also undo the decommit stats (as it was already adjusted) + // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory). 
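/* [editor's note -- illustrative sketch, not part of the upstream patch]
   `_mi_arena_alloc_aligned` above tries the existing arenas first, then
   eagerly reserves a fresh arena and retries, and only then falls back to the
   OS. The `demo_*` functions below are hypothetical stand-ins (not mimalloc's
   internal API) used purely to show that ordering. */
#include <stddef.h>
#include <stdlib.h>

static void* demo_try_existing_arenas(size_t size) { (void)size; return NULL; } // pretend all arenas are full
static int   demo_reserve_fresh_arena(size_t size) { (void)size; return 0; }    // pretend reservation failed
static void* demo_os_alloc(size_t size)            { return malloc(size); }     // stand-in for an OS allocation

static void* demo_alloc(size_t size) {
  void* p = demo_try_existing_arenas(size);   // 1. try the arenas we already have
  if (p != NULL) return p;
  if (demo_reserve_fresh_arena(size)) {       // 2. eagerly reserve a new arena ...
    p = demo_try_existing_arenas(size);       //    ... and retry the allocation in it
    if (p != NULL) return p;
  }
  return demo_os_alloc(size);                 // 3. finally fall back to the OS
}

int main(void) {
  void* p = demo_alloc(64);
  free(p);
  return 0;
}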
+ mi_assert_internal(already_committed < blocks); mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits)); - needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats); - if (needs_recommit) { _mi_stat_increase(&_mi_stats_main.committed, size); } + needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed)); } // clear the purged blocks @@ -477,37 +503,40 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, // Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls. // Note: assumes we (still) own the area as we may purge immediately -static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) { +static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) { mi_assert_internal(arena->blocks_purge != NULL); const long delay = mi_arena_purge_delay(); if (delay < 0) return; // is purging allowed at all? if (_mi_preloading() || delay == 0) { // decommit directly - mi_arena_purge(arena, bitmap_idx, blocks, stats); + mi_arena_purge(arena, bitmap_idx, blocks); } else { - // schedule decommit - mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire); - if (expire != 0) { - mi_atomic_addi64_acq_rel(&arena->purge_expire, (mi_msecs_t)(delay/10)); // add smallish extra delay + // schedule purge + const mi_msecs_t expire = _mi_clock_now() + delay; + mi_msecs_t expire0 = 0; + if (mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire0, expire)) { + // expiration was not yet set + // maybe set the global arenas expire as well (if it wasn't set already) + mi_atomic_casi64_strong_acq_rel(&mi_arenas_purge_expire, &expire0, expire); } else { - mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay); + // already an expiration was set } - _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL); + _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL, NULL); } } // purge a range of blocks // return true if the full range was purged. // assumes we own the area (i.e. 
blocks_in_use is claimed by us) -static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) { +static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge) { const size_t endidx = startidx + bitlen; size_t bitidx = startidx; bool all_purged = false; while (bitidx < endidx) { - // count consequetive ones in the purge mask + // count consecutive ones in the purge mask size_t count = 0; while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) { count++; @@ -515,7 +544,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, if (count > 0) { // found range to be purged const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx); - mi_arena_purge(arena, range_idx, count, stats); + mi_arena_purge(arena, range_idx, count); if (count == bitlen) { all_purged = true; } @@ -526,15 +555,18 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, } // returns true if anything was purged -static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats) +static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force) { - if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false; + // check pre-conditions + if (arena->memid.is_pinned) return false; + + // expired yet? mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire); - if (expire == 0) return false; - if (!force && expire > now) return false; + if (!force && (expire == 0 || expire > now)) return false; // reset expire (if not already set concurrently) mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0); + _mi_stat_counter_increase(&_mi_stats_main.arena_purges, 1); // potential purges scheduled, walk through the bitmap bool any_purged = false; @@ -544,11 +576,12 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi if (purge != 0) { size_t bitidx = 0; while (bitidx < MI_BITMAP_FIELD_BITS) { - // find consequetive range of ones in the purge mask + // find consecutive range of ones in the purge mask size_t bitlen = 0; while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) { bitlen++; } + // temporarily claim the purge range as "in-use" to be thread-safe with allocation // try to claim the longest range of corresponding in_use bits const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx); while( bitlen > 0 ) { @@ -561,7 +594,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi if (bitlen > 0) { // read purge again now that we have the in_use bits purge = mi_atomic_load_acquire(&arena->blocks_purge[i]); - if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) { + if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge)) { full_purge = false; } any_purged = true; @@ -581,9 +614,15 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi return any_purged; } -static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) { +static void mi_arenas_try_purge( bool force, bool visit_all ) +{ if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled + // check if any arena needs purging? 
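/* [editor's note -- illustrative sketch, not part of the upstream patch]
   The new purge scheduling above sets `purge_expire` with a compare-and-swap
   only when no expiration is pending, so at most one purge is scheduled per
   delay cycle. A minimal standalone version of that pattern using C11
   atomics; the `demo_*` names and the clock parameter are assumptions, not
   mimalloc's atomic wrappers. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef int64_t demo_msecs_t;

// Set the expiration only if none is pending yet (mirrors the CAS on `purge_expire`).
static void demo_schedule_purge(_Atomic demo_msecs_t* purge_expire,
                                demo_msecs_t now, demo_msecs_t delay) {
  demo_msecs_t expected = 0;
  atomic_compare_exchange_strong(purge_expire, &expected, now + delay);
  // if the CAS failed, another thread already scheduled a purge: nothing to do
}

// Returns true if the purge is due; resets the expiration (if not changed concurrently)
// so that only one thread proceeds to purge.
static bool demo_purge_is_due(_Atomic demo_msecs_t* purge_expire,
                              demo_msecs_t now, bool force) {
  demo_msecs_t expire = atomic_load(purge_expire);
  if (!force && (expire == 0 || expire > now)) return false;
  atomic_compare_exchange_strong(purge_expire, &expire, 0);
  return true;
}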
+ const mi_msecs_t now = _mi_clock_now(); + mi_msecs_t arenas_expire = mi_atomic_loadi64_acquire(&mi_arenas_purge_expire); + if (!force && (arenas_expire == 0 || arenas_expire < now)) return; + const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count); if (max_arena == 0) return; @@ -591,17 +630,26 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) static mi_atomic_guard_t purge_guard; mi_atomic_guard(&purge_guard) { - mi_msecs_t now = _mi_clock_now(); - size_t max_purge_count = (visit_all ? max_arena : 1); + // increase global expire: at most one purge per delay cycle + mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay()); + size_t max_purge_count = (visit_all ? max_arena : 2); + bool all_visited = true; for (size_t i = 0; i < max_arena; i++) { mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); if (arena != NULL) { - if (mi_arena_try_purge(arena, now, force, stats)) { - if (max_purge_count <= 1) break; + if (mi_arena_try_purge(arena, now, force)) { + if (max_purge_count <= 1) { + all_visited = false; + break; + } max_purge_count--; } } } + if (all_visited) { + // all arena's were visited and purged: reset global expire + mi_atomic_storei64_release(&mi_arenas_purge_expire, 0); + } } } @@ -610,20 +658,24 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) Arena free ----------------------------------------------------------- */ -void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) { - mi_assert_internal(size > 0 && stats != NULL); +void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid) { + mi_assert_internal(size > 0); mi_assert_internal(committed_size <= size); if (p==NULL) return; if (size==0) return; const bool all_committed = (committed_size == size); + const size_t decommitted_size = (committed_size <= size ? size - committed_size : 0); + + // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.) + mi_track_mem_undefined(p,size); if (mi_memkind_is_os(memid.memkind)) { // was a direct OS allocation, pass through - if (!all_committed && committed_size > 0) { - // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size) - _mi_stat_decrease(&_mi_stats_main.committed, committed_size); + if (!all_committed && decommitted_size > 0) { + // if partially committed, adjust the committed stats (as `_mi_os_free` will decrease commit by the full size) + _mi_stat_increase(&_mi_stats_main.committed, decommitted_size); } - _mi_os_free(p, size, memid, stats); + _mi_os_free(p, size, memid); } else if (memid.memkind == MI_MEM_ARENA) { // allocated in an arena @@ -646,9 +698,6 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi return; } - // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.) 
- mi_track_mem_undefined(p,size); - // potentially decommit if (arena->memid.is_pinned || arena->blocks_committed == NULL) { mi_assert_internal(all_committed); @@ -658,20 +707,20 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi mi_assert_internal(arena->blocks_purge != NULL); if (!all_committed) { - // mark the entire range as no longer committed (so we recommit the full range when re-using) + // mark the entire range as no longer committed (so we will recommit the full range when re-using) _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx); mi_track_mem_noaccess(p,size); - if (committed_size > 0) { + //if (committed_size > 0) { // if partially committed, adjust the committed stats (is it will be recommitted when re-using) - // in the delayed purge, we now need to not count a decommit if the range is not marked as committed. + // in the delayed purge, we do no longer decrease the commit if the range is not marked entirely as committed. _mi_stat_decrease(&_mi_stats_main.committed, committed_size); - } + //} // note: if not all committed, it may be that the purge will reset/decommit the entire range // that contains already decommitted parts. Since purge consistently uses reset or decommit that // works (as we should never reset decommitted parts). } // (delay) purge the entire range - mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats); + mi_arena_schedule_purge(arena, bitmap_idx, blocks); } // and make it available to others again @@ -687,7 +736,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi } // purge expired decommits - mi_arenas_try_purge(false, false, stats); + mi_arenas_try_purge(false, false); } // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit` @@ -698,14 +747,15 @@ static void mi_arenas_unsafe_destroy(void) { for (size_t i = 0; i < max_arena; i++) { mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); if (arena != NULL) { + mi_lock_done(&arena->abandoned_visit_lock); if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) { mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL); - _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main); + _mi_os_free(arena->start, mi_arena_size(arena), arena->memid); } else { new_max_arena = i; } - mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size, &_mi_stats_main); + _mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size); } } @@ -715,22 +765,22 @@ static void mi_arenas_unsafe_destroy(void) { } // Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired -void _mi_arenas_collect(bool force_purge, mi_stats_t* stats) { - mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats); +void _mi_arenas_collect(bool force_purge) { + mi_arenas_try_purge(force_purge, force_purge /* visit all? */); } // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit` // for dynamic libraries that are unloaded and need to release all their allocated memory. -void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) { +void _mi_arena_unsafe_destroy_all(void) { mi_arenas_unsafe_destroy(); - _mi_arenas_collect(true /* force purge */, stats); // purge non-owned arenas + _mi_arenas_collect(true /* force purge */); // purge non-owned arenas } // Is a pointer inside any of our arenas? 
bool _mi_arena_contains(const void* p) { const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); for (size_t i = 0; i < max_arena; i++) { - mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); + mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]); if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) { return true; } @@ -738,139 +788,6 @@ bool _mi_arena_contains(const void* p) { return false; } -/* ----------------------------------------------------------- - Abandoned blocks/segments. - This is used to atomically abandon/reclaim segments - (and crosses the arena API but it is convenient to have here). - Abandoned segments still have live blocks; they get reclaimed - when a thread frees a block in it, or when a thread needs a fresh - segment; these threads scan the abandoned segments through - the arena bitmaps. ------------------------------------------------------------ */ - -// Maintain a count of all abandoned segments -static mi_decl_cache_align _Atomic(size_t)abandoned_count; - -size_t _mi_arena_segment_abandoned_count(void) { - return mi_atomic_load_relaxed(&abandoned_count); -} - -// reclaim a specific abandoned segment; `true` on success. -// sets the thread_id. -bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment ) -{ - if (segment->memid.memkind != MI_MEM_ARENA) { - // not in an arena, consider it un-abandoned now. - // but we need to still claim it atomically -- we use the thread_id for that. - size_t expected = 0; - if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected, _mi_thread_id())) { - mi_atomic_decrement_relaxed(&abandoned_count); - return true; - } - else { - return false; - } - } - // arena segment: use the blocks_abandoned bitmap. - size_t arena_idx; - size_t bitmap_idx; - mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx); - mi_assert_internal(arena_idx < MI_MAX_ARENAS); - mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]); - mi_assert_internal(arena != NULL); - bool was_marked = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx); - if (was_marked) { - mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0); - mi_atomic_decrement_relaxed(&abandoned_count); - mi_atomic_store_release(&segment->thread_id, _mi_thread_id()); - } - // mi_assert_internal(was_marked); - mi_assert_internal(!was_marked || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); - //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx)); - return was_marked; -} - -// mark a specific segment as abandoned -// clears the thread_id. 
-void _mi_arena_segment_mark_abandoned(mi_segment_t* segment) -{ - mi_atomic_store_release(&segment->thread_id, 0); - mi_assert_internal(segment->used == segment->abandoned); - if (segment->memid.memkind != MI_MEM_ARENA) { - // not in an arena; count it as abandoned and return - mi_atomic_increment_relaxed(&abandoned_count); - return; - } - size_t arena_idx; - size_t bitmap_idx; - mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx); - mi_assert_internal(arena_idx < MI_MAX_ARENAS); - mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]); - mi_assert_internal(arena != NULL); - const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL); - if (was_unmarked) { mi_atomic_increment_relaxed(&abandoned_count); } - mi_assert_internal(was_unmarked); - mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); -} - -// start a cursor at a randomized arena -void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current) { - const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); - current->start = (max_arena == 0 ? 0 : (mi_arena_id_t)( _mi_heap_random_next(heap) % max_arena)); - current->count = 0; - current->bitmap_idx = 0; -} - -// reclaim abandoned segments -// this does not set the thread id (so it appears as still abandoned) -mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous ) -{ - const int max_arena = (int)mi_atomic_load_relaxed(&mi_arena_count); - if (max_arena <= 0 || mi_atomic_load_relaxed(&abandoned_count) == 0) return NULL; - - int count = previous->count; - size_t field_idx = mi_bitmap_index_field(previous->bitmap_idx); - size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx) + 1; - // visit arena's (from previous) - for (; count < max_arena; count++, field_idx = 0, bit_idx = 0) { - mi_arena_id_t arena_idx = previous->start + count; - if (arena_idx >= max_arena) { arena_idx = arena_idx % max_arena; } // wrap around - mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]); - if (arena != NULL) { - // visit the abandoned fields (starting at previous_idx) - for ( ; field_idx < arena->field_count; field_idx++, bit_idx = 0) { - size_t field = mi_atomic_load_relaxed(&arena->blocks_abandoned[field_idx]); - if mi_unlikely(field != 0) { // skip zero fields quickly - // visit each set bit in the field (todo: maybe use `ctz` here?) 
- for ( ; bit_idx < MI_BITMAP_FIELD_BITS; bit_idx++) { - // pre-check if the bit is set - size_t mask = ((size_t)1 << bit_idx); - if mi_unlikely((field & mask) == mask) { - mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx); - // try to reclaim it atomically - if (_mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx)) { - mi_atomic_decrement_relaxed(&abandoned_count); - previous->bitmap_idx = bitmap_idx; - previous->count = count; - mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); - mi_segment_t* segment = (mi_segment_t*)mi_arena_block_start(arena, bitmap_idx); - mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0); - //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx)); - return segment; - } - } - } - } - } - } - } - // no more found - previous->bitmap_idx = 0; - previous->count = 0; - return NULL; -} - - /* ----------------------------------------------------------- Add an arena. ----------------------------------------------------------- */ @@ -896,18 +813,30 @@ static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id, mi_stats_t* static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept { if (arena_id != NULL) *arena_id = _mi_arena_id_none(); - if (size < MI_ARENA_BLOCK_SIZE) return false; - + if (size < MI_ARENA_BLOCK_SIZE) { + _mi_warning_message("the arena size is too small (memory at %p with size %zu)\n", start, size); + return false; + } if (is_large) { mi_assert_internal(memid.initially_committed && memid.is_pinned); } + if (!_mi_is_aligned(start, MI_SEGMENT_ALIGN)) { + void* const aligned_start = mi_align_up_ptr(start, MI_SEGMENT_ALIGN); + const size_t diff = (uint8_t*)aligned_start - (uint8_t*)start; + if (diff >= size || (size - diff) < MI_ARENA_BLOCK_SIZE) { + _mi_warning_message("after alignment, the size of the arena becomes too small (memory at %p with size %zu)\n", start, size); + return false; + } + start = aligned_start; + size = size - diff; + } const size_t bcount = size / MI_ARENA_BLOCK_SIZE; const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS); const size_t bitmaps = (memid.is_pinned ? 3 : 5); const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t)); mi_memid_t meta_memid; - mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS? + mi_arena_t* arena = (mi_arena_t*)_mi_arena_meta_zalloc(asize, &meta_memid); if (arena == NULL) return false; // already zero'd due to zalloc @@ -924,7 +853,8 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int arena->is_large = is_large; arena->purge_expire = 0; arena->search_idx = 0; - // consequetive bitmaps + mi_lock_init(&arena->abandoned_visit_lock); + // consecutive bitmaps arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap arena->blocks_abandoned = &arena->blocks_inuse[2 * fields]; // just after dirty bitmap arena->blocks_committed = (arena->memid.is_pinned ? 
NULL : &arena->blocks_inuse[3*fields]); // just after abandoned bitmap @@ -959,11 +889,11 @@ int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exc if (arena_id != NULL) *arena_id = _mi_arena_id_none(); size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block mi_memid_t memid; - void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main); + void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid); if (start == NULL) return ENOMEM; const bool is_large = memid.is_pinned; // todo: use separate is_large field? if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) { - _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main); + _mi_os_free_ex(start, size, commit, memid); _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024)); return ENOMEM; } @@ -988,7 +918,7 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe ----------------------------------------------------------- */ static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_t block_count, mi_bitmap_field_t* fields, size_t field_count ) { - _mi_verbose_message("%s%s:\n", prefix, header); + _mi_message("%s%s:\n", prefix, header); size_t bcount = 0; size_t inuse_count = 0; for (size_t i = 0; i < field_count; i++) { @@ -1005,37 +935,43 @@ static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_ } } buf[MI_BITMAP_FIELD_BITS] = 0; - _mi_verbose_message("%s %s\n", prefix, buf); + _mi_message("%s %s\n", prefix, buf); } - _mi_verbose_message("%s total ('x'): %zu\n", prefix, inuse_count); + _mi_message("%s total ('x'): %zu\n", prefix, inuse_count); return inuse_count; } -void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept { +void mi_debug_show_arenas(void) mi_attr_noexcept { + const bool show_inuse = true; size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count); size_t inuse_total = 0; - size_t abandoned_total = 0; - size_t purge_total = 0; + //size_t abandoned_total = 0; + //size_t purge_total = 0; for (size_t i = 0; i < max_arenas; i++) { mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]); if (arena == NULL) break; - _mi_verbose_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, MI_ARENA_BLOCK_SIZE / MI_MiB, arena->field_count, (arena->memid.is_pinned ? ", pinned" : "")); + _mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, (size_t)(MI_ARENA_BLOCK_SIZE / MI_MiB), arena->field_count, (arena->memid.is_pinned ? 
", pinned" : "")); if (show_inuse) { inuse_total += mi_debug_show_bitmap(" ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count); } if (arena->blocks_committed != NULL) { mi_debug_show_bitmap(" ", "committed blocks", arena->block_count, arena->blocks_committed, arena->field_count); } - if (show_abandoned) { - abandoned_total += mi_debug_show_bitmap(" ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count); - } - if (show_purge && arena->blocks_purge != NULL) { - purge_total += mi_debug_show_bitmap(" ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count); - } + //if (show_abandoned) { + // abandoned_total += mi_debug_show_bitmap(" ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count); + //} + //if (show_purge && arena->blocks_purge != NULL) { + // purge_total += mi_debug_show_bitmap(" ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count); + //} } - if (show_inuse) _mi_verbose_message("total inuse blocks : %zu\n", inuse_total); - if (show_abandoned) _mi_verbose_message("total abandoned blocks: %zu\n", abandoned_total); - if (show_purge) _mi_verbose_message("total purgeable blocks: %zu\n", purge_total); + if (show_inuse) _mi_message("total inuse blocks : %zu\n", inuse_total); + //if (show_abandoned) _mi_message("total abandoned blocks: %zu\n", abandoned_total); + //if (show_purge) _mi_message("total purgeable blocks: %zu\n", purge_total); +} + + +void mi_arenas_print(void) mi_attr_noexcept { + mi_debug_show_arenas(); } @@ -1059,7 +995,7 @@ int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_m _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages); if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) { - _mi_os_free(p, hsize, memid, &_mi_stats_main); + _mi_os_free(p, hsize, memid); return ENOMEM; } return 0; @@ -1074,17 +1010,17 @@ int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t if (pages == 0) return 0; // pages per numa node - size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count()); - if (numa_count <= 0) numa_count = 1; + int numa_count = (numa_nodes > 0 && numa_nodes <= INT_MAX ? (int)numa_nodes : _mi_os_numa_node_count()); + if (numa_count == 0) numa_count = 1; const size_t pages_per = pages / numa_count; const size_t pages_mod = pages % numa_count; const size_t timeout_per = (timeout_msecs==0 ? 
0 : (timeout_msecs / numa_count) + 50); // reserve evenly among numa nodes - for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) { + for (int numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) { size_t node_pages = pages_per; // can be 0 - if (numa_node < pages_mod) node_pages++; - int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per); + if ((size_t)numa_node < pages_mod) node_pages++; + int err = mi_reserve_huge_os_pages_at(node_pages, numa_node, timeout_per); if (err) return err; if (pages < node_pages) { pages = 0; @@ -1105,4 +1041,3 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv if (err==0 && pages_reserved!=NULL) *pages_reserved = pages; return err; } - diff --git a/system/lib/mimalloc/src/bitmap.c b/system/lib/mimalloc/src/bitmap.c index 4b6be66bcd2c9..32d1e9548d3e3 100644 --- a/system/lib/mimalloc/src/bitmap.c +++ b/system/lib/mimalloc/src/bitmap.c @@ -81,7 +81,7 @@ inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, cons // on to the next bit range #ifdef MI_HAVE_FAST_BITSCAN mi_assert_internal(mapm != 0); - const size_t shift = (count == 1 ? 1 : (MI_INTPTR_BITS - mi_clz(mapm) - bitidx)); + const size_t shift = (count == 1 ? 1 : (MI_SIZE_BITS - mi_clz(mapm) - bitidx)); mi_assert_internal(shift > 0 && shift <= count); #else const size_t shift = 1; @@ -164,7 +164,7 @@ static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size return ((field & mask) == mask); } -// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. +// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. // Returns `true` if successful when all previous `count` bits were 0. bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { const size_t idx = mi_bitmap_index_field(bitmap_idx); @@ -172,9 +172,9 @@ bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count const size_t mask = mi_bitmap_mask_(count, bitidx); mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); size_t expected = mi_atomic_load_relaxed(&bitmap[idx]); - do { + do { if ((expected & mask) != 0) return false; - } + } while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask)); mi_assert_internal((expected & mask) == 0); return true; @@ -200,7 +200,7 @@ bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t // Try to atomically claim a sequence of `count` bits starting from the field // at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success. 
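/* [editor's note -- illustrative sketch, not part of the upstream patch]
   The huge-page interleave loop above splits `pages` evenly over the NUMA
   nodes, giving the first (pages % numa_count) nodes one extra page. The
   helper below is plain arithmetic, not a mimalloc internal. */
#include <stddef.h>

static size_t demo_pages_for_node(size_t pages, int numa_count, int numa_node) {
  const size_t per = pages / (size_t)numa_count;   // even share per node
  const size_t mod = pages % (size_t)numa_count;   // remainder spread over the first nodes
  return per + ((size_t)numa_node < mod ? 1 : 0);
}
// e.g. 10 pages over 4 nodes -> 3, 3, 2, 2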
// Only needs to consider crossing into the next fields (see `mi_bitmap_try_find_from_claim_across`) -static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) +static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx) { mi_assert_internal(bitmap_idx != NULL); @@ -212,7 +212,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit if (initial == 0) return false; if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx); // no need to cross fields (this case won't happen for us) if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false; // not enough entries - + // scan ahead size_t found = initial; size_t mask = 0; // mask bits for the final field @@ -260,7 +260,6 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); // claimed! - mi_stat_counter_increase(stats->arena_crossover_count,1); *bitmap_idx = mi_bitmap_index_create(idx, initial_idx); return true; @@ -280,10 +279,10 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit newmap = (map & ~initial_mask); } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); } - mi_stat_counter_increase(stats->arena_rollback_count,1); + mi_stat_counter_increase(_mi_stats_main.arena_rollback_count,1); // retry? (we make a recursive call instead of goto to be able to use const declarations) if (retries <= 2) { - return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx, stats); + return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx); } else { return false; @@ -293,7 +292,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit // Find `count` bits of zeros and set them to 1 atomically; returns `true` on success. // Starts at idx, and wraps around to search in all `bitmap_fields` fields. -bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) { +bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) { mi_assert_internal(count > 0); if (count <= 2) { // we don't bother with crossover fields for small counts @@ -313,7 +312,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm } */ // if that fails, then try to claim across fields - if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx, stats)) { + if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx)) { return true; } } @@ -370,7 +369,7 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t // Set `count` bits at `bitmap_idx` to 1 atomically // Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. 
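/* [editor's note -- illustrative sketch, not part of the upstream patch]
   The updated `_mi_bitmap_claim_across` in the hunk that follows also reports
   how many bits inside the claimed range were already set, by popcounting the
   previous field value under each mask. A standalone sketch of that counting;
   `demo_popcount` is a portable stand-in for `mi_popcount`. */
#include <stddef.h>

static size_t demo_popcount(size_t x) {
  size_t n = 0;
  while (x != 0) { x &= (x - 1); n++; }  // clear the lowest set bit each round
  return n;
}

// How many bits covered by `mask` were already set in `field` before claiming them?
static size_t demo_already_set(size_t field, size_t mask) {
  return demo_popcount(field & mask);
}
// e.g. demo_already_set(0xF0, 0x3C) == 2 (only bits 4 and 5 of the mask were set)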
-bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) { +bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero, size_t* already_set) { size_t idx = mi_bitmap_index_field(bitmap_idx); size_t pre_mask; size_t mid_mask; @@ -378,28 +377,31 @@ bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t co size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); bool all_zero = true; bool any_zero = false; + size_t one_count = 0; _Atomic(size_t)*field = &bitmap[idx]; size_t prev = mi_atomic_or_acq_rel(field++, pre_mask); - if ((prev & pre_mask) != 0) all_zero = false; + if ((prev & pre_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & pre_mask); } if ((prev & pre_mask) != pre_mask) any_zero = true; while (mid_count-- > 0) { prev = mi_atomic_or_acq_rel(field++, mid_mask); - if ((prev & mid_mask) != 0) all_zero = false; + if ((prev & mid_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & mid_mask); } if ((prev & mid_mask) != mid_mask) any_zero = true; } if (post_mask!=0) { prev = mi_atomic_or_acq_rel(field, post_mask); - if ((prev & post_mask) != 0) all_zero = false; + if ((prev & post_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & post_mask); } if ((prev & post_mask) != post_mask) any_zero = true; } if (pany_zero != NULL) { *pany_zero = any_zero; } + if (already_set != NULL) { *already_set = one_count; }; + mi_assert_internal(all_zero ? one_count == 0 : one_count <= count); return all_zero; } // Returns `true` if all `count` bits were 1. // `any_ones` is `true` if there was at least one bit set to one. -static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) { +static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones, size_t* already_set) { size_t idx = mi_bitmap_index_field(bitmap_idx); size_t pre_mask; size_t mid_mask; @@ -407,30 +409,33 @@ static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_field size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); bool all_ones = true; bool any_ones = false; + size_t one_count = 0; mi_bitmap_field_t* field = &bitmap[idx]; size_t prev = mi_atomic_load_relaxed(field++); if ((prev & pre_mask) != pre_mask) all_ones = false; - if ((prev & pre_mask) != 0) any_ones = true; + if ((prev & pre_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & pre_mask); } while (mid_count-- > 0) { prev = mi_atomic_load_relaxed(field++); if ((prev & mid_mask) != mid_mask) all_ones = false; - if ((prev & mid_mask) != 0) any_ones = true; + if ((prev & mid_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & mid_mask); } } if (post_mask!=0) { prev = mi_atomic_load_relaxed(field); if ((prev & post_mask) != post_mask) all_ones = false; - if ((prev & post_mask) != 0) any_ones = true; + if ((prev & post_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & post_mask); } } if (pany_ones != NULL) { *pany_ones = any_ones; } + if (already_set != NULL) { *already_set = one_count; } + mi_assert_internal(all_ones ? 
one_count == count : one_count < count); return all_ones; } -bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { - return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL); +bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, size_t* already_set) { + return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL, already_set); } bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { bool any_ones; - mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones); + mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones, NULL); return any_ones; } diff --git a/system/lib/mimalloc/src/bitmap.h b/system/lib/mimalloc/src/bitmap.h index d8316b83f40f1..0f4744f4fc3ff 100644 --- a/system/lib/mimalloc/src/bitmap.h +++ b/system/lib/mimalloc/src/bitmap.h @@ -35,9 +35,13 @@ typedef mi_bitmap_field_t* mi_bitmap_t; typedef size_t mi_bitmap_index_t; // Create a bit index. +static inline mi_bitmap_index_t mi_bitmap_index_create_ex(size_t idx, size_t bitidx) { + mi_assert_internal(bitidx <= MI_BITMAP_FIELD_BITS); + return (idx*MI_BITMAP_FIELD_BITS) + bitidx; +} static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) { mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS); - return (idx*MI_BITMAP_FIELD_BITS) + bitidx; + return mi_bitmap_index_create_ex(idx,bitidx); } // Create a bit index. @@ -99,7 +103,7 @@ bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t // Find `count` bits of zeros and set them to 1 atomically; returns `true` on success. // Starts at idx, and wraps around to search in all `bitmap_fields` fields. -bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats); +bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx); // Set `count` bits at `bitmap_idx` to 0 atomically // Returns `true` if all `count` bits were 1 previously. @@ -107,9 +111,9 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t // Set `count` bits at `bitmap_idx` to 1 atomically // Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. -bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero); +bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero, size_t* already_set); -bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); +bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, size_t* already_set); bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); #endif diff --git a/system/lib/mimalloc/src/free.c b/system/lib/mimalloc/src/free.c index b9cb634616958..5e5ae443f3a3a 100644 --- a/system/lib/mimalloc/src/free.c +++ b/system/lib/mimalloc/src/free.c @@ -9,7 +9,6 @@ terms of the MIT license. 
A copy of the license can be found in the file // add includes help an IDE #include "mimalloc.h" #include "mimalloc/internal.h" -#include "mimalloc/atomic.h" #include "mimalloc/prim.h" // _mi_prim_thread_id() #endif @@ -35,7 +34,7 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool if mi_unlikely(mi_check_is_double_free(page, block)) return; mi_check_padding(page, block); if (track_stats) { mi_stat_free(page, block); } - #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN + #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_GUARDED if (!mi_page_is_huge(page)) { // huge page content may be already decommitted memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); } @@ -54,8 +53,8 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool } // Adjust a block that was allocated aligned, to the actual start of the block in the page. -// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the -// `page_start` and `block_size` fields; however these are constant and the page won't be +// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the +// `page_start` and `block_size` fields; however these are constant and the page won't be // deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently. mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) { mi_assert_internal(page!=NULL && p!=NULL); @@ -72,16 +71,30 @@ mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) { return (mi_block_t*)((uintptr_t)p - adjust); } +// forward declaration for a MI_GUARDED build +#if MI_GUARDED +static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p); // forward declaration +static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) { + if (mi_block_ptr_is_guarded(block, p)) { mi_block_unguard(page, block, p); } +} +#else +static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) { + MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(p); +} +#endif + // free a local pointer (page parameter comes first for better codegen) static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept { MI_UNUSED(segment); mi_block_t* const block = (mi_page_has_aligned(page) ? 
_mi_page_ptr_unalign(page, p) : (mi_block_t*)p); + mi_block_check_unguard(page, block, p); mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */); } // free a pointer owned by another thread (page parameter comes first for better codegen) static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept { mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865) + mi_block_check_unguard(page, block, p); mi_free_block_mt(page, segment, block); } @@ -98,17 +111,17 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms { MI_UNUSED(msg); -#if (MI_DEBUG>0) - if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) { + #if (MI_DEBUG>0) + if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0 && !mi_option_is_enabled(mi_option_guarded_precise)) { _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p); return NULL; } -#endif + #endif mi_segment_t* const segment = _mi_ptr_segment(p); if mi_unlikely(segment==NULL) return segment; -#if (MI_DEBUG>0) + #if (MI_DEBUG>0) if mi_unlikely(!mi_is_in_heap_region(p)) { #if (MI_INTPTR_SIZE == 8 && defined(__linux__)) if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640) @@ -122,13 +135,13 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms } } } -#endif -#if (MI_DEBUG>0 || MI_SECURE>=4) + #endif + #if (MI_DEBUG>0 || MI_SECURE>=4) if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) { _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p); return NULL; } -#endif + #endif return segment; } @@ -240,15 +253,17 @@ static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block) { // first see if the segment was abandoned and if we can reclaim it into our thread - if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) && + if (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 && #if MI_HUGE_PAGE_ABANDON segment->page_kind != MI_PAGE_HUGE && #endif - mi_atomic_load_relaxed(&segment->thread_id) == 0) + mi_atomic_load_relaxed(&segment->thread_id) == 0 && // segment is abandoned? 
+ mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty) // and we did not already exit this thread (without this check, a fresh heap will be initalized (issue #944)) { // the segment is abandoned, try to reclaim it into our heap if (_mi_segment_attempt_reclaim(mi_heap_get_default(), segment)) { - mi_assert_internal(_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id)); + mi_assert_internal(_mi_thread_id() == mi_atomic_load_relaxed(&segment->thread_id)); + mi_assert_internal(mi_heap_get_default()->tld->segments.subproc == segment->subproc); mi_free(block); // recursively free as now it will be a local free in our heap return; } @@ -299,7 +314,13 @@ static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* p const size_t size = mi_page_usable_size_of(page, block); const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block; mi_assert_internal(adjust >= 0 && (size_t)adjust <= size); - return (size - adjust); + const size_t aligned_size = (size - adjust); + #if MI_GUARDED + if (mi_block_ptr_is_guarded(block, p)) { + return aligned_size - _mi_os_page_size(); + } + #endif + return aligned_size; } static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept { @@ -327,7 +348,10 @@ mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept { void mi_free_size(void* p, size_t size) mi_attr_noexcept { MI_UNUSED_RELEASE(size); - mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size")); + #if MI_DEBUG + const size_t available = _mi_usable_size(p,"mi_free_size"); + mi_assert(p == NULL || size <= available || available == 0 /* invalid pointer */ ); + #endif mi_free(p); } @@ -409,7 +433,7 @@ static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* bloc uintptr_t keys[2]; keys[0] = page->keys[0]; keys[1] = page->keys[1]; - bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize); + bool ok = (mi_ptr_encode_canary(page,block,keys) == canary && *delta <= *bsize); mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); return ok; } @@ -501,26 +525,24 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { // only maintain stats for smaller objects if requested #if (MI_STAT>0) static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { - #if (MI_STAT < 2) MI_UNUSED(block); - #endif mi_heap_t* const heap = mi_heap_get_default(); const size_t bsize = mi_page_usable_block_size(page); - #if (MI_STAT>1) - const size_t usize = mi_page_usable_size_of(page, block); - mi_heap_stat_decrease(heap, malloc, usize); - #endif + // #if (MI_STAT>1) + // const size_t usize = mi_page_usable_size_of(page, block); + // mi_heap_stat_decrease(heap, malloc_requested, usize); + // #endif if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, normal, bsize); + mi_heap_stat_decrease(heap, malloc_normal, bsize); #if (MI_STAT > 1) - mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1); + mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], 1); #endif } - else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, large, bsize); - } + //else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + // mi_heap_stat_decrease(heap, malloc_large, bsize); + //} else { - mi_heap_stat_decrease(heap, huge, bsize); + mi_heap_stat_decrease(heap, malloc_huge, bsize); } } #else @@ -528,3 +550,23 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { MI_UNUSED(page); MI_UNUSED(block); } #endif + + +// Remove guard page when building with 
MI_GUARDED +#if MI_GUARDED +static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) { + MI_UNUSED(p); + mi_assert_internal(mi_block_ptr_is_guarded(block, p)); + mi_assert_internal(mi_page_has_aligned(page)); + mi_assert_internal((uint8_t*)p - (uint8_t*)block >= (ptrdiff_t)sizeof(mi_block_t)); + mi_assert_internal(block->next == MI_BLOCK_TAG_GUARDED); + + const size_t bsize = mi_page_block_size(page); + const size_t psize = _mi_os_page_size(); + mi_assert_internal(bsize > psize); + mi_assert_internal(_mi_page_segment(page)->allow_decommit); + void* gpage = (uint8_t*)block + bsize - psize; + mi_assert_internal(_mi_is_aligned(gpage, psize)); + _mi_os_unprotect(gpage, psize); +} +#endif diff --git a/system/lib/mimalloc/src/heap.c b/system/lib/mimalloc/src/heap.c index e498fdb2093fb..f96e60d0f8d94 100644 --- a/system/lib/mimalloc/src/heap.c +++ b/system/lib/mimalloc/src/heap.c @@ -59,7 +59,7 @@ static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ MI_UNUSED(pq); mi_assert_internal(mi_page_heap(page) == heap); mi_segment_t* segment = _mi_page_segment(page); - mi_assert_internal(segment->thread_id == heap->thread_id); + mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == heap->thread_id); mi_assert_expensive(_mi_page_is_valid(page)); return true; } @@ -98,7 +98,7 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t if (collect == MI_FORCE) { // note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment. mi_segment_t* segment = _mi_page_segment(page); - _mi_segment_collect(segment, true /* force? */, &heap->tld->segments); + _mi_segment_collect(segment, true /* force? */); } if (mi_page_all_free(page)) { // no more used blocks, free the page. @@ -143,6 +143,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) if (force_main) { // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments. // if all memory is freed by now, all segments should be freed. + // note: this only collects in the current subprocess _mi_abandoned_reclaim_all(heap, &heap->tld->segments); } @@ -165,14 +166,17 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list) // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments); - + // if forced, collect thread data cache on program-exit (or shared library unload) if (force && is_main_thread && mi_heap_is_backing(heap)) { _mi_thread_data_collect(); // collect thread data cache } - + // collect arenas (this is program wide so don't force purges on abandonment of threads) - _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats); + _mi_arenas_collect(collect == MI_FORCE /* force purge? 
*/); + + // merge statistics + if (collect <= MI_FORCE) { _mi_stats_merge_thread(heap->tld); } } void _mi_heap_collect_abandon(mi_heap_t* heap) { @@ -227,22 +231,28 @@ void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool heap->cookie = _mi_heap_random_next(heap) | 1; heap->keys[0] = _mi_heap_random_next(heap); heap->keys[1] = _mi_heap_random_next(heap); + _mi_heap_guarded_init(heap); // push on the thread local heaps list heap->next = heap->tld->heaps; heap->tld->heaps = heap; } -mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) { +mi_decl_nodiscard mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id) { mi_heap_t* bheap = mi_heap_get_backing(); mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode? if (heap == NULL) return NULL; - // don't reclaim abandoned pages or otherwise destroy is unsafe - _mi_heap_init(heap, bheap->tld, arena_id, true /* no reclaim */, 0 /* default tag */); + mi_assert(heap_tag >= 0 && heap_tag < 256); + _mi_heap_init(heap, bheap->tld, arena_id, allow_destroy /* no reclaim? */, (uint8_t)heap_tag /* heap tag */); return heap; } +mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) { + return mi_heap_new_ex(0 /* default heap tag */, false /* don't allow `mi_heap_destroy` */, arena_id); +} + mi_decl_nodiscard mi_heap_t* mi_heap_new(void) { - return mi_heap_new_in_arena(_mi_arena_id_none()); + // don't reclaim abandoned memory or otherwise destroy is unsafe + return mi_heap_new_ex(0 /* default heap tag */, true /* no reclaim */, _mi_arena_id_none()); } bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) { @@ -324,24 +334,25 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ // stats const size_t bsize = mi_page_block_size(page); if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) { - if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, large, bsize); - } - else { - mi_heap_stat_decrease(heap, huge, bsize); + //if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + // mi_heap_stat_decrease(heap, malloc_large, bsize); + //} + //else + { + mi_heap_stat_decrease(heap, malloc_huge, bsize); } } -#if (MI_STAT) + #if (MI_STAT>0) _mi_page_free_collect(page, false); // update used count const size_t inuse = page->used; if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, normal, bsize * inuse); -#if (MI_STAT>1) - mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse); -#endif + mi_heap_stat_decrease(heap, malloc_normal, bsize * inuse); + #if (MI_STAT>1) + mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], inuse); + #endif } - mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks... -#endif + // mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse); // todo: off for aligned blocks... 
+ #endif /// pretend it is all free now mi_assert_internal(mi_page_thread_free(page) == NULL); @@ -375,7 +386,13 @@ void mi_heap_destroy(mi_heap_t* heap) { mi_assert(heap->no_reclaim); mi_assert_expensive(mi_heap_is_valid(heap)); if (heap==NULL || !mi_heap_is_initialized(heap)) return; + #if MI_GUARDED + // _mi_warning_message("'mi_heap_destroy' called but MI_GUARDED is enabled -- using `mi_heap_delete` instead (heap at %p)\n", heap); + mi_heap_delete(heap); + return; + #else if (!heap->no_reclaim) { + _mi_warning_message("'mi_heap_destroy' called but ignored as the heap was not created with 'allow_destroy' (heap at %p)\n", heap); // don't free in case it may contain reclaimed pages mi_heap_delete(heap); } @@ -388,12 +405,14 @@ void mi_heap_destroy(mi_heap_t* heap) { _mi_heap_destroy_pages(heap); mi_heap_free(heap); } + #endif } // forcefully destroy all heaps in the current thread -void _mi_heap_unsafe_destroy_all(void) { - mi_heap_t* bheap = mi_heap_get_backing(); - mi_heap_t* curr = bheap->tld->heaps; +void _mi_heap_unsafe_destroy_all(mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + if (heap == NULL) return; + mi_heap_t* curr = heap->tld->heaps; while (curr != NULL) { mi_heap_t* next = curr->next; if (curr->no_reclaim) { @@ -444,6 +463,12 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) { mi_heap_reset_pages(from); } +// are two heaps compatible with respect to heap-tag, exclusive arena etc. +static bool mi_heaps_are_compatible(mi_heap_t* heap1, mi_heap_t* heap2) { + return (heap1->tag == heap2->tag && // store same kind of objects + heap1->arena_id == heap2->arena_id); // same arena preference +} + // Safe delete a heap without freeing any still allocated blocks in that heap. void mi_heap_delete(mi_heap_t* heap) { @@ -452,9 +477,10 @@ void mi_heap_delete(mi_heap_t* heap) mi_assert_expensive(mi_heap_is_valid(heap)); if (heap==NULL || !mi_heap_is_initialized(heap)) return; - if (!mi_heap_is_backing(heap)) { + mi_heap_t* bheap = heap->tld->heap_backing; + if (bheap != heap && mi_heaps_are_compatible(bheap,heap)) { // transfer still used pages to the backing heap - mi_heap_absorb(heap->tld->heap_backing, heap); + mi_heap_absorb(bheap, heap); } else { // the backing heap abandons its pages @@ -527,54 +553,97 @@ bool mi_check_owned(const void* p) { enable visiting all blocks of all heaps across threads ----------------------------------------------------------- */ -// Separate struct to keep `mi_page_t` out of the public interface -typedef struct mi_heap_area_ex_s { - mi_heap_area_t area; - mi_page_t* page; -} mi_heap_area_ex_t; +void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) { + const size_t bsize = mi_page_block_size(page); + const size_t ubsize = mi_page_usable_block_size(page); + area->reserved = page->reserved * bsize; + area->committed = page->capacity * bsize; + area->blocks = mi_page_start(page); + area->used = page->used; // number of blocks in use (#553) + area->block_size = ubsize; + area->full_block_size = bsize; + area->heap_tag = page->heap_tag; +} + + +static void mi_get_fast_divisor(size_t divisor, uint64_t* magic, size_t* shift) { + mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX); + *shift = MI_SIZE_BITS - mi_clz(divisor - 1); + *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - divisor)) / divisor + 1); +} -static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) { - mi_assert(xarea != NULL); - if (xarea==NULL) return true; - const mi_heap_area_t* area = 
&xarea->area; - mi_page_t* page = xarea->page; +static size_t mi_fast_divide(size_t n, uint64_t magic, size_t shift) { + mi_assert_internal(n <= UINT32_MAX); + const uint64_t hi = ((uint64_t)n * magic) >> 32; + return (size_t)((hi + n) >> shift); +} + +bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg) { + mi_assert(area != NULL); + if (area==NULL) return true; mi_assert(page != NULL); if (page == NULL) return true; - _mi_page_free_collect(page,true); + _mi_page_free_collect(page,true); // collect both thread_delayed and local_free mi_assert_internal(page->local_free == NULL); if (page->used == 0) return true; - const size_t bsize = mi_page_block_size(page); - const size_t ubsize = mi_page_usable_block_size(page); // without padding - size_t psize; - uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize); + size_t psize; + uint8_t* const pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize); + mi_heap_t* const heap = mi_page_heap(page); + const size_t bsize = mi_page_block_size(page); + const size_t ubsize = mi_page_usable_block_size(page); // without padding + // optimize page with one block if (page->capacity == 1) { - // optimize page with one block mi_assert_internal(page->used == 1 && page->free == NULL); return visitor(mi_page_heap(page), area, pstart, ubsize, arg); } + mi_assert(bsize <= UINT32_MAX); + + // optimize full pages + if (page->used == page->capacity) { + uint8_t* block = pstart; + for (size_t i = 0; i < page->capacity; i++) { + if (!visitor(heap, area, block, ubsize, arg)) return false; + block += bsize; + } + return true; + } // create a bitmap of free blocks. #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*)) - uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)]; - memset(free_map, 0, sizeof(free_map)); + uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS]; + const uintptr_t bmapsize = _mi_divide_up(page->capacity, MI_INTPTR_BITS); + memset(free_map, 0, bmapsize * sizeof(intptr_t)); + if (page->capacity % MI_INTPTR_BITS != 0) { + // mark left-over bits at the end as free + size_t shift = (page->capacity % MI_INTPTR_BITS); + uintptr_t mask = (UINTPTR_MAX << shift); + free_map[bmapsize - 1] = mask; + } + + // fast repeated division by the block size + uint64_t magic; + size_t shift; + mi_get_fast_divisor(bsize, &magic, &shift); #if MI_DEBUG>1 size_t free_count = 0; #endif - for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) { + for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) { #if MI_DEBUG>1 free_count++; #endif mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize)); size_t offset = (uint8_t*)block - pstart; mi_assert_internal(offset % bsize == 0); - size_t blockidx = offset / bsize; // Todo: avoid division? 
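The visitor rework above replaces the per-block `offset / bsize` division with a precomputed multiply-and-shift (`mi_get_fast_divisor` / `mi_fast_divide`), relying on the asserts that offsets and block sizes fit in 32 bits. A self-checking sketch of the same scheme (assumed helper names and GCC/Clang `__builtin_clzll`; not mimalloc code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Precompute (magic, shift) so that n / d == (((n * magic) >> 32) + n) >> shift
// for all 0 <= n <= UINT32_MAX; this sketch requires 2 <= d <= UINT32_MAX.
static void fast_divisor(uint32_t d, uint64_t* magic, unsigned* shift) {
  assert(d >= 2);
  *shift = 64u - (unsigned)__builtin_clzll((uint64_t)d - 1);                 // ceil(log2(d))
  *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - d)) / d) + 1;  // rounded-up reciprocal
}

static uint64_t fast_divide(uint64_t n, uint64_t magic, unsigned shift) {
  const uint64_t hi = (n * magic) >> 32;  // high part of the product; both operands fit in 32 bits
  return (hi + n) >> shift;
}

int main(void) {
  for (uint32_t d = 2; d <= 4096; d++) {
    uint64_t magic; unsigned shift;
    fast_divisor(d, &magic, &shift);
    for (uint64_t n = 0; n <= 1000000; n += 97) {
      assert(fast_divide(n, magic, shift) == n / d);                  // must agree with plain division
    }
    assert(fast_divide(UINT32_MAX, magic, shift) == UINT32_MAX / d);  // boundary value
  }
  printf("multiply-and-shift division matches n/d\n");
  return 0;
}

The magic value is the rounded-up reciprocal 2^(32+shift)/d, so one 64-bit multiply, an add, and a shift reproduce exact truncating division for 32-bit inputs; the debug assert in the diff (`blockidx == offset / bsize`) performs the same cross-check inside mimalloc.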
- mi_assert_internal( blockidx < MI_MAX_BLOCKS); - size_t bitidx = (blockidx / sizeof(uintptr_t)); - size_t bit = blockidx - (bitidx * sizeof(uintptr_t)); + mi_assert_internal(offset <= UINT32_MAX); + size_t blockidx = mi_fast_divide(offset, magic, shift); + mi_assert_internal(blockidx == offset / bsize); + mi_assert_internal(blockidx < MI_MAX_BLOCKS); + size_t bitidx = (blockidx / MI_INTPTR_BITS); + size_t bit = blockidx - (bitidx * MI_INTPTR_BITS); free_map[bitidx] |= ((uintptr_t)1 << bit); } mi_assert_internal(page->capacity == (free_count + page->used)); @@ -583,42 +652,53 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v #if MI_DEBUG>1 size_t used_count = 0; #endif - for (size_t i = 0; i < page->capacity; i++) { - size_t bitidx = (i / sizeof(uintptr_t)); - size_t bit = i - (bitidx * sizeof(uintptr_t)); - uintptr_t m = free_map[bitidx]; - if (bit == 0 && m == UINTPTR_MAX) { - i += (sizeof(uintptr_t) - 1); // skip a run of free blocks + uint8_t* block = pstart; + for (size_t i = 0; i < bmapsize; i++) { + if (free_map[i] == 0) { + // every block is in use + for (size_t j = 0; j < MI_INTPTR_BITS; j++) { + #if MI_DEBUG>1 + used_count++; + #endif + if (!visitor(heap, area, block, ubsize, arg)) return false; + block += bsize; + } } - else if ((m & ((uintptr_t)1 << bit)) == 0) { - #if MI_DEBUG>1 - used_count++; - #endif - uint8_t* block = pstart + (i * bsize); - if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false; + else { + // visit the used blocks in the mask + uintptr_t m = ~free_map[i]; + while (m != 0) { + #if MI_DEBUG>1 + used_count++; + #endif + size_t bitidx = mi_ctz(m); + if (!visitor(heap, area, block + (bitidx * bsize), ubsize, arg)) return false; + m &= m - 1; // clear least significant bit + } + block += bsize * MI_INTPTR_BITS; } } mi_assert_internal(page->used == used_count); return true; } -typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg); +// Separate struct to keep `mi_page_t` out of the public interface +typedef struct mi_heap_area_ex_s { + mi_heap_area_t area; + mi_page_t* page; +} mi_heap_area_ex_t; + +typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg); + static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) { MI_UNUSED(heap); MI_UNUSED(pq); mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun; mi_heap_area_ex_t xarea; - const size_t bsize = mi_page_block_size(page); - const size_t ubsize = mi_page_usable_block_size(page); xarea.page = page; - xarea.area.reserved = page->reserved * bsize; - xarea.area.committed = page->capacity * bsize; - xarea.area.blocks = mi_page_start(page); - xarea.area.used = page->used; // number of blocks in use (#553) - xarea.area.block_size = ubsize; - xarea.area.full_block_size = bsize; + _mi_heap_area_init(&xarea.area, page); return fun(heap, &xarea, arg); } @@ -639,7 +719,7 @@ static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg; if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false; if (args->visit_blocks) { - return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg); + return _mi_heap_area_visit_blocks(&xarea->area, xarea->page, args->visitor, args->arg); } else { return true; diff --git a/system/lib/mimalloc/src/init.c b/system/lib/mimalloc/src/init.c index 
6f51ca8923c33..3fc8b033695a3 100644 --- a/system/lib/mimalloc/src/init.c +++ b/system/lib/mimalloc/src/init.c @@ -67,29 +67,25 @@ const mi_page_t _mi_page_empty = { QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \ QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ } -#define MI_STAT_COUNT_NULL() {0,0,0,0} +#define MI_STAT_COUNT_NULL() {0,0,0} // Empty statistics -#if MI_STAT>1 -#define MI_STAT_COUNT_END_NULL() , { MI_STAT_COUNT_NULL(), MI_INIT32(MI_STAT_COUNT_NULL) } -#else -#define MI_STAT_COUNT_END_NULL() -#endif - #define MI_STATS_NULL \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), \ - { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ - { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ - { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ - { 0, 0 } \ - MI_STAT_COUNT_END_NULL() + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + { 0 }, { 0 }, { 0 }, { 0 }, \ + { 0 }, { 0 }, { 0 }, { 0 }, \ + \ + { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, \ + MI_INIT4(MI_STAT_COUNT_NULL), \ + { 0 }, { 0 }, { 0 }, { 0 }, \ + \ + { MI_INIT4(MI_STAT_COUNT_NULL) }, \ + { { 0 }, { 0 }, { 0 }, { 0 } }, \ + \ + { MI_INIT74(MI_STAT_COUNT_NULL) }, \ + { MI_INIT74(MI_STAT_COUNT_NULL) } // Empty slice span queues for every bin @@ -122,23 +118,27 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = { { {0}, {0}, 0, true }, // random 0, // page count MI_BIN_FULL, 0, // page retired min/max + 0, 0, // generic count NULL, // next false, // can reclaim 0, // tag + #if MI_GUARDED + 0, 0, 0, 1, // count is 1 so we never write to it (see `internal.h:mi_heap_malloc_use_guarded`) + #endif MI_SMALL_PAGES_EMPTY, MI_PAGE_QUEUES_EMPTY }; +static mi_decl_cache_align mi_subproc_t mi_subproc_default; + #define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats))) -#define tld_empty_os ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os))) mi_decl_cache_align static const mi_tld_t tld_empty = { 0, false, NULL, NULL, - { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments - { 0, tld_empty_stats }, // os - { MI_STATS_NULL } // stats + { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, tld_empty_stats }, // segments + { MI_STAT_VERSION, MI_STATS_NULL } // stats }; mi_threadid_t _mi_thread_id(void) mi_attr_noexcept { @@ -148,17 +148,16 @@ mi_threadid_t _mi_thread_id(void) mi_attr_noexcept { // the thread-local default heap for allocation mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty; -extern mi_heap_t _mi_heap_main; +extern mi_decl_hidden mi_heap_t _mi_heap_main; -static mi_tld_t tld_main = { +static mi_decl_cache_align mi_tld_t tld_main = { 0, false, &_mi_heap_main, & _mi_heap_main, - { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments - { 0, &tld_main.stats }, // os - { MI_STATS_NULL } // stats + { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, &tld_main.stats }, // segments + { MI_STAT_VERSION, MI_STATS_NULL } // stats }; -mi_heap_t _mi_heap_main = { 
+mi_decl_cache_align mi_heap_t _mi_heap_main = { &tld_main, MI_ATOMIC_VAR_INIT(NULL), 0, // thread id @@ -168,16 +167,58 @@ mi_heap_t _mi_heap_main = { { {0x846ca68b}, {0}, 0, true }, // random 0, // page count MI_BIN_FULL, 0, // page retired min/max + 0, 0, // generic count NULL, // next heap false, // can reclaim 0, // tag + #if MI_GUARDED + 0, 0, 0, 0, + #endif MI_SMALL_PAGES_EMPTY, MI_PAGE_QUEUES_EMPTY }; bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`. -mi_stats_t _mi_stats_main = { MI_STATS_NULL }; +mi_stats_t _mi_stats_main = { MI_STAT_VERSION, MI_STATS_NULL }; + +#if MI_GUARDED +mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) { + heap->guarded_sample_rate = sample_rate; + heap->guarded_sample_count = sample_rate; // count down samples + if (heap->guarded_sample_rate > 1) { + if (seed == 0) { + seed = _mi_heap_random_next(heap); + } + heap->guarded_sample_count = (seed % heap->guarded_sample_rate) + 1; // start at random count between 1 and `sample_rate` + } +} + +mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) { + heap->guarded_size_min = min; + heap->guarded_size_max = (min > max ? min : max); +} + +void _mi_heap_guarded_init(mi_heap_t* heap) { + mi_heap_guarded_set_sample_rate(heap, + (size_t)mi_option_get_clamp(mi_option_guarded_sample_rate, 0, LONG_MAX), + (size_t)mi_option_get(mi_option_guarded_sample_seed)); + mi_heap_guarded_set_size_bound(heap, + (size_t)mi_option_get_clamp(mi_option_guarded_min, 0, LONG_MAX), + (size_t)mi_option_get_clamp(mi_option_guarded_max, 0, LONG_MAX) ); +} +#else +mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) { + MI_UNUSED(heap); MI_UNUSED(sample_rate); MI_UNUSED(seed); +} + +mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) { + MI_UNUSED(heap); MI_UNUSED(min); MI_UNUSED(max); +} +void _mi_heap_guarded_init(mi_heap_t* heap) { + MI_UNUSED(heap); +} +#endif static void mi_heap_main_init(void) { @@ -192,6 +233,9 @@ static void mi_heap_main_init(void) { _mi_heap_main.cookie = _mi_heap_random_next(&_mi_heap_main); _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main); _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main); + mi_lock_init(&mi_subproc_default.abandoned_os_lock); + mi_lock_init(&mi_subproc_default.abandoned_os_visit_lock); + _mi_heap_guarded_init(&_mi_heap_main); } } @@ -200,6 +244,56 @@ mi_heap_t* _mi_heap_main_get(void) { return &_mi_heap_main; } +/* ----------------------------------------------------------- + Sub process +----------------------------------------------------------- */ + +mi_subproc_id_t mi_subproc_main(void) { + return NULL; +} + +mi_subproc_id_t mi_subproc_new(void) { + mi_memid_t memid = _mi_memid_none(); + mi_subproc_t* subproc = (mi_subproc_t*)_mi_arena_meta_zalloc(sizeof(mi_subproc_t), &memid); + if (subproc == NULL) return NULL; + subproc->memid = memid; + subproc->abandoned_os_list = NULL; + mi_lock_init(&subproc->abandoned_os_lock); + mi_lock_init(&subproc->abandoned_os_visit_lock); + return subproc; +} + +mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id) { + return (subproc_id == NULL ? &mi_subproc_default : (mi_subproc_t*)subproc_id); +} + +void mi_subproc_delete(mi_subproc_id_t subproc_id) { + if (subproc_id == NULL) return; + mi_subproc_t* subproc = _mi_subproc_from_id(subproc_id); + // check if there are no abandoned segments still.. 
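In the MI_GUARDED initialization above, `mi_heap_guarded_set_sample_rate` arms a per-heap countdown: with sample rate N, roughly one in N qualifying allocations gets a guard page, and the countdown starts at a random point in [1, N] so sampling is not phase-locked to the allocation pattern. A minimal standalone sketch of that countdown sampler (illustrative names, plain `rand()` in place of the heap's RNG; not mimalloc's API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct sampler_s {
  size_t rate;   // sample 1 out of `rate` events (0 = never sample)
  size_t count;  // countdown until the next sampled event
} sampler_t;

static void sampler_init(sampler_t* s, size_t rate, size_t seed) {
  s->rate  = rate;
  s->count = rate;
  if (rate > 1) {
    if (seed == 0) { seed = (size_t)rand(); }
    s->count = (seed % rate) + 1;          // start somewhere in [1, rate]
  }
}

// returns true when this event should be sampled (e.g. given a guard page)
static bool sampler_tick(sampler_t* s) {
  if (s->rate == 0) return false;
  if (--s->count == 0) { s->count = s->rate; return true; }
  return false;
}

int main(void) {
  sampler_t s;
  sampler_init(&s, 4000, 0);               // 4000 matches the default guarded sample rate
  size_t sampled = 0;
  for (size_t i = 0; i < 1000000; i++) {
    if (sampler_tick(&s)) sampled++;
  }
  printf("sampled %zu of 1000000 events (expect ~250)\n", sampled);
  return 0;
}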
+ bool safe_to_delete = false; + mi_lock(&subproc->abandoned_os_lock) { + if (subproc->abandoned_os_list == NULL) { + safe_to_delete = true; + } + } + if (!safe_to_delete) return; + // safe to release + // todo: should we refcount subprocesses? + mi_lock_done(&subproc->abandoned_os_lock); + mi_lock_done(&subproc->abandoned_os_visit_lock); + _mi_arena_meta_free(subproc, subproc->memid, sizeof(mi_subproc_t)); +} + +void mi_subproc_add_current_thread(mi_subproc_id_t subproc_id) { + mi_heap_t* heap = mi_heap_get_default(); + if (heap == NULL) return; + mi_assert(heap->tld->segments.subproc == &mi_subproc_default); + if (heap->tld->segments.subproc != &mi_subproc_default) return; + heap->tld->segments.subproc = _mi_subproc_from_id(subproc_id); +} + + /* ----------------------------------------------------------- Initialization and freeing of the thread local heaps @@ -218,12 +312,11 @@ typedef struct mi_thread_data_s { // destroy many OS threads, this may causes too much overhead // per thread so we maintain a small cache of recently freed metadata. -#define TD_CACHE_SIZE (16) +#define TD_CACHE_SIZE (32) static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE]; static mi_thread_data_t* mi_thread_data_zalloc(void) { // try to find thread metadata in the cache - bool is_zero = false; mi_thread_data_t* td = NULL; for (int i = 0; i < TD_CACHE_SIZE; i++) { td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); @@ -231,32 +324,25 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) { // found cached allocation, try use it td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL); if (td != NULL) { - break; + _mi_memzero(td, offsetof(mi_thread_data_t,memid)); + return td; } } } // if that fails, allocate as meta data + mi_memid_t memid; + td = (mi_thread_data_t*)_mi_os_zalloc(sizeof(mi_thread_data_t), &memid); if (td == NULL) { - mi_memid_t memid; - td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main); + // if this fails, try once more. (issue #257) + td = (mi_thread_data_t*)_mi_os_zalloc(sizeof(mi_thread_data_t), &memid); if (td == NULL) { - // if this fails, try once more. 
(issue #257) - td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main); - if (td == NULL) { - // really out of memory - _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t)); - } + // really out of memory + _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t)); + return NULL; } - if (td != NULL) { - td->memid = memid; - is_zero = memid.initially_zero; - } - } - - if (td != NULL && !is_zero) { - _mi_memzero_aligned(td, offsetof(mi_thread_data_t,memid)); } + td->memid = memid; return td; } @@ -272,7 +358,7 @@ static void mi_thread_data_free( mi_thread_data_t* tdfree ) { } } // if that fails, just free it directly - _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main); + _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid); } void _mi_thread_data_collect(void) { @@ -282,7 +368,7 @@ void _mi_thread_data_collect(void) { if (td != NULL) { td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL); if (td != NULL) { - _mi_os_free(td, sizeof(mi_thread_data_t), td->memid, &_mi_stats_main); + _mi_os_free(td, sizeof(mi_thread_data_t), td->memid); } } } @@ -307,7 +393,7 @@ static bool _mi_thread_heap_init(void) { mi_heap_t* heap = &td->heap; _mi_tld_init(tld, heap); // must be before `_mi_heap_init` _mi_heap_init(heap, tld, _mi_arena_id_none(), false /* can reclaim */, 0 /* default tag */); - _mi_heap_set_default_direct(heap); + _mi_heap_set_default_direct(heap); } return false; } @@ -317,9 +403,8 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) { _mi_memcpy_aligned(tld, &tld_empty, sizeof(mi_tld_t)); tld->heap_backing = bheap; tld->heaps = NULL; + tld->segments.subproc = &mi_subproc_default; tld->segments.stats = &tld->stats; - tld->segments.os = &tld->os; - tld->os.stats = &tld->stats; } // Free the thread local default heap (called from `mi_thread_done`) @@ -472,58 +557,27 @@ void _mi_heap_set_default_direct(mi_heap_t* heap) { _mi_prim_thread_associate_default_heap(heap); } +void mi_thread_set_in_threadpool(void) mi_attr_noexcept { + // nothing +} // -------------------------------------------------------- // Run functions on process init/done, and thread init/done // -------------------------------------------------------- -static void mi_cdecl mi_process_done(void); - static bool os_preloading = true; // true until this module is initialized -static bool mi_redirected = false; // true if malloc redirects to mi_malloc // Returns true if this module has not been initialized; Don't use C runtime routines until it returns false. 
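The `mi_thread_data_zalloc` / `_mi_thread_data_collect` changes above keep a small array of recently freed thread metadata (the cache grew from 16 to 32 slots), claim entries with an atomic exchange so no lock is needed, and zero a reused entry before handing it out. A reduced sketch of that slot-cache pattern with C11 atomics (assumed names, a single fixed block size, and `calloc` standing in for `_mi_os_zalloc`):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_SIZE 32
static _Atomic(void*) slot_cache[CACHE_SIZE];   // all slots start out NULL

// Take a cached block if one is available, otherwise allocate a fresh zeroed block.
// All blocks are assumed to have the same fixed `size` (as thread metadata does).
static void* cached_zalloc(size_t size) {
  for (int i = 0; i < CACHE_SIZE; i++) {
    if (atomic_load_explicit(&slot_cache[i], memory_order_relaxed) != NULL) {
      void* p = atomic_exchange(&slot_cache[i], NULL);  // claim the slot; only one thread wins
      if (p != NULL) { memset(p, 0, size); return p; }
    }
  }
  return calloc(1, size);
}

// Park a block in the first empty slot, or free it if the cache is full.
static void cached_free(void* p) {
  for (int i = 0; i < CACHE_SIZE; i++) {
    void* expected = NULL;
    if (atomic_compare_exchange_strong(&slot_cache[i], &expected, p)) return;
  }
  free(p);
}

int main(void) {
  void* a = cached_zalloc(256);
  cached_free(a);
  void* b = cached_zalloc(256);   // likely reuses the cached block
  printf("reused: %d\n", a == b);
  cached_free(b);
  return 0;
}

Claiming with an exchange (rather than a plain load) is what makes concurrent callers safe: only one thread can swap the non-NULL pointer out of a given slot.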
bool mi_decl_noinline _mi_preloading(void) { return os_preloading; } +// Returns true if mimalloc was redirected mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept { - return mi_redirected; -} - -// Communicate with the redirection module on Windows -#if defined(_WIN32) && defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT) -#ifdef __cplusplus -extern "C" { -#endif -mi_decl_export void _mi_redirect_entry(DWORD reason) { - // called on redirection; careful as this may be called before DllMain - if (reason == DLL_PROCESS_ATTACH) { - mi_redirected = true; - } - else if (reason == DLL_PROCESS_DETACH) { - mi_redirected = false; - } - else if (reason == DLL_THREAD_DETACH) { - mi_thread_done(); - } + return _mi_is_redirected(); } -__declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message); -__declspec(dllimport) void mi_cdecl mi_allocator_done(void); -#ifdef __cplusplus -} -#endif -#else -static bool mi_allocator_init(const char** message) { - if (message != NULL) *message = NULL; - return true; -} -static void mi_allocator_done(void) { - // nothing to do -} -#endif -// Called once by the process loader -static void mi_process_load(void) { +// Called once by the process loader from `src/prim/prim.c` +void _mi_auto_process_init(void) { mi_heap_main_init(); #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true; @@ -531,17 +585,14 @@ static void mi_process_load(void) { #endif os_preloading = false; mi_assert_internal(_mi_is_main_thread()); - #if !(defined(_WIN32) && defined(MI_SHARED_LIB)) // use Dll process detach (see below) instead of atexit (issue #521) - atexit(&mi_process_done); - #endif _mi_options_init(); mi_process_setup_auto_thread_done(); mi_process_init(); - if (mi_redirected) _mi_verbose_message("malloc is redirected.\n"); + if (_mi_is_redirected()) _mi_verbose_message("malloc is redirected.\n"); // show message from the redirector (if present) const char* msg = NULL; - mi_allocator_init(&msg); + _mi_allocator_init(&msg); if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) { _mi_fputs(NULL,NULL,NULL,msg); } @@ -553,12 +604,15 @@ static void mi_process_load(void) { #if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) #include mi_decl_cache_align bool _mi_cpu_has_fsrm = false; +mi_decl_cache_align bool _mi_cpu_has_erms = false; static void mi_detect_cpu_features(void) { - // FSRM for fast rep movsb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)) + // FSRM for fast short rep movsb/stosb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)) + // EMRS for fast enhanced rep movsb/stosb support int32_t cpu_info[4]; __cpuid(cpu_info, 7); _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see + _mi_cpu_has_erms = ((cpu_info[1] & (1 << 9)) != 0); // bit 9 of EBX : see } #else static void mi_detect_cpu_features(void) { @@ -581,14 +635,6 @@ void mi_process_init(void) mi_attr_noexcept { mi_detect_cpu_features(); _mi_os_init(); mi_heap_main_init(); - #if MI_DEBUG - _mi_verbose_message("debug level : %d\n", MI_DEBUG); - #endif - _mi_verbose_message("secure level: %d\n", MI_SECURE); - _mi_verbose_message("mem tracking: %s\n", MI_TRACK_TOOL); - #if MI_TSAN - _mi_verbose_message("thread santizer enabled\n"); - #endif mi_thread_init(); #if defined(_WIN32) @@ -618,8 +664,8 @@ void mi_process_init(void) mi_attr_noexcept { } } -// Called when the process is done (through 
`at_exit`) -static void mi_cdecl mi_process_done(void) { +// Called when the process is done (cdecl as it is used with `at_exit` on some platforms) +void mi_cdecl mi_process_done(void) mi_attr_noexcept { // only shutdown if we were initialized if (!_mi_process_is_initialized) return; // ensure we are called once @@ -627,15 +673,20 @@ static void mi_cdecl mi_process_done(void) { if (process_done) return; process_done = true; + // get the default heap so we don't need to acces thread locals anymore + mi_heap_t* heap = mi_prim_get_default_heap(); // use prim to not initialize any heap + mi_assert_internal(heap != NULL); + // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread _mi_prim_thread_done_auto_done(); + #ifndef MI_SKIP_COLLECT_ON_EXIT #if (MI_DEBUG || !defined(MI_SHARED_LIB)) // free all memory if possible on process exit. This is not needed for a stand-alone process // but should be done if mimalloc is statically linked into another shared library which // is repeatedly loaded/unloaded, see issue #281. - mi_collect(true /* force */ ); + mi_heap_collect(heap, true /* force */ ); #endif #endif @@ -643,72 +694,21 @@ static void mi_cdecl mi_process_done(void) { // since after process_done there might still be other code running that calls `free` (like at_exit routines, // or C-runtime termination code. if (mi_option_is_enabled(mi_option_destroy_on_exit)) { - mi_collect(true /* force */); - _mi_heap_unsafe_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!) - _mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats); + mi_heap_collect(heap, true /* force */); + _mi_heap_unsafe_destroy_all(heap); // forcefully release all memory held by all heaps (of this thread only!) 
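Separately, the `mi_detect_cpu_features` hunk above now records ERMS alongside FSRM by reading CPUID leaf 7, sub-leaf 0: FSRM is bit 4 of EDX and ERMS is bit 9 of EBX. For reference, a sketch of the same probe with GCC/Clang's `<cpuid.h>` (the diff itself uses MSVC's `__cpuid` intrinsic; names here are illustrative):

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool has_fsrm = false;   // fast short rep movsb/stosb
static bool has_erms = false;   // enhanced rep movsb/stosb

static void detect_cpu_features(void) {
  unsigned eax, ebx, ecx, edx;
  if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {  // leaf 7, sub-leaf 0
    has_fsrm = ((edx & (1u << 4)) != 0);                  // EDX bit 4
    has_erms = ((ebx & (1u << 9)) != 0);                  // EBX bit 9
  }
}

int main(void) {
  detect_cpu_features();
  printf("FSRM: %d, ERMS: %d\n", (int)has_fsrm, (int)has_erms);
  return 0;
}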
+ _mi_arena_unsafe_destroy_all(); + _mi_segment_map_unsafe_destroy(); } if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) { mi_stats_print(NULL); } - mi_allocator_done(); + _mi_allocator_done(); _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id); os_preloading = true; // don't call the C runtime anymore } - - -#if defined(_WIN32) && defined(MI_SHARED_LIB) - // Windows DLL: easy to hook into process_init and thread_done - __declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) { - MI_UNUSED(reserved); - MI_UNUSED(inst); - if (reason==DLL_PROCESS_ATTACH) { - mi_process_load(); - } - else if (reason==DLL_PROCESS_DETACH) { - mi_process_done(); - } - else if (reason==DLL_THREAD_DETACH) { - if (!mi_is_redirected()) { - mi_thread_done(); - } - } - return TRUE; - } - -#elif defined(_MSC_VER) - // MSVC: use data section magic for static libraries - // See - static int _mi_process_init(void) { - mi_process_load(); - return 0; - } - typedef int(*_mi_crt_callback_t)(void); - #if defined(_M_X64) || defined(_M_ARM64) - __pragma(comment(linker, "/include:" "_mi_msvc_initu")) - #pragma section(".CRT$XIU", long, read) - #else - __pragma(comment(linker, "/include:" "__mi_msvc_initu")) - #endif - #pragma data_seg(".CRT$XIU") - mi_decl_externc _mi_crt_callback_t _mi_msvc_initu[] = { &_mi_process_init }; - #pragma data_seg() - -#elif defined(__cplusplus) - // C++: use static initialization to detect process start - static bool _mi_process_init(void) { - mi_process_load(); - return (_mi_heap_main.thread_id != 0); - } - static bool mi_initialized = _mi_process_init(); - -#elif defined(__GNUC__) || defined(__clang__) - // GCC,Clang: use the constructor attribute - static void __attribute__((constructor)) _mi_process_init(void) { - mi_process_load(); - } - -#else -#pragma message("define a way to call mi_process_load on your platform") -#endif +void mi_cdecl _mi_auto_process_done(void) mi_attr_noexcept { + if (_mi_option_get_fast(mi_option_destroy_on_exit)>1) return; + mi_process_done(); +} diff --git a/system/lib/mimalloc/src/libc.c b/system/lib/mimalloc/src/libc.c index dd6b400737906..52d095eb240dc 100644 --- a/system/lib/mimalloc/src/libc.c +++ b/system/lib/mimalloc/src/libc.c @@ -7,7 +7,7 @@ terms of the MIT license. 
A copy of the license can be found in the file // -------------------------------------------------------- // This module defines various std libc functions to reduce -// the dependency on libc, and also prevent errors caused +// the dependency on libc, and also prevent errors caused // by some libc implementations when called before `main` // executes (due to malloc redirection) // -------------------------------------------------------- @@ -83,7 +83,7 @@ bool _mi_getenv(const char* name, char* result, size_t result_size) { // Define our own limited `_mi_vsnprintf` and `_mi_snprintf` // This is mostly to avoid calling these when libc is not yet // initialized (and to reduce dependencies) -// +// // format: d i, p x u, s // prec: z l ll L // width: 10 @@ -130,7 +130,7 @@ static void mi_out_alignright(char fill, char* start, size_t len, size_t extra, } -static void mi_out_num(uintptr_t x, size_t base, char prefix, char** out, char* end) +static void mi_out_num(uintmax_t x, size_t base, char prefix, char** out, char* end) { if (x == 0 || base == 0 || base > 16) { if (prefix != 0) { mi_outc(prefix, out, end); } @@ -144,8 +144,8 @@ static void mi_out_num(uintptr_t x, size_t base, char prefix, char** out, char* mi_outc((digit <= 9 ? '0' + digit : 'A' + digit - 10),out,end); x = x / base; } - if (prefix != 0) { - mi_outc(prefix, out, end); + if (prefix != 0) { + mi_outc(prefix, out, end); } size_t len = *out - start; // and reverse in-place @@ -160,8 +160,8 @@ static void mi_out_num(uintptr_t x, size_t base, char prefix, char** out, char* #define MI_NEXTC() c = *in; if (c==0) break; in++; -void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) { - if (buf == NULL || bufsize == 0 || fmt == NULL) return; +int _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) { + if (buf == NULL || bufsize == 0 || fmt == NULL) return 0; buf[bufsize - 1] = 0; char* const end = buf + (bufsize - 1); const char* in = fmt; @@ -181,7 +181,7 @@ void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) { size_t width = 0; char numtype = 'd'; char numplus = 0; - bool alignright = true; + bool alignright = true; if (c == '+' || c == ' ') { numplus = c; MI_NEXTC(); } if (c == '-') { alignright = false; MI_NEXTC(); } if (c == '0') { fill = '0'; MI_NEXTC(); } @@ -191,7 +191,7 @@ void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) { width = (10 * width) + (c - '0'); MI_NEXTC(); } if (c == 0) break; // extra check due to while - } + } if (c == 'z' || c == 't' || c == 'L') { numtype = c; MI_NEXTC(); } else if (c == 'l') { numtype = c; MI_NEXTC(); @@ -206,12 +206,13 @@ void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) { } else if (c == 'p' || c == 'x' || c == 'u') { // unsigned - uintptr_t x = 0; + uintmax_t x = 0; if (c == 'x' || c == 'u') { if (numtype == 'z') x = va_arg(args, size_t); else if (numtype == 't') x = va_arg(args, uintptr_t); // unsigned ptrdiff_t - else if (numtype == 'L') x = (uintptr_t)va_arg(args, unsigned long long); - else x = va_arg(args, unsigned long); + else if (numtype == 'L') x = va_arg(args, unsigned long long); + else if (numtype == 'l') x = va_arg(args, unsigned long); + else x = va_arg(args, unsigned int); } else if (c == 'p') { x = va_arg(args, uintptr_t); @@ -228,20 +229,21 @@ void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) { } else if (c == 'i' || c == 'd') { // signed - intptr_t x = 0; + intmax_t x = 0; if (numtype == 'z') x = va_arg(args, intptr_t ); else 
if (numtype == 't') x = va_arg(args, ptrdiff_t); - else if (numtype == 'L') x = (intptr_t)va_arg(args, long long); - else x = va_arg(args, long); + else if (numtype == 'L') x = va_arg(args, long long); + else if (numtype == 'l') x = va_arg(args, long); + else x = va_arg(args, int); char pre = 0; if (x < 0) { pre = '-'; - if (x > INTPTR_MIN) { x = -x; } + if (x > INTMAX_MIN) { x = -x; } } else if (numplus != 0) { pre = numplus; } - mi_out_num((uintptr_t)x, 10, pre, &out, end); + mi_out_num((uintmax_t)x, 10, pre, &out, end); } else if (c >= ' ' && c <= '~') { // unknown format @@ -263,11 +265,70 @@ void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) { } mi_assert_internal(out <= end); *out = 0; + return (int)(out - buf); } -void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...) { +int _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...) { va_list args; va_start(args, fmt); - _mi_vsnprintf(buf, buflen, fmt, args); + const int written = _mi_vsnprintf(buf, buflen, fmt, args); va_end(args); + return written; +} + + +#if MI_SIZE_SIZE == 4 +#define mi_mask_even_bits32 (0x55555555) +#define mi_mask_even_pairs32 (0x33333333) +#define mi_mask_even_nibbles32 (0x0F0F0F0F) + +// sum of all the bytes in `x` if it is guaranteed that the sum < 256! +static size_t mi_byte_sum32(uint32_t x) { + // perform `x * 0x01010101`: the highest byte contains the sum of all bytes. + x += (x << 8); + x += (x << 16); + return (size_t)(x >> 24); } + +static size_t mi_popcount_generic32(uint32_t x) { + // first count each 2-bit group `a`, where: a==0b00 -> 00, a==0b01 -> 01, a==0b10 -> 01, a==0b11 -> 10 + // in other words, `a - (a>>1)`; to do this in parallel, we need to mask to prevent spilling a bit pair + // into the lower bit-pair: + x = x - ((x >> 1) & mi_mask_even_bits32); + // add the 2-bit pair results + x = (x & mi_mask_even_pairs32) + ((x >> 2) & mi_mask_even_pairs32); + // add the 4-bit nibble results + x = (x + (x >> 4)) & mi_mask_even_nibbles32; + // each byte now has a count of its bits, we can sum them now: + return mi_byte_sum32(x); +} + +mi_decl_noinline size_t _mi_popcount_generic(size_t x) { + return mi_popcount_generic32(x); +} + +#else +#define mi_mask_even_bits64 (0x5555555555555555) +#define mi_mask_even_pairs64 (0x3333333333333333) +#define mi_mask_even_nibbles64 (0x0F0F0F0F0F0F0F0F) + +// sum of all the bytes in `x` if it is guaranteed that the sum < 256! 
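`_mi_popcount_generic` above is the software fallback when no hardware popcount is available: fold each 2-bit pair into its bit count, then nibbles, then bytes, and finally add the per-byte counts with the shift-and-add byte sum (`mi_byte_sum32`); the 64-bit variant that follows applies the same steps with wider masks. A self-checking sketch of the 32-bit scheme (illustrative name; GCC/Clang `__builtin_popcount` is used only for the cross-check):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned popcount32_generic(uint32_t x) {
  x = x - ((x >> 1) & 0x55555555u);                  // each 2-bit pair now holds its bit count (0..2)
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  // each nibble now holds its bit count (0..4)
  x = (x + (x >> 4)) & 0x0F0F0F0Fu;                  // each byte now holds its bit count (0..8)
  x += (x << 8);                                     // byte sum: safe because the total is < 256
  x += (x << 16);
  return x >> 24;                                    // the top byte accumulates the full count
}

int main(void) {
  for (uint32_t i = 0; i < (1u << 20); i++) {
    const uint32_t v = i * 2654435761u;              // spread the test values around
    assert(popcount32_generic(v) == (unsigned)__builtin_popcount(v));
  }
  printf("generic popcount matches __builtin_popcount\n");
  return 0;
}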
+static size_t mi_byte_sum64(uint64_t x) { + x += (x << 8); + x += (x << 16); + x += (x << 32); + return (size_t)(x >> 56); +} + +static size_t mi_popcount_generic64(uint64_t x) { + x = x - ((x >> 1) & mi_mask_even_bits64); + x = (x & mi_mask_even_pairs64) + ((x >> 2) & mi_mask_even_pairs64); + x = (x + (x >> 4)) & mi_mask_even_nibbles64; + return mi_byte_sum64(x); +} + +mi_decl_noinline size_t _mi_popcount_generic(size_t x) { + return mi_popcount_generic64(x); +} +#endif + diff --git a/system/lib/mimalloc/src/options.c b/system/lib/mimalloc/src/options.c index a62727dd69fe3..af2a0e70c4c7a 100644 --- a/system/lib/mimalloc/src/options.c +++ b/system/lib/mimalloc/src/options.c @@ -47,6 +47,62 @@ typedef struct mi_option_desc_s { #define MI_OPTION(opt) mi_option_##opt, #opt, NULL #define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy +// Some options can be set at build time for statically linked libraries +// (use `-DMI_EXTRA_CPPDEFS="opt1=val1;opt2=val2"`) +// +// This is useful if we cannot pass them as environment variables +// (and setting them programmatically would be too late) + +#ifndef MI_DEFAULT_VERBOSE +#define MI_DEFAULT_VERBOSE 0 +#endif + +#ifndef MI_DEFAULT_EAGER_COMMIT +#define MI_DEFAULT_EAGER_COMMIT 1 +#endif + +#ifndef MI_DEFAULT_ARENA_EAGER_COMMIT +#define MI_DEFAULT_ARENA_EAGER_COMMIT 2 +#endif + +// in KiB +#ifndef MI_DEFAULT_ARENA_RESERVE + #if (MI_INTPTR_SIZE>4) + #define MI_DEFAULT_ARENA_RESERVE 1024L*1024L + #else + #define MI_DEFAULT_ARENA_RESERVE 128L*1024L + #endif +#endif + +#ifndef MI_DEFAULT_DISALLOW_ARENA_ALLOC +#define MI_DEFAULT_DISALLOW_ARENA_ALLOC 0 +#endif + +#ifndef MI_DEFAULT_ALLOW_LARGE_OS_PAGES +#if defined(__linux__) && !defined(__ANDROID__) +#define MI_DEFAULT_ALLOW_LARGE_OS_PAGES 2 // enabled, but only use transparent huge pages through madvise +#else +#define MI_DEFAULT_ALLOW_LARGE_OS_PAGES 0 +#endif +#endif + +#ifndef MI_DEFAULT_RESERVE_HUGE_OS_PAGES +#define MI_DEFAULT_RESERVE_HUGE_OS_PAGES 0 +#endif + +#ifndef MI_DEFAULT_RESERVE_OS_MEMORY +#define MI_DEFAULT_RESERVE_OS_MEMORY 0 +#endif + +#ifndef MI_DEFAULT_GUARDED_SAMPLE_RATE +#if MI_GUARDED +#define MI_DEFAULT_GUARDED_SAMPLE_RATE 4000 +#else +#define MI_DEFAULT_GUARDED_SAMPLE_RATE 0 +#endif +#endif + + static mi_option_desc_t options[_mi_option_last] = { // stable options @@ -56,16 +112,21 @@ static mi_option_desc_t options[_mi_option_last] = { 0, UNINIT, MI_OPTION(show_errors) }, #endif { 0, UNINIT, MI_OPTION(show_stats) }, - { 0, UNINIT, MI_OPTION(verbose) }, + { MI_DEFAULT_VERBOSE, UNINIT, MI_OPTION(verbose) }, - // the following options are experimental and not all combinations make sense. - { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`) - { 2, UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. linux) + // some of the following options are experimental and not all combinations are allowed. + { MI_DEFAULT_EAGER_COMMIT, + UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`) + { MI_DEFAULT_ARENA_EAGER_COMMIT, + UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. 
linux) { 1, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) }, // purge decommits memory (instead of reset) (note: on linux this uses MADV_DONTNEED for decommit) - { 0, UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's - { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages + { MI_DEFAULT_ALLOW_LARGE_OS_PAGES, + UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's + { MI_DEFAULT_RESERVE_HUGE_OS_PAGES, + UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages {-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N - { 0, UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`) + { MI_DEFAULT_RESERVE_OS_MEMORY, + UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`) { 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread { 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge,abandoned_page_reset) }, // reset free page memory when a thread terminates @@ -83,16 +144,25 @@ static mi_option_desc_t options[_mi_option_last] = { 32, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output { 10, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. percentage of the abandoned segments to be reclaimed per try. { 0, UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees! - #if (MI_INTPTR_SIZE>4) - { 1024L*1024L, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`) - #else - { 128L*1024L, UNINIT, MI_OPTION(arena_reserve) }, // =128MiB on 32-bit - #endif - { 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's + { MI_DEFAULT_ARENA_RESERVE, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`) + { 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's { 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) }, - { 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free - { 0, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's) + { 0, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free + { MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's) { 400, UNINIT, MI_OPTION(retry_on_oom) }, // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. +#if defined(MI_VISIT_ABANDONED) + { 1, INITIALIZED, MI_OPTION(visit_abandoned) }, // allow visiting heap blocks in abandoned segments; requires taking locks during reclaim. 
+#else + { 0, UNINIT, MI_OPTION(visit_abandoned) }, +#endif + { 0, UNINIT, MI_OPTION(guarded_min) }, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects + { MI_GiB, UNINIT, MI_OPTION(guarded_max) }, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects + { 0, UNINIT, MI_OPTION(guarded_precise) }, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0) + { MI_DEFAULT_GUARDED_SAMPLE_RATE, + UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000) + { 0, UNINIT, MI_OPTION(guarded_sample_seed)}, + { 0, UNINIT, MI_OPTION(target_segments_per_thread) }, // abandon segments beyond this point, or 0 to disable. + { 10000, UNINIT, MI_OPTION(generic_collect) }, // collect heaps every N (=10000) generic allocation calls }; static void mi_option_init(mi_option_desc_t* desc); @@ -102,22 +172,77 @@ static bool mi_option_has_size_in_kib(mi_option_t option) { } void _mi_options_init(void) { - // called on process load; should not be called before the CRT is initialized! - // (e.g. do not call this from process_init as that may run before CRT initialization) + // called on process load mi_add_stderr_output(); // now it safe to use stderr for output for(int i = 0; i < _mi_option_last; i++ ) { mi_option_t option = (mi_option_t)i; long l = mi_option_get(option); MI_UNUSED(l); // initialize - // if (option != mi_option_verbose) - { - mi_option_desc_t* desc = &options[option]; - _mi_verbose_message("option '%s': %ld %s\n", desc->name, desc->value, (mi_option_has_size_in_kib(option) ? "KiB" : "")); - } } mi_max_error_count = mi_option_get(mi_option_max_errors); mi_max_warning_count = mi_option_get(mi_option_max_warnings); + #if MI_GUARDED + if (mi_option_get(mi_option_guarded_sample_rate) > 0) { + if (mi_option_is_enabled(mi_option_allow_large_os_pages)) { + mi_option_disable(mi_option_allow_large_os_pages); + _mi_warning_message("option 'allow_large_os_pages' is disabled to allow for guarded objects\n"); + } + } + #endif + if (mi_option_is_enabled(mi_option_verbose)) { mi_options_print(); } +} + +#define mi_stringifyx(str) #str // and stringify +#define mi_stringify(str) mi_stringifyx(str) // expand + +void mi_options_print(void) mi_attr_noexcept +{ + // show version + const int vermajor = MI_MALLOC_VERSION/100; + const int verminor = (MI_MALLOC_VERSION%100)/10; + const int verpatch = (MI_MALLOC_VERSION%10); + _mi_message("v%i.%i.%i%s%s (built on %s, %s)\n", vermajor, verminor, verpatch, + #if defined(MI_CMAKE_BUILD_TYPE) + ", " mi_stringify(MI_CMAKE_BUILD_TYPE) + #else + "" + #endif + , + #if defined(MI_GIT_DESCRIBE) + ", git " mi_stringify(MI_GIT_DESCRIBE) + #else + "" + #endif + , __DATE__, __TIME__); + + // show options + for (int i = 0; i < _mi_option_last; i++) { + mi_option_t option = (mi_option_t)i; + long l = mi_option_get(option); MI_UNUSED(l); // possibly initialize + mi_option_desc_t* desc = &options[option]; + _mi_message("option '%s': %ld %s\n", desc->name, desc->value, (mi_option_has_size_in_kib(option) ? "KiB" : "")); + } + + // show build configuration + _mi_message("debug level : %d\n", MI_DEBUG ); + _mi_message("secure level: %d\n", MI_SECURE ); + _mi_message("mem tracking: %s\n", MI_TRACK_TOOL); + #if MI_GUARDED + _mi_message("guarded build: %s\n", mi_option_get(mi_option_guarded_sample_rate) != 0 ? 
"enabled" : "disabled"); + #endif + #if MI_TSAN + _mi_message("thread santizer enabled\n"); + #endif } +long _mi_option_get_fast(mi_option_t option) { + mi_assert(option >= 0 && option < _mi_option_last); + mi_option_desc_t* desc = &options[option]; + mi_assert(desc->option == option); // index should match the option + //mi_assert(desc->init != UNINIT); + return desc->value; +} + + mi_decl_nodiscard long mi_option_get(mi_option_t option) { mi_assert(option >= 0 && option < _mi_option_last); if (option < 0 || option >= _mi_option_last) return 0; @@ -135,7 +260,6 @@ mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long ma } mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) { - mi_assert_internal(mi_option_has_size_in_kib(option)); const long x = mi_option_get(option); size_t size = (x < 0 ? 0 : (size_t)x); if (mi_option_has_size_in_kib(option)) { @@ -151,6 +275,13 @@ void mi_option_set(mi_option_t option, long value) { mi_assert(desc->option == option); // index should match the option desc->value = value; desc->init = INITIALIZED; + // ensure min/max range; be careful to not recurse. + if (desc->option == mi_option_guarded_min && _mi_option_get_fast(mi_option_guarded_max) < value) { + mi_option_set(mi_option_guarded_max, value); + } + else if (desc->option == mi_option_guarded_max && _mi_option_get_fast(mi_option_guarded_min) > value) { + mi_option_set(mi_option_guarded_min, value); + } } void mi_option_set_default(mi_option_t option, long value) { @@ -194,7 +325,7 @@ static void mi_cdecl mi_out_stderr(const char* msg, void* arg) { // an output function is registered it is called immediately with // the output up to that point. #ifndef MI_MAX_DELAY_OUTPUT -#define MI_MAX_DELAY_OUTPUT ((size_t)(32*1024)) +#define MI_MAX_DELAY_OUTPUT ((size_t)(16*1024)) #endif static char out_buf[MI_MAX_DELAY_OUTPUT+1]; static _Atomic(size_t) out_len; @@ -280,7 +411,7 @@ static _Atomic(size_t) warning_count; // = 0; // when >= max_warning_count stop // (recursively) invoke malloc again to allocate space for the thread local // variables on demand. This is why we use a _mi_preloading test on such // platforms. However, C code generator may move the initial thread local address -// load before the `if` and we therefore split it out in a separate funcion. +// load before the `if` and we therefore split it out in a separate function. static mi_decl_thread bool recurse = false; static mi_decl_noinline bool mi_recurse_enter_prim(void) { @@ -294,14 +425,14 @@ static mi_decl_noinline void mi_recurse_exit_prim(void) { } static bool mi_recurse_enter(void) { - #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) + #if defined(__APPLE__) || defined(__ANDROID__) || defined(MI_TLS_RECURSE_GUARD) if (_mi_preloading()) return false; #endif return mi_recurse_enter_prim(); } static void mi_recurse_exit(void) { - #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) + #if defined(__APPLE__) || defined(__ANDROID__) || defined(MI_TLS_RECURSE_GUARD) if (_mi_preloading()) return; #endif mi_recurse_exit_prim(); @@ -350,6 +481,13 @@ static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix } } +void _mi_message(const char* fmt, ...) { + va_list args; + va_start(args, fmt); + mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args); + va_end(args); +} + void _mi_trace_message(const char* fmt, ...) { if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher va_list args; @@ -387,7 +525,7 @@ void _mi_warning_message(const char* fmt, ...) 
{ #if MI_DEBUG -void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) { +mi_decl_noreturn mi_decl_cold void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) mi_attr_noexcept { _mi_fprintf(NULL, NULL, "mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion); abort(); } @@ -485,7 +623,7 @@ static void mi_option_init(mi_option_desc_t* desc) { char* end = buf; long value = strtol(buf, &end, 10); if (mi_option_has_size_in_kib(desc->option)) { - // this option is interpreted in KiB to prevent overflow of `long` for large allocations + // this option is interpreted in KiB to prevent overflow of `long` for large allocations // (long is 32-bit on 64-bit windows, which allows for 4TiB max.) size_t size = (value < 0 ? 0 : (size_t)value); bool overflow = false; @@ -500,8 +638,7 @@ static void mi_option_init(mi_option_desc_t* desc) { value = (size > LONG_MAX ? LONG_MAX : (long)size); } if (*end == 0) { - desc->value = value; - desc->init = INITIALIZED; + mi_option_set(desc->option, value); } else { // set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose. diff --git a/system/lib/mimalloc/src/os.c b/system/lib/mimalloc/src/os.c index ce104273bfdb0..9b1b4b460775f 100644 --- a/system/lib/mimalloc/src/os.c +++ b/system/lib/mimalloc/src/os.c @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -9,18 +9,38 @@ terms of the MIT license. A copy of the license can be found in the file #include "mimalloc/atomic.h" #include "mimalloc/prim.h" +#define mi_os_stat_increase(stat,amount) _mi_stat_increase(&_mi_stats_main.stat, amount) +#define mi_os_stat_decrease(stat,amount) _mi_stat_decrease(&_mi_stats_main.stat, amount) +#define mi_os_stat_counter_increase(stat,inc) _mi_stat_counter_increase(&_mi_stats_main.stat, inc) /* ----------------------------------------------------------- - Initialization. + Initialization. ----------------------------------------------------------- */ +#ifndef MI_DEFAULT_VIRTUAL_ADDRESS_BITS +#if MI_INTPTR_SIZE < 8 +#define MI_DEFAULT_VIRTUAL_ADDRESS_BITS 32 +#else +#define MI_DEFAULT_VIRTUAL_ADDRESS_BITS 48 +#endif +#endif + +#ifndef MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB +#if MI_INTPTR_SIZE < 8 +#define MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB 4*MI_MiB // 4 GiB +#else +#define MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB 32*MI_MiB // 32 GiB +#endif +#endif static mi_os_mem_config_t mi_os_mem_config = { - 4096, // page size - 0, // large page size (usually 2MiB) - 4096, // allocation granularity - true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems) - false, // can we partially free allocated blocks? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span) - true // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory) + 4096, // page size + 0, // large page size (usually 2MiB) + 4096, // allocation granularity + MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB, + MI_DEFAULT_VIRTUAL_ADDRESS_BITS, + true, // has overcommit? 
(if true we use MAP_NORESERVE on mmap systems) + false, // can we partially free allocated blocks? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span) + true // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory) }; bool _mi_os_has_overcommit(void) { @@ -68,17 +88,18 @@ void _mi_os_init(void) { /* ----------------------------------------------------------- Util -------------------------------------------------------------- */ -bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); -bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats); +bool _mi_os_decommit(void* addr, size_t size); +bool _mi_os_commit(void* addr, size_t size, bool* is_zero); /* ----------------------------------------------------------- aligned hinting -------------------------------------------------------------- */ -// On 64-bit systems, we can do efficient aligned allocation by using -// the 2TiB to 30TiB area to allocate those. -#if (MI_INTPTR_SIZE >= 8) +// On systems with enough virtual address bits, we can do efficient aligned allocation by using +// the 2TiB to 30TiB area to allocate those. If we have at least 46 bits of virtual address +// space (64TiB) we use this technique. (but see issue #939) +#if (MI_INTPTR_SIZE >= 8) && !defined(MI_NO_ALIGNED_HINT) static mi_decl_cache_align _Atomic(uintptr_t)aligned_base; // Return a MI_SEGMENT_SIZE aligned address that is probably available. @@ -95,6 +116,7 @@ static mi_decl_cache_align _Atomic(uintptr_t)aligned_base; void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) { if (try_alignment <= 1 || try_alignment > MI_SEGMENT_SIZE) return NULL; + if (mi_os_mem_config.virtual_address_bits < 46) return NULL; // < 64TiB virtual address space size = _mi_align_up(size, MI_SEGMENT_SIZE); if (size > 1*MI_GiB) return NULL; // guarantee the chance of fixed valid address is at most 1/(MI_HINT_AREA / 1<<30) = 1/4096. 
#if (MI_SECURE>0) @@ -122,44 +144,51 @@ void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) { } #endif - /* ----------------------------------------------------------- Free memory -------------------------------------------------------------- */ -static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats); +static void mi_os_free_huge_os_pages(void* p, size_t size); -static void mi_os_prim_free(void* addr, size_t size, bool still_committed, mi_stats_t* tld_stats) { - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; +static void mi_os_prim_free(void* addr, size_t size, size_t commit_size) { mi_assert_internal((size % _mi_os_page_size()) == 0); - if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr) - int err = _mi_prim_free(addr, size); + if (addr == NULL) return; // || _mi_os_is_huge_reserved(addr) + int err = _mi_prim_free(addr, size); // allow size==0 (issue #1041) if (err != 0) { _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr); } - if (still_committed) { _mi_stat_decrease(&stats->committed, size); } - _mi_stat_decrease(&stats->reserved, size); + if (commit_size > 0) { + mi_os_stat_decrease(committed, commit_size); + } + mi_os_stat_decrease(reserved, size); } -void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* tld_stats) { +void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) { if (mi_memkind_is_os(memid.memkind)) { - size_t csize = _mi_os_good_alloc_size(size); + size_t csize = memid.mem.os.size; + if (csize==0) { csize = _mi_os_good_alloc_size(size); } + mi_assert_internal(csize >= size); + size_t commit_size = (still_committed ? csize : 0); void* base = addr; // different base? (due to alignment) - if (memid.mem.os.base != NULL) { + if (memid.mem.os.base != base) { mi_assert(memid.mem.os.base <= addr); - mi_assert((uint8_t*)memid.mem.os.base + memid.mem.os.alignment >= (uint8_t*)addr); base = memid.mem.os.base; - csize += ((uint8_t*)addr - (uint8_t*)memid.mem.os.base); + const size_t diff = (uint8_t*)addr - (uint8_t*)memid.mem.os.base; + if (memid.mem.os.size==0) { + csize += diff; + } + if (still_committed) { + commit_size -= diff; // the (addr-base) part was already un-committed + } } // free it if (memid.memkind == MI_MEM_OS_HUGE) { mi_assert(memid.is_pinned); - mi_os_free_huge_os_pages(base, csize, tld_stats); + mi_os_free_huge_os_pages(base, csize); } else { - mi_os_prim_free(base, csize, still_committed, tld_stats); + mi_os_prim_free(base, csize, (still_committed ? commit_size : 0)); } } else { @@ -168,8 +197,8 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me } } -void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* tld_stats) { - _mi_os_free_ex(p, size, true, memid, tld_stats); +void _mi_os_free(void* p, size_t size, mi_memid_t memid) { + _mi_os_free_ex(p, size, true, memid); } @@ -178,7 +207,8 @@ void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* tld_stats) -------------------------------------------------------------- */ // Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. -static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats) { +// Also `hint_addr` is a hint and may be ignored. 
+static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) { mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(is_zero != NULL); mi_assert_internal(is_large != NULL); @@ -187,18 +217,18 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning *is_zero = false; void* p = NULL; - int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p); + int err = _mi_prim_alloc(hint_addr, size, try_alignment, commit, allow_large, is_large, is_zero, &p); if (err != 0) { - _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large); + _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), addr: %p, size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, hint_addr, size, try_alignment, commit, allow_large); } - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; - mi_stat_counter_increase(stats->mmap_calls, 1); + + + mi_os_stat_counter_increase(mmap_calls, 1); if (p != NULL) { - _mi_stat_increase(&stats->reserved, size); + mi_os_stat_increase(reserved, size); if (commit) { - _mi_stat_increase(&stats->committed, size); + mi_os_stat_increase(committed, size); // seems needed for asan (or `mimalloc-test-api` fails) #ifdef MI_TRACK_ASAN if (*is_zero) { mi_track_mem_defined(p,size); } @@ -209,10 +239,14 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo return p; } +static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) { + return mi_os_prim_alloc_at(NULL, size, try_alignment, commit, allow_large, is_large, is_zero); +} + // Primitive aligned allocation from the OS. // This function guarantees the allocated memory is aligned. -static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base, mi_stats_t* stats) { +static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base) { mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0)); mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(is_large != NULL); @@ -222,8 +256,8 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL; size = _mi_align_up(size, _mi_os_page_size()); - // try first with a hint (this will be aligned directly on Win 10+ or BSD) - void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats); + // try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD) + void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero); if (p == NULL) return NULL; // aligned already? 
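The `// aligned already?` check above reduces, for power-of-two alignments, to masking the low pointer bits. A minimal stand-alone sketch of that check (the helper name is invented here; it is not mimalloc's `_mi_is_aligned`):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// True when `p` is aligned to `alignment`; `alignment` must be a power of two.
static bool is_aligned_pow2(const void* p, size_t alignment) {
  return ((uintptr_t)p & (alignment - 1)) == 0;
}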
@@ -232,14 +266,16 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit } else { // if not aligned, free it, overallocate, and unmap around it + #if !MI_TRACK_ASAN _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit); - mi_os_prim_free(p, size, commit, stats); + #endif + if (p != NULL) { mi_os_prim_free(p, size, (commit ? size : 0)); } if (size >= (SIZE_MAX - alignment)) return NULL; // overflow const size_t over_size = size + alignment; if (!mi_os_mem_config.has_partial_free) { // win32 virtualAlloc cannot free parts of an allocated block // over-allocate uncommitted (virtual) memory - p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats); + p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero); if (p == NULL) return NULL; // set p to the aligned part in the full region @@ -250,22 +286,25 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit // explicitly commit only the aligned part if (commit) { - _mi_os_commit(p, size, NULL, stats); + if (!_mi_os_commit(p, size, NULL)) { + mi_os_prim_free(*base, over_size, 0); + return NULL; + } } } else { // mmap can free inside an allocation // overallocate... - p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats); + p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero); if (p == NULL) return NULL; - // and selectively unmap parts around the over-allocated area. + // and selectively unmap parts around the over-allocated area. void* aligned_p = mi_align_up_ptr(p, alignment); size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p; size_t mid_size = _mi_align_up(size, _mi_os_page_size()); size_t post_size = over_size - pre_size - mid_size; mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size); - if (pre_size > 0) { mi_os_prim_free(p, pre_size, commit, stats); } - if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); } + if (pre_size > 0) { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0)); } + if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0)); } // we can return the aligned pointer on `mmap` systems p = aligned_p; *base = aligned_p; // since we freed the pre part, `*base == p`. 
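On systems where parts of a mapping can be freed (the `has_partial_free` path above), the over-allocate-and-trim technique comes down to a handful of lines of plain POSIX code. A rough sketch under two assumptions: `size` is a multiple of the OS page size and `alignment` is a power of two no smaller than a page; it is illustrative only and uses none of mimalloc's bookkeeping:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

// Round a pointer up to a power-of-two alignment.
static void* align_up_ptr(void* p, size_t alignment) {
  return (void*)(((uintptr_t)p + alignment - 1) & ~(uintptr_t)(alignment - 1));
}

// Allocate `size` bytes aligned to `alignment` by over-allocating by `alignment`
// and unmapping the unused head and tail of the mapping.
static void* os_alloc_aligned_sketch(size_t size, size_t alignment) {
  const size_t over_size = size + alignment;
  uint8_t* p = (uint8_t*)mmap(NULL, over_size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return NULL;
  uint8_t* aligned = (uint8_t*)align_up_ptr(p, alignment);
  const size_t pre_size  = (size_t)(aligned - p);         // unused head
  const size_t post_size = over_size - pre_size - size;   // unused tail
  if (pre_size  > 0) { munmap(p, pre_size); }
  if (post_size > 0) { munmap(aligned + size, post_size); }
  return aligned;   // release later with munmap(aligned, size)
}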
@@ -281,20 +320,22 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit OS API: alloc and alloc_aligned ----------------------------------------------------------- */ -void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) { +void* _mi_os_alloc(size_t size, mi_memid_t* memid) { *memid = _mi_memid_none(); if (size == 0) return NULL; size = _mi_os_good_alloc_size(size); bool os_is_large = false; bool os_is_zero = false; - void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats); - if (p != NULL) { - *memid = _mi_memid_create_os(true, os_is_zero, os_is_large); - } + void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero); + if (p == NULL) return NULL; + + *memid = _mi_memid_create_os(p, size, true, os_is_zero, os_is_large); + mi_assert_internal(memid->mem.os.size >= size); + mi_assert_internal(memid->initially_committed); return p; } -void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats) +void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid) { MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings *memid = _mi_memid_none(); @@ -305,15 +346,43 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo bool os_is_large = false; bool os_is_zero = false; void* os_base = NULL; - void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base, stats ); - if (p != NULL) { - *memid = _mi_memid_create_os(commit, os_is_zero, os_is_large); - memid->mem.os.base = os_base; - memid->mem.os.alignment = alignment; + void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base ); + if (p == NULL) return NULL; + + *memid = _mi_memid_create_os(p, size, commit, os_is_zero, os_is_large); + memid->mem.os.base = os_base; + memid->mem.os.size += ((uint8_t*)p - (uint8_t*)os_base); // todo: return from prim_alloc_aligned? + + mi_assert_internal(memid->mem.os.size >= size); + mi_assert_internal(_mi_is_aligned(p,alignment)); + if (commit) { mi_assert_internal(memid->initially_committed); } + return p; +} + + +mi_decl_nodiscard static void* mi_os_ensure_zero(void* p, size_t size, mi_memid_t* memid) { + if (p==NULL || size==0) return p; + // ensure committed + if (!memid->initially_committed) { + bool is_zero = false; + if (!_mi_os_commit(p, size, &is_zero)) { + _mi_os_free(p, size, *memid); + return NULL; + } + memid->initially_committed = true; } + // ensure zero'd + if (memid->initially_zero) return p; + _mi_memzero_aligned(p,size); + memid->initially_zero = true; return p; } +void* _mi_os_zalloc(size_t size, mi_memid_t* memid) { + void* p = _mi_os_alloc(size,memid); + return mi_os_ensure_zero(p, size, memid); +} + /* ----------------------------------------------------------- OS aligned allocation with an offset. This is used for large alignments > MI_BLOCK_ALIGNMENT_MAX. We use a large mimalloc @@ -322,7 +391,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo to use the actual start of the memory region. 
----------------------------------------------------------- */ -void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats) { +void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid) { mi_assert(offset <= MI_SEGMENT_SIZE); mi_assert(offset <= size); mi_assert((alignment % _mi_os_page_size()) == 0); @@ -330,20 +399,20 @@ void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offse if (offset > MI_SEGMENT_SIZE) return NULL; if (offset == 0) { // regular aligned allocation - return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, stats); + return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid); } else { // overallocate to align at an offset const size_t extra = _mi_align_up(offset, alignment) - offset; const size_t oversize = size + extra; - void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid, stats); + void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid); if (start == NULL) return NULL; void* const p = (uint8_t*)start + extra; mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment)); // decommit the overallocation at the start if (commit && extra > _mi_os_page_size()) { - _mi_os_decommit(start, extra, stats); + _mi_os_decommit(start, extra); } return p; } @@ -377,12 +446,10 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t* return mi_os_page_align_areax(true, addr, size, newsize); } -bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) { - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; +bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size) { if (is_zero != NULL) { *is_zero = false; } - _mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit - _mi_stat_counter_increase(&stats->commit_calls, 1); + mi_os_stat_increase(committed, stat_size); // use size for precise commit vs. decommit + mi_os_stat_counter_increase(commit_calls, 1); // page align range size_t csize; @@ -408,11 +475,13 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats return true; } -static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_stats_t* tld_stats) { - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; +bool _mi_os_commit(void* addr, size_t size, bool* is_zero) { + return _mi_os_commit_ex(addr, size, is_zero, size); +} + +static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, size_t stat_size) { mi_assert_internal(needs_recommit!=NULL); - _mi_stat_decrease(&stats->committed, size); + mi_os_stat_decrease(committed, stat_size); // page align size_t csize; @@ -429,9 +498,9 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_ return (err == 0); } -bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) { +bool _mi_os_decommit(void* addr, size_t size) { bool needs_recommit; - return mi_os_decommit_ex(addr, size, &needs_recommit, tld_stats); + return mi_os_decommit_ex(addr, size, &needs_recommit, size); } @@ -439,13 +508,13 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) { // but may be used later again. This will release physical memory // pages and reduce swapping while keeping the memory committed. 
// We page align to a conservative area inside the range to reset. -bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { +bool _mi_os_reset(void* addr, size_t size) { // page align conservatively within the range size_t csize; void* start = mi_os_page_align_area_conservative(addr, size, &csize); if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr) - _mi_stat_increase(&stats->reset, csize); - _mi_stat_counter_increase(&stats->reset_calls, 1); + mi_os_stat_increase(reset, csize); + mi_os_stat_counter_increase(reset_calls, 1); #if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN memset(start, 0, csize); // pretend it is eagerly reset @@ -459,24 +528,35 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { } +void _mi_os_reuse( void* addr, size_t size ) { + // page align conservatively within the range + size_t csize = 0; + void* const start = mi_os_page_align_area_conservative(addr, size, &csize); + if (csize == 0) return; + const int err = _mi_prim_reuse(start, csize); + if (err != 0) { + _mi_warning_message("cannot reuse OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); + } +} + // either resets or decommits memory, returns true if the memory needs // to be recommitted if it is to be re-used later on. -bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) +bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size) { if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed? - _mi_stat_counter_increase(&stats->purge_calls, 1); - _mi_stat_increase(&stats->purged, size); + mi_os_stat_counter_increase(purge_calls, 1); + mi_os_stat_increase(purged, size); if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit? !_mi_preloading()) // don't decommit during preloading (unsafe) { bool needs_recommit = true; - mi_os_decommit_ex(p, size, &needs_recommit, stats); + mi_os_decommit_ex(p, size, &needs_recommit, stat_size); return needs_recommit; } else { if (allow_reset) { // this can sometimes be not allowed if the range is not fully committed - _mi_os_reset(p, size, stats); + _mi_os_reset(p, size); } return false; // needs no recommit } @@ -484,8 +564,8 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) // either resets or decommits memory, returns true if the memory needs // to be recommitted if it is to be re-used later on. -bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) { - return _mi_os_purge_ex(p, size, true, stats); +bool _mi_os_purge(void* p, size_t size) { + return _mi_os_purge_ex(p, size, true, size); } // Protect a region in memory to be not accessible. 
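The purge routine above chooses between a true decommit (after which the caller must re-commit before reuse) and a cheaper reset that keeps the range committed but lets the OS reclaim the physical pages. A stand-alone approximation of that decision with plain `madvise`, assuming a Linux-style platform; the real code additionally consults `mi_option_purge_decommits`, skips purging during preloading, and tracks precise stat sizes:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

// Purge a page-aligned range. Returns true if the caller should treat the
// range as decommitted (i.e. re-commit it before the next use).
static bool purge_range_sketch(void* p, size_t size, bool decommit) {
  if (decommit) {
    madvise(p, size, MADV_DONTNEED);   // drops the pages; rss shrinks immediately
    return true;                       // conservatively report "needs recommit"
  }
#if defined(MADV_FREE)
  madvise(p, size, MADV_FREE);         // pages reclaimable lazily; contents may be lost
#else
  madvise(p, size, MADV_DONTNEED);
#endif
  return false;                        // still committed; no recommit needed
}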
@@ -566,7 +646,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse if (psize != NULL) *psize = 0; if (pages_reserved != NULL) *pages_reserved = 0; size_t size = 0; - uint8_t* start = mi_os_claim_huge_pages(pages, &size); + uint8_t* const start = mi_os_claim_huge_pages(pages, &size); if (start == NULL) return NULL; // or 32-bit systems // Allocate one page at the time but try to place them contiguously @@ -592,15 +672,15 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse // no success, issue a warning and break if (p != NULL) { _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr); - mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true, &_mi_stats_main); + mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE); } break; } // success, record it page++; // increase before timeout check (see issue #711) - _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE); - _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE); + mi_os_stat_increase(committed, MI_HUGE_OS_PAGE_SIZE); + mi_os_stat_increase(reserved, MI_HUGE_OS_PAGE_SIZE); // check for timeout if (max_msecs > 0) { @@ -622,7 +702,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse if (psize != NULL) { *psize = page * MI_HUGE_OS_PAGE_SIZE; } if (page != 0) { mi_assert(start != NULL); - *memid = _mi_memid_create_os(true /* is committed */, all_zero, true /* is_large */); + *memid = _mi_memid_create_os(start, size, true /* is committed */, all_zero, true /* is_large */); memid->memkind = MI_MEM_OS_HUGE; mi_assert(memid->is_pinned); #ifdef MI_TRACK_ASAN @@ -634,45 +714,57 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse // free every huge page in a range individually (as we allocated per page) // note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems. -static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) { +static void mi_os_free_huge_os_pages(void* p, size_t size) { if (p==NULL || size==0) return; uint8_t* base = (uint8_t*)p; while (size >= MI_HUGE_OS_PAGE_SIZE) { - mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true, stats); + mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE); size -= MI_HUGE_OS_PAGE_SIZE; base += MI_HUGE_OS_PAGE_SIZE; } } + /* ---------------------------------------------------------------------------- Support NUMA aware allocation -----------------------------------------------------------------------------*/ -_Atomic(size_t) _mi_numa_node_count; // = 0 // cache the node count +static _Atomic(size_t) mi_numa_node_count; // = 0 // cache the node count -size_t _mi_os_numa_node_count_get(void) { - size_t count = mi_atomic_load_acquire(&_mi_numa_node_count); - if (count <= 0) { +int _mi_os_numa_node_count(void) { + size_t count = mi_atomic_load_acquire(&mi_numa_node_count); + if mi_unlikely(count == 0) { long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly? 
- if (ncount > 0) { + if (ncount > 0 && ncount < INT_MAX) { count = (size_t)ncount; } else { - count = _mi_prim_numa_node_count(); // or detect dynamically - if (count == 0) count = 1; + const size_t n = _mi_prim_numa_node_count(); // or detect dynamically + if (n == 0 || n > INT_MAX) { count = 1; } + else { count = n; } } - mi_atomic_store_release(&_mi_numa_node_count, count); // save it + mi_atomic_store_release(&mi_numa_node_count, count); // save it _mi_verbose_message("using %zd numa regions\n", count); } - return count; + mi_assert_internal(count > 0 && count <= INT_MAX); + return (int)count; } -int _mi_os_numa_node_get(mi_os_tld_t* tld) { - MI_UNUSED(tld); - size_t numa_count = _mi_os_numa_node_count(); +static int mi_os_numa_node_get(void) { + int numa_count = _mi_os_numa_node_count(); if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0 // never more than the node count and >= 0 - size_t numa_node = _mi_prim_numa_node(); + const size_t n = _mi_prim_numa_node(); + int numa_node = (n < INT_MAX ? (int)n : 0); if (numa_node >= numa_count) { numa_node = numa_node % numa_count; } - return (int)numa_node; + return numa_node; +} + +int _mi_os_numa_node(void) { + if mi_likely(mi_atomic_load_relaxed(&mi_numa_node_count) == 1) { + return 0; + } + else { + return mi_os_numa_node_get(); + } } diff --git a/system/lib/mimalloc/src/page-queue.c b/system/lib/mimalloc/src/page-queue.c index ceea91ee4dcbd..c719b6265afe5 100644 --- a/system/lib/mimalloc/src/page-queue.c +++ b/system/lib/mimalloc/src/page-queue.c @@ -57,27 +57,23 @@ static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) { // Returns MI_BIN_HUGE if the size is too large. // We use `wsize` for the size in "machine word sizes", // i.e. byte size == `wsize*sizeof(void*)`. -static inline uint8_t mi_bin(size_t size) { +static inline size_t mi_bin(size_t size) { size_t wsize = _mi_wsize_from_size(size); - uint8_t bin; - if (wsize <= 1) { - bin = 1; +#if defined(MI_ALIGN4W) + if mi_likely(wsize <= 4) { + return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes } - #if defined(MI_ALIGN4W) - else if (wsize <= 4) { - bin = (uint8_t)((wsize+1)&~1); // round to double word sizes +#elif defined(MI_ALIGN2W) + if mi_likely(wsize <= 8) { + return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes } - #elif defined(MI_ALIGN2W) - else if (wsize <= 8) { - bin = (uint8_t)((wsize+1)&~1); // round to double word sizes - } - #else - else if (wsize <= 8) { - bin = (uint8_t)wsize; +#else + if mi_likely(wsize <= 8) { + return (wsize == 0 ? 1 : wsize); } - #endif - else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) { - bin = MI_BIN_HUGE; +#endif + else if mi_unlikely(wsize > MI_MEDIUM_OBJ_WSIZE_MAX) { + return MI_BIN_HUGE; } else { #if defined(MI_ALIGN4W) @@ -85,15 +81,14 @@ static inline uint8_t mi_bin(size_t size) { #endif wsize--; // find the highest bit - uint8_t b = (uint8_t)mi_bsr(wsize); // note: wsize != 0 + const size_t b = (MI_SIZE_BITS - 1 - mi_clz(wsize)); // note: wsize != 0 // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation). 
// - adjust with 3 because we use do not round the first 8 sizes // which each get an exact bin - bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3; - mi_assert_internal(bin < MI_BIN_HUGE); + const size_t bin = ((b << 2) + ((wsize >> (b - 2)) & 0x03)) - 3; + mi_assert_internal(bin > 0 && bin < MI_BIN_HUGE); + return bin; } - mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE); - return bin; } @@ -102,11 +97,11 @@ static inline uint8_t mi_bin(size_t size) { Queue of pages with free blocks ----------------------------------------------------------- */ -uint8_t _mi_bin(size_t size) { +size_t _mi_bin(size_t size) { return mi_bin(size); } -size_t _mi_bin_size(uint8_t bin) { +size_t _mi_bin_size(size_t bin) { return _mi_heap_empty.pages[bin].block_size; } @@ -145,10 +140,15 @@ static inline bool mi_page_is_large_or_huge(const mi_page_t* page) { return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page)); } +size_t _mi_page_bin(const mi_page_t* page) { + const size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page)))); + mi_assert_internal(bin <= MI_BIN_FULL); + return bin; +} + static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) { mi_assert_internal(heap!=NULL); - uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page)))); - mi_assert_internal(bin <= MI_BIN_FULL); + const size_t bin = _mi_page_bin(page); mi_page_queue_t* pq = &heap->pages[bin]; mi_assert_internal((mi_page_block_size(page) == pq->block_size) || (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) || @@ -189,7 +189,7 @@ static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_que } else { // find previous size; due to minimal alignment upto 3 previous bins may need to be skipped - uint8_t bin = mi_bin(size); + size_t bin = mi_bin(size); const mi_page_queue_t* prev = pq - 1; while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) { prev--; @@ -264,8 +264,16 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_ heap->page_count++; } +static void mi_page_queue_move_to_front(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { + mi_assert_internal(mi_page_heap(page) == heap); + mi_assert_internal(mi_page_queue_contains(queue, page)); + if (queue->first == page) return; + mi_page_queue_remove(queue, page); + mi_page_queue_push(heap, queue, page); + mi_assert_internal(queue->first == page); +} -static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { +static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t* from, bool enqueue_at_end, mi_page_t* page) { mi_assert_internal(page != NULL); mi_assert_expensive(mi_page_queue_contains(from, page)); mi_assert_expensive(!mi_page_queue_contains(to, page)); @@ -278,6 +286,8 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to))); mi_heap_t* heap = mi_page_heap(page); + + // delete from `from` if (page->prev != NULL) page->prev->next = page->next; if (page->next != NULL) page->next->prev = page->prev; if (page == from->last) from->last = page->prev; @@ -288,22 +298,59 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro mi_heap_queue_first_update(heap, from); } - page->prev = to->last; - page->next = NULL; - if (to->last != NULL) { - 
mi_assert_internal(heap == mi_page_heap(to->last)); - to->last->next = page; - to->last = page; + // insert into `to` + if (enqueue_at_end) { + // enqueue at the end + page->prev = to->last; + page->next = NULL; + if (to->last != NULL) { + mi_assert_internal(heap == mi_page_heap(to->last)); + to->last->next = page; + to->last = page; + } + else { + to->first = page; + to->last = page; + mi_heap_queue_first_update(heap, to); + } } else { - to->first = page; - to->last = page; - mi_heap_queue_first_update(heap, to); + if (to->first != NULL) { + // enqueue at 2nd place + mi_assert_internal(heap == mi_page_heap(to->first)); + mi_page_t* next = to->first->next; + page->prev = to->first; + page->next = next; + to->first->next = page; + if (next != NULL) { + next->prev = page; + } + else { + to->last = page; + } + } + else { + // enqueue at the head (singleton list) + page->prev = NULL; + page->next = NULL; + to->first = page; + to->last = page; + mi_heap_queue_first_update(heap, to); + } } mi_page_set_in_full(page, mi_page_queue_is_full(to)); } +static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { + mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end */, page); +} + +static void mi_page_queue_enqueue_from_full(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { + // note: we could insert at the front to increase reuse, but it slows down certain benchmarks (like `alloc-test`) + mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end of the `to` queue? */, page); +} + // Only called from `mi_heap_absorb`. size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) { mi_assert_internal(mi_heap_contains_queue(heap,pq)); diff --git a/system/lib/mimalloc/src/page.c b/system/lib/mimalloc/src/page.c index 871ed21514775..a5a1050324862 100644 --- a/system/lib/mimalloc/src/page.c +++ b/system/lib/mimalloc/src/page.c @@ -37,7 +37,7 @@ static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_sta } static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld); -static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld); +static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld); #if (MI_DEBUG>=3) static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) { @@ -112,7 +112,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) { return true; } -extern bool _mi_process_is_initialized; // has mi_process_init been called? +extern mi_decl_hidden bool _mi_process_is_initialized; // has mi_process_init been called? 
bool _mi_page_is_valid(mi_page_t* page) { mi_assert_internal(mi_page_is_valid_init(page)); @@ -276,7 +276,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size mi_assert_internal(mi_heap_contains_queue(heap, pq)); mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size); #endif - mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os); + mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments); if (page == NULL) { // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue) return NULL; @@ -291,6 +291,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size mi_assert_internal(full_block_size >= block_size); mi_page_init(heap, page, full_block_size, heap->tld); mi_heap_stat_increase(heap, pages, 1); + mi_heap_stat_increase(heap, page_bins[_mi_page_bin(page)], 1); if (pq != NULL) { mi_page_queue_push(heap, pq, page); } mi_assert_expensive(_mi_page_is_valid(page)); return page; @@ -358,7 +359,7 @@ void _mi_page_unfull(mi_page_t* page) { mi_page_set_in_full(page, false); // to get the right queue mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); mi_page_set_in_full(page, true); - mi_page_queue_enqueue_from(pq, pqfull, page); + mi_page_queue_enqueue_from_full(pq, pqfull, page); } static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { @@ -404,6 +405,28 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) { _mi_segment_page_abandon(page,segments_tld); } +// force abandon a page +void _mi_page_force_abandon(mi_page_t* page) { + mi_heap_t* heap = mi_page_heap(page); + // mark page as not using delayed free + _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); + + // ensure this page is no longer in the heap delayed free list + _mi_heap_delayed_free_all(heap); + // We can still access the page meta-info even if it is freed as we ensure + // in `mi_segment_force_abandon` that the segment is not freed (yet) + if (page->capacity == 0) return; // it may have been freed now + + // and now unlink it from the page queue and abandon (or free) + mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); + if (mi_page_all_free(page)) { + _mi_page_free(page, pq, false); + } + else { + _mi_page_abandon(page, pq); + } +} + // Free a page with no more free blocks void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) { @@ -416,14 +439,13 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) { // no more aligned blocks in here mi_page_set_has_aligned(page, false); - mi_heap_t* heap = mi_page_heap(page); - // remove from the page list // (no need to do _mi_heap_delayed_free first as all blocks are already free) + mi_heap_t* heap = mi_page_heap(page); mi_segments_tld_t* segments_tld = &heap->tld->segments; mi_page_queue_remove(pq, page); - // and free it + // and free it mi_page_set_heap(page,NULL); _mi_segment_page_free(page, force, segments_tld); } @@ -451,10 +473,11 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { // how to check this efficiently though... // for now, we don't retire if it is the only page left of this size class. mi_page_queue_t* pq = mi_page_queue_of(page); + #if MI_RETIRE_CYCLES > 0 const size_t bsize = mi_page_block_size(page); if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue? 
if (pq->last==page && pq->first==page) { // the only page in the queue? - mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); + mi_stat_counter_increase(_mi_stats_main.pages_retire,1); page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); mi_heap_t* heap = mi_page_heap(page); mi_assert_internal(pq >= heap->pages); @@ -466,6 +489,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { return; // don't free after all } } + #endif _mi_page_free(page, pq, false); } @@ -608,15 +632,14 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co // Note: we also experimented with "bump" allocation on the first // allocations but this did not speed up any benchmark (due to an // extra test in malloc? or cache effects?) -static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) { - MI_UNUSED(tld); +static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) { mi_assert_expensive(mi_page_is_valid_init(page)); #if (MI_SECURE<=2) mi_assert(page->free == NULL); mi_assert(page->local_free == NULL); - if (page->free != NULL) return; + if (page->free != NULL) return true; #endif - if (page->capacity >= page->reserved) return; + if (page->capacity >= page->reserved) return true; mi_stat_counter_increase(tld->stats.pages_extended, 1); @@ -649,6 +672,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) page->capacity += (uint16_t)extend; mi_stat_increase(tld->stats.page_committed, extend * bsize); mi_assert_expensive(mi_page_is_valid_init(page)); + return true; } // Initialize a fresh page @@ -703,8 +727,10 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi mi_assert_expensive(mi_page_is_valid_init(page)); // initialize an initial free list - mi_page_extend_free(heap,page,tld); - mi_assert(mi_page_immediate_available(page)); + if (mi_page_extend_free(heap,page,tld)) { + mi_assert(mi_page_immediate_available(page)); + } + return; } @@ -712,6 +738,17 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi Find pages with free blocks -------------------------------------------------------------*/ +// search for a best next page to use for at most N pages (often cut short if immediate blocks are available) +#define MI_MAX_CANDIDATE_SEARCH (4) + +// is the page not yet used up to its reserved space? +static bool mi_page_is_expandable(const mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_internal(page->capacity <= page->reserved); + return (page->capacity < page->reserved); +} + + // Find a page with free blocks of `page->block_size`. static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try) { @@ -719,38 +756,81 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p #if MI_STAT size_t count = 0; #endif + size_t candidate_count = 0; // we reset this on the first candidate to limit the search + mi_page_t* page_candidate = NULL; // a page with free space mi_page_t* page = pq->first; + while (page != NULL) { mi_page_t* next = page->next; // remember next #if MI_STAT count++; #endif + candidate_count++; - // 0. collect freed blocks by us and other threads + // collect freed blocks by us and other threads _mi_page_free_collect(page, false); - // 1. 
if the page contains free blocks, we are done - if (mi_page_immediate_available(page)) { - break; // pick this one - } + #if MI_MAX_CANDIDATE_SEARCH > 1 + // search up to N pages for a best candidate - // 2. Try to extend - if (page->capacity < page->reserved) { - mi_page_extend_free(heap, page, heap->tld); - mi_assert_internal(mi_page_immediate_available(page)); - break; + // is the local free list non-empty? + const bool immediate_available = mi_page_immediate_available(page); + + // if the page is completely full, move it to the `mi_pages_full` + // queue so we don't visit long-lived pages too often. + if (!immediate_available && !mi_page_is_expandable(page)) { + mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); + mi_page_to_full(page, pq); + } + else { + // the page has free space, make it a candidate + // we prefer non-expandable pages with high usage as candidates (to reduce commit, and increase chances of free-ing up pages) + if (page_candidate == NULL) { + page_candidate = page; + candidate_count = 0; + } + // prefer to reuse fuller pages (in the hope the less used page gets freed) + else if (page->used >= page_candidate->used && !mi_page_is_mostly_used(page) && !mi_page_is_expandable(page)) { + page_candidate = page; + } + // if we find a non-expandable candidate, or searched for N pages, return with the best candidate + if (immediate_available || candidate_count > MI_MAX_CANDIDATE_SEARCH) { + mi_assert_internal(page_candidate!=NULL); + break; + } + } + #else + // first-fit algorithm + // If the page contains free blocks, we are done + if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) { + break; // pick this one } - // 3. If the page is completely full, move it to the `mi_pages_full` + // If the page is completely full, move it to the `mi_pages_full` // queue so we don't visit long-lived pages too often. mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); mi_page_to_full(page, pq); + #endif page = next; } // for each page - mi_heap_stat_counter_increase(heap, searches, count); + mi_heap_stat_counter_increase(heap, page_searches, count); + + // set the page to the best candidate + if (page_candidate != NULL) { + page = page_candidate; + } + if (page != NULL) { + if (!mi_page_immediate_available(page)) { + mi_assert_internal(mi_page_is_expandable(page)); + if (!mi_page_extend_free(heap, page, heap->tld)) { + page = NULL; // failed to extend + } + } + mi_assert_internal(page == NULL || mi_page_immediate_available(page)); + } if (page == NULL) { _mi_heap_collect_retired(heap, false); // perhaps make a page available? @@ -761,10 +841,14 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p } } else { - mi_assert(pq->first == page); + // move the page to the front of the queue + mi_page_queue_move_to_front(heap, pq, page); page->retire_expire = 0; + // _mi_heap_collect_retired(heap, false); // update retire counts; note: increases rss on MemoryLoad bench so don't do this } mi_assert_internal(page == NULL || mi_page_immediate_available(page)); + + return page; } @@ -772,7 +856,9 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p // Find a page with free blocks of `size`. 
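The rewritten page search above no longer takes the first page with free space: it visits at most a few pages, keeps the fullest usable one as the candidate, and moves completely full pages out of the way, so lightly used pages get a chance to drain and be freed. A stripped-down sketch of that bounded best-candidate scan; the `page_t` type and its fields are invented for illustration and are not mimalloc's:

#include <stdbool.h>
#include <stddef.h>

#define MAX_CANDIDATE_SEARCH 4

typedef struct page_s {
  struct page_s* next;
  size_t used;        // blocks currently in use
  bool   has_free;    // a block is immediately available
  bool   expandable;  // reserved space that is not yet committed
} page_t;

// Scan at most MAX_CANDIDATE_SEARCH pages past the first candidate and
// prefer fuller, non-expandable pages as the result.
static page_t* find_best_candidate(page_t* first) {
  page_t* best = NULL;
  size_t searched = 0;
  for (page_t* p = first; p != NULL; p = p->next) {
    searched++;
    if (!p->has_free && !p->expandable) continue;          // full page: skip it
    if (best == NULL) { best = p; searched = 0; }
    else if (p->used >= best->used && !p->expandable) { best = p; }
    if (p->has_free || searched > MAX_CANDIDATE_SEARCH) break;
  }
  return best;
}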
static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { - mi_page_queue_t* pq = mi_page_queue(heap,size); + mi_page_queue_t* pq = mi_page_queue(heap, size); + + // check the first page: we even do this with candidate search or otherwise we re-search every time mi_page_t* page = pq->first; if (page != NULL) { #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness @@ -791,6 +877,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { return page; // fast path } } + return mi_page_queue_find_free_ex(heap, pq, true); } @@ -856,13 +943,14 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t } const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding - if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_increase(heap, large, bsize); - mi_heap_stat_counter_increase(heap, large_count, 1); + /*if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + mi_heap_stat_increase(heap, malloc_large, bsize); + mi_heap_stat_counter_increase(heap, malloc_large_count, 1); } - else { - mi_heap_stat_increase(heap, huge, bsize); - mi_heap_stat_counter_increase(heap, huge_count, 1); + else */ + { + _mi_stat_increase(&heap->tld->stats.malloc_huge, bsize); + _mi_stat_counter_increase(&heap->tld->stats.malloc_huge_count, 1); } } return page; @@ -907,11 +995,23 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al } mi_assert_internal(mi_heap_is_initialized(heap)); - // call potential deferred free routines - _mi_deferred_free(heap, false); - - // free delayed frees from other threads (but skip contended ones) - _mi_heap_delayed_free_partial(heap); + // do administrative tasks every N generic mallocs + if mi_unlikely(++heap->generic_count >= 100) { + heap->generic_collect_count += heap->generic_count; + heap->generic_count = 0; + // call potential deferred free routines + _mi_deferred_free(heap, false); + + // free delayed frees from other threads (but skip contended ones) + _mi_heap_delayed_free_partial(heap); + + // collect every once in a while (10000 by default) + const long generic_collect = mi_option_get_clamp(mi_option_generic_collect, 1, 1000000L); + if (heap->generic_collect_count >= generic_collect) { + heap->generic_collect_count = 0; + mi_heap_collect(heap, false /* force? */); + } + } // find (or allocate) a page of the right size mi_page_t* page = mi_find_page(heap, size, huge_alignment); @@ -930,14 +1030,20 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al mi_assert_internal(mi_page_block_size(page) >= size); // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc) - if mi_unlikely(zero && page->block_size == 0) { + void* p; + if mi_unlikely(zero && mi_page_is_huge(page)) { // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case. 
- void* p = _mi_page_malloc(heap, page, size); + p = _mi_page_malloc(heap, page, size); mi_assert_internal(p != NULL); _mi_memzero_aligned(p, mi_page_usable_block_size(page)); - return p; } else { - return _mi_page_malloc_zero(heap, page, size, zero); + p = _mi_page_malloc_zero(heap, page, size, zero); + mi_assert_internal(p != NULL); + } + // move singleton pages to the full queue + if (page->reserved == page->used) { + mi_page_to_full(page, mi_page_queue_of(page)); } + return p; } diff --git a/system/lib/mimalloc/src/prim/emscripten/prim.c b/system/lib/mimalloc/src/prim/emscripten/prim.c index 069cdb3b57ef8..d9cdf1b505152 100644 --- a/system/lib/mimalloc/src/prim/emscripten/prim.c +++ b/system/lib/mimalloc/src/prim/emscripten/prim.c @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2023, Microsoft Research, Daan Leijen, Alon Zakai +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen, Alon Zakai This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -58,7 +58,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config) { extern void emmalloc_free(void*); int _mi_prim_free(void* addr, size_t size) { - MI_UNUSED(size); + if (size==0) return 0; emmalloc_free(addr); return 0; } @@ -71,8 +71,8 @@ int _mi_prim_free(void* addr, size_t size) { extern void* emmalloc_memalign(size_t alignment, size_t size); // Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. -int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { - MI_UNUSED(allow_large); MI_UNUSED(commit); +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + MI_UNUSED(allow_large); MI_UNUSED(commit); MI_UNUSED(hint_addr); *is_large = false; // TODO: Track the highest address ever seen; first uses of it are zeroes. 
// That assumes no one else uses sbrk but us (they could go up, @@ -110,6 +110,11 @@ int _mi_prim_reset(void* addr, size_t size) { return 0; } +int _mi_prim_reuse(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + int _mi_prim_protect(void* addr, size_t size, bool protect) { MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect); return 0; @@ -196,7 +201,7 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) { // Thread init/done //---------------------------------------------------------------- -#ifdef __EMSCRIPTEN_SHARED_MEMORY__ +#if defined(MI_USE_PTHREADS) // use pthread local storage keys to detect thread ending // (and used with MI_TLS_PTHREADS for the default heap) diff --git a/system/lib/mimalloc/src/prim/osx/alloc-override-zone.c b/system/lib/mimalloc/src/prim/osx/alloc-override-zone.c index 1515b886b20b7..d3af170decfcc 100644 --- a/system/lib/mimalloc/src/prim/osx/alloc-override-zone.c +++ b/system/lib/mimalloc/src/prim/osx/alloc-override-zone.c @@ -418,9 +418,9 @@ static inline malloc_zone_t* mi_get_default_zone(void) } #if defined(__clang__) -__attribute__((constructor(0))) +__attribute__((constructor(101))) // highest priority #else -__attribute__((constructor)) // seems not supported by g++-11 on the M1 +__attribute__((constructor)) // priority level is not supported by gcc #endif __attribute__((used)) static void _mi_macos_override_malloc(void) { diff --git a/system/lib/mimalloc/src/prim/prim.c b/system/lib/mimalloc/src/prim/prim.c index 3b7d373642f51..5147bae81feaa 100644 --- a/system/lib/mimalloc/src/prim/prim.c +++ b/system/lib/mimalloc/src/prim/prim.c @@ -25,3 +25,52 @@ terms of the MIT license. A copy of the license can be found in the file #include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.) #endif + +// Generic process initialization +#ifndef MI_PRIM_HAS_PROCESS_ATTACH +#if defined(__GNUC__) || defined(__clang__) + // gcc,clang: use the constructor/destructor attribute + // which for both seem to run before regular constructors/destructors + #if defined(__clang__) + #define mi_attr_constructor __attribute__((constructor(101))) + #define mi_attr_destructor __attribute__((destructor(101))) + #else + #define mi_attr_constructor __attribute__((constructor)) + #define mi_attr_destructor __attribute__((destructor)) + #endif + static void mi_attr_constructor mi_process_attach(void) { + _mi_auto_process_init(); + } + static void mi_attr_destructor mi_process_detach(void) { + _mi_auto_process_done(); + } +#elif defined(__cplusplus) + // C++: use static initialization to detect process start/end + // This is not guaranteed to be first/last but the best we can generally do? 
+ struct mi_init_done_t { + mi_init_done_t() { + _mi_auto_process_init(); + } + ~mi_init_done_t() { + _mi_auto_process_done(); + } + }; + static mi_init_done_t mi_init_done; + #else + #pragma message("define a way to call _mi_auto_process_init/done on your platform") +#endif +#endif + +// Generic allocator init/done callback +#ifndef MI_PRIM_HAS_ALLOCATOR_INIT +bool _mi_is_redirected(void) { + return false; +} +bool _mi_allocator_init(const char** message) { + if (message != NULL) { *message = NULL; } + return true; +} +void _mi_allocator_done(void) { + // nothing to do +} +#endif diff --git a/system/lib/mimalloc/src/prim/unix/prim.c b/system/lib/mimalloc/src/prim/unix/prim.c index dd665d3d1f192..650aa657b9eb1 100644 --- a/system/lib/mimalloc/src/prim/unix/prim.c +++ b/system/lib/mimalloc/src/prim/unix/prim.c @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -22,20 +22,21 @@ terms of the MIT license. A copy of the license can be found in the file #include "mimalloc.h" #include "mimalloc/internal.h" -#include "mimalloc/atomic.h" #include "mimalloc/prim.h" #include // mmap #include // sysconf #include // open, close, read, access +#include // getenv, arc4random_buf #if defined(__linux__) #include - #if defined(MI_NO_THP) - #include + #include // THP disable, PR_SET_VMA + #if defined(__GLIBC__) && !defined(PR_SET_VMA) + #include #endif #if defined(__GLIBC__) - #include // linux mmap flags + #include // linux mmap flags #else #include #endif @@ -57,11 +58,19 @@ terms of the MIT license. A copy of the license can be found in the file #include #endif -#if defined(__linux__) || defined(__FreeBSD__) +#if (defined(__linux__) && !defined(__ANDROID__)) || defined(__FreeBSD__) #define MI_HAS_SYSCALL_H #include #endif +#if !defined(MADV_DONTNEED) && defined(POSIX_MADV_DONTNEED) // QNX +#define MADV_DONTNEED POSIX_MADV_DONTNEED +#endif +#if !defined(MADV_FREE) && defined(POSIX_MADV_FREE) // QNX +#define MADV_FREE POSIX_MADV_FREE +#endif + +#define MI_UNIX_LARGE_PAGE_SIZE (2*MI_MiB) // TODO: can we query the OS for this? //------------------------------------------------------------------------------------ // Use syscalls for some primitives to allow for libraries that override open/read/close etc. @@ -140,8 +149,15 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) if (psize > 0) { config->page_size = (size_t)psize; config->alloc_granularity = (size_t)psize; + #if defined(_SC_PHYS_PAGES) + long pphys = sysconf(_SC_PHYS_PAGES); + const size_t psize_in_kib = (size_t)psize / MI_KiB; + if (psize_in_kib > 0 && pphys > 0 && (size_t)pphys <= (SIZE_MAX/psize_in_kib)) { + config->physical_memory_in_kib = (size_t)pphys * psize_in_kib; + } + #endif } - config->large_page_size = 2*MI_MiB; // TODO: can we query the OS for this? + config->large_page_size = MI_UNIX_LARGE_PAGE_SIZE; config->has_overcommit = unix_detect_overcommit(); config->has_partial_free = true; // mmap can free in parts config->has_virtual_reserve = true; // todo: check if this true for NetBSD? 
(for anonymous mmap with PROT_NONE) @@ -171,6 +187,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) //--------------------------------------------- int _mi_prim_free(void* addr, size_t size ) { + if (size==0) return 0; bool err = (munmap(addr, size) == -1); return (err ? errno : 0); } @@ -182,20 +199,33 @@ int _mi_prim_free(void* addr, size_t size ) { static int unix_madvise(void* addr, size_t size, int advice) { #if defined(__sun) - return madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520) + int res = madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520) + #elif defined(__QNX__) + int res = posix_madvise(addr, size, advice); #else - return madvise(addr, size, advice); + int res = madvise(addr, size, advice); #endif + return (res==0 ? 0 : errno); } -static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) { +static void* unix_mmap_prim(void* addr, size_t size, int protect_flags, int flags, int fd) { + void* p = mmap(addr, size, protect_flags, flags, fd, 0 /* offset */); + #if defined(__linux__) && defined(PR_SET_VMA) + if (p!=MAP_FAILED && p!=NULL) { + prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "mimalloc"); + } + #endif + return p; +} + +static void* unix_mmap_prim_aligned(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) { MI_UNUSED(try_alignment); void* p = NULL; #if defined(MAP_ALIGNED) // BSD if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { size_t n = mi_bsr(try_alignment); if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB - p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0); + p = unix_mmap_prim(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd); if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { int err = errno; _mi_trace_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr); @@ -206,7 +236,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p } #elif defined(MAP_ALIGN) // Solaris if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { - p = mmap((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd, 0); // addr parameter is the required alignment + p = unix_mmap_prim((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd); // addr parameter is the required alignment if (p!=MAP_FAILED) return p; // fall back to regular mmap } @@ -216,7 +246,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p if (addr == NULL) { void* hint = _mi_os_get_aligned_hint(try_alignment, size); if (hint != NULL) { - p = mmap(hint, size, protect_flags, flags, fd, 0); + p = unix_mmap_prim(hint, size, protect_flags, flags, fd); if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { #if MI_TRACK_ENABLED // asan sometimes does not instrument errno correctly? int err = 0; @@ -231,7 +261,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p } #endif // regular mmap - p = mmap(addr, size, protect_flags, flags, fd, 0); + p = unix_mmap_prim(addr, size, protect_flags, flags, fd); if (p!=MAP_FAILED) return p; // failed to allocate return NULL; @@ -241,7 +271,7 @@ static int unix_mmap_fd(void) { #if defined(VM_MAKE_TAG) // macOS: tracking anonymous page with a specific ID. 
(All up to 98 are taken officially but LLVM sanitizers had taken 99) int os_tag = (int)mi_option_get(mi_option_os_tag); - if (os_tag < 100 || os_tag > 255) { os_tag = 100; } + if (os_tag < 100 || os_tag > 255) { os_tag = 254; } return VM_MAKE_TAG(os_tag); #else return -1; @@ -265,7 +295,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD #endif // huge page allocation - if ((large_only || _mi_os_use_large_page(size, try_alignment)) && allow_large) { + if (allow_large && (large_only || (_mi_os_use_large_page(size, try_alignment) && mi_option_get(mi_option_allow_large_os_pages) == 1))) { static _Atomic(size_t) large_page_try_ok; // = 0; size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok); if (!large_only && try_ok > 0) { @@ -286,7 +316,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec #endif #ifdef MAP_HUGE_1GB static bool mi_huge_pages_available = true; - if ((size % MI_GiB) == 0 && mi_huge_pages_available) { + if (large_only && (size % MI_GiB) == 0 && mi_huge_pages_available) { lflags |= MAP_HUGE_1GB; } else @@ -302,13 +332,15 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec if (large_only || lflags != flags) { // try large OS page allocation *is_large = true; - p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd); + p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd); #ifdef MAP_HUGE_1GB if (p == NULL && (lflags & MAP_HUGE_1GB) == MAP_HUGE_1GB) { mi_huge_pages_available = false; // don't try huge 1GiB pages again - _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno); + if (large_only) { + _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno); + } lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB); - p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd); + p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd); } #endif if (large_only) return p; @@ -321,7 +353,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec // regular allocation if (p == NULL) { *is_large = false; - p = unix_mmap_prim(addr, size, try_alignment, protect_flags, flags, fd); + p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, flags, fd); if (p != NULL) { #if defined(MADV_HUGEPAGE) // Many Linux systems don't allow MAP_HUGETLB but they support instead @@ -332,7 +364,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec // when large OS pages are enabled for mimalloc, we call `madvise` anyways. if (allow_large && _mi_os_use_large_page(size, try_alignment)) { if (unix_madvise(p, size, MADV_HUGEPAGE) == 0) { - *is_large = true; // possibly + // *is_large = true; // possibly }; } #elif defined(__sun) @@ -341,7 +373,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec cmd.mha_pagesize = _mi_os_large_page_size(); cmd.mha_cmd = MHA_MAPSIZE_VA; if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) { - *is_large = true; + // *is_large = true; // possibly } } #endif @@ -351,14 +383,17 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec } // Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. 
-int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(commit || !allow_large); mi_assert_internal(try_alignment > 0); + if (hint_addr == NULL && size >= 8*MI_UNIX_LARGE_PAGE_SIZE && try_alignment > 1 && _mi_is_power_of_two(try_alignment) && try_alignment < MI_UNIX_LARGE_PAGE_SIZE) { + try_alignment = MI_UNIX_LARGE_PAGE_SIZE; // try to align along large page size for larger allocations + } *is_zero = true; int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); - *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large); + *addr = unix_mmap(hint_addr, size, try_alignment, protect_flags, false, allow_large, is_large); return (*addr != NULL ? 0 : errno); } @@ -394,11 +429,25 @@ int _mi_prim_commit(void* start, size_t size, bool* is_zero) { return err; } +int _mi_prim_reuse(void* start, size_t size) { + MI_UNUSED(start); MI_UNUSED(size); + #if defined(__APPLE__) && defined(MADV_FREE_REUSE) + return unix_madvise(start, size, MADV_FREE_REUSE); + #endif + return 0; +} + int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) { int err = 0; - // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE) - err = unix_madvise(start, size, MADV_DONTNEED); - #if !MI_DEBUG && !MI_SECURE + #if defined(__APPLE__) && defined(MADV_FREE_REUSABLE) + // decommit on macOS: use MADV_FREE_REUSABLE as it does immediate rss accounting (issue #1097) + err = unix_madvise(start, size, MADV_FREE_REUSABLE); + if (err) { err = unix_madvise(start, size, MADV_DONTNEED); } + #else + // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE) + err = unix_madvise(start, size, MADV_DONTNEED); + #endif + #if !MI_DEBUG && MI_SECURE<=2 *needs_recommit = false; #else *needs_recommit = true; @@ -415,14 +464,22 @@ int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) { } int _mi_prim_reset(void* start, size_t size) { - // We try to use `MADV_FREE` as that is the fastest. A drawback though is that it + int err = 0; + + // on macOS can use MADV_FREE_REUSABLE (but we disable this for now as it seems slower) + #if 0 && defined(__APPLE__) && defined(MADV_FREE_REUSABLE) + err = unix_madvise(start, size, MADV_FREE_REUSABLE); + if (err==0) return 0; + // fall through + #endif + + #if defined(MADV_FREE) + // Otherwise, we try to use `MADV_FREE` as that is the fastest. A drawback though is that it // will not reduce the `rss` stats in tools like `top` even though the memory is available // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by // default `MADV_DONTNEED` is used though. 
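/* -----------------------------------------------------------------------------
   Editor's note: hedged illustration (not part of this patch) of why
   MADV_DONTNEED works as a "decommit" on Linux: after the call the physical
   pages are dropped and the range reads back as zero-filled on the next touch,
   while the virtual address range itself stays reserved.
----------------------------------------------------------------------------- */
#if defined(__linux__)
#include <assert.h>
#include <string.h>
#include <sys/mman.h>

static void demo_dontneed_decommit(void) {
  const size_t sz = 1u << 20;   // 1 MiB
  char* p = (char*)mmap(NULL, sz, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return;
  memset(p, 0xA5, sz);            // touch the pages so they are actually committed
  madvise(p, sz, MADV_DONTNEED);  // "decommit": release the physical pages, keep the mapping
  assert(p[0] == 0);              // re-touching yields fresh zero pages
  munmap(p, sz);
}
#endif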
- #if defined(MADV_FREE) static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE); int oadvice = (int)mi_atomic_load_relaxed(&advice); - int err; while ((err = unix_madvise(start, size, oadvice)) != 0 && errno == EAGAIN) { errno = 0; }; if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) { // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on @@ -430,7 +487,7 @@ int _mi_prim_reset(void* start, size_t size) { err = unix_madvise(start, size, MADV_DONTNEED); } #else - int err = unix_madvise(start, size, MADV_DONTNEED); + err = unix_madvise(start, size, MADV_DONTNEED); #endif return err; } @@ -760,7 +817,7 @@ bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { #include bool _mi_prim_random_buf(void* buf, size_t buf_len) { - // We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf + // We prefer CCRandomGenerateBytes as it returns an error code while arc4random_buf // may fail silently on macOS. See PR #390, and return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess); } @@ -770,7 +827,6 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) { defined(__sun) || \ (defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_7)) -#include bool _mi_prim_random_buf(void* buf, size_t buf_len) { arc4random_buf(buf, buf_len); return true; diff --git a/system/lib/mimalloc/src/prim/wasi/prim.c b/system/lib/mimalloc/src/prim/wasi/prim.c index e95f67f587ea3..745a41fd348fd 100644 --- a/system/lib/mimalloc/src/prim/wasi/prim.c +++ b/system/lib/mimalloc/src/prim/wasi/prim.c @@ -9,7 +9,6 @@ terms of the MIT license. A copy of the license can be found in the file #include "mimalloc.h" #include "mimalloc/internal.h" -#include "mimalloc/atomic.h" #include "mimalloc/prim.h" #include // fputs @@ -22,7 +21,7 @@ terms of the MIT license. A copy of the license can be found in the file void _mi_prim_mem_init( mi_os_mem_config_t* config ) { config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB config->alloc_granularity = 16; - config->has_overcommit = false; + config->has_overcommit = false; config->has_partial_free = false; config->has_virtual_reserve = false; } @@ -120,8 +119,8 @@ static void* mi_prim_mem_grow(size_t size, size_t try_alignment) { } // Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. 
-int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { - MI_UNUSED(allow_large); MI_UNUSED(commit); +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + MI_UNUSED(allow_large); MI_UNUSED(commit); MI_UNUSED(hint_addr); *is_large = false; *is_zero = false; *addr = mi_prim_mem_grow(size, try_alignment); @@ -134,7 +133,7 @@ int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_la //--------------------------------------------- int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { - MI_UNUSED(addr); MI_UNUSED(size); + MI_UNUSED(addr); MI_UNUSED(size); *is_zero = false; return 0; } @@ -150,6 +149,11 @@ int _mi_prim_reset(void* addr, size_t size) { return 0; } +int _mi_prim_reuse(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + int _mi_prim_protect(void* addr, size_t size, bool protect) { MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect); return 0; @@ -199,9 +203,9 @@ mi_msecs_t _mi_prim_clock_now(void) { // low resolution timer mi_msecs_t _mi_prim_clock_now(void) { #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0) - return (mi_msecs_t)clock(); + return (mi_msecs_t)clock(); #elif (CLOCKS_PER_SEC < 1000) - return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); + return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); #else return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000); #endif diff --git a/system/lib/mimalloc/src/prim/windows/prim.c b/system/lib/mimalloc/src/prim/windows/prim.c index 5074ad4cbd54c..eebdc4a67e2f9 100644 --- a/system/lib/mimalloc/src/prim/windows/prim.c +++ b/system/lib/mimalloc/src/prim/windows/prim.c @@ -9,10 +9,13 @@ terms of the MIT license. 
A copy of the license can be found in the file #include "mimalloc.h" #include "mimalloc/internal.h" -#include "mimalloc/atomic.h" #include "mimalloc/prim.h" #include // fputs, stderr +// xbox has no console IO +#if !defined(WINAPI_FAMILY_PARTITION) || WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM) +#define MI_HAS_CONSOLE_IO +#endif //--------------------------------------------- // Dynamically bind Windows API points for portability @@ -46,22 +49,33 @@ typedef struct MI_MEM_ADDRESS_REQUIREMENTS_S { #define MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE 0x00000010 #include -typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); -typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); +typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); +typedef LONG (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); // avoid NTSTATUS as it is not defined on xbox (pr #1084) static PVirtualAlloc2 pVirtualAlloc2 = NULL; static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL; -// Similarly, GetNumaProcesorNodeEx is only supported since Windows 7 +// Similarly, GetNumaProcessorNodeEx is only supported since Windows 7 (and GetNumaNodeProcessorMask is not supported on xbox) typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER; typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber); typedef BOOL (__stdcall *PGetNumaProcessorNodeEx)(MI_PROCESSOR_NUMBER* Processor, PUSHORT NodeNumber); typedef BOOL (__stdcall* PGetNumaNodeProcessorMaskEx)(USHORT Node, PGROUP_AFFINITY ProcessorMask); typedef BOOL (__stdcall *PGetNumaProcessorNode)(UCHAR Processor, PUCHAR NodeNumber); +typedef BOOL (__stdcall* PGetNumaNodeProcessorMask)(UCHAR Node, PULONGLONG ProcessorMask); +typedef BOOL (__stdcall* PGetNumaHighestNodeNumber)(PULONG Node); static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL; static PGetNumaProcessorNodeEx pGetNumaProcessorNodeEx = NULL; static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL; static PGetNumaProcessorNode pGetNumaProcessorNode = NULL; +static PGetNumaNodeProcessorMask pGetNumaNodeProcessorMask = NULL; +static PGetNumaHighestNodeNumber pGetNumaHighestNodeNumber = NULL; + +// Not available on xbox +typedef SIZE_T(__stdcall* PGetLargePageMinimum)(VOID); +static PGetLargePageMinimum pGetLargePageMinimum = NULL; + +// Available after Windows XP +typedef BOOL (__stdcall *PGetPhysicallyInstalledSystemMemory)( PULONGLONG TotalMemoryInKilobytes ); //--------------------------------------------- // Enable large page support dynamically (if possible) @@ -72,6 +86,7 @@ static bool win_enable_large_os_pages(size_t* large_page_size) static bool large_initialized = false; if (large_initialized) return (_mi_os_large_page_size() > 0); large_initialized = true; + if (pGetLargePageMinimum==NULL) return false; // no large page support (xbox etc.) 
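/* -----------------------------------------------------------------------------
   Editor's note: hedged sketch (not part of this patch) of the dynamic-binding
   pattern used throughout this file: resolve an optional Win32 export at
   runtime with GetProcAddress so the code still loads where the export is
   missing (e.g. on Xbox), and degrade gracefully when it is.
   `demo_large_page_minimum` is a hypothetical helper name.
----------------------------------------------------------------------------- */
#include <windows.h>

typedef SIZE_T (__stdcall *demo_PGetLargePageMinimum)(VOID);

static SIZE_T demo_large_page_minimum(void) {
  HMODULE h = GetModuleHandleW(L"kernel32.dll");   // already loaded in every process
  if (h == NULL) return 0;
  demo_PGetLargePageMinimum fn =
    (demo_PGetLargePageMinimum)(void (*)(void))GetProcAddress(h, "GetLargePageMinimum");
  return (fn != NULL ? (*fn)() : 0);               // 0 == "no large page support"
}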
// Try to see if large OS pages are supported // To use large pages on Windows, we first need access permission @@ -90,8 +105,8 @@ static bool win_enable_large_os_pages(size_t* large_page_size) if (ok) { err = GetLastError(); ok = (err == ERROR_SUCCESS); - if (ok && large_page_size != NULL) { - *large_page_size = GetLargePageMinimum(); + if (ok && large_page_size != NULL && pGetLargePageMinimum != NULL) { + *large_page_size = (*pGetLargePageMinimum)(); } } } @@ -119,6 +134,12 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) GetSystemInfo(&si); if (si.dwPageSize > 0) { config->page_size = si.dwPageSize; } if (si.dwAllocationGranularity > 0) { config->alloc_granularity = si.dwAllocationGranularity; } + // get virtual address bits + if ((uintptr_t)si.lpMaximumApplicationAddress > 0) { + const size_t vbits = MI_SIZE_BITS - mi_clz((uintptr_t)si.lpMaximumApplicationAddress); + config->virtual_address_bits = vbits; + } + // get the VirtualAlloc2 function HINSTANCE hDll; hDll = LoadLibrary(TEXT("kernelbase.dll")); @@ -141,8 +162,22 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config ) pGetNumaProcessorNodeEx = (PGetNumaProcessorNodeEx)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNodeEx"); pGetNumaNodeProcessorMaskEx = (PGetNumaNodeProcessorMaskEx)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMaskEx"); pGetNumaProcessorNode = (PGetNumaProcessorNode)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNode"); + pGetNumaNodeProcessorMask = (PGetNumaNodeProcessorMask)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMask"); + pGetNumaHighestNodeNumber = (PGetNumaHighestNodeNumber)(void (*)(void))GetProcAddress(hDll, "GetNumaHighestNodeNumber"); + pGetLargePageMinimum = (PGetLargePageMinimum)(void (*)(void))GetProcAddress(hDll, "GetLargePageMinimum"); + // Get physical memory (not available on XP, so check dynamically) + PGetPhysicallyInstalledSystemMemory pGetPhysicallyInstalledSystemMemory = (PGetPhysicallyInstalledSystemMemory)(void (*)(void))GetProcAddress(hDll,"GetPhysicallyInstalledSystemMemory"); + if (pGetPhysicallyInstalledSystemMemory != NULL) { + ULONGLONG memInKiB = 0; + if ((*pGetPhysicallyInstalledSystemMemory)(&memInKiB)) { + if (memInKiB > 0 && memInKiB <= SIZE_MAX) { + config->physical_memory_in_kib = (size_t)memInKiB; + } + } + } FreeLibrary(hDll); } + // Enable large/huge OS page support? if (mi_option_is_enabled(mi_option_allow_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { win_enable_large_os_pages(&config->large_page_size); } @@ -162,7 +197,7 @@ int _mi_prim_free(void* addr, size_t size ) { // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside // the memory region returned by VirtualAlloc; in that case we need to free using // the start of the region. 
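/* -----------------------------------------------------------------------------
   Editor's note: hedged sketch (not part of this patch) of the idea described
   in the comment above: MEM_RELEASE only accepts the base address originally
   returned by VirtualAlloc (and requires size 0), so for a pointer inside a
   region we first look up the allocation base with VirtualQuery.
----------------------------------------------------------------------------- */
#include <windows.h>

static BOOL demo_release_containing_region(void* interior_ptr) {
  MEMORY_BASIC_INFORMATION info;
  ZeroMemory(&info, sizeof(info));
  if (VirtualQuery(interior_ptr, &info, sizeof(info)) == 0) return FALSE;
  return VirtualFree(info.AllocationBase, 0, MEM_RELEASE);   // size must be 0 for MEM_RELEASE
}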
- MEMORY_BASIC_INFORMATION info = { 0 }; + MEMORY_BASIC_INFORMATION info; _mi_memzero_var(info); VirtualQuery(addr, &info, sizeof(info)); if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)MI_SEGMENT_SIZE) { errcode = 0; @@ -192,7 +227,7 @@ static void* win_virtual_alloc_prim_once(void* addr, size_t size, size_t try_ali } #endif // on modern Windows try use VirtualAlloc2 for aligned allocation - if (try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) { + if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) { MI_MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 }; reqs.Alignment = try_alignment; MI_MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} }; @@ -231,7 +266,7 @@ static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignmen else if (max_retry_msecs > 0 && (try_alignment <= 2*MI_SEGMENT_ALIGN) && (flags&MEM_COMMIT) != 0 && (flags&MEM_LARGE_PAGES) == 0 && win_is_out_of_memory_error(GetLastError())) { - // if committing regular memory and being out-of-memory, + // if committing regular memory and being out-of-memory, // keep trying for a bit in case memory frees up after all. See issue #894 _mi_warning_message("out-of-memory on OS allocation, try again... (attempt %lu, 0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", tries, size, GetLastError(), addr, try_alignment, flags); long sleep_msecs = tries*40; // increasing waits @@ -280,14 +315,14 @@ static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DW return p; } -int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(commit || !allow_large); mi_assert_internal(try_alignment > 0); *is_zero = true; int flags = MEM_RESERVE; if (commit) { flags |= MEM_COMMIT; } - *addr = win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large); + *addr = win_virtual_alloc(hint_addr, size, try_alignment, flags, false, allow_large, is_large); return (*addr != NULL ? 0 : (int)GetLastError()); } @@ -316,7 +351,7 @@ int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { return 0; } -int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { BOOL ok = VirtualFree(addr, size, MEM_DECOMMIT); *needs_recommit = true; // for safety, assume always decommitted even in the case of an error. return (ok ? 0 : (int)GetLastError()); @@ -333,6 +368,11 @@ int _mi_prim_reset(void* addr, size_t size) { return (p != NULL ? 0 : (int)GetLastError()); } +int _mi_prim_reuse(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + int _mi_prim_protect(void* addr, size_t size, bool protect) { DWORD oldprotect = 0; BOOL ok = VirtualProtect(addr, size, protect ? 
PAGE_NOACCESS : PAGE_READWRITE, &oldprotect); @@ -364,7 +404,7 @@ static void* _mi_prim_alloc_huge_os_pagesx(void* hint_addr, size_t size, int num } SIZE_T psize = size; void* base = hint_addr; - NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count); + LONG err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count); if (err == 0 && base != NULL) { return base; } @@ -418,9 +458,11 @@ size_t _mi_prim_numa_node(void) { size_t _mi_prim_numa_node_count(void) { ULONG numa_max = 0; - GetNumaHighestNodeNumber(&numa_max); + if (pGetNumaHighestNodeNumber!=NULL) { + (*pGetNumaHighestNodeNumber)(&numa_max); + } // find the highest node number that has actual processors assigned to it. Issue #282 - while(numa_max > 0) { + while (numa_max > 0) { if (pGetNumaNodeProcessorMaskEx != NULL) { // Extended API is supported GROUP_AFFINITY affinity; @@ -431,8 +473,10 @@ size_t _mi_prim_numa_node_count(void) { else { // Vista or earlier, use older API that is limited to 64 processors. ULONGLONG mask; - if (GetNumaNodeProcessorMask((UCHAR)numa_max, &mask)) { - if (mask != 0) break; // found the maximum non-empty node + if (pGetNumaNodeProcessorMask != NULL) { + if ((*pGetNumaNodeProcessorMask)((UCHAR)numa_max, &mask)) { + if (mask != 0) break; // found the maximum non-empty node + } }; } // max node was invalid or had no processor assigned, try again @@ -468,7 +512,6 @@ mi_msecs_t _mi_prim_clock_now(void) { // Process Info //---------------------------------------------------------------- -#include #include static mi_msecs_t filetime_msecs(const FILETIME* ftime) { @@ -491,7 +534,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo) GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut); pinfo->utime = filetime_msecs(&ut); pinfo->stime = filetime_msecs(&st); - + // load psapi on demand if (pGetProcessMemoryInfo == NULL) { HINSTANCE hDll = LoadLibrary(TEXT("psapi.dll")); @@ -501,11 +544,10 @@ void _mi_prim_process_info(mi_process_info_t* pinfo) } // get process info - PROCESS_MEMORY_COUNTERS info; - memset(&info, 0, sizeof(info)); + PROCESS_MEMORY_COUNTERS info; _mi_memzero_var(info); if (pGetProcessMemoryInfo != NULL) { pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info)); - } + } pinfo->current_rss = (size_t)info.WorkingSetSize; pinfo->peak_rss = (size_t)info.PeakWorkingSetSize; pinfo->current_commit = (size_t)info.PagefileUsage; @@ -517,24 +559,28 @@ void _mi_prim_process_info(mi_process_info_t* pinfo) // Output //---------------------------------------------------------------- -void _mi_prim_out_stderr( const char* msg ) +void _mi_prim_out_stderr( const char* msg ) { // on windows with redirection, the C runtime cannot handle locale dependent output // after the main thread closes so we use direct console output. 
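/* -----------------------------------------------------------------------------
   Editor's note: hedged sketch (not part of this patch) of the direct console
   output described above: write straight to the STD_ERROR_HANDLE, using
   WriteConsoleA when it is a real console and WriteFile when stderr has been
   redirected to a file or pipe.
----------------------------------------------------------------------------- */
#include <windows.h>
#include <string.h>

static void demo_write_stderr(const char* msg) {
  HANDLE h = GetStdHandle(STD_ERROR_HANDLE);
  if (h == NULL || h == INVALID_HANDLE_VALUE) return;
  const DWORD len = (DWORD)strlen(msg);
  DWORD written = 0;
  CONSOLE_SCREEN_BUFFER_INFO sbi;
  if (GetConsoleScreenBufferInfo(h, &sbi)) {
    WriteConsoleA(h, msg, len, &written, NULL);   // attached to a real console
  }
  else {
    WriteFile(h, msg, len, &written, NULL);       // redirected: plain byte write
  }
}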
if (!_mi_preloading()) { // _cputs(msg); // _cputs cannot be used as it aborts when failing to lock the console static HANDLE hcon = INVALID_HANDLE_VALUE; - static bool hconIsConsole; + static bool hconIsConsole = false; if (hcon == INVALID_HANDLE_VALUE) { - CONSOLE_SCREEN_BUFFER_INFO sbi; hcon = GetStdHandle(STD_ERROR_HANDLE); + #ifdef MI_HAS_CONSOLE_IO + CONSOLE_SCREEN_BUFFER_INFO sbi; hconIsConsole = ((hcon != INVALID_HANDLE_VALUE) && GetConsoleScreenBufferInfo(hcon, &sbi)); + #endif } const size_t len = _mi_strlen(msg); if (len > 0 && len < UINT32_MAX) { DWORD written = 0; if (hconIsConsole) { + #ifdef MI_HAS_CONSOLE_IO WriteConsoleA(hcon, msg, (DWORD)len, &written, NULL); + #endif } else if (hcon != INVALID_HANDLE_VALUE) { // use direct write if stderr was redirected @@ -564,7 +610,6 @@ bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { } - //---------------------------------------------------------------- // Random //---------------------------------------------------------------- @@ -600,64 +645,234 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) { } if (pBCryptGenRandom == NULL) return false; } - return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0); + return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0); } #endif // MI_USE_RTLGENRANDOM + + //---------------------------------------------------------------- -// Thread init/done +// Process & Thread Init/Done //---------------------------------------------------------------- -#if !defined(MI_SHARED_LIB) - -// use thread local storage keys to detect thread ending -// note: another design could be to use special linker sections (see issue #869) -#include -#if (_WIN32_WINNT < 0x600) // before Windows Vista -WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback ); -WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex ); -WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData ); -WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex); +#if MI_WIN_USE_FIXED_TLS==1 +mi_decl_cache_align size_t _mi_win_tls_offset = 0; #endif -static DWORD mi_fls_key = (DWORD)(-1); - -static void NTAPI mi_fls_done(PVOID value) { - mi_heap_t* heap = (mi_heap_t*)value; - if (heap != NULL) { - _mi_thread_done(heap); - FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672 +//static void mi_debug_out(const char* s) { +// HANDLE h = GetStdHandle(STD_ERROR_HANDLE); +// WriteConsole(h, s, (DWORD)_mi_strlen(s), NULL, NULL); +//} + +static void mi_win_tls_init(DWORD reason) { + if (reason==DLL_PROCESS_ATTACH || reason==DLL_THREAD_ATTACH) { + #if MI_WIN_USE_FIXED_TLS==1 // we must allocate a TLS slot dynamically + if (_mi_win_tls_offset == 0 && reason == DLL_PROCESS_ATTACH) { + const DWORD tls_slot = TlsAlloc(); // usually returns slot 1 + if (tls_slot == TLS_OUT_OF_INDEXES) { + _mi_error_message(EFAULT, "unable to allocate the a TLS slot (rebuild without MI_WIN_USE_FIXED_TLS?)\n"); + } + _mi_win_tls_offset = (size_t)tls_slot * sizeof(void*); + } + #endif + #if MI_HAS_TLS_SLOT >= 2 // we must initialize the TLS slot before any allocation + if (mi_prim_get_default_heap() == NULL) { + _mi_heap_set_default_direct((mi_heap_t*)&_mi_heap_empty); + #if MI_DEBUG && MI_WIN_USE_FIXED_TLS==1 + void* const p = TlsGetValue((DWORD)(_mi_win_tls_offset / sizeof(void*))); + mi_assert_internal(p == (void*)&_mi_heap_empty); + #endif + } + #endif } } 
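/* -----------------------------------------------------------------------------
   Editor's note: hedged sketch (not part of this patch) of the plain dynamic
   TLS slot usage that MI_WIN_USE_FIXED_TLS builds on: reserve one slot per
   process with TlsAlloc and store a per-thread pointer in it (the code above
   additionally turns the slot index into a byte offset, index * sizeof(void*)).
----------------------------------------------------------------------------- */
#include <windows.h>

static DWORD demo_tls_slot = TLS_OUT_OF_INDEXES;

static BOOL demo_tls_init(void) {                 // call once at process attach
  demo_tls_slot = TlsAlloc();
  return (demo_tls_slot != TLS_OUT_OF_INDEXES);
}
static void demo_tls_set(void* per_thread_value) {
  if (demo_tls_slot != TLS_OUT_OF_INDEXES) { TlsSetValue(demo_tls_slot, per_thread_value); }
}
static void* demo_tls_get(void) {
  return (demo_tls_slot != TLS_OUT_OF_INDEXES ? TlsGetValue(demo_tls_slot) : NULL);
}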
-void _mi_prim_thread_init_auto_done(void) { - mi_fls_key = FlsAlloc(&mi_fls_done); +static void NTAPI mi_win_main(PVOID module, DWORD reason, LPVOID reserved) { + MI_UNUSED(reserved); + MI_UNUSED(module); + mi_win_tls_init(reason); + if (reason==DLL_PROCESS_ATTACH) { + _mi_auto_process_init(); + } + else if (reason==DLL_PROCESS_DETACH) { + _mi_auto_process_done(); + } + else if (reason==DLL_THREAD_DETACH && !_mi_is_redirected()) { + _mi_thread_done(NULL); + } } -void _mi_prim_thread_done_auto_done(void) { - // call thread-done on all threads (except the main thread) to prevent - // dangling callback pointer if statically linked with a DLL; Issue #208 - FlsFree(mi_fls_key); -} -void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { - mi_assert_internal(mi_fls_key != (DWORD)(-1)); - FlsSetValue(mi_fls_key, heap); -} +#if defined(MI_SHARED_LIB) + #define MI_PRIM_HAS_PROCESS_ATTACH 1 -#else + // Windows DLL: easy to hook into process_init and thread_done + BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) { + mi_win_main((PVOID)inst,reason,reserved); + return TRUE; + } -// Dll; nothing to do as in that case thread_done is handled through the DLL_THREAD_DETACH event. + // nothing to do since `_mi_thread_done` is handled through the DLL_THREAD_DETACH event. + void _mi_prim_thread_init_auto_done(void) { } + void _mi_prim_thread_done_auto_done(void) { } + void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); + } -void _mi_prim_thread_init_auto_done(void) { -} +#elif !defined(MI_WIN_USE_FLS) + #define MI_PRIM_HAS_PROCESS_ATTACH 1 -void _mi_prim_thread_done_auto_done(void) { -} + static void NTAPI mi_win_main_attach(PVOID module, DWORD reason, LPVOID reserved) { + if (reason == DLL_PROCESS_ATTACH || reason == DLL_THREAD_ATTACH) { + mi_win_main(module, reason, reserved); + } + } + static void NTAPI mi_win_main_detach(PVOID module, DWORD reason, LPVOID reserved) { + if (reason == DLL_PROCESS_DETACH || reason == DLL_THREAD_DETACH) { + mi_win_main(module, reason, reserved); + } + } -void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { - MI_UNUSED(heap); -} + // Set up TLS callbacks in a statically linked library by using special data sections. + // See + // We use 2 entries to ensure we call attach events before constructors + // are called, and detach events after destructors are called. 
+ #if defined(__cplusplus) + extern "C" { + #endif + + #if defined(_WIN64) + #pragma comment(linker, "/INCLUDE:_tls_used") + #pragma comment(linker, "/INCLUDE:_mi_tls_callback_pre") + #pragma comment(linker, "/INCLUDE:_mi_tls_callback_post") + #pragma const_seg(".CRT$XLB") + extern const PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[]; + const PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[] = { &mi_win_main_attach }; + #pragma const_seg() + #pragma const_seg(".CRT$XLY") + extern const PIMAGE_TLS_CALLBACK _mi_tls_callback_post[]; + const PIMAGE_TLS_CALLBACK _mi_tls_callback_post[] = { &mi_win_main_detach }; + #pragma const_seg() + #else + #pragma comment(linker, "/INCLUDE:__tls_used") + #pragma comment(linker, "/INCLUDE:__mi_tls_callback_pre") + #pragma comment(linker, "/INCLUDE:__mi_tls_callback_post") + #pragma data_seg(".CRT$XLB") + PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[] = { &mi_win_main_attach }; + #pragma data_seg() + #pragma data_seg(".CRT$XLY") + PIMAGE_TLS_CALLBACK _mi_tls_callback_post[] = { &mi_win_main_detach }; + #pragma data_seg() + #endif + + #if defined(__cplusplus) + } + #endif + + // nothing to do since `_mi_thread_done` is handled through the DLL_THREAD_DETACH event. + void _mi_prim_thread_init_auto_done(void) { } + void _mi_prim_thread_done_auto_done(void) { } + void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); + } + +#else // deprecated: statically linked, use fiber api + + #if defined(_MSC_VER) // on clang/gcc use the constructor attribute (in `src/prim/prim.c`) + // MSVC: use data section magic for static libraries + // See + #define MI_PRIM_HAS_PROCESS_ATTACH 1 + static int mi_process_attach(void) { + mi_win_main(NULL,DLL_PROCESS_ATTACH,NULL); + atexit(&_mi_auto_process_done); + return 0; + } + typedef int(*mi_crt_callback_t)(void); + #if defined(_WIN64) + #pragma comment(linker, "/INCLUDE:_mi_tls_callback") + #pragma section(".CRT$XIU", long, read) + #else + #pragma comment(linker, "/INCLUDE:__mi_tls_callback") + #endif + #pragma data_seg(".CRT$XIU") + mi_decl_externc mi_crt_callback_t _mi_tls_callback[] = { &mi_process_attach }; + #pragma data_seg() + #endif + + // use the fiber api for calling `_mi_thread_done`. 
+ #include + #if (_WIN32_WINNT < 0x600) // before Windows Vista + WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback ); + WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex ); + WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData ); + WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex); + #endif + + static DWORD mi_fls_key = (DWORD)(-1); + + static void NTAPI mi_fls_done(PVOID value) { + mi_heap_t* heap = (mi_heap_t*)value; + if (heap != NULL) { + _mi_thread_done(heap); + FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672 + } + } + + void _mi_prim_thread_init_auto_done(void) { + mi_fls_key = FlsAlloc(&mi_fls_done); + } + + void _mi_prim_thread_done_auto_done(void) { + // call thread-done on all threads (except the main thread) to prevent + // dangling callback pointer if statically linked with a DLL; Issue #208 + FlsFree(mi_fls_key); + } + + void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + mi_assert_internal(mi_fls_key != (DWORD)(-1)); + FlsSetValue(mi_fls_key, heap); + } +#endif + +// ---------------------------------------------------- +// Communicate with the redirection module on Windows +// ---------------------------------------------------- +#if defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT) + #define MI_PRIM_HAS_ALLOCATOR_INIT 1 + + static bool mi_redirected = false; // true if malloc redirects to mi_malloc + + bool _mi_is_redirected(void) { + return mi_redirected; + } + + #ifdef __cplusplus + extern "C" { + #endif + mi_decl_export void _mi_redirect_entry(DWORD reason) { + // called on redirection; careful as this may be called before DllMain + mi_win_tls_init(reason); + if (reason == DLL_PROCESS_ATTACH) { + mi_redirected = true; + } + else if (reason == DLL_PROCESS_DETACH) { + mi_redirected = false; + } + else if (reason == DLL_THREAD_DETACH) { + _mi_thread_done(NULL); + } + } + __declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message); + __declspec(dllimport) void mi_cdecl mi_allocator_done(void); + #ifdef __cplusplus + } + #endif + bool _mi_allocator_init(const char** message) { + return mi_allocator_init(message); + } + void _mi_allocator_done(void) { + mi_allocator_done(); + } #endif diff --git a/system/lib/mimalloc/src/random.c b/system/lib/mimalloc/src/random.c index 4fc8b2f8fb0bc..f17698ba8a6d0 100644 --- a/system/lib/mimalloc/src/random.c +++ b/system/lib/mimalloc/src/random.c @@ -143,13 +143,17 @@ void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* ctx_new) { uintptr_t _mi_random_next(mi_random_ctx_t* ctx) { mi_assert_internal(mi_random_is_initialized(ctx)); - #if MI_INTPTR_SIZE <= 4 - return chacha_next32(ctx); - #elif MI_INTPTR_SIZE == 8 - return (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx)); - #else - # error "define mi_random_next for this platform" - #endif + uintptr_t r; + do { + #if MI_INTPTR_SIZE <= 4 + r = chacha_next32(ctx); + #elif MI_INTPTR_SIZE == 8 + r = (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx)); + #else + # error "define mi_random_next for this platform" + #endif + } while (r==0); + return r; } @@ -163,7 +167,7 @@ uintptr_t _mi_os_random_weak(uintptr_t extra_seed) { x ^= _mi_prim_clock_now(); // and do a few randomization steps uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1; - for (uintptr_t i = 0; i < max; i++) { + for (uintptr_t i = 0; i < max || x==0; i++, x++) { x = _mi_random_shuffle(x); } mi_assert_internal(x != 0); @@ -179,7 +183,7 @@ 
static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) { if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); } #endif uintptr_t x = _mi_os_random_weak(0); - for (size_t i = 0; i < 8; i++) { // key is eight 32-bit words. + for (size_t i = 0; i < 8; i++, x++) { // key is eight 32-bit words. x = _mi_random_shuffle(x); ((uint32_t*)key)[i] = (uint32_t)x; } diff --git a/system/lib/mimalloc/src/segment-map.c b/system/lib/mimalloc/src/segment-map.c index 1efb1e2360bf2..bbcea28aabc2e 100644 --- a/system/lib/mimalloc/src/segment-map.c +++ b/system/lib/mimalloc/src/segment-map.c @@ -16,140 +16,127 @@ terms of the MIT license. A copy of the license can be found in the file #include "mimalloc/internal.h" #include "mimalloc/atomic.h" -#if (MI_INTPTR_SIZE>=8) && MI_TRACK_ASAN -#define MI_MAX_ADDRESS ((size_t)140 << 40) // 140TB (see issue #881) -#elif (MI_INTPTR_SIZE >= 8) -#define MI_MAX_ADDRESS ((size_t)40 << 40) // 40TB (to include huge page areas) +// Reduce total address space to reduce .bss (due to the `mi_segment_map`) +#if (MI_INTPTR_SIZE > 4) && MI_TRACK_ASAN +#define MI_SEGMENT_MAP_MAX_ADDRESS (128*1024ULL*MI_GiB) // 128 TiB (see issue #881) +#elif (MI_INTPTR_SIZE > 4) +#define MI_SEGMENT_MAP_MAX_ADDRESS (48*1024ULL*MI_GiB) // 48 TiB #else -#define MI_MAX_ADDRESS ((size_t)2 << 30) // 2Gb +#define MI_SEGMENT_MAP_MAX_ADDRESS (UINT32_MAX) #endif -#define MI_SEGMENT_MAP_BITS (MI_MAX_ADDRESS / MI_SEGMENT_SIZE) -#define MI_SEGMENT_MAP_SIZE (MI_SEGMENT_MAP_BITS / 8) -#define MI_SEGMENT_MAP_WSIZE (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE) +#define MI_SEGMENT_MAP_PART_SIZE (MI_INTPTR_SIZE*MI_KiB - 128) // 128 > sizeof(mi_memid_t) ! +#define MI_SEGMENT_MAP_PART_BITS (8*MI_SEGMENT_MAP_PART_SIZE) +#define MI_SEGMENT_MAP_PART_ENTRIES (MI_SEGMENT_MAP_PART_SIZE / MI_INTPTR_SIZE) +#define MI_SEGMENT_MAP_PART_BIT_SPAN (MI_SEGMENT_ALIGN) // memory area covered by 1 bit -static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments +#if (MI_SEGMENT_MAP_PART_BITS < (MI_SEGMENT_MAP_MAX_ADDRESS / MI_SEGMENT_MAP_PART_BIT_SPAN)) // prevent overflow on 32-bit (issue #1017) +#define MI_SEGMENT_MAP_PART_SPAN (MI_SEGMENT_MAP_PART_BITS * MI_SEGMENT_MAP_PART_BIT_SPAN) +#else +#define MI_SEGMENT_MAP_PART_SPAN MI_SEGMENT_MAP_MAX_ADDRESS +#endif + +#define MI_SEGMENT_MAP_MAX_PARTS ((MI_SEGMENT_MAP_MAX_ADDRESS / MI_SEGMENT_MAP_PART_SPAN) + 1) + +// A part of the segment map. +typedef struct mi_segmap_part_s { + mi_memid_t memid; + _Atomic(uintptr_t) map[MI_SEGMENT_MAP_PART_ENTRIES]; +} mi_segmap_part_t; -static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) { +// Allocate parts on-demand to reduce .bss footprint +static _Atomic(mi_segmap_part_t*) mi_segment_map[MI_SEGMENT_MAP_MAX_PARTS]; // = { NULL, .. } + +static mi_segmap_part_t* mi_segment_map_index_of(const mi_segment_t* segment, bool create_on_demand, size_t* idx, size_t* bitidx) { // note: segment can be invalid or NULL. mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE? 
- if ((uintptr_t)segment >= MI_MAX_ADDRESS) { - *bitidx = 0; - return MI_SEGMENT_MAP_WSIZE; - } - else { - const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_SIZE; - *bitidx = segindex % MI_INTPTR_BITS; - const size_t mapindex = segindex / MI_INTPTR_BITS; - mi_assert_internal(mapindex < MI_SEGMENT_MAP_WSIZE); - return mapindex; + *idx = 0; + *bitidx = 0; + if ((uintptr_t)segment >= MI_SEGMENT_MAP_MAX_ADDRESS) return NULL; + const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_MAP_PART_SPAN; + if (segindex >= MI_SEGMENT_MAP_MAX_PARTS) return NULL; + mi_segmap_part_t* part = mi_atomic_load_ptr_relaxed(mi_segmap_part_t, &mi_segment_map[segindex]); + + // allocate on demand to reduce .bss footprint + if mi_unlikely(part == NULL) { + if (!create_on_demand) return NULL; + mi_memid_t memid; + part = (mi_segmap_part_t*)_mi_os_zalloc(sizeof(mi_segmap_part_t), &memid); + if (part == NULL) return NULL; + part->memid = memid; + mi_segmap_part_t* expected = NULL; + if (!mi_atomic_cas_ptr_strong_release(mi_segmap_part_t, &mi_segment_map[segindex], &expected, part)) { + _mi_os_free(part, sizeof(mi_segmap_part_t), memid); + part = expected; + if (part == NULL) return NULL; + } } + mi_assert(part != NULL); + const uintptr_t offset = ((uintptr_t)segment) % MI_SEGMENT_MAP_PART_SPAN; + const uintptr_t bitofs = offset / MI_SEGMENT_MAP_PART_BIT_SPAN; + *idx = bitofs / MI_INTPTR_BITS; + *bitidx = bitofs % MI_INTPTR_BITS; + return part; } void _mi_segment_map_allocated_at(const mi_segment_t* segment) { + if (segment->memid.memkind == MI_MEM_ARENA) return; // we lookup segments first in the arena's and don't need the segment map + size_t index; size_t bitidx; - size_t index = mi_segment_map_index_of(segment, &bitidx); - mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); - if (index==MI_SEGMENT_MAP_WSIZE) return; - uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); + mi_segmap_part_t* part = mi_segment_map_index_of(segment, true /* alloc map if needed */, &index, &bitidx); + if (part == NULL) return; // outside our address range.. + uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]); uintptr_t newmask; do { newmask = (mask | ((uintptr_t)1 << bitidx)); - } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); + } while (!mi_atomic_cas_weak_release(&part->map[index], &mask, newmask)); } void _mi_segment_map_freed_at(const mi_segment_t* segment) { + if (segment->memid.memkind == MI_MEM_ARENA) return; + size_t index; size_t bitidx; - size_t index = mi_segment_map_index_of(segment, &bitidx); - mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); - if (index == MI_SEGMENT_MAP_WSIZE) return; - uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); + mi_segmap_part_t* part = mi_segment_map_index_of(segment, false /* don't alloc if not present */, &index, &bitidx); + if (part == NULL) return; // outside our address range.. + uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]); uintptr_t newmask; do { newmask = (mask & ~((uintptr_t)1 << bitidx)); - } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); + } while (!mi_atomic_cas_weak_release(&part->map[index], &mask, newmask)); } // Determine the segment belonging to a pointer or NULL if it is not in a valid segment. 
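/* -----------------------------------------------------------------------------
   Editor's note: hedged sketch (not part of this patch) of the lock-free bit
   update used by _mi_segment_map_allocated_at/_freed_at above, written with
   standard C11 atomics instead of the mi_atomic_* wrappers.
----------------------------------------------------------------------------- */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static void demo_atomic_bit_set(_Atomic(uintptr_t)* word, size_t bitidx) {
  uintptr_t expected = atomic_load_explicit(word, memory_order_relaxed);
  uintptr_t desired;
  do {
    desired = expected | ((uintptr_t)1 << bitidx);
  } while (!atomic_compare_exchange_weak_explicit(word, &expected, desired,
                                                  memory_order_release,
                                                  memory_order_relaxed));
}

static bool demo_atomic_bit_test(_Atomic(uintptr_t)* word, size_t bitidx) {
  return (atomic_load_explicit(word, memory_order_relaxed) & ((uintptr_t)1 << bitidx)) != 0;
}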
static mi_segment_t* _mi_segment_of(const void* p) { if (p == NULL) return NULL; mi_segment_t* segment = _mi_ptr_segment(p); // segment can be NULL + size_t index; size_t bitidx; - size_t index = mi_segment_map_index_of(segment, &bitidx); - // fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge - const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); + mi_segmap_part_t* part = mi_segment_map_index_of(segment, false /* dont alloc if not present */, &index, &bitidx); + if (part == NULL) return NULL; + const uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]); if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) { + bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie); + mi_assert_internal(cookie_ok); MI_UNUSED(cookie_ok); return segment; // yes, allocated by us } - if (index==MI_SEGMENT_MAP_WSIZE) return NULL; - - // TODO: maintain max/min allocated range for efficiency for more efficient rejection of invalid pointers? - - // search downwards for the first segment in case it is an interior pointer - // could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps trough - // valid huge objects - // note: we could maintain a lowest index to speed up the path for invalid pointers? - size_t lobitidx; - size_t loindex; - uintptr_t lobits = mask & (((uintptr_t)1 << bitidx) - 1); - if (lobits != 0) { - loindex = index; - lobitidx = mi_bsr(lobits); // lobits != 0 - } - else if (index == 0) { - return NULL; - } - else { - mi_assert_internal(index > 0); - uintptr_t lomask = mask; - loindex = index; - do { - loindex--; - lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]); - } while (lomask != 0 && loindex > 0); - if (lomask == 0) return NULL; - lobitidx = mi_bsr(lomask); // lomask != 0 - } - mi_assert_internal(loindex < MI_SEGMENT_MAP_WSIZE); - // take difference as the addresses could be larger than the MAX_ADDRESS space. - size_t diff = (((index - loindex) * (8*MI_INTPTR_SIZE)) + bitidx - lobitidx) * MI_SEGMENT_SIZE; - segment = (mi_segment_t*)((uint8_t*)segment - diff); - - if (segment == NULL) return NULL; - mi_assert_internal((void*)segment < p); - bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie); - mi_assert_internal(cookie_ok); - if mi_unlikely(!cookie_ok) return NULL; - if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range - mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment)); - return segment; + return NULL; } // Is this a valid pointer in our heap? 
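/* -----------------------------------------------------------------------------
   Editor's note: small usage sketch (not part of this patch). The check below
   backs the public mi_is_in_heap_region() API from mimalloc.h; it is a
   conservative membership test, mainly useful for debugging and introspection.
----------------------------------------------------------------------------- */
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  void* p = mi_malloc(64);
  int on_stack = 0;
  printf("heap pointer  : %d\n", (int)mi_is_in_heap_region(p));         // expected 1
  printf("stack pointer : %d\n", (int)mi_is_in_heap_region(&on_stack)); // expected 0
  mi_free(p);
  return 0;
}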
-static bool mi_is_valid_pointer(const void* p) { - return ((_mi_segment_of(p) != NULL) || (_mi_arena_contains(p))); +static bool mi_is_valid_pointer(const void* p) { + // first check if it is in an arena, then check if it is OS allocated + return (_mi_arena_contains(p) || _mi_segment_of(p) != NULL); } mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { return mi_is_valid_pointer(p); } -/* -// Return the full segment range belonging to a pointer -static void* mi_segment_range_of(const void* p, size_t* size) { - mi_segment_t* segment = _mi_segment_of(p); - if (segment == NULL) { - if (size != NULL) *size = 0; - return NULL; - } - else { - if (size != NULL) *size = segment->segment_size; - return segment; +void _mi_segment_map_unsafe_destroy(void) { + for (size_t i = 0; i < MI_SEGMENT_MAP_MAX_PARTS; i++) { + mi_segmap_part_t* part = mi_atomic_exchange_ptr_relaxed(mi_segmap_part_t, &mi_segment_map[i], NULL); + if (part != NULL) { + _mi_os_free(part, sizeof(mi_segmap_part_t), part->memid); + } } - mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); - mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size); - mi_reset_delayed(tld); - mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld)); - return page; } -*/ diff --git a/system/lib/mimalloc/src/segment.c b/system/lib/mimalloc/src/segment.c index 4e4dcb80ee177..32841e6deef20 100644 --- a/system/lib/mimalloc/src/segment.c +++ b/system/lib/mimalloc/src/segment.c @@ -17,7 +17,7 @@ terms of the MIT license. A copy of the license can be found in the file // ------------------------------------------------------------------- -static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats); +static void mi_segment_try_purge(mi_segment_t* segment, bool force); // ------------------------------------------------------------------- @@ -150,6 +150,23 @@ size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) { /* -------------------------------------------------------------------------------- Segment allocation + We allocate pages inside bigger "segments" (32 MiB on 64-bit). This is to avoid + splitting VMA's on Linux and reduce fragmentation on other OS's. + Each thread owns its own segments. + + Currently we have: + - small pages (64KiB) + - medium pages (512KiB) + - large pages (4MiB), + - huge segments have 1 page in one segment that can be larger than `MI_SEGMENT_SIZE`. + it is used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or with alignment `> MI_BLOCK_ALIGNMENT_MAX`. + + The memory for a segment is usually committed on demand. + (i.e. we are careful to not touch the memory until we actually allocate a block there) + + If a thread ends, it "abandons" pages that still contain live blocks. + Such segments are abandoned and these can be reclaimed by still running threads, + (much like work-stealing). 
-------------------------------------------------------------------------------- */ @@ -332,6 +349,9 @@ static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, c if (block_size <= 64) { start_offset += 3*block_size; } else if (block_size <= 512) { start_offset += block_size; } } + start_offset = _mi_align_up(start_offset, MI_MAX_ALIGN_SIZE); + mi_assert_internal(_mi_is_aligned(pstart + start_offset, MI_MAX_ALIGN_SIZE)); + mi_assert_internal(block_size == 0 || block_size > MI_MAX_ALIGN_GUARANTEE || _mi_is_aligned(pstart + start_offset,block_size)); if (page_size != NULL) { *page_size = psize - start_offset; } return (pstart + start_offset); } @@ -407,8 +427,7 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) { const size_t size = mi_segment_size(segment); const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size); - _mi_abandoned_await_readers(); // wait until safe to free - _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid, tld->stats); + _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid); } /* ----------------------------------------------------------- @@ -465,7 +484,7 @@ static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uin mi_commit_mask_create(bitidx, bitcount, cm); } -static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { +static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size) { mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); // commit liberal @@ -481,7 +500,7 @@ static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi mi_commit_mask_t cmask; mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); _mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap - if (!_mi_os_commit(start, full_size, &is_zero, stats)) return false; + if (!_mi_os_commit(start, full_size, &is_zero)) return false; mi_commit_mask_set(&segment->commit_mask, &mask); } @@ -495,15 +514,15 @@ static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi return true; } -static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { +static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size) { mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); // note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->purge_mask)) return true; // fully committed mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); - return mi_segment_commit(segment, p, size, stats); + return mi_segment_commit(segment, p, size); } -static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { +static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size) { mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); if (!segment->allow_purge) return true; @@ -518,7 +537,7 @@ static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_ // purging mi_assert_internal((void*)start != (void*)segment); mi_assert_internal(segment->allow_decommit); - const bool decommitted = _mi_os_purge(start, full_size, stats); // reset or decommit + const bool 
decommitted = _mi_os_purge(start, full_size); // reset or decommit if (decommitted) { mi_commit_mask_t cmask; mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); @@ -532,11 +551,11 @@ static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_ return true; } -static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { +static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size) { if (!segment->allow_purge) return; if (mi_option_get(mi_option_purge_delay) == 0) { - mi_segment_purge(segment, p, size, stats); + mi_segment_purge(segment, p, size); } else { // register for future purge in the purge mask @@ -559,7 +578,7 @@ static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t else if (segment->purge_expire <= now) { // previous purge mask already expired if (segment->purge_expire + mi_option_get(mi_option_purge_extend_delay) <= now) { - mi_segment_try_purge(segment, true, stats); + mi_segment_try_purge(segment, true); } else { segment->purge_expire = now + mi_option_get(mi_option_purge_extend_delay); // (mi_option_get(mi_option_purge_delay) / 8); // wait a tiny bit longer in case there is a series of free's @@ -572,7 +591,7 @@ static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t } } -static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats) { +static void mi_segment_try_purge(mi_segment_t* segment, bool force) { if (!segment->allow_purge || segment->purge_expire == 0 || mi_commit_mask_is_empty(&segment->purge_mask)) return; mi_msecs_t now = _mi_clock_now(); if (!force && now < segment->purge_expire) return; @@ -588,7 +607,7 @@ static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* if (count > 0) { uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE); size_t size = count * MI_COMMIT_SIZE; - mi_segment_purge(segment, p, size, stats); + mi_segment_purge(segment, p, size); } } mi_commit_mask_foreach_end() @@ -597,8 +616,8 @@ static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* // called from `mi_heap_collect_ex` // this can be called per-page so it is important that try_purge has fast exit path -void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { - mi_segment_try_purge(segment, force, tld->stats); +void _mi_segment_collect(mi_segment_t* segment, bool force) { + mi_segment_try_purge(segment, force); } /* ----------------------------------------------------------- @@ -633,7 +652,7 @@ static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size // perhaps decommit if (allow_purge) { - mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats); + mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE); } // and push it on the free page queue (if it was not a huge page) @@ -662,7 +681,6 @@ static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) { mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0); mi_segment_t* const segment = _mi_ptr_segment(slice); - const bool is_abandoned = (segment->thread_id == 0); // mi_segment_is_abandoned(segment); // for huge pages, just mark as free but don't add to the queues if (segment->kind == MI_SEGMENT_HUGE) { @@ -675,6 +693,7 @@ static 
mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_ } // otherwise coalesce the span and add to the free span queues + const bool is_abandoned = (segment->thread_id == 0); // mi_segment_is_abandoned(segment); size_t slice_count = slice->slice_count; mi_slice_t* next = slice + slice->slice_count; mi_assert_internal(next <= mi_segment_slices_end(segment)); @@ -691,6 +710,8 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_ // free previous slice -- remove it from free and merge mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0); slice_count += prev->slice_count; + slice->slice_count = 0; + slice->slice_offset = (uint32_t)((uint8_t*)slice - (uint8_t*)prev); // set the slice offset for `segment_force_abandon` (in case the previous free block is very large). if (!is_abandoned) { mi_segment_span_remove_from_queue(prev, tld); } slice = prev; } @@ -708,13 +729,13 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_ ----------------------------------------------------------- */ // Note: may still return NULL if committing the memory failed -static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) { +static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count) { mi_assert_internal(slice_index < segment->slice_entries); mi_slice_t* const slice = &segment->slices[slice_index]; mi_assert_internal(slice->block_size==0 || slice->block_size==1); // commit before changing the slice data - if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) { + if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE)) { return NULL; // commit failed! 
} @@ -787,7 +808,7 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren mi_segment_slice_split(segment, slice, slice_count, tld); } mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->block_size > 0); - mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld); + mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count); if (page == NULL) { // commit failed; return NULL but first restore the slice mi_segment_span_free_coalesce(slice, tld); @@ -810,7 +831,7 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id, size_t* psegment_slices, size_t* pinfo_slices, - bool commit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) + bool commit, mi_segments_tld_t* tld) { mi_memid_t memid; @@ -831,7 +852,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment } const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE; - mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, os_tld); + mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid); if (segment == NULL) { return NULL; // failed to allocate } @@ -847,8 +868,8 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment mi_assert_internal(commit_needed>0); mi_commit_mask_create(0, commit_needed, &commit_mask); mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE); - if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL, tld->stats)) { - _mi_arena_free(segment,segment_size,0,memid,tld->stats); + if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL)) { + _mi_arena_free(segment,segment_size,0,memid); return NULL; } } @@ -858,6 +879,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment segment->allow_decommit = !memid.is_pinned; segment->allow_purge = segment->allow_decommit && (mi_option_get(mi_option_purge_delay) >= 0); segment->segment_size = segment_size; + segment->subproc = tld->subproc; segment->commit_mask = commit_mask; segment->purge_expire = 0; mi_commit_mask_create_empty(&segment->purge_mask); @@ -869,7 +891,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` . 
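/* -----------------------------------------------------------------------------
   Editor's note: hedged sketch (not part of this patch) of the classic POSIX
   fallback for getting memory aligned to a large power-of-two boundary such as
   MI_SEGMENT_SIZE: over-allocate by `align` bytes, then unmap the unaligned
   head and the tail. (In mimalloc the arena/OS layer normally handles this.)
----------------------------------------------------------------------------- */
#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

static void* demo_mmap_aligned(size_t size, size_t align) {
  // precondition: `align` is a power of two and a multiple of the OS page size
  uint8_t* raw = (uint8_t*)mmap(NULL, size + align, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) return NULL;
  const uintptr_t addr    = (uintptr_t)raw;
  const uintptr_t aligned = (addr + align - 1) & ~(uintptr_t)(align - 1);
  const size_t head = (size_t)(aligned - addr);
  const size_t tail = align - head;
  if (head > 0) { munmap(raw, head); }                        // trim unaligned head
  if (tail > 0) { munmap((uint8_t*)aligned + size, tail); }   // trim the tail
  return (void*)aligned;
}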
-static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) +static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_page_t** huge_page) { mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL)); @@ -881,13 +903,13 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little) const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems _mi_current_thread_count() > 1 && // do not delay for the first N threads - tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay)); + tld->peak_count < (size_t)mi_option_get(mi_option_eager_commit_delay)); const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit); bool commit = eager || (required > 0); // Allocate the segment from the OS mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id, - &segment_slices, &info_slices, commit, tld, os_tld); + &segment_slices, &info_slices, commit, tld); if (segment == NULL) return NULL; // zero the segment info? -- not always needed as it may be zero initialized from the OS @@ -915,17 +937,17 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi if (MI_SECURE>0) { // in secure mode, we set up a protected page in between the segment info // and the page data, and at the end of the segment. - size_t os_pagesize = _mi_os_page_size(); + size_t os_pagesize = _mi_os_page_size(); _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; - mi_segment_ensure_committed(segment, end, os_pagesize, tld->stats); + mi_segment_ensure_committed(segment, end, os_pagesize); _mi_os_protect(end, os_pagesize); if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-( guard_slices = 1; } // reserve first slices for segment info - mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices, tld); + mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices); mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance mi_assert_internal(segment->used == 1); segment->used = 0; // don't count our internal slices towards usage @@ -939,7 +961,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi mi_assert_internal(huge_page!=NULL); mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask)); mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask)); - *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld); + *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices); mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance } @@ -954,6 +976,9 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t mi_assert_internal(segment->next == NULL); mi_assert_internal(segment->used == 0); + // in `mi_segment_force_abandon` we set this to true to ensure the segment's memory stays valid + if (segment->dont_free) return; + // Remove the free pages mi_slice_t* 
slice = &segment->slices[0]; const mi_slice_t* end = mi_segment_slices_end(segment); @@ -975,7 +1000,7 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t mi_assert_internal(page_count == 2); // first page is allocated by the segment itself // stats - _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment)); + // _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment)); // return it to the OS mi_segment_os_free(segment, tld); @@ -998,12 +1023,13 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld size_t inuse = page->capacity * mi_page_block_size(page); _mi_stat_decrease(&tld->stats->page_committed, inuse); _mi_stat_decrease(&tld->stats->pages, 1); + _mi_stat_decrease(&tld->stats->page_bins[_mi_page_bin(page)], 1); // reset the page memory to reduce memory pressure? if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) { size_t psize; uint8_t* start = _mi_segment_page_start(segment, page, &psize); - _mi_os_reset(start, psize, tld->stats); + _mi_os_reset(start, psize); } // zero the page data, but not the segment fields and heap tag @@ -1025,7 +1051,6 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld) { mi_assert(page != NULL); - mi_segment_t* segment = _mi_page_segment(page); mi_assert_expensive(mi_segment_is_valid(segment,tld)); @@ -1043,7 +1068,7 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld) } else { // perform delayed purges - mi_segment_try_purge(segment, false /* force? */, tld->stats); + mi_segment_try_purge(segment, false /* force? */); } } @@ -1061,16 +1086,11 @@ When a block is freed in an abandoned segment, the segment is reclaimed into that thread. Moreover, if threads are looking for a fresh segment, they -will first consider abondoned segments -- these can be found +will first consider abandoned segments -- these can be found by scanning the arena memory (segments outside arena memoryare only reclaimed by a free). ----------------------------------------------------------- */ -// legacy: Wait until there are no more pending reads on segments that used to be in the abandoned list -void _mi_abandoned_await_readers(void) { - // nothing needed -} - /* ----------------------------------------------------------- Abandon segment/page ----------------------------------------------------------- */ @@ -1098,7 +1118,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { // Only abandoned segments in arena memory can be reclaimed without a free // so if a segment is not from an arena we force purge here to be conservative. const bool force_purge = (segment->memid.memkind != MI_MEM_ARENA) || mi_option_is_enabled(mi_option_abandoned_page_purge); - mi_segment_try_purge(segment, force_purge, tld->stats); + mi_segment_try_purge(segment, force_purge); // all pages in the segment are abandoned; add it to the abandoned list _mi_stat_increase(&tld->stats->segments_abandoned, 1); @@ -1190,6 +1210,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; } // can be 0 still with abandoned_next, or already a thread id for segments outside an arena that are reclaimed on a free. 
mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0 || mi_atomic_load_relaxed(&segment->thread_id) == _mi_thread_id()); + mi_assert_internal(segment->subproc == heap->tld->segments.subproc); // only reclaim within the same subprocess mi_atomic_store_release(&segment->thread_id, _mi_thread_id()); segment->abandoned_visits = 0; segment->was_reclaimed = true; @@ -1213,12 +1234,13 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, mi_assert_internal(page->next == NULL && page->prev==NULL); _mi_stat_decrease(&tld->stats->pages_abandoned, 1); segment->abandoned--; - // set the heap again and allow heap thread delayed free again. + // get the target heap for this thread which has a matching heap tag (so we reclaim into a matching heap) mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag); // allow custom heaps to separate objects if (target_heap == NULL) { target_heap = heap; - _mi_error_message(EINVAL, "page with tag %u cannot be reclaimed by a heap with the same tag (using %u instead)\n", page->heap_tag, heap->tag ); + _mi_error_message(EFAULT, "page with tag %u cannot be reclaimed by a heap with the same tag (using heap tag %u instead)\n", page->heap_tag, heap->tag ); } + // associate the heap with this page, and allow heap thread delayed free again. mi_page_set_heap(page, target_heap); _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set) _mi_page_free_collect(page, false); // ensure used count is up to date @@ -1254,12 +1276,21 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, } } + // attempt to reclaim a particular segment (called from multi threaded free `alloc.c:mi_free_block_mt`) bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) { if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned - // don't reclaim more from a free than half the current segments + if (segment->subproc != heap->tld->segments.subproc) return false; // only reclaim within the same subprocess + if (!_mi_heap_memid_is_suitable(heap,segment->memid)) return false; // don't reclaim between exclusive and non-exclusive arena's + const long target = _mi_option_get_fast(mi_option_target_segments_per_thread); + if (target > 0 && (size_t)target <= heap->tld->segments.count) return false; // don't reclaim if going above the target count + + // don't reclaim more from a `free` call than half the current segments // this is to prevent a pure free-ing thread to start owning too many segments - if (heap->tld->segments.reclaim_count * 2 > heap->tld->segments.count) return false; + // (but not for out-of-arena segments as that is the main way to be reclaimed for those) + if (segment->memid.memkind == MI_MEM_ARENA && heap->tld->segments.reclaim_count * 2 > heap->tld->segments.count) { + return false; + } if (_mi_arena_segment_clear_abandoned(segment)) { // atomically unabandon mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments); mi_assert_internal(res == segment); @@ -1270,17 +1301,26 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) { void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) { mi_segment_t* segment; - mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, ¤t); + mi_arena_field_cursor_t current; + _mi_arena_field_cursor_init(heap, tld->subproc, true /* visit all, blocking */, ¤t); while ((segment = _mi_arena_segment_clear_abandoned_next(¤t)) != 
NULL) { mi_segment_reclaim(segment, heap, 0, NULL, tld); } + _mi_arena_field_cursor_done(¤t); } -static long mi_segment_get_reclaim_tries(void) { + +static bool segment_count_is_within_target(mi_segments_tld_t* tld, size_t* ptarget) { + const size_t target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 0, 1024); + if (ptarget != NULL) { *ptarget = target; } + return (target == 0 || tld->count < target); +} + +static long mi_segment_get_reclaim_tries(mi_segments_tld_t* tld) { // limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries. const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100); if (perc <= 0) return 0; - const size_t total_count = _mi_arena_segment_abandoned_count(); + const size_t total_count = mi_atomic_load_relaxed(&tld->subproc->abandoned_count); if (total_count == 0) return 0; const size_t relative_count = (total_count > 10000 ? (total_count / 100) * perc : (total_count * perc) / 100); // avoid overflow long max_tries = (long)(relative_count <= 1 ? 1 : (relative_count > 1024 ? 1024 : relative_count)); @@ -1291,15 +1331,18 @@ static long mi_segment_get_reclaim_tries(void) { static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld) { *reclaimed = false; - long max_tries = mi_segment_get_reclaim_tries(); + long max_tries = mi_segment_get_reclaim_tries(tld); if (max_tries <= 0) return NULL; - mi_segment_t* segment; - mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, ¤t); - while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(¤t)) != NULL)) + mi_segment_t* result = NULL; + mi_segment_t* segment = NULL; + mi_arena_field_cursor_t current; + _mi_arena_field_cursor_init(heap, tld->subproc, false /* non-blocking */, ¤t); + while (segment_count_is_within_target(tld,NULL) && (max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(¤t)) != NULL)) { + mi_assert(segment->subproc == heap->tld->segments.subproc); // cursor only visits segments in our sub-process segment->abandoned_visits++; - // todo: should we respect numa affinity for abondoned reclaim? perhaps only for the first visit? + // todo: should we respect numa affinity for abandoned reclaim? perhaps only for the first visit? // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments and use many tries // Perhaps we can skip non-suitable ones in a better way? bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid); @@ -1316,27 +1359,30 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice // found a large enough free span, or a page of the right block_size with free space // we return the result of reclaim (which is usually `segment`) as it might free // the segment due to concurrent frees (in which case `NULL` is returned). - return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld); + result = mi_segment_reclaim(segment, heap, block_size, reclaimed, tld); + break; } else if (segment->abandoned_visits > 3 && is_suitable) { - // always reclaim on 3rd visit to limit the abandoned queue length. + // always reclaim on 3rd visit to limit the abandoned segment count. mi_segment_reclaim(segment, heap, 0, NULL, tld); } else { // otherwise, push on the visited list so it gets not looked at too quickly again - mi_segment_try_purge(segment, false /* true force? 
*/, tld->stats); // force purge if needed as we may not visit soon again + max_tries++; // don't count this as a try since it was not suitable + mi_segment_try_purge(segment, false /* true force? */); // force purge if needed as we may not visit soon again _mi_arena_segment_mark_abandoned(segment); } } - return NULL; + _mi_arena_field_cursor_done(¤t); + return result; } - +// collect abandoned segments void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld) { mi_segment_t* segment; - mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, ¤t); - long max_tries = (force ? (long)_mi_arena_segment_abandoned_count() : 1024); // limit latency + mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, tld->subproc, force /* blocking? */, ¤t); + long max_tries = (force ? (long)mi_atomic_load_relaxed(&tld->subproc->abandoned_count) : 1024); // limit latency while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(¤t)) != NULL)) { mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees) if (segment->used == 0) { @@ -1348,20 +1394,121 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld) else { // otherwise, purge if needed and push on the visited list // note: forced purge can be expensive if many threads are destroyed/created as in mstress. - mi_segment_try_purge(segment, force, tld->stats); + mi_segment_try_purge(segment, force); _mi_arena_segment_mark_abandoned(segment); } } + _mi_arena_field_cursor_done(¤t); +} + +/* ----------------------------------------------------------- + Force abandon a segment that is in use by our thread +----------------------------------------------------------- */ + +// force abandon a segment +static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) +{ + mi_assert_internal(!mi_segment_is_abandoned(segment)); + mi_assert_internal(!segment->dont_free); + + // ensure the segment does not get free'd underneath us (so we can check if a page has been freed in `mi_page_force_abandon`) + segment->dont_free = true; + + // for all slices + const mi_slice_t* end; + mi_slice_t* slice = mi_slices_start_iterate(segment, &end); + while (slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + if (mi_slice_is_used(slice)) { + // ensure used count is up to date and collect potential concurrent frees + mi_page_t* const page = mi_slice_to_page(slice); + _mi_page_free_collect(page, false); + { + // abandon the page if it is still in-use (this will free it if possible as well) + mi_assert_internal(segment->used > 0); + if (segment->used == segment->abandoned+1) { + // the last page.. abandon and return as the segment will be abandoned after this + // and we should no longer access it. + segment->dont_free = false; + _mi_page_force_abandon(page); + return; + } + else { + // abandon and continue + _mi_page_force_abandon(page); + // it might be freed, reset the slice (note: relies on coalesce setting the slice_offset) + slice = mi_slice_first(slice); + } + } + } + slice = slice + slice->slice_count; + } + segment->dont_free = false; + mi_assert(segment->used == segment->abandoned); + mi_assert(segment->used == 0); + if (segment->used == 0) { // paranoia + // all free now + mi_segment_free(segment, false, tld); + } + else { + // perform delayed purges + mi_segment_try_purge(segment, false /* force? */); + } +} + + +// try abandon segments. 
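Note: `mi_segment_force_abandon` above is what lets a thread shed segments it still owns; the helpers in the next hunk (`mi_segments_try_abandon_to_target` and the exported `mi_collect_reduce`) abandon segments until the thread drops back under `mi_option_target_segments_per_thread`. A small usage sketch, illustrative only, relying just on that option and on `mi_collect_reduce` as defined below:

#include <mimalloc.h>

// Cap each thread at roughly 8 segments; once over the target, allocating a
// fresh segment first force-abandons full segments to get back under it.
static void configure_segment_cap(void) {
  mi_option_set(mi_option_target_segments_per_thread, 8);
}

// Explicitly shrink after a transient spike: collect, then abandon segments
// until this thread holds about `target_size` bytes of segment memory
// (0 falls back to the target_segments_per_thread option).
static void trim_current_thread(size_t target_size) {
  mi_collect_reduce(target_size);
}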
+// this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use. +static void mi_segments_try_abandon_to_target(mi_heap_t* heap, size_t target, mi_segments_tld_t* tld) { + if (target <= 1) return; + const size_t min_target = (target > 4 ? (target*3)/4 : target); // 75% + // todo: we should maintain a list of segments per thread; for now, only consider segments from the heap full pages + for (int i = 0; i < 64 && tld->count >= min_target; i++) { + mi_page_t* page = heap->pages[MI_BIN_FULL].first; + while (page != NULL && mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX) { + page = page->next; + } + if (page==NULL) { + break; + } + mi_segment_t* segment = _mi_page_segment(page); + mi_segment_force_abandon(segment, tld); + mi_assert_internal(page != heap->pages[MI_BIN_FULL].first); // as it is just abandoned + } +} + +// try abandon segments. +// this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use. +static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) { + // we call this when we are about to add a fresh segment so we should be under our target segment count. + size_t target = 0; + if (segment_count_is_within_target(tld, &target)) return; + mi_segments_try_abandon_to_target(heap, target, tld); +} + +void mi_collect_reduce(size_t target_size) mi_attr_noexcept { + mi_collect(true); + mi_heap_t* heap = mi_heap_get_default(); + mi_segments_tld_t* tld = &heap->tld->segments; + size_t target = target_size / MI_SEGMENT_SIZE; + if (target == 0) { + target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 1, 1024); + } + mi_segments_try_abandon_to_target(heap, target, tld); } /* ----------------------------------------------------------- Reclaim or allocate ----------------------------------------------------------- */ -static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) +static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld) { mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX); + // try to abandon some segments to increase reuse between threads + mi_segments_try_abandon(heap,tld); + // 1. try to reclaim an abandoned segment bool reclaimed; mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld); @@ -1375,7 +1522,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_ return segment; } // 2. 
otherwise allocate a fresh segment - return mi_segment_alloc(0, 0, heap->arena_id, tld, os_tld, NULL); + return mi_segment_alloc(0, 0, heap->arena_id, tld, NULL); } @@ -1383,7 +1530,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_ Page allocation ----------------------------------------------------------- */ -static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) +static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld) { mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE); @@ -1394,18 +1541,18 @@ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_ki mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld); if (page==NULL) { // no free page, allocate a new segment and try again - if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) { + if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld) == NULL) { // OOM or reclaimed a good page in the heap return NULL; } else { // otherwise try again - return mi_segments_page_alloc(heap, page_kind, required, block_size, tld, os_tld); + return mi_segments_page_alloc(heap, page_kind, required, block_size, tld); } } mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size); mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id()); - mi_segment_try_purge(_mi_ptr_segment(page), false, tld->stats); + mi_segment_try_purge(_mi_ptr_segment(page), false); return page; } @@ -1415,10 +1562,10 @@ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_ki Huge page allocation ----------------------------------------------------------- */ -static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) +static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld) { mi_page_t* page = NULL; - mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,os_tld,&page); + mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,&page); if (segment == NULL || page==NULL) return NULL; mi_assert_internal(segment->used==1); mi_assert_internal(mi_page_block_size(page) >= size); @@ -1440,7 +1587,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_assert_internal(psize - (aligned_p - start) >= size); uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list ptrdiff_t decommit_size = aligned_p - decommit_start; - _mi_os_reset(decommit_start, decommit_size, &_mi_stats_main); // note: cannot use segment_decommit on huge segments + _mi_os_reset(decommit_start, decommit_size); // note: cannot use segment_decommit on huge segments } return page; @@ -1487,7 +1634,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc if (csize > sizeof(mi_block_t)) { csize = csize - sizeof(mi_block_t); uint8_t* p = (uint8_t*)block + sizeof(mi_block_t); - _mi_os_reset(p, csize, &_mi_stats_main); // note: cannot use segment_decommit on huge segments + _mi_os_reset(p, csize); // note: cannot use segment_decommit on huge segments } } 
} @@ -1496,29 +1643,60 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc /* ----------------------------------------------------------- Page allocation and free ----------------------------------------------------------- */ -mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { +mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld) { mi_page_t* page; if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) { mi_assert_internal(_mi_is_power_of_two(page_alignment)); mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE); if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; } - page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld); + page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld); } else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) { - page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld,os_tld); + page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld); } else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) { - page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld, os_tld); + page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld); } else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) { - page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld); + page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld); } else { - page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld); + page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld); } mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid)); mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); + mi_assert_internal(page == NULL || _mi_page_segment(page)->subproc == tld->subproc); return page; } +/* ----------------------------------------------------------- + Visit blocks in a segment (only used for abandoned segments) +----------------------------------------------------------- */ + +static bool mi_segment_visit_page(mi_page_t* page, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) { + mi_heap_area_t area; + _mi_heap_area_init(&area, page); + if (!visitor(NULL, &area, NULL, area.block_size, arg)) return false; + if (visit_blocks) { + return _mi_heap_area_visit_blocks(&area, page, visitor, arg); + } + else { + return true; + } +} + +bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) { + const mi_slice_t* end; + mi_slice_t* slice = mi_slices_start_iterate(segment, &end); + while (slice < end) { + if (mi_slice_is_used(slice)) { + mi_page_t* const page = mi_slice_to_page(slice); + if (heap_tag < 0 || (int)page->heap_tag == heap_tag) { + if (!mi_segment_visit_page(page, visit_blocks, visitor, arg)) return false; + } + } + slice = slice + slice->slice_count; + } + return true; +} diff --git a/system/lib/mimalloc/src/static.c b/system/lib/mimalloc/src/static.c index bf025eb794675..9e06ce05aaa5b 100644 --- a/system/lib/mimalloc/src/static.c +++ b/system/lib/mimalloc/src/static.c @@ -31,7 +31,7 @@ terms of the MIT license. 
A copy of the license can be found in the file #include "options.c" #include "os.c" #include "page.c" // includes page-queue.c -#include "random.c" +#include "random.c" #include "segment.c" #include "segment-map.c" #include "stats.c" diff --git a/system/lib/mimalloc/src/stats.c b/system/lib/mimalloc/src/stats.c index a936402744d07..34b3d4e4ce44c 100644 --- a/system/lib/mimalloc/src/stats.c +++ b/system/lib/mimalloc/src/stats.c @@ -26,38 +26,29 @@ static bool mi_is_in_main(void* stat) { static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) { if (amount == 0) return; - if (mi_is_in_main(stat)) + if mi_unlikely(mi_is_in_main(stat)) { // add atomically (for abandoned pages) int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount); + // if (stat == &_mi_stats_main.committed) { mi_assert_internal(current + amount >= 0); }; mi_atomic_maxi64_relaxed(&stat->peak, current + amount); if (amount > 0) { - mi_atomic_addi64_relaxed(&stat->allocated,amount); - } - else { - mi_atomic_addi64_relaxed(&stat->freed, -amount); + mi_atomic_addi64_relaxed(&stat->total,amount); } } else { // add thread local stat->current += amount; - if (stat->current > stat->peak) stat->peak = stat->current; - if (amount > 0) { - stat->allocated += amount; - } - else { - stat->freed += -amount; - } + if (stat->current > stat->peak) { stat->peak = stat->current; } + if (amount > 0) { stat->total += amount; } } } void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) { if (mi_is_in_main(stat)) { - mi_atomic_addi64_relaxed( &stat->count, 1 ); mi_atomic_addi64_relaxed( &stat->total, (int64_t)amount ); } else { - stat->count++; stat->total += amount; } } @@ -70,64 +61,66 @@ void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) { mi_stat_update(stat, -((int64_t)amount)); } + +static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) { + if (amount == 0) return; + if mi_unlikely(mi_is_in_main(stat)) + { + // adjust atomically + mi_atomic_addi64_relaxed(&stat->current, amount); + mi_atomic_addi64_relaxed(&stat->total,amount); + } + else { + // adjust local + stat->current += amount; + stat->total += amount; + } +} + +void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) { + mi_stat_adjust(stat, -((int64_t)amount)); +} + + // must be thread safe as it is called from stats_merge -static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) { +static void mi_stat_count_add_mt(mi_stat_count_t* stat, const mi_stat_count_t* src) { if (stat==src) return; - if (src->allocated==0 && src->freed==0) return; - mi_atomic_addi64_relaxed( &stat->allocated, src->allocated * unit); - mi_atomic_addi64_relaxed( &stat->current, src->current * unit); - mi_atomic_addi64_relaxed( &stat->freed, src->freed * unit); - // peak scores do not work across threads.. - mi_atomic_addi64_relaxed( &stat->peak, src->peak * unit); + mi_atomic_void_addi64_relaxed(&stat->total, &src->total); + mi_atomic_void_addi64_relaxed(&stat->current, &src->current); + // peak scores do really not work across threads .. we just add them + mi_atomic_void_addi64_relaxed( &stat->peak, &src->peak); + // or, take the max? 
+ // mi_atomic_maxi64_relaxed(&stat->peak, src->peak); } -static void mi_stat_counter_add(mi_stat_counter_t* stat, const mi_stat_counter_t* src, int64_t unit) { +static void mi_stat_counter_add_mt(mi_stat_counter_t* stat, const mi_stat_counter_t* src) { if (stat==src) return; - mi_atomic_addi64_relaxed( &stat->total, src->total * unit); - mi_atomic_addi64_relaxed( &stat->count, src->count * unit); + mi_atomic_void_addi64_relaxed(&stat->total, &src->total); } +#define MI_STAT_COUNT(stat) mi_stat_count_add_mt(&stats->stat, &src->stat); +#define MI_STAT_COUNTER(stat) mi_stat_counter_add_mt(&stats->stat, &src->stat); + // must be thread safe as it is called from stats_merge static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) { if (stats==src) return; - mi_stat_add(&stats->segments, &src->segments,1); - mi_stat_add(&stats->pages, &src->pages,1); - mi_stat_add(&stats->reserved, &src->reserved, 1); - mi_stat_add(&stats->committed, &src->committed, 1); - mi_stat_add(&stats->reset, &src->reset, 1); - mi_stat_add(&stats->purged, &src->purged, 1); - mi_stat_add(&stats->page_committed, &src->page_committed, 1); - - mi_stat_add(&stats->pages_abandoned, &src->pages_abandoned, 1); - mi_stat_add(&stats->segments_abandoned, &src->segments_abandoned, 1); - mi_stat_add(&stats->threads, &src->threads, 1); - - mi_stat_add(&stats->malloc, &src->malloc, 1); - mi_stat_add(&stats->segments_cache, &src->segments_cache, 1); - mi_stat_add(&stats->normal, &src->normal, 1); - mi_stat_add(&stats->huge, &src->huge, 1); - mi_stat_add(&stats->large, &src->large, 1); - - mi_stat_counter_add(&stats->pages_extended, &src->pages_extended, 1); - mi_stat_counter_add(&stats->mmap_calls, &src->mmap_calls, 1); - mi_stat_counter_add(&stats->commit_calls, &src->commit_calls, 1); - mi_stat_counter_add(&stats->reset_calls, &src->reset_calls, 1); - mi_stat_counter_add(&stats->purge_calls, &src->purge_calls, 1); - - mi_stat_counter_add(&stats->page_no_retire, &src->page_no_retire, 1); - mi_stat_counter_add(&stats->searches, &src->searches, 1); - mi_stat_counter_add(&stats->normal_count, &src->normal_count, 1); - mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1); - mi_stat_counter_add(&stats->large_count, &src->large_count, 1); -#if MI_STAT>1 + + // copy all fields + MI_STAT_FIELDS() + + #if MI_STAT>1 for (size_t i = 0; i <= MI_BIN_HUGE; i++) { - if (src->normal_bins[i].allocated > 0 || src->normal_bins[i].freed > 0) { - mi_stat_add(&stats->normal_bins[i], &src->normal_bins[i], 1); - } + mi_stat_count_add_mt(&stats->malloc_bins[i], &src->malloc_bins[i]); + } + #endif + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + mi_stat_count_add_mt(&stats->page_bins[i], &src->page_bins[i]); } -#endif } +#undef MI_STAT_COUNT +#undef MI_STAT_COUNTER + /* ----------------------------------------------------------- Display statistics ----------------------------------------------------------- */ @@ -178,26 +171,26 @@ static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64 if (unit != 0) { if (unit > 0) { mi_print_amount(stat->peak, unit, out, arg); - mi_print_amount(stat->allocated, unit, out, arg); - mi_print_amount(stat->freed, unit, out, arg); + mi_print_amount(stat->total, unit, out, arg); + // mi_print_amount(stat->freed, unit, out, arg); mi_print_amount(stat->current, unit, out, arg); mi_print_amount(unit, 1, out, arg); - mi_print_count(stat->allocated, unit, out, arg); + mi_print_count(stat->total, unit, out, arg); } else { mi_print_amount(stat->peak, -1, out, arg); - 
mi_print_amount(stat->allocated, -1, out, arg); - mi_print_amount(stat->freed, -1, out, arg); + mi_print_amount(stat->total, -1, out, arg); + // mi_print_amount(stat->freed, -1, out, arg); mi_print_amount(stat->current, -1, out, arg); if (unit == -1) { _mi_fprintf(out, arg, "%24s", ""); } else { mi_print_amount(-unit, 1, out, arg); - mi_print_count((stat->allocated / -unit), 0, out, arg); + mi_print_count((stat->total / -unit), 0, out, arg); } } - if (stat->allocated > stat->freed) { + if (stat->current != 0) { _mi_fprintf(out, arg, " "); _mi_fprintf(out, arg, (notok == NULL ? "not all freed" : notok)); _mi_fprintf(out, arg, "\n"); @@ -208,7 +201,7 @@ static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64 } else { mi_print_amount(stat->peak, 1, out, arg); - mi_print_amount(stat->allocated, 1, out, arg); + mi_print_amount(stat->total, 1, out, arg); _mi_fprintf(out, arg, "%11s", " "); // no freed mi_print_amount(stat->current, 1, out, arg); _mi_fprintf(out, arg, "\n"); @@ -225,6 +218,15 @@ static void mi_stat_peak_print(const mi_stat_count_t* stat, const char* msg, int _mi_fprintf(out, arg, "\n"); } +#if MI_STAT>1 +static void mi_stat_total_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) { + _mi_fprintf(out, arg, "%10s:", msg); + _mi_fprintf(out, arg, "%12s", " "); // no peak + mi_print_amount(stat->total, unit, out, arg); + _mi_fprintf(out, arg, "\n"); +} +#endif + static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) { _mi_fprintf(out, arg, "%10s:", msg); mi_print_amount(stat->total, -1, out, arg); @@ -233,7 +235,7 @@ static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg) { - const int64_t avg_tens = (stat->count == 0 ? 0 : (stat->total*10 / stat->count)); + const int64_t avg_tens = (stat->total == 0 ? 0 : (stat->total*10 / stat->total)); const long avg_whole = (long)(avg_tens/10); const long avg_frac1 = (long)(avg_tens%10); _mi_fprintf(out, arg, "%10s: %5ld.%ld avg\n", msg, avg_whole, avg_frac1); @@ -241,7 +243,7 @@ static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* static void mi_print_header(mi_output_fun* out, void* arg ) { - _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "freed ", "current ", "unit ", "count "); + _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "block ", "total# "); } #if MI_STAT>1 @@ -249,7 +251,7 @@ static void mi_stats_print_bins(const mi_stat_count_t* bins, size_t max, const c bool found = false; char buf[64]; for (size_t i = 0; i <= max; i++) { - if (bins[i].allocated > 0) { + if (bins[i].total > 0) { found = true; int64_t unit = _mi_bin_size((uint8_t)i); _mi_snprintf(buf, 64, "%s %3lu", fmt, (long)i); @@ -310,44 +312,45 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) // and print using that mi_print_header(out,arg); #if MI_STAT>1 - mi_stats_print_bins(stats->normal_bins, MI_BIN_HUGE, "normal",out,arg); + mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "bin",out,arg); #endif #if MI_STAT - mi_stat_print(&stats->normal, "normal", (stats->normal_count.count == 0 ? 1 : -(stats->normal.allocated / stats->normal_count.count)), out, arg); - mi_stat_print(&stats->large, "large", (stats->large_count.count == 0 ? 
1 : -(stats->large.allocated / stats->large_count.count)), out, arg); - mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 1 : -(stats->huge.allocated / stats->huge_count.count)), out, arg); - mi_stat_count_t total = { 0,0,0,0 }; - mi_stat_add(&total, &stats->normal, 1); - mi_stat_add(&total, &stats->large, 1); - mi_stat_add(&total, &stats->huge, 1); - mi_stat_print(&total, "total", 1, out, arg); + mi_stat_print(&stats->malloc_normal, "binned", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg); + // mi_stat_print(&stats->malloc_large, "large", (stats->malloc_large_count.total == 0 ? 1 : -1), out, arg); + mi_stat_print(&stats->malloc_huge, "huge", (stats->malloc_huge_count.total == 0 ? 1 : -1), out, arg); + mi_stat_count_t total = { 0,0,0 }; + mi_stat_count_add_mt(&total, &stats->malloc_normal); + // mi_stat_count_add(&total, &stats->malloc_large); + mi_stat_count_add_mt(&total, &stats->malloc_huge); + mi_stat_print_ex(&total, "total", 1, out, arg, ""); #endif #if MI_STAT>1 - mi_stat_print(&stats->malloc, "malloc req", 1, out, arg); + mi_stat_total_print(&stats->malloc_requested, "malloc req", 1, out, arg); _mi_fprintf(out, arg, "\n"); #endif mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, ""); mi_stat_print_ex(&stats->committed, "committed", 1, out, arg, ""); mi_stat_peak_print(&stats->reset, "reset", 1, out, arg ); mi_stat_peak_print(&stats->purged, "purged", 1, out, arg ); - mi_stat_print(&stats->page_committed, "touched", 1, out, arg); + mi_stat_print_ex(&stats->page_committed, "touched", 1, out, arg, ""); mi_stat_print(&stats->segments, "segments", -1, out, arg); mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg); mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg); mi_stat_print(&stats->pages, "pages", -1, out, arg); mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg); mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg); - mi_stat_counter_print(&stats->page_no_retire, "-noretire", out, arg); + mi_stat_counter_print(&stats->pages_retire, "-retire", out, arg); mi_stat_counter_print(&stats->arena_count, "arenas", out, arg); - mi_stat_counter_print(&stats->arena_crossover_count, "-crossover", out, arg); + // mi_stat_counter_print(&stats->arena_crossover_count, "-crossover", out, arg); mi_stat_counter_print(&stats->arena_rollback_count, "-rollback", out, arg); mi_stat_counter_print(&stats->mmap_calls, "mmaps", out, arg); mi_stat_counter_print(&stats->commit_calls, "commits", out, arg); mi_stat_counter_print(&stats->reset_calls, "resets", out, arg); mi_stat_counter_print(&stats->purge_calls, "purges", out, arg); + mi_stat_counter_print(&stats->malloc_guarded_count, "guarded", out, arg); mi_stat_print(&stats->threads, "threads", -1, out, arg); - mi_stat_counter_print_avg(&stats->searches, "searches", out, arg); - _mi_fprintf(out, arg, "%10s: %5zu\n", "numa nodes", _mi_os_numa_node_count()); + mi_stat_counter_print_avg(&stats->page_searches, "searches", out, arg); + _mi_fprintf(out, arg, "%10s: %5i\n", "numa nodes", _mi_os_numa_node_count()); size_t elapsed; size_t user_time; @@ -358,9 +361,9 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) size_t peak_commit; size_t page_faults; mi_process_info(&elapsed, &user_time, &sys_time, ¤t_rss, &peak_rss, ¤t_commit, &peak_commit, &page_faults); - _mi_fprintf(out, arg, "%10s: %5ld.%03ld s\n", "elapsed", elapsed/1000, elapsed%1000); - _mi_fprintf(out, arg, "%10s: user: %ld.%03ld s, system: %ld.%03ld s, faults: %lu, rss: 
", "process", - user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, (unsigned long)page_faults ); + _mi_fprintf(out, arg, "%10s: %5zu.%03zu s\n", "elapsed", elapsed/1000, elapsed%1000); + _mi_fprintf(out, arg, "%10s: user: %zu.%03zu s, system: %zu.%03zu s, faults: %zu, rss: ", "process", + user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, page_faults ); mi_printf_amount((int64_t)peak_rss, 1, out, arg, "%s"); if (peak_commit > 0) { _mi_fprintf(out, arg, ", commit: "); @@ -394,6 +397,10 @@ void mi_stats_merge(void) mi_attr_noexcept { mi_stats_merge_from( mi_stats_get_default() ); } +void _mi_stats_merge_thread(mi_tld_t* tld) { + mi_stats_merge_from( &tld->stats ); +} + void _mi_stats_done(mi_stats_t* stats) { // called from `mi_thread_done` mi_stats_merge_from(stats); } @@ -465,3 +472,164 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s if (peak_commit!=NULL) *peak_commit = pinfo.peak_commit; if (page_faults!=NULL) *page_faults = pinfo.page_faults; } + + +// -------------------------------------------------------- +// Return statistics +// -------------------------------------------------------- + +void mi_stats_get(size_t stats_size, mi_stats_t* stats) mi_attr_noexcept { + if (stats == NULL || stats_size == 0) return; + _mi_memzero(stats, stats_size); + const size_t size = (stats_size > sizeof(mi_stats_t) ? sizeof(mi_stats_t) : stats_size); + _mi_memcpy(stats, &_mi_stats_main, size); + stats->version = MI_STAT_VERSION; +} + + +// -------------------------------------------------------- +// Statics in json format +// -------------------------------------------------------- + +typedef struct mi_heap_buf_s { + char* buf; + size_t size; + size_t used; + bool can_realloc; +} mi_heap_buf_t; + +static bool mi_heap_buf_expand(mi_heap_buf_t* hbuf) { + if (hbuf==NULL) return false; + if (hbuf->buf != NULL && hbuf->size>0) { + hbuf->buf[hbuf->size-1] = 0; + } + if (hbuf->size > SIZE_MAX/2 || !hbuf->can_realloc) return false; + const size_t newsize = (hbuf->size == 0 ? mi_good_size(12*MI_KiB) : 2*hbuf->size); + char* const newbuf = (char*)mi_rezalloc(hbuf->buf, newsize); + if (newbuf == NULL) return false; + hbuf->buf = newbuf; + hbuf->size = newsize; + return true; +} + +static void mi_heap_buf_print(mi_heap_buf_t* hbuf, const char* msg) { + if (msg==NULL || hbuf==NULL) return; + if (hbuf->used + 1 >= hbuf->size && !hbuf->can_realloc) return; + for (const char* src = msg; *src != 0; src++) { + char c = *src; + if (hbuf->used + 1 >= hbuf->size) { + if (!mi_heap_buf_expand(hbuf)) return; + } + mi_assert_internal(hbuf->used < hbuf->size); + hbuf->buf[hbuf->used++] = c; + } + mi_assert_internal(hbuf->used < hbuf->size); + hbuf->buf[hbuf->used] = 0; +} + +static void mi_heap_buf_print_count_bin(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, size_t bin, bool add_comma) { + const size_t binsize = _mi_bin_size(bin); + const size_t pagesize = (binsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_SMALL_PAGE_SIZE : + (binsize <= MI_MEDIUM_OBJ_SIZE_MAX ? MI_MEDIUM_PAGE_SIZE : + #if MI_LARGE_PAGE_SIZE + (binsize <= MI_LARGE_OBJ_SIZE_MAX ? MI_LARGE_PAGE_SIZE : 0) + #else + 0 + #endif + )); + char buf[128]; + _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld, \"block_size\": %zu, \"page_size\": %zu }%s\n", prefix, stat->total, stat->peak, stat->current, binsize, pagesize, (add_comma ? 
"," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} + +static void mi_heap_buf_print_count(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, bool add_comma) { + char buf[128]; + _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld }%s\n", prefix, stat->total, stat->peak, stat->current, (add_comma ? "," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} + +static void mi_heap_buf_print_count_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_count_t* stat) { + char buf[128]; + _mi_snprintf(buf, 128, " \"%s\": ", name); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); + mi_heap_buf_print_count(hbuf, "", stat, true); +} + +static void mi_heap_buf_print_value(mi_heap_buf_t* hbuf, const char* name, int64_t val) { + char buf[128]; + _mi_snprintf(buf, 128, " \"%s\": %lld,\n", name, val); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} + +static void mi_heap_buf_print_size(mi_heap_buf_t* hbuf, const char* name, size_t val, bool add_comma) { + char buf[128]; + _mi_snprintf(buf, 128, " \"%s\": %zu%s\n", name, val, (add_comma ? "," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} + +static void mi_heap_buf_print_counter_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_counter_t* stat) { + mi_heap_buf_print_value(hbuf, name, stat->total); +} + +#define MI_STAT_COUNT(stat) mi_heap_buf_print_count_value(&hbuf, #stat, &stats->stat); +#define MI_STAT_COUNTER(stat) mi_heap_buf_print_counter_value(&hbuf, #stat, &stats->stat); + +char* mi_stats_get_json(size_t output_size, char* output_buf) mi_attr_noexcept { + mi_heap_buf_t hbuf = { NULL, 0, 0, true }; + if (output_size > 0 && output_buf != NULL) { + _mi_memzero(output_buf, output_size); + hbuf.buf = output_buf; + hbuf.size = output_size; + hbuf.can_realloc = false; + } + else { + if (!mi_heap_buf_expand(&hbuf)) return NULL; + } + mi_heap_buf_print(&hbuf, "{\n"); + mi_heap_buf_print_value(&hbuf, "version", MI_STAT_VERSION); + mi_heap_buf_print_value(&hbuf, "mimalloc_version", MI_MALLOC_VERSION); + + // process info + mi_heap_buf_print(&hbuf, " \"process\": {\n"); + size_t elapsed; + size_t user_time; + size_t sys_time; + size_t current_rss; + size_t peak_rss; + size_t current_commit; + size_t peak_commit; + size_t page_faults; + mi_process_info(&elapsed, &user_time, &sys_time, ¤t_rss, &peak_rss, ¤t_commit, &peak_commit, &page_faults); + mi_heap_buf_print_size(&hbuf, "elapsed_msecs", elapsed, true); + mi_heap_buf_print_size(&hbuf, "user_msecs", user_time, true); + mi_heap_buf_print_size(&hbuf, "system_msecs", sys_time, true); + mi_heap_buf_print_size(&hbuf, "page_faults", page_faults, true); + mi_heap_buf_print_size(&hbuf, "rss_current", current_rss, true); + mi_heap_buf_print_size(&hbuf, "rss_peak", peak_rss, true); + mi_heap_buf_print_size(&hbuf, "commit_current", current_commit, true); + mi_heap_buf_print_size(&hbuf, "commit_peak", peak_commit, false); + mi_heap_buf_print(&hbuf, " },\n"); + + // statistics + mi_stats_t* stats = &_mi_stats_main; + MI_STAT_FIELDS() + + // size bins + mi_heap_buf_print(&hbuf, " \"malloc_bins\": [\n"); + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + mi_heap_buf_print_count_bin(&hbuf, " ", &stats->malloc_bins[i], i, i!=MI_BIN_HUGE); + } + mi_heap_buf_print(&hbuf, " ],\n"); + mi_heap_buf_print(&hbuf, " \"page_bins\": [\n"); + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + mi_heap_buf_print_count_bin(&hbuf, " ", &stats->page_bins[i], i, i!=MI_BIN_HUGE); + } + mi_heap_buf_print(&hbuf, " ]\n"); + mi_heap_buf_print(&hbuf, "}\n"); + return hbuf.buf; +} diff 
--git a/tools/system_libs.py b/tools/system_libs.py index b83a50f4708a2..d86aabc6147ed 100644 --- a/tools/system_libs.py +++ b/tools/system_libs.py @@ -1862,7 +1862,7 @@ class libmimalloc(MTLibrary): path='system/lib/mimalloc/src', glob_pattern='*.c', # mimalloc includes some files at the source level, so exclude them here. - excludes=['alloc-override.c', 'free.c', 'page-queue.c', 'static.c'], + excludes=['alloc-override.c', 'arena-abandon.c', 'free.c', 'page-queue.c', 'static.c'], ) src_files += [utils.path_from_root('system/lib/mimalloc/src/prim/prim.c')] src_files += [utils.path_from_root('system/lib/emmalloc.c')]
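Note: for reference, a minimal sketch of consuming the statistics interface this update introduces (`mi_stats_get` and `mi_stats_get_json` from the new include/mimalloc-stats.h); per the header, a JSON result obtained with a NULL buffer is allocated by mimalloc and released with `mi_free`:

#include <stdio.h>
#include <mimalloc.h>
#include <mimalloc-stats.h>

int main(void) {
  void* p = mi_malloc(1024);
  mi_free(p);

  // Structured snapshot; passing the struct size keeps the copy safe across versions.
  mi_stats_t stats;
  mi_stats_get(sizeof(stats), &stats);
  printf("stats v%d, committed now: %lld bytes\n",
         stats.version, (long long)stats.committed.current);

  // JSON snapshot; with buf == NULL the string is allocated and must be mi_free'd.
  char* json = mi_stats_get_json(0, NULL);
  if (json != NULL) {
    fputs(json, stdout);
    mi_free(json);
  }
  return 0;
}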