@@ -36,8 +36,8 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
-                                               bool *from_pool);
+static slab_list_item_t *
+bucket_get_avail_slab(disjoint_pool_t *pool, bucket_t *bucket, bool *from_pool);

 static __TLS umf_result_t TLS_last_allocation_error;

@@ -69,7 +69,7 @@ static size_t bucket_slab_alloc_size(bucket_t *bucket) {
     return utils_max(bucket->size, bucket_slab_min_size(bucket));
 }

-static slab_t *create_slab(bucket_t *bucket) {
+static slab_t *create_slab(bucket_t *bucket, void *mem_ptr) {
     assert(bucket);

     umf_result_t res = UMF_RESULT_SUCCESS;
@@ -110,13 +110,17 @@ static slab_t *create_slab(bucket_t *bucket) {
     // padding at the end of the slab
     slab->slab_size = bucket_slab_alloc_size(bucket);

-    // TODO not true
-    // NOTE: originally slabs memory were allocated without alignment
-    // with this registering a slab is simpler and doesn't require multimap
-    res = umfMemoryProviderAlloc(provider, slab->slab_size, 0, &slab->mem_ptr);
-    if (res != UMF_RESULT_SUCCESS) {
-        LOG_ERR("allocation of slab data failed!");
-        goto free_slab;
+    // if the mem_ptr is provided, we use the user-provided memory instead of
+    // allocating a new one
+    if (mem_ptr) {
+        slab->mem_ptr = mem_ptr;
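+        // the memory was carved out of a larger bucket's slab by the reuse
+        // path in bucket_get_avail_slab; this slab does not own a fresh
+        // provider allocation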
+    } else {
+        res = umfMemoryProviderAlloc(provider, slab->slab_size, 0,
+                                     &slab->mem_ptr);
+        if (res != UMF_RESULT_SUCCESS) {
+            LOG_ERR("allocation of slab data failed!");
+            goto free_slab;
+        }
     }

     // raw allocation is not available for user so mark it as inaccessible
@@ -301,6 +305,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
         // pool or freed.
         *to_pool = bucket_can_pool(bucket);
         if (*to_pool == false) {
+
+            // TODO - reuse strategy?
+
             // remove slab
             slab_list_item_t *slab_it = &slab->iter;
             assert(slab_it->val != NULL);
@@ -317,8 +324,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
 }

 // NOTE: this function must be called under bucket->bucket_lock
-static void *bucket_get_free_chunk(bucket_t *bucket, bool *from_pool) {
-    slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, from_pool);
+static void *bucket_get_free_chunk(disjoint_pool_t *pool, bucket_t *bucket,
+                                   bool *from_pool) {
+    slab_list_item_t *slab_it = bucket_get_avail_slab(pool, bucket, from_pool);
     if (slab_it == NULL) {
         return NULL;
     }
@@ -342,7 +350,7 @@ static size_t bucket_chunk_cut_off(bucket_t *bucket) {
 }

 static slab_t *bucket_create_slab(bucket_t *bucket) {
-    slab_t *slab = create_slab(bucket);
+    slab_t *slab = create_slab(bucket, NULL);
     if (slab == NULL) {
         LOG_ERR("create_slab failed!")
         return NULL;
     }
362370    return  slab ;
363371}
364372
365- static  slab_list_item_t  * bucket_get_avail_slab (bucket_t  * bucket ,
373+ static  slab_list_item_t  * bucket_get_avail_slab (disjoint_pool_t  * pool ,
374+                                                bucket_t  * bucket ,
366375                                               bool  * from_pool ) {
376+     if  (pool  ==  NULL  ||  bucket  ==  NULL ) {
377+         return  NULL ;
378+     }
379+ 
380+     if  (bucket -> available_slabs  ==  NULL  &&  pool -> params .reuse_strategy  ==  1 ) {
381+         // try to find slabs in larger buckets 
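+        // reuse strategy 1: before allocating fresh memory from the provider,
+        // look for a completely free slab in a larger bucket and split it
+        // into slabs of this bucket's size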
+        for (size_t i = 0; i < pool->buckets_num; i++) {
+            bucket_t *larger_bucket = pool->buckets[i];
+            if (larger_bucket->size < bucket->size) {
+                continue;
+            }
+
+            if (larger_bucket->available_slabs == NULL ||
+                larger_bucket->available_slabs->val->num_chunks_allocated > 0) {
+                continue;
+            }
+
+            if (larger_bucket->size % bucket->size != 0) {
+                // TODO what about this case?
+                continue;
+            }
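+            // at this point the larger bucket has a completely free slab and
+            // its size is an exact multiple of this bucket's size, so the
+            // memory can be split cleanly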
+
+            // move available slab from larger bucket to smaller one
+            slab_list_item_t *slab_it = larger_bucket->available_slabs;
+            assert(slab_it->val != NULL);
+            DL_DELETE(larger_bucket->available_slabs, slab_it);
+            // TODO check global lock + bucket locks
+            pool_unregister_slab(larger_bucket->pool, slab_it->val);
+            larger_bucket->available_slabs_num--;
+            larger_bucket->chunked_slabs_in_pool--;
+            // reflect the slab leaving the donor bucket's pool in its stats
+            bucket_update_stats(larger_bucket, 0, -1);
+
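+            // carve the donor slab's memory into consecutive slabs of this
+            // bucket's slab size and hand them over to this bucket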
+            void *mem_ptr = slab_it->val->mem_ptr;
+            while (mem_ptr < slab_get_end(slab_it->val)) {
+                slab_t *slab = create_slab(bucket, mem_ptr);
+                assert(slab != NULL);
+
+                // register the slab in the pool
+                umf_result_t res = pool_register_slab(bucket->pool, slab);
+                if (res != UMF_RESULT_SUCCESS) {
+                    // TODO handle errors
+                    return NULL;
+                }
+
+                DL_PREPEND(bucket->available_slabs, &slab->iter);
+                bucket->available_slabs_num++;
+                bucket->chunked_slabs_in_pool++;
+                // account for the new pooled slab in this bucket's stats
+                bucket_update_stats(bucket, 0, 1);
+
+                mem_ptr = (void *)((uintptr_t)mem_ptr + slab->slab_size);
+            }
+            // Ensure that we used the whole slab
+            assert(mem_ptr == slab_get_end(slab_it->val));
+            umf_ba_global_free(slab_it->val);
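+            // only the donor slab's metadata is freed here; its memory now
+            // belongs to the newly created smaller slabs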
+
+            if (bucket->available_slabs == NULL) {
+                bucket_create_slab(bucket);
+                *from_pool = false;
+                return bucket->available_slabs;
+            }
+
+            // TODO common code
+            slab_t *slab = bucket->available_slabs->val;
+            // Allocation from an existing slab is treated as from pool for statistics.
+            *from_pool = true;
+            if (slab->num_chunks_allocated == 0) {
+                assert(bucket->chunked_slabs_in_pool > 0);
+                // If this was an empty slab, it was in the pool.
+                // Now it is no longer in the pool, so update the count.
+                --bucket->chunked_slabs_in_pool;
+                uint64_t size_to_sub = bucket_slab_alloc_size(bucket);
+                uint64_t old_size = utils_fetch_and_sub_u64(
+                    &bucket->shared_limits->total_size, size_to_sub);
+                (void)old_size;
+                assert(old_size >= size_to_sub);
+                bucket_update_stats(bucket, 1, -1);
+            }
+
+            return bucket->available_slabs;
+        }
+    }
+
     if (bucket->available_slabs == NULL) {
         bucket_create_slab(bucket);
         *from_pool = false;
@@ -403,10 +496,12 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
         return;
     }

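+    // negative deltas must not wrap the unsigned counters below zero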
+    assert(in_use >= 0 || bucket->curr_slabs_in_use >= (size_t)(-in_use));
     bucket->curr_slabs_in_use += in_use;
     bucket->max_slabs_in_use =
         utils_max(bucket->curr_slabs_in_use, bucket->max_slabs_in_use);

+    assert(in_pool >= 0 || bucket->curr_slabs_in_pool >= (size_t)(-in_pool));
     bucket->curr_slabs_in_pool += in_pool;
     bucket->max_slabs_in_pool =
         utils_max(bucket->curr_slabs_in_pool, bucket->max_slabs_in_pool);
@@ -542,7 +637,7 @@ static void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t size) {
     utils_mutex_lock(&bucket->bucket_lock);

     bool from_pool = false;
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);

     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -759,7 +854,7 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {

     utils_mutex_lock(&bucket->bucket_lock);

-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);

     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -984,6 +1079,7 @@ umfDisjointPoolParamsCreate(umf_disjoint_pool_params_handle_t *hParams) {
         .capacity = 0,
         .min_bucket_size = UMF_DISJOINT_POOL_MIN_BUCKET_DEFAULT_SIZE,
         .cur_pool_size = 0,
+        .reuse_strategy = 0, // default: no cross-bucket slab reuse
         .pool_trace = 0,
         .shared_limits = NULL,
         .name = {*DEFAULT_NAME},
@@ -1056,7 +1152,6 @@ umfDisjointPoolParamsSetMinBucketSize(umf_disjoint_pool_params_handle_t hParams,
     hParams->min_bucket_size = minBucketSize;
     return UMF_RESULT_SUCCESS;
 }
-
 umf_result_t
 umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
                               int poolTrace) {
@@ -1069,6 +1164,18 @@ umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
     return UMF_RESULT_SUCCESS;
 }

+umf_result_t
+umfDisjointPoolParamsSetReuseStrategy(umf_disjoint_pool_params_handle_t hParams,
+                                      unsigned int reuseStrategy) {
+    if (!hParams) {
+        LOG_ERR("disjoint pool params handle is NULL");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    hParams->reuse_strategy = reuseStrategy;
+    return UMF_RESULT_SUCCESS;
+}
+
 umf_result_t umfDisjointPoolParamsSetSharedLimits(
     umf_disjoint_pool_params_handle_t hParams,
     umf_disjoint_pool_shared_limits_handle_t hSharedLimits) {
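
A minimal usage sketch for the new parameter (assuming the usual UMF entry points umfPoolCreate, umfDisjointPoolOps, and umfDisjointPoolParamsDestroy, which sit outside this diff; provider stands for any valid memory provider handle):

    umf_disjoint_pool_params_handle_t params = NULL;
    umf_result_t res = umfDisjointPoolParamsCreate(&params);
    // check res ...

    // 0 (the default) keeps the old behavior; 1 enables splitting completely
    // free slabs from larger buckets, as implemented in bucket_get_avail_slab
    res = umfDisjointPoolParamsSetReuseStrategy(params, 1);

    umf_memory_pool_handle_t pool = NULL;
    res = umfPoolCreate(umfDisjointPoolOps(), provider, params, 0, &pool);

    umfDisjointPoolParamsDestroy(params);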