Commit 091e23e

feat: recognize and use over-sized allocations

Allocators are allowed to return a larger memory chunk than was asked for. If the extra space is large enough, the hash table can now use it. The Global allocator will not hit this path, because it never over-sizes by enough to matter, but custom allocators may. An example allocator that hands out whole system pages is included in the test suite (Unix only, because it uses `mmap`).
1 parent 50e025c commit 091e23e
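
As background (not part of the commit), here is a minimal sketch of where the extra space comes from: allocator-api2's `Allocator::allocate` returns a `NonNull<[u8]>` whose length is the usable size of the block, and that length is allowed to exceed `layout.size()`. `Global` typically returns exactly the requested size, which is why the commit message says it will not hit the new path.

```rust
// Hypothetical illustration, not part of the commit: how a caller observes an
// over-sized allocation through the allocator-api2 `Allocator` trait.
use allocator_api2::alloc::{Allocator, Global};
use core::alloc::Layout;

fn main() {
    let layout = Layout::from_size_align(24, 8).unwrap();
    let block = Global.allocate(layout).expect("allocation failed");

    // The returned slice length is the usable size; an allocator may make it
    // larger than the requested `layout.size()`.
    assert!(block.len() >= layout.size());
    println!("requested {}, usable {}", layout.size(), block.len());

    // SAFETY: `block` was allocated above with `layout` and is not used again.
    unsafe { Global.deallocate(block.cast(), layout) };
}
```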

File tree: 3 files changed (+193, -3)

Cargo.toml (+3)

@@ -42,6 +42,9 @@ serde_test = "1.0"
 doc-comment = "0.3.1"
 bumpalo = { version = "3.13.0", features = ["allocator-api2"] }
 
+[target.'cfg(unix)'.dev-dependencies]
+libc = "0.2"
+
 [features]
 default = ["default-hasher", "inline-more", "allocator-api2", "equivalent", "raw-entry"]
src/map.rs (+120)

@@ -6514,3 +6514,123 @@ mod test_map {
         );
     }
 }
+
+#[cfg(all(test, unix))]
+mod test_map_with_mmap_allocations {
+    use super::HashMap;
+    use crate::raw::prev_pow2;
+    use allocator_api2::alloc::{AllocError, Allocator};
+    use core::alloc::Layout;
+    use core::ptr::{null_mut, NonNull};
+
+    /// This is not a production quality allocator, just good enough for
+    /// some basic tests.
+    #[derive(Clone, Copy, Debug)]
+    struct MmapAllocator {
+        /// Guarantee this is a power of 2.
+        page_size: usize,
+    }
+
+    impl MmapAllocator {
+        fn new() -> Result<Self, AllocError> {
+            let result = unsafe { libc::sysconf(libc::_SC_PAGESIZE) };
+            if result < 1 {
+                return Err(AllocError);
+            }
+
+            let page_size = result as usize;
+            if !page_size.is_power_of_two() {
+                Err(AllocError)
+            } else {
+                Ok(Self { page_size })
+            }
+        }
+
+        fn fit_to_page_size(&self, n: usize) -> Result<usize, AllocError> {
+            // If n=0, give a single page (wasteful, I know).
+            let n = if n == 0 { self.page_size } else { n };
+
+            match n & (self.page_size - 1) {
+                0 => Ok(n),
+                rem => n.checked_add(self.page_size - rem).ok_or(AllocError),
+            }
+        }
+    }
+
+    unsafe impl Allocator for MmapAllocator {
+        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+            if layout.align() > self.page_size {
+                return Err(AllocError);
+            }
+
+            let null = null_mut();
+            let len = self.fit_to_page_size(layout.size())? as libc::size_t;
+            let prot = libc::PROT_READ | libc::PROT_WRITE;
+            let flags = libc::MAP_PRIVATE | libc::MAP_ANON;
+            let addr = unsafe { libc::mmap(null, len, prot, flags, -1, 0) };
+
+            // mmap returns MAP_FAILED on failure, not Null.
+            if addr == libc::MAP_FAILED {
+                return Err(AllocError);
+            }
+
+            match NonNull::new(addr.cast()) {
+                Some(addr) => Ok(NonNull::slice_from_raw_parts(addr, len)),
+
+                // This branch shouldn't be taken in practice, but since we
+                // cannot return null as a valid pointer in our type system,
+                // we attempt to handle it.
+                None => {
+                    _ = unsafe { libc::munmap(addr, len) };
+                    Err(AllocError)
+                }
+            }
+        }
+
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+            // If they allocated it with this layout, it must round correctly.
+            let size = self.fit_to_page_size(layout.size()).unwrap();
+            let _result = libc::munmap(ptr.as_ptr().cast(), size);
+            debug_assert_eq!(0, _result)
+        }
+    }
+
+    #[test]
+    fn test_tiny_allocation_gets_rounded_to_page_size() {
+        let alloc = MmapAllocator::new().unwrap();
+        let mut map: HashMap<usize, (), _, _> = HashMap::with_capacity_in(1, alloc);
+
+        // Size of an element plus its control byte.
+        let rough_bucket_size = core::mem::size_of::<(usize, ())>() + 1;
+
+        // Accounting for some misc. padding that's likely in the allocation
+        // due to rounding to group width, etc.
+        let overhead = 3 * core::mem::size_of::<usize>();
+        let num_buckets = (alloc.page_size - overhead) / rough_bucket_size;
+        // Buckets are always powers of 2.
+        let min_elems = prev_pow2(num_buckets);
+        // Real load-factor is 7/8, but this is a lower estimation, so 1/2.
+        let min_capacity = min_elems >> 1;
+        let capacity = map.capacity();
+        assert!(
+            capacity >= min_capacity,
+            "failed: {capacity} >= {min_capacity}"
+        );
+
+        // Fill it up.
+        for i in 0..capacity {
+            map.insert(i, ());
+        }
+        // Capacity should not have changed and it should be full.
+        assert_eq!(capacity, map.len());
+        assert_eq!(capacity, map.capacity());
+
+        // Alright, make it grow.
+        map.insert(capacity, ());
+        assert!(
+            capacity < map.capacity(),
+            "failed: {capacity} < {}",
+            map.capacity()
+        );
+    }
+}
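
One detail worth calling out in the test allocator above: `fit_to_page_size` rounds requests up to a page boundary with a bitmask, which only works because `page_size` is a power of two. A standalone sketch of the same trick, assuming 4 KiB pages (hypothetical helper, not part of the commit):

```rust
// Round `n` up to a multiple of `page_size` using the power-of-two bitmask
// trick from `fit_to_page_size` (page_size must be a power of two).
fn round_up_to_page(n: usize, page_size: usize) -> Option<usize> {
    assert!(page_size.is_power_of_two());
    let n = if n == 0 { page_size } else { n };
    match n & (page_size - 1) {
        0 => Some(n),
        rem => n.checked_add(page_size - rem),
    }
}

fn main() {
    // With a 4 KiB page: 1 byte and 4096 bytes both fit in one page,
    // while 4097 bytes need two.
    assert_eq!(Some(4096), round_up_to_page(1, 4096));
    assert_eq!(Some(4096), round_up_to_page(4096, 4096));
    assert_eq!(Some(8192), round_up_to_page(4097, 4096));
}
```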

src/raw/mod.rs (+70, -3)

@@ -1442,6 +1442,40 @@ impl RawTableInner {
     }
 }
 
+/// Find the previous power of 2. If it's already a power of 2, it's unchanged.
+/// Passing zero is undefined behavior.
+pub(crate) fn prev_pow2(z: usize) -> usize {
+    let shift = mem::size_of::<usize>() * 8 - 1;
+    1 << (shift - (z.leading_zeros() as usize))
+}
+
+fn maximum_buckets_in(
+    allocation_size: usize,
+    table_layout: TableLayout,
+    group_width: usize,
+) -> usize {
+    // Given an equation like:
+    //   z >= x * y + x + g
+    // x can be maximized by doing:
+    //   x = (z - g) / (y + 1)
+    // If you squint:
+    //   x is the number of buckets
+    //   y is the table_layout.size
+    //   z is the size of the allocation
+    //   g is the group width
+    // But this is ignoring the padding needed for ctrl_align.
+    // If we remember these restrictions:
+    //   x is always a power of 2
+    //   Layout size for T must always be a multiple of T
+    // Then the alignment can be ignored if we add the constraint:
+    //   x * y >= table_layout.ctrl_align
+    // This is taken care of by `capacity_to_buckets`.
+    let numerator = allocation_size - group_width;
+    let denominator = table_layout.size + 1; // todo: ZSTs?
+    let quotient = numerator / denominator;
+    prev_pow2(quotient)
+}
+
 impl RawTableInner {
     /// Allocates a new [`RawTableInner`] with the given number of buckets.
     /// The control bytes and buckets are left uninitialized.
@@ -1459,7 +1493,7 @@ impl RawTableInner {
     unsafe fn new_uninitialized<A>(
         alloc: &A,
         table_layout: TableLayout,
-        buckets: usize,
+        mut buckets: usize,
         fallibility: Fallibility,
     ) -> Result<Self, TryReserveError>
     where
@@ -1468,13 +1502,29 @@
         debug_assert!(buckets.is_power_of_two());
 
         // Avoid `Option::ok_or_else` because it bloats LLVM IR.
-        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
+        let (layout, mut ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
             Some(lco) => lco,
             None => return Err(fallibility.capacity_overflow()),
         };
 
         let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
-            Ok(block) => block.cast(),
+            Ok(block) => {
+                // Utilize over-sized allocations.
+                let x = maximum_buckets_in(block.len(), table_layout, Group::WIDTH);
+                debug_assert!(x >= buckets);
+                // Calculate the new ctrl_offset.
+                let (_oversized_layout, oversized_ctrl_offset) =
+                    match table_layout.calculate_layout_for(x) {
+                        Some(lco) => lco,
+                        None => unsafe { hint::unreachable_unchecked() },
+                    };
+                debug_assert!(_oversized_layout.size() <= block.len());
+                debug_assert!(oversized_ctrl_offset >= ctrl_offset);
+                ctrl_offset = oversized_ctrl_offset;
+                buckets = x;
+
+                block.cast()
+            }
             Err(_) => return Err(fallibility.alloc_err(layout)),
         };
 
@@ -4166,6 +4216,23 @@ impl<T, A: Allocator> RawExtractIf<'_, T, A> {
 mod test_map {
     use super::*;
 
+    #[test]
+    fn test_prev_pow2() {
+        // Skip 0, not defined for that input.
+        let mut pow2: usize = 1;
+        while (pow2 << 1) > 0 {
+            let next_pow2 = pow2 << 1;
+            assert_eq!(pow2, prev_pow2(pow2));
+            // Need to skip 2, because it's also a power of 2, so it doesn't
+            // return the previous power of 2.
+            if next_pow2 > 2 {
+                assert_eq!(pow2, prev_pow2(pow2 + 1));
+                assert_eq!(pow2, prev_pow2(next_pow2 - 1));
+            }
+            pow2 = next_pow2;
+        }
+    }
+
     #[test]
     fn test_minimum_capacity_for_small_types() {
         #[track_caller]
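
To make the arithmetic in `maximum_buckets_in` concrete, here is a standalone sketch with the `TableLayout` fields flattened into plain parameters. The numbers assume a 4 KiB allocation, 8-byte buckets (a `(usize, ())` entry on a 64-bit target) and a 16-byte group width; the actual values depend on the target and hashbrown's internals.

```rust
// Standalone sketch (not the commit's code) of the bucket-maximization math:
// solve z >= x * y + x + g for the largest power-of-two x, where x = buckets,
// y = per-bucket size, z = allocation size, g = group width.

/// Previous power of two; same contract as the commit's `prev_pow2`
/// (calling it with zero is not allowed).
fn prev_pow2(z: usize) -> usize {
    let shift = usize::BITS as usize - 1;
    1 << (shift - z.leading_zeros() as usize)
}

fn max_buckets(allocation_size: usize, bucket_size: usize, group_width: usize) -> usize {
    let numerator = allocation_size - group_width;
    let denominator = bucket_size + 1; // one control byte per bucket
    prev_pow2(numerator / denominator)
}

fn main() {
    // (4096 - 16) / (8 + 1) = 453, whose previous power of two is 256,
    // so a whole 4 KiB page holds 256 buckets of (usize, ()) entries.
    assert_eq!(256, max_buckets(4096, 8, 16));
    // prev_pow2 leaves exact powers of two unchanged.
    assert_eq!(256, prev_pow2(256));
}
```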
