From b6a6ae969a40040d73337cf0c8ef2c0da8afc218 Mon Sep 17 00:00:00 2001
From: Flecheck
Date: Thu, 6 Sep 2018 15:46:38 +0200
Subject: [PATCH] Reduced the complexity of the entity allocator

Renamed atomic_add1
Addressed comments
Added benchmarks
---
 Cargo.toml          |   2 +-
 benches/world.rs    |  55 +++++++++++++
 src/world/entity.rs | 190 +++++++++++++++++++++++++-------------------
 3 files changed, 163 insertions(+), 84 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 3574e0074..59bb92b3d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -50,7 +50,7 @@ features = ["common", "serde"]
 cgmath = { version = "0.14", features = ["eders"] }
 criterion = "0.2"
 ron = "0.2"
-rand = "0.3"
+rand = "0.5"
 serde_json = "1.0"
 specs-derive = { path = "specs-derive", version = "0.2.0" }
 
diff --git a/benches/world.rs b/benches/world.rs
index 21cf6ae0d..8a6310c38 100644
--- a/benches/world.rs
+++ b/benches/world.rs
@@ -2,6 +2,7 @@
 #[macro_use]
 extern crate criterion;
+extern crate rand;
 extern crate rayon;
 extern crate specs;
 extern crate test;
 
@@ -108,6 +109,58 @@ fn delete_later(b: &mut Bencher) {
     });
 }
 
+fn create_after_delete(b: &mut Bencher) {
+    use rand::seq::sample_indices;
+    use rand::thread_rng;
+
+    let mut rng = thread_rng();
+    b.iter_with_setup(
+        || {
+            let mut w = World::new();
+            let eids: Vec<_> = (0..1000).map(|_| w.create_entity().build()).collect();
+
+            sample_indices(&mut rng, 1000, 100)
+                .into_iter()
+                .map(|i| eids[i])
+                .for_each(|e| {
+                    w.delete_entity(e)
+                        .expect("Failed deleting entity in 'create after delete' setup");
+                });
+
+            w.maintain();
+
+            w
+        },
+        |mut w| {
+            for _ in 0..100 {
+                w.create_entity().build();
+            }
+        },
+    )
+}
+
+fn create_after_delete_extreme(b: &mut Bencher) {
+    b.iter_with_setup(
+        || {
+            let mut w = World::new();
+
+            let eids: Vec<_> = (0..1000).map(|_| w.create_entity().build()).collect();
+
+            w.delete_entity(eids[0])
+                .expect("Failed deleting first entity in 'create after delete extreme' setup");
+            w.delete_entity(eids[999])
+                .expect("Failed deleting last entity in 'create after delete extreme' setup");
+            w.maintain();
+
+            w
+        },
+        |mut w| {
+            w.create_entity().build();
+            w.create_entity().build();
+        },
+    )
+}
+
 fn maintain_noop(b: &mut Bencher) {
     let mut w = World::new();
     b.iter(|| {
@@ -185,6 +238,8 @@ fn world_benchmarks(c: &mut Criterion) {
         .bench_function("delete now", delete_now)
         .bench_function("delete now with storage", delete_now_with_storage)
         .bench_function("delete later", delete_later)
+        .bench_function("create after delete", create_after_delete)
+        .bench_function("create after delete extreme", create_after_delete_extreme)
         .bench_function("maintain noop", maintain_noop)
         .bench_function("maintain add later", maintain_add_later)
         .bench_function("maintain delete later", maintain_delete_later)
diff --git a/src/world/entity.rs b/src/world/entity.rs
index a4a44dfcb..1dad492b3 100644
--- a/src/world/entity.rs
+++ b/src/world/entity.rs
@@ -51,7 +51,8 @@ pub(crate) struct Allocator {
     alive: BitSet,
     raised: AtomicBitSet,
     killed: AtomicBitSet,
-    start_from: AtomicUsize,
+    cache: EntityCache,
+    max_id: AtomicUsize,
 }
 
 impl Allocator {
@@ -65,21 +66,17 @@
             }
 
             self.alive.remove(entity.id());
-            self.raised.remove(entity.id());
-
-            // Since atomically created entities don't have a generation until merge
-            // is called, the first entity of a generation won't actually have a generation
-            // in the allocator. And it is fine for us to just ignore the entity since
-            // we clear `self.raised` before so it won't get allocated later.
-            if self.generations.len() > id {
-                self.generations[id].die();
-            }
-            if id < self.start_from.load(Ordering::Relaxed) {
-                self.start_from.store(id, Ordering::Relaxed);
+            self.update_generation_length(id);
+
+            if self.raised.remove(entity.id()) {
+                self.generations[id] = self.generations[id].raised();
             }
+            self.generations[id].die();
         }
 
+        self.cache.extend(delete.iter().map(|e| e.0));
+
         Ok(())
     }
 
@@ -122,66 +119,36 @@
         Entity(id, gen)
     }
 
-    /// Attempt to move the `start_from` value
-    pub fn update_start_from(&self, start_from: usize) {
-        loop {
-            let current = self.start_from.load(Ordering::Relaxed);
-
-            // if the current value is bigger then ours, we bail
-            if current >= start_from {
-                return;
-            }
-
-            if start_from
-                == self.start_from
-                    .compare_and_swap(current, start_from, Ordering::Relaxed)
-            {
-                return;
-            }
-        }
-    }
-
     /// Allocate a new entity
     pub fn allocate_atomic(&self) -> Entity {
-        let idx = self.start_from.load(Ordering::Relaxed);
-        for i in idx.. {
-            if !self.alive.contains(i as Index) && !self.raised.add_atomic(i as Index) {
-                self.update_start_from(i + 1);
-
-                let gen = self.generations
-                    .get(i as usize)
-                    .map(|&gen| {
-                        if gen.is_alive() {
-                            gen
-                        } else {
-                            gen.raised()
-                        }
-                    })
-                    .unwrap_or(Generation(1));
-
-                return Entity(i as Index, gen);
-            }
-        }
-        panic!("No entities left to allocate")
+        let id = self.cache.pop_atomic().unwrap_or_else(|| {
+            atomic_increment(&self.max_id).expect("No entity left to allocate") as Index
+        });
+
+        self.raised.add_atomic(id);
+        let gen = self
+            .generations
+            .get(id as usize)
+            .map(|&gen| if gen.is_alive() { gen } else { gen.raised() })
+            .unwrap_or(Generation(1));
+        Entity(id, gen)
     }
 
     /// Allocate a new entity
     pub fn allocate(&mut self) -> Entity {
-        let idx = self.start_from.load(Ordering::Relaxed);
-        for i in idx.. {
-            if !self.raised.contains(i as Index) && !self.alive.add(i as Index) {
-                // this is safe since we have mutable access to everything!
-                self.start_from.store(i + 1, Ordering::Relaxed);
-
-                while self.generations.len() <= i as usize {
-                    self.generations.push(Generation(0));
-                }
-                self.generations[i as usize] = self.generations[i as usize].raised();
-
-                return Entity(i as Index, self.generations[i as usize]);
-            }
-        }
-        panic!("No entities left to allocate")
+        let id = self.cache.pop().unwrap_or_else(|| {
+            let id = *self.max_id.get_mut();
+            *self.max_id.get_mut() = id.checked_add(1).expect("No entity left to allocate");
+            id as Index
+        });
+
+        self.update_generation_length(id as usize);
+
+        self.alive.add(id as Index);
+
+        self.generations[id as usize] = self.generations[id as usize].raised();
+
+        Entity(id as Index, self.generations[id as usize])
     }
 
     /// Maintains the allocated entities, mainly dealing with atomically
@@ -191,21 +158,15 @@
 
         let mut deleted = vec![];
 
+        let max_id = *self.max_id.get_mut();
+        self.update_generation_length(max_id + 1);
+
         for i in (&self.raised).iter() {
-            while self.generations.len() <= i as usize {
-                self.generations.push(Generation(0));
-            }
             self.generations[i as usize] = self.generations[i as usize].raised();
             self.alive.add(i);
         }
         self.raised.clear();
 
-        if let Some(lowest) = (&self.killed).iter().next() {
-            if lowest < self.start_from.load(Ordering::Relaxed) as Index {
-                self.start_from.store(lowest as usize, Ordering::Relaxed);
-            }
-        }
-
         for i in (&self.killed).iter() {
             self.alive.remove(i);
             deleted.push(Entity(i, self.generations[i as usize]));
@@ -213,8 +174,16 @@
         }
         self.killed.clear();
 
+        self.cache.extend(deleted.iter().map(|e| e.0));
+
         deleted
     }
+
+    fn update_generation_length(&mut self, i: usize) {
+        if self.generations.len() <= i as usize {
+            self.generations.resize(i as usize + 1, Generation(0));
+        }
+    }
 }
 
 /// An iterator for entity creation.
@@ -337,16 +306,11 @@
     }
 
     unsafe fn get(v: &mut &'a EntitiesRes, idx: Index) -> Entity {
-        let gen = v.alloc
+        let gen = v
+            .alloc
             .generations
             .get(idx as usize)
-            .map(|&gen| {
-                if gen.is_alive() {
-                    gen
-                } else {
-                    gen.raised()
-                }
-            })
+            .map(|&gen| if gen.is_alive() { gen } else { gen.raised() })
             .unwrap_or(Generation(1));
         Entity(idx, gen)
     }
@@ -432,3 +396,63 @@ impl Generation {
         Generation(1 - self.0)
     }
 }
+
+#[derive(Default, Debug)]
+struct EntityCache {
+    cache: Vec<Index>,
+    len: AtomicUsize,
+}
+
+impl EntityCache {
+    fn pop_atomic(&self) -> Option<Index> {
+        atomic_decrement(&self.len).map(|x| self.cache[x - 1])
+    }
+
+    fn pop(&mut self) -> Option<Index> {
+        self.maintain();
+        let x = self.cache.pop();
+        *self.len.get_mut() = self.cache.len();
+        x
+    }
+
+    fn maintain(&mut self) {
+        self.cache.truncate(*(self.len.get_mut()));
+    }
+}
+
+impl Extend<Index> for EntityCache {
+    fn extend<T: IntoIterator<Item = Index>>(&mut self, iter: T) {
+        self.maintain();
+        self.cache.extend(iter);
+        *self.len.get_mut() = self.cache.len();
+    }
+}
+
+/// Increments `i` atomically without wrapping on overflow.
+/// Resembles a `fetch_add(1, Ordering::Relaxed)` with
+/// checked overflow, returning `None` instead.
+fn atomic_increment(i: &AtomicUsize) -> Option<usize> {
+    use std::usize;
+    let mut prev = i.load(Ordering::Relaxed);
+    while prev != usize::MAX {
+        match i.compare_exchange_weak(prev, prev + 1, Ordering::Relaxed, Ordering::Relaxed) {
+            Ok(x) => return Some(x),
+            Err(next_prev) => prev = next_prev,
+        }
+    }
+    None
+}
+
+/// Decrements `i` atomically without wrapping on underflow.
+/// Resembles a `fetch_sub(1, Ordering::Relaxed)` with
+/// checked underflow, returning `None` instead.
+fn atomic_decrement(i: &AtomicUsize) -> Option<usize> {
+    let mut prev = i.load(Ordering::Relaxed);
+    while prev != 0 {
+        match i.compare_exchange_weak(prev, prev - 1, Ordering::Relaxed, Ordering::Relaxed) {
+            Ok(x) => return Some(x),
+            Err(next_prev) => prev = next_prev,
+        }
+    }
+    None
+}
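
Reviewer note, not part of the patch: the subtlest piece above is the lock-free handoff in `EntityCache::pop_atomic`, where a successful `atomic_decrement` reserves slot `x - 1` exclusively for the calling thread. The sketch below reproduces that CAS loop in isolation so it can be read and stress-tested outside the allocator. Only `atomic_decrement` mirrors the patch; `Cache`, `main` and the thread scaffolding are hypothetical test harness.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

// Trimmed stand-in for the patch's EntityCache: `len` counts how many
// entries of `slots` are still claimable.
struct Cache {
    slots: Vec<u32>,
    len: AtomicUsize,
}

fn atomic_decrement(i: &AtomicUsize) -> Option<usize> {
    let mut prev = i.load(Ordering::Relaxed);
    while prev != 0 {
        // `compare_exchange_weak` may fail spuriously or because another
        // thread won the race; either way we retry with the fresh value.
        match i.compare_exchange_weak(prev, prev - 1, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(x) => return Some(x),
            Err(next_prev) => prev = next_prev,
        }
    }
    None // cache exhausted; in the patch the caller falls back to `max_id`
}

fn main() {
    let cache = Arc::new(Cache {
        slots: vec![7, 8, 9],
        len: AtomicUsize::new(3),
    });

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let cache = Arc::clone(&cache);
            // Each successful decrement returns a distinct old length `x`,
            // so no two threads read the same slot index `x - 1`.
            thread::spawn(move || atomic_decrement(&cache.len).map(|x| cache.slots[x - 1]))
        })
        .collect();

    for h in handles {
        // Exactly three threads get Some(id); the fourth sees the empty cache.
        println!("{:?}", h.join().unwrap());
    }
}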
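
A quick behavioral check of what the new allocator buys, and what the `create_after_delete` benchmarks exercise: after a delete plus `maintain()`, the freed id lands in the allocator's cache, so the next creation pops it directly instead of scanning from `start_from`. Again not part of the patch; it uses the same public `World` API as benches/world.rs, and the reuse assertion is what I'd expect under this patch's cache-based allocator.

extern crate specs;

use specs::World;

fn main() {
    let mut w = World::new();
    let eids: Vec<_> = (0..10).map(|_| w.create_entity().build()).collect();

    // Delete one entity and flush the change into the allocator's cache.
    w.delete_entity(eids[3]).expect("entity was alive");
    w.maintain();

    // The next creation should reuse id 3 from the cache; only the
    // generation is bumped, so the stale handle stays invalid.
    let reused = w.create_entity().build();
    assert_eq!(reused.id(), eids[3].id());
    assert_ne!(reused, eids[3]);
}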