Merge #457
457: Reduced the complexity of the entity allocator r=torkleyy a=Flecheck

This pull request stores the IDs of deleted entities in a vector, reducing the complexity of entity allocation to O(1) (amortized).
The maximum memory footprint is 2x the current maximum, but that can only happen in the worst case, and I believe it is acceptable for such a use case.

This approach has the pitfall of often reusing the same entity IDs (the vector is popped like a LIFO stack), which can make their generations quite high. Using a VecDeque instead of the current Vec for the EntityCache would recycle IDs in FIFO order and spread the reuse, at a cost in memory, so if you believe the change would be worth it I can add it relatively easily. However, this might be unnecessary, as 2³¹ − 1 is the maximum generation.
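
For illustration, here is a minimal, single-threaded sketch of the free-list idea. It is hypothetical and much simpler than this PR's EntityCache (which additionally supports lock-free atomic pops); the FreeList name and u32 index type are placeholders, not code from this repository.

```rust
// Hypothetical sketch of the free-list allocator: deletion pushes an
// ID, allocation pops one, so both are (amortized) O(1).
struct FreeList {
    free: Vec<u32>, // IDs of deleted entities, reused in LIFO order
    next_id: u32,   // next never-used ID
}

impl FreeList {
    fn new() -> Self {
        FreeList { free: Vec::new(), next_id: 0 }
    }

    /// Amortized O(1): pop a recycled ID, or mint a fresh one.
    fn allocate(&mut self) -> u32 {
        self.free.pop().unwrap_or_else(|| {
            let id = self.next_id;
            self.next_id = self.next_id.checked_add(1).expect("no IDs left");
            id
        })
    }

    /// O(1): record a deleted ID for reuse.
    fn delete(&mut self, id: u32) {
        self.free.push(id);
    }
}

fn main() {
    let mut alloc = FreeList::new();
    let a = alloc.allocate(); // 0
    let _b = alloc.allocate(); // 1
    alloc.delete(a);
    // LIFO reuse: the most recently deleted ID comes back first,
    // which is why the same IDs (and hence their generations) churn.
    assert_eq!(alloc.allocate(), a);
}
```

Swapping the Vec for a VecDeque and popping from the front would hand back the oldest deleted ID instead, spreading generation growth across IDs.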



Co-authored-by: Flecheck <[email protected]>
bors[bot] and Flecheck committed Sep 8, 2018
2 parents 7b89608 + b6a6ae9 commit 9af0044
Showing 3 changed files with 163 additions and 84 deletions.
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -50,7 +50,7 @@ features = ["common", "serde"]
 cgmath = { version = "0.14", features = ["eders"] }
 criterion = "0.2"
 ron = "0.2"
-rand = "0.3"
+rand = "0.5"
 serde_json = "1.0"
 specs-derive = { path = "specs-derive", version = "0.2.0" }

55 changes: 55 additions & 0 deletions benches/world.rs
@@ -2,6 +2,7 @@
 
 #[macro_use]
 extern crate criterion;
+extern crate rand;
 extern crate rayon;
 extern crate specs;
 extern crate test;
@@ -108,6 +109,58 @@ fn delete_later(b: &mut Bencher) {
     });
 }
 
+fn create_after_delete(b: &mut Bencher) {
+    use rand::seq::sample_indices;
+    use rand::thread_rng;
+
+    let mut rng = thread_rng();
+    b.iter_with_setup(
+        || {
+            let mut w = World::new();
+            let eids: Vec<_> = (0..1000).map(|_| w.create_entity().build()).collect();
+
+            sample_indices(&mut rng, 1000, 100)
+                .into_iter()
+                .map(|i| eids[i])
+                .for_each(|e| {
+                    w.delete_entity(e)
+                        .expect("Failed deleting entity in 'create after delete' setup");
+                });
+
+            w.maintain();
+
+            w
+        },
+        |mut w| {
+            for _ in 0..100 {
+                w.create_entity().build();
+            }
+        },
+    )
+}
+
+fn create_after_delete_extreme(b: &mut Bencher) {
+    b.iter_with_setup(
+        || {
+            let mut w = World::new();
+
+            let eids: Vec<_> = (0..1000).map(|_| w.create_entity().build()).collect();
+
+            w.delete_entity(eids[0])
+                .expect("Failed deleting first entity in 'create after delete extreme' setup");
+            w.delete_entity(eids[999])
+                .expect("Failed deleting last entity in 'create after delete extreme' setup");
+            w.maintain();
+
+            w
+        },
+        |mut w| {
+            w.create_entity().build();
+            w.create_entity().build();
+        },
+    )
+}
+
 fn maintain_noop(b: &mut Bencher) {
     let mut w = World::new();
     b.iter(|| {
@@ -185,6 +238,8 @@ fn world_benchmarks(c: &mut Criterion) {
         .bench_function("delete now", delete_now)
         .bench_function("delete now with storage", delete_now_with_storage)
         .bench_function("delete later", delete_later)
+        .bench_function("create after delete", create_after_delete)
+        .bench_function("create after delete extreme", create_after_delete_extreme)
         .bench_function("maintain noop", maintain_noop)
         .bench_function("maintain add later", maintain_add_later)
         .bench_function("maintain delete later", maintain_delete_later)
190 changes: 107 additions & 83 deletions src/world/entity.rs
@@ -51,7 +51,8 @@ pub(crate) struct Allocator {
     alive: BitSet,
     raised: AtomicBitSet,
     killed: AtomicBitSet,
-    start_from: AtomicUsize,
+    cache: EntityCache,
+    max_id: AtomicUsize,
 }
 
 impl Allocator {
@@ -65,21 +66,17 @@ impl Allocator {
             }
 
             self.alive.remove(entity.id());
-            self.raised.remove(entity.id());
 
-            // Since atomically created entities don't have a generation until merge
-            // is called, the first entity of a generation won't actually have a generation
-            // in the allocator. And it is fine for us to just ignore the entity since
-            // we clear `self.raised` before so it won't get allocated later.
-            if self.generations.len() > id {
-                self.generations[id].die();
-            }
+            self.update_generation_length(id);
 
-            if id < self.start_from.load(Ordering::Relaxed) {
-                self.start_from.store(id, Ordering::Relaxed);
+            if self.raised.remove(entity.id()) {
+                self.generations[id] = self.generations[id].raised();
             }
+            self.generations[id].die();
         }
 
+        self.cache.extend(delete.iter().map(|e| e.0));
+
         Ok(())
     }

@@ -122,66 +119,36 @@ impl Allocator {
         Entity(id, gen)
     }
 
-    /// Attempt to move the `start_from` value
-    pub fn update_start_from(&self, start_from: usize) {
-        loop {
-            let current = self.start_from.load(Ordering::Relaxed);
-
-            // if the current value is bigger then ours, we bail
-            if current >= start_from {
-                return;
-            }
-
-            if start_from
-                == self.start_from
-                    .compare_and_swap(current, start_from, Ordering::Relaxed)
-            {
-                return;
-            }
-        }
-    }
-
     /// Allocate a new entity
     pub fn allocate_atomic(&self) -> Entity {
-        let idx = self.start_from.load(Ordering::Relaxed);
-        for i in idx.. {
-            if !self.alive.contains(i as Index) && !self.raised.add_atomic(i as Index) {
-                self.update_start_from(i + 1);
-
-                let gen = self.generations
-                    .get(i as usize)
-                    .map(|&gen| {
-                        if gen.is_alive() {
-                            gen
-                        } else {
-                            gen.raised()
-                        }
-                    })
-                    .unwrap_or(Generation(1));
-
-                return Entity(i as Index, gen);
-            }
-        }
-        panic!("No entities left to allocate")
+        let id = self.cache.pop_atomic().unwrap_or_else(|| {
+            atomic_increment(&self.max_id).expect("No entity left to allocate") as Index
+        });
+
+        self.raised.add_atomic(id);
+        let gen = self
+            .generations
+            .get(id as usize)
+            .map(|&gen| if gen.is_alive() { gen } else { gen.raised() })
+            .unwrap_or(Generation(1));
+        Entity(id, gen)
     }
 
     /// Allocate a new entity
     pub fn allocate(&mut self) -> Entity {
-        let idx = self.start_from.load(Ordering::Relaxed);
-        for i in idx.. {
-            if !self.raised.contains(i as Index) && !self.alive.add(i as Index) {
-                // this is safe since we have mutable access to everything!
-                self.start_from.store(i + 1, Ordering::Relaxed);
-
-                while self.generations.len() <= i as usize {
-                    self.generations.push(Generation(0));
-                }
-                self.generations[i as usize] = self.generations[i as usize].raised();
-
-                return Entity(i as Index, self.generations[i as usize]);
-            }
-        }
-        panic!("No entities left to allocate")
+        let id = self.cache.pop().unwrap_or_else(|| {
+            let id = *self.max_id.get_mut();
+            *self.max_id.get_mut() = id.checked_add(1).expect("No entity left to allocate");
+            id as Index
+        });
+
+        self.update_generation_length(id as usize);
+
+        self.alive.add(id as Index);
+
+        self.generations[id as usize] = self.generations[id as usize].raised();
+
+        Entity(id as Index, self.generations[id as usize])
     }
 
     /// Maintains the allocated entities, mainly dealing with atomically
@@ -191,30 +158,32 @@ impl Allocator {
 
         let mut deleted = vec![];
 
+        let max_id = *self.max_id.get_mut();
+        self.update_generation_length(max_id + 1);
+
         for i in (&self.raised).iter() {
-            while self.generations.len() <= i as usize {
-                self.generations.push(Generation(0));
-            }
             self.generations[i as usize] = self.generations[i as usize].raised();
             self.alive.add(i);
         }
         self.raised.clear();
 
-        if let Some(lowest) = (&self.killed).iter().next() {
-            if lowest < self.start_from.load(Ordering::Relaxed) as Index {
-                self.start_from.store(lowest as usize, Ordering::Relaxed);
-            }
-        }
-
         for i in (&self.killed).iter() {
             self.alive.remove(i);
             deleted.push(Entity(i, self.generations[i as usize]));
             self.generations[i as usize].die();
         }
         self.killed.clear();
 
+        self.cache.extend(deleted.iter().map(|e| e.0));
+
         deleted
     }
+
+    fn update_generation_length(&mut self, i: usize) {
+        if self.generations.len() <= i as usize {
+            self.generations.resize(i as usize + 1, Generation(0));
+        }
+    }
 }
 
 /// An iterator for entity creation.
@@ -337,16 +306,11 @@ impl<'a> Join for &'a EntitiesRes {
     }
 
     unsafe fn get(v: &mut &'a EntitiesRes, idx: Index) -> Entity {
-        let gen = v.alloc
+        let gen = v
+            .alloc
             .generations
             .get(idx as usize)
-            .map(|&gen| {
-                if gen.is_alive() {
-                    gen
-                } else {
-                    gen.raised()
-                }
-            })
+            .map(|&gen| if gen.is_alive() { gen } else { gen.raised() })
             .unwrap_or(Generation(1));
         Entity(idx, gen)
     }
@@ -432,3 +396,63 @@ impl Generation {
         Generation(1 - self.0)
     }
 }
+
+#[derive(Default, Debug)]
+struct EntityCache {
+    cache: Vec<Index>,
+    len: AtomicUsize,
+}
+
+impl EntityCache {
+    fn pop_atomic(&self) -> Option<Index> {
+        atomic_decrement(&self.len).map(|x| self.cache[x - 1])
+    }
+
+    fn pop(&mut self) -> Option<Index> {
+        self.maintain();
+        let x = self.cache.pop();
+        *self.len.get_mut() = self.cache.len();
+        x
+    }
+
+    fn maintain(&mut self) {
+        self.cache.truncate(*(self.len.get_mut()));
+    }
+}
+
+impl Extend<Index> for EntityCache {
+    fn extend<T: IntoIterator<Item = Index>>(&mut self, iter: T) {
+        self.maintain();
+        self.cache.extend(iter);
+        *self.len.get_mut() = self.cache.len();
+    }
+}
+
+/// Increments `i` atomically without wrapping on overflow.
+/// Resembles a `fetch_add(1, Ordering::Relaxed)` with
+/// checked overflow, returning `None` instead.
+fn atomic_increment(i: &AtomicUsize) -> Option<usize> {
+    use std::usize;
+    let mut prev = i.load(Ordering::Relaxed);
+    while prev != usize::MAX {
+        match i.compare_exchange_weak(prev, prev + 1, Ordering::Relaxed, Ordering::Relaxed) {
+            Ok(x) => return Some(x),
+            Err(next_prev) => prev = next_prev,
+        }
+    }
+    return None;
+}
+
+/// Decrements `i` atomically without wrapping on underflow.
+/// Resembles a `fetch_sub(1, Ordering::Relaxed)` with
+/// checked underflow, returning `None` instead.
+fn atomic_decrement(i: &AtomicUsize) -> Option<usize> {
+    let mut prev = i.load(Ordering::Relaxed);
+    while prev != 0 {
+        match i.compare_exchange_weak(prev, prev - 1, Ordering::Relaxed, Ordering::Relaxed) {
+            Ok(x) => return Some(x),
+            Err(next_prev) => prev = next_prev,
+        }
+    }
+    return None;
+}
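
The compare_exchange_weak loops above give checked fetch_add/fetch_sub semantics: the counter can never cross its bound, which is what lets EntityCache::pop_atomic hand out cached slots lock-free without two threads claiming the same one or underflowing past zero. Below is a self-contained sketch of that guarantee; atomic_decrement is copied verbatim from the diff, while the threaded harness around it is purely illustrative.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

// Copied from the diff above: a CAS loop that refuses to go below zero.
fn atomic_decrement(i: &AtomicUsize) -> Option<usize> {
    let mut prev = i.load(Ordering::Relaxed);
    while prev != 0 {
        match i.compare_exchange_weak(prev, prev - 1, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(x) => return Some(x),
            Err(next_prev) => prev = next_prev,
        }
    }
    None
}

fn main() {
    // Pretend the cache holds 5 recycled IDs; 8 threads race to claim one.
    let len = Arc::new(AtomicUsize::new(5));
    let handles: Vec<_> = (0..8)
        .map(|_| {
            let len = Arc::clone(&len);
            thread::spawn(move || atomic_decrement(&len).is_some())
        })
        .collect();
    let claimed = handles
        .into_iter()
        .map(|h| h.join().unwrap())
        .filter(|&won| won)
        .count();
    // Exactly 5 threads win a slot; the counter stops at zero instead of wrapping.
    assert_eq!(claimed, 5);
    assert_eq!(len.load(Ordering::Relaxed), 0);
}
```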
