Move Roaring bench to benches (#14)
Co-authored-by: Rushmore Mushambi <[email protected]>
emmanuel-keller and rushmorem authored Mar 4, 2024
1 parent dc7ff51 commit 5e87d9e
Showing 4 changed files with 100 additions and 77 deletions.
14 changes: 14 additions & 0 deletions .github/workflows/ci.yml
@@ -76,3 +76,17 @@ jobs:

      - name: Test with all features
        run: cargo test --all-features --workspace

  bench:
    name: Run benchmarks
    runs-on: ubuntu-latest
    steps:

      - name: Install stable toolchain
        uses: dtolnay/rust-toolchain@stable

      - name: Checkout sources
        uses: actions/checkout@v4

      - name: Bench
        run: cargo bench --all-features roaring_benchmark
5 changes: 5 additions & 0 deletions Cargo.toml
@@ -34,3 +34,8 @@ uuid = { version = "1.4.1", optional = true }

[dev-dependencies]
rand = "0.8.5"
criterion = "0.5.1"

[[bench]]
name = "roaring"
harness = false
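
For context (not part of the commit): harness = false tells Cargo not to build this bench target against the default libtest harness, so Criterion's criterion_main! macro in benches/roaring.rs can supply the entry point. The benchmark can then be run locally with the same invocation the CI job uses:

    cargo bench --all-features roaring_benchmark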
81 changes: 81 additions & 0 deletions benches/roaring.rs
@@ -0,0 +1,81 @@
use bincode::Options;
use criterion::{criterion_group, criterion_main, Criterion};
use rand::random;
use roaring::RoaringTreemap;
use std::time::SystemTime;

fn bench_roaring_serialization_benchmark() {
    let mut val = RoaringTreemap::new();
    for i in 0..50_000_000 {
        if random() {
            val.insert(i);
        }
    }
    // COLLECTING ELAPSED TIME AND SIZE

    // Bincode with default options is: slower and bigger than direct serialization
    let bincode_elapsed;
    let bincode_size;
    {
        let mut mem: Vec<u8> = vec![];
        let t = SystemTime::now();
        bincode::serialize_into(&mut mem, &val).unwrap();
        bincode_elapsed = t.elapsed().unwrap();
        bincode_size = mem.len();
    }
    // Bincode with options is: as fast, but still bigger than direct serialization
    let bincode_options_elapsed;
    let bincode_options_size;
    {
        let mut mem: Vec<u8> = vec![];
        let t = SystemTime::now();
        bincode::options()
            .with_no_limit()
            .with_little_endian()
            .with_varint_encoding()
            .reject_trailing_bytes()
            .serialize_into(&mut mem, &val)
            .unwrap();
        bincode_options_elapsed = t.elapsed().unwrap();
        bincode_options_size = mem.len();
    }
    // Direct serialization is: faster and smaller
    let direct_elapsed;
    let direct_size;
    {
        let mut mem: Vec<u8> = vec![];
        let t = SystemTime::now();
        val.serialize_into(&mut mem).unwrap();
        direct_elapsed = t.elapsed().unwrap();
        direct_size = mem.len();
    }

    // ASSERTIONS
    assert!(
        direct_elapsed < bincode_elapsed,
        "direct_elapsed({direct_elapsed:?}) < bincode_elapsed({bincode_elapsed:?})"
    );
    let rate = direct_elapsed.as_micros() as f32 / bincode_options_elapsed.as_micros() as f32;
    assert!(rate < 1.1, "rate({rate}) < 1.1");
    // Direct is smaller
    assert!(
        direct_size < bincode_size,
        "direct_size({direct_size}) < bincode_size({bincode_size})"
    );
    assert!(
        direct_size < bincode_options_size,
        "direct_size({direct_size}) < bincode_options_size({bincode_options_size})"
    );
}

fn roaring_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("roaring_benchmark");
    group.sample_size(10);
    group.bench_function("bench_roaring_serialization_benchmark", |b| {
        b.iter(bench_roaring_serialization_benchmark)
    });
    group.finish();
}

criterion_group!(benches, roaring_benchmark);
criterion_main!(benches);
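
For context (not part of the commit): the bench asserts that roaring's own serialization is faster than bincode with default options, at most about 10% slower than bincode with tuned options, and smaller than both. A minimal round-trip sketch of that direct path follows; it assumes the roaring crate's deserialize_from is the read-side counterpart of the serialize_into call used above.

    // Round-trip sketch (assumption: RoaringTreemap::deserialize_from is the
    // counterpart of the serialize_into call used in the bench).
    use roaring::RoaringTreemap;

    fn main() -> std::io::Result<()> {
        let mut val = RoaringTreemap::new();
        val.insert(7);
        val.insert(1u64 << 40);

        let mut mem: Vec<u8> = vec![];
        val.serialize_into(&mut mem)?; // direct serialization, as measured by the bench
        let out = RoaringTreemap::deserialize_from(&mem[..])?;
        assert_eq!(val, out);
        Ok(())
    }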
77 changes: 0 additions & 77 deletions src/implementations/roaring.rs
@@ -39,10 +39,7 @@ impl Revisioned for RoaringBitmap {
#[cfg(test)]
mod tests {
    use super::Revisioned;
    use bincode::Options;
    use rand::random;
    use roaring::{RoaringBitmap, RoaringTreemap};
    use std::time::SystemTime;

    #[test]
    fn test_roaring_treemap() {
@@ -65,78 +65,4 @@ mod tests {
            <RoaringBitmap as Revisioned>::deserialize_revisioned(&mut mem.as_slice()).unwrap();
        assert_eq!(val, out);
    }

    #[test]
    fn test_roaring_serialization_benchmark() {
        let mut val = RoaringTreemap::new();
        for i in 0..10000 {
            if random() {
                val.insert(i);
            }
        }
        // COLLECTING ELAPSED TIME AND SIZE

        // Bincode with default options is: slower and bigger than direct serialization
        let bincode_elapsed;
        let bincode_size;
        {
            let mut mem: Vec<u8> = vec![];
            let t = SystemTime::now();
            bincode::serialize_into(&mut mem, &val).unwrap();
            bincode_elapsed = t.elapsed().unwrap();
            bincode_size = mem.len();
        }
        // Bincode with options is: as fast, but still bigger than direct serialization
        let bincode_options_elapsed;
        let bincode_options_size;
        {
            let mut mem: Vec<u8> = vec![];
            let t = SystemTime::now();
            bincode::options()
                .with_no_limit()
                .with_little_endian()
                .with_varint_encoding()
                .reject_trailing_bytes()
                .serialize_into(&mut mem, &val)
                .unwrap();
            bincode_options_elapsed = t.elapsed().unwrap();
            bincode_options_size = mem.len();
        }
        // Direct serialization is: faster and smaller
        let direct_elapsed;
        let direct_size;
        {
            let mut mem: Vec<u8> = vec![];
            let t = SystemTime::now();
            val.serialize_into(&mut mem).unwrap();
            direct_elapsed = t.elapsed().unwrap();
            direct_size = mem.len();
        }

        // ASSERTIONS

        println!("Bincode::default, Bincode::options, Direct, Ratio direct/bincode::options");
        // Direct is faster
        println!(
            "Elapsed - {} > {} > {} - {}",
            bincode_elapsed.as_micros(),
            bincode_options_elapsed.as_micros(),
            direct_elapsed.as_micros(),
            direct_elapsed.as_micros() as f32 / bincode_options_elapsed.as_micros() as f32
        );
        assert!(direct_elapsed < bincode_elapsed);
        assert!(
            (direct_elapsed.as_micros() as f32 / bincode_options_elapsed.as_micros() as f32) < 1.1
        );
        // Direct is smaller
        println!(
            "Size: {} > {} > {} - {}",
            bincode_size,
            bincode_options_size,
            direct_size,
            direct_size as f32 / bincode_options_size as f32
        );
        assert!(direct_size < bincode_size);
        assert!(direct_size < bincode_options_size);
    }
}
