Skip to content

Commit 3b43ea2

Browse files
starknet_committer_cli: reduce boilerplate without using Box<dyn Storage>
1 parent 8ce732f commit 3b43ea2

File tree

1 file changed

+71
-56
lines changed
  • crates/starknet_committer_cli/src

1 file changed

+71
-56
lines changed

crates/starknet_committer_cli/src/main.rs

Lines changed: 71 additions & 56 deletions
Original file line number · Diff line number · Diff line change
@@ -7,6 +7,7 @@ use clap::{Args, Parser, Subcommand};
77
use starknet_committer_cli::commands::run_storage_benchmark;
88
use starknet_patricia_storage::map_storage::{CachedStorage, MapStorage};
99
use starknet_patricia_storage::mdbx_storage::MdbxStorage;
10+
use starknet_patricia_storage::storage_trait::Storage;
1011
use tracing::info;
1112
use tracing::level_filters::LevelFilter;
1213
use tracing_subscriber::reload::Handle;
@@ -73,80 +74,94 @@ enum Command {
7374
StorageBenchmark(StorageArgs),
7475
}
7576

77+
/// Wrapper to reduce boilerplate and avoid having to use `Box<dyn Storage>`.
78+
/// Different invocations of this function are used with different concrete storage types.
79+
async fn run_storage_benchmark_wrapper<S: Storage>(
80+
StorageArgs {
81+
seed,
82+
n_iterations,
83+
n_diffs,
84+
storage_type,
85+
checkpoint_interval,
86+
data_path,
87+
output_dir,
88+
checkpoint_dir,
89+
..
90+
}: StorageArgs,
91+
storage: S,
92+
) {
93+
let output_dir = output_dir
94+
.clone()
95+
.unwrap_or_else(|| format!("{data_path}/{storage_type:?}/csvs/{n_iterations}"));
96+
let checkpoint_dir = checkpoint_dir
97+
.clone()
98+
.unwrap_or_else(|| format!("{data_path}/{storage_type:?}/checkpoints/{n_iterations}"));
99+
100+
let checkpoint_dir_arg = match storage_type {
101+
StorageType::Mdbx | StorageType::CachedMdbx => Some(checkpoint_dir.as_str()),
102+
StorageType::MapStorage => None,
103+
};
104+
105+
run_storage_benchmark(
106+
seed,
107+
n_iterations,
108+
n_diffs,
109+
&output_dir,
110+
checkpoint_dir_arg,
111+
storage,
112+
checkpoint_interval,
113+
)
114+
.await;
115+
}
116+
76117
pub async fn run_committer_cli(
77118
committer_command: CommitterCliCommand,
78119
log_filter_handle: Handle<LevelFilter, Registry>,
79120
) {
80121
info!("Starting committer-cli with command: \n{:?}", committer_command);
81122
match committer_command.command {
82-
Command::StorageBenchmark(StorageArgs {
83-
seed,
84-
n_iterations,
85-
n_diffs,
86-
storage_type,
87-
cache_size,
88-
checkpoint_interval,
89-
log_level,
90-
data_path,
91-
storage_path,
92-
output_dir,
93-
checkpoint_dir,
94-
}) => {
95-
modify_log_level(log_level, log_filter_handle);
96-
let output_dir = output_dir
97-
.unwrap_or_else(|| format!("{data_path}/{storage_type:?}/csvs/{n_iterations}"));
98-
let checkpoint_dir = checkpoint_dir.unwrap_or_else(|| {
99-
format!("{data_path}/{storage_type:?}/checkpoints/{n_iterations}")
100-
});
123+
Command::StorageBenchmark(storage_args) => {
124+
let StorageArgs {
125+
ref log_level,
126+
ref storage_path,
127+
ref data_path,
128+
ref storage_type,
129+
ref cache_size,
130+
..
131+
} = storage_args;
132+
133+
modify_log_level(log_level.clone(), log_filter_handle);
134+
135+
// Construct the storage path.
136+
// Create the path on filesystem only if we are using filesystem-based storage.
137+
let storage_path = storage_path
138+
.clone()
139+
.unwrap_or_else(|| format!("{data_path}/storage/{storage_type:?}"));
140+
match storage_type {
141+
StorageType::MapStorage => (),
142+
StorageType::Mdbx | StorageType::CachedMdbx => {
143+
fs::create_dir_all(&storage_path).expect("Failed to create storage directory.")
144+
}
145+
};
101146

147+
// Run the storage benchmark.
148+
// Explicitly create a different concrete storage type in each match arm to avoid
149+
// dynamic dispatch.
102150
match storage_type {
103151
StorageType::MapStorage => {
104152
let storage = MapStorage::default();
105-
run_storage_benchmark(
106-
seed,
107-
n_iterations,
108-
n_diffs,
109-
&output_dir,
110-
None,
111-
storage,
112-
checkpoint_interval,
113-
)
114-
.await;
153+
run_storage_benchmark_wrapper(storage_args, storage).await;
115154
}
116155
StorageType::Mdbx => {
117-
let storage_path = storage_path
118-
.unwrap_or_else(|| format!("{data_path}/storage/{storage_type:?}"));
119-
fs::create_dir_all(&storage_path).expect("Failed to create storage directory.");
120156
let storage = MdbxStorage::open(Path::new(&storage_path)).unwrap();
121-
run_storage_benchmark(
122-
seed,
123-
n_iterations,
124-
n_diffs,
125-
&output_dir,
126-
Some(&checkpoint_dir),
127-
storage,
128-
checkpoint_interval,
129-
)
130-
.await;
157+
run_storage_benchmark_wrapper(storage_args, storage).await;
131158
}
132159
StorageType::CachedMdbx => {
133-
let storage_path = storage_path
134-
.unwrap_or_else(|| format!("{data_path}/storage/{storage_type:?}"));
135-
fs::create_dir_all(&storage_path).expect("Failed to create storage directory.");
136160
let storage = CachedStorage::new(
137161
MdbxStorage::open(Path::new(&storage_path)).unwrap(),
138-
NonZeroUsize::new(cache_size).unwrap(),
162+
NonZeroUsize::new(*cache_size).unwrap(),
139163
);
140-
run_storage_benchmark(
141-
seed,
142-
n_iterations,
143-
n_diffs,
144-
&output_dir,
145-
Some(&checkpoint_dir),
146-
storage,
147-
checkpoint_interval,
148-
)
149-
.await;
164+
run_storage_benchmark_wrapper(storage_args, storage).await;
150165
}
151166
}
152167
}

0 commit comments

Comments (0)