diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml
index 8473ed413..043f91d8f 100644
--- a/.github/workflows/vss-integration.yml
+++ b/.github/workflows/vss-integration.yml
@@ -45,4 +45,4 @@ jobs:
           cd ldk-node
           export TEST_VSS_BASE_URL="http://localhost:8080/vss"
           RUSTFLAGS="--cfg vss_test" cargo test io::vss_store
-          RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss
+          RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss -- --nocapture
diff --git a/Cargo.toml b/Cargo.toml
index 544dfca08..9ed1d076c 100755
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -65,7 +65,8 @@ serde = { version = "1.0.210", default-features = false, features = ["std", "der
 serde_json = { version = "1.0.128", default-features = false, features = ["std"] }
 log = { version = "0.4.22", default-features = false, features = ["std"]}
 
-vss-client = "0.3"
+#vss-client-ng = "0.3"
+vss-client-ng = { git = "https://github.com/tnull/vss-client", rev = "7cf661b4ba45983ecad0f59e6d74050e2c84212f" }
 prost = { version = "0.11.6", default-features = false}
 
 [target.'cfg(windows)'.dependencies]
diff --git a/src/builder.rs b/src/builder.rs
index c0e39af7a..c3682c3bb 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -39,7 +39,7 @@ use lightning::util::persist::{
 use lightning::util::ser::ReadableArgs;
 use lightning::util::sweep::OutputSweeper;
 use lightning_persister::fs_store::FilesystemStore;
-use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider};
+use vss_client_ng::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider};
 
 use crate::chain::ChainSource;
 use crate::config::{
@@ -732,7 +732,11 @@ impl NodeBuilder {
         let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes();
 
         let vss_store =
-            VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider, Arc::clone(&runtime));
+            VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider).map_err(|e| {
+                log_error!(logger, "Failed to setup store: {}", e);
+                BuildError::KVStoreSetupFailed
+            })?;
+
         build_with_store_internal(
             config,
             self.chain_data_source_config.as_ref(),
diff --git a/src/ffi/types.rs b/src/ffi/types.rs
index 3c88a665f..e99cdc230 100644
--- a/src/ffi/types.rs
+++ b/src/ffi/types.rs
@@ -40,7 +40,7 @@ pub use lightning_liquidity::lsps1::msgs::{
 };
 pub use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
 pub use lightning_types::string::UntrustedString;
-pub use vss_client::headers::{VssHeaderProvider, VssHeaderProviderError};
+pub use vss_client_ng::headers::{VssHeaderProvider, VssHeaderProviderError};
 
 use crate::builder::sanitize_alias;
 pub use crate::config::{
diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs
index 0e7d0872a..ac45c476d 100644
--- a/src/io/vss_store.rs
+++ b/src/io/vss_store.rs
@@ -13,41 +13,30 @@ use std::panic::RefUnwindSafe;
 use std::pin::Pin;
 use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
-use std::time::Duration;
 
 use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine};
 use lightning::io::{self, Error, ErrorKind};
 use lightning::util::persist::{KVStore, KVStoreSync};
 use prost::Message;
 use rand::RngCore;
-use vss_client::client::VssClient;
-use vss_client::error::VssError;
-use vss_client::headers::VssHeaderProvider;
-use vss_client::types::{
+use vss_client_ng::client::VssClient;
+use vss_client_ng::error::VssError;
+use vss_client_ng::headers::VssHeaderProvider;
+use vss_client_ng::types::{
     DeleteObjectRequest, GetObjectRequest, KeyValue, ListKeyVersionsRequest, PutObjectRequest,
     Storable,
 };
-use vss_client::util::key_obfuscator::KeyObfuscator;
-use vss_client::util::retry::{
-    ExponentialBackoffRetryPolicy, FilteredRetryPolicy, JitteredRetryPolicy,
-    MaxAttemptsRetryPolicy, MaxTotalDelayRetryPolicy, RetryPolicy,
-};
-use vss_client::util::storable_builder::{EntropySource, StorableBuilder};
+use vss_client_ng::util::key_obfuscator::KeyObfuscator;
+use vss_client_ng::util::storable_builder::{EntropySource, StorableBuilder};
 
 use crate::io::utils::check_namespace_key_validity;
-use crate::runtime::Runtime;
-
-type CustomRetryPolicy = FilteredRetryPolicy<
-    JitteredRetryPolicy<
-        MaxTotalDelayRetryPolicy<MaxAttemptsRetryPolicy<ExponentialBackoffRetryPolicy<VssError>>>,
-    >,
-    Box<dyn Fn(&VssError) -> bool + 'static + Send + Sync>,
->;
 
 // We set this to a small number of threads that still allows us to make some progress if one
 // of them hits a blocking case.
 const INTERNAL_RUNTIME_WORKERS: usize = 2;
-const VSS_IO_TIMEOUT: Duration = Duration::from_secs(5);
+
+const HTTP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);
+const HTTP_RETRIES: u32 = 10;
 
 /// A [`KVStoreSync`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend.
 pub struct VssStore {
@@ -55,7 +44,6 @@ pub struct VssStore {
     // Version counter to ensure that writes are applied in the correct order. It is assumed that read and list
     // operations aren't sensitive to the order of execution.
     next_version: AtomicU64,
-    runtime: Arc<Runtime>,
     // A VSS-internal runtime we use to avoid any deadlocks we could hit when waiting on a spawned
     // blocking task to finish while the blocked thread had acquired the reactor. In particular,
     // this works around a previously-hit case where a concurrent call to
@@ -68,9 +56,9 @@ impl VssStore {
     pub(crate) fn new(
         base_url: String, store_id: String, vss_seed: [u8; 32],
-        header_provider: Arc<dyn VssHeaderProvider>, runtime: Arc<Runtime>,
-    ) -> Self {
-        let inner = Arc::new(VssStoreInner::new(base_url, store_id, vss_seed, header_provider));
+        header_provider: Arc<dyn VssHeaderProvider>,
+    ) -> io::Result<Self> {
+        let inner = Arc::new(VssStoreInner::new(base_url, store_id, vss_seed, header_provider)?);
         let next_version = AtomicU64::new(1);
         let internal_runtime = Some(
             tokio::runtime::Builder::new_multi_thread()
@@ -86,7 +74,7 @@ impl VssStore {
                 .unwrap(),
         );
 
-        Self { inner, next_version, runtime, internal_runtime }
+        Ok(Self { inner, next_version, internal_runtime })
     }
 
     // Same logic as for the obfuscated keys below, but just for locking, using the plaintext keys
@@ -131,15 +119,7 @@ impl KVStoreSync for VssStore {
         let inner = Arc::clone(&self.inner);
         let fut =
             async move { inner.read_internal(primary_namespace, secondary_namespace, key).await };
-        // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always
-        // times out.
-        let spawned_fut = internal_runtime.spawn(async move {
-            tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| {
-                let msg = "VssStore::read timed out";
-                Error::new(ErrorKind::Other, msg)
-            })
-        });
-        self.runtime.block_on(spawned_fut).expect("We should always finish")?
+        tokio::task::block_in_place(move || internal_runtime.block_on(fut))
     }
 
     fn write(
@@ -169,15 +149,7 @@ fn write(
             )
             .await
         };
-        // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always
-        // times out.
-        let spawned_fut = internal_runtime.spawn(async move {
-            tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| {
-                let msg = "VssStore::write timed out";
-                Error::new(ErrorKind::Other, msg)
-            })
-        });
-        self.runtime.block_on(spawned_fut).expect("We should always finish")?
+        tokio::task::block_in_place(move || internal_runtime.block_on(fut))
    }
 
     fn remove(
@@ -206,15 +178,7 @@ fn remove(
             )
             .await
         };
-        // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always
-        // times out.
-        let spawned_fut = internal_runtime.spawn(async move {
-            tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| {
-                let msg = "VssStore::remove timed out";
-                Error::new(ErrorKind::Other, msg)
-            })
-        });
-        self.runtime.block_on(spawned_fut).expect("We should always finish")?
+        tokio::task::block_in_place(move || internal_runtime.block_on(fut))
     }
 
     fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> {
@@ -227,15 +191,7 @@ fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Resul
         let primary_namespace = primary_namespace.to_string();
         let secondary_namespace = secondary_namespace.to_string();
         let inner = Arc::clone(&self.inner);
         let fut = async move { inner.list_internal(primary_namespace, secondary_namespace).await };
-        // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always
-        // times out.
-        let spawned_fut = internal_runtime.spawn(async move {
-            tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| {
-                let msg = "VssStore::list timed out";
-                Error::new(ErrorKind::Other, msg)
-            })
-        });
-        self.runtime.block_on(spawned_fut).expect("We should always finish")?
+        tokio::task::block_in_place(move || internal_runtime.block_on(fut))
     }
 }
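Worth pausing on the bridging pattern above: each sync `KVStoreSync` method now drives its future to completion on the store's dedicated internal runtime, and the `tokio::task::block_in_place` wrapper lets an ambient multi-threaded Tokio runtime migrate queued tasks off the current worker before it blocks, which is also what makes the nested `block_on` legal there. A minimal, self-contained sketch of the same idea, assuming tokio with the usual `rt-multi-thread` and `time` features; the type and function names are illustrative, not ldk-node's:

```rust
use std::time::Duration;

/// A sync facade over async I/O, mirroring the VssStore bridging pattern.
struct SyncStore {
    // Dedicated runtime reserved for the store's I/O, so blocking on it
    // cannot starve or deadlock the caller's own runtime.
    internal_runtime: tokio::runtime::Runtime,
}

impl SyncStore {
    fn new() -> Self {
        let internal_runtime = tokio::runtime::Builder::new_multi_thread()
            .worker_threads(2)
            .enable_all()
            .build()
            .unwrap();
        Self { internal_runtime }
    }

    fn read_sync(&self) -> Vec<u8> {
        let fut = async {
            // Stand-in for a network round-trip.
            tokio::time::sleep(Duration::from_millis(10)).await;
            vec![42]
        };
        // The patch wraps this call in `tokio::task::block_in_place(..)`:
        // when the caller is a worker of an ambient multi-threaded Tokio
        // runtime, that wrapper shifts other tasks off the worker first.
        // Calling `block_on` directly is fine from a plain thread like the
        // one running `main` below.
        self.internal_runtime.block_on(fut)
    }
}

fn main() {
    let store = SyncStore::new();
    assert_eq!(store.read_sync(), vec![42]);
}
```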
@@ -314,9 +270,9 @@ impl Drop for VssStore {
 }
 
 struct VssStoreInner {
-    client: VssClient<CustomRetryPolicy>,
+    client: VssClient,
     store_id: String,
-    storable_builder: StorableBuilder<RandEntropySource>,
+    data_encryption_key: [u8; 32],
     key_obfuscator: KeyObfuscator,
     // Per-key locks that ensure that we don't have concurrent writes to the same namespace/key.
     // The lock also encapsulates the latest written version per key.
     locks: Mutex<HashMap<String, Arc<Mutex<u64>>>>,
 }
@@ -327,27 +283,17 @@ impl VssStoreInner {
     pub(crate) fn new(
         base_url: String, store_id: String, vss_seed: [u8; 32],
         header_provider: Arc<dyn VssHeaderProvider>,
-    ) -> Self {
+    ) -> io::Result<Self> {
         let (data_encryption_key, obfuscation_master_key) =
             derive_data_encryption_and_obfuscation_keys(&vss_seed);
         let key_obfuscator = KeyObfuscator::new(obfuscation_master_key);
-        let storable_builder = StorableBuilder::new(data_encryption_key, RandEntropySource);
-        let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10))
-            .with_max_attempts(10)
-            .with_max_total_delay(Duration::from_secs(15))
-            .with_max_jitter(Duration::from_millis(10))
-            .skip_retry_on_error(Box::new(|e: &VssError| {
-                matches!(
-                    e,
-                    VssError::NoSuchKeyError(..)
-                        | VssError::InvalidRequestError(..)
-                        | VssError::ConflictError(..)
-                )
-            }) as _);
-
-        let client = VssClient::new_with_headers(base_url, retry_policy, header_provider);
+        let reqwest_client = build_client(&base_url).map_err(|_| {
+            let msg = format!("Failed to setup HTTP client: invalid URL");
+            Error::new(ErrorKind::Other, msg)
+        })?;
+        let client = VssClient::from_client_and_headers(base_url, reqwest_client, header_provider);
         let locks = Mutex::new(HashMap::new());
-        Self { client, store_id, storable_builder, key_obfuscator, locks }
+        Ok(Self { client, store_id, data_encryption_key, key_obfuscator, locks })
     }
 
     fn get_inner_lock_ref(&self, locking_key: String) -> Arc<Mutex<u64>> {
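For context on the `data_encryption_key` now stored directly on `VssStoreInner`: both it and the obfuscation master key are derived from `vss_seed` by `derive_data_encryption_and_obfuscation_keys`, whose body appears as context further down. A compact sketch of that HMAC-SHA256 extract step using the same `bitcoin::hashes` primitives; the salt labels here are illustrative placeholders, not the real ones:

```rust
use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine};

// Derive two independent 32-byte keys from one seed by HMAC-ing it under two
// distinct, fixed salts. The salt labels are assumptions for illustration;
// the real ones live in `derive_data_encryption_and_obfuscation_keys`.
fn derive_two_keys(seed: &[u8; 32]) -> ([u8; 32], [u8; 32]) {
    let hkdf = |ikm: &[u8], salt: &[u8]| -> [u8; 32] {
        let mut engine = HmacEngine::<sha256::Hash>::new(salt);
        engine.input(ikm);
        Hmac::<sha256::Hash>::from_engine(engine).to_byte_array()
    };
    (hkdf(seed, b"data_encryption_key"), hkdf(seed, b"obfuscation_master_key"))
}

fn main() {
    let (enc_key, obf_key) = derive_two_keys(&[7u8; 32]);
    // Distinct salts yield independent keys from the same seed.
    assert_ne!(enc_key, obf_key);
}
```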
@@ -413,9 +359,9 @@
     ) -> io::Result<Vec<u8>> {
         check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?;
 
-        let obfuscated_key =
-            self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key);
-        let request = GetObjectRequest { store_id: self.store_id.clone(), key: obfuscated_key };
+        let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key);
+        let request = GetObjectRequest { store_id: self.store_id.clone(), key: store_key.clone() };
         let resp = self.client.get_object(&request).await.map_err(|e| {
             let msg = format!(
                 "Failed to read from key {}/{}/{}: {}",
@@ -437,7 +382,11 @@
             Error::new(ErrorKind::Other, msg)
         })?;
 
-        Ok(self.storable_builder.deconstruct(storable)?.0)
+        let storable_builder = StorableBuilder::new(RandEntropySource);
+        let decrypted = storable_builder
+            .deconstruct(storable, &self.data_encryption_key, store_key.as_bytes())?
+            .0;
+        Ok(decrypted)
     }
 
     async fn write_internal(
@@ -451,22 +400,27 @@ async fn write_internal(
             "write",
         )?;
 
-        self.execute_locked_write(inner_lock_ref, locking_key, version, async move || {
-            let obfuscated_key =
-                self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key);
-            let vss_version = -1;
-            let storable = self.storable_builder.build(buf, vss_version);
-            let request = PutObjectRequest {
-                store_id: self.store_id.clone(),
-                global_version: None,
-                transaction_items: vec![KeyValue {
-                    key: obfuscated_key,
-                    version: vss_version,
-                    value: storable.encode_to_vec(),
-                }],
-                delete_items: vec![],
-            };
+        let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key);
+        let vss_version = -1;
+        let storable_builder = StorableBuilder::new(RandEntropySource);
+        let storable = storable_builder.build(
+            buf.to_vec(),
+            vss_version,
+            &self.data_encryption_key,
+            store_key.as_bytes(),
+        );
+        let request = PutObjectRequest {
+            store_id: self.store_id.clone(),
+            global_version: None,
+            transaction_items: vec![KeyValue {
+                key: store_key,
+                version: vss_version,
+                value: storable.encode_to_vec(),
+            }],
+            delete_items: vec![],
+        };
 
+        self.execute_locked_write(inner_lock_ref, locking_key, version, async move || {
             self.client.put_object(&request).await.map_err(|e| {
                 let msg = format!(
                     "Failed to write to key {}/{}/{}: {}",
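The shape of the change in `read_internal`/`write_internal`: encryption state no longer lives on a long-lived `StorableBuilder`; the data-encryption key and, judging by `store_key.as_bytes()`, the obfuscated storage key are passed per call. If the intent is to bind each ciphertext to the record it is stored under, the classic AEAD construction feeds the storage key in as associated data. A sketch of that concept using the `chacha20poly1305` crate, not `vss-client-ng`'s actual `Storable` format:

```rust
use chacha20poly1305::aead::{Aead, KeyInit, Payload};
use chacha20poly1305::{ChaCha20Poly1305, Key, Nonce};

fn main() {
    let cipher = ChaCha20Poly1305::new(Key::from_slice(&[0x42; 32]));
    // A real implementation derives a fresh random nonce per write and stores
    // it next to the ciphertext; fixed here only to keep the sketch short.
    let nonce = Nonce::from_slice(&[0x24; 12]);

    let store_key: &[u8] = b"obfuscated/namespace/key";
    let plaintext: &[u8] = b"channel monitor bytes";

    // The storage key goes in as associated data: it is authenticated but not
    // encrypted, so a blob replayed under a different key fails to decrypt.
    let ciphertext =
        cipher.encrypt(nonce, Payload { msg: plaintext, aad: store_key }).unwrap();

    let roundtrip =
        cipher.decrypt(nonce, Payload { msg: &ciphertext, aad: store_key }).unwrap();
    assert_eq!(roundtrip, plaintext);

    // Same ciphertext, different associated data: authentication fails.
    assert!(cipher
        .decrypt(nonce, Payload { msg: &ciphertext, aad: b"other/key" })
        .is_err());
}
```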
@@ -574,6 +528,32 @@
     }
 }
 
+fn build_client(base_url: &str) -> Result<reqwest::Client, ()> {
+    let url = reqwest::Url::parse(base_url).map_err(|_| ())?;
+    let host_str = url.host_str().ok_or(())?.to_string();
+    let retry = reqwest::retry::for_host(host_str)
+        .max_retries_per_request(HTTP_RETRIES)
+        .classify_fn(|req_rep| match req_rep.status() {
+            // VSS uses INTERNAL_SERVER_ERROR when sending back error responses. These are
+            // currently still covered by our `RetryPolicy`, so we tell `reqwest` not to retry them.
+            Some(reqwest::StatusCode::INTERNAL_SERVER_ERROR) => req_rep.success(),
+            Some(reqwest::StatusCode::BAD_REQUEST) => req_rep.success(),
+            Some(reqwest::StatusCode::UNAUTHORIZED) => req_rep.success(),
+            Some(reqwest::StatusCode::NOT_FOUND) => req_rep.success(),
+            Some(reqwest::StatusCode::CONFLICT) => req_rep.success(),
+            Some(reqwest::StatusCode::OK) => req_rep.success(),
+            _ => req_rep.retryable(),
+        });
+    let client = reqwest::Client::builder()
+        .timeout(HTTP_TIMEOUT)
+        .connect_timeout(HTTP_TIMEOUT)
+        .read_timeout(HTTP_TIMEOUT)
+        .retry(retry)
+        .build()
+        .unwrap();
+    Ok(client)
+}
+
 fn derive_data_encryption_and_obfuscation_keys(vss_seed: &[u8; 32]) -> ([u8; 32], [u8; 32]) {
     let hkdf = |initial_key_material: &[u8], salt: &[u8]| -> [u8; 32] {
         let mut engine = HmacEngine::<sha256::Hash>::new(salt);
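The `build_client` helper above replaces vss-client's hand-rolled `RetryPolicy` with reqwest's built-in per-host retry support: every status the VSS protocol layer understands is classified as success so that it surfaces as a `VssError` instead of being retried, while transport-level failures are retried up to `HTTP_RETRIES` times with each attempt bounded by `HTTP_TIMEOUT`. A hedged usage sketch; the `reqwest::retry` names are taken from the patch itself (a recent, still-evolving reqwest API), the classifier is simplified (the patch additionally retries unlisted statuses such as 502/503), and the endpoint is hypothetical:

```rust
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Retry only requests that never produced an HTTP response; anything the
    // server answered, even a 500, is handed to the caller as-is.
    let retry = reqwest::retry::for_host("localhost".to_string())
        .max_retries_per_request(3)
        .classify_fn(|req_rep| match req_rep.status() {
            Some(_) => req_rep.success(),
            None => req_rep.retryable(),
        });
    let client = reqwest::Client::builder()
        .timeout(Duration::from_secs(30))
        .connect_timeout(Duration::from_secs(30))
        .retry(retry)
        .build()?;

    // Hypothetical endpoint, reachable only in the VSS test environment.
    let resp = client.get("http://localhost:8080/vss").send().await?;
    println!("status: {}", resp.status());
    Ok(())
}
```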
@@ -606,11 +586,10 @@ mod tests {
     use rand::distr::Alphanumeric;
     use rand::{rng, Rng, RngCore};
-    use vss_client::headers::FixedHeaders;
+    use vss_client_ng::headers::FixedHeaders;
 
     use super::*;
     use crate::io::test_utils::do_read_write_remove_list_persist;
-    use crate::logger::Logger;
 
     #[test]
     fn vss_read_write_remove_list_persist() {
@@ -620,10 +599,8 @@
         let mut vss_seed = [0u8; 32];
         rng.fill_bytes(&mut vss_seed);
         let header_provider = Arc::new(FixedHeaders::new(HashMap::new()));
-        let logger = Arc::new(Logger::new_log_facade());
-        let runtime = Arc::new(Runtime::new(logger).unwrap());
         let vss_store =
-            VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime);
+            VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap();
 
         do_read_write_remove_list_persist(&vss_store);
     }
@@ -636,10 +613,8 @@
         let mut vss_seed = [0u8; 32];
         rng.fill_bytes(&mut vss_seed);
         let header_provider = Arc::new(FixedHeaders::new(HashMap::new()));
-        let logger = Arc::new(Logger::new_log_facade());
-        let runtime = Arc::new(Runtime::new(logger).unwrap());
         let vss_store =
-            VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime);
+            VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap();
 
         do_read_write_remove_list_persist(&vss_store);
         drop(vss_store)
diff --git a/src/lib.rs b/src/lib.rs
index 701a14dde..742470443 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -159,7 +159,7 @@ pub use types::{
 };
 pub use {
     bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio,
-    vss_client,
+    vss_client_ng,
 };
 
 use crate::scoring::setup_background_pathfinding_scores_sync;
@@ -423,89 +423,89 @@ impl Node {
             }
         });
 
-        // Regularly broadcast node announcements.
-        let bcast_cm = Arc::clone(&self.channel_manager);
-        let bcast_pm = Arc::clone(&self.peer_manager);
-        let bcast_config = Arc::clone(&self.config);
-        let bcast_store = Arc::clone(&self.kv_store);
-        let bcast_logger = Arc::clone(&self.logger);
-        let bcast_node_metrics = Arc::clone(&self.node_metrics);
-        let mut stop_bcast = self.stop_sender.subscribe();
-        let node_alias = self.config.node_alias.clone();
-        if may_announce_channel(&self.config).is_ok() {
-            self.runtime.spawn_cancellable_background_task(async move {
-                // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away.
-                #[cfg(not(test))]
-                let mut interval = tokio::time::interval(Duration::from_secs(30));
-                #[cfg(test)]
-                let mut interval = tokio::time::interval(Duration::from_secs(5));
-                loop {
-                    tokio::select! {
-                        _ = stop_bcast.changed() => {
-                            log_debug!(
-                                bcast_logger,
-                                "Stopping broadcasting node announcements.",
-                            );
-                            return;
-                        }
-                        _ = interval.tick() => {
-                            let skip_broadcast = match bcast_node_metrics.read().unwrap().latest_node_announcement_broadcast_timestamp {
-                                Some(latest_bcast_time_secs) => {
-                                    // Skip if the time hasn't elapsed yet.
-                                    let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL;
-                                    next_bcast_unix_time.elapsed().is_err()
-                                }
-                                None => {
-                                    // Don't skip if we haven't broadcasted before.
-                                    false
-                                }
-                            };
-
-                            if skip_broadcast {
-                                continue;
-                            }
-
-                            if !bcast_cm.list_channels().iter().any(|chan| chan.is_announced && chan.is_channel_ready) {
-                                // Skip if we don't have any public channels that are ready.
-                                continue;
-                            }
-
-                            if bcast_pm.list_peers().is_empty() {
-                                // Skip if we don't have any connected peers to gossip to.
-                                continue;
-                            }
-
-                            let addresses = if let Some(announcement_addresses) = bcast_config.announcement_addresses.clone() {
-                                announcement_addresses
-                            } else if let Some(listening_addresses) = bcast_config.listening_addresses.clone() {
-                                listening_addresses
-                            } else {
-                                debug_assert!(false, "We checked whether the node may announce, so listening addresses should always be set");
-                                continue;
-                            };
-
-                            if let Some(node_alias) = node_alias.as_ref() {
-                                bcast_pm.broadcast_node_announcement([0; 3], node_alias.0, addresses);
-
-                                let unix_time_secs_opt =
-                                    SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs());
-                                {
-                                    let mut locked_node_metrics = bcast_node_metrics.write().unwrap();
-                                    locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt;
-                                    write_node_metrics(&*locked_node_metrics, Arc::clone(&bcast_store), Arc::clone(&bcast_logger))
-                                        .unwrap_or_else(|e| {
-                                            log_error!(bcast_logger, "Persistence failed: {}", e);
-                                        });
-                                }
-                            } else {
-                                debug_assert!(false, "We checked whether the node may announce, so node alias should always be set");
-                                continue
-                            }
-                        }
-                    }
-                }
-            });
-        }
+        //// Regularly broadcast node announcements.
+        //let bcast_cm = Arc::clone(&self.channel_manager);
+        //let bcast_pm = Arc::clone(&self.peer_manager);
+        //let bcast_config = Arc::clone(&self.config);
+        //let bcast_store = Arc::clone(&self.kv_store);
+        //let bcast_logger = Arc::clone(&self.logger);
+        //let bcast_node_metrics = Arc::clone(&self.node_metrics);
+        //let mut stop_bcast = self.stop_sender.subscribe();
+        //let node_alias = self.config.node_alias.clone();
+        //if may_announce_channel(&self.config).is_ok() {
+        //    self.runtime.spawn_cancellable_background_task(async move {
+        //        // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away.
+        //        #[cfg(not(test))]
+        //        let mut interval = tokio::time::interval(Duration::from_secs(30));
+        //        #[cfg(test)]
+        //        let mut interval = tokio::time::interval(Duration::from_secs(5));
+        //        loop {
+        //            tokio::select! {
+        //                _ = stop_bcast.changed() => {
+        //                    log_debug!(
+        //                        bcast_logger,
+        //                        "Stopping broadcasting node announcements.",
+        //                    );
+        //                    return;
+        //                }
+        //                _ = interval.tick() => {
+        //                    let skip_broadcast = match bcast_node_metrics.read().unwrap().latest_node_announcement_broadcast_timestamp {
+        //                        Some(latest_bcast_time_secs) => {
+        //                            // Skip if the time hasn't elapsed yet.
+        //                            let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL;
+        //                            next_bcast_unix_time.elapsed().is_err()
+        //                        }
+        //                        None => {
+        //                            // Don't skip if we haven't broadcasted before.
+        //                            false
+        //                        }
+        //                    };
+
+        //                    if skip_broadcast {
+        //                        continue;
+        //                    }
+
+        //                    if !bcast_cm.list_channels().iter().any(|chan| chan.is_announced && chan.is_channel_ready) {
+        //                        // Skip if we don't have any public channels that are ready.
+        //                        continue;
+        //                    }
+
+        //                    if bcast_pm.list_peers().is_empty() {
+        //                        // Skip if we don't have any connected peers to gossip to.
+        //                        continue;
+        //                    }
+
+        //                    let addresses = if let Some(announcement_addresses) = bcast_config.announcement_addresses.clone() {
+        //                        announcement_addresses
+        //                    } else if let Some(listening_addresses) = bcast_config.listening_addresses.clone() {
+        //                        listening_addresses
+        //                    } else {
+        //                        debug_assert!(false, "We checked whether the node may announce, so listening addresses should always be set");
+        //                        continue;
+        //                    };
+
+        //                    //if let Some(node_alias) = node_alias.as_ref() {
+        //                    //    bcast_pm.broadcast_node_announcement([0; 3], node_alias.0, addresses);
+
+        //                    //    let unix_time_secs_opt =
+        //                    //        SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs());
+        //                    //    {
+        //                    //        let mut locked_node_metrics = bcast_node_metrics.write().unwrap();
+        //                    //        locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt;
+        //                    //        write_node_metrics(&*locked_node_metrics, Arc::clone(&bcast_store), Arc::clone(&bcast_logger))
+        //                    //            .unwrap_or_else(|e| {
+        //                    //                log_error!(bcast_logger, "Persistence failed: {}", e);
+        //                    //            });
+        //                    //    }
+        //                    //} else {
+        //                    //    debug_assert!(false, "We checked whether the node may announce, so node alias should always be set");
+        //                    //    continue
+        //                    //}
+        //                }
+        //            }
+        //        }
+        //    });
+        //}
 
         let stop_tx_bcast = self.stop_sender.subscribe();
         let chain_source = Arc::clone(&self.chain_source);
diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs
index 93f167dae..a9c397520 100644
--- a/tests/integration_tests_vss.rs
+++ b/tests/integration_tests_vss.rs
@@ -16,42 +16,44 @@ use ldk_node::Builder;
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 async fn channel_full_cycle_with_vss_store() {
     let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd();
-    println!("== Node A ==");
-    let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
-    let config_a = common::random_config(true);
-    let mut builder_a = Builder::from_config(config_a.node_config);
-    builder_a.set_chain_source_esplora(esplora_url.clone(), None);
-    let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap();
-    let node_a = builder_a
-        .build_with_vss_store_and_fixed_headers(
-            vss_base_url.clone(),
-            "node_1_store".to_string(),
-            HashMap::new(),
-        )
-        .unwrap();
-    node_a.start().unwrap();
+    for i in 1..100 {
+        println!("Run {}: == Node A ==", i);
+        let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
+        let config_a = common::random_config(true);
+        let mut builder_a = Builder::from_config(config_a.node_config);
+        builder_a.set_chain_source_esplora(esplora_url.clone(), None);
+        let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap();
+        let node_a = builder_a
+            .build_with_vss_store_and_fixed_headers(
+                vss_base_url.clone(),
+                format!("node_{}_1_store", i),
+                HashMap::new(),
+            )
+            .unwrap();
+        node_a.start().unwrap();
 
-    println!("\n== Node B ==");
-    let config_b = common::random_config(true);
-    let mut builder_b = Builder::from_config(config_b.node_config);
-    builder_b.set_chain_source_esplora(esplora_url.clone(), None);
-    let node_b = builder_b
-        .build_with_vss_store_and_fixed_headers(
-            vss_base_url,
-            "node_2_store".to_string(),
-            HashMap::new(),
-        )
-        .unwrap();
-    node_b.start().unwrap();
+        println!("\nRun {}: == Node B ==", i);
+        let config_b = common::random_config(true);
+        let mut builder_b = Builder::from_config(config_b.node_config);
+        builder_b.set_chain_source_esplora(esplora_url.clone(), None);
+        let node_b = builder_b
+            .build_with_vss_store_and_fixed_headers(
+                vss_base_url,
+                format!("node_{}_2_store", i),
+                HashMap::new(),
+            )
+            .unwrap();
+        node_b.start().unwrap();
 
-    common::do_channel_full_cycle(
-        node_a,
-        node_b,
-        &bitcoind.client,
-        &electrsd.client,
-        false,
-        true,
-        false,
-    )
-    .await;
+        common::do_channel_full_cycle(
+            node_a,
+            node_b,
+            &bitcoind.client,
+            &electrsd.client,
+            false,
+            true,
+            false,
+        )
+        .await;
+    }
 }