From 45bdf729f6f572119e9ab9a6d8a5eb974fbcea3b Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 10 Mar 2025 11:47:03 +0700 Subject: [PATCH 01/58] test: add realloc ix to flexi counter --- .../programs/flexi-counter/src/instruction.rs | 43 ++++++++++++++++ .../programs/flexi-counter/src/processor.rs | 49 +++++++++++++++++++ 2 files changed, 92 insertions(+) diff --git a/test-integration/programs/flexi-counter/src/instruction.rs b/test-integration/programs/flexi-counter/src/instruction.rs index c0f1cebd..7e061ea3 100644 --- a/test-integration/programs/flexi-counter/src/instruction.rs +++ b/test-integration/programs/flexi-counter/src/instruction.rs @@ -17,6 +17,8 @@ pub struct DelegateArgs { pub commit_frequency_ms: u32, } +pub const MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE: u16 = 10_240; + /// The counter has both mul and add instructions in order to facilitate tests where /// order matters. For example in the case of the following operations: /// +4, *2 @@ -32,6 +34,25 @@ pub enum FlexiCounterInstruction { /// 2. `[]` The system program account. Init { label: String, bump: u8 }, + /// Increases the size of the FlexiCounter to reach the given bytes. + /// Max increase is [MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE] per instruction + /// which means this instruction needs to be called multiple times to reach + /// the desired size. + /// + /// NOTE: that the account needs to be funded for the full desired account size + /// via an airdrop after [FlexiCounterInstruction::Init]. + /// + /// Accounts: + /// 0. `[signer]` The payer that created and is resizing the account. + /// 1. `[write]` The counter PDA account whose size we are increasing. + /// 2. `[]` The system program account. + Realloc { + /// The target size we try to resize to. + bytes: u64, + /// The count of invocations of realloc that this instruction represents. + invocation_count: u16, + }, + /// Updates the FlexiCounter by adding the count to it. 
/// /// Accounts: @@ -94,6 +115,28 @@ pub fn create_init_ix(payer: Pubkey, label: String) -> Instruction { ) } +pub fn create_realloc_ix( + payer: Pubkey, + bytes: u64, + invocation_count: u16, +) -> Instruction { + let program_id = &crate::id(); + let (pda, _) = FlexiCounter::pda(&payer); + let accounts = vec![ + AccountMeta::new(payer, true), + AccountMeta::new(pda, false), + AccountMeta::new_readonly(system_program::id(), false), + ]; + Instruction::new_with_borsh( + *program_id, + &FlexiCounterInstruction::Realloc { + bytes, + invocation_count, + }, + accounts, + ) +} + pub fn create_add_ix(payer: Pubkey, count: u8) -> Instruction { let program_id = &crate::id(); let (pda, _) = FlexiCounter::pda(&payer); diff --git a/test-integration/programs/flexi-counter/src/processor.rs b/test-integration/programs/flexi-counter/src/processor.rs index 1807ba7e..9d43b371 100644 --- a/test-integration/programs/flexi-counter/src/processor.rs +++ b/test-integration/programs/flexi-counter/src/processor.rs @@ -17,6 +17,7 @@ use solana_program::{ sysvar::Sysvar, }; +use crate::instruction::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE; use crate::{ instruction::{DelegateArgs, FlexiCounterInstruction}, state::FlexiCounter, @@ -42,6 +43,10 @@ pub fn process( use FlexiCounterInstruction::*; match ix { Init { label, bump } => process_init(program_id, accounts, label, bump), + Realloc { + bytes, + invocation_count, + } => process_realloc(accounts, bytes, invocation_count), Add { count } => process_add(accounts, count), Mul { multiplier } => process_mul(accounts, multiplier), Delegate(args) => process_delegate(accounts, &args), @@ -98,6 +103,50 @@ fn process_init( Ok(()) } +fn process_realloc( + accounts: &[AccountInfo], + bytes: u64, + invocation_count: u16, +) -> ProgramResult { + msg!("Instruction: Realloc {}", invocation_count); + + let account_info_iter = &mut accounts.iter(); + let payer_info = next_account_info(account_info_iter)?; + let counter_pda_info = 
next_account_info(account_info_iter)?; + + let (counter_pda, _) = FlexiCounter::pda(payer_info.key); + assert_keys_equal(counter_pda_info.key, &counter_pda, || { + format!( + "Invalid Counter PDA {}, should be {}", + counter_pda_info.key, counter_pda + ) + })?; + + let current_size = counter_pda_info.data.borrow().len() as u64; + if current_size >= bytes { + msg!( + "Counter account already has {} bytes, no need to realloc", + counter_pda_info.data.borrow().len() + ); + return Ok(()); + } + + let next_alloc_size = std::cmp::min( + bytes, + current_size + MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64, + ); + + msg!( + "Allocating from {} to {} of desired {} bytes.", + current_size, + next_alloc_size, + bytes + ); + + counter_pda_info.realloc(next_alloc_size as usize, true)?; + Ok(()) +} + fn process_add(accounts: &[AccountInfo], count: u8) -> ProgramResult { msg!("Add {}", count); From 2e618824d32b99a58b753a51a0bd52f5a05cb56d Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 7 May 2025 11:14:00 +0700 Subject: [PATCH 02/58] chore: resort workspace depencencies --- Cargo.toml | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5f33534c..b1de0828 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,15 +58,12 @@ bincode = "1.3.3" bs58 = "0.4.0" byteorder = "1.5.0" cargo-lock = "10.0.0" -expiring-hashmap = { path = "./utils/expiring-hashmap" } conjunto-transwise = { git = "https://github.com/magicblock-labs/conjunto.git", rev = "bf82b45" } console-subscriber = "0.2.0" -isocountry = "0.3.2" crossbeam-channel = "0.5.11" enum-iterator = "1.5.0" env_logger = "0.11.2" -magic-domain-program = { git = "https://github.com/magicblock-labs/magic-domain-program.git", rev = "eba7644", default-features = false} -magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", rev = "4af7f1c" } +expiring-hashmap = { path = "./utils/expiring-hashmap" } 
fd-lock = "4.0.2" fs_extra = "1.3.0" futures-util = "0.3.30" @@ -76,6 +73,7 @@ hostname = "0.4.0" http-body-util = "0.1.2" hyper = "1.4.1" hyper-util = "0.1.9" +isocountry = "0.3.2" itertools = "0.14" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" @@ -87,21 +85,7 @@ lazy_static = "1.4.0" libc = "0.2.153" libloading = "0.7.4" log = "0.4.20" -num_cpus = "1.16.0" -num-derive = "0.4" -num-format = "0.4.4" -num-traits = "0.2" -paste = "1.0" -prometheus = "0.13.4" -# Needs to match https://crates.io/crates/solana-storage-bigtable/2.1.13/dependencies -prost = "0.11.9" -rand = "0.8.5" -rayon = "1.10.0" -rustc_version = "0.4" -semver = "1.0.22" -serde = "1.0.217" -serde_derive = "1.0" -serde_json = "1.0" +magic-domain-program = { git = "https://github.com/magicblock-labs/magic-domain-program.git", rev = "eba7644", default-features = false } magicblock-account-cloner = { path = "./magicblock-account-cloner" } magicblock-account-dumper = { path = "./magicblock-account-dumper" } magicblock-account-fetcher = { path = "./magicblock-account-fetcher" } @@ -113,6 +97,7 @@ magicblock-api = { path = "./magicblock-api" } magicblock-bank = { path = "./magicblock-bank" } magicblock-config = { path = "./magicblock-config" } magicblock-core = { path = "./magicblock-core" } +magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", rev = "4af7f1c" } magicblock-geyser-plugin = { path = "./magicblock-geyser-plugin" } magicblock-ledger = { path = "./magicblock-ledger" } magicblock-metrics = { path = "./magicblock-metrics" } @@ -125,7 +110,22 @@ magicblock-rpc = { path = "./magicblock-rpc" } magicblock-tokens = { path = "./magicblock-tokens" } magicblock-transaction-status = { path = "./magicblock-transaction-status" } magicblock-version = { path = "./magicblock-version" } +num-derive = "0.4" +num-format = "0.4.4" +num-traits = "0.2" +num_cpus = "1.16.0" +paste = "1.0" +prometheus = "0.13.4" +# Needs to match 
https://crates.io/crates/solana-storage-bigtable/2.1.13/dependencies +prost = "0.11.9" protobuf-src = "1.1" +rand = "0.8.5" +rayon = "1.10.0" +rustc_version = "0.4" +semver = "1.0.22" +serde = "1.0.217" +serde_derive = "1.0" +serde_json = "1.0" solana-account = { git = "https://github.com/magicblock-labs/solana-account.git", rev = "7bdfefc" } solana-accounts-db = { version = "2.2" } solana-account-decoder = { version = "2.2" } @@ -150,7 +150,7 @@ solana-rpc = "2.2" solana-rpc-client = { version = "2.2" } solana-rpc-client-api = { version = "2.2" } solana-sdk = { version = "2.2" } -solana-svm = { version = "2.2", features = [ "dev-context-only-utils" ] } +solana-svm = { version = "2.2", features = ["dev-context-only-utils"] } solana-svm-transaction = { version = "2.2" } solana-storage-proto = { path = "storage-proto" } solana-system-program = { version = "2.2" } @@ -164,11 +164,11 @@ tempfile = "3.10.1" test-tools = { path = "./test-tools" } test-tools-core = { path = "./test-tools-core" } thiserror = "1.0.57" -toml = "0.8.13" # Update solana-tokio patch below when updating this version tokio = "1.0" tokio-stream = "0.1.15" tokio-util = "0.7.10" +toml = "0.8.13" # Tonic version 11 conflicts with lower level deps of solana and 0.9.x is the last # version that allows prost 0.11.x to be used tonic = "0.9.2" @@ -181,5 +181,5 @@ vergen = "8.3.1" # some solana dependencies have solana-storage-proto as dependency # we need to patch them with our version, because they use protobuf-src v1.1.0 # and we use protobuf-src v2.1.1. 
Otherwise compilation fails -solana-storage-proto = { path = "./storage-proto" } solana-account = { git = "https://github.com/magicblock-labs/solana-account.git", rev = "7bdfefc" } +solana-storage-proto = { path = "./storage-proto" } From 6806acf2a049dcdf94bbd5d1ae3df54c515f64f8 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 7 May 2025 11:32:53 +0700 Subject: [PATCH 03/58] chore: retire old remote scheduled commits processor and add new one --- Cargo.lock | 144 ++++++++- Cargo.toml | 1 + magicblock-accounts/Cargo.toml | 1 + magicblock-accounts/src/accounts_manager.rs | 17 +- magicblock-accounts/src/lib.rs | 1 + .../old_remote_scheduled_commits_processor.rs | 300 +++++++++++++++++ .../src/remote_scheduled_commits_processor.rs | 301 +----------------- 7 files changed, 465 insertions(+), 300 deletions(-) create mode 100644 magicblock-accounts/src/old_remote_scheduled_commits_processor.rs diff --git a/Cargo.lock b/Cargo.lock index 3ee589a4..fa5a174c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -747,7 +747,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115e54d64eb62cdebad391c19efc9dce4981c690c85a33a12199d99bb9546fee" dependencies = [ "borsh-derive 0.10.4", - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -1185,7 +1185,7 @@ dependencies = [ "conjunto-addresses", "conjunto-core", "conjunto-providers", - "magicblock-delegation-program", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", "serde", "solana-rpc-client", "solana-rpc-client-api", @@ -1859,6 +1859,18 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = 
"fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "fast-math" version = "0.1.1" @@ -1955,6 +1967,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -2350,6 +2368,18 @@ name = "hashbrown" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "foldhash", +] + +[[package]] +name = "hashlink" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +dependencies = [ + "hashbrown 0.15.2", +] [[package]] name = "hdrhistogram" @@ -3359,6 +3389,17 @@ dependencies = [ "libsecp256k1-core", ] +[[package]] +name = "libsqlite3-sys" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb8270bb4060bd76c6e96f20c52d80620f1d82a3470885694e41e0f81ef6fe7" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" version = "1.1.20" @@ -3559,8 +3600,9 @@ dependencies = [ "magicblock-account-updates", "magicblock-accounts-api", "magicblock-bank", + "magicblock-committor-service", "magicblock-core", - "magicblock-delegation-program", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", "magicblock-metrics", "magicblock-mutator", "magicblock-processor", @@ -3687,6 +3729,44 @@ dependencies = [ 
"test-tools-core", ] +[[package]] +name = "magicblock-committor-program" +version = "0.0.0" +dependencies = [ + "borsh 1.5.5", + "borsh-derive 1.5.5", + "log", + "paste", + "solana-account", + "solana-program", + "solana-pubkey", + "thiserror 2.0.12", +] + +[[package]] +name = "magicblock-committor-service" +version = "0.0.0" +dependencies = [ + "base64 0.22.1", + "bincode", + "borsh 1.5.5", + "log", + "magicblock-committor-program", + "magicblock-delegation-program 1.0.0", + "magicblock-rpc-client", + "magicblock-table-mania", + "rusqlite", + "solana-account", + "solana-pubkey", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-transaction-status-client-types", + "thiserror 2.0.12", + "tokio", + "tokio-util 0.7.13", +] + [[package]] name = "magicblock-config" version = "0.1.1" @@ -3710,6 +3790,21 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "magicblock-delegation-program" +version = "1.0.0" +dependencies = [ + "bincode", + "borsh 1.5.5", + "bytemuck", + "num_enum", + "paste", + "solana-curve25519", + "solana-program", + "solana-security-txt", + "thiserror 1.0.69", +] + [[package]] name = "magicblock-delegation-program" version = "1.0.0" @@ -3924,6 +4019,35 @@ dependencies = [ "tokio", ] +[[package]] +name = "magicblock-rpc-client" +version = "0.0.0" +dependencies = [ + "log", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-transaction-status-client-types", + "thiserror 2.0.12", + "tokio", +] + +[[package]] +name = "magicblock-table-mania" +version = "0.0.0" +dependencies = [ + "ed25519-dalek", + "log", + "magicblock-rpc-client", + "sha3", + "solana-pubkey", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "thiserror 2.0.12", + "tokio", +] + [[package]] name = "magicblock-tokens" version = "0.1.1" @@ -5420,6 +5544,20 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "rusqlite" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "37e34486da88d8e051c7c0e23c3f15fd806ea8546260aa2fec247e97242ec143" +dependencies = [ + "bitflags 2.9.0", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] + [[package]] name = "rustc-demangle" version = "0.1.24" diff --git a/Cargo.toml b/Cargo.toml index b1de0828..cf4c8db3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -95,6 +95,7 @@ magicblock-accounts-api = { path = "./magicblock-accounts-api" } magicblock-accounts-db = { path = "./magicblock-accounts-db" } magicblock-api = { path = "./magicblock-api" } magicblock-bank = { path = "./magicblock-bank" } +magicblock-committor-service = { path = "../comittor/magicblock-committor-service" } magicblock-config = { path = "./magicblock-config" } magicblock-core = { path = "./magicblock-core" } magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", rev = "4af7f1c" } diff --git a/magicblock-accounts/Cargo.toml b/magicblock-accounts/Cargo.toml index b4fe0a0d..f91cdf0a 100644 --- a/magicblock-accounts/Cargo.toml +++ b/magicblock-accounts/Cargo.toml @@ -19,6 +19,7 @@ magicblock-account-dumper = { workspace = true } magicblock-account-cloner = { workspace = true } magicblock-accounts-api = { workspace = true } magicblock-bank = { workspace = true } +magicblock-committor-service = { workspace = true } magicblock-core = { workspace = true } magicblock-metrics = { workspace = true } magicblock-mutator = { workspace = true } diff --git a/magicblock-accounts/src/accounts_manager.rs b/magicblock-accounts/src/accounts_manager.rs index 5cd83652..93d75ec1 100644 --- a/magicblock-accounts/src/accounts_manager.rs +++ b/magicblock-accounts/src/accounts_manager.rs @@ -13,8 +13,8 @@ use solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair}; use crate::{ config::AccountsConfig, errors::AccountsResult, + old_remote_scheduled_commits_processor::OldRemoteScheduledCommitsProcessor, 
remote_account_committer::RemoteAccountCommitter, - remote_scheduled_commits_processor::RemoteScheduledCommitsProcessor, utils::try_rpc_cluster_from_cluster, ExternalAccountsManager, }; @@ -24,7 +24,7 @@ pub type AccountsManager = ExternalAccountsManager< RemoteAccountCommitter, TransactionAccountsExtractorImpl, TransactionAccountsValidatorImpl, - RemoteScheduledCommitsProcessor, + OldRemoteScheduledCommitsProcessor, >; impl AccountsManager { @@ -49,12 +49,13 @@ impl AccountsManager { config.commit_compute_unit_price, ); - let scheduled_commits_processor = RemoteScheduledCommitsProcessor::new( - remote_cluster, - bank.clone(), - cloned_accounts.clone(), - transaction_status_sender.clone(), - ); + let scheduled_commits_processor = + OldRemoteScheduledCommitsProcessor::new( + remote_cluster, + bank.clone(), + cloned_accounts.clone(), + transaction_status_sender.clone(), + ); Ok(Self { internal_account_provider, diff --git a/magicblock-accounts/src/lib.rs b/magicblock-accounts/src/lib.rs index ec28920c..a4e48da4 100644 --- a/magicblock-accounts/src/lib.rs +++ b/magicblock-accounts/src/lib.rs @@ -2,6 +2,7 @@ mod accounts_manager; mod config; pub mod errors; mod external_accounts_manager; +mod old_remote_scheduled_commits_processor; mod remote_account_committer; mod remote_scheduled_commits_processor; mod traits; diff --git a/magicblock-accounts/src/old_remote_scheduled_commits_processor.rs b/magicblock-accounts/src/old_remote_scheduled_commits_processor.rs new file mode 100644 index 00000000..d42eb903 --- /dev/null +++ b/magicblock-accounts/src/old_remote_scheduled_commits_processor.rs @@ -0,0 +1,300 @@ +use std::{collections::HashSet, sync::Arc}; + +use async_trait::async_trait; +use conjunto_transwise::AccountChainSnapshot; +use log::*; +use magicblock_account_cloner::{ + AccountClonerOutput, AccountClonerOutput::Cloned, CloneOutputMap, +}; +use magicblock_accounts_api::InternalAccountProvider; +use magicblock_bank::bank::Bank; +use magicblock_core::debug_panic; +use 
magicblock_metrics::metrics; +use magicblock_mutator::Cluster; +use magicblock_processor::execute_transaction::execute_legacy_transaction; +use magicblock_program::{ + register_scheduled_commit_sent, FeePayerAccount, SentCommit, + TransactionScheduler, +}; +use magicblock_transaction_status::TransactionStatusSender; +use solana_sdk::{pubkey::Pubkey, signature::Signature}; + +use crate::{ + errors::{AccountsError, AccountsResult}, + remote_account_committer::update_account_commit_metrics, + AccountCommittee, AccountCommitter, ScheduledCommitsProcessor, + SendableCommitAccountsPayload, +}; + +pub struct OldRemoteScheduledCommitsProcessor { + #[allow(unused)] + cluster: Cluster, + bank: Arc, + transaction_status_sender: Option, + transaction_scheduler: TransactionScheduler, + cloned_accounts: CloneOutputMap, +} + +#[async_trait] +impl ScheduledCommitsProcessor for OldRemoteScheduledCommitsProcessor { + async fn process( + &self, + committer: &Arc, + account_provider: &IAP, + ) -> AccountsResult<()> + where + AC: AccountCommitter, + IAP: InternalAccountProvider, + { + let scheduled_commits = + self.transaction_scheduler.take_scheduled_commits(); + + if scheduled_commits.is_empty() { + return Ok(()); + } + + let mut sendable_payloads_queue = vec![]; + for commit in scheduled_commits { + info!("Processing commit: {:?}", commit); + + // Determine which accounts are available and can be committed + let mut committees = vec![]; + let all_pubkeys: HashSet = HashSet::from_iter( + commit + .accounts + .iter() + .map(|ca| ca.pubkey) + .collect::>(), + ); + let mut feepayers = HashSet::new(); + + for committed_account in commit.accounts { + let mut commitment_pubkey = committed_account.pubkey; + let mut commitment_pubkey_owner = committed_account.owner; + if let Some(Cloned { + account_chain_snapshot, + .. 
+ }) = Self::fetch_cloned_account( + &committed_account.pubkey, + &self.cloned_accounts, + ) { + // If the account is a FeePayer, we committed the mapped delegated account + if account_chain_snapshot.chain_state.is_feepayer() { + commitment_pubkey = + AccountChainSnapshot::ephemeral_balance_pda( + &committed_account.pubkey, + ); + commitment_pubkey_owner = + AccountChainSnapshot::ephemeral_balance_pda_owner(); + feepayers.insert(FeePayerAccount { + pubkey: committed_account.pubkey, + delegated_pda: commitment_pubkey, + }); + } else if account_chain_snapshot + .chain_state + .is_undelegated() + { + error!("Scheduled commit account '{}' is undelegated. This is not supported.", committed_account.pubkey); + } + } + + match account_provider.get_account(&committed_account.pubkey) { + Some(account_data) => { + committees.push(AccountCommittee { + pubkey: commitment_pubkey, + owner: commitment_pubkey_owner, + account_data, + slot: commit.slot, + undelegation_requested: commit.request_undelegation, + }); + } + None => { + error!( + "Scheduled commmit account '{}' not found. 
It must have gotten undelegated and removed since it was scheduled.", + committed_account.pubkey + ); + } + } + } + + let payloads = vec![ + committer + .create_commit_accounts_transaction(committees) + .await?, + ]; + + // Determine which payloads are a noop since all accounts are up to date + // and which require a commit to chain + let mut included_pubkeys = HashSet::new(); + let sendable_payloads = payloads + .into_iter() + .filter_map(|payload| { + if let Some(transaction) = payload.transaction { + included_pubkeys.extend( + payload + .committees + .iter() + .map(|(pubkey, _)| *pubkey), + ); + Some(SendableCommitAccountsPayload { + transaction, + committees: payload.committees, + }) + } else { + None + } + }) + .collect::>(); + + // Tally up the pubkeys that will not be committed since the account + // was not available as determined when creating sendable payloads + let excluded_pubkeys = all_pubkeys + .into_iter() + .filter(|pubkey| { + !included_pubkeys.contains(pubkey) + && !included_pubkeys.contains( + &AccountChainSnapshot::ephemeral_balance_pda( + pubkey, + ), + ) + }) + .collect::>(); + + // Extract signatures of all transactions that we will execute on + // chain in order to realize the commits needed + let signatures = sendable_payloads + .iter() + .map(|payload| payload.get_signature()) + .collect::>(); + + // Record that we are about to send the commit to chain including all + // information (mainly signatures) needed to track its outcome on chain + let sent_commit = SentCommit { + commit_id: commit.id, + slot: commit.slot, + blockhash: commit.blockhash, + payer: commit.payer, + chain_signatures: signatures, + included_pubkeys: included_pubkeys.into_iter().collect(), + excluded_pubkeys, + feepayers, + requested_undelegation: commit.request_undelegation, + }; + register_scheduled_commit_sent(sent_commit); + let signature = execute_legacy_transaction( + commit.commit_sent_transaction, + &self.bank, + self.transaction_status_sender.as_ref(), + ) + 
.map_err(Box::new)?; + + // In the case that no account needs to be committed we record that in + // our ledger and are done + if sendable_payloads.is_empty() { + debug!( + "Signaled no commit needed with internal signature: {:?}", + signature + ); + continue; + } else { + debug!( + "Signaled commit with internal signature: {:?}", + signature + ); + } + + // Queue up the actual commit + sendable_payloads_queue.extend(sendable_payloads); + } + + self.process_accounts_commits_in_background( + committer, + sendable_payloads_queue, + ); + + Ok(()) + } + + fn scheduled_commits_len(&self) -> usize { + self.transaction_scheduler.scheduled_commits_len() + } + + fn clear_scheduled_commits(&self) { + self.transaction_scheduler.clear_scheduled_commits(); + } +} + +impl OldRemoteScheduledCommitsProcessor { + pub(crate) fn new( + cluster: Cluster, + bank: Arc, + cloned_accounts: CloneOutputMap, + transaction_status_sender: Option, + ) -> Self { + Self { + cluster, + bank, + transaction_status_sender, + cloned_accounts, + transaction_scheduler: TransactionScheduler::default(), + } + } + + fn process_accounts_commits_in_background( + &self, + committer: &Arc, + sendable_payloads_queue: Vec, + ) { + // We process the queue on a separate task in order to not block + // the validator (slot advance) itself + // NOTE: @@ we have to be careful here and ensure that the validator does not + // shutdown before this task is done + // We will need some tracking machinery which is overkill until we get to the + // point where we do allow validator shutdown + let committer = committer.clone(); + tokio::task::spawn(async move { + let pending_commits = match committer + .send_commit_transactions(sendable_payloads_queue) + .await + { + Ok(pending) => pending, + Err(AccountsError::FailedToSendCommitTransaction( + err, + commit_and_undelegate_accounts, + commit_only_accounts, + )) => { + update_account_commit_metrics( + &commit_and_undelegate_accounts, + &commit_only_accounts, + 
metrics::Outcome::Error, + None, + ); + debug_panic!( + "Failed to send commit transactions: {:?}", + err + ); + return; + } + Err(err) => { + debug_panic!( + "Failed to send commit transactions, received invalid err: {:?}", + err + ); + return; + } + }; + + committer.confirm_pending_commits(pending_commits).await; + }); + } + + fn fetch_cloned_account( + pubkey: &Pubkey, + cloned_accounts: &CloneOutputMap, + ) -> Option { + cloned_accounts + .read() + .expect("RwLock of RemoteAccountClonerWorker.last_clone_output is poisoned") + .get(pubkey).cloned() + } +} diff --git a/magicblock-accounts/src/remote_scheduled_commits_processor.rs b/magicblock-accounts/src/remote_scheduled_commits_processor.rs index 91d7cc6d..5c5fc0dd 100644 --- a/magicblock-accounts/src/remote_scheduled_commits_processor.rs +++ b/magicblock-accounts/src/remote_scheduled_commits_processor.rs @@ -1,300 +1,23 @@ -use std::{collections::HashSet, sync::Arc}; +use std::sync::Arc; -use async_trait::async_trait; -use conjunto_transwise::AccountChainSnapshot; -use log::*; -use magicblock_account_cloner::{ - AccountClonerOutput, AccountClonerOutput::Cloned, CloneOutputMap, -}; use magicblock_accounts_api::InternalAccountProvider; -use magicblock_bank::bank::Bank; -use magicblock_core::debug_panic; -use magicblock_metrics::metrics; -use magicblock_mutator::Cluster; -use magicblock_processor::execute_transaction::execute_legacy_transaction; -use magicblock_program::{ - register_scheduled_commit_sent, FeePayerAccount, SentCommit, - TransactionScheduler, -}; -use magicblock_transaction_status::TransactionStatusSender; -use solana_sdk::{pubkey::Pubkey, signature::Signature}; +use magicblock_committor_service::CommittorService; -use crate::{ - errors::{AccountsError, AccountsResult}, - remote_account_committer::update_account_commit_metrics, - AccountCommittee, AccountCommitter, ScheduledCommitsProcessor, - SendableCommitAccountsPayload, -}; +use crate::errors::AccountsResult; -pub struct 
RemoteScheduledCommitsProcessor { - #[allow(unused)] - cluster: Cluster, - bank: Arc, - transaction_status_sender: Option, - transaction_scheduler: TransactionScheduler, - cloned_accounts: CloneOutputMap, -} - -#[async_trait] -impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { - async fn process( - &self, - committer: &Arc, - account_provider: &IAP, - ) -> AccountsResult<()> - where - AC: AccountCommitter, - IAP: InternalAccountProvider, - { - let scheduled_commits = - self.transaction_scheduler.take_scheduled_commits(); - - if scheduled_commits.is_empty() { - return Ok(()); - } - - let mut sendable_payloads_queue = vec![]; - for commit in scheduled_commits { - info!("Processing commit: {:?}", commit); - - // Determine which accounts are available and can be committed - let mut committees = vec![]; - let all_pubkeys: HashSet = HashSet::from_iter( - commit - .accounts - .iter() - .map(|ca| ca.pubkey) - .collect::>(), - ); - let mut feepayers = HashSet::new(); - - for committed_account in commit.accounts { - let mut commitment_pubkey = committed_account.pubkey; - let mut commitment_pubkey_owner = committed_account.owner; - if let Some(Cloned { - account_chain_snapshot, - .. - }) = Self::fetch_cloned_account( - &committed_account.pubkey, - &self.cloned_accounts, - ) { - // If the account is a FeePayer, we committed the mapped delegated account - if account_chain_snapshot.chain_state.is_feepayer() { - commitment_pubkey = - AccountChainSnapshot::ephemeral_balance_pda( - &committed_account.pubkey, - ); - commitment_pubkey_owner = - AccountChainSnapshot::ephemeral_balance_pda_owner(); - feepayers.insert(FeePayerAccount { - pubkey: committed_account.pubkey, - delegated_pda: commitment_pubkey, - }); - } else if account_chain_snapshot - .chain_state - .is_undelegated() - { - error!("Scheduled commit account '{}' is undelegated. 
This is not supported.", committed_account.pubkey); - } - } - - match account_provider.get_account(&committed_account.pubkey) { - Some(account_data) => { - committees.push(AccountCommittee { - pubkey: commitment_pubkey, - owner: commitment_pubkey_owner, - account_data, - slot: commit.slot, - undelegation_requested: commit.request_undelegation, - }); - } - None => { - error!( - "Scheduled commmit account '{}' not found. It must have gotten undelegated and removed since it was scheduled.", - committed_account.pubkey - ); - } - } - } - - let payloads = vec![ - committer - .create_commit_accounts_transaction(committees) - .await?, - ]; - - // Determine which payloads are a noop since all accounts are up to date - // and which require a commit to chain - let mut included_pubkeys = HashSet::new(); - let sendable_payloads = payloads - .into_iter() - .filter_map(|payload| { - if let Some(transaction) = payload.transaction { - included_pubkeys.extend( - payload - .committees - .iter() - .map(|(pubkey, _)| *pubkey), - ); - Some(SendableCommitAccountsPayload { - transaction, - committees: payload.committees, - }) - } else { - None - } - }) - .collect::>(); - - // Tally up the pubkeys that will not be committed since the account - // was not available as determined when creating sendable payloads - let excluded_pubkeys = all_pubkeys - .into_iter() - .filter(|pubkey| { - !included_pubkeys.contains(pubkey) - && !included_pubkeys.contains( - &AccountChainSnapshot::ephemeral_balance_pda( - pubkey, - ), - ) - }) - .collect::>(); - - // Extract signatures of all transactions that we will execute on - // chain in order to realize the commits needed - let signatures = sendable_payloads - .iter() - .map(|payload| payload.get_signature()) - .collect::>(); - - // Record that we are about to send the commit to chain including all - // information (mainly signatures) needed to track its outcome on chain - let sent_commit = SentCommit { - commit_id: commit.id, - slot: commit.slot, - 
blockhash: commit.blockhash, - payer: commit.payer, - chain_signatures: signatures, - included_pubkeys: included_pubkeys.into_iter().collect(), - excluded_pubkeys, - feepayers, - requested_undelegation: commit.request_undelegation, - }; - register_scheduled_commit_sent(sent_commit); - let signature = execute_legacy_transaction( - commit.commit_sent_transaction, - &self.bank, - self.transaction_status_sender.as_ref(), - ) - .map_err(Box::new)?; - - // In the case that no account needs to be committed we record that in - // our ledger and are done - if sendable_payloads.is_empty() { - debug!( - "Signaled no commit needed with internal signature: {:?}", - signature - ); - continue; - } else { - debug!( - "Signaled commit with internal signature: {:?}", - signature - ); - } - - // Queue up the actual commit - sendable_payloads_queue.extend(sendable_payloads); - } - - self.process_accounts_commits_in_background( - committer, - sendable_payloads_queue, - ); - - Ok(()) - } - - fn scheduled_commits_len(&self) -> usize { - self.transaction_scheduler.scheduled_commits_len() - } - - fn clear_scheduled_commits(&self) { - self.transaction_scheduler.clear_scheduled_commits(); - } +struct RemoteScheduledCommitsProcessor { + committer_service: Arc, } impl RemoteScheduledCommitsProcessor { - pub(crate) fn new( - cluster: Cluster, - bank: Arc, - cloned_accounts: CloneOutputMap, - transaction_status_sender: Option, - ) -> Self { - Self { - cluster, - bank, - transaction_status_sender, - cloned_accounts, - transaction_scheduler: TransactionScheduler::default(), - } - } - - fn process_accounts_commits_in_background( - &self, - committer: &Arc, - sendable_payloads_queue: Vec, - ) { - // We process the queue on a separate task in order to not block - // the validator (slot advance) itself - // NOTE: @@ we have to be careful here and ensure that the validator does not - // shutdown before this task is done - // We will need some tracking machinery which is overkill until we get to the - 
// point where we do allow validator shutdown - let committer = committer.clone(); - tokio::task::spawn(async move { - let pending_commits = match committer - .send_commit_transactions(sendable_payloads_queue) - .await - { - Ok(pending) => pending, - Err(AccountsError::FailedToSendCommitTransaction( - err, - commit_and_undelegate_accounts, - commit_only_accounts, - )) => { - update_account_commit_metrics( - &commit_and_undelegate_accounts, - &commit_only_accounts, - metrics::Outcome::Error, - None, - ); - debug_panic!( - "Failed to send commit transactions: {:?}", - err - ); - return; - } - Err(err) => { - debug_panic!( - "Failed to send commit transactions, received invalid err: {:?}", - err - ); - return; - } - }; - - committer.confirm_pending_commits(pending_commits).await; - }); + pub fn new(committer_service: Arc) -> Self { + Self { committer_service } } - fn fetch_cloned_account( - pubkey: &Pubkey, - cloned_accounts: &CloneOutputMap, - ) -> Option { - cloned_accounts - .read() - .expect("RwLock of RemoteAccountClonerWorker.last_clone_output is poisoned") - .get(pubkey).cloned() + async fn process(&self, account_provider: &IAP) -> AccountsResult<()> + where + IAP: InternalAccountProvider, + { + todo!() } } From 8cdc141b47a8aba1c4f082407e9bb57e9241d80f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 7 May 2025 16:24:41 +0700 Subject: [PATCH 04/58] feat: initial impl of commits processor based on committor service --- magicblock-accounts/src/errors.rs | 12 + .../src/remote_scheduled_commits_processor.rs | 247 +++++++++++++++++- .../process_scheduled_commit_sent.rs | 2 +- 3 files changed, 253 insertions(+), 8 deletions(-) diff --git a/magicblock-accounts/src/errors.rs b/magicblock-accounts/src/errors.rs index fb34ebff..c7c18b2b 100644 --- a/magicblock-accounts/src/errors.rs +++ b/magicblock-accounts/src/errors.rs @@ -3,6 +3,7 @@ use std::collections::HashSet; use magicblock_account_cloner::{ AccountClonerError, AccountClonerUnclonableReason, }; +use 
magicblock_committor_service::ChangesetMeta; use solana_sdk::pubkey::Pubkey; use thiserror::Error; @@ -19,6 +20,14 @@ pub enum AccountsError { #[error("TransactionError")] TransactionError(#[from] Box), + #[error("CommittorSerivceError")] + CommittorSerivceError( + #[from] Box, + ), + + #[error("TokioOneshotRecvError")] + TokioOneshotRecvError(#[from] Box), + #[error("AccountClonerError")] AccountClonerError(#[from] AccountClonerError), @@ -48,4 +57,7 @@ pub enum AccountsError { #[error("Too many committees: {0}")] TooManyCommittees(usize), + + #[error("FailedToObtainReqidForCommittedChangeset {0:?}'")] + FailedToObtainReqidForCommittedChangeset(Box), } diff --git a/magicblock-accounts/src/remote_scheduled_commits_processor.rs b/magicblock-accounts/src/remote_scheduled_commits_processor.rs index 5c5fc0dd..ad5dd2b0 100644 --- a/magicblock-accounts/src/remote_scheduled_commits_processor.rs +++ b/magicblock-accounts/src/remote_scheduled_commits_processor.rs @@ -1,23 +1,256 @@ -use std::sync::Arc; +use conjunto_transwise::AccountChainSnapshot; +use log::*; +use magicblock_bank::bank::Bank; +use magicblock_processor::execute_transaction::execute_legacy_transaction; +use magicblock_transaction_status::TransactionStatusSender; +use solana_sdk::{account::ReadableAccount, transaction::Transaction}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; +use magicblock_account_cloner::{ + AccountClonerOutput, AccountClonerOutput::Cloned, CloneOutputMap, +}; use magicblock_accounts_api::InternalAccountProvider; -use magicblock_committor_service::CommittorService; +use magicblock_committor_service::{ + persist::BundleSignatureRow, ChangedAccount, Changeset, ChangesetMeta, + CommittorService, +}; +use magicblock_program::{ + register_scheduled_commit_sent, FeePayerAccount, Pubkey, SentCommit, + TransactionScheduler, +}; -use crate::errors::AccountsResult; +use crate::{errors::AccountsResult, AccountCommittee}; struct RemoteScheduledCommitsProcessor { - 
committer_service: Arc, + committor_service: Arc, + transaction_scheduler: TransactionScheduler, + cloned_accounts: CloneOutputMap, + bank: Arc, + transaction_status_sender: Arc, } impl RemoteScheduledCommitsProcessor { - pub fn new(committer_service: Arc) -> Self { - Self { committer_service } + pub fn new( + committer_service: Arc, + bank: Arc, + cloned_accounts: CloneOutputMap, + transaction_status_sender: Arc, + ) -> Self { + Self { + committor_service: committer_service, + bank, + transaction_status_sender, + cloned_accounts, + transaction_scheduler: TransactionScheduler::default(), + } } async fn process(&self, account_provider: &IAP) -> AccountsResult<()> where IAP: InternalAccountProvider, { - todo!() + let scheduled_commits = + self.transaction_scheduler.take_scheduled_commits(); + + if scheduled_commits.is_empty() { + return Ok(()); + } + + let mut changeset = Changeset::default(); + // SAFETY: we only get here if the scheduled commits are not empty + let max_slot = scheduled_commits + .iter() + .map(|commit| commit.slot) + .max() + .unwrap(); + + changeset.slot = max_slot; + + let mut sent_commits = HashMap::new(); + for commit in scheduled_commits { + // Determine which accounts are available and can be committed + let mut committees = vec![]; + let mut feepayers = HashSet::new(); + let mut excluded_pubkeys = vec![]; + for committed_account in commit.accounts { + let mut committee_pubkey = committed_account.pubkey; + let mut committee_owner = committed_account.owner; + if let Some(Cloned { + account_chain_snapshot, + .. 
+ }) = Self::fetch_cloned_account( + &committed_account.pubkey, + &self.cloned_accounts, + ) { + // If the account is a FeePayer, we commit the mapped delegated account + if account_chain_snapshot.chain_state.is_feepayer() { + committee_pubkey = + AccountChainSnapshot::ephemeral_balance_pda( + &committed_account.pubkey, + ); + committee_owner = + AccountChainSnapshot::ephemeral_balance_pda_owner(); + feepayers.insert(FeePayerAccount { + pubkey: committed_account.pubkey, + delegated_pda: committee_pubkey, + }); + } else if account_chain_snapshot + .chain_state + .is_undelegated() + { + error!("Scheduled commit account '{}' is undelegated. This is not supported.", committed_account.pubkey); + excluded_pubkeys.push(committed_account.pubkey); + continue; + } + } + + match account_provider.get_account(&committed_account.pubkey) { + Some(account_data) => { + committees.push(( + commit.id, + AccountCommittee { + pubkey: committee_pubkey, + owner: committee_owner, + account_data, + slot: commit.slot, + undelegation_requested: commit + .request_undelegation, + }, + )); + } + None => { + error!( + "Scheduled commmit account '{}' not found. 
It must have gotten undelegated and removed since it was scheduled.", + committed_account.pubkey + ); + excluded_pubkeys.push(committed_account.pubkey); + continue; + } + } + } + + // Collect all SentCommit info available at this stage + // We add the chain_signatures after we sent off the changeset + let sent_commit = SentCommit { + commit_id: commit.id, + payer: commit.payer, + blockhash: commit.blockhash, + included_pubkeys: committees + .iter() + .map(|(_, committee)| committee.pubkey) + .collect(), + excluded_pubkeys, + feepayers, + requested_undelegation: commit.request_undelegation, + ..Default::default() + }; + sent_commits.insert( + commit.id, + (commit.commit_sent_transaction, sent_commit), + ); + + // Add the committee to the changeset + for (bundle_id, committee) in committees { + changeset.add( + committee.pubkey, + ChangedAccount::Full { + lamports: committee.account_data.lamports(), + data: committee.account_data.data().to_vec(), + owner: committee.owner, + bundle_id, + }, + ); + } + } + + self.process_changeset(changeset, sent_commits); + + Ok(()) + } + + fn fetch_cloned_account( + pubkey: &Pubkey, + cloned_accounts: &CloneOutputMap, + ) -> Option { + cloned_accounts + .read() + .expect("RwLock of RemoteAccountClonerWorker.last_clone_output is poisoned") + .get(pubkey).cloned() + } + + fn process_changeset( + &self, + changeset: Changeset, + mut sent_commits: HashMap, + ) { + // We process the changeset on a separate task in order to not block + // the validator (slot advance) itself + let committor_service = self.committor_service.clone(); + let bank = self.bank.clone(); + let transaction_status_sender = self.transaction_status_sender.clone(); + + tokio::task::spawn(async move { + // Create one sent commit transaction per bundle in our validator + let changeset_metadata = ChangesetMeta::from(&changeset); + for bundle_id in changeset_metadata + .accounts + .iter() + .map(|account| account.bundle_id) + .collect::>() + { + match committor_service + 
.get_bundle_signatures(bundle_id) + .await + // TODO: @@@ + .unwrap() + .unwrap() + { + Some(BundleSignatureRow { + processed_signature, + finalized_signature, + bundle_id, + .. + }) => { + let mut chain_signatures = vec![processed_signature]; + if let Some(finalized_signature) = finalized_signature { + chain_signatures.push(finalized_signature); + } + if let Some(( + commit_sent_transaction, + mut sent_commit, + )) = sent_commits.remove(&bundle_id) + { + sent_commit.chain_signatures = chain_signatures; + register_scheduled_commit_sent(sent_commit); + match execute_legacy_transaction( + commit_sent_transaction, + &bank, + Some(&transaction_status_sender) + ) { + Ok(signature) => debug!( + "Signaled sent commit with internal signature: {:?}", + signature + ), + Err(err) => { + error!("Failed to signal sent commit via transaction: {}", err); + } + } + } else { + error!( + "BUG: Failed to get sent commit for bundle id {} that should have been added", + bundle_id + ); + } + } + None => error!( + "Failed to get bundle signatures for bundle id {}", + bundle_id + ), + } + } + }); } } diff --git a/programs/magicblock/src/schedule_transactions/process_scheduled_commit_sent.rs b/programs/magicblock/src/schedule_transactions/process_scheduled_commit_sent.rs index 09eaf38e..58d76040 100644 --- a/programs/magicblock/src/schedule_transactions/process_scheduled_commit_sent.rs +++ b/programs/magicblock/src/schedule_transactions/process_scheduled_commit_sent.rs @@ -17,7 +17,7 @@ use crate::{ FeePayerAccount, }; -#[derive(Debug, Clone)] +#[derive(Default, Debug, Clone)] pub struct SentCommit { pub commit_id: u64, pub slot: Slot, From c0d08a4862a5516d2ca1de19e7e70877aa230598 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 7 May 2025 16:56:42 +0700 Subject: [PATCH 05/58] feat: initializing committor service at startup --- Cargo.lock | 1 + magicblock-accounts/src/accounts_manager.rs | 19 +++---- .../src/external_accounts_manager.rs | 2 +- magicblock-accounts/src/lib.rs | 2 +- 
.../src/remote_scheduled_commits_processor.rs | 53 ++++++++++++------- magicblock-accounts/src/traits.rs | 5 +- .../stubs/scheduled_commits_processor_stub.rs | 9 +--- magicblock-api/Cargo.toml | 1 + magicblock-api/src/errors.rs | 5 ++ magicblock-api/src/magic_validator.rs | 15 ++++++ 10 files changed, 71 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fa5a174c..d4cae323 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3665,6 +3665,7 @@ dependencies = [ "magicblock-accounts-api", "magicblock-accounts-db", "magicblock-bank", + "magicblock-committor-service", "magicblock-config", "magicblock-core", "magicblock-geyser-plugin", diff --git a/magicblock-accounts/src/accounts_manager.rs b/magicblock-accounts/src/accounts_manager.rs index 93d75ec1..f1728fe8 100644 --- a/magicblock-accounts/src/accounts_manager.rs +++ b/magicblock-accounts/src/accounts_manager.rs @@ -7,14 +7,15 @@ use conjunto_transwise::{ use magicblock_account_cloner::{CloneOutputMap, RemoteAccountClonerClient}; use magicblock_accounts_api::BankAccountProvider; use magicblock_bank::bank::Bank; +use magicblock_committor_service::CommittorService; use magicblock_transaction_status::TransactionStatusSender; use solana_rpc_client::nonblocking::rpc_client::RpcClient; use solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair}; use crate::{ config::AccountsConfig, errors::AccountsResult, - old_remote_scheduled_commits_processor::OldRemoteScheduledCommitsProcessor, remote_account_committer::RemoteAccountCommitter, + remote_scheduled_commits_processor::RemoteScheduledCommitsProcessor, utils::try_rpc_cluster_from_cluster, ExternalAccountsManager, }; @@ -24,11 +25,12 @@ pub type AccountsManager = ExternalAccountsManager< RemoteAccountCommitter, TransactionAccountsExtractorImpl, TransactionAccountsValidatorImpl, - OldRemoteScheduledCommitsProcessor, + RemoteScheduledCommitsProcessor, >; impl AccountsManager { pub fn try_new( + committer_service: Arc, bank: &Arc, cloned_accounts: 
&CloneOutputMap, remote_account_cloner_client: RemoteAccountClonerClient, @@ -49,13 +51,12 @@ impl AccountsManager { config.commit_compute_unit_price, ); - let scheduled_commits_processor = - OldRemoteScheduledCommitsProcessor::new( - remote_cluster, - bank.clone(), - cloned_accounts.clone(), - transaction_status_sender.clone(), - ); + let scheduled_commits_processor = RemoteScheduledCommitsProcessor::new( + committer_service, + bank.clone(), + cloned_accounts.clone(), + transaction_status_sender.clone(), + ); Ok(Self { internal_account_provider, diff --git a/magicblock-accounts/src/external_accounts_manager.rs b/magicblock-accounts/src/external_accounts_manager.rs index a19a51ea..ef47f9c5 100644 --- a/magicblock-accounts/src/external_accounts_manager.rs +++ b/magicblock-accounts/src/external_accounts_manager.rs @@ -404,7 +404,7 @@ where pub async fn process_scheduled_commits(&self) -> AccountsResult<()> { self.scheduled_commits_processor - .process(&self.account_committer, &self.internal_account_provider) + .process(&self.internal_account_provider) .await } diff --git a/magicblock-accounts/src/lib.rs b/magicblock-accounts/src/lib.rs index a4e48da4..6b2eda27 100644 --- a/magicblock-accounts/src/lib.rs +++ b/magicblock-accounts/src/lib.rs @@ -2,7 +2,7 @@ mod accounts_manager; mod config; pub mod errors; mod external_accounts_manager; -mod old_remote_scheduled_commits_processor; +// mod old_remote_scheduled_commits_processor; mod remote_account_committer; mod remote_scheduled_commits_processor; mod traits; diff --git a/magicblock-accounts/src/remote_scheduled_commits_processor.rs b/magicblock-accounts/src/remote_scheduled_commits_processor.rs index ad5dd2b0..0311ae65 100644 --- a/magicblock-accounts/src/remote_scheduled_commits_processor.rs +++ b/magicblock-accounts/src/remote_scheduled_commits_processor.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use conjunto_transwise::AccountChainSnapshot; use log::*; use magicblock_bank::bank::Bank; @@ -22,32 +23,20 @@ use 
magicblock_program::{ TransactionScheduler, }; -use crate::{errors::AccountsResult, AccountCommittee}; +use crate::{ + errors::AccountsResult, AccountCommittee, ScheduledCommitsProcessor, +}; -struct RemoteScheduledCommitsProcessor { +pub struct RemoteScheduledCommitsProcessor { committor_service: Arc, transaction_scheduler: TransactionScheduler, cloned_accounts: CloneOutputMap, bank: Arc, - transaction_status_sender: Arc, + transaction_status_sender: Option, } -impl RemoteScheduledCommitsProcessor { - pub fn new( - committer_service: Arc, - bank: Arc, - cloned_accounts: CloneOutputMap, - transaction_status_sender: Arc, - ) -> Self { - Self { - committor_service: committer_service, - bank, - transaction_status_sender, - cloned_accounts, - transaction_scheduler: TransactionScheduler::default(), - } - } - +#[async_trait] +impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { async fn process(&self, account_provider: &IAP) -> AccountsResult<()> where IAP: InternalAccountProvider, @@ -171,6 +160,30 @@ impl RemoteScheduledCommitsProcessor { Ok(()) } + fn scheduled_commits_len(&self) -> usize { + self.transaction_scheduler.scheduled_commits_len() + } + + fn clear_scheduled_commits(&self) { + self.transaction_scheduler.clear_scheduled_commits(); + } +} + +impl RemoteScheduledCommitsProcessor { + pub fn new( + committer_service: Arc, + bank: Arc, + cloned_accounts: CloneOutputMap, + transaction_status_sender: Option, + ) -> Self { + Self { + committor_service: committer_service, + bank, + transaction_status_sender, + cloned_accounts, + transaction_scheduler: TransactionScheduler::default(), + } + } fn fetch_cloned_account( pubkey: &Pubkey, cloned_accounts: &CloneOutputMap, @@ -228,7 +241,7 @@ impl RemoteScheduledCommitsProcessor { match execute_legacy_transaction( commit_sent_transaction, &bank, - Some(&transaction_status_sender) + transaction_status_sender.as_ref() ) { Ok(signature) => debug!( "Signaled sent commit with internal signature: {:?}", diff --git 
a/magicblock-accounts/src/traits.rs b/magicblock-accounts/src/traits.rs index 965d801f..34808bc1 100644 --- a/magicblock-accounts/src/traits.rs +++ b/magicblock-accounts/src/traits.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; use async_trait::async_trait; use magicblock_accounts_api::InternalAccountProvider; @@ -14,9 +14,8 @@ use crate::errors::AccountsResult; #[async_trait] pub trait ScheduledCommitsProcessor { /// Processes all commits that were scheduled and accepted - async fn process( + async fn process( &self, - committer: &Arc, account_provider: &IAP, ) -> AccountsResult<()>; diff --git a/magicblock-accounts/tests/stubs/scheduled_commits_processor_stub.rs b/magicblock-accounts/tests/stubs/scheduled_commits_processor_stub.rs index 893988b4..abce9680 100644 --- a/magicblock-accounts/tests/stubs/scheduled_commits_processor_stub.rs +++ b/magicblock-accounts/tests/stubs/scheduled_commits_processor_stub.rs @@ -1,9 +1,5 @@ -use std::sync::Arc; - use async_trait::async_trait; -use magicblock_accounts::{ - errors::AccountsResult, AccountCommitter, ScheduledCommitsProcessor, -}; +use magicblock_accounts::{errors::AccountsResult, ScheduledCommitsProcessor}; use magicblock_accounts_api::InternalAccountProvider; #[derive(Default)] @@ -11,9 +7,8 @@ pub struct ScheduledCommitsProcessorStub {} #[async_trait] impl ScheduledCommitsProcessor for ScheduledCommitsProcessorStub { - async fn process( + async fn process( &self, - _committer: &Arc, _account_provider: &IAP, ) -> AccountsResult<()> { Ok(()) diff --git a/magicblock-api/Cargo.toml b/magicblock-api/Cargo.toml index 88bde102..5687eb0f 100644 --- a/magicblock-api/Cargo.toml +++ b/magicblock-api/Cargo.toml @@ -23,6 +23,7 @@ magicblock-accounts = { workspace = true } magicblock-accounts-api = { workspace = true } magicblock-accounts-db = { workspace = true } magicblock-bank = { workspace = true } +magicblock-committor-service = { workspace = true } magicblock-config = { 
workspace = true } magicblock-core = { workspace = true } magicblock-geyser-plugin = { workspace = true } diff --git a/magicblock-api/src/errors.rs b/magicblock-api/src/errors.rs index ee6c08fa..13bc81b2 100644 --- a/magicblock-api/src/errors.rs +++ b/magicblock-api/src/errors.rs @@ -26,6 +26,11 @@ pub enum ApiError { #[error("Ledger error: {0}")] LedgerError(#[from] magicblock_ledger::errors::LedgerError), + #[error("CommittorSerivceError")] + CommittorSerivceError( + #[from] magicblock_committor_service::error::CommittorServiceError, + ), + #[error("Failed to load programs into bank: {0}")] FailedToLoadProgramsIntoBank(String), diff --git a/magicblock-api/src/magic_validator.rs b/magicblock-api/src/magic_validator.rs index 268430e9..39b130a8 100644 --- a/magicblock-api/src/magic_validator.rs +++ b/magicblock-api/src/magic_validator.rs @@ -37,6 +37,7 @@ use magicblock_bank::{ program_loader::load_programs_into_bank, transaction_logs::TransactionLogCollectorFilter, }; +use magicblock_committor_service::{config::ChainConfig, CommittorService}; use magicblock_config::{EphemeralConfig, LifecycleMode, ProgramConfig}; use magicblock_geyser_plugin::rpc::GeyserRpcService; use magicblock_ledger::{ @@ -316,7 +317,19 @@ impl MagicValidator { identity_keypair.pubkey(), ); + let committor_service = Arc::new(CommittorService::try_start( + identity_keypair.insecure_clone(), + // TODO: @@@ config or inside ledger dir + "/tmp/committor_service.sqlite", + &ChainConfig { + rpc_uri: remote_rpc_config.url().to_string(), + commitment: remote_rpc_config + .commitment() + .unwrap_or(CommitmentLevel::Confirmed), + }, + )?); let accounts_manager = Self::init_accounts_manager( + committor_service, &bank, &remote_account_cloner_worker.get_last_clone_output(), RemoteAccountClonerClient::new(&remote_account_cloner_worker), @@ -406,6 +419,7 @@ impl MagicValidator { } fn init_accounts_manager( + committer_service: Arc, bank: &Arc, cloned_accounts: &CloneOutputMap, remote_account_cloner_client: 
RemoteAccountClonerClient, @@ -418,6 +432,7 @@ impl MagicValidator { "Failed to derive accounts config from provided magicblock config", ); let accounts_manager = AccountsManager::try_new( + committer_service, bank, cloned_accounts, remote_account_cloner_client, From 9eed6089005e13a6b14ef33ae02c25e7c744cef2 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 8 May 2025 08:59:09 +0700 Subject: [PATCH 06/58] test: logging signature of failed init_committees tx --- .../schedulecommit/client/src/schedule_commit_context.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test-integration/schedulecommit/client/src/schedule_commit_context.rs b/test-integration/schedulecommit/client/src/schedule_commit_context.rs index 5926e6a3..d287d53d 100644 --- a/test-integration/schedulecommit/client/src/schedule_commit_context.rs +++ b/test-integration/schedulecommit/client/src/schedule_commit_context.rs @@ -6,7 +6,7 @@ use program_schedulecommit::api::{ delegate_account_cpi_instruction, init_account_instruction, init_payer_escrow, pda_and_bump, }; -use solana_rpc_client::rpc_client::RpcClient; +use solana_rpc_client::rpc_client::{RpcClient, SerializableTransaction}; use solana_rpc_client_api::config::RpcSendTransactionConfig; #[allow(unused_imports)] use solana_sdk::signer::SeedDerivable; @@ -124,7 +124,12 @@ impl ScheduleCommitTestContext { ..Default::default() }, ) - .with_context(|| "Failed to initialize committees") + .with_context(|| { + format!( + "Failed to initialize committees. 
Transaction signature: {}", + tx.get_signature() + ) + }) } pub fn escrow_lamports_for_payer(&self) -> Result { From dae8ea881dc1303f020573eec35255b219394388 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 9 May 2025 13:52:31 +0700 Subject: [PATCH 07/58] test: adapt to expect two signatures when finalizing --- .../test-scenarios/tests/01_commits.rs | 2 +- .../tests/02_commit_and_undelegate.rs | 4 ++-- .../tests/03_commits_fee_payer.rs | 2 +- .../test-scenarios/tests/utils/mod.rs | 20 ++++++++++++++++--- 4 files changed, 21 insertions(+), 7 deletions(-) diff --git a/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs b/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs index 725ee895..2cbf0b6d 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs @@ -61,7 +61,7 @@ fn test_committing_two_accounts() { info!("{} '{:?}'", sig, res); let res = verify::fetch_and_verify_commit_result_from_logs(&ctx, *sig); - assert_two_committees_were_committed(&ctx, &res); + assert_two_committees_were_committed(&ctx, &res, true); assert_two_committees_synchronized_count(&ctx, &res, 1); }); } diff --git a/test-integration/schedulecommit/test-scenarios/tests/02_commit_and_undelegate.rs b/test-integration/schedulecommit/test-scenarios/tests/02_commit_and_undelegate.rs index fc506ca4..bd9f3f8d 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/02_commit_and_undelegate.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/02_commit_and_undelegate.rs @@ -171,7 +171,7 @@ fn test_committing_and_undelegating_one_account() { let res = verify::fetch_and_verify_commit_result_from_logs(&ctx, sig); - assert_one_committee_was_committed(&ctx, &res); + assert_one_committee_was_committed(&ctx, &res, true); assert_one_committee_synchronized_count(&ctx, &res, 1); assert_one_committee_account_was_undelegated_on_chain(&ctx); @@ -186,7 +186,7 @@ fn 
test_committing_and_undelegating_two_accounts_success() { let res = verify::fetch_and_verify_commit_result_from_logs(&ctx, sig); - assert_two_committees_were_committed(&ctx, &res); + assert_two_committees_were_committed(&ctx, &res, true); assert_two_committees_synchronized_count(&ctx, &res, 1); assert_two_committee_accounts_were_undelegated_on_chain(&ctx); diff --git a/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs b/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs index 2874aaf0..2325584e 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs @@ -121,7 +121,7 @@ fn test_committing_fee_payer_escrowing_lamports() { assert!(res.is_ok()); let res = verify::fetch_and_verify_commit_result_from_logs(&ctx, *sig); - assert_two_committees_were_committed(&ctx, &res); + assert_two_committees_were_committed(&ctx, &res, true); assert_two_committees_synchronized_count(&ctx, &res, 1); // The fee payer should have been committed diff --git a/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs b/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs index dc187029..a807dd65 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs @@ -52,6 +52,7 @@ fn get_context_with_delegated_committees_impl( pub fn assert_one_committee_was_committed( ctx: &ScheduleCommitTestContext, res: &ScheduledCommitResult, + finalize: bool, ) { let pda = ctx.committees[0].1; @@ -61,13 +62,20 @@ pub fn assert_one_committee_was_committed( let commit = res.included.get(&pda); assert!(commit.is_some(), "should have committed pda"); - assert_eq!(res.sigs.len(), 1, "should have 1 on chain sig"); + let sig_len = if finalize { 2 } else { 1 }; + assert_eq!( + res.sigs.len(), + sig_len, + "should have {} on chain sig", + sig_len 
+ ); } #[allow(dead_code)] // used in 02_commit_and_undelegate.rs pub fn assert_two_committees_were_committed( ctx: &ScheduleCommitTestContext, res: &ScheduledCommitResult, + finalize: bool, ) { let pda1 = ctx.committees[0].1; let pda2 = ctx.committees[1].1; @@ -80,7 +88,13 @@ pub fn assert_two_committees_were_committed( assert!(commit1.is_some(), "should have committed pda1"); assert!(commit2.is_some(), "should have committed pda2"); - assert_eq!(res.sigs.len(), 1, "should have 1 on chain sig"); + let sig_len = if finalize { 2 } else { 1 }; + assert_eq!( + res.sigs.len(), + sig_len, + "should have {} on chain sig", + sig_len + ); } #[allow(dead_code)] @@ -92,7 +106,7 @@ pub fn assert_feepayer_was_committed( assert_eq!(res.feepayers.len(), 1, "includes 1 payer"); - let commit_payer = res.feepayers.iter().filter(|(p, _)| p == &payer).next(); + let commit_payer = res.feepayers.iter().find(|(p, _)| p == &payer); assert!(commit_payer.is_some(), "should have committed payer"); assert_eq!(res.sigs.len(), 1, "should have 1 on chain sig"); From 80e6777b0ff5ad0592b51067fd785904bfee3aca Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 9 May 2025 15:42:37 +0700 Subject: [PATCH 08/58] feat: ensure ephemeral validator is funded on chain --- magicblock-api/src/magic_validator.rs | 102 ++++++++++++++++++++------ 1 file changed, 80 insertions(+), 22 deletions(-) diff --git a/magicblock-api/src/magic_validator.rs b/magicblock-api/src/magic_validator.rs index 39b130a8..7c02063e 100644 --- a/magicblock-api/src/magic_validator.rs +++ b/magicblock-api/src/magic_validator.rs @@ -13,8 +13,9 @@ use std::{ use conjunto_transwise::RpcProviderConfig; use log::*; use magicblock_account_cloner::{ - standard_blacklisted_accounts, CloneOutputMap, RemoteAccountClonerClient, - RemoteAccountClonerWorker, ValidatorCollectionMode, + map_committor_request_result, standard_blacklisted_accounts, + CloneOutputMap, RemoteAccountClonerClient, RemoteAccountClonerWorker, + ValidatorCollectionMode, 
}; use magicblock_account_dumper::AccountDumperBank; use magicblock_account_fetcher::{ @@ -70,9 +71,14 @@ use solana_geyser_plugin_manager::{ geyser_plugin_manager::GeyserPluginManager, slot_status_notifier::SlotStatusNotifierImpl, }; +use solana_rpc_client::nonblocking::rpc_client::RpcClient; use solana_sdk::{ - clock::Slot, commitment_config::CommitmentLevel, - genesis_config::GenesisConfig, pubkey::Pubkey, signature::Keypair, + clock::Slot, + commitment_config::{CommitmentConfig, CommitmentLevel}, + genesis_config::GenesisConfig, + native_token::LAMPORTS_PER_SOL, + pubkey::Pubkey, + signature::Keypair, signer::Signer, }; use tempfile::TempDir; @@ -148,6 +154,7 @@ pub struct MagicValidator { >, remote_account_cloner_handle: Option>, accounts_manager: Arc, + committor_service: Arc, transaction_listener: GeyserTransactionNotifyListener, rpc_service: JsonRpcService, _metrics: Option<(MetricsService, tokio::task::JoinHandle<()>)>, @@ -300,11 +307,24 @@ impl MagicValidator { &faucet_keypair.pubkey(), ); + let committor_service = Arc::new(CommittorService::try_start( + identity_keypair.insecure_clone(), + // TODO: @@@ config or inside ledger dir + "/tmp/committor_service.sqlite", + &ChainConfig { + rpc_uri: remote_rpc_config.url().to_string(), + commitment: remote_rpc_config + .commitment() + .unwrap_or(CommitmentLevel::Confirmed), + }, + )?); + let remote_account_cloner_worker = RemoteAccountClonerWorker::new( bank_account_provider, remote_account_fetcher_client, remote_account_updates_client, account_dumper_bank, + committor_service.clone(), accounts_config.allowed_program_ids, blacklisted_accounts, accounts_config.payer_init_lamports, @@ -317,19 +337,8 @@ impl MagicValidator { identity_keypair.pubkey(), ); - let committor_service = Arc::new(CommittorService::try_start( - identity_keypair.insecure_clone(), - // TODO: @@@ config or inside ledger dir - "/tmp/committor_service.sqlite", - &ChainConfig { - rpc_uri: remote_rpc_config.url().to_string(), - commitment: 
remote_rpc_config - .commitment() - .unwrap_or(CommitmentLevel::Confirmed), - }, - )?); let accounts_manager = Self::init_accounts_manager( - committor_service, + committor_service.clone(), &bank, &remote_account_cloner_worker.get_last_clone_output(), RemoteAccountClonerClient::new(&remote_account_cloner_worker), @@ -373,6 +382,7 @@ impl MagicValidator { remote_account_cloner_handle: None, pubsub_handle: Default::default(), pubsub_close_handle: Default::default(), + committor_service, sample_performance_service: None, pubsub_config, token, @@ -635,12 +645,53 @@ impl MagicValidator { }) } + async fn ensure_validator_funded_on_chain(&self) -> ApiResult<()> { + // TODO: @@@ configurable? + const MIN_BALANCE_SOL: u64 = 5; + // TODO: @@ duplicate code getting remote_rpc_config + let accounts_config = try_convert_accounts_config( + &self.config.accounts, + ) + .expect( + "Failed to derive accounts config from provided magicblock config", + ); + let remote_rpc_config = RpcProviderConfig::new( + try_rpc_cluster_from_cluster(&accounts_config.remote_cluster)?, + Some(CommitmentLevel::Confirmed), + ); + + let validator_pubkey = self.bank().get_identity(); + + let lamports = RpcClient::new_with_commitment( + remote_rpc_config.url().to_string(), + CommitmentConfig { + commitment: remote_rpc_config + .commitment() + .unwrap_or(CommitmentLevel::Confirmed), + }, + ) + .get_balance(&validator_pubkey) + .await + .map_err(|err| { + ApiError::FailedToObtainValidatorOnChainBalance( + validator_pubkey, + err.to_string(), + ) + })?; + if lamports < MIN_BALANCE_SOL * LAMPORTS_PER_SOL { + Err(ApiError::ValidatorInsufficientlyFunded( + validator_pubkey, + MIN_BALANCE_SOL, + )) + } else { + Ok(()) + } + } + pub async fn start(&mut self) -> ApiResult<()> { - if let Some(ref fdqn) = self.config.validator.fdqn { - if matches!( - self.config.accounts.lifecycle, - LifecycleMode::Ephemeral - ) { + if matches!(self.config.accounts.lifecycle, LifecycleMode::Ephemeral) { + 
self.ensure_validator_funded_on_chain().await?; + if let Some(ref fdqn) = self.config.validator.fdqn { self.register_validator_on_chain(fdqn).await?; } } @@ -664,9 +715,9 @@ impl MagicValidator { self.token.clone(), )); + self.start_remote_account_cloner_worker().await?; self.start_remote_account_fetcher_worker(); self.start_remote_account_updates_worker(); - self.start_remote_account_cloner_worker().await?; self.ledger_truncator.start(); @@ -746,6 +797,13 @@ impl MagicValidator { if let Some(mut remote_account_cloner_worker) = self.remote_account_cloner_worker.take() { + debug!("Reserving common pubkeys for committor service"); + map_committor_request_result( + self.committor_service.reserve_common_pubkeys(), + ) + .await?; + info!("RESERVED"); + if !self.config.ledger.reset { remote_account_cloner_worker.hydrate().await?; info!("Validator hydration complete (bank hydrate, replay, account clone)"); From 3078a1cfc0e0beeabcf153bec88732228d0e6f5c Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 9 May 2025 16:59:03 +0700 Subject: [PATCH 09/58] test: add single account commit test --- .../test-scenarios/tests/01_commits.rs | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs b/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs index 2cbf0b6d..8264bde1 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs @@ -10,12 +10,64 @@ use solana_rpc_client_api::config::RpcSendTransactionConfig; use solana_sdk::{signer::Signer, transaction::Transaction}; use test_tools_core::init_logger; use utils::{ + assert_one_committee_synchronized_count, + assert_one_committee_was_committed, assert_two_committees_synchronized_count, assert_two_committees_were_committed, get_context_with_delegated_committees, }; mod utils; +#[test] +fn test_committing_one_account() { + run_test!({ + let 
ctx = get_context_with_delegated_committees(1); + + let ScheduleCommitTestContextFields { + payer, + committees, + commitment, + ephem_client, + ephem_blockhash, + .. + } = ctx.fields(); + + let ix = schedule_commit_cpi_instruction( + payer.pubkey(), + pubkey_from_magic_program(magic_program::id()), + pubkey_from_magic_program(magic_program::MAGIC_CONTEXT_PUBKEY), + &committees + .iter() + .map(|(player, _)| player.pubkey()) + .collect::>(), + &committees.iter().map(|(_, pda)| *pda).collect::>(), + ); + + let tx = Transaction::new_signed_with_payer( + &[ix], + Some(&payer.pubkey()), + &[&payer], + *ephem_blockhash, + ); + + let sig = tx.get_signature(); + let res = ephem_client + .send_and_confirm_transaction_with_spinner_and_config( + &tx, + *commitment, + RpcSendTransactionConfig { + skip_preflight: true, + ..Default::default() + }, + ); + info!("{} '{:?}'", sig, res); + + let res = verify::fetch_and_verify_commit_result_from_logs(&ctx, *sig); + assert_one_committee_was_committed(&ctx, &res, true); + assert_one_committee_synchronized_count(&ctx, &res, 1); + }); +} + #[test] fn test_committing_two_accounts() { run_test!({ From 8391124b2c3fc26f109be411e24f7eb34a530729 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 9 May 2025 16:59:59 +0700 Subject: [PATCH 10/58] test: warn when we have issues fetching a transaction --- .../test-tools/src/integration_test_context.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test-integration/test-tools/src/integration_test_context.rs b/test-integration/test-tools/src/integration_test_context.rs index 7d8d882e..22245254 100644 --- a/test-integration/test-tools/src/integration_test_context.rs +++ b/test-integration/test-tools/src/integration_test_context.rs @@ -1,3 +1,4 @@ +use log::*; use std::{str::FromStr, thread::sleep, time::Duration}; use anyhow::{Context, Result}; @@ -115,17 +116,18 @@ impl IntegrationTestContext { // Fetch Logs // ----------------- pub fn 
fetch_ephemeral_logs(&self, sig: Signature) -> Option> { - self.fetch_logs(sig, self.ephem_client.as_ref()) + self.fetch_logs(sig, self.ephem_client.as_ref(), "ephemeral") } pub fn fetch_chain_logs(&self, sig: Signature) -> Option> { - self.fetch_logs(sig, self.chain_client.as_ref()) + self.fetch_logs(sig, self.chain_client.as_ref(), "chain") } fn fetch_logs( &self, sig: Signature, rpc_client: Option<&RpcClient>, + label: &str, ) -> Option> { let rpc_client = rpc_client.or(self.chain_client.as_ref())?; @@ -140,7 +142,11 @@ impl IntegrationTestContext { }, ) { Ok(status) => status, - Err(_) => { + Err(err) => { + warn!( + "Failed to fetch transaction from {}: {:?}", + label, err + ); sleep(Duration::from_millis(400)); continue; } From e5aa347bce730a88dc049c663af0334582ddc501 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 9 May 2025 17:00:32 +0700 Subject: [PATCH 11/58] fix: adding change for validator fund check --- magicblock-api/src/errors.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/magicblock-api/src/errors.rs b/magicblock-api/src/errors.rs index 13bc81b2..6cebbf8d 100644 --- a/magicblock-api/src/errors.rs +++ b/magicblock-api/src/errors.rs @@ -1,4 +1,5 @@ use magicblock_accounts_db::error::AccountsDbError; +use magicblock_program::Pubkey; use thiserror::Error; pub type ApiResult = std::result::Result; @@ -26,6 +27,12 @@ pub enum ApiError { #[error("Ledger error: {0}")] LedgerError(#[from] magicblock_ledger::errors::LedgerError), + #[error("Failed to obtain balance for validator '{0}' from chain. ({1})")] + FailedToObtainValidatorOnChainBalance(Pubkey, String), + + #[error("Validator '{0}' is insufficiently funded on chain. 
Minimum is ({1} SOL)")] + ValidatorInsufficientlyFunded(Pubkey, u64), + #[error("CommittorSerivceError")] CommittorSerivceError( #[from] magicblock_committor_service::error::CommittorServiceError, From be98740562f884d8968a6386a94386313a35ac19 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 9 May 2025 17:01:50 +0700 Subject: [PATCH 12/58] feat: fully integrating committor service --- .../src/account_cloner.rs | 20 +++++++++++++++- .../src/remote_account_cloner_worker.rs | 24 +++++++++++++++---- .../src/remote_scheduled_commits_processor.rs | 23 +++++++++++++++++- 3 files changed, 61 insertions(+), 6 deletions(-) diff --git a/magicblock-account-cloner/src/account_cloner.rs b/magicblock-account-cloner/src/account_cloner.rs index 60ed7d4d..03e476ca 100644 --- a/magicblock-account-cloner/src/account_cloner.rs +++ b/magicblock-account-cloner/src/account_cloner.rs @@ -8,10 +8,11 @@ use futures_util::future::BoxFuture; use magicblock_account_dumper::AccountDumperError; use magicblock_account_fetcher::AccountFetcherError; use magicblock_account_updates::AccountUpdatesError; +use magicblock_committor_service::error::CommittorServiceResult; use magicblock_core::magic_program; use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}; use thiserror::Error; -use tokio::sync::oneshot::Sender; +use tokio::sync::oneshot::{self, Sender}; #[derive(Debug, Clone, Error)] pub enum AccountClonerError { @@ -30,6 +31,9 @@ pub enum AccountClonerError { #[error(transparent)] AccountDumperError(#[from] AccountDumperError), + #[error("CommittorSerivceError {0}")] + CommittorSerivceError(String), + #[error("ProgramDataDoesNotExist")] ProgramDataDoesNotExist, @@ -66,6 +70,20 @@ pub enum AccountClonerUnclonableReason { DelegatedAccountsNotClonedWhileHydrating, } +pub async fn map_committor_request_result( + res: oneshot::Receiver>, +) -> AccountClonerResult { + res.await + .map_err(|err| { + // Send request error + AccountClonerError::CommittorSerivceError(format!("{:?}", 
err)) + })? + .map_err(|err| { + // Commit error + AccountClonerError::CommittorSerivceError(format!("{:?}", err)) + }) +} + #[derive(Debug, Clone)] pub struct AccountClonerPermissions { pub allow_cloning_refresh: bool, diff --git a/magicblock-account-cloner/src/remote_account_cloner_worker.rs b/magicblock-account-cloner/src/remote_account_cloner_worker.rs index 048def46..094de1bb 100644 --- a/magicblock-account-cloner/src/remote_account_cloner_worker.rs +++ b/magicblock-account-cloner/src/remote_account_cloner_worker.rs @@ -18,6 +18,7 @@ use magicblock_account_dumper::AccountDumper; use magicblock_account_fetcher::AccountFetcher; use magicblock_account_updates::AccountUpdates; use magicblock_accounts_api::InternalAccountProvider; +use magicblock_committor_service::CommittorService; use magicblock_metrics::metrics; use magicblock_mutator::idl::{get_pubkey_anchor_idl, get_pubkey_shank_idl}; use solana_sdk::{ @@ -34,8 +35,8 @@ use tokio::{ use tokio_util::sync::CancellationToken; use crate::{ - AccountClonerError, AccountClonerListeners, AccountClonerOutput, - AccountClonerPermissions, AccountClonerResult, + map_committor_request_result, AccountClonerError, AccountClonerListeners, + AccountClonerOutput, AccountClonerPermissions, AccountClonerResult, AccountClonerUnclonableReason, CloneOutputMap, }; @@ -99,6 +100,7 @@ pub struct RemoteAccountClonerWorker { account_fetcher: AFE, account_updates: AUP, account_dumper: ADU, + committer_service: Arc, allowed_program_ids: Option>, blacklisted_accounts: HashSet, payer_init_lamports: Option, @@ -125,6 +127,7 @@ where account_fetcher: AFE, account_updates: AUP, account_dumper: ADU, + committer_service: Arc, allowed_program_ids: Option>, blacklisted_accounts: HashSet, payer_init_lamports: Option, @@ -141,6 +144,7 @@ where account_updates, account_dumper, allowed_program_ids, + committer_service, blacklisted_accounts, payer_init_lamports, validator_charges_fees, @@ -640,7 +644,7 @@ where }); } - self.do_clone_delegated_account( 
+ let sig = self.do_clone_delegated_account( pubkey, // TODO(GabrielePicco): Avoid cloning &Account { @@ -648,7 +652,19 @@ where ..account.clone() }, delegation_record, - )? + )?; + + // Allow the committer service to reserve pubkeys in lookup tables + // that could be needed when we commit this account + map_committor_request_result( + self.committer_service.reserve_pubkeys_for_committee( + *pubkey, + delegation_record.owner, + ), + ) + .await?; + + sig } }; // Return the result diff --git a/magicblock-accounts/src/remote_scheduled_commits_processor.rs b/magicblock-accounts/src/remote_scheduled_commits_processor.rs index 0311ae65..c47bf8ba 100644 --- a/magicblock-accounts/src/remote_scheduled_commits_processor.rs +++ b/magicblock-accounts/src/remote_scheduled_commits_processor.rs @@ -4,6 +4,7 @@ use log::*; use magicblock_bank::bank::Bank; use magicblock_processor::execute_transaction::execute_legacy_transaction; use magicblock_transaction_status::TransactionStatusSender; +use solana_sdk::hash::Hash; use solana_sdk::{account::ReadableAccount, transaction::Transaction}; use std::{ collections::{HashMap, HashSet}, @@ -55,6 +56,12 @@ impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { .map(|commit| commit.slot) .max() .unwrap(); + // Safety we just obtained the max slot from the scheduled commits + let ephemereal_blockhash = scheduled_commits + .iter() + .find(|commit| commit.slot == max_slot) + .map(|commit| commit.blockhash) + .unwrap(); changeset.slot = max_slot; @@ -155,7 +162,7 @@ impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { } } - self.process_changeset(changeset, sent_commits); + self.process_changeset(changeset, sent_commits, ephemereal_blockhash); Ok(()) } @@ -198,6 +205,7 @@ impl RemoteScheduledCommitsProcessor { &self, changeset: Changeset, mut sent_commits: HashMap, + ephemeral_blockhash: Hash, ) { // We process the changeset on a separate task in order to not block // the validator (slot advance) itself @@ -208,6 
+216,19 @@ impl RemoteScheduledCommitsProcessor { tokio::task::spawn(async move { // Create one sent commit transaction per bundle in our validator let changeset_metadata = ChangesetMeta::from(&changeset); + debug!( + "Committing changeset with {} accounts", + changeset_metadata.accounts.len() + ); + committor_service + .commit_changeset(changeset, ephemeral_blockhash, true) + .await + // TODO: @@@ + .unwrap(); + debug!( + "Committed changeset with {} accounts", + changeset_metadata.accounts.len() + ); for bundle_id in changeset_metadata .accounts .iter() From 5ce7a2030a1dfd6175678167abb17c0f0af3d473 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 12 May 2025 09:40:52 +0545 Subject: [PATCH 13/58] chore: update ix tests cargo --- test-integration/Cargo.lock | 213 ++++++++++++++++++++++++++++++------ test-integration/Cargo.toml | 7 +- 2 files changed, 182 insertions(+), 38 deletions(-) diff --git a/test-integration/Cargo.lock b/test-integration/Cargo.lock index 141590f5..f479b90c 100644 --- a/test-integration/Cargo.lock +++ b/test-integration/Cargo.lock @@ -1122,6 +1122,7 @@ dependencies = [ [[package]] name = "conjunto-addresses" version = "0.0.0" +source = "git+https://github.com/magicblock-labs/conjunto.git?rev=bf82b45#bf82b453af9f0b25a81056378d6bcdf06ef53b53" dependencies = [ "paste", "solana-sdk", @@ -1130,6 +1131,7 @@ dependencies = [ [[package]] name = "conjunto-core" version = "0.0.0" +source = "git+https://github.com/magicblock-labs/conjunto.git?rev=bf82b45#bf82b453af9f0b25a81056378d6bcdf06ef53b53" dependencies = [ "async-trait", "serde", @@ -1141,13 +1143,14 @@ dependencies = [ [[package]] name = "conjunto-lockbox" version = "0.0.0" +source = "git+https://github.com/magicblock-labs/conjunto.git?rev=bf82b45#bf82b453af9f0b25a81056378d6bcdf06ef53b53" dependencies = [ "async-trait", "bytemuck", "conjunto-addresses", "conjunto-core", "conjunto-providers", - "magicblock-delegation-program", + "magicblock-delegation-program 1.0.0 
(git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", "serde", "solana-rpc-client", "solana-rpc-client-api", @@ -1158,6 +1161,7 @@ dependencies = [ [[package]] name = "conjunto-providers" version = "0.0.0" +source = "git+https://github.com/magicblock-labs/conjunto.git?rev=bf82b45#bf82b453af9f0b25a81056378d6bcdf06ef53b53" dependencies = [ "async-trait", "conjunto-addresses", @@ -1172,6 +1176,7 @@ dependencies = [ [[package]] name = "conjunto-transwise" version = "0.0.0" +source = "git+https://github.com/magicblock-labs/conjunto.git?rev=bf82b45#bf82b453af9f0b25a81056378d6bcdf06ef53b53" dependencies = [ "async-trait", "conjunto-core", @@ -1741,28 +1746,29 @@ dependencies = [ [[package]] name = "ephemeral-rollups-sdk" -version = "0.2.4" +version = "0.2.5" +source = "git+https://github.com/magicblock-labs/ephemeral-rollups-sdk.git?rev=c1fcb91#c1fcb917504751ab513c471e4381321d7d40bb91" dependencies = [ "borsh 0.10.4", "ephemeral-rollups-sdk-attribute-commit", "ephemeral-rollups-sdk-attribute-delegate", "ephemeral-rollups-sdk-attribute-ephemeral", - "paste", "solana-program", ] [[package]] name = "ephemeral-rollups-sdk-attribute-commit" -version = "0.2.4" +version = "0.2.5" +source = "git+https://github.com/magicblock-labs/ephemeral-rollups-sdk.git?rev=c1fcb91#c1fcb917504751ab513c471e4381321d7d40bb91" dependencies = [ - "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "ephemeral-rollups-sdk-attribute-delegate" -version = "0.2.4" +version = "0.2.5" +source = "git+https://github.com/magicblock-labs/ephemeral-rollups-sdk.git?rev=c1fcb91#c1fcb917504751ab513c471e4381321d7d40bb91" dependencies = [ "proc-macro2", "quote", @@ -1771,7 +1777,8 @@ dependencies = [ [[package]] name = "ephemeral-rollups-sdk-attribute-ephemeral" -version = "0.2.4" +version = "0.2.5" +source = "git+https://github.com/magicblock-labs/ephemeral-rollups-sdk.git?rev=c1fcb91#c1fcb917504751ab513c471e4381321d7d40bb91" dependencies = [ "proc-macro2", "quote", @@ -1823,7 +1830,7 
@@ dependencies = [ [[package]] name = "expiring-hashmap" -version = "0.1.0" +version = "0.1.1" [[package]] name = "fake-simd" @@ -1831,6 +1838,18 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "fast-math" version = "0.1.1" @@ -2159,7 +2178,7 @@ dependencies = [ [[package]] name = "geyser-grpc-proto" -version = "0.1.0" +version = "0.1.1" dependencies = [ "anyhow", "bincode", @@ -2319,6 +2338,15 @@ dependencies = [ "foldhash", ] +[[package]] +name = "hashlink" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +dependencies = [ + "hashbrown 0.15.2", +] + [[package]] name = "headers" version = "0.3.9" @@ -2906,8 +2934,10 @@ version = "0.0.0" dependencies = [ "anyhow", "borsh 1.5.7", + "log", "magicblock-config", "magicblock-core", + "magicblock-delegation-program 1.0.0", "rayon", "serde", "solana-rpc-client", @@ -3296,6 +3326,17 @@ dependencies = [ "libsecp256k1-core", ] +[[package]] +name = "libsqlite3-sys" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb8270bb4060bd76c6e96f20c52d80620f1d82a3470885694e41e0f81ef6fe7" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" version = "1.1.22" @@ -3416,7 +3457,7 @@ dependencies = [ [[package]] name = "magicblock-account-cloner" -version = "0.1.0" +version = "0.1.1" 
dependencies = [ "conjunto-transwise", "futures-util", @@ -3425,6 +3466,7 @@ dependencies = [ "magicblock-account-fetcher", "magicblock-account-updates", "magicblock-accounts-api", + "magicblock-committor-service", "magicblock-core", "magicblock-metrics", "magicblock-mutator", @@ -3436,7 +3478,7 @@ dependencies = [ [[package]] name = "magicblock-account-dumper" -version = "0.1.0" +version = "0.1.1" dependencies = [ "bincode", "magicblock-bank", @@ -3449,7 +3491,7 @@ dependencies = [ [[package]] name = "magicblock-account-fetcher" -version = "0.1.0" +version = "0.1.1" dependencies = [ "async-trait", "conjunto-transwise", @@ -3464,7 +3506,7 @@ dependencies = [ [[package]] name = "magicblock-account-updates" -version = "0.1.0" +version = "0.1.1" dependencies = [ "bincode", "conjunto-transwise", @@ -3482,7 +3524,7 @@ dependencies = [ [[package]] name = "magicblock-accounts" -version = "0.1.0" +version = "0.1.1" dependencies = [ "async-trait", "conjunto-transwise", @@ -3494,8 +3536,9 @@ dependencies = [ "magicblock-account-updates", "magicblock-accounts-api", "magicblock-bank", + "magicblock-committor-service", "magicblock-core", - "magicblock-delegation-program", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", "magicblock-metrics", "magicblock-mutator", "magicblock-processor", @@ -3511,7 +3554,7 @@ dependencies = [ [[package]] name = "magicblock-accounts-api" -version = "0.1.0" +version = "0.1.1" dependencies = [ "magicblock-bank", "solana-sdk", @@ -3519,7 +3562,7 @@ dependencies = [ [[package]] name = "magicblock-accounts-db" -version = "0.1.0" +version = "0.1.1" dependencies = [ "lmdb-rkv", "log", @@ -3535,7 +3578,7 @@ dependencies = [ [[package]] name = "magicblock-api" -version = "0.1.0" +version = "0.1.1" dependencies = [ "agave-geyser-plugin-interface", "anyhow", @@ -3555,6 +3598,7 @@ dependencies = [ "magicblock-accounts-api", "magicblock-accounts-db", "magicblock-bank", + 
"magicblock-committor-service", "magicblock-config", "magicblock-core", "magicblock-geyser-plugin", @@ -3580,7 +3624,7 @@ dependencies = [ [[package]] name = "magicblock-bank" -version = "0.1.0" +version = "0.1.1" dependencies = [ "agave-geyser-plugin-interface", "bincode", @@ -3613,9 +3657,47 @@ dependencies = [ "tempfile", ] +[[package]] +name = "magicblock-committor-program" +version = "0.0.0" +dependencies = [ + "borsh 1.5.7", + "borsh-derive 1.5.7", + "log", + "paste", + "solana-account", + "solana-program", + "solana-pubkey", + "thiserror 2.0.11", +] + +[[package]] +name = "magicblock-committor-service" +version = "0.0.0" +dependencies = [ + "base64 0.22.1", + "bincode", + "borsh 1.5.7", + "log", + "magicblock-committor-program", + "magicblock-delegation-program 1.0.0", + "magicblock-rpc-client", + "magicblock-table-mania", + "rusqlite", + "solana-account", + "solana-pubkey", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-transaction-status-client-types", + "thiserror 2.0.11", + "tokio", + "tokio-util 0.7.13", +] + [[package]] name = "magicblock-config" -version = "0.1.0" +version = "0.1.1" dependencies = [ "isocountry", "magicblock-accounts-db", @@ -3630,7 +3712,7 @@ dependencies = [ [[package]] name = "magicblock-core" -version = "0.1.0" +version = "0.1.1" dependencies = [ "solana-sdk", ] @@ -3650,9 +3732,25 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "magicblock-delegation-program" +version = "1.0.0" +source = "git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c#4af7f1cefe0915f0760ed5c38b25b7d41c31a474" +dependencies = [ + "bincode", + "borsh 1.5.7", + "bytemuck", + "num_enum", + "paste", + "solana-curve25519", + "solana-program", + "solana-security-txt", + "thiserror 1.0.69", +] + [[package]] name = "magicblock-geyser-plugin" -version = "0.1.0" +version = "0.1.1" dependencies = [ "agave-geyser-plugin-interface", "anyhow", @@ -3679,7 +3777,7 @@ dependencies = [ [[package]] name = 
"magicblock-ledger" -version = "0.1.0" +version = "0.1.1" dependencies = [ "bincode", "byteorder", @@ -3698,7 +3796,7 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-sdk", - "solana-storage-proto 0.1.0", + "solana-storage-proto 0.1.1", "solana-svm", "solana-timings", "solana-transaction-status", @@ -3709,7 +3807,7 @@ dependencies = [ [[package]] name = "magicblock-metrics" -version = "0.1.0" +version = "0.1.1" dependencies = [ "http-body-util", "hyper 1.6.0", @@ -3723,7 +3821,7 @@ dependencies = [ [[package]] name = "magicblock-mutator" -version = "0.1.0" +version = "0.1.1" dependencies = [ "bincode", "log", @@ -3736,7 +3834,7 @@ dependencies = [ [[package]] name = "magicblock-perf-service" -version = "0.1.0" +version = "0.1.1" dependencies = [ "log", "magicblock-bank", @@ -3745,7 +3843,7 @@ dependencies = [ [[package]] name = "magicblock-processor" -version = "0.1.0" +version = "0.1.1" dependencies = [ "lazy_static", "log", @@ -3766,7 +3864,7 @@ dependencies = [ [[package]] name = "magicblock-program" -version = "0.1.0" +version = "0.1.1" dependencies = [ "bincode", "lazy_static", @@ -3783,7 +3881,7 @@ dependencies = [ [[package]] name = "magicblock-pubsub" -version = "0.1.0" +version = "0.1.1" dependencies = [ "bincode", "geyser-grpc-proto", @@ -3805,7 +3903,7 @@ dependencies = [ [[package]] name = "magicblock-rpc" -version = "0.1.0" +version = "0.1.1" dependencies = [ "base64 0.21.7", "bincode", @@ -3838,9 +3936,38 @@ dependencies = [ "tokio", ] +[[package]] +name = "magicblock-rpc-client" +version = "0.0.0" +dependencies = [ + "log", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "solana-transaction-status-client-types", + "thiserror 2.0.11", + "tokio", +] + +[[package]] +name = "magicblock-table-mania" +version = "0.0.0" +dependencies = [ + "ed25519-dalek", + "log", + "magicblock-rpc-client", + "sha3", + "solana-pubkey", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "thiserror 2.0.11", + "tokio", +] + 
[[package]] name = "magicblock-tokens" -version = "0.1.0" +version = "0.1.1" dependencies = [ "log", "magicblock-bank", @@ -3855,7 +3982,7 @@ dependencies = [ [[package]] name = "magicblock-transaction-status" -version = "0.1.0" +version = "0.1.1" dependencies = [ "crossbeam-channel", "log", @@ -3867,7 +3994,7 @@ dependencies = [ [[package]] name = "magicblock-version" -version = "0.1.0" +version = "0.1.1" dependencies = [ "rustc_version", "semver", @@ -5314,6 +5441,20 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "rusqlite" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37e34486da88d8e051c7c0e23c3f15fd806ea8546260aa2fec247e97242ec143" +dependencies = [ + "bitflags 2.8.0", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -8590,7 +8731,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "0.1.0" +version = "0.1.1" dependencies = [ "bincode", "bs58 0.4.0", @@ -9957,7 +10098,7 @@ dependencies = [ [[package]] name = "test-tools-core" -version = "0.1.0" +version = "0.1.1" dependencies = [ "env_logger 0.11.6", "log", diff --git a/test-integration/Cargo.toml b/test-integration/Cargo.toml index df1f0458..7edd2579 100644 --- a/test-integration/Cargo.toml +++ b/test-integration/Cargo.toml @@ -28,10 +28,13 @@ ephemeral-rollups-sdk = { git = "https://github.com/magicblock-labs/ephemeral-ro integration-test-tools = { path = "test-tools" } log = "0.4.20" magicblock-api = { path = "../magicblock-api" } -magicblock-accounts-db = { path = "../magicblock-accounts-db", features = [ "dev-tools" ] } -magic-domain-program = { git = "https://github.com/magicblock-labs/magic-domain-program.git", rev = "eba7644", default-features = false} +magicblock-accounts-db = { path = "../magicblock-accounts-db", features = [ + "dev-tools", +] } +magic-domain-program = { git = 
"https://github.com/magicblock-labs/magic-domain-program.git", rev = "eba7644", default-features = false } magicblock-config = { path = "../magicblock-config" } magicblock-core = { path = "../magicblock-core" } +magicblock-delegation-program = { path = "../../delegation-program" } program-flexi-counter = { path = "./programs/flexi-counter" } program-schedulecommit = { path = "programs/schedulecommit" } program-schedulecommit-security = { path = "programs/schedulecommit-security" } From b54cd3b2dbd08cfa5ec2aa6948260d9c289ec575 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 12 May 2025 09:41:53 +0545 Subject: [PATCH 14/58] chore: comment with requirements for schedule commit tests --- .../schedulecommit/test-scenarios/tests/01_commits.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs b/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs index 8264bde1..46533b1f 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/01_commits.rs @@ -18,6 +18,15 @@ use utils::{ }; mod utils; +// NOTE: This and all other schedule commit tests depend on the following accounts +// loaded in the mainnet cluster, i.e. 
the solana-test-validator: +// +// validator: tEsT3eV6RFCWs1BZ7AXTzasHqTtMnMLCB2tjQ42TDXD +// protocol fees vault: 7JrkjmZPprHwtuvtuGTXp9hwfGYFAQLnLeFM52kqAgXg +// validator fees vault: DUH8h7rYjdTPYyBUEGAUwZv9ffz5wiM45GdYWYzogXjp +// delegation program: DELeGGvXpWV2fqJUhqcF5ZSYMS4JTLjteaAMARRSaeSh +// committor program: corabpNrkBEqbTZP7xfJgSWTdBmVdLf1PARWXZbcMcS + #[test] fn test_committing_one_account() { run_test!({ From dc2da585e3d52b2a4147da28ffb605d542bf4b29 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 13 May 2025 13:31:04 +0545 Subject: [PATCH 15/58] chore: improve test logs --- .../schedulecommit/test-scenarios/tests/utils/mod.rs | 5 +++-- test-integration/test-tools/Cargo.toml | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs b/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs index a807dd65..14833eac 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs @@ -215,9 +215,10 @@ pub fn assert_account_was_undelegated_on_chain( let owner = ctx.fetch_chain_account_owner(pda).unwrap(); assert_ne!( owner, DELEGATION_PROGRAM_ID, - "not owned by delegation program" + "{} not owned by delegation program", + pda ); - assert_eq!(owner, new_owner, "new owner"); + assert_eq!(owner, new_owner, "{} has new owner", pda); } #[allow(dead_code)] // used in 02_commit_and_undelegate.rs diff --git a/test-integration/test-tools/Cargo.toml b/test-integration/test-tools/Cargo.toml index 1fbaa830..50c3719a 100644 --- a/test-integration/test-tools/Cargo.toml +++ b/test-integration/test-tools/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true [dependencies] anyhow = { workspace = true } borsh = { workspace = true } +log = { workspace = true } rayon = { workspace = true } serde = { workspace = true } magicblock-core = { workspace = true } From 
560fd6b78b7dde9c5ce6f4380a4adeea73356686 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 13 May 2025 19:51:31 +0545 Subject: [PATCH 16/58] chore: include compute unit price when initializing committor --- magicblock-api/src/magic_validator.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/magicblock-api/src/magic_validator.rs b/magicblock-api/src/magic_validator.rs index 7c02063e..1bd6e42a 100644 --- a/magicblock-api/src/magic_validator.rs +++ b/magicblock-api/src/magic_validator.rs @@ -38,7 +38,9 @@ use magicblock_bank::{ program_loader::load_programs_into_bank, transaction_logs::TransactionLogCollectorFilter, }; -use magicblock_committor_service::{config::ChainConfig, CommittorService}; +use magicblock_committor_service::{ + config::ChainConfig, CommittorService, ComputeBudgetConfig, +}; use magicblock_config::{EphemeralConfig, LifecycleMode, ProgramConfig}; use magicblock_geyser_plugin::rpc::GeyserRpcService; use magicblock_ledger::{ @@ -311,11 +313,14 @@ impl MagicValidator { identity_keypair.insecure_clone(), // TODO: @@@ config or inside ledger dir "/tmp/committor_service.sqlite", - &ChainConfig { + ChainConfig { rpc_uri: remote_rpc_config.url().to_string(), commitment: remote_rpc_config .commitment() .unwrap_or(CommitmentLevel::Confirmed), + compute_budget_config: ComputeBudgetConfig::new( + accounts_config.commit_compute_unit_price, + ), }, )?); From b3445c13f01f6e78096d7b0451bbaecc6306ea50 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 13 May 2025 19:52:19 +0545 Subject: [PATCH 17/58] fix: mark accounts to be undelegated --- magicblock-accounts/src/remote_scheduled_commits_processor.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/magicblock-accounts/src/remote_scheduled_commits_processor.rs b/magicblock-accounts/src/remote_scheduled_commits_processor.rs index c47bf8ba..44d1cd5f 100644 --- a/magicblock-accounts/src/remote_scheduled_commits_processor.rs +++ 
b/magicblock-accounts/src/remote_scheduled_commits_processor.rs @@ -159,6 +159,9 @@ impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { bundle_id, }, ); + if committee.undelegation_requested { + changeset.request_undelegation(committee.pubkey); + } } } From 24bb27aee9999f852af18d0cf9cf01fba4f90df1 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 14 May 2025 09:58:01 +0545 Subject: [PATCH 18/58] chore: properly handle some unwraps --- .../src/remote_scheduled_commits_processor.rs | 48 ++++++++++++++----- 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/magicblock-accounts/src/remote_scheduled_commits_processor.rs b/magicblock-accounts/src/remote_scheduled_commits_processor.rs index 44d1cd5f..f2102e28 100644 --- a/magicblock-accounts/src/remote_scheduled_commits_processor.rs +++ b/magicblock-accounts/src/remote_scheduled_commits_processor.rs @@ -131,7 +131,9 @@ impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { // Collect all SentCommit info available at this stage // We add the chain_signatures after we sent off the changeset let sent_commit = SentCommit { + chain_signatures: vec![], commit_id: commit.id, + slot: commit.slot, payer: commit.payer, blockhash: commit.blockhash, included_pubkeys: committees @@ -141,7 +143,6 @@ impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { excluded_pubkeys, feepayers, requested_undelegation: commit.request_undelegation, - ..Default::default() }; sent_commits.insert( commit.id, @@ -223,28 +224,51 @@ impl RemoteScheduledCommitsProcessor { "Committing changeset with {} accounts", changeset_metadata.accounts.len() ); - committor_service + match committor_service .commit_changeset(changeset, ephemeral_blockhash, true) .await - // TODO: @@@ - .unwrap(); - debug!( - "Committed changeset with {} accounts", - changeset_metadata.accounts.len() - ); + { + Ok(Some(reqid)) => { + debug!( + "Committed changeset with {} accounts via reqid {}", + 
changeset_metadata.accounts.len(), + reqid + ); + } + Ok(None) => { + debug!( + "Committed changeset with {} accounts, but did not get a reqid", + changeset_metadata.accounts.len() + ); + } + Err(err) => { + error!( + "Tried to commit changeset with {} accounts but failed to send request ({:#?})", + changeset_metadata.accounts.len(),err + ); + } + } for bundle_id in changeset_metadata .accounts .iter() .map(|account| account.bundle_id) .collect::>() { - match committor_service + let bundle_signatures = match committor_service .get_bundle_signatures(bundle_id) .await - // TODO: @@@ - .unwrap() - .unwrap() { + Ok(Ok(sig)) => sig, + Ok(Err(err)) => { + error!("Encountered error while getting bundle signatures for {}: {:?}", bundle_id, err); + continue; + } + Err(err) => { + error!("Encountered error while getting bundle signatures for {}: {:?}", bundle_id, err); + continue; + } + }; + match bundle_signatures { Some(BundleSignatureRow { processed_signature, finalized_signature, From cc389e78ddb2f9a1b02bfe0f044024d0caeaa504 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 14 May 2025 10:08:23 +0545 Subject: [PATCH 19/58] test: general improvements + fixes - at this point schedule commit tests pass with maximum concurrency --- .../test-scenarios/tests/03_commits_fee_payer.rs | 2 +- .../test-scenarios/tests/utils/mod.rs | 9 ++++++++- .../test-tools/src/integration_test_context.rs | 14 ++++++++------ .../test-tools/src/scheduled_commits.rs | 6 +++--- 4 files changed, 20 insertions(+), 11 deletions(-) diff --git a/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs b/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs index 2325584e..f04c8bd7 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs @@ -125,6 +125,6 @@ fn test_committing_fee_payer_escrowing_lamports() { 
assert_two_committees_synchronized_count(&ctx, &res, 1); // The fee payer should have been committed - assert_feepayer_was_committed(&ctx, &res); + assert_feepayer_was_committed(&ctx, &res, true); }); } diff --git a/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs b/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs index 14833eac..13ac5171 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs @@ -101,6 +101,7 @@ pub fn assert_two_committees_were_committed( pub fn assert_feepayer_was_committed( ctx: &ScheduleCommitTestContext, res: &ScheduledCommitResult, + finalize: bool, ) { let payer = ctx.payer.pubkey(); @@ -109,7 +110,13 @@ pub fn assert_feepayer_was_committed( let commit_payer = res.feepayers.iter().find(|(p, _)| p == &payer); assert!(commit_payer.is_some(), "should have committed payer"); - assert_eq!(res.sigs.len(), 1, "should have 1 on chain sig"); + let sig_len = if finalize { 2 } else { 1 }; + assert_eq!( + res.sigs.len(), + sig_len, + "should have {} on chain sig", + sig_len + ); } #[allow(dead_code)] // used in 02_commit_and_undelegate.rs diff --git a/test-integration/test-tools/src/integration_test_context.rs b/test-integration/test-tools/src/integration_test_context.rs index 22245254..4afd9ccc 100644 --- a/test-integration/test-tools/src/integration_test_context.rs +++ b/test-integration/test-tools/src/integration_test_context.rs @@ -131,9 +131,9 @@ impl IntegrationTestContext { ) -> Option> { let rpc_client = rpc_client.or(self.chain_client.as_ref())?; - // Try this up to 10 times since devnet here returns the version response instead of + // Try this up to 50 times since devnet here returns the version response instead of // the EncodedConfirmedTransactionWithStatusMeta at times - for _ in 0..10 { + for idx in 1..=50 { let status = match rpc_client.get_transaction_with_config( &sig, RpcTransactionConfig { @@ -143,10 
+143,12 @@ impl IntegrationTestContext { ) { Ok(status) => status, Err(err) => { - warn!( - "Failed to fetch transaction from {}: {:?}", - label, err - ); + if idx % 10 == 0 { + warn!( + "Failed to fetch transaction from {}: {:?}", + label, err + ); + } sleep(Duration::from_millis(400)); continue; } diff --git a/test-integration/test-tools/src/scheduled_commits.rs b/test-integration/test-tools/src/scheduled_commits.rs index 5f9e723b..e4c11ef3 100644 --- a/test-integration/test-tools/src/scheduled_commits.rs +++ b/test-integration/test-tools/src/scheduled_commits.rs @@ -214,9 +214,9 @@ impl IntegrationTestContext { let ephem_account = T::try_from_slice(&ephem_data) .with_context(|| { format!( - "Failed to deserialize ephemeral account data for {:?}", - pubkey - ) + "Failed to deserialize ephemeral account data for {:?}", + pubkey + ) })?; committed_accounts.insert(pubkey, ephem_account); }; From f7304cf59ede839a791556a22f935ca47dc5d6ef Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 14 May 2025 11:02:56 +0545 Subject: [PATCH 20/58] chore: use trait for committor service and create stub to use in tests --- magicblock-account-cloner/Cargo.toml | 1 + .../src/remote_account_cloner_client.rs | 6 ++- .../src/remote_account_cloner_worker.rs | 15 +++--- magicblock-accounts/src/accounts_manager.rs | 3 -- .../src/external_accounts_manager.rs | 8 +++- .../src/remote_scheduled_commits_processor.rs | 30 +++++++----- magicblock-accounts/src/traits.rs | 6 ++- .../tests/stubs/changeset_committor_stub.rs | 47 +++++++++++++++++++ magicblock-accounts/tests/stubs/mod.rs | 1 + .../stubs/scheduled_commits_processor_stub.rs | 6 ++- magicblock-api/src/magic_validator.rs | 5 +- magicblock-api/src/tickers.rs | 8 +++- 12 files changed, 103 insertions(+), 33 deletions(-) create mode 100644 magicblock-accounts/tests/stubs/changeset_committor_stub.rs diff --git a/magicblock-account-cloner/Cargo.toml b/magicblock-account-cloner/Cargo.toml index c6767cc0..5bfc72ea 100644 --- 
a/magicblock-account-cloner/Cargo.toml +++ b/magicblock-account-cloner/Cargo.toml @@ -16,6 +16,7 @@ magicblock-account-updates = { workspace = true } magicblock-account-dumper = { workspace = true } magicblock-accounts-api = { workspace = true } magicblock-core = { workspace = true } +magicblock-committor-service = { workspace = true } magicblock-metrics = { workspace = true } magicblock-mutator = { workspace = true } solana-sdk = { workspace = true } diff --git a/magicblock-account-cloner/src/remote_account_cloner_client.rs b/magicblock-account-cloner/src/remote_account_cloner_client.rs index 76f9f893..d3070022 100644 --- a/magicblock-account-cloner/src/remote_account_cloner_client.rs +++ b/magicblock-account-cloner/src/remote_account_cloner_client.rs @@ -11,6 +11,7 @@ use magicblock_account_dumper::AccountDumper; use magicblock_account_fetcher::AccountFetcher; use magicblock_account_updates::AccountUpdates; use magicblock_accounts_api::InternalAccountProvider; +use magicblock_committor_service::ChangesetCommittor; use solana_sdk::pubkey::Pubkey; use tokio::sync::{mpsc::UnboundedSender, oneshot::channel}; @@ -25,14 +26,15 @@ pub struct RemoteAccountClonerClient { } impl RemoteAccountClonerClient { - pub fn new( - worker: &RemoteAccountClonerWorker, + pub fn new( + worker: &RemoteAccountClonerWorker, ) -> Self where IAP: InternalAccountProvider, AFE: AccountFetcher, AUP: AccountUpdates, ADU: AccountDumper, + CC: ChangesetCommittor, { Self { clone_request_sender: worker.get_clone_request_sender(), diff --git a/magicblock-account-cloner/src/remote_account_cloner_worker.rs b/magicblock-account-cloner/src/remote_account_cloner_worker.rs index 094de1bb..90e1d4ad 100644 --- a/magicblock-account-cloner/src/remote_account_cloner_worker.rs +++ b/magicblock-account-cloner/src/remote_account_cloner_worker.rs @@ -18,7 +18,7 @@ use magicblock_account_dumper::AccountDumper; use magicblock_account_fetcher::AccountFetcher; use magicblock_account_updates::AccountUpdates; use 
magicblock_accounts_api::InternalAccountProvider; -use magicblock_committor_service::CommittorService; +use magicblock_committor_service::ChangesetCommittor; use magicblock_metrics::metrics; use magicblock_mutator::idl::{get_pubkey_anchor_idl, get_pubkey_shank_idl}; use solana_sdk::{ @@ -95,12 +95,12 @@ impl ValidatorStage { } } -pub struct RemoteAccountClonerWorker { +pub struct RemoteAccountClonerWorker { internal_account_provider: IAP, account_fetcher: AFE, account_updates: AUP, account_dumper: ADU, - committer_service: Arc, + changeset_committor: Arc, allowed_program_ids: Option>, blacklisted_accounts: HashSet, payer_init_lamports: Option, @@ -114,12 +114,13 @@ pub struct RemoteAccountClonerWorker { validator_identity: Pubkey, } -impl RemoteAccountClonerWorker +impl RemoteAccountClonerWorker where IAP: InternalAccountProvider, AFE: AccountFetcher, AUP: AccountUpdates, ADU: AccountDumper, + CC: ChangesetCommittor, { #[allow(clippy::too_many_arguments)] pub fn new( @@ -127,7 +128,7 @@ where account_fetcher: AFE, account_updates: AUP, account_dumper: ADU, - committer_service: Arc, + changeset_committor: Arc, allowed_program_ids: Option>, blacklisted_accounts: HashSet, payer_init_lamports: Option, @@ -143,8 +144,8 @@ where account_fetcher, account_updates, account_dumper, + changeset_committor, allowed_program_ids, - committer_service, blacklisted_accounts, payer_init_lamports, validator_charges_fees, @@ -657,7 +658,7 @@ where // Allow the committer service to reserve pubkeys in lookup tables // that could be needed when we commit this account map_committor_request_result( - self.committer_service.reserve_pubkeys_for_committee( + self.changeset_committor.reserve_pubkeys_for_committee( *pubkey, delegation_record.owner, ), diff --git a/magicblock-accounts/src/accounts_manager.rs b/magicblock-accounts/src/accounts_manager.rs index f1728fe8..90957e98 100644 --- a/magicblock-accounts/src/accounts_manager.rs +++ b/magicblock-accounts/src/accounts_manager.rs @@ -7,7 +7,6 
@@ use conjunto_transwise::{ use magicblock_account_cloner::{CloneOutputMap, RemoteAccountClonerClient}; use magicblock_accounts_api::BankAccountProvider; use magicblock_bank::bank::Bank; -use magicblock_committor_service::CommittorService; use magicblock_transaction_status::TransactionStatusSender; use solana_rpc_client::nonblocking::rpc_client::RpcClient; use solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair}; @@ -30,7 +29,6 @@ pub type AccountsManager = ExternalAccountsManager< impl AccountsManager { pub fn try_new( - committer_service: Arc, bank: &Arc, cloned_accounts: &CloneOutputMap, remote_account_cloner_client: RemoteAccountClonerClient, @@ -52,7 +50,6 @@ impl AccountsManager { ); let scheduled_commits_processor = RemoteScheduledCommitsProcessor::new( - committer_service, bank.clone(), cloned_accounts.clone(), transaction_status_sender.clone(), diff --git a/magicblock-accounts/src/external_accounts_manager.rs b/magicblock-accounts/src/external_accounts_manager.rs index ef47f9c5..6b09bf8c 100644 --- a/magicblock-accounts/src/external_accounts_manager.rs +++ b/magicblock-accounts/src/external_accounts_manager.rs @@ -16,6 +16,7 @@ use futures_util::future::{try_join, try_join_all}; use log::*; use magicblock_account_cloner::{AccountCloner, AccountClonerOutput}; use magicblock_accounts_api::InternalAccountProvider; +use magicblock_committor_service::ChangesetCommittor; use magicblock_core::magic_program; use solana_sdk::{ account::{AccountSharedData, ReadableAccount}, @@ -402,9 +403,12 @@ where .map(|x| x.last_committed_at()) } - pub async fn process_scheduled_commits(&self) -> AccountsResult<()> { + pub async fn process_scheduled_commits( + &self, + changeset_committor: &Arc, + ) -> AccountsResult<()> { self.scheduled_commits_processor - .process(&self.internal_account_provider) + .process(&self.internal_account_provider, changeset_committor) .await } diff --git a/magicblock-accounts/src/remote_scheduled_commits_processor.rs 
b/magicblock-accounts/src/remote_scheduled_commits_processor.rs index f2102e28..002a44d4 100644 --- a/magicblock-accounts/src/remote_scheduled_commits_processor.rs +++ b/magicblock-accounts/src/remote_scheduled_commits_processor.rs @@ -16,8 +16,8 @@ use magicblock_account_cloner::{ }; use magicblock_accounts_api::InternalAccountProvider; use magicblock_committor_service::{ - persist::BundleSignatureRow, ChangedAccount, Changeset, ChangesetMeta, - CommittorService, + persist::BundleSignatureRow, ChangedAccount, Changeset, ChangesetCommittor, + ChangesetMeta, }; use magicblock_program::{ register_scheduled_commit_sent, FeePayerAccount, Pubkey, SentCommit, @@ -29,7 +29,6 @@ use crate::{ }; pub struct RemoteScheduledCommitsProcessor { - committor_service: Arc, transaction_scheduler: TransactionScheduler, cloned_accounts: CloneOutputMap, bank: Arc, @@ -38,9 +37,14 @@ pub struct RemoteScheduledCommitsProcessor { #[async_trait] impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { - async fn process(&self, account_provider: &IAP) -> AccountsResult<()> + async fn process( + &self, + account_provider: &IAP, + changeset_committor: &Arc, + ) -> AccountsResult<()> where IAP: InternalAccountProvider, + CC: ChangesetCommittor, { let scheduled_commits = self.transaction_scheduler.take_scheduled_commits(); @@ -166,7 +170,12 @@ impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { } } - self.process_changeset(changeset, sent_commits, ephemereal_blockhash); + self.process_changeset( + changeset_committor, + changeset, + sent_commits, + ephemereal_blockhash, + ); Ok(()) } @@ -182,13 +191,11 @@ impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { impl RemoteScheduledCommitsProcessor { pub fn new( - committer_service: Arc, bank: Arc, cloned_accounts: CloneOutputMap, transaction_status_sender: Option, ) -> Self { Self { - committor_service: committer_service, bank, transaction_status_sender, cloned_accounts, @@ -205,15 +212,16 @@ impl 
RemoteScheduledCommitsProcessor { .get(pubkey).cloned() } - fn process_changeset( + fn process_changeset( &self, + changeset_committor: &Arc, changeset: Changeset, mut sent_commits: HashMap, ephemeral_blockhash: Hash, ) { // We process the changeset on a separate task in order to not block // the validator (slot advance) itself - let committor_service = self.committor_service.clone(); + let changeset_committor = changeset_committor.clone(); let bank = self.bank.clone(); let transaction_status_sender = self.transaction_status_sender.clone(); @@ -224,7 +232,7 @@ impl RemoteScheduledCommitsProcessor { "Committing changeset with {} accounts", changeset_metadata.accounts.len() ); - match committor_service + match changeset_committor .commit_changeset(changeset, ephemeral_blockhash, true) .await { @@ -254,7 +262,7 @@ impl RemoteScheduledCommitsProcessor { .map(|account| account.bundle_id) .collect::>() { - let bundle_signatures = match committor_service + let bundle_signatures = match changeset_committor .get_bundle_signatures(bundle_id) .await { diff --git a/magicblock-accounts/src/traits.rs b/magicblock-accounts/src/traits.rs index 34808bc1..94699bf3 100644 --- a/magicblock-accounts/src/traits.rs +++ b/magicblock-accounts/src/traits.rs @@ -1,7 +1,8 @@ -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; use async_trait::async_trait; use magicblock_accounts_api::InternalAccountProvider; +use magicblock_committor_service::ChangesetCommittor; use magicblock_metrics::metrics::HistogramTimer; use solana_rpc_client::rpc_client::SerializableTransaction; use solana_sdk::{ @@ -14,9 +15,10 @@ use crate::errors::AccountsResult; #[async_trait] pub trait ScheduledCommitsProcessor { /// Processes all commits that were scheduled and accepted - async fn process( + async fn process( &self, account_provider: &IAP, + changeset_committor: &Arc, ) -> AccountsResult<()>; /// Returns the number of commits that were scheduled and accepted diff --git 
a/magicblock-accounts/tests/stubs/changeset_committor_stub.rs b/magicblock-accounts/tests/stubs/changeset_committor_stub.rs new file mode 100644 index 00000000..e6752c9d --- /dev/null +++ b/magicblock-accounts/tests/stubs/changeset_committor_stub.rs @@ -0,0 +1,47 @@ +use magicblock_committor_service::ChangesetCommittor; + +#[derive(Default)] +pub struct ChangesetCommittorStub {} + +impl ChangesetCommittor for ChangesetCommittorStub { + fn commit_changeset( + &self, + _changeset: magicblock_committor_service::Changeset, + _ephemeral_blockhash: solana_sdk::hash::Hash, + _finalize: bool, + ) -> tokio::sync::oneshot::Receiver> { + unimplemented!("Not called during tests") + } + + fn get_commit_statuses( + &self, + _reqid: String, + ) -> tokio::sync::oneshot::Receiver< + magicblock_committor_service::error::CommittorServiceResult< + Vec, + >, + > { + unimplemented!("Not called during tests") + } + + fn get_bundle_signatures( + &self, + _bundle_id: u64, + ) -> tokio::sync::oneshot::Receiver< + magicblock_committor_service::error::CommittorServiceResult< + Option, + >, + > { + unimplemented!("Not called during tests") + } + + fn reserve_pubkeys_for_committee( + &self, + _committee: magicblock_program::Pubkey, + _owner: magicblock_program::Pubkey, + ) -> tokio::sync::oneshot::Receiver< + magicblock_committor_service::error::CommittorServiceResult<()>, + > { + unimplemented!("Not called during tests") + } +} diff --git a/magicblock-accounts/tests/stubs/mod.rs b/magicblock-accounts/tests/stubs/mod.rs index 5d245cb1..797bab0b 100644 --- a/magicblock-accounts/tests/stubs/mod.rs +++ b/magicblock-accounts/tests/stubs/mod.rs @@ -1,2 +1,3 @@ pub mod account_committer_stub; +pub mod changeset_committor_stub; pub mod scheduled_commits_processor_stub; diff --git a/magicblock-accounts/tests/stubs/scheduled_commits_processor_stub.rs b/magicblock-accounts/tests/stubs/scheduled_commits_processor_stub.rs index abce9680..9fe51cb3 100644 --- 
a/magicblock-accounts/tests/stubs/scheduled_commits_processor_stub.rs +++ b/magicblock-accounts/tests/stubs/scheduled_commits_processor_stub.rs @@ -1,15 +1,19 @@ +use std::sync::Arc; + use async_trait::async_trait; use magicblock_accounts::{errors::AccountsResult, ScheduledCommitsProcessor}; use magicblock_accounts_api::InternalAccountProvider; +use magicblock_committor_service::ChangesetCommittor; #[derive(Default)] pub struct ScheduledCommitsProcessorStub {} #[async_trait] impl ScheduledCommitsProcessor for ScheduledCommitsProcessorStub { - async fn process( + async fn process( &self, _account_provider: &IAP, + _changeset_committor: &Arc, ) -> AccountsResult<()> { Ok(()) } diff --git a/magicblock-api/src/magic_validator.rs b/magicblock-api/src/magic_validator.rs index 1bd6e42a..f308a4ba 100644 --- a/magicblock-api/src/magic_validator.rs +++ b/magicblock-api/src/magic_validator.rs @@ -152,6 +152,7 @@ pub struct MagicValidator { RemoteAccountFetcherClient, RemoteAccountUpdatesClient, AccountDumperBank, + CommittorService, >, >, remote_account_cloner_handle: Option>, @@ -343,7 +344,6 @@ impl MagicValidator { ); let accounts_manager = Self::init_accounts_manager( - committor_service.clone(), &bank, &remote_account_cloner_worker.get_last_clone_output(), RemoteAccountClonerClient::new(&remote_account_cloner_worker), @@ -434,7 +434,6 @@ impl MagicValidator { } fn init_accounts_manager( - committer_service: Arc, bank: &Arc, cloned_accounts: &CloneOutputMap, remote_account_cloner_client: RemoteAccountClonerClient, @@ -447,7 +446,6 @@ impl MagicValidator { "Failed to derive accounts config from provided magicblock config", ); let accounts_manager = AccountsManager::try_new( - committer_service, bank, cloned_accounts, remote_account_cloner_client, @@ -708,6 +706,7 @@ impl MagicValidator { self.slot_ticker = Some(init_slot_ticker( &self.bank, &self.accounts_manager, + &self.committor_service, Some(self.transaction_status_sender.clone()), self.ledger.clone(), 
Duration::from_millis(self.config.validator.millis_per_slot), diff --git a/magicblock-api/src/tickers.rs b/magicblock-api/src/tickers.rs index 3dd9e690..51a5f629 100644 --- a/magicblock-api/src/tickers.rs +++ b/magicblock-api/src/tickers.rs @@ -9,6 +9,7 @@ use std::{ use log::*; use magicblock_accounts::AccountsManager; use magicblock_bank::bank::Bank; +use magicblock_committor_service::CommittorService; use magicblock_core::magic_program; use magicblock_ledger::Ledger; use magicblock_metrics::metrics; @@ -25,6 +26,7 @@ use crate::slot::advance_slot_and_update_ledger; pub fn init_slot_ticker( bank: &Arc, accounts_manager: &Arc, + committor_service: &Arc, transaction_status_sender: Option, ledger: Arc, tick_duration: Duration, @@ -32,6 +34,7 @@ pub fn init_slot_ticker( ) -> tokio::task::JoinHandle<()> { let bank = bank.clone(); let accounts_manager = accounts_manager.clone(); + let committor_service = committor_service.clone(); let log = tick_duration >= Duration::from_secs(5); tokio::task::spawn(async move { while !exit.load(Ordering::Relaxed) { @@ -62,8 +65,9 @@ pub fn init_slot_ticker( // 2. 
Process those scheduled commits // TODO: fix the possible delay here // https://github.com/magicblock-labs/magicblock-validator/issues/104 - if let Err(err) = - accounts_manager.process_scheduled_commits().await + if let Err(err) = accounts_manager + .process_scheduled_commits(&committor_service) + .await { error!( "Failed to process scheduled commits: {:?}", From 04b01163c842d871833ed822f439e162b79a06d9 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 14 May 2025 12:03:07 +0545 Subject: [PATCH 21/58] chore: update ensure accounts tests to use stub --- magicblock-accounts/tests/ensure_accounts.rs | 29 ++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/magicblock-accounts/tests/ensure_accounts.rs b/magicblock-accounts/tests/ensure_accounts.rs index ee9526d3..cd13be55 100644 --- a/magicblock-accounts/tests/ensure_accounts.rs +++ b/magicblock-accounts/tests/ensure_accounts.rs @@ -19,6 +19,7 @@ use magicblock_accounts_api::InternalAccountProviderStub; use solana_sdk::pubkey::Pubkey; use stubs::{ account_committer_stub::AccountCommitterStub, + changeset_committor_stub::ChangesetCommittorStub, scheduled_commits_processor_stub::ScheduledCommitsProcessorStub, }; use test_tools_core::init_logger; @@ -41,6 +42,7 @@ fn setup_with_lifecycle( account_fetcher: AccountFetcherStub, account_updates: AccountUpdatesStub, account_dumper: AccountDumperStub, + changeset_committor_stub: Arc, lifecycle: LifecycleMode, ) -> (StubbedAccountsManager, CancellationToken, JoinHandle<()>) { let cancellation_token = CancellationToken::new(); @@ -50,6 +52,7 @@ fn setup_with_lifecycle( account_fetcher, account_updates, account_dumper, + changeset_committor_stub, None, HashSet::new(), Some(1_000_000_000), @@ -90,12 +93,14 @@ fn setup_ephem( account_fetcher: AccountFetcherStub, account_updates: AccountUpdatesStub, account_dumper: AccountDumperStub, + changeset_committor_stub: Arc, ) -> (StubbedAccountsManager, CancellationToken, JoinHandle<()>) { setup_with_lifecycle( 
internal_account_provider, account_fetcher, account_updates, account_dumper, + changeset_committor_stub, LifecycleMode::Ephemeral, ) } @@ -108,12 +113,14 @@ async fn test_ensure_readonly_account_not_tracked_nor_in_our_validator() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // Account should be fetchable but not delegated @@ -152,12 +159,14 @@ async fn test_ensure_readonly_account_not_tracked_but_in_our_validator() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // Account should be already in the bank @@ -194,12 +203,14 @@ async fn test_ensure_readonly_account_cloned_but_not_in_our_validator() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // Pre-clone the account @@ -246,12 +257,14 @@ async fn test_ensure_readonly_account_cloned_but_has_been_updated_on_chain() { let account_fetcher = AccountFetcherStub::default(); let account_updates = 
AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // Pre-clone account @@ -304,12 +317,14 @@ async fn test_ensure_readonly_account_cloned_and_no_recent_update_on_chain() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // Pre-clone the account @@ -359,12 +374,14 @@ async fn test_ensure_readonly_account_in_our_validator_and_unseen_writable() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // One already loaded, and one properly delegated @@ -407,6 +424,7 @@ async fn test_ensure_one_delegated_and_one_feepayer_account_writable() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); // Note: since we use a writable new account, we need to allow it as part of the configuration // We can't use an ephemeral's configuration, that forbids new 
accounts to be writable @@ -415,6 +433,7 @@ async fn test_ensure_one_delegated_and_one_feepayer_account_writable() { account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), LifecycleMode::Replica, ); @@ -459,12 +478,14 @@ async fn test_ensure_multiple_accounts_coming_in_over_time() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // Multiple delegated and undelegated accounts fetchable @@ -606,12 +627,14 @@ async fn test_ensure_accounts_seen_as_readonly_can_be_used_as_writable_later() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // A delegated account @@ -698,12 +721,14 @@ async fn test_ensure_accounts_already_known_can_be_reused_as_writable_later() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // Account already loaded in the bank, but is a delegated on-chain @@ -770,12 +795,14 @@ 
async fn test_ensure_accounts_already_ensured_needs_reclone_after_updates() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // Pre-clone account @@ -855,12 +882,14 @@ async fn test_ensure_accounts_already_cloned_can_be_reused_without_updates() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor_stub = Arc::new(ChangesetCommittorStub::default()); let (manager, cancel, handle) = setup_ephem( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor_stub.clone(), ); // Pre-clone the account From d770b950d12fee66c6c9a5252bdd2ea1691883bf Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 14 May 2025 16:53:12 +0545 Subject: [PATCH 22/58] chore: re-enable account cloner and ensure accounts tests --- Cargo.lock | 3 + magicblock-account-cloner/Cargo.toml | 3 + .../tests/remote_account_cloner.rs | 55 ++++++++++++++++++- magicblock-accounts/Cargo.toml | 3 + magicblock-accounts/tests/ensure_accounts.rs | 6 +- .../tests/stubs/changeset_committor_stub.rs | 47 ---------------- magicblock-accounts/tests/stubs/mod.rs | 1 - 7 files changed, 67 insertions(+), 51 deletions(-) delete mode 100644 magicblock-accounts/tests/stubs/changeset_committor_stub.rs diff --git a/Cargo.lock b/Cargo.lock index d4cae323..4d7ca7f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3529,6 +3529,7 @@ dependencies = [ "magicblock-account-fetcher", "magicblock-account-updates", "magicblock-accounts-api", + 
"magicblock-committor-service", "magicblock-core", "magicblock-metrics", "magicblock-mutator", @@ -3763,6 +3764,7 @@ dependencies = [ "solana-rpc-client-api", "solana-sdk", "solana-transaction-status-client-types", + "static_assertions", "thiserror 2.0.12", "tokio", "tokio-util 0.7.13", @@ -4040,6 +4042,7 @@ dependencies = [ "ed25519-dalek", "log", "magicblock-rpc-client", + "rand 0.8.5", "sha3", "solana-pubkey", "solana-rpc-client", diff --git a/magicblock-account-cloner/Cargo.toml b/magicblock-account-cloner/Cargo.toml index 5bfc72ea..d412aa0f 100644 --- a/magicblock-account-cloner/Cargo.toml +++ b/magicblock-account-cloner/Cargo.toml @@ -25,3 +25,6 @@ tokio-util = { workspace = true } thiserror = { workspace = true } [dev-dependencies] +magicblock-committor-service = { workspace = true, features = [ + "dev-context-only-utils", +] } diff --git a/magicblock-account-cloner/tests/remote_account_cloner.rs b/magicblock-account-cloner/tests/remote_account_cloner.rs index a8cab3d7..f266683d 100644 --- a/magicblock-account-cloner/tests/remote_account_cloner.rs +++ b/magicblock-account-cloner/tests/remote_account_cloner.rs @@ -1,4 +1,4 @@ -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; use magicblock_account_cloner::{ standard_blacklisted_accounts, AccountCloner, AccountClonerError, @@ -10,6 +10,7 @@ use magicblock_account_dumper::AccountDumperStub; use magicblock_account_fetcher::AccountFetcherStub; use magicblock_account_updates::AccountUpdatesStub; use magicblock_accounts_api::InternalAccountProviderStub; +use magicblock_committor_service::stubs::ChangesetCommittorStub; use magicblock_mutator::idl::{get_pubkey_anchor_idl, get_pubkey_shank_idl}; use solana_sdk::{ bpf_loader_upgradeable::get_program_data_address, @@ -26,6 +27,7 @@ fn setup_custom( account_fetcher: AccountFetcherStub, account_updates: AccountUpdatesStub, account_dumper: AccountDumperStub, + changeset_committor: Arc, allowed_program_ids: Option>, blacklisted_accounts: HashSet, 
permissions: AccountClonerPermissions, @@ -42,6 +44,7 @@ fn setup_custom( account_fetcher, account_updates, account_dumper, + changeset_committor, allowed_program_ids, blacklisted_accounts, payer_init_lamports, @@ -69,6 +72,7 @@ fn setup_replica( account_fetcher: AccountFetcherStub, account_updates: AccountUpdatesStub, account_dumper: AccountDumperStub, + changeset_committor: Arc, allowed_program_ids: Option>, ) -> ( RemoteAccountClonerClient, @@ -80,6 +84,7 @@ fn setup_replica( account_fetcher, account_updates, account_dumper, + changeset_committor, allowed_program_ids, standard_blacklisted_accounts( &Pubkey::new_unique(), @@ -100,6 +105,7 @@ fn setup_programs_replica( account_fetcher: AccountFetcherStub, account_updates: AccountUpdatesStub, account_dumper: AccountDumperStub, + changeset_committor: Arc, allowed_program_ids: Option>, ) -> ( RemoteAccountClonerClient, @@ -111,6 +117,7 @@ fn setup_programs_replica( account_fetcher, account_updates, account_dumper, + changeset_committor, allowed_program_ids, standard_blacklisted_accounts( &Pubkey::new_unique(), @@ -131,6 +138,7 @@ fn setup_ephemeral( account_fetcher: AccountFetcherStub, account_updates: AccountUpdatesStub, account_dumper: AccountDumperStub, + changeset_committor: Arc, allowed_program_ids: Option>, ) -> ( RemoteAccountClonerClient, @@ -142,6 +150,7 @@ fn setup_ephemeral( account_fetcher, account_updates, account_dumper, + changeset_committor, allowed_program_ids, standard_blacklisted_accounts( &Pubkey::new_unique(), @@ -162,6 +171,7 @@ fn setup_offline( account_fetcher: AccountFetcherStub, account_updates: AccountUpdatesStub, account_dumper: AccountDumperStub, + changeset_committor: Arc, allowed_program_ids: Option>, ) -> ( RemoteAccountClonerClient, @@ -173,6 +183,7 @@ fn setup_offline( account_fetcher, account_updates, account_dumper, + changeset_committor, allowed_program_ids, standard_blacklisted_accounts( &Pubkey::new_unique(), @@ -195,12 +206,14 @@ async fn 
test_clone_allow_feepayer_account_when_ephemeral() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -226,12 +239,14 @@ async fn test_clone_allow_undelegated_account_when_ephemeral() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -259,12 +274,14 @@ async fn test_clone_fails_stale_undelegated_account_when_ephemeral() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -293,12 +310,14 @@ async fn test_clone_allow_delegated_account_when_ephemeral() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = 
AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -324,12 +343,14 @@ async fn test_clone_allow_program_accounts_when_ephemeral() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -376,6 +397,7 @@ async fn test_clone_program_accounts_when_ephemeral_with_whitelist() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); let mut allowed_program_ids = HashSet::new(); allowed_program_ids.insert(allowed_program_id); // Create account cloner worker and client @@ -384,6 +406,7 @@ async fn test_clone_program_accounts_when_ephemeral_with_whitelist() { account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), Some(allowed_program_ids), ); // Account(s) involved @@ -451,12 +474,14 @@ async fn test_clone_refuse_already_written_in_bank() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // 
Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -487,12 +512,14 @@ async fn test_clone_refuse_blacklisted_account() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -522,12 +549,15 @@ async fn test_clone_refuse_feepayer_account_when_programs_replica() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); + // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_programs_replica( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -559,12 +589,14 @@ async fn test_clone_refuse_undelegated_account_when_programs_replica() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_programs_replica( internal_account_provider.clone(), account_fetcher.clone(), 
account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -597,12 +629,14 @@ async fn test_clone_refuse_delegated_account_when_programs_replica() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_programs_replica( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -634,12 +668,14 @@ async fn test_clone_allow_program_accounts_when_programs_replica() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_programs_replica( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -683,12 +719,14 @@ async fn test_clone_allow_undelegated_account_when_replica() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_replica( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -716,12 +754,14 @@ async fn 
test_clone_allow_feepayer_account_when_replica() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_replica( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -747,12 +787,14 @@ async fn test_clone_refuse_any_account_when_offline() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_offline( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -828,12 +870,14 @@ async fn test_clone_will_not_fetch_the_same_thing_multiple_times() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -878,12 +922,15 @@ async fn test_clone_properly_cached_undelegated_account_when_ephemeral() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = 
AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); + // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -931,12 +978,14 @@ async fn test_clone_properly_cached_program() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -1005,12 +1054,14 @@ async fn test_clone_properly_cached_delegated_account_that_changes_state() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved @@ -1096,12 +1147,14 @@ async fn test_clone_properly_upgrading_downgrading_when_created_and_deleted() { let account_fetcher = AccountFetcherStub::default(); let account_updates = AccountUpdatesStub::default(); let account_dumper = AccountDumperStub::default(); + let changeset_committor = Arc::new(ChangesetCommittorStub::default()); // Create account cloner worker and client let (cloner, cancellation_token, 
worker_handle) = setup_ephemeral( internal_account_provider.clone(), account_fetcher.clone(), account_updates.clone(), account_dumper.clone(), + changeset_committor.clone(), None, ); // Account(s) involved diff --git a/magicblock-accounts/Cargo.toml b/magicblock-accounts/Cargo.toml index f91cdf0a..61505240 100644 --- a/magicblock-accounts/Cargo.toml +++ b/magicblock-accounts/Cargo.toml @@ -34,5 +34,8 @@ thiserror = { workspace = true } url = { workspace = true } [dev-dependencies] +magicblock-committor-service = { workspace = true, features = [ + "dev-context-only-utils", +] } test-tools-core = { workspace = true } tokio-util = { workspace = true } diff --git a/magicblock-accounts/tests/ensure_accounts.rs b/magicblock-accounts/tests/ensure_accounts.rs index cd13be55..7c497f78 100644 --- a/magicblock-accounts/tests/ensure_accounts.rs +++ b/magicblock-accounts/tests/ensure_accounts.rs @@ -1,3 +1,4 @@ +use log::*; use std::{collections::HashSet, sync::Arc}; use conjunto_transwise::{ @@ -16,10 +17,10 @@ use magicblock_accounts::{ errors::AccountsError, ExternalAccountsManager, LifecycleMode, }; use magicblock_accounts_api::InternalAccountProviderStub; +use magicblock_committor_service::stubs::ChangesetCommittorStub; use solana_sdk::pubkey::Pubkey; use stubs::{ account_committer_stub::AccountCommitterStub, - changeset_committor_stub::ChangesetCommittorStub, scheduled_commits_processor_stub::ScheduledCommitsProcessorStub, }; use test_tools_core::init_logger; @@ -654,7 +655,8 @@ async fn test_ensure_accounts_seen_as_readonly_can_be_used_as_writable_later() { }, "tx-sig".to_string(), ) - .await; + .await + .inspect_err(|e| error!("Error: {:?}", e)); assert!(result.is_ok()); // Check proper behaviour diff --git a/magicblock-accounts/tests/stubs/changeset_committor_stub.rs b/magicblock-accounts/tests/stubs/changeset_committor_stub.rs deleted file mode 100644 index e6752c9d..00000000 --- a/magicblock-accounts/tests/stubs/changeset_committor_stub.rs +++ /dev/null @@ -1,47 +0,0 
@@ -use magicblock_committor_service::ChangesetCommittor; - -#[derive(Default)] -pub struct ChangesetCommittorStub {} - -impl ChangesetCommittor for ChangesetCommittorStub { - fn commit_changeset( - &self, - _changeset: magicblock_committor_service::Changeset, - _ephemeral_blockhash: solana_sdk::hash::Hash, - _finalize: bool, - ) -> tokio::sync::oneshot::Receiver> { - unimplemented!("Not called during tests") - } - - fn get_commit_statuses( - &self, - _reqid: String, - ) -> tokio::sync::oneshot::Receiver< - magicblock_committor_service::error::CommittorServiceResult< - Vec, - >, - > { - unimplemented!("Not called during tests") - } - - fn get_bundle_signatures( - &self, - _bundle_id: u64, - ) -> tokio::sync::oneshot::Receiver< - magicblock_committor_service::error::CommittorServiceResult< - Option, - >, - > { - unimplemented!("Not called during tests") - } - - fn reserve_pubkeys_for_committee( - &self, - _committee: magicblock_program::Pubkey, - _owner: magicblock_program::Pubkey, - ) -> tokio::sync::oneshot::Receiver< - magicblock_committor_service::error::CommittorServiceResult<()>, - > { - unimplemented!("Not called during tests") - } -} diff --git a/magicblock-accounts/tests/stubs/mod.rs b/magicblock-accounts/tests/stubs/mod.rs index 797bab0b..5d245cb1 100644 --- a/magicblock-accounts/tests/stubs/mod.rs +++ b/magicblock-accounts/tests/stubs/mod.rs @@ -1,3 +1,2 @@ pub mod account_committer_stub; -pub mod changeset_committor_stub; pub mod scheduled_commits_processor_stub; From 88dd40bbb3dc2560d27c034f3471e5a077eb42b3 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 14 May 2025 18:07:25 +0545 Subject: [PATCH 23/58] chore: move committor repo crates into magicblock monorepo - https://github.com/magicblock-labs/committor/tree/73f6a93d43995991fcc684b8853041f43fc19fa2 --- Cargo.lock | 244 +++- Cargo.toml | 20 +- magicblock-committor-program/Cargo.toml | 30 + magicblock-committor-program/src/consts.rs | 43 + magicblock-committor-program/src/error.rs | 34 + 
.../src/instruction.rs | 386 +++++++ .../src/instruction_chunks.rs | 53 + magicblock-committor-program/src/lib.rs | 29 + magicblock-committor-program/src/pdas.rs | 107 ++ magicblock-committor-program/src/processor.rs | 397 +++++++ .../src/state/changeset.rs | 498 ++++++++ .../src/state/changeset_chunks.rs | 165 +++ .../src/state/chunks.rs | 235 ++++ magicblock-committor-program/src/state/mod.rs | 3 + .../src/utils/account.rs | 26 + .../src/utils/asserts.rs | 60 + magicblock-committor-program/src/utils/mod.rs | 15 + .../tests/prog_init_write_and_close.rs | 346 ++++++ .../tests/prog_security.rs | 10 + magicblock-committor-service/Cargo.toml | 47 + .../src/bundle_strategy.rs | 205 ++++ magicblock-committor-service/src/bundles.rs | 273 +++++ .../src/commit/commit_using_args.rs | 299 +++++ .../src/commit/commit_using_buffer.rs | 1028 +++++++++++++++++ .../src/commit/committor_processor.rs | 560 +++++++++ .../src/commit/common.rs | 204 ++++ .../src/commit/mod.rs | 6 + .../src/commit/process_buffers.rs | 239 ++++ .../src/commit_info.rs | 177 +++ .../src/commit_stage.rs | 340 ++++++ .../src/commit_strategy.rs | 635 ++++++++++ .../src/compute_budget.rs | 218 ++++ magicblock-committor-service/src/config.rs | 42 + magicblock-committor-service/src/consts.rs | 15 + magicblock-committor-service/src/error.rs | 127 ++ magicblock-committor-service/src/finalize.rs | 66 ++ magicblock-committor-service/src/lib.rs | 35 + .../src/persist/commit_persister.rs | 254 ++++ .../src/persist/db.rs | 965 ++++++++++++++++ .../src/persist/error.rs | 38 + .../src/persist/mod.rs | 11 + .../src/persist/types/commit_status.rs | 269 +++++ .../src/persist/types/commit_strategy.rs | 54 + .../src/persist/types/commit_type.rs | 28 + .../src/persist/types/mod.rs | 7 + .../src/persist/utils.rs | 58 + .../src/pubkeys_provider.rs | 75 ++ magicblock-committor-service/src/service.rs | 367 ++++++ .../src/stubs/changeset_committor_stub.rs | 140 +++ magicblock-committor-service/src/stubs/mod.rs | 2 + 
.../src/transactions.rs | 778 +++++++++++++ magicblock-committor-service/src/types.rs | 57 + .../src/undelegate.rs | 103 ++ .../todo-tests/ix_commit_local.rs | 886 ++++++++++++++ .../todo-tests/utils/instructions.rs | 50 + .../todo-tests/utils/mod.rs | 51 + .../todo-tests/utils/transactions.rs | 58 + magicblock-rpc-client/Cargo.toml | 21 + magicblock-rpc-client/src/lib.rs | 512 ++++++++ magicblock-table-mania/Cargo.toml | 33 + magicblock-table-mania/src/derive_keypair.rs | 60 + magicblock-table-mania/src/error.rs | 27 + magicblock-table-mania/src/find_tables.rs | 47 + magicblock-table-mania/src/lib.rs | 10 + magicblock-table-mania/src/lookup_table.rs | 535 +++++++++ magicblock-table-mania/src/lookup_table_rc.rs | 708 ++++++++++++ magicblock-table-mania/src/manager.rs | 702 +++++++++++ .../tests/ix_lookup_table.rs | 163 +++ .../tests/ix_release_pubkeys.rs | 106 ++ .../tests/ix_reserve_pubkeys.rs | 132 +++ magicblock-table-mania/tests/utils/mod.rs | 116 ++ 71 files changed, 14612 insertions(+), 28 deletions(-) create mode 100644 magicblock-committor-program/Cargo.toml create mode 100644 magicblock-committor-program/src/consts.rs create mode 100644 magicblock-committor-program/src/error.rs create mode 100644 magicblock-committor-program/src/instruction.rs create mode 100644 magicblock-committor-program/src/instruction_chunks.rs create mode 100644 magicblock-committor-program/src/lib.rs create mode 100644 magicblock-committor-program/src/pdas.rs create mode 100644 magicblock-committor-program/src/processor.rs create mode 100644 magicblock-committor-program/src/state/changeset.rs create mode 100644 magicblock-committor-program/src/state/changeset_chunks.rs create mode 100644 magicblock-committor-program/src/state/chunks.rs create mode 100644 magicblock-committor-program/src/state/mod.rs create mode 100644 magicblock-committor-program/src/utils/account.rs create mode 100644 magicblock-committor-program/src/utils/asserts.rs create mode 100644 
magicblock-committor-program/src/utils/mod.rs create mode 100644 magicblock-committor-program/tests/prog_init_write_and_close.rs create mode 100644 magicblock-committor-program/tests/prog_security.rs create mode 100644 magicblock-committor-service/Cargo.toml create mode 100644 magicblock-committor-service/src/bundle_strategy.rs create mode 100644 magicblock-committor-service/src/bundles.rs create mode 100644 magicblock-committor-service/src/commit/commit_using_args.rs create mode 100644 magicblock-committor-service/src/commit/commit_using_buffer.rs create mode 100644 magicblock-committor-service/src/commit/committor_processor.rs create mode 100644 magicblock-committor-service/src/commit/common.rs create mode 100644 magicblock-committor-service/src/commit/mod.rs create mode 100644 magicblock-committor-service/src/commit/process_buffers.rs create mode 100644 magicblock-committor-service/src/commit_info.rs create mode 100644 magicblock-committor-service/src/commit_stage.rs create mode 100644 magicblock-committor-service/src/commit_strategy.rs create mode 100644 magicblock-committor-service/src/compute_budget.rs create mode 100644 magicblock-committor-service/src/config.rs create mode 100644 magicblock-committor-service/src/consts.rs create mode 100644 magicblock-committor-service/src/error.rs create mode 100644 magicblock-committor-service/src/finalize.rs create mode 100644 magicblock-committor-service/src/lib.rs create mode 100644 magicblock-committor-service/src/persist/commit_persister.rs create mode 100644 magicblock-committor-service/src/persist/db.rs create mode 100644 magicblock-committor-service/src/persist/error.rs create mode 100644 magicblock-committor-service/src/persist/mod.rs create mode 100644 magicblock-committor-service/src/persist/types/commit_status.rs create mode 100644 magicblock-committor-service/src/persist/types/commit_strategy.rs create mode 100644 magicblock-committor-service/src/persist/types/commit_type.rs create mode 100644 
magicblock-committor-service/src/persist/types/mod.rs create mode 100644 magicblock-committor-service/src/persist/utils.rs create mode 100644 magicblock-committor-service/src/pubkeys_provider.rs create mode 100644 magicblock-committor-service/src/service.rs create mode 100644 magicblock-committor-service/src/stubs/changeset_committor_stub.rs create mode 100644 magicblock-committor-service/src/stubs/mod.rs create mode 100644 magicblock-committor-service/src/transactions.rs create mode 100644 magicblock-committor-service/src/types.rs create mode 100644 magicblock-committor-service/src/undelegate.rs create mode 100644 magicblock-committor-service/todo-tests/ix_commit_local.rs create mode 100644 magicblock-committor-service/todo-tests/utils/instructions.rs create mode 100644 magicblock-committor-service/todo-tests/utils/mod.rs create mode 100644 magicblock-committor-service/todo-tests/utils/transactions.rs create mode 100644 magicblock-rpc-client/Cargo.toml create mode 100644 magicblock-rpc-client/src/lib.rs create mode 100644 magicblock-table-mania/Cargo.toml create mode 100644 magicblock-table-mania/src/derive_keypair.rs create mode 100644 magicblock-table-mania/src/error.rs create mode 100644 magicblock-table-mania/src/find_tables.rs create mode 100644 magicblock-table-mania/src/lib.rs create mode 100644 magicblock-table-mania/src/lookup_table.rs create mode 100644 magicblock-table-mania/src/lookup_table_rc.rs create mode 100644 magicblock-table-mania/src/manager.rs create mode 100644 magicblock-table-mania/tests/ix_lookup_table.rs create mode 100644 magicblock-table-mania/tests/ix_release_pubkeys.rs create mode 100644 magicblock-table-mania/tests/ix_reserve_pubkeys.rs create mode 100644 magicblock-table-mania/tests/utils/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 4d7ca7f9..c7baa0bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1185,7 +1185,7 @@ dependencies = [ "conjunto-addresses", "conjunto-core", "conjunto-providers", - "magicblock-delegation-program 1.0.0 
(git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", + "magicblock-delegation-program", "serde", "solana-rpc-client", "solana-rpc-client-api", @@ -1729,6 +1729,18 @@ dependencies = [ "sha2 0.10.8", ] +[[package]] +name = "educe" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "either" version = "1.13.0" @@ -1770,6 +1782,19 @@ dependencies = [ "syn 2.0.95", ] +[[package]] +name = "enum-ordinalize" +version = "3.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.95", +] + [[package]] name = "env_filter" version = "0.1.3" @@ -3603,7 +3628,7 @@ dependencies = [ "magicblock-bank", "magicblock-committor-service", "magicblock-core", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", + "magicblock-delegation-program", "magicblock-metrics", "magicblock-mutator", "magicblock-processor", @@ -3733,7 +3758,7 @@ dependencies = [ [[package]] name = "magicblock-committor-program" -version = "0.0.0" +version = "0.1.1" dependencies = [ "borsh 1.5.5", "borsh-derive 1.5.5", @@ -3741,20 +3766,25 @@ dependencies = [ "paste", "solana-account", "solana-program", + "solana-program-test", "solana-pubkey", - "thiserror 2.0.12", + "solana-sdk", + "thiserror 1.0.69", + "tokio", ] [[package]] name = "magicblock-committor-service" -version = "0.0.0" +version = "0.1.1" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "bincode", "borsh 1.5.5", + "env_logger 0.11.6", + "lazy_static", "log", "magicblock-committor-program", - "magicblock-delegation-program 1.0.0", + 
"magicblock-delegation-program", "magicblock-rpc-client", "magicblock-table-mania", "rusqlite", @@ -3765,7 +3795,7 @@ dependencies = [ "solana-sdk", "solana-transaction-status-client-types", "static_assertions", - "thiserror 2.0.12", + "thiserror 1.0.69", "tokio", "tokio-util 0.7.13", ] @@ -3793,21 +3823,6 @@ dependencies = [ "solana-sdk", ] -[[package]] -name = "magicblock-delegation-program" -version = "1.0.0" -dependencies = [ - "bincode", - "borsh 1.5.5", - "bytemuck", - "num_enum", - "paste", - "solana-curve25519", - "solana-program", - "solana-security-txt", - "thiserror 1.0.69", -] - [[package]] name = "magicblock-delegation-program" version = "1.0.0" @@ -4024,31 +4039,34 @@ dependencies = [ [[package]] name = "magicblock-rpc-client" -version = "0.0.0" +version = "0.1.1" dependencies = [ + "env_logger 0.11.6", "log", "solana-rpc-client", "solana-rpc-client-api", "solana-sdk", "solana-transaction-status-client-types", - "thiserror 2.0.12", + "thiserror 1.0.69", "tokio", ] [[package]] name = "magicblock-table-mania" -version = "0.0.0" +version = "0.1.1" dependencies = [ "ed25519-dalek", + "env_logger 0.11.6", "log", "magicblock-rpc-client", + "paste", "rand 0.8.5", "sha3", "solana-pubkey", "solana-rpc-client", "solana-rpc-client-api", "solana-sdk", - "thiserror 2.0.12", + "thiserror 1.0.69", "tokio", ] @@ -4613,6 +4631,25 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "opentelemetry" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "js-sys", + "lazy_static", + "percent-encoding 2.3.1", + "pin-project", + "rand 0.8.5", + "thiserror 1.0.69", +] + [[package]] name = "parity-ws" version = "0.11.1" @@ -6327,6 +6364,57 @@ dependencies = [ "parking_lot 0.12.3", ] +[[package]] +name = "solana-banks-client" +version = "2.2.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01364483db3a7ad3546695df73eeec869fdb7399e8734b9a4d9ec5426d4bc932" +dependencies = [ + "borsh 1.5.5", + "futures 0.3.31", + "solana-banks-interface", + "solana-program", + "solana-sdk", + "tarpc", + "thiserror 2.0.12", + "tokio", + "tokio-serde", +] + +[[package]] +name = "solana-banks-interface" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d185017c022a9bc7b9b4709fdb15d4a3a4875548bb53d95d49f696476497879" +dependencies = [ + "serde", + "serde_derive", + "solana-sdk", + "tarpc", +] + +[[package]] +name = "solana-banks-server" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f72a966c0ebb198a16db924b4377f1b04dc8040afe0815ccee29cf852b4a0cc" +dependencies = [ + "bincode", + "crossbeam-channel", + "futures 0.3.31", + "solana-banks-interface", + "solana-client", + "solana-feature-set", + "solana-runtime", + "solana-runtime-transaction", + "solana-sdk", + "solana-send-transaction-service", + "solana-svm", + "tarpc", + "tokio", + "tokio-serde", +] + [[package]] name = "solana-big-mod-exp" version = "2.2.1" @@ -7935,6 +8023,43 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "solana-program-test" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02f25b19e0f8ef1f4e30f9aa5d986238edfd68bb35ef66131d8992cb941286f0" +dependencies = [ + "assert_matches", + "async-trait", + "base64 0.22.1", + "bincode", + "chrono-humanize", + "crossbeam-channel", + "log", + "serde", + "solana-accounts-db", + "solana-banks-client", + "solana-banks-interface", + "solana-banks-server", + "solana-bpf-loader-program", + "solana-compute-budget", + "solana-feature-set", + "solana-inline-spl", + "solana-instruction", + "solana-log-collector", + "solana-logger", + "solana-program-runtime", + "solana-runtime", + "solana-sbpf", + "solana-sdk", + "solana-sdk-ids", + 
"solana-svm", + "solana-timings", + "solana-vote-program", + "thiserror 2.0.12", + "tokio", +] + [[package]] name = "solana-pubkey" version = "2.2.1" @@ -10094,6 +10219,41 @@ dependencies = [ "xattr", ] +[[package]] +name = "tarpc" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" +dependencies = [ + "anyhow", + "fnv", + "futures 0.3.31", + "humantime", + "opentelemetry", + "pin-project", + "rand 0.8.5", + "serde", + "static_assertions", + "tarpc-plugins", + "thiserror 1.0.69", + "tokio", + "tokio-serde", + "tokio-util 0.6.10", + "tracing", + "tracing-opentelemetry", +] + +[[package]] +name = "tarpc-plugins" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "task-local-extensions" version = "0.1.4" @@ -10370,6 +10530,22 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-serde" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" +dependencies = [ + "bincode", + "bytes 1.10.1", + "educe", + "futures-core", + "futures-sink", + "pin-project", + "serde", + "serde_json", +] + [[package]] name = "tokio-stream" version = "0.1.17" @@ -10407,6 +10583,7 @@ dependencies = [ "futures-sink", "log", "pin-project-lite", + "slab", "tokio", ] @@ -10618,6 +10795,19 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-opentelemetry" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" +dependencies = [ + "once_cell", + "opentelemetry", + "tracing", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "tracing-subscriber" 
version = "0.3.19" diff --git a/Cargo.toml b/Cargo.toml index cf4c8db3..ffc2e12a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,8 @@ members = [ "magicblock-accounts-db", "magicblock-api", "magicblock-bank", + "magicblock-committor-program", + "magicblock-committor-service", "magicblock-config", "magicblock-core", "magicblock-geyser-plugin", @@ -25,6 +27,8 @@ members = [ "magicblock-processor", "magicblock-pubsub", "magicblock-rpc", + "magicblock-rpc-client", + "magicblock-table-mania", "magicblock-tokens", "magicblock-transaction-status", "magicblock-version", @@ -55,12 +59,15 @@ assert_matches = "1.5.0" async-trait = "0.1.77" base64 = "0.21.7" bincode = "1.3.3" +borsh = { version = "1.5.1", features = ["derive", "unstable__schema"] } +borsh-derive = "1.5.1" bs58 = "0.4.0" byteorder = "1.5.0" cargo-lock = "10.0.0" conjunto-transwise = { git = "https://github.com/magicblock-labs/conjunto.git", rev = "bf82b45" } console-subscriber = "0.2.0" crossbeam-channel = "0.5.11" +ed25519-dalek = "1.0.1" enum-iterator = "1.5.0" env_logger = "0.11.2" expiring-hashmap = { path = "./utils/expiring-hashmap" } @@ -95,7 +102,10 @@ magicblock-accounts-api = { path = "./magicblock-accounts-api" } magicblock-accounts-db = { path = "./magicblock-accounts-db" } magicblock-api = { path = "./magicblock-api" } magicblock-bank = { path = "./magicblock-bank" } -magicblock-committor-service = { path = "../comittor/magicblock-committor-service" } +magicblock-committor-service = { path = "./magicblock-committor-service" } +magicblock-committor-program = { path = "./magicblock-committor-program", features = [ + "no-entrypoint", +] } magicblock-config = { path = "./magicblock-config" } magicblock-core = { path = "./magicblock-core" } magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", rev = "4af7f1c" } @@ -108,6 +118,8 @@ magicblock-processor = { path = "./magicblock-processor" } magicblock-program = { path = "./programs/magicblock" } 
magicblock-pubsub = { path = "./magicblock-pubsub" } magicblock-rpc = { path = "./magicblock-rpc" } +magicblock-rpc-client = { path = "./magicblock-rpc-client" } +magicblock-table-mania = { path = "./magicblock-table-mania" } magicblock-tokens = { path = "./magicblock-tokens" } magicblock-transaction-status = { path = "./magicblock-transaction-status" } magicblock-version = { path = "./magicblock-version" } @@ -123,10 +135,12 @@ protobuf-src = "1.1" rand = "0.8.5" rayon = "1.10.0" rustc_version = "0.4" +rusqlite = { version = "0.34.0", features = ["bundled"] } # bundled sqlite 3.44 semver = "1.0.22" serde = "1.0.217" serde_derive = "1.0" serde_json = "1.0" +sha3 = "0.10.8" solana-account = { git = "https://github.com/magicblock-labs/solana-account.git", rev = "7bdfefc" } solana-accounts-db = { version = "2.2" } solana-account-decoder = { version = "2.2" } @@ -143,7 +157,9 @@ solana-log-collector = { version = "2.2" } solana-measure = { version = "2.2" } solana-metrics = { version = "2.2" } solana-perf = { version = "2.2" } +solana-program = "2.2" solana-program-runtime = { version = "2.2" } +solana-program-test = "2.2" solana-pubkey = { version = "2.2" } solana-rayon-threadlimit = { version = "2.2" } solana-pubsub-client = { version = "2.2" } @@ -157,8 +173,10 @@ solana-storage-proto = { path = "storage-proto" } solana-system-program = { version = "2.2" } solana-timings = "2.2" solana-transaction-status = { version = "2.2" } +solana-transaction-status-client-types = "2.2" spl-token = "=7.0" spl-token-2022 = "=6.0" +static_assertions = "1.1.0" strum = "0.24" strum_macros = "0.24" tempfile = "3.10.1" diff --git a/magicblock-committor-program/Cargo.toml b/magicblock-committor-program/Cargo.toml new file mode 100644 index 00000000..2b17f5b3 --- /dev/null +++ b/magicblock-committor-program/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "magicblock-committor-program" +version.workspace = true +authors.workspace = true +repository.workspace = true +homepage.workspace = 
true +license.workspace = true +edition.workspace = true + +[dependencies] +borsh = { workspace = true } +borsh-derive = { workspace = true } +log = { workspace = true } +paste = { workspace = true } +solana-account = { workspace = true } +solana-program = { workspace = true } +solana-pubkey = { workspace = true } +thiserror = { workspace = true } + +[dev-dependencies] +solana-program-test = { workspace = true } +solana-sdk = { workspace = true } +tokio = { workspace = true } + +[lib] +crate-type = ["cdylib", "lib"] + +[features] +no-entrypoint = [] +default = [] diff --git a/magicblock-committor-program/src/consts.rs b/magicblock-committor-program/src/consts.rs new file mode 100644 index 00000000..4af1f467 --- /dev/null +++ b/magicblock-committor-program/src/consts.rs @@ -0,0 +1,43 @@ +/// Max bytest that can be allocated as part of the one instruction. +/// For buffers that are larger than that ReallocBuffer needs to be +/// invoked 1 or more times after Init completed. +pub const MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE: u16 = 10_240; + +/// The maximum number of instructions that can be added to a single transaction. +/// See: https://github.com/solana-labs/solana/issues/33863 +pub const MAX_INSTRUCTION_TRACE_LENGTH: u8 = 64; + +/// We ran into max transaction size exceeded if we included more than +/// the below amount of instructions in a single transaction. +/// (VersionedTransaction too large: xxxx bytes (max: encoded/raw 1644/1232)) +/// Thus the [MAX_INSTRUCTION_TRACE_LENGTH] is not the upper limit, but we're +/// capped by the size of each instruction. 
(see [crate::instruction_chunks::chunk_realloc_ixs]) +pub const MAX_INSTRUCTION_LENGTH: u8 = 11; + +/// This size is based on exploration of the Write instruction of the BPFUpgradableLoader program +/// +/// It includes the following accounts: +/// +/// - account +/// - authority +/// +/// The write instruction: +/// +/// ```rust +/// pub enum UpgradeableLoaderInstruction { +/// Write { +/// /// Offset at which to write the given bytes. +/// offset: u32, +/// /// Serialized program data +/// bytes: Vec, +/// } +/// } +/// ``` +/// +/// The instruction data size total I measured was 1028 bytes +/// The bytes hold 1012 bytes(see tools/sh/deploy-ix-bytesize) +/// which leaves 16 bytes for: +/// - offset: 4 bytes +/// - instruction discriminator: 1 byte aligned to 4 bytes +/// - both accounts repeated in instruction: 2x 4 bytes 8 bytes +pub const MAX_INSTRUCTION_DATA_SIZE: u16 = 1028; diff --git a/magicblock-committor-program/src/error.rs b/magicblock-committor-program/src/error.rs new file mode 100644 index 00000000..35e7156d --- /dev/null +++ b/magicblock-committor-program/src/error.rs @@ -0,0 +1,34 @@ +use solana_program::msg; +use solana_program::program_error::ProgramError; +use thiserror::Error; + +pub type CommittorResult = std::result::Result; + +#[derive(Error, Debug, Clone)] +pub enum CommittorError { + #[error("Unable to serialize change set: {0}")] + UnableToSerializeChangeSet(String), + + #[error("Pubkey error")] + PubkeyError(#[from] solana_pubkey::PubkeyError), + + #[error("Offset ({0}) must be multiple of chunk size ({1})")] + OffsetMustBeMultipleOfChunkSize(usize, u16), + + #[error("Chunk of size {0} cannot be stored at offset {1} in buffer of size ({2})")] + OffsetChunkOutOfRange(usize, u32, usize), +} + +impl From for ProgramError { + fn from(e: CommittorError) -> Self { + msg!("Error: {:?}", e); + use CommittorError::*; + let n = match e { + UnableToSerializeChangeSet(_) => 0x69000, + PubkeyError(_) => 0x69001, + OffsetMustBeMultipleOfChunkSize(_, 
_) => 0x69002, + OffsetChunkOutOfRange(_, _, _) => 0x69003, + }; + ProgramError::Custom(n) + } +} diff --git a/magicblock-committor-program/src/instruction.rs b/magicblock-committor-program/src/instruction.rs new file mode 100644 index 00000000..8ce2e7c7 --- /dev/null +++ b/magicblock-committor-program/src/instruction.rs @@ -0,0 +1,386 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use solana_program::hash::Hash; +use solana_program::hash::HASH_BYTES; +use solana_program::instruction::{AccountMeta, Instruction}; +use solana_program::system_program; +use solana_pubkey::Pubkey; + +use crate::{consts, pdas}; + +#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)] +pub enum CommittorInstruction { + /// Initializes the buffer and [Chunks] accounts which will be used to + /// [CommittorInstruction::Write] and then [CommittorInstruction::Commit]. + /// + /// Accounts: + /// 0. `[signer]` The validator authority. + /// 1. `[writable]` The PDA holding the [Chunks] data which track the + /// committed chunks. + /// 2. `[writable]` The PDA buffer account into which we accumulate the data to commit. + /// 3. `[]` The system program to facilitate creation of accounts + Init { + /// The on chain address of the account we are committing + /// This is part of the seeds used to derive the buffer and chunk account PDAs. + pubkey: Pubkey, + /// The size that the buffer account needs to have in order to track commits + chunks_account_size: u64, + /// The size that the buffer account needs to have in order to hold all commits + buffer_account_size: u64, + /// The ephemeral blockhash of the changeset we are writing, + /// needed to properly derive the seeds of the PDAs. + blockhash: Hash, + /// The bump to use when deriving seeds and PDA for the [Chunks] account. + chunks_bump: u8, + /// The bump to use when deriving seeds and PDA for the buffer account. + buffer_bump: u8, + /// The number of chunks that the [Chunks] account will track. 
+ chunk_count: usize, + /// The size of each chunk that the [Chunks] account will track. + chunk_size: u16, + }, + /// Accounts: + /// 0. `[signer]` The validator authority. + /// 1. `[writable]` The PDA buffer account into which we accumulate the data to commit. + ReallocBuffer { + /// The on chain address of the account we are committing + /// This is part of the seeds used to derive the buffer and chunk account PDAs. + pubkey: Pubkey, + /// The size that the buffer account needs to have in order to hold all commits + buffer_account_size: u64, + /// The ephemeral blockhash of the changeset we are writing, + /// needed to properly derive the seeds of the PDAs. + blockhash: Hash, + /// The bump to use when deriving seeds and PDA for the buffer account. + buffer_bump: u8, + /// The count of invocations of realloc buffer that this instruction represents. + invocation_count: u16, + }, + /// Writes a chunk of data into the buffer account and updates the [Chunks] to + /// show that the chunk has been written. + /// + /// Accounts: + /// 0. `[signer]` The validator authority. + /// 1. `[writable]` The PDA holding the [Chunks] data which track the + /// committed chunks. + /// 2. `[writable]` The PDA buffer account into which we accumulate the data to commit. + Write { + /// The on chain address of the account we are committing + /// This is part of the seeds used to derive the buffer and chunk account PDAs. + pubkey: Pubkey, + /// The ephemeral blockhash of the changeset we are writing, + /// needed to properly derive the seeds of the PDAs. + blockhash: Hash, + /// The bump to use when deriving seeds and PDA for the [Chunks] account. + chunks_bump: u8, + /// The bump to use when deriving seeds and PDA for the buffer account. + buffer_bump: u8, + /// Offset in the buffer account where to write the data. + offset: u32, + /// The data to write into the buffer account. + data_chunk: Vec, + }, + /// This instruction closes the buffer account and the [Chunks] account. 
+ /// + /// It is called by the validator after the instruction that processes the + /// change set stored in the buffer account and applies the commits to the + /// relevant accounts. + /// Ideally it runs in the same transaction as the 'processs' instruction. + /// + /// The lamports gained due to closing both accounts are transferred to the + /// validator authority. + /// + /// Accounts: + /// 0. `[signer]` The validator authority. + /// 1. `[writable]` The PDA holding the [Chunks] data which tracked the + /// committed chunks and we are now closing. + /// 2. `[writable]` The PDA buffer account we are closing. + Close { + /// The on chain address of the account we committed. + /// This is part of the seeds used to derive the buffer and chunk account PDAs. + pubkey: Pubkey, + /// The ephemeral blockhash of the changeset we are writing, + /// needed to properly derive the seeds of the PDAs. + blockhash: Hash, + /// The bump to use when deriving seeds and PDA for the [Chunks] account. + chunks_bump: u8, + /// The bump to use when deriving seeds and PDA for the buffer account. 
+ buffer_bump: u8, + }, +} + +pub const IX_INIT_SIZE: u16 = + // pubkey: Pubkey, + 32 + + // chunks_account_size: u64, + 8 + + // buffer_account_size: u64, + 8 + + // blockhash: Hash, + HASH_BYTES as u16 + + // chunks_bump: u8, + 8 + + // buffer_bump: u8, + 8 + + // chunk_count: usize, + 8 + + // chunk_size: u16, + 2 + + // byte align + 6; + +pub const IX_REALLOC_SIZE: u16 = + // pubkey: Pubkey, + 32 + + // buffer_account_size: u64, + 8 + + // blockhash: Hash, + HASH_BYTES as u16 + + // buffer_bump: u8, + 8 + + // invocation_count: u16, + 2 + + // byte align + 6; + +pub const IX_WRITE_SIZE_WITHOUT_CHUNKS: u16 = + // pubkey: Pubkey, + 32+ + // blockhash: Hash, + HASH_BYTES as u16 + + // chunks_bump: u8, + 8 + + // buffer_bump: u8, + 8 + + // offset: u32 + 32; + +pub const IX_CLOSE_SIZE: u16 = + // pubkey: Pubkey, + 32 + + // blockhash: Hash, + HASH_BYTES as u16 + + // chunks_bump: u8, + 8 + + // buffer_bump: u8, + 8; + +// ----------------- +// create_init_ix +// ----------------- +pub struct CreateInitIxArgs { + /// The validator authority + pub authority: Pubkey, + /// On chain address of the account we are committing + pub pubkey: Pubkey, + /// Required size of the account tracking which chunks have been committed + pub chunks_account_size: u64, + /// Required size of the buffer account that holds the account data to commit + pub buffer_account_size: u64, + /// The latest on chain blockhash + pub blockhash: Hash, + /// The number of chunks we need to write until all the data is copied to the + /// buffer account + pub chunk_count: usize, + /// The size of each chunk that we write to the buffer account + pub chunk_size: u16, +} + +pub fn create_init_ix(args: CreateInitIxArgs) -> (Instruction, Pubkey, Pubkey) { + let CreateInitIxArgs { + authority, + pubkey, + chunks_account_size, + buffer_account_size, + blockhash, + chunk_count, + chunk_size, + } = args; + + let (chunks_pda, chunks_bump) = + pdas::chunks_pda(&authority, &pubkey, &blockhash); + let (buffer_pda, 
buffer_bump) = + pdas::buffer_pda(&authority, &pubkey, &blockhash); + let program_id = crate::id(); + let ix = CommittorInstruction::Init { + pubkey, + blockhash, + chunks_account_size, + buffer_account_size, + chunks_bump, + buffer_bump, + chunk_count, + chunk_size, + }; + let accounts = vec![ + AccountMeta::new(authority, true), + AccountMeta::new(chunks_pda, false), + AccountMeta::new(buffer_pda, false), + AccountMeta::new_readonly(system_program::id(), false), + ]; + ( + Instruction::new_with_borsh(program_id, &ix, accounts), + chunks_pda, + buffer_pda, + ) +} + +// ----------------- +// create_realloc_buffer_ix +// ----------------- +#[derive(Clone)] +pub struct CreateReallocBufferIxArgs { + pub authority: Pubkey, + pub pubkey: Pubkey, + pub buffer_account_size: u64, + pub blockhash: Hash, +} + +/// Creates the realloc ixs we need to invoke in order to realloc +/// the account to the desired size since we only can realloc up to +/// [consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE] in a single instruction. 
+/// Returns a tuple with the instructions and a bool indicating if we need to split +/// them into multiple instructions in order to avoid +/// [solana_program::program_error::MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED]J +pub fn create_realloc_buffer_ixs( + args: CreateReallocBufferIxArgs, +) -> Vec { + // We already allocated once during Init and only need to realloc + // if the buffer is larger than [consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE] + if args.buffer_account_size + <= consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64 + { + return vec![]; + } + + let remaining_size = args.buffer_account_size as i128 + - consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as i128; + + // A) We just need to realloc once + if remaining_size <= consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as i128 { + return vec![create_realloc_buffer_ix(args, 1)]; + } + + // B) We need to realloc multiple times + // SAFETY; remaining size > consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE + create_realloc_buffer_ixs_to_add_remaining(&args, remaining_size as u64) +} + +pub fn create_realloc_buffer_ixs_to_add_remaining( + args: &CreateReallocBufferIxArgs, + remaining_size: u64, +) -> Vec { + let invocation_count = (remaining_size as f64 + / consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as f64) + .ceil() as u16; + + let mut ixs = vec![]; + for i in 0..invocation_count { + ixs.push(create_realloc_buffer_ix(args.clone(), i + 1)); + } + + ixs +} + +fn create_realloc_buffer_ix( + args: CreateReallocBufferIxArgs, + invocation_count: u16, +) -> Instruction { + let CreateReallocBufferIxArgs { + authority, + pubkey, + buffer_account_size, + blockhash, + } = args; + let (buffer_pda, buffer_bump) = + pdas::buffer_pda(&authority, &pubkey, &blockhash); + + let program_id = crate::id(); + let ix = CommittorInstruction::ReallocBuffer { + pubkey, + buffer_account_size, + blockhash, + buffer_bump, + invocation_count, + }; + let accounts = vec![ + AccountMeta::new(authority, true), + AccountMeta::new(buffer_pda, 
false), + ]; + Instruction::new_with_borsh(program_id, &ix, accounts) +} + +// ----------------- +// create_write_ix +// ----------------- +pub struct CreateWriteIxArgs { + pub authority: Pubkey, + pub pubkey: Pubkey, + pub offset: u32, + pub data_chunk: Vec, + pub blockhash: Hash, +} + +pub fn create_write_ix(args: CreateWriteIxArgs) -> Instruction { + let CreateWriteIxArgs { + authority, + pubkey, + offset, + data_chunk, + blockhash, + } = args; + let (chunks_pda, chunks_bump) = + pdas::chunks_pda(&authority, &pubkey, &blockhash); + let (buffer_pda, buffer_bump) = + pdas::buffer_pda(&authority, &pubkey, &blockhash); + + let program_id = crate::id(); + let ix = CommittorInstruction::Write { + pubkey, + blockhash, + chunks_bump, + buffer_bump, + offset, + data_chunk, + }; + let accounts = vec![ + AccountMeta::new(authority, true), + AccountMeta::new(chunks_pda, false), + AccountMeta::new(buffer_pda, false), + ]; + Instruction::new_with_borsh(program_id, &ix, accounts) +} + +// ----------------- +// create_close_ix +// ----------------- +pub struct CreateCloseIxArgs { + pub authority: Pubkey, + pub pubkey: Pubkey, + pub blockhash: Hash, +} + +pub fn create_close_ix(args: CreateCloseIxArgs) -> Instruction { + let CreateCloseIxArgs { + authority, + pubkey, + blockhash, + } = args; + let (chunks_pda, chunks_bump) = + pdas::chunks_pda(&authority, &pubkey, &blockhash); + let (buffer_pda, buffer_bump) = + pdas::buffer_pda(&authority, &pubkey, &blockhash); + + let program_id = crate::id(); + let ix = CommittorInstruction::Close { + pubkey, + blockhash, + chunks_bump, + buffer_bump, + }; + let accounts = vec![ + AccountMeta::new(authority, true), + AccountMeta::new(chunks_pda, false), + AccountMeta::new(buffer_pda, false), + ]; + Instruction::new_with_borsh(program_id, &ix, accounts) +} diff --git a/magicblock-committor-program/src/instruction_chunks.rs b/magicblock-committor-program/src/instruction_chunks.rs new file mode 100644 index 00000000..a726f5e3 --- /dev/null +++ 
b/magicblock-committor-program/src/instruction_chunks.rs @@ -0,0 +1,53 @@ +use crate::instruction::{IX_INIT_SIZE, IX_REALLOC_SIZE}; + +use crate::consts::MAX_INSTRUCTION_DATA_SIZE; + +/// Creates chunks of realloc instructions such that each chunk fits into a single transaction. +/// - reallocs: The realloc instructions to split up +/// - init_ix: The init instruction that is combined with the first reallocs +pub fn chunk_realloc_ixs( + reallocs: Vec, + init_ix: Option, +) -> Vec> { + fn add_reallocs( + chunk: &mut Vec, + reallocs: &mut Vec, + start_size: u16, + ) { + let mut total_size = start_size; + loop { + total_size += IX_REALLOC_SIZE; + if total_size >= MAX_INSTRUCTION_DATA_SIZE { + return; + } + if let Some(realloc) = reallocs.pop() { + chunk.push(realloc); + } else { + return; + } + } + } + + let mut reallocs = reallocs; + // We add to the chunks by popping from the end and in order to retain the order + // of reallocs we reverse them here first + reallocs.reverse(); + + let mut chunks = vec![]; + + // First chunk combines reallocs with init instruction if present + if let Some(init_ix) = init_ix { + let mut chunk = vec![init_ix]; + add_reallocs(&mut chunk, &mut reallocs, IX_INIT_SIZE); + chunks.push(chunk); + } + + // All remaining chunks are pure realloc instructions + while let Some(realloc) = reallocs.pop() { + let mut chunk = vec![realloc]; + add_reallocs(&mut chunk, &mut reallocs, IX_REALLOC_SIZE); + chunks.push(chunk); + } + + chunks +} diff --git a/magicblock-committor-program/src/lib.rs b/magicblock-committor-program/src/lib.rs new file mode 100644 index 00000000..831bc793 --- /dev/null +++ b/magicblock-committor-program/src/lib.rs @@ -0,0 +1,29 @@ +use solana_pubkey::declare_id; +pub mod consts; +pub mod error; +pub mod instruction; +pub mod instruction_chunks; +pub mod pdas; +mod state; + +// #[cfg(not(feature = "no-entrypoint"))] +mod utils; + +// #[cfg(not(feature = "no-entrypoint"))] +mod processor; +// #[cfg(not(feature = "no-entrypoint"))] 
+pub use processor::process; + +pub use state::{ + changeset::{ + ChangedAccount, ChangedAccountMeta, ChangedBundle, Changeset, + ChangesetBundles, ChangesetMeta, CommitableAccount, + }, + changeset_chunks::{ChangesetChunk, ChangesetChunks}, + chunks::Chunks, +}; + +#[cfg(not(feature = "no-entrypoint"))] +solana_program::entrypoint!(process); + +declare_id!("corabpNrkBEqbTZP7xfJgSWTdBmVdLf1PARWXZbcMcS"); diff --git a/magicblock-committor-program/src/pdas.rs b/magicblock-committor-program/src/pdas.rs new file mode 100644 index 00000000..e28a89a9 --- /dev/null +++ b/magicblock-committor-program/src/pdas.rs @@ -0,0 +1,107 @@ +use paste::paste; + +const CHUNKS_SEED: &[u8] = b"comittor_chunks"; +const BUFFER_SEED: &[u8] = b"comittor_buffer"; + +macro_rules! seeds { + ($prefix:ident, $bytes_const:expr) => { + paste! { + #[allow(clippy::needless_lifetimes)] + pub fn [<$prefix _seeds>]<'a>( + validator_auth: &'a ::solana_pubkey::Pubkey, + pubkey: &'a ::solana_pubkey::Pubkey, + blockhash: &'a ::solana_program::hash::Hash) -> [&'a [u8]; 5] { + [ + crate::ID.as_ref(), + $bytes_const, + validator_auth.as_ref(), + pubkey.as_ref(), + blockhash.as_ref(), + ] + } + #[allow(clippy::needless_lifetimes)] + pub fn [<$prefix _seeds_with_bump>]<'a>( + validator_auth: &'a ::solana_pubkey::Pubkey, + pubkey: &'a ::solana_pubkey::Pubkey, + blockhash: &'a ::solana_program::hash::Hash, + bump: &'a [u8], + ) -> [&'a [u8]; 6] { + [ + crate::ID.as_ref(), + $bytes_const, + validator_auth.as_ref(), + pubkey.as_ref(), + blockhash.as_ref(), + bump, + ] + } + } + }; +} + +macro_rules! pda { + ($prefix:ident) => { + paste! 
{ + #[allow(clippy::needless_lifetimes)] + pub fn [<$prefix _pda>]<'a>( + validator_auth: &'a ::solana_pubkey::Pubkey, + pubkey: &'a ::solana_pubkey::Pubkey, + blockhash: &'a ::solana_program::hash::Hash, + ) -> (::solana_pubkey::Pubkey, u8) { + let program_id = &crate::id(); + let seeds = [<$prefix _seeds>](validator_auth, pubkey, blockhash); + ::solana_pubkey::Pubkey::find_program_address(&seeds, program_id) + } + #[allow(clippy::needless_lifetimes)] + pub fn []<'a>( + validator_auth: &'a ::solana_pubkey::Pubkey, + pubkey: &'a ::solana_pubkey::Pubkey, + blockhash: &'a ::solana_program::hash::Hash, + bump: &'a [u8], + ) -> $crate::error::CommittorResult<::solana_pubkey::Pubkey> { + let program_id = &crate::id(); + let seeds = [<$prefix _seeds_with_bump>](validator_auth, pubkey, blockhash, bump); + Ok(::solana_pubkey::Pubkey::create_program_address(&seeds, program_id)?) + } + } + }; +} + +seeds!(chunks, CHUNKS_SEED); +pda!(chunks); +seeds!(buffer, BUFFER_SEED); +pda!(buffer); + +#[macro_export] +macro_rules! verified_seeds_and_pda { + ($prefix:ident, + $authority_info:ident, + $pubkey:ident, + $account_info:ident, + $blockhash:ident, + $bump:ident) => {{ + ::paste::paste! 
{ + let seeds = $crate::pdas::[<$prefix _seeds_with_bump>]( + $authority_info.key, + $pubkey, + &$blockhash, + $bump, + ); + let pda = $crate::pdas::[]( + $authority_info.key, + $pubkey, + &$blockhash, + $bump, + ) + .inspect_err(|err| msg!("ERR: {}", err))?; + $crate::utils::assert_keys_equal($account_info.key, &pda, || { + format!( + "Provided {} PDA does not match derived key '{}'", + stringify!($prefix), + pda + ) + })?; + (seeds, pda) + } + }}; +} diff --git a/magicblock-committor-program/src/processor.rs b/magicblock-committor-program/src/processor.rs new file mode 100644 index 00000000..db1455ee --- /dev/null +++ b/magicblock-committor-program/src/processor.rs @@ -0,0 +1,397 @@ +use borsh::{to_vec, BorshDeserialize}; +use solana_program::hash::Hash; +use solana_program::log::sol_log_64; +use solana_program::program::invoke_signed; +use solana_program::program_error::ProgramError; +use solana_program::sysvar::Sysvar; +use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult}; +use solana_program::{msg, system_instruction}; +use solana_pubkey::Pubkey; + +use crate::error::CommittorError; +use crate::instruction::CommittorInstruction; +use crate::utils::{ + assert_account_unallocated, assert_is_signer, assert_program_id, + close_and_refund_authority, +}; +use crate::{consts, verified_seeds_and_pda, Chunks}; + +pub fn process( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + assert_program_id(program_id)?; + + let ix = CommittorInstruction::try_from_slice(instruction_data)?; + use CommittorInstruction::*; + match ix { + Init { + pubkey, + chunks_account_size, + buffer_account_size, + blockhash, + chunks_bump, + buffer_bump, + chunk_count, + chunk_size, + } => process_init( + program_id, + accounts, + &pubkey, + chunks_account_size, + buffer_account_size, + blockhash, + chunks_bump, + buffer_bump, + chunk_count, + chunk_size, + ), + ReallocBuffer { + pubkey, + buffer_account_size, + blockhash, 
+ buffer_bump, + invocation_count, + } => process_realloc_buffer( + accounts, + &pubkey, + buffer_account_size, + blockhash, + buffer_bump, + invocation_count, + ), + Write { + pubkey, + blockhash, + chunks_bump, + buffer_bump, + offset, + data_chunk, + } => process_write( + accounts, + &pubkey, + offset, + data_chunk, + blockhash, + chunks_bump, + buffer_bump, + ), + Close { + pubkey, + blockhash, + chunks_bump, + buffer_bump, + } => process_close( + accounts, + &pubkey, + blockhash, + chunks_bump, + buffer_bump, + ), + } +} + +// ----------------- +// process_init +// ----------------- +#[allow(clippy::too_many_arguments)] // private + only call site is close +fn process_init( + program_id: &Pubkey, + accounts: &[AccountInfo], + pubkey: &Pubkey, + chunks_account_size: u64, + buffer_account_size: u64, + blockhash: Hash, + chunks_bump: u8, + buffer_bump: u8, + chunk_count: usize, + chunk_size: u16, +) -> ProgramResult { + msg!("Instruction: Init"); + + let [authority_info, chunks_account_info, buffer_account_info, _system_program] = + accounts + else { + msg!("Need the following accounts: [authority, chunks, buffer, system program ], but got {}", accounts.len()); + return Err(ProgramError::NotEnoughAccountKeys); + }; + assert_is_signer(authority_info, "authority")?; + + let chunks_bump = &[chunks_bump]; + let (chunks_seeds, _chunks_pda) = verified_seeds_and_pda!( + chunks, + authority_info, + pubkey, + chunks_account_info, + blockhash, + chunks_bump + ); + + let buffer_bump = &[buffer_bump]; + let (buffer_seeds, _buffer_pda) = verified_seeds_and_pda!( + buffer, + authority_info, + pubkey, + buffer_account_info, + blockhash, + buffer_bump + ); + + assert_account_unallocated(chunks_account_info, "chunks")?; + assert_account_unallocated(buffer_account_info, "buffer")?; + + msg!("Creating Chunks and Buffer accounts"); + + // Create Chunks Account + let ix = system_instruction::create_account( + authority_info.key, + chunks_account_info.key, + 
solana_program::rent::Rent::get()? + .minimum_balance(chunks_account_size as usize), + chunks_account_size, + program_id, + ); + invoke_signed( + &ix, + &[authority_info.clone(), chunks_account_info.clone()], + &[&chunks_seeds], + )?; + + let initial_alloc_size = std::cmp::min( + buffer_account_size, + consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64, + ); + + // Create Buffer Account + let ix = system_instruction::create_account( + authority_info.key, + buffer_account_info.key, + // NOTE: we fund for the full size to allow realloc without funding more + solana_program::rent::Rent::get()? + .minimum_balance(buffer_account_size as usize), + initial_alloc_size, + program_id, + ); + invoke_signed( + &ix, + &[authority_info.clone(), buffer_account_info.clone()], + &[&buffer_seeds], + )?; + + msg!( + "Initialized and allocated {} of desired {} bytes.", + initial_alloc_size, + buffer_account_size, + ); + + // Initialize Chunks Account + let chunks = Chunks::new(chunk_count, chunk_size); + chunks_account_info + .data + .borrow_mut() + .copy_from_slice(&to_vec(&chunks)?); + + Ok(()) +} + +// ----------------- +// process_realloc_buffer +// ----------------- +fn process_realloc_buffer( + accounts: &[AccountInfo], + pubkey: &Pubkey, + buffer_account_size: u64, + blockhash: Hash, + buffer_bump: u8, + invocation_count: u16, +) -> ProgramResult { + msg!("Instruction: ReallocBuffer {}", invocation_count); + + let [authority_info, buffer_account_info] = accounts else { + msg!( + "Need the following accounts: [authority, buffer ], but got {}", + accounts.len() + ); + return Err(ProgramError::NotEnoughAccountKeys); + }; + + if buffer_account_info.data.borrow().len() >= buffer_account_size as usize { + msg!( + "Buffer account already has {} bytes, no need to realloc", + buffer_account_info.data.borrow().len() + ); + return Ok(()); + } + + assert_is_signer(authority_info, "authority")?; + + let buffer_bump = &[buffer_bump]; + verified_seeds_and_pda!( + buffer, + authority_info, + 
pubkey, + buffer_account_info, + blockhash, + buffer_bump + ); + + let current_buffer_size = buffer_account_info.data.borrow().len() as u64; + let next_alloc_size = std::cmp::min( + buffer_account_size, + current_buffer_size + + consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64, + ); + + msg!( + "Allocating from {} to {} of desired {} bytes.", + current_buffer_size, + next_alloc_size, + buffer_account_size, + ); + + // NOTE: we fund the account for the full desired account size during init + // Doing this as needed increases the cost for each realloc to 4,959 CUs. + // Reallocing without any rent check/increase uses only 4,025 CUs + // and does not require the system program to be provided. + buffer_account_info.realloc(next_alloc_size as usize, true)?; + + Ok(()) +} + +// ----------------- +// process_write +// ----------------- +fn process_write( + accounts: &[AccountInfo], + pubkey: &Pubkey, + offset: u32, + data_chunk: Vec, + blockhash: Hash, + chunks_bump: u8, + buffer_bump: u8, +) -> ProgramResult { + msg!("Instruction: Write"); + + let [authority_info, chunks_account_info, buffer_account_info] = accounts + else { + msg!("Need the following accounts: [authority, chunks, buffer ], but got {}", accounts.len()); + return Err(ProgramError::NotEnoughAccountKeys); + }; + assert_is_signer(authority_info, "authority")?; + + verify_seeds_and_pdas( + authority_info, + chunks_account_info, + buffer_account_info, + pubkey, + &blockhash, + chunks_bump, + buffer_bump, + )?; + + msg!("Updating Buffer and Chunks accounts [ _, chunks_acc_len, buffer_acc_len, offset, size ]"); + + { + let buffer_data = buffer_account_info.data.borrow(); + let chunks_data = chunks_account_info.data.borrow(); + + // Interpolating lens and offset increases CUs by ~1200. 
+ // So we use this less pretty way since it still gives us the info we need + sol_log_64( + 0, + chunks_data.len() as u64, + buffer_data.len() as u64, + offset as u64, + data_chunk.len() as u64, + ); + + if offset as usize + data_chunk.len() > buffer_data.len() { + let err = CommittorError::OffsetChunkOutOfRange( + data_chunk.len(), + offset, + buffer_data.len(), + ); + msg!("ERR: {}", err); + return Err(err.into()); + } + } + + let mut buffer = buffer_account_info.data.borrow_mut(); + buffer[offset as usize..offset as usize + data_chunk.len()] + .copy_from_slice(&data_chunk); + + let mut chunks_data = chunks_account_info.data.borrow_mut(); + let mut chunks = Chunks::try_from_slice(&chunks_data)?; + chunks.set_offset(offset as usize)?; + chunks_data.copy_from_slice(&to_vec(&chunks)?); + + Ok(()) +} + +// ----------------- +// process_close +// ----------------- +pub fn process_close( + accounts: &[AccountInfo], + pubkey: &Pubkey, + blockhash: Hash, + chunks_bump: u8, + buffer_bump: u8, +) -> ProgramResult { + msg!("Instruction: Close"); + + let [authority_info, chunks_account_info, buffer_account_info] = accounts + else { + msg!("Need the following accounts: [authority, chunks, buffer ], but got {}", accounts.len()); + return Err(ProgramError::NotEnoughAccountKeys); + }; + assert_is_signer(authority_info, "authority")?; + + verify_seeds_and_pdas( + authority_info, + chunks_account_info, + buffer_account_info, + pubkey, + &blockhash, + chunks_bump, + buffer_bump, + )?; + + msg!("Closing Chunks and Buffer accounts"); + close_and_refund_authority(authority_info, chunks_account_info)?; + close_and_refund_authority(authority_info, buffer_account_info)?; + + Ok(()) +} + +fn verify_seeds_and_pdas( + authority_info: &AccountInfo, + chunks_account_info: &AccountInfo, + buffer_account_info: &AccountInfo, + pubkey: &Pubkey, + blockhash: &Hash, + chunks_bump: u8, + buffer_bump: u8, +) -> ProgramResult { + let chunks_bump = &[chunks_bump]; + let (_chunks_seeds, _chunks_pda) = 
verified_seeds_and_pda!( + chunks, + authority_info, + pubkey, + chunks_account_info, + blockhash, + chunks_bump + ); + + let buffer_bump = &[buffer_bump]; + let (_buffer_seeds, _buffer_pda) = verified_seeds_and_pda!( + buffer, + authority_info, + pubkey, + buffer_account_info, + blockhash, + buffer_bump + ); + Ok(()) +} diff --git a/magicblock-committor-program/src/state/changeset.rs b/magicblock-committor-program/src/state/changeset.rs new file mode 100644 index 00000000..4e52869c --- /dev/null +++ b/magicblock-committor-program/src/state/changeset.rs @@ -0,0 +1,498 @@ +use std::collections::{HashMap, HashSet}; + +use borsh::{BorshDeserialize, BorshSerialize}; +use solana_account::{Account, AccountSharedData, ReadableAccount}; +use solana_program::clock::Slot; +use solana_pubkey::Pubkey; + +use super::{ + changeset_chunks::{ChangesetChunks, ChangesetChunksIter}, + chunks::Chunks, +}; + +// ----------------- +// ChangedAccount +// ----------------- +pub type ChangedBundle = Vec<(Pubkey, ChangedAccount)>; + +#[derive(BorshSerialize, BorshDeserialize, PartialEq, Eq, Clone, Debug)] +pub enum ChangedAccount { + Full { + lamports: u64, + data: Vec, + /// The original owner of the delegated account on chain + owner: Pubkey, + /// This id will be the same for accounts that need to be committed together atomically + /// For single commit accounts it is still set for consistency + bundle_id: u64, + }, + // NOTE: placeholder for later without breaking existing + // buffers + Diff, +} + +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct ChangedAccountMeta { + /// The on chain and ephemeral address of the delegated account + pub pubkey: Pubkey, + /// The lamports the account holds in the ephemeral + pub lamports: u64, + /// The original owner of the delegated account on chain + pub owner: Pubkey, + /// This id will be the same for accounts that need to be committed together atomically + /// For single commit accounts it is still set for consistency + pub bundle_id: 
u64, +} + +impl From<(&Pubkey, &ChangedAccount)> for ChangedAccountMeta { + fn from((pubkey, changed_account): (&Pubkey, &ChangedAccount)) -> Self { + match changed_account { + ChangedAccount::Full { + lamports, + owner, + bundle_id, + .. + } => Self { + pubkey: *pubkey, + lamports: *lamports, + owner: *owner, + bundle_id: *bundle_id, + }, + ChangedAccount::Diff => { + unreachable!("We don't yet support account diffs") + } + } + } +} + +impl From<(Account, u64)> for ChangedAccount { + fn from((account, bundle_id): (Account, u64)) -> Self { + Self::Full { + lamports: account.lamports, + // NOTE: the owner of the account in the ephemeral is set to the original account owner + owner: account.owner, + data: account.data, + bundle_id, + } + } +} + +impl From<(AccountSharedData, u64)> for ChangedAccount { + fn from((value, bundle_id): (AccountSharedData, u64)) -> Self { + Self::Full { + lamports: value.lamports(), + owner: *value.owner(), + data: value.data().to_vec(), + bundle_id, + } + } +} + +impl ChangedAccount { + pub(crate) fn into_inner(self) -> (u64, Pubkey, Vec, u64) { + use ChangedAccount::*; + match self { + Full { + lamports, + owner, + data, + bundle_id, + } => (lamports, owner, data, bundle_id), + Diff => unreachable!("We don't yet support account diffs"), + } + } + + pub fn lamports(&self) -> u64 { + match self { + Self::Full { lamports, .. } => *lamports, + Self::Diff => unreachable!("We don't yet support account diffs"), + } + } + + pub fn data(&self) -> &[u8] { + match self { + Self::Full { data, .. } => data, + Self::Diff => unreachable!("We don't yet support account diffs"), + } + } + + pub fn owner(&self) -> Pubkey { + match self { + Self::Full { owner, .. } => *owner, + Self::Diff => unreachable!("We don't yet support account diffs"), + } + } + + pub fn bundle_id(&self) -> u64 { + match self { + Self::Full { bundle_id, .. 
} => *bundle_id, + Self::Diff => unreachable!("We don't yet support account diffs"), + } + } +} + +// ----------------- +// ChangeSet +// ----------------- + +/// This is data structure which holds the account changes to commit to chain. +/// Locally it will be filled with the changes to commit. +/// On chain it is initialized as empty at first and then is filled from the +/// local changeset via multiple transactions. +/// A related [Chunks] account is used in order to track which changes have been +/// applied successfully. +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct Changeset { + /// The accounts that should be updated + pub accounts: HashMap, + /// The ephemeral slot at which those changes were requested + pub slot: Slot, + /// The accounts that should be undelegated after they were committed + pub accounts_to_undelegate: HashSet, +} + +/// The meta data of the changeset which can be used to capture information about +/// the changeset before transferring ownership. Createing this metadata is +/// a lot cheaper than copying the entire changeset which includes the accounts data. +/// Thus it can be used to capture information to include with error responses. 
+#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct ChangesetMeta { + /// The accounts that should be updated + pub accounts: Vec, + /// The ephemeral slot at which those changes were requested + pub slot: Slot, + /// The accounts that should be undelegated after they were committed + pub accounts_to_undelegate: HashSet, +} + +impl ChangesetMeta { + /// Separates information per account including the following: + /// - account commit metadata + /// - slot at which commit was requested + /// - if the account should be undelegated after it was committed + pub fn into_account_infos(self) -> Vec<(ChangedAccountMeta, Slot, bool)> { + self.accounts + .into_iter() + .map(|account| { + let undelegate = + self.accounts_to_undelegate.contains(&account.pubkey); + (account, self.slot, undelegate) + }) + .collect() + } +} + +impl From<&Changeset> for ChangesetMeta { + fn from(changeset: &Changeset) -> Self { + let accounts = changeset + .accounts + .iter() + .map(ChangedAccountMeta::from) + .collect(); + Self { + accounts, + slot: changeset.slot, + accounts_to_undelegate: changeset.accounts_to_undelegate.clone(), + } + } +} + +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct ChangesetBundles { + /// The bundles, each of which needs to be committed atomically + pub bundles: Vec, + /// The ephemeral slot at which those changes were requested + pub slot: Slot, + /// The accounts that should be undelegated after they were committed + pub accounts_to_undelegate: HashSet, +} + +impl Changeset { + /// Adds an account to the change set. + /// If it already exists, it will be replaced, thus the caller needs + /// to ensure that conflicting changes are added in the right order, i.e. + /// the last update needs to be added last. 
+ /// + /// - **pubkey** public key of the account + /// - **account** account to add + /// + /// *returns* true if the account was already present and was replaced + pub fn add>( + &mut self, + pubkey: Pubkey, + account: T, + ) -> bool { + self.accounts.insert(pubkey, account.into()).is_some() + } + + /// This method should be called for all accounts that we want to + /// undelegate after committing them. + pub fn request_undelegation(&mut self, pubkey: Pubkey) { + self.accounts_to_undelegate.insert(pubkey); + } + + /// When we're ready to commit this changeset we convert it into + /// a [CommitableChangeSet] which allows to commit the changes in chunks. + pub fn into_committables(self, chunk_size: u16) -> Vec { + self.accounts + .into_iter() + .map(|(pubkey, acc)| { + let (lamports, owner, data, bundle_id) = acc.into_inner(); + CommitableAccount::new( + pubkey, + owner, + data, + lamports, + chunk_size, + self.slot, + self.accounts_to_undelegate.contains(&pubkey), + bundle_id, + ) + }) + .collect::>() + } + + pub fn account_keys(&self) -> Vec<&Pubkey> { + self.accounts.keys().collect() + } + + pub fn undelegate_keys(&self) -> Vec<&Pubkey> { + self.accounts_to_undelegate.iter().collect() + } + + pub fn owners(&self) -> HashMap { + self.accounts + .iter() + .map(|(pubkey, account)| (*pubkey, account.owner())) + .collect() + } + + /// Splits the accounts into bundles that need to be committed together + /// keeping each bundle as small as possible. + /// Accounts without a bundle id each get their own bundle here. + /// The return value returns info about accounts needing to be delegated and + /// the slot at which the changeset was created. 
+ pub fn into_small_changeset_bundles(self) -> ChangesetBundles { + let mut bundles: HashMap = HashMap::new(); + let accounts_to_undelegate = self.accounts_to_undelegate; + let slot = self.slot; + for (pubkey, account) in self.accounts.into_iter() { + bundles + .entry(account.bundle_id()) + .or_default() + .push((pubkey, account)); + } + let bundles = bundles.into_values().collect::>(); + + ChangesetBundles { + bundles, + slot, + accounts_to_undelegate, + } + } + + pub fn is_empty(&self) -> bool { + self.accounts.is_empty() + } + + pub fn len(&self) -> usize { + self.accounts.len() + } + + pub fn overlaps(change_sets: &[&Self]) -> Vec { + let mut overlapping = HashSet::new(); + for change_set in change_sets { + for (pubkey, _) in change_set.accounts.iter() { + if overlapping.contains(pubkey) { + continue; + } + for other_change_set in change_sets { + if other_change_set == change_set { + continue; + } + if other_change_set.accounts.contains_key(pubkey) { + overlapping.insert(*pubkey); + } + } + } + } + overlapping.into_iter().collect() + } + + pub fn contains(&self, pubkey: &Pubkey) -> bool { + self.accounts.contains_key(pubkey) + } +} + +// ----------------- +// CommitableChangeSet +// ----------------- +/// There is one committable per account that we are trying to commit +#[derive(Debug)] +pub struct CommitableAccount { + /// The on chain address of the account + pub pubkey: Pubkey, + /// The original owner of the delegated account on chain + pub delegated_account_owner: Pubkey, + /// The account data to commit + pub data: Vec, + /// The lamports that the account holds in the ephemeral + pub lamports: u64, + /// Keep track of which part of the account data has been committed + chunks: Chunks, + /// The size of each data chunk that we send to fill the buffer + chunk_size: u16, + /// The ephemeral slot at which those changes were requested + pub slot: Slot, + /// If we also undelegate the account after committing it + pub undelegate: bool, + /// This id will be 
the same for accounts that need to be committed together atomically + /// For single commit accounts it is still set for consistency + pub bundle_id: u64, +} + +impl CommitableAccount { + #[allow(clippy::too_many_arguments)] // internal API + pub(crate) fn new( + pubkey: Pubkey, + delegated_account_owner: Pubkey, + data: Vec, + lamports: u64, + chunk_size: u16, + slot: Slot, + undelegate: bool, + bundle_id: u64, + ) -> Self { + let len = data.len(); + let chunk_count = if chunk_size == 0 { + // Special case for when the commit info is handled without chunking + 1 + } else { + let count = len / chunk_size as usize; + if len % chunk_size as usize > 0 { + count + 1 + } else { + count + } + }; + Self { + pubkey, + delegated_account_owner, + data, + lamports, + chunk_size, + chunks: Chunks::new(chunk_count, chunk_size), + slot, + undelegate, + bundle_id, + } + } + + /// Iterates all chunks of data no matter if they were committed or not. + /// Thus only use this the very first time when trying to commit all chunks. + pub fn iter_all(&self) -> ChangesetChunksIter<'_> { + ChangesetChunks::new(&self.chunks, self.chunk_size).iter(&self.data) + } + + /// Iterates all chunks of data that have not been committed yet. + /// Use this to discover chunks that failed to commit. + pub fn iter_missing(&self) -> ChangesetChunksIter<'_> { + ChangesetChunks::new(&self.chunks, self.chunk_size) + .iter_missing(&self.data) + } + + /// When all chunks were committed we query the chain to see which commits + /// actually landed. + /// We then update the chunks here in order to allow to retry the missing + /// chunks via [Self::iter_missing]. + pub fn set_chunks(&mut self, chunks: Chunks) { + self.chunks = chunks; + } + + /// The total size of the data that we we will commit. + /// Use this to initialize the empty account on chain. 
+ pub fn size(&self) -> usize { + self.data.len() + } + + pub fn chunk_size(&self) -> u16 { + self.chunk_size + } + + pub fn chunk_count(&self) -> usize { + self.chunks.count() + } + + pub fn has_data(&self) -> bool { + !self.data.is_empty() + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_committing_changeset() { + let mut changeset = Changeset::default(); + let pubkey = Pubkey::new_unique(); + let owner = Pubkey::new_unique(); + let changed_account = ChangedAccount::Full { + lamports: 5_000, + owner, + data: vec![5; 547], + bundle_id: 1, + }; + changeset.add(pubkey, changed_account.clone()); + + // The below results in a buffer of 547 bytes and we split it into 14 chunks + let commitable = &mut changeset.into_committables(547 / 14)[0]; + eprintln!("SIZE: {}", commitable.size()); + assert_eq!(commitable.chunk_size(), 39); + assert_eq!(commitable.chunk_count(), 15); + assert_eq!(commitable.iter_all().count(), 15); + + // 1. Try to commit all chunks into a buffer simulating that some fail + let mut tgt_buf = vec![0u8; commitable.size()]; + let mut chunks = + Chunks::new(commitable.chunk_count(), commitable.chunk_size()); + + for chunk in commitable.iter_all() { + let idx = chunk.chunk_idx(); + // Skip the some chunks to simulate transactions not landing + if idx == 7 || idx == 8 || idx == 12 { + continue; + } + + chunks.set_idx(idx as usize); + + let start = chunk.offset; + for (i, d) in chunk.data_chunk.into_iter().enumerate() { + tgt_buf[start as usize + i] = d; + } + } + + // 2. Update the chunks we were able to commit + // We will get this updated data from chain as each commit landing will + // also update the chunks account + commitable.set_chunks(chunks.clone()); + assert_eq!(commitable.iter_missing().count(), 3); + + // 3. 
Retry the missing chunks + for chunk in commitable.iter_missing() { + chunks.set_idx(chunk.chunk_idx() as usize); + + let start = chunk.offset; + for (i, d) in chunk.data_chunk.into_iter().enumerate() { + tgt_buf[start as usize + i] = d; + } + } + + commitable.set_chunks(chunks); + assert_eq!(commitable.iter_missing().count(), 0); + + // 4. Ensure that the entire account data was committed + let (_, _, data, _) = changed_account.into_inner(); + assert_eq!(tgt_buf, data); + } +} diff --git a/magicblock-committor-program/src/state/changeset_chunks.rs b/magicblock-committor-program/src/state/changeset_chunks.rs new file mode 100644 index 00000000..990f366c --- /dev/null +++ b/magicblock-committor-program/src/state/changeset_chunks.rs @@ -0,0 +1,165 @@ +use std::collections::HashSet; + +use super::chunks::Chunks; +use borsh::{BorshDeserialize, BorshSerialize}; + +/// A chunk of change set data that we want to apply to the on chain +/// [ChangeSet] buffer +#[derive(Debug, Default, BorshSerialize, BorshDeserialize)] +pub struct ChangesetChunk { + // u32 is sufficient since the buffer size is limited and we will + // never exceed the u32 max value with an offset we need to address + // u32 max: 4_294_967_295 + // max offset ~10_000_660 + pub offset: u32, + pub data_chunk: Vec, + // chunk size can never exceed the ix max size which is well below u16::MAX (65_535) + #[borsh(skip)] + chunk_size: u16, +} + +impl From<(&[u8], u32, u16)> for ChangesetChunk { + fn from((data, offset, chunk_size): (&[u8], u32, u16)) -> Self { + let end = { + let end = (offset + chunk_size as u32) as usize; + // For the last chunk we might have less data than the chunk size left + end.min(data.len()) + }; + Self { + offset, + data_chunk: data[offset as usize..end].to_vec(), + chunk_size, + } + } +} + +impl ChangesetChunk { + /// The index that the chunk will has in the [Chunks] tracker. 
+ pub fn chunk_idx(&self) -> u32 { + self.offset / self.chunk_size as u32 + } +} + +/// This is a helper struct which is never stored anywhere, but merely +/// combines the [Chunks] and [ChangeSetChunks::chunk_size] in order +/// to provide convenience methods. +pub struct ChangesetChunks<'chunks> { + /// The size of each data chunk that we send to fill the buffer. + /// It is a u16 since u16 max (65,535) is much larger than the max packet size (1,280) + chunk_size: u16, + /// Keeping track of which chunks have been delivered + chunks: &'chunks Chunks, +} + +impl<'chunks> ChangesetChunks<'chunks> { + pub fn new(chunks: &'chunks Chunks, chunk_size: u16) -> Self { + Self { chunks, chunk_size } + } + + fn assert_sizes(&self, data: &[u8]) { + let chunks_len = self.chunks.count() * self.chunk_size as usize; + assert!( + data.len() < chunks_len, + "data.len() ({}) >= chunks_len ({})", + data.len(), + chunks_len + ); + assert!( + chunks_len < data.len() + self.chunk_size as usize, + "chunks_len ({}) >= data.len() + chunk_size ({})", + chunks_len, + data.len() + self.chunk_size as usize + ); + } + + pub fn iter<'data>( + &'chunks self, + data: &'data [u8], + ) -> ChangesetChunksIter<'data> { + self.assert_sizes(data); + ChangesetChunksIter::new( + data, + self.chunk_size, + self.chunks.count(), + None, + ) + } + + pub fn iter_missing<'data>( + &self, + data: &'data [u8], + ) -> ChangesetChunksIter<'data> { + self.assert_sizes(data); + ChangesetChunksIter::new( + data, + self.chunk_size, + self.chunks.count(), + Some(self.chunks.get_missing_chunks()), + ) + } +} + +pub struct ChangesetChunksIter<'data> { + /// The data from which to extract chunks + data: &'data [u8], + /// Size of each chunk + chunk_size: u16, + /// Total number of chunks in the data + chunk_count: usize, + /// If set, only include chunks that are in the filter + filter: Option>, + /// Current index of the iterator + idx: usize, +} + +impl<'data> ChangesetChunksIter<'data> { + pub fn new( + data: &'data 
[u8], + chunk_size: u16, + chunk_count: usize, + filter: Option>, + ) -> Self { + Self { + data, + chunk_size, + chunk_count, + filter, + idx: 0, + } + } +} + +impl Iterator for ChangesetChunksIter<'_> { + type Item = ChangesetChunk; + + fn next(&mut self) -> Option { + // Skip all chunks that are not in the filter + if let Some(filter) = &self.filter { + while self.idx < self.chunk_count { + if filter.contains(&self.idx) { + break; + } + self.idx += 1; + } + } + + if self.idx >= self.chunk_count { + return None; + } + + let offset = self.idx * self.chunk_size as usize; + assert!( + offset < self.data.len(), + "offset out of bounds {} >= {}", + offset, + self.data.len() + ); + + let chunk = + ChangesetChunk::from((self.data, offset as u32, self.chunk_size)); + + self.idx += 1; + + Some(chunk) + } +} diff --git a/magicblock-committor-program/src/state/chunks.rs b/magicblock-committor-program/src/state/chunks.rs new file mode 100644 index 00000000..b68c2a26 --- /dev/null +++ b/magicblock-committor-program/src/state/chunks.rs @@ -0,0 +1,235 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use std::{collections::HashSet, fmt}; + +use crate::{ + consts, + error::{CommittorError, CommittorResult}, +}; + +const BIT_FIELD_SIZE: usize = 8; + +/// A bitfield based implementation to keep track of which chunks have been delivered. +/// This is much more memory efficient than a Vec which uses 1 byte per value. +/// [https://doc.rust-lang.org/reference/type-layout.html#r-layout.primitive.size] +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub struct Chunks { + /// The bitfields tracking chunk state. + bits: Vec, + /// The tracking capacity which is + /// ```rust + /// let capacity = bits.len() * BIT_FIELD_SIZE + /// ``` + /// The amount of tracked chunks could be a bit smaller as it might only use + /// part of the last bit in [Chunks::bits]. + /// This count gives that smaller amount. + count: usize, + /// The size of chunks that we are tracking. 
+ chunk_size: u16, +} + +impl fmt::Display for Chunks { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for (idx, bit) in self.bits.iter().enumerate() { + if idx % 8 == 0 { + write!(f, "\n{:05}: ", idx * BIT_FIELD_SIZE)?; + } + let bit = format!("{:08b}", bit); + let bit = bit.chars().rev().collect::(); + // add space after 4 bits + let (bit1, bit2) = bit.split_at(4); + write!(f, "{} {} ", bit1, bit2)?; + } + Ok(()) + } +} + +impl Chunks { + pub fn new(chunk_count: usize, chunk_size: u16) -> Self { + // SAFETY: this is a bug and we need to crash and burn + assert!( + Self::bytes_for_count_len(chunk_count) + < consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as usize, + "Size ({}) needed to track {} chunks is too large track and would require to realloc. Max allowed is {} bytes", + Self::bytes_for_count_len(chunk_count), + chunk_count, + consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE + ); + Self { + bits: vec![0; Self::bits_for_count_len(chunk_count)], + count: chunk_count, + chunk_size, + } + } + + fn bits_for_count_len(count: usize) -> usize { + count / BIT_FIELD_SIZE + 1 + } + + pub fn bytes_for_count_len(count: usize) -> usize { + // bits: Vec, + Self::bits_for_count_len(count) * std::mem::size_of::() + // count: usize, + + std::mem::size_of::() + // chunk_size: u16, + + std::mem::size_of::() + } + + /// Returns `true` if the chunk at index has been delivered + pub fn get_idx(&self, idx: usize) -> bool { + if idx >= self.count { + return false; + } + let vec_idx = idx / BIT_FIELD_SIZE; + let bit_idx = idx % BIT_FIELD_SIZE; + (self.bits[vec_idx] & (1 << bit_idx)) != 0 + } + + /// Sets the chunk at index to `true` denoting that it has been delivered + pub(super) fn set_idx(&mut self, idx: usize) { + if idx < self.count { + let vec_idx = idx / BIT_FIELD_SIZE; + let bit_idx = idx % BIT_FIELD_SIZE; + self.bits[vec_idx] |= 1 << bit_idx; + } + } + + pub fn set_offset(&mut self, offset: usize) -> CommittorResult<()> { + if offset % self.chunk_size as usize != 
0 { + return Err(CommittorError::OffsetMustBeMultipleOfChunkSize( + offset, + self.chunk_size, + )); + } + let idx = offset / self.chunk_size as usize; + self.set_idx(idx); + Ok(()) + } + + pub fn get_offset(&self, offset: usize) -> CommittorResult { + if offset % self.chunk_size as usize != 0 { + return Err(CommittorError::OffsetMustBeMultipleOfChunkSize( + offset, + self.chunk_size, + )); + } + let idx = offset / self.chunk_size as usize; + Ok(self.get_idx(idx)) + } + + pub fn count(&self) -> usize { + self.count + } + + pub fn chunk_size(&self) -> u16 { + self.chunk_size + } + + pub fn get_missing_chunks(&self) -> HashSet { + (0..self.count).filter(|&i| !self.get_idx(i)).collect() + } + + pub fn is_complete(&self) -> bool { + self.get_missing_chunks().is_empty() + } +} + +impl From<(Vec, u16)> for Chunks { + fn from((vec, chunk_size): (Vec, u16)) -> Self { + let bits = vec![0; vec.len() / BIT_FIELD_SIZE + 1]; + let mut chunks = Self { + bits, + count: vec.len(), + chunk_size, + }; + for (i, &d) in vec.iter().enumerate() { + if d { + chunks.set_idx(i); + } + } + chunks + } +} + +#[cfg(test)] +mod test { + use super::*; + + impl Chunks { + pub(super) fn iter(&self) -> ChunksIter { + ChunksIter { + chunks: self, + idx: 0, + } + } + } + + pub(super) struct ChunksIter<'a> { + chunks: &'a Chunks, + idx: usize, + } + + impl Iterator for ChunksIter<'_> { + type Item = bool; + fn next(&mut self) -> Option { + if self.idx < self.chunks.count { + let idx = self.idx; + self.idx += 1; + Some(self.chunks.get_idx(idx)) + } else { + None + } + } + } + + const CHUNK_SIZE: u16 = 128; + + #[test] + fn test_chunks_iter() { + let chunks = vec![true, false, false, false]; + let chunks = Chunks::from((chunks, CHUNK_SIZE)); + let vec = chunks.iter().collect::>(); + assert_eq!(vec, vec![true, false, false, false]); + } + + #[test] + fn test_chunks_set_get_idx() { + let chunks = vec![false; 12]; + let mut chunks = Chunks::from((chunks, CHUNK_SIZE)); + chunks.set_idx(0); + 
chunks.set_idx(10); + + assert!(chunks.get_idx(0)); + assert!(!chunks.get_idx(1)); + assert!(chunks.get_idx(10)); + + let vec = chunks.iter().collect::>(); + #[rustfmt::skip] + assert_eq!( + vec, + vec![ + true, false, false, false, false, false, false, false, + false, false, true, false + ] + ); + } + + #[test] + fn test_chunks_set_get_idx_large() { + let chunks = vec![false; 2048]; + let mut chunks = Chunks::from((chunks, CHUNK_SIZE)); + chunks.set_idx(99); + chunks.set_idx(1043); + + assert!(!chunks.get_idx(0)); + assert!(!chunks.get_idx(1)); + assert!(chunks.get_idx(99)); + assert!(!chunks.get_idx(1042)); + assert!(chunks.get_idx(1043)); + assert!(!chunks.get_idx(1044)); + + assert!(!chunks.get_idx(2048)); + assert!(!chunks.get_idx(2049)); + + assert_eq!(chunks.iter().count(), 2048); + } +} diff --git a/magicblock-committor-program/src/state/mod.rs b/magicblock-committor-program/src/state/mod.rs new file mode 100644 index 00000000..e14a7e4c --- /dev/null +++ b/magicblock-committor-program/src/state/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod changeset; +pub(crate) mod changeset_chunks; +pub(crate) mod chunks; diff --git a/magicblock-committor-program/src/utils/account.rs b/magicblock-committor-program/src/utils/account.rs new file mode 100644 index 00000000..e794106f --- /dev/null +++ b/magicblock-committor-program/src/utils/account.rs @@ -0,0 +1,26 @@ +use solana_program::msg; +use solana_program::program_error::ProgramError; +use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult}; + +pub fn close_and_refund_authority( + authority: &AccountInfo, + account: &AccountInfo, +) -> ProgramResult { + // Realloc the account data to len 0 to avoid refunding attacks, i.e. 
keeping + // the account around in an instruction that is appended as part of this + // transaction + // https://www.helius.dev/blog/a-hitchhikers-guide-to-solana-program-security + account.realloc(0, false)?; + + // Transfer all lamports to authority + **authority.lamports.borrow_mut() = authority + .lamports() + .checked_add(account.lamports()) + .ok_or_else(|| { + msg!("Overflow when refunding authority"); + ProgramError::ArithmeticOverflow + })?; + **account.lamports.borrow_mut() = 0; + + Ok(()) +} diff --git a/magicblock-committor-program/src/utils/asserts.rs b/magicblock-committor-program/src/utils/asserts.rs new file mode 100644 index 00000000..838d139b --- /dev/null +++ b/magicblock-committor-program/src/utils/asserts.rs @@ -0,0 +1,60 @@ +use solana_program::pubkey::Pubkey; +use solana_program::{ + account_info::AccountInfo, entrypoint::ProgramResult, msg, + program_error::ProgramError, +}; + +pub fn assert_keys_equal String>( + provided_key: &Pubkey, + expected_key: &Pubkey, + get_msg: F, +) -> ProgramResult { + if provided_key.ne(expected_key) { + msg!("Err: {}", get_msg()); + msg!("Err: provided {} expected {}", provided_key, expected_key); + Err(ProgramError::Custom(1)) + } else { + Ok(()) + } +} + +pub fn assert_account_unallocated( + account: &AccountInfo, + account_label: &str, +) -> ProgramResult { + if account.data.borrow().len() != 0 { + msg!( + "Err: account '{}' ({}) was already initialized", + account_label, + account.key + ); + Err(ProgramError::AccountAlreadyInitialized) + } else { + Ok(()) + } +} + +pub fn assert_is_signer( + account: &AccountInfo, + account_label: &str, +) -> ProgramResult { + if !account.is_signer { + msg!( + "Err: account '{}' ({}) should be signer", + account_label, + account.key + ); + Err(ProgramError::MissingRequiredSignature) + } else { + Ok(()) + } +} + +pub fn assert_program_id(program_id: &Pubkey) -> ProgramResult { + if program_id != &crate::id() { + msg!("ERR: invalid program id"); + 
Err(ProgramError::IncorrectProgramId) + } else { + Ok(()) + } +} diff --git a/magicblock-committor-program/src/utils/mod.rs b/magicblock-committor-program/src/utils/mod.rs new file mode 100644 index 00000000..cb8e597c --- /dev/null +++ b/magicblock-committor-program/src/utils/mod.rs @@ -0,0 +1,15 @@ +mod account; +mod asserts; +pub use account::*; +pub use asserts::*; + +#[macro_export] +macro_rules! compute { + ($msg:expr=> $($tt:tt)*) => { + ::solana_program::msg!(concat!($msg, " {")); + ::solana_program::log::sol_log_compute_units(); + $($tt)* + ::solana_program::log::sol_log_compute_units(); + ::solana_program::msg!(concat!(" } // ", $msg)); + }; +} diff --git a/magicblock-committor-program/tests/prog_init_write_and_close.rs b/magicblock-committor-program/tests/prog_init_write_and_close.rs new file mode 100644 index 00000000..e5448095 --- /dev/null +++ b/magicblock-committor-program/tests/prog_init_write_and_close.rs @@ -0,0 +1,346 @@ +use borsh::{to_vec, BorshDeserialize}; +use magicblock_committor_program::{ + instruction::{ + create_init_ix, create_realloc_buffer_ixs, CreateInitIxArgs, + CreateReallocBufferIxArgs, + }, + instruction_chunks::chunk_realloc_ixs, + ChangedAccount, Changeset, Chunks, +}; +use solana_program_test::*; +use solana_pubkey::Pubkey; +use solana_sdk::{ + blake3::HASH_BYTES, hash::Hash, native_token::LAMPORTS_PER_SOL, + signer::Signer, transaction::Transaction, +}; + +macro_rules! exec { + ($banks_client:ident, $ix:expr, $auth:ident, $latest_blockhash:ident) => {{ + let mut transaction = + Transaction::new_with_payer($ix, Some(&$auth.pubkey())); + transaction.sign(&[$auth.insecure_clone()], $latest_blockhash); + $banks_client + .process_transaction(transaction) + .await + .unwrap(); + }}; +} + +macro_rules! 
get_chunks { + ($banks_client:expr, $chunks_pda:expr) => {{ + let chunks_data = $banks_client + .get_account($chunks_pda) + .await + .unwrap() + .unwrap() + .data; + Chunks::try_from_slice(&chunks_data).unwrap() + }}; +} + +macro_rules! get_buffer_data { + ($banks_client:expr, $buffer_pda:expr) => {{ + $banks_client + .get_account($buffer_pda) + .await + .unwrap() + .unwrap() + .data + }}; +} + +#[tokio::test] +async fn test_init_write_and_close_small_single_account() { + let mut changeset = Changeset::default(); + changeset.add( + Pubkey::new_unique(), + ChangedAccount::Full { + owner: Pubkey::new_unique(), + lamports: LAMPORTS_PER_SOL, + data: vec![1; 500], + bundle_id: 1, + }, + ); + init_write_and_close(changeset).await; +} + +const MULTIPLE_ITER: u64 = 3; + +#[tokio::test] +async fn test_init_write_and_close_small_changeset() { + let mut changeset = Changeset::default(); + for i in 1..MULTIPLE_ITER { + changeset.add( + Pubkey::new_unique(), + ChangedAccount::Full { + owner: Pubkey::new_unique(), + lamports: i, + data: vec![i as u8; 500], + bundle_id: 1, + }, + ); + } + init_write_and_close(changeset).await; +} + +#[tokio::test] +async fn test_init_write_and_close_large_changeset() { + let mut changeset = Changeset::default(); + for i in 1..MULTIPLE_ITER { + let pubkey = Pubkey::new_unique(); + changeset.add( + pubkey, + ChangedAccount::Full { + owner: Pubkey::new_unique(), + lamports: 1000 + i, + data: vec![i as u8; i as usize * 500], + bundle_id: 1, + }, + ); + if i % 2 == 0 { + changeset.request_undelegation(pubkey) + } + } + init_write_and_close(changeset).await; +} + +#[tokio::test] +async fn test_init_write_and_close_very_large_changeset() { + let mut changeset = Changeset::default(); + for i in 1..MULTIPLE_ITER { + let pubkey = Pubkey::new_unique(); + changeset.add( + pubkey, + ChangedAccount::Full { + owner: Pubkey::new_unique(), + lamports: 1000 + i, + data: vec![i as u8; i as usize * 5_000], + bundle_id: 1, + }, + ); + if i % 2 == 0 { + 
changeset.request_undelegation(pubkey) + } + } + init_write_and_close(changeset).await; +} + +#[tokio::test] +async fn test_init_write_and_close_extremely_large_changeset() { + let mut changeset = Changeset::default(); + for i in 1..MULTIPLE_ITER { + let pubkey = Pubkey::new_unique(); + changeset.add( + pubkey, + ChangedAccount::Full { + owner: Pubkey::new_unique(), + lamports: 1000 + i, + data: vec![i as u8; i as usize * 50_000], + bundle_id: 1, + }, + ); + if i % 2 == 0 { + changeset.request_undelegation(pubkey) + } + } + init_write_and_close(changeset).await; +} + +#[tokio::test] +async fn test_init_write_and_close_insanely_large_changeset() { + let mut changeset = Changeset::default(); + for i in 1..MULTIPLE_ITER { + let pubkey = Pubkey::new_unique(); + changeset.add( + pubkey, + ChangedAccount::Full { + owner: Pubkey::new_unique(), + lamports: 1000 + i, + data: vec![i as u8; i as usize * 90_000], + bundle_id: 1, + }, + ); + if i % 2 == 0 { + changeset.request_undelegation(pubkey) + } + } + init_write_and_close(changeset).await; +} + +async fn init_write_and_close(changeset: Changeset) { + let program_id = &magicblock_committor_program::id(); + + let (banks_client, auth, _) = ProgramTest::new( + "committor_program", + *program_id, + processor!(magicblock_committor_program::process), + ) + .start() + .await; + + let ephem_blockhash = Hash::from([1; HASH_BYTES]); + + let chunk_size = 439 / 14; + let commitables = changeset.into_committables(chunk_size); + for commitable in commitables.iter() { + let chunks = + Chunks::new(commitable.chunk_count(), commitable.chunk_size()); + + // Initialize the Changeset on chain + let (chunks_pda, buffer_pda) = { + let chunks_account_size = to_vec(&chunks).unwrap().len() as u64; + let (init_ix, chunks_pda, buffer_pda) = + create_init_ix(CreateInitIxArgs { + authority: auth.pubkey(), + pubkey: commitable.pubkey, + chunks_account_size, + buffer_account_size: commitable.size() as u64, + blockhash: ephem_blockhash, + chunk_count: 
commitable.chunk_count(), + chunk_size: commitable.chunk_size(), + }); + let realloc_ixs = + create_realloc_buffer_ixs(CreateReallocBufferIxArgs { + authority: auth.pubkey(), + pubkey: commitable.pubkey, + buffer_account_size: commitable.size() as u64, + blockhash: ephem_blockhash, + }); + + let ix_chunks = chunk_realloc_ixs(realloc_ixs, Some(init_ix)); + for ixs in ix_chunks { + let latest_blockhash = + banks_client.get_latest_blockhash().await.unwrap(); + exec!(banks_client, &ixs, auth, latest_blockhash); + } + + (chunks_pda, buffer_pda) + }; + + let chunks = get_chunks!(&banks_client, chunks_pda); + for i in 0..chunks.count() { + assert!(!chunks.get_idx(i)); + } + assert!(!chunks.is_complete()); + + let latest_blockhash = + banks_client.get_latest_blockhash().await.unwrap(); + + // Write the first chunk + { + let first_chunk = &commitable.iter_all().next().unwrap(); + let write_ix = magicblock_committor_program::instruction::create_write_ix( + magicblock_committor_program::instruction::CreateWriteIxArgs { + authority: auth.pubkey(), + pubkey: commitable.pubkey, + offset: first_chunk.offset, + data_chunk: first_chunk.data_chunk.clone(), + blockhash: ephem_blockhash, + }, + ); + exec!(banks_client, &[write_ix], auth, latest_blockhash); + + let chunks = get_chunks!(&banks_client, chunks_pda); + assert_eq!(chunks.count(), commitable.chunk_count()); + assert_eq!(chunks.chunk_size(), commitable.chunk_size()); + assert!(chunks.get_idx(0)); + for i in 1..chunks.count() { + assert!(!chunks.get_idx(i)); + } + assert!(!chunks.is_complete()); + + let buffer_data = get_buffer_data!(&banks_client, buffer_pda); + assert_eq!( + buffer_data[0..first_chunk.data_chunk.len()], + first_chunk.data_chunk + ); + } + + // Write third chunk + { + let third_chunk = &commitable.iter_all().nth(2).unwrap(); + let write_ix = magicblock_committor_program::instruction::create_write_ix( + magicblock_committor_program::instruction::CreateWriteIxArgs { + authority: auth.pubkey(), + pubkey: 
commitable.pubkey, + offset: third_chunk.offset, + data_chunk: third_chunk.data_chunk.clone(), + blockhash: ephem_blockhash, + }, + ); + exec!(banks_client, &[write_ix], auth, latest_blockhash); + + let chunks = get_chunks!(&banks_client, chunks_pda); + assert!(chunks.get_idx(0)); + assert!(!chunks.get_idx(1)); + assert!(chunks.get_idx(2)); + for i in 3..chunks.count() { + assert!(!chunks.get_idx(i)); + } + assert!(!chunks.is_complete()); + + let buffer_data = get_buffer_data!(&banks_client, buffer_pda); + assert_eq!( + buffer_data[third_chunk.offset as usize + ..third_chunk.offset as usize + + third_chunk.data_chunk.len()], + third_chunk.data_chunk + ); + } + + // Write the remaining chunks + { + for chunk in commitable.iter_missing() { + let latest_blockhash = + banks_client.get_latest_blockhash().await.unwrap(); + let write_ix = magicblock_committor_program::instruction::create_write_ix( + magicblock_committor_program::instruction::CreateWriteIxArgs { + authority: auth.pubkey(), + pubkey: commitable.pubkey, + offset: chunk.offset, + data_chunk: chunk.data_chunk.clone(), + blockhash: ephem_blockhash, + }, + ); + exec!(banks_client, &[write_ix], auth, latest_blockhash); + } + + let chunks = get_chunks!(&banks_client, chunks_pda); + for i in 0..chunks.count() { + assert!(chunks.get_idx(i)); + } + assert!(chunks.is_complete()); + + let buffer = get_buffer_data!(&banks_client, buffer_pda); + assert_eq!(buffer, commitable.data); + } + + // Close both accounts + { + let latest_blockhash = + banks_client.get_latest_blockhash().await.unwrap(); + + // Normally this instruction would be part of a transaction that processes + // the change set to update the corresponding accounts + let close_ix = magicblock_committor_program::instruction::create_close_ix( + magicblock_committor_program::instruction::CreateCloseIxArgs { + authority: auth.pubkey(), + pubkey: commitable.pubkey, + blockhash: ephem_blockhash, + }, + ); + exec!(banks_client, &[close_ix], auth, latest_blockhash); 
+ + assert!(banks_client + .get_account(chunks_pda) + .await + .unwrap() + .is_none()); + assert!(banks_client + .get_account(buffer_pda) + .await + .unwrap() + .is_none()); + } + } +} diff --git a/magicblock-committor-program/tests/prog_security.rs b/magicblock-committor-program/tests/prog_security.rs new file mode 100644 index 00000000..12690ca0 --- /dev/null +++ b/magicblock-committor-program/tests/prog_security.rs @@ -0,0 +1,10 @@ +// TODO: add tests here that check that this program is secure +// - authority must sign +// - refund attack on close does not succeed +// - invalid PDAs are detected +// - invalid authority is detected (not matching PDAs derived from it) +#[tokio::test] +#[ignore] +async fn test_todo_security_tests() { + panic!("Implement security tests"); +} diff --git a/magicblock-committor-service/Cargo.toml b/magicblock-committor-service/Cargo.toml new file mode 100644 index 00000000..bd00cb15 --- /dev/null +++ b/magicblock-committor-service/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "magicblock-committor-service" +version.workspace = true +authors.workspace = true +repository.workspace = true +homepage.workspace = true +license.workspace = true +edition.workspace = true + +[dependencies] +base64 = { workspace = true } +bincode = { workspace = true } +borsh = { workspace = true } +log = { workspace = true } +magicblock-committor-program = { workspace = true, features = [ + "no-entrypoint", +] } +magicblock-delegation-program = { workspace = true, features = [ + "no-entrypoint", +] } +magicblock-rpc-client = { workspace = true } +magicblock-table-mania = { workspace = true } +rusqlite = { workspace = true } +solana-account = { workspace = true } +solana-pubkey = { workspace = true } +solana-rpc-client = { workspace = true } +solana-rpc-client-api = { workspace = true } +solana-sdk = { workspace = true } +solana-transaction-status-client-types = { workspace = true } +static_assertions = { workspace = true } +thiserror = { workspace = true } 
+tokio = { workspace = true } +tokio-util = { workspace = true } + +[dev-dependencies] +env_logger = { workspace = true } +lazy_static = { workspace = true } +magicblock-table-mania = { workspace = true, features = [ + "randomize_lookup_table_slot", +] } +# program-flexi-counter = { workspace = true, features = ["no-entrypoint"] } +tokio = { workspace = true, features = ["rt", "macros"] } + +[features] +default = [] +test_table_close = [] +dev-context-only-utils = [] diff --git a/magicblock-committor-service/src/bundle_strategy.rs b/magicblock-committor-service/src/bundle_strategy.rs new file mode 100644 index 00000000..0f7a0f3b --- /dev/null +++ b/magicblock-committor-service/src/bundle_strategy.rs @@ -0,0 +1,205 @@ +use std::collections::HashMap; + +use log::*; + +use crate::CommitInfo; + +/// Tries to merge bundles into chunks to leverage the max amount of commits +/// we can have in a single transaction. +pub(crate) fn efficient_bundle_chunks( + mut bundles: HashMap>, + max_per_chunk: usize, +) -> Vec> { + let lens = bundles + .iter() + .map(|(id, commits)| Len { + id: *id, + len: commits.len(), + }) + .collect::>(); + + let chunked_ids = efficient_merge_strategy(lens, max_per_chunk); + + let mut chunked_bundles = Vec::new(); + for chunk in chunked_ids { + let mut bundle_chunk = Vec::::new(); + for id in chunk { + if let Some(bundles) = bundles.remove(&id) { + bundle_chunk.extend(bundles); + } else { + debug_assert!(false, "BUG: bundle not found for id {}", id); + continue; + } + } + chunked_bundles.push(bundle_chunk); + } + + debug_assert!(bundles.is_empty()); + + chunked_bundles +} + +#[derive(PartialEq, Eq, Debug, Clone, Copy)] +struct Len { + id: u64, + len: usize, +} + +/// Returns the most efficient merge strategy for the given lens and max size. 
+/// WARN: Requires that no len is larger than max_size, otherwise this method will +/// get stuck +fn efficient_merge_strategy( + mut lens: Vec, + max_size: usize, +) -> Vec> { + // NOTE: crash in dev, use escape hatch in release + debug_assert!(lens.iter().all(|len| len.len <= max_size)); + + for len in lens.iter() { + if len.len > max_size { + // NOTE: This is an escape hatch, if we have a len that is larger + // than the max size since we can't merge it. + // This is caused by a programmer error in the calling code. + // It will most likely cause an issue higher in the call stack + // but handling it this way is better than crashing or getting + // stuck. + error!( + "BUG: len {} is too large for the max_size {}", + len.len, max_size + ); + return lens.iter().map(|len| vec![len.id]).collect(); + } + } + + lens.sort_by_key(|len| len.len); + + let mut chunks: Vec> = Vec::new(); + let Some(next_len) = lens.pop() else { + return vec![]; + }; + let mut current_chunk = vec![next_len.id]; + let mut current_size = next_len.len; + 'outer: loop { + let mut remaining_lens = vec![]; + for len in lens.iter().rev() { + if current_size + len.len <= max_size { + current_chunk.push(len.id); + current_size += len.len; + } else { + remaining_lens.push(*len); + continue; + } + } + + lens = lens + .drain(..) 
+ .filter(|len| remaining_lens.contains(len)) + .collect(); + + if lens.is_empty() { + chunks.push(current_chunk); + break; + } + + if lens + .first() + .map(|len| current_size < len.len) + .unwrap_or(false) + { + continue 'outer; + } + + // If we have no more lens to add to the current chunk create a new one + chunks.push(current_chunk); + + // No more lens to process, we are done with the entire process + let Some(next_len) = lens.pop() else { + break 'outer; + }; + current_chunk = vec![next_len.id]; + current_size = next_len.len; + } + + chunks +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_efficient_merge_strategy() { + let lens = vec![ + Len { id: 1, len: 1 }, + Len { id: 2, len: 2 }, + Len { id: 3, len: 3 }, + Len { id: 4, len: 4 }, + Len { id: 5, len: 5 }, + Len { id: 6, len: 6 }, + Len { id: 7, len: 7 }, + Len { id: 8, len: 8 }, + Len { id: 9, len: 9 }, + Len { id: 10, len: 10 }, + ]; + + let res = efficient_merge_strategy(lens.clone(), 10); + assert_eq!( + res, + vec![ + vec![10], + vec![9, 1], + vec![8, 2], + vec![7, 3], + vec![6, 4], + vec![5] + ] + ); + + let res = efficient_merge_strategy(lens.clone(), 20); + assert_eq!(res, vec![vec![10, 9, 1], vec![8, 7, 5], vec![6, 4, 3, 2]]); + + let lens = vec![ + Len { id: 1, len: 1 }, + Len { id: 2, len: 2 }, + Len { id: 3, len: 3 }, + Len { id: 4, len: 4 }, + Len { id: 5, len: 5 }, + Len { id: 6, len: 6 }, + Len { id: 7, len: 7 }, + Len { id: 8, len: 8 }, + ]; + let res = efficient_merge_strategy(lens.clone(), 8); + assert_eq!( + res, + vec![vec![8], vec![7, 1], vec![6, 2], vec![5, 3], vec![4]] + ); + let lens = vec![ + Len { id: 1, len: 1 }, + Len { id: 2, len: 2 }, + Len { id: 3, len: 2 }, + Len { id: 4, len: 2 }, + Len { id: 5, len: 2 }, + Len { id: 6, len: 6 }, + Len { id: 7, len: 6 }, + Len { id: 8, len: 8 }, + ]; + let res = efficient_merge_strategy(lens.clone(), 8); + assert_eq!(res, vec![vec![8], vec![7, 5], vec![6, 4], vec![3, 2, 1]]); + + let lens = vec![ + Len { id: 1, len: 1 }, 
+ Len { id: 3, len: 2 }, + Len { id: 4, len: 2 }, + Len { id: 5, len: 2 }, + Len { id: 6, len: 6 }, + Len { id: 7, len: 6 }, + Len { id: 8, len: 8 }, + Len { id: 9, len: 8 }, + ]; + let res = efficient_merge_strategy(lens.clone(), 8); + assert_eq!( + res, + vec![vec![9], vec![8], vec![7, 5], vec![6, 4], vec![3, 1]] + ); + } +} diff --git a/magicblock-committor-service/src/bundles.rs b/magicblock-committor-service/src/bundles.rs new file mode 100644 index 00000000..a030842f --- /dev/null +++ b/magicblock-committor-service/src/bundles.rs @@ -0,0 +1,273 @@ +use crate::{bundle_strategy::efficient_bundle_chunks, CommitInfo}; +use std::collections::HashMap; + +#[derive(Debug, Default)] +pub struct BundleChunksResult { + /// The valid chunks + pub chunks: Vec>, + /// Commit infos that were not included in any chunk since not all infos in + /// a bundle could fit into a single chunk. + /// key: bundle_id + /// value: commit infos + pub unchunked: HashMap>, +} + +/// Creates chunks that respect the following requirements: +/// 1. A chunk cannot be larger than [max_per_chunk]. +/// 2. All commit infos with the same bundle_id must be in the same chunk. +pub(crate) fn bundle_chunks( + mut commit_infos: Vec, + max_per_chunk: usize, +) -> BundleChunksResult { + if commit_infos.is_empty() { + return BundleChunksResult::default(); + } + + // Group commit infos by bundle_id + let mut bundles: HashMap> = HashMap::new(); + let mut not_bundled: Vec = Vec::new(); + for commit_info in commit_infos.drain(..) 
{ + bundles + .entry(commit_info.bundle_id()) + .or_default() + .push(commit_info); + } + + // Remove bundles that are too large to fit into a single chunk + let (bundles, unbundled) = bundles.into_iter().fold( + (HashMap::new(), HashMap::new()), + |(mut bundles, mut unbundled), (key, bundle)| { + if bundle.len() > max_per_chunk { + unbundled.insert(key, bundle); + } else { + bundles.insert(key, bundle); + } + (bundles, unbundled) + }, + ); + + // Merge small bundles + let mut chunks = efficient_bundle_chunks(bundles, max_per_chunk); + + // Add any commits that were not bundled to any of the bundles that still + // have some room + for chunk in chunks.iter_mut() { + let remaining_space = max_per_chunk - chunk.len(); + if remaining_space > 0 { + let range_end = remaining_space.min(not_bundled.len()); + chunk.extend(&mut not_bundled.drain(..range_end)); + } + } + + // If we still have unbundled commits then add chunks for those + while !not_bundled.is_empty() { + let range_end = (max_per_chunk).min(not_bundled.len()); + chunks.push(not_bundled.drain(..range_end).collect()); + } + + BundleChunksResult { + chunks, + unchunked: unbundled, + } +} + +/// Use this for operations on commit infos that don't have to run atomically for a bundle. +/// As an example closing buffers needed for the commit can be done without respecting +/// bundles. 
+pub(crate) fn bundle_chunks_ignoring_bundle_id( + commit_infos: &[CommitInfo], + max_per_chunk: usize, +) -> BundleChunksResult { + if commit_infos.is_empty() { + return BundleChunksResult::default(); + } + let chunks = commit_infos + .chunks(max_per_chunk) + .map(|chunk| chunk.to_vec()) + .collect::>(); + + BundleChunksResult { + chunks, + unchunked: HashMap::new(), + } +} + +#[cfg(test)] +mod test { + use super::*; + use solana_sdk::hash::Hash; + use solana_sdk::pubkey::Pubkey; + use std::collections::HashSet; + + fn commit_info(bundle_id: u64) -> crate::CommitInfo { + CommitInfo::BufferedDataAccount { + pubkey: Pubkey::new_unique(), + delegated_account_owner: Pubkey::new_unique(), + slot: 0, + ephemeral_blockhash: Hash::new_unique(), + undelegate: false, + buffer_pda: Pubkey::new_unique(), + chunks_pda: Pubkey::new_unique(), + commit_state: Pubkey::new_unique(), + lamports: 0, + bundle_id, + finalize: false, + } + } + + macro_rules! chunk_and_verify { + ($commit_infos:ident, $max_per_chunk:expr) => {{ + let res = bundle_chunks($commit_infos.clone(), $max_per_chunk); + + // 1. All commit infos are accounted for + let bundled_commit_infos = + res.chunks.iter().flatten().cloned().collect::>(); + let unbundled_commit_infos = res + .unchunked + .values() + .flatten() + .cloned() + .collect::>(); + + for commit_info in $commit_infos { + assert!( + bundled_commit_infos.contains(&commit_info), + "{:#?} was not bundled in {:#?}", + commit_info, + bundled_commit_infos + ); + } + assert!( + unbundled_commit_infos.is_empty(), + "Unbundled: {:#?}", + unbundled_commit_infos + ); + + // 2. Chunk size is within limits + for chunk in res.chunks.iter() { + assert!(chunk.len() <= $max_per_chunk); + } + + // 3. 
All commit infos with the same bundle_id are in the same chunk + // If a chunk has a bundle id then no other chunk should have it + let bundle_ids = bundled_commit_infos + .iter() + .map(|commit_info| commit_info.bundle_id()) + .collect::>(); + for id in bundle_ids { + let mut count = 0; + for chunk in res.chunks.iter() { + let mut in_chunk = false; + for commit_info in chunk { + if commit_info.bundle_id() == id { + in_chunk = true + } + } + if in_chunk { + count += 1; + } + } + assert_eq!( + count, 1, + "Bundle id {} is in {} chunks. {:#?}", + id, count, res.chunks + ); + } + res + }}; + } + + const MAX_PER_CHUNK: usize = 3; + + #[test] + fn test_empty_bundle() { + let res = bundle_chunks(Vec::new(), MAX_PER_CHUNK); + assert!(res.chunks.is_empty()); + assert!(res.unchunked.is_empty()); + } + + #[test] + fn test_single_bundle_single_commit() { + let commit_infos = vec![commit_info(0)]; + chunk_and_verify!(commit_infos, MAX_PER_CHUNK); + } + + #[test] + fn test_single_bundle() { + let commit_infos = vec![commit_info(0), commit_info(0), commit_info(0)]; + chunk_and_verify!(commit_infos, MAX_PER_CHUNK); + } + + #[test] + fn test_single_bundle_too_large() { + let commit_infos = vec![ + commit_info(0), + commit_info(0), + commit_info(0), + commit_info(0), + ]; + let res = bundle_chunks(commit_infos.clone(), MAX_PER_CHUNK); + assert!(res.chunks.is_empty()); + assert_eq!(res.unchunked.len(), 1); + assert_eq!(res.unchunked.get(&0).unwrap(), &commit_infos); + } + + #[test] + fn test_multiple_bundles() { + let commit_infos = vec![ + // Bundle 0 + commit_info(0), + commit_info(0), + // Bundle 1 + commit_info(1), + commit_info(1), + commit_info(1), + // Bundle 2 + commit_info(2), + commit_info(2), + ]; + chunk_and_verify!(commit_infos, MAX_PER_CHUNK); + } + + #[test] + fn test_multiple_bundles_with_unbundled() { + let commit_infos = vec![ + // Bundle 0 + commit_info(0), + commit_info(0), + // Bundle 1 + commit_info(1), + commit_info(5), + commit_info(1), + commit_info(6), + 
commit_info(1), + // Bundle 2 + commit_info(2), + commit_info(2), + commit_info(7), + ]; + chunk_and_verify!(commit_infos, MAX_PER_CHUNK); + } + + #[test] + fn test_multiple_bundles_efficiency() { + let commit_infos = vec![ + // Bundle 0 + commit_info(0), + commit_info(0), + commit_info(0), + // Bundle 1 + commit_info(1), + commit_info(1), + commit_info(1), + // Bundle 2 + commit_info(2), + commit_info(2), + // Bundle 3 + commit_info(3), + commit_info(3), + ]; + let res = chunk_and_verify!(commit_infos, 5); + assert_eq!(res.chunks.len(), 2); + } +} diff --git a/magicblock-committor-service/src/commit/commit_using_args.rs b/magicblock-committor-service/src/commit/commit_using_args.rs new file mode 100644 index 00000000..525eb531 --- /dev/null +++ b/magicblock-committor-service/src/commit/commit_using_args.rs @@ -0,0 +1,299 @@ +use crate::{ + commit::common::{ + get_accounts_to_undelegate, lookup_table_keys, send_and_confirm, + }, + commit_stage::CommitSignatures, + persist::CommitStrategy, + undelegate::undelegate_commitables_ixs, + CommitInfo, +}; + +use dlp::args::CommitStateArgs; +use log::*; +use solana_sdk::hash::Hash; +use std::{collections::HashSet, sync::Arc}; + +use magicblock_committor_program::Changeset; +use solana_sdk::signer::Signer; + +use crate::commit_stage::CommitStage; +use magicblock_rpc_client::MagicBlockSendTransactionConfig; + +use super::CommittorProcessor; + +impl CommittorProcessor { + /// Commits a changeset directly using args to include the commit state + /// - **changeset**: the changeset to commit + /// - **finalize**: whether to finalize the commit + /// - **finalize_separately**: whether to finalize the commit in a separate transaction, if + /// this is `false` we can include the finalize instructions with the process instructions + /// - **ephemeral_blockhash**: the ephemeral blockhash to use for the commit + /// - **latest_blockhash**: the latest blockhash on chain to use for the commit + /// - **use_lookup**: whether to use the 
lookup table for the instructions + pub async fn commit_changeset_using_args( + me: Arc, + changeset: Changeset, + (finalize, finalize_separately): (bool, bool), + ephemeral_blockhash: Hash, + latest_blockhash: Hash, + use_lookup: bool, + ) -> Vec { + // Each changeset is expected to fit into a single instruction which was ensured + // when splitting the original changeset + + let mut process_ixs = Vec::new(); + let mut finalize_ixs = Vec::new(); + let owners = changeset.owners(); + let accounts_to_undelegate = + get_accounts_to_undelegate(&changeset, finalize); + let commitables = changeset.into_committables(0); + // NOTE: we copy the commitables here in order to return them with an error + // [CommitStage] if needed. Since the data of these accounts is small + // (< 1024 bytes), it is acceptable perf overhead + // Alternatively we could include only metadata for the [CommitStage]. + for commitable in commitables.iter() { + let commit_args = CommitStateArgs { + slot: commitable.slot, + lamports: commitable.lamports, + allow_undelegation: commitable.undelegate, + data: commitable.data.clone(), + }; + + let ix = dlp::instruction_builder::commit_state( + me.authority.pubkey(), + commitable.pubkey, + commitable.delegated_account_owner, + commit_args, + ); + process_ixs.push(ix); + + // We either include the finalize instructions with the process instruction or + // if the strategy builder determined that they wouldn't fit then we run them + // in a separate transaction + if finalize { + let finalize_ix = dlp::instruction_builder::finalize( + me.authority.pubkey(), + commitable.pubkey, + ); + if finalize_separately { + finalize_ixs.push(finalize_ix); + } else { + process_ixs.push(finalize_ix); + } + } + } + + let commit_infos = commitables + .into_iter() + .map(|acc| { + CommitInfo::from_small_data_account( + acc, + ephemeral_blockhash, + finalize, + ) + }) + .collect::>(); + + let committees = commit_infos + .iter() + .map(|x| x.pubkey()) + .collect::>(); + + let 
table_mania = use_lookup.then(|| me.table_mania.clone()); + let table_mania_setup = table_mania.as_ref().map(|tm| { + let keys = lookup_table_keys(&me.authority, &committees, &owners); + (tm, keys) + }); + + let compute_budget_ixs = me + .compute_budget_config + .args_process_budget() + .instructions(committees.len()); + let process_sig = match send_and_confirm( + me.magicblock_rpc_client.clone(), + me.authority.insecure_clone(), + [compute_budget_ixs, process_ixs].concat(), + "commit changeset using args".to_string(), + Some(latest_blockhash), + MagicBlockSendTransactionConfig::ensure_committed(), + table_mania_setup.clone(), + ) + .await + { + Ok(sig) => sig, + Err(err) => { + error!("Failed to commit changeset using args: {:?}", err); + let strategy = CommitStrategy::args(use_lookup); + let sigs = err.signature().map(|sig| CommitSignatures { + process_signature: sig, + finalize_signature: None, + undelegate_signature: None, + }); + return commit_infos + .into_iter() + .map(|x| { + CommitStage::FailedProcess(( + x, + strategy, + sigs.as_ref().cloned(), + )) + }) + .collect(); + } + }; + + let finalize_sig = if !finalize_ixs.is_empty() { + let table_mania_setup = table_mania.as_ref().map(|tm| { + let keys = + lookup_table_keys(&me.authority, &committees, &owners); + (tm, keys) + }); + let finalize_budget_ixs = me + .compute_budget_config + .finalize_budget() + .instructions(committees.len()); + match send_and_confirm( + me.magicblock_rpc_client.clone(), + me.authority.insecure_clone(), + [finalize_budget_ixs, finalize_ixs].concat(), + "commit changeset using args".to_string(), + Some(latest_blockhash), + MagicBlockSendTransactionConfig::ensure_committed(), + table_mania_setup, + ) + .await + { + Ok(sig) => Some(sig), + Err(err) => { + error!( + "Failed to finalize changeset using args: {:?}", + err + ); + return commit_infos + .into_iter() + .map(|x| { + CommitStage::FailedFinalize(( + x, + CommitStrategy::args(use_lookup), + CommitSignatures { + 
process_signature: process_sig, + finalize_signature: err.signature(), + undelegate_signature: None, + }, + )) + }) + .collect(); + } + } + } else { + (!finalize_separately).then_some(process_sig) + }; + + trace!( + "Successfully processed {} commit infos via transaction '{}'", + commit_infos.len(), + process_sig + ); + + let undelegate_sig = if let Some(sig) = finalize_sig { + trace!( + "Successfully finalized {} commit infos via transaction '{}'", + commit_infos.len(), + sig + ); + + // If we successfully finalized the commit then we can undelegate accounts + if let Some(accounts) = accounts_to_undelegate { + let accounts_len = accounts.len(); + let undelegate_ixs = match undelegate_commitables_ixs( + &me.magicblock_rpc_client, + me.authority.pubkey(), + accounts, + ) + .await + { + Ok(ixs) => ixs.into_values().collect::>(), + Err(err) => { + error!( + "Failed to prepare accounts undelegation '{}': {:?}", + err, err + ); + return commit_infos + .into_iter() + .map(|x| { + CommitStage::FailedUndelegate(( + x, + CommitStrategy::args(use_lookup), + CommitSignatures { + process_signature: process_sig, + finalize_signature: finalize_sig, + undelegate_signature: err.signature(), + }, + )) + }) + .collect(); + } + }; + let undelegate_budget_ixs = me + .compute_budget_config + .undelegate_budget() + .instructions(accounts_len); + match send_and_confirm( + me.magicblock_rpc_client.clone(), + me.authority.insecure_clone(), + [undelegate_budget_ixs, undelegate_ixs].concat(), + "undelegate committed accounts using args".to_string(), + Some(latest_blockhash), + MagicBlockSendTransactionConfig::ensure_committed(), + table_mania_setup, + ) + .await + { + Ok(sig) => { + trace!("Successfully undelegated accounts via transaction '{}'", sig); + Some(sig) + } + Err(err) => { + error!( + "Failed to undelegate accounts via transaction '{}': {:?}", + err, err + ); + return commit_infos + .into_iter() + .map(|x| { + CommitStage::FailedUndelegate(( + x, + 
CommitStrategy::args(use_lookup), + CommitSignatures { + process_signature: process_sig, + finalize_signature: finalize_sig, + undelegate_signature: err.signature(), + }, + )) + }) + .collect(); + } + } + } else { + None + } + } else { + None + }; + + commit_infos + .into_iter() + .map(|x| { + CommitStage::Succeeded(( + x, + CommitStrategy::args(use_lookup), + CommitSignatures { + process_signature: process_sig, + finalize_signature: finalize_sig, + undelegate_signature: undelegate_sig, + }, + )) + }) + .collect() + } +} diff --git a/magicblock-committor-service/src/commit/commit_using_buffer.rs b/magicblock-committor-service/src/commit/commit_using_buffer.rs new file mode 100644 index 00000000..4d99ff99 --- /dev/null +++ b/magicblock-committor-service/src/commit/commit_using_buffer.rs @@ -0,0 +1,1028 @@ +use borsh::{to_vec, BorshDeserialize}; +use dlp::pda::commit_state_pda_from_delegated_account; +use log::*; +use magicblock_rpc_client::{ + MagicBlockRpcClientError, MagicBlockRpcClientResult, + MagicBlockSendTransactionConfig, +}; +use solana_pubkey::Pubkey; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Duration, +}; +use tokio::task::JoinSet; + +use magicblock_committor_program::{ + instruction::{ + create_init_ix, create_realloc_buffer_ixs, + create_realloc_buffer_ixs_to_add_remaining, create_write_ix, + CreateInitIxArgs, CreateReallocBufferIxArgs, CreateWriteIxArgs, + }, + instruction_chunks::chunk_realloc_ixs, + Changeset, ChangesetChunk, Chunks, CommitableAccount, +}; + +use crate::{ + commit::common::get_accounts_to_undelegate, + commit_stage::CommitSignatures, + error::{CommitAccountError, CommitAccountResult}, + finalize::{ + chunked_ixs_to_finalize_commitables, + ChunkedIxsToFinalizeCommitablesResult, + }, + persist::CommitStrategy, + types::InstructionsKind, + undelegate::{ + chunked_ixs_to_undelegate_commitables, undelegate_commitables_ixs, + }, + CommitInfo, CommitStage, +}; + +use super::{ + common::send_and_confirm, + 
process_buffers::{ + chunked_ixs_to_process_commitables_and_close_pdas, + ChunkedIxsToProcessCommitablesAndClosePdasResult, + }, + CommittorProcessor, +}; +use solana_sdk::{hash::Hash, instruction::Instruction, signer::Signer}; + +struct NextReallocs { + missing_size: u64, + start_idx: usize, +} + +impl CommittorProcessor { + /// Commits the changeset by initializing the accounts, writing the chunks, + /// and closing the pdas. + /// NOTE: we return no error since the validator would not know how to mitigate + /// the problem. + pub async fn commit_changeset_using_buffers( + processor: Arc, + changeset: Changeset, + finalize: bool, + ephemeral_blockhash: Hash, + use_lookup: bool, + ) -> Vec { + macro_rules! handle_unchunked { + ($unchunked:ident, $commit_stages:ident, $commit_stage:expr) => { + for (bundle_id, commit_infos) in $unchunked.into_iter() { + // The max amount of accounts we can commit and process as part of a single + // transaction is [crate::max_per_transaction::MAX_COMMIT_STATE_AND_CLOSE_PER_TRANSACTION]. + warn!( + "Commit infos for bundle id {} are too many to be processed in a single transaction", + bundle_id + ); + $commit_stages.extend( + commit_infos + .into_iter() + .map($commit_stage), + ); + } + } + } + + let owners = changeset.owners(); + let accounts_len = changeset.account_keys().len(); + let commit_strategy = if use_lookup { + CommitStrategy::FromBufferWithLookupTable + } else { + CommitStrategy::FromBuffer + }; + let accounts_to_undelegate = + get_accounts_to_undelegate(&changeset, finalize); + let results = processor + .prepare_changeset_buffers( + changeset, + ephemeral_blockhash, + commit_strategy, + finalize, + ) + .await; + + let mut commit_stages = vec![]; + + // 1. 
Init Buffer and Chunks Account + let (mut succeeded_inits, failed_inits): (Vec<_>, Vec<_>) = { + let (succeeded, failed): (Vec<_>, Vec<_>) = + results.into_iter().partition(Result::is_ok); + ( + succeeded + .into_iter() + .map(Result::unwrap) + .collect::>(), + failed + .into_iter() + .map(Result::unwrap_err) + .collect::>(), + ) + }; + + // If we couldn't init the buffers for a specific commit then we're done with it. + for commit_err in failed_inits.into_iter() { + let commit_stage = CommitStage::from(commit_err); + let bundle_id = commit_stage.commit_metadata().bundle_id(); + commit_stages.push(commit_stage); + + // We also need to skip all committables that are in the same bundle as + // a commit we're giving up on. + let (fail_in_order_to_respect_bundle, keep): (Vec<_>, Vec<_>) = + succeeded_inits.drain(..).partition(|commit_info| { + #[allow(clippy::let_and_return)] + let same_bundle = commit_info.bundle_id() == bundle_id; + same_bundle + }); + commit_stages.extend( + fail_in_order_to_respect_bundle.into_iter().map(|x| { + CommitStage::BufferAndChunkFullyInitialized(( + x, + commit_strategy, + )) + }), + ); + succeeded_inits.extend(keep); + } + + // 2. Create chunks of instructions that process the commits and respect desired bundles + let ChunkedIxsToProcessCommitablesAndClosePdasResult { + chunked_ixs, + chunked_close_ixs, + unchunked, + } = chunked_ixs_to_process_commitables_and_close_pdas( + processor.authority.pubkey(), + succeeded_inits.clone(), + use_lookup, + ); + handle_unchunked!( + unchunked, + commit_stages, + CommitStage::PartOfTooLargeBundleToProcess + ); + + // 3. 
Process all chunks via transactions, one per chunk of instructions + trace!( + "ChunkedIxs: {}", + chunked_ixs + .iter() + .map(|xs| xs + .iter() + .map(|x| x.to_string()) + .collect::>() + .join("\n")) + .collect::>() + .join("]\n\n[\n") + ); + debug_assert_eq!( + chunked_ixs.iter().map(|x| x.len()).sum::() + commit_stages.len(), + accounts_len, + "Sum of instructions and early bail out stages should have one instruction per commmitted account", + ); + + let table_mania = use_lookup.then(|| processor.table_mania.clone()); + let (succeeded_process, failed_process) = processor + .process_ixs_chunks( + chunked_ixs, + chunked_close_ixs, + table_mania.as_ref(), + &owners, + ) + .await; + + commit_stages.extend(failed_process.into_iter().flat_map( + |(sig, xs)| { + let sigs = sig.map(|x| CommitSignatures { + process_signature: x, + finalize_signature: None, + undelegate_signature: None, + }); + xs.into_iter() + .map(|x| { + CommitStage::FailedProcess(( + x, + commit_strategy, + sigs.as_ref().cloned(), + )) + }) + .collect::>() + }, + )); + + let mut processed_commit_infos = vec![]; + let mut processed_signatures = HashMap::new(); + for (sig, commit_infos) in succeeded_process { + if log_enabled!(Level::Trace) { + let kinds = commit_infos + .iter() + .map(|(_, kind)| *kind) + .collect::>(); + let handled = kinds + .iter() + .map(|x| format!("{:?}", x)) + .collect::>() + .join(" | "); + trace!( + "Successfully handled ({}) for {} commit info(s) via transaction '{}'", + handled, + commit_infos.len(), + sig + ); + } + for (commit_info, _) in commit_infos + .into_iter() + .filter(|(_, kind)| kind.is_processing()) + { + let bundle_id = commit_info.bundle_id(); + debug_assert!( + processed_signatures + .get(&bundle_id) + .map(|x| x == &sig) + .unwrap_or(true), + "BUG: Same processed bundle ids should have the same signature" + ); + processed_signatures.insert(bundle_id, sig); + processed_commit_infos.push(commit_info); + } + } + + // 4. 
Optionally finalize + undelegate all processed commits also respecting bundles + if finalize && !processed_commit_infos.is_empty() { + // 4.1. Create chunks of finalize instructions that fit in a single transaction + let ChunkedIxsToFinalizeCommitablesResult { + chunked_ixs, + unchunked, + } = chunked_ixs_to_finalize_commitables( + processor.authority.pubkey(), + processed_commit_infos, + use_lookup, + ); + handle_unchunked!( + unchunked, + commit_stages, + CommitStage::PartOfTooLargeBundleToFinalize + ); + + // 4.2. Run each finalize chunk in a single transaction + let (succeeded_finalize, failed_finalize): (Vec<_>, Vec<_>) = + processor + .process_ixs_chunks( + chunked_ixs, + None, + table_mania.as_ref(), + &owners, + ) + .await; + commit_stages.extend(failed_finalize.into_iter().flat_map( + |(sig, infos)| { + infos + .into_iter() + .map(|x| { + let bundle_id = x.bundle_id(); + CommitStage::FailedFinalize(( + x, + commit_strategy, + CommitSignatures { + // SAFETY: signatures for all bundles of succeeded process transactions + // have been added above + process_signature: *processed_signatures + .get(&bundle_id) + .unwrap(), + finalize_signature: sig, + undelegate_signature: None, + }, + )) + }) + .collect::>() + }, + )); + + let mut finalized_commit_infos = vec![]; + let mut finalized_signatures = HashMap::new(); + for (sig, commit_infos) in succeeded_finalize { + trace!( + "Successfully finalized {} commit infos via transaction '{}'", + commit_infos.len(), + sig + ); + for (commit_info, kind) in commit_infos.iter() { + debug_assert_eq!( + kind, + &InstructionsKind::Finalize, + "Expecting separate finalize instructions onky" + ); + let bundle_id = commit_info.bundle_id(); + debug_assert!( + finalized_signatures + .get(&bundle_id) + .map(|x| x == &sig) + .unwrap_or(true), + "BUG: Same finalized bundle ids should have the same signature" + ); + + finalized_signatures.insert(bundle_id, sig); + } + let commit_infos = commit_infos + .into_iter() + .map(|(info, _)| 
info) + .collect::>(); + finalized_commit_infos.extend(commit_infos); + } + // 4.2. Consider undelegation by first dividing finalized accounts into two sets, + let (finalize_and_undelegate, finalize_only) = + finalized_commit_infos + .into_iter() + .partition::, _>(|x| x.undelegate()); + // 4.3.a accounts we don't need to undelegate are done + commit_stages.extend(finalize_only.into_iter().map(|x| { + let bundle_id = x.bundle_id(); + CommitStage::Succeeded(( + x, + commit_strategy, + CommitSignatures { + // SAFETY: signatures for all bundles of succeeded process transactions + // have been added above + process_signature: *processed_signatures + .get(&bundle_id) + .unwrap(), + finalize_signature: finalized_signatures + .get(&bundle_id) + .cloned(), + undelegate_signature: None, + }, + )) + })); + // 4.3.b the other accounts need to be undelegated first + if let Some(accounts) = accounts_to_undelegate { + debug_assert_eq!( + accounts.len(), + finalize_and_undelegate.len(), + "BUG: same amount of accounts to undelegate as to finalize and undelegate" + ); + let undelegate_ixs = match undelegate_commitables_ixs( + &processor.magicblock_rpc_client, + processor.authority.pubkey(), + accounts, + ) + .await + { + Ok(ixs) => Some(ixs), + Err(err) => { + error!( + "Failed to prepare accounts undelegation '{}': {:?}", + err, err + ); + commit_stages.extend( + finalize_and_undelegate.iter().map(|x| { + let bundle_id = x.bundle_id(); + CommitStage::FailedUndelegate(( + x.clone(), + CommitStrategy::args(use_lookup), + CommitSignatures { + // SAFETY: signatures for all bundles of succeeded process transactions + // have been added above + process_signature: + *processed_signatures + .get(&bundle_id) + .unwrap(), + finalize_signature: + finalized_signatures + .get(&bundle_id) + .cloned(), + undelegate_signature: err.signature(), + }, + )) + }), + ); + None + } + }; + if let Some(undelegate_ixs) = undelegate_ixs { + let chunked_ixs = chunked_ixs_to_undelegate_commitables( + 
undelegate_ixs, + finalize_and_undelegate, + use_lookup, + ); + let (succeeded_undelegate, failed_undelegate): ( + Vec<_>, + Vec<_>, + ) = processor + .process_ixs_chunks( + chunked_ixs, + None, + table_mania.as_ref(), + &owners, + ) + .await; + + commit_stages.extend( + failed_undelegate.into_iter().flat_map( + |(sig, infos)| { + infos + .into_iter() + .map(|x| { + let bundle_id = x.bundle_id(); + CommitStage::FailedUndelegate(( + x, + commit_strategy, + CommitSignatures { + // SAFETY: signatures for all bundles of succeeded process transactions + // have been added above + process_signature: + *processed_signatures + .get(&bundle_id) + .unwrap(), + finalize_signature: + finalized_signatures + .get(&bundle_id) + .cloned(), + undelegate_signature: sig, + }, + )) + }) + .collect::>() + }, + ), + ); + commit_stages.extend( + succeeded_undelegate.into_iter().flat_map( + |(sig, infos)| { + infos + .into_iter() + .map(|(x, _)| { + let bundle_id = x.bundle_id(); + CommitStage::Succeeded(( + x, + commit_strategy, + CommitSignatures { + // SAFETY: signatures for all bundles of succeeded process transactions + // have been added above + process_signature: + *processed_signatures + .get(&bundle_id) + .unwrap(), + finalize_signature: + finalized_signatures + .get(&bundle_id) + .cloned(), + undelegate_signature: Some(sig), + }, + )) + }) + .collect::>() + }, + ), + ); + } + } else { + debug_assert!( + finalize_and_undelegate.is_empty(), + "BUG: We should either have accounts to undelegate or an empty finalize_and_undelegate" + ); + } + } else { + commit_stages.extend(processed_commit_infos.into_iter().map(|x| { + let bundle_id = x.bundle_id(); + CommitStage::Succeeded(( + x, + commit_strategy, + CommitSignatures { + // SAFETY: signatures for all bundles of succeeded process transactions + // have been added above + process_signature: *processed_signatures + .get(&bundle_id) + .unwrap(), + finalize_signature: None, + undelegate_signature: None, + }, + )) + })); + } + + 
debug_assert_eq!( + accounts_len, + CommitStage::commit_infos(&commit_stages).len(), + "Should have one commit stage per commmitted account ({}) {:#?}", + accounts_len, + commit_stages + ); + + commit_stages + } + + async fn prepare_changeset_buffers( + &self, + changeset: Changeset, + ephemeral_blockhash: Hash, + commit_strategy: CommitStrategy, + finalize: bool, + ) -> Vec> { + let commitables = + changeset.into_committables(crate::consts::MAX_WRITE_CHUNK_SIZE); + let mut join_set: JoinSet> = + JoinSet::new(); + for commitable in commitables { + let me = Arc::new(self.clone()); + join_set.spawn(Self::commit_account( + me, + commitable, + ephemeral_blockhash, + commit_strategy, + finalize, + )); + } + join_set.join_all().await + } + + async fn commit_account( + me: Arc, + mut commitable: CommitableAccount, + ephemeral_blockhash: Hash, + commit_strategy: CommitStrategy, + finalize: bool, + ) -> CommitAccountResult { + let commit_info = if commitable.has_data() { + let chunks = + Chunks::new(commitable.chunk_count(), commitable.chunk_size()); + let chunks_account_size = to_vec(&chunks).unwrap().len() as u64; + + // Initialize the Changeset and Chunks accounts on chain + let buffer_account_size = commitable.size() as u64; + + let (init_ix, chunks_pda, buffer_pda) = + create_init_ix(CreateInitIxArgs { + authority: me.authority.pubkey(), + pubkey: commitable.pubkey, + chunks_account_size, + buffer_account_size, + blockhash: ephemeral_blockhash, + chunk_count: commitable.chunk_count(), + chunk_size: commitable.chunk_size(), + }); + let realloc_ixs = + create_realloc_buffer_ixs(CreateReallocBufferIxArgs { + authority: me.authority.pubkey(), + pubkey: commitable.pubkey, + buffer_account_size, + blockhash: ephemeral_blockhash, + }); + + let commit_info = CommitInfo::BufferedDataAccount { + pubkey: commitable.pubkey, + commit_state: commit_state_pda_from_delegated_account( + &commitable.pubkey, + ), + delegated_account_owner: commitable.delegated_account_owner, + slot: 
commitable.slot, + ephemeral_blockhash, + undelegate: commitable.undelegate, + chunks_pda, + buffer_pda, + lamports: commitable.lamports, + bundle_id: commitable.bundle_id, + finalize, + }; + + // Even though this transaction also inits the chunks account we check + // that it succeeded by querying the buffer account since this is the + // only of the two that we may have to realloc. + let commit_info = Arc::new( + me.init_accounts( + init_ix, + realloc_ixs, + commitable.pubkey, + &buffer_pda, + buffer_account_size, + ephemeral_blockhash, + commit_info, + commit_strategy, + ) + .await?, + ); + + let mut last_write_chunks_err = None; + if let Err(err) = me + .write_chunks( + commitable.pubkey, + commitable.iter_all(), + ephemeral_blockhash, + ) + .await + { + last_write_chunks_err = Some(err); + }; + + let mut remaining_tries = 10; + const MAX_GET_ACCOUNT_RETRIES: usize = 5; + loop { + let mut acc = None; + let mut last_get_account_err = None; + for _ in 0..MAX_GET_ACCOUNT_RETRIES { + match me + .magicblock_rpc_client + .get_account(&chunks_pda) + .await + { + Ok(Some(x)) => { + acc.replace(x); + break; + } + Ok(None) => { + me.wait_for_account("chunks account", None).await + } + Err(err) => { + me.wait_for_account("chunks account", Some(&err)) + .await; + last_get_account_err.replace(err); + } + } + } + let Some(acc) = acc else { + return Err(CommitAccountError::GetChunksAccount( + last_get_account_err, + commit_info.clone(), + commit_strategy, + )); + }; + let chunks = + Chunks::try_from_slice(&acc.data).map_err(|err| { + CommitAccountError::DeserializeChunksAccount( + err, + commit_info.clone(), + commit_strategy, + ) + })?; + + if chunks.is_complete() { + break; + } + + remaining_tries -= 1; + if remaining_tries == 0 { + return Err( + CommitAccountError::WriteChunksRanOutOfRetries( + last_write_chunks_err, + commit_info.clone(), + commit_strategy, + ), + ); + } + commitable.set_chunks(chunks); + if let Err(err) = me + .write_chunks( + commitable.pubkey, + 
commitable.iter_missing(), + ephemeral_blockhash, + ) + .await + { + last_write_chunks_err = Some(err); + } + } + commit_info + } else { + Arc::new(CommitInfo::EmptyAccount { + pubkey: commitable.pubkey, + delegated_account_owner: commitable.delegated_account_owner, + slot: commitable.slot, + ephemeral_blockhash, + undelegate: commitable.undelegate, + lamports: commitable.lamports, + bundle_id: commitable.bundle_id, + finalize, + }) + }; + + let commit_info = Arc::::unwrap_or_clone(commit_info); + + Ok(commit_info) + } + + /// Sends init/realloc transactions until the account has the desired size + /// - `init_ix` - the instruction to initialize the buffer and chunk account + /// - `realloc_ixs` - the instructions to realloc the buffer account until it reaches the + /// size needed to store the account's data + /// - `pubkey` - the pubkey of the account whose data we are storing + /// - `buffer_pda` - the address of the account where we buffer the data to be committed + /// - `buffer_account_size` - the size of the buffer account + /// - `ephemeral_blockhash` - the blockhash in the ephemeral at which we are committing + /// - `commit_info` - the commit info to be returned or included in errors + /// - `commit_strategy` - the commit strategy that is used + #[allow(clippy::too_many_arguments)] // private method + async fn init_accounts( + &self, + init_ix: Instruction, + realloc_ixs: Vec, + pubkey: Pubkey, + buffer_pda: &Pubkey, + buffer_account_size: u64, + ephemeral_blockhash: Hash, + commit_info: CommitInfo, + commit_strategy: CommitStrategy, + ) -> CommitAccountResult { + // We cannot allocate more than MAX_INITIAL_BUFFER_SIZE in a single + // instruction. Therefore we append a realloc instruction if the buffer + // is very large. 
+ // init_ixs is the init ix with as many realloc ixs as fit into one tx + // extra_realloc_ixs are the remaining realloc ixs that need to be sent + // in separate transactions + let (init_ix_chunk, extra_realloc_ix_chunks) = { + let mut chunked_ixs = chunk_realloc_ixs(realloc_ixs, Some(init_ix)); + let init_with_initial_reallocs = chunked_ixs.remove(0); + let remaining_reallocs = if chunked_ixs.is_empty() { + None + } else { + Some(chunked_ixs) + }; + (init_with_initial_reallocs, remaining_reallocs) + }; + + debug!( + "Init+Realloc chunk ixs {}, Extra Realloc Chunks {}", + init_ix_chunk.len(), + extra_realloc_ix_chunks.as_ref().map_or(0, |x| x.len()) + ); + + // First ensure that the tx including the init ix lands + let mut init_sig = None; + let mut last_err = None; + const MAX_RETRIES: usize = 2; + 'land_init_transaction: for _ in 0..MAX_RETRIES { + // Only retry the init transaction if it failed to send and confirm + if init_sig.is_none() { + let init_budget_ixs = self + .compute_budget_config + .buffer_init + .instructions(init_ix_chunk.len() - 1); + match send_and_confirm( + self.magicblock_rpc_client.clone(), + self.authority.insecure_clone(), + [init_budget_ixs, init_ix_chunk.clone()].concat(), + "init buffer and chunk account".to_string(), + None, + MagicBlockSendTransactionConfig::ensure_committed(), + None, + ) + .await + { + Err(err) => { + last_err = Some(err); + continue; + } + Ok(sig) => { + init_sig = Some(sig); + } + } + } + + // At this point the transaction was confirmed and we should be able + // to get the initialized pda and chunk account + const MAX_GET_ACCOUNT_RETRIES: usize = 5; + for _ in 0..MAX_GET_ACCOUNT_RETRIES { + match self.magicblock_rpc_client.get_account(buffer_pda).await { + Ok(Some(_)) => { + // The account was initialized + break 'land_init_transaction; + } + Ok(None) => { + self.wait_for_account("buffer account", None).await + } + Err(err) => { + self.wait_for_account("buffer account", Some(&err)) + .await + } + } + } + } // 
'land_init_transaction + + if init_sig.is_none() { + let err = last_err + .as_ref() + .map(|x| x.to_string()) + .unwrap_or("Unknown Error".to_string()); + return Err(CommitAccountError::InitBufferAndChunkAccounts( + err, + Box::new(commit_info), + commit_strategy, + )); + } + + // After that we can ensure all extra reallocs in parallel + if let Some(realloc_ixs) = extra_realloc_ix_chunks { + let mut next_reallocs = self + .run_reallocs( + buffer_pda, + realloc_ixs, + buffer_account_size, + buffer_account_size, + 0, + ) + .await; + + if next_reallocs.is_some() { + let args = CreateReallocBufferIxArgs { + authority: self.authority.pubkey(), + pubkey, + buffer_account_size, + blockhash: ephemeral_blockhash, + }; + + while let Some(NextReallocs { + missing_size, + start_idx, + }) = next_reallocs + { + let realloc_ixs = { + let realloc_ixs = + create_realloc_buffer_ixs_to_add_remaining( + &args, + missing_size, + ); + + chunk_realloc_ixs(realloc_ixs, None) + }; + next_reallocs = self + .run_reallocs( + buffer_pda, + realloc_ixs, + buffer_account_size, + missing_size, + start_idx, + ) + .await; + // TODO(thlorenz): give up at some point + } + } + } + + Ok(commit_info) + } + + /// Returns the size that still needs to be allocated after running the instructions + /// along with the idx at which we start (in order to keep increasing the idx of realloc + /// attempt). + /// Returns `None` once the desired size is reached and we're done. 
+ async fn run_reallocs( + &self, + pda: &Pubkey, + realloc_ixs: Vec>, + desired_size: u64, + missing_size: u64, + start_idx: usize, + ) -> Option { + let mut join_set = JoinSet::new(); + let count = realloc_ixs.len(); + let latest_blockhash = + match self.magicblock_rpc_client.get_latest_blockhash().await { + Ok(hash) => hash, + Err(err) => { + error!( + "Failed to get latest blockhash to run reallocs: {:?}", + err + ); + return Some(NextReallocs { + missing_size, + start_idx, + }); + } + }; + for (idx, ixs) in realloc_ixs.into_iter().enumerate() { + let authority = self.authority.insecure_clone(); + let rpc_client = self.magicblock_rpc_client.clone(); + let realloc_budget_ixs = self + .compute_budget_config + .buffer_realloc + .instructions(ixs.len()); + // NOTE: we ignore failures to send/confirm realloc transactions and just + // keep calling [CommittorProcessor::run_reallocs] until we reach the desired size + join_set.spawn(async move { + send_and_confirm( + rpc_client, + authority, + [realloc_budget_ixs, ixs].concat(), + format!( + "realloc buffer account {}/{}", + start_idx + idx, + start_idx + count + ), + Some(latest_blockhash), + MagicBlockSendTransactionConfig::ensure_processed(), + None, + ) + .await + .inspect_err(|err| { + warn!("{:?}", err); + }) + }); + } + join_set.join_all().await; + + match self.magicblock_rpc_client.get_account(pda).await { + Ok(Some(acc)) => { + // Once the account has the desired size we are done + let current_size = acc.data.len(); + if current_size as u64 >= desired_size { + None + } else { + Some(desired_size - current_size as u64) + } + } + // NOTE: if we cannot get the account we must assume that + // the entire size we just tried to alloc is still missing + Ok(None) => { + warn!("buffer account not found"); + Some(missing_size) + } + Err(err) => { + warn!("Failed to get buffer account: {:?}", err); + Some(missing_size) + } + } + .map(|missing_size| NextReallocs { + missing_size, + start_idx: count, + }) + } + + /// Sends 
a transaction to write each chunk. + /// Initially it gets latest blockhash and errors if that fails. + /// All other errors while sending the transaction are logged and ignored. + /// The chunks whose write transactions failed are expected to be retried in + /// the next run. + /// - `pubkey` - the on chain pubkey of the account whose data we are writing to the buffer + /// - `chunks` - the chunks to write + /// - `ephemeral_blockhash` - the blockhash to use for the transaction + async fn write_chunks>( + &self, + pubkey: Pubkey, + chunks: Iter, + ephemeral_blockhash: Hash, + ) -> MagicBlockRpcClientResult<()> { + let mut join_set = JoinSet::new(); + + let latest_blockhash = + self.magicblock_rpc_client.get_latest_blockhash().await?; + + for chunk in chunks.into_iter() { + let authority = self.authority.insecure_clone(); + let rpc_client = self.magicblock_rpc_client.clone(); + let chunk_bytes = chunk.data_chunk.len(); + let ix = create_write_ix(CreateWriteIxArgs { + authority: authority.pubkey(), + pubkey, + offset: chunk.offset, + data_chunk: chunk.data_chunk, + blockhash: ephemeral_blockhash, + }); + let write_budget_ixs = self + .compute_budget_config + .buffer_write + .instructions(chunk_bytes); + // NOTE: we ignore failures to send/confirm write transactions and just + // keep calling [CommittorProcessor::write_chunks] until all of them are + // written which is verified via the chunks account + join_set.spawn(async move { + send_and_confirm( + rpc_client, + authority, + [write_budget_ixs, vec![ix]].concat(), + format!("write chunk for offset {}", chunk.offset), + Some(latest_blockhash), + // NOTE: We could use `processed` here and wait to get the processed status at + // least which would make things a bit slower. + // However that way we would avoid sending unnecessary transactions potentially + // since we may not see some written chunks yet when we get the chunks account. 
+ MagicBlockSendTransactionConfig::ensure_processed(), + None, + ) + .await + .inspect_err(|err| { + error!("{:?}", err); + }) + }); + } + if log::log_enabled!(log::Level::Trace) { + trace!("Writing {} chunks", join_set.len()); + } + + join_set.join_all().await; + + Ok(()) + } + + async fn wait_for_account( + &self, + account_label: &str, + err: Option<&MagicBlockRpcClientError>, + ) { + let sleep_time_ms = { + if let Some(err) = err { + error!("Failed to {} account: {:?}", account_label, err); + } else { + warn!("Failed to {} account", account_label); + } + 100 + }; + tokio::time::sleep(Duration::from_millis(sleep_time_ms)).await; + } +} diff --git a/magicblock-committor-service/src/commit/committor_processor.rs b/magicblock-committor-service/src/commit/committor_processor.rs new file mode 100644 index 00000000..3e6ea0ab --- /dev/null +++ b/magicblock-committor-service/src/commit/committor_processor.rs @@ -0,0 +1,560 @@ +use crate::{ + commit_strategy::{split_changesets_by_commit_strategy, SplitChangesets}, + compute_budget::{ComputeBudget, ComputeBudgetConfig}, + persist::{ + BundleSignatureRow, CommitPersister, CommitStatusRow, CommitStrategy, + }, + pubkeys_provider::provide_committee_pubkeys, + types::InstructionsKind, + CommitInfo, +}; + +use log::*; +use magicblock_table_mania::{GarbageCollectorConfig, TableMania}; +use solana_sdk::{ + commitment_config::CommitmentConfig, hash::Hash, signature::Signature, +}; +use std::{ + collections::{HashMap, HashSet}, + path::Path, + sync::{Arc, Mutex}, +}; + +use magicblock_committor_program::{Changeset, ChangesetMeta}; +use solana_pubkey::Pubkey; +use solana_rpc_client::nonblocking::rpc_client::RpcClient; +use solana_sdk::{signature::Keypair, signer::Signer}; +use tokio::task::JoinSet; + +use crate::{ + commit_stage::CommitStage, config::ChainConfig, + error::CommittorServiceResult, types::InstructionsForCommitable, +}; +use magicblock_rpc_client::{ + MagicBlockSendTransactionConfig, MagicblockRpcClient, +}; + +use 
super::common::{lookup_table_keys, send_and_confirm}; + +pub(crate) struct CommittorProcessor { + pub(crate) magicblock_rpc_client: MagicblockRpcClient, + pub(crate) table_mania: TableMania, + pub(crate) authority: Keypair, + pub(crate) persister: Arc>, + pub(crate) compute_budget_config: Arc, +} + +impl Clone for CommittorProcessor { + fn clone(&self) -> Self { + Self { + magicblock_rpc_client: self.magicblock_rpc_client.clone(), + table_mania: self.table_mania.clone(), + authority: self.authority.insecure_clone(), + persister: self.persister.clone(), + compute_budget_config: self.compute_budget_config.clone(), + } + } +} + +impl CommittorProcessor { + pub fn try_new

( + authority: Keypair, + persist_file: P, + chain_config: ChainConfig, + ) -> CommittorServiceResult + where + P: AsRef, + { + let rpc_client = RpcClient::new_with_commitment( + chain_config.rpc_uri.to_string(), + CommitmentConfig { + commitment: chain_config.commitment, + }, + ); + let rpc_client = Arc::new(rpc_client); + let magic_block_rpc_client = MagicblockRpcClient::new(rpc_client); + let gc_config = GarbageCollectorConfig::default(); + let table_mania = TableMania::new( + magic_block_rpc_client.clone(), + &authority, + Some(gc_config), + ); + let persister = CommitPersister::try_new(persist_file)?; + Ok(Self { + authority, + magicblock_rpc_client: magic_block_rpc_client, + table_mania, + persister: Arc::new(Mutex::new(persister)), + compute_budget_config: Arc::new(chain_config.compute_budget_config), + }) + } + + pub async fn active_lookup_tables(&self) -> Vec { + self.table_mania.active_table_addresses().await + } + + pub async fn released_lookup_tables(&self) -> Vec { + self.table_mania.released_table_addresses().await + } + + pub fn auth_pubkey(&self) -> Pubkey { + self.authority.pubkey() + } + + pub(crate) async fn reserve_pubkeys( + &self, + pubkeys: HashSet, + ) -> CommittorServiceResult<()> { + Ok(self + .table_mania + .reserve_pubkeys(&self.authority, &pubkeys) + .await?) 
+ } + + pub(crate) async fn release_pubkeys(&self, pubkeys: HashSet) { + self.table_mania.release_pubkeys(&pubkeys).await + } + + pub fn get_commit_statuses( + &self, + reqid: &str, + ) -> CommittorServiceResult> { + let commit_statuses = self + .persister + .lock() + .expect("persister mutex poisoned") + .get_commit_statuses_by_reqid(reqid)?; + Ok(commit_statuses) + } + + pub fn get_signature( + &self, + bundle_id: u64, + ) -> CommittorServiceResult> { + let signatures = self + .persister + .lock() + .expect("persister mutex poisoned") + .get_signature(bundle_id)?; + Ok(signatures) + } + + pub async fn commit_changeset( + &self, + changeset: Changeset, + finalize: bool, + ephemeral_blockhash: Hash, + ) -> Option { + let reqid = match self + .persister + .lock() + .expect("persister mutex poisoned") + .start_changeset(&changeset, ephemeral_blockhash, finalize) + { + Ok(id) => Some(id), + Err(err) => { + // We will still try to perform the commits, but the fact that we cannot + // persist the intent is very serious and we should probably restart the + // valiator + error!( + "DB EXCEPTION: Failed to persist changeset to be committed: {:?}", + err + ); + None + } + }; + let owners = changeset.owners(); + let commit_stages = self + .process_commit_changeset(changeset, finalize, ephemeral_blockhash) + .await; + + // Release pubkeys related to all undelegated accounts from the lookup tables + let releaseable_pubkeys = commit_stages + .iter() + .filter(|x| CommitStage::is_successfully_undelegated(x)) + .flat_map(|x| { + provide_committee_pubkeys(&x.pubkey(), owners.get(&x.pubkey())) + }) + .collect::>(); + self.table_mania.release_pubkeys(&releaseable_pubkeys).await; + + if let Some(reqid) = &reqid { + for stage in commit_stages { + let _ = self.persister + .lock() + .expect("persister mutex poisoned") + .update_status( + reqid, + &stage.pubkey(), + stage.commit_status(), + ).map_err(|err| { + // We log the error here, but there is nothing we can do if we encounter + // 
a db issue. + error!( + "DB EXCEPTION: Failed to update status of changeset {}: {:?}", + reqid, err + ); + }); + } + } + + reqid + } + + async fn process_commit_changeset( + &self, + changeset: Changeset, + finalize: bool, + ephemeral_blockhash: Hash, + ) -> Vec { + let changeset_meta = ChangesetMeta::from(&changeset); + let SplitChangesets { + args_changeset, + args_including_finalize_changeset, + args_with_lookup_changeset, + args_including_finalize_with_lookup_changeset, + from_buffer_changeset, + from_buffer_with_lookup_changeset, + } = match split_changesets_by_commit_strategy(changeset, finalize) { + Ok(changesets) => changesets, + Err(err) => { + error!("Failed to split changesets: {:?}", err); + return changeset_meta + .into_account_infos() + .into_iter() + .map(CommitStage::SplittingChangesets) + .collect(); + } + }; + + debug_assert!( + finalize + || (args_including_finalize_changeset.is_empty() + && args_including_finalize_with_lookup_changeset + .is_empty()), + "BUG: args including finalize strategies should not be created when not finalizing" + ); + + let mut join_set = JoinSet::new(); + if !args_changeset.is_empty() + || !args_with_lookup_changeset.is_empty() + || !args_including_finalize_changeset.is_empty() + || !args_including_finalize_with_lookup_changeset.is_empty() + { + let latest_blockhash = match self + .magicblock_rpc_client + .get_latest_blockhash() + .await + { + Ok(bh) => bh, + Err(err) => { + error!( + "Failed to get latest blockhash to commit using args: {:?}", + err + ); + let strategy = CommitStrategy::args( + !args_with_lookup_changeset.is_empty() + || !args_including_finalize_with_lookup_changeset + .is_empty(), + ); + return changeset_meta + .into_account_infos() + .into_iter() + .map(|(meta, slot, undelegate)| { + CommitStage::GettingLatestBlockhash(( + meta, slot, undelegate, strategy, + )) + }) + .collect(); + } + }; + + if !args_changeset.is_empty() { + join_set.spawn(Self::commit_changeset_using_args( + Arc::new(self.clone()), 
+ args_changeset, + (finalize, true), + ephemeral_blockhash, + latest_blockhash, + false, + )); + } + + if !args_including_finalize_changeset.is_empty() { + join_set.spawn(Self::commit_changeset_using_args( + Arc::new(self.clone()), + args_including_finalize_changeset, + (finalize, false), + ephemeral_blockhash, + latest_blockhash, + false, + )); + } + + if !args_with_lookup_changeset.is_empty() { + join_set.spawn(Self::commit_changeset_using_args( + Arc::new(self.clone()), + args_with_lookup_changeset, + (finalize, true), + ephemeral_blockhash, + latest_blockhash, + true, + )); + } + + if !args_including_finalize_with_lookup_changeset.is_empty() { + join_set.spawn(Self::commit_changeset_using_args( + Arc::new(self.clone()), + args_including_finalize_with_lookup_changeset, + (finalize, false), + ephemeral_blockhash, + latest_blockhash, + true, + )); + } + } + + if !from_buffer_changeset.is_empty() { + join_set.spawn(Self::commit_changeset_using_buffers( + Arc::new(self.clone()), + from_buffer_changeset, + finalize, + ephemeral_blockhash, + false, + )); + } + if !from_buffer_with_lookup_changeset.is_empty() { + join_set.spawn(Self::commit_changeset_using_buffers( + Arc::new(self.clone()), + from_buffer_with_lookup_changeset, + finalize, + ephemeral_blockhash, + true, + )); + } + + join_set.join_all().await.into_iter().flatten().collect() + } + + pub(crate) async fn process_ixs_chunks( + &self, + ixs_chunks: Vec>, + chunked_close_ixs: Option>>, + table_mania: Option<&TableMania>, + owners: &HashMap, + ) -> ( + Vec<(Signature, Vec<(CommitInfo, InstructionsKind)>)>, + Vec<(Option, Vec)>, + ) { + let latest_blockhash = + match self.magicblock_rpc_client.get_latest_blockhash().await { + Ok(bh) => bh, + Err(err) => { + error!( + "Failed to get latest blockhash to process buffers: {:?}", + err + ); + // If we fail to get this blockhash we need to report all process + // instructions as failed + let commit_infos = ixs_chunks + .into_iter() + .map(|ixs_chunk| { + ( + None::, 
+ ixs_chunk + .into_iter() + .map(|ixs| ixs.commit_info) + .collect::>(), + ) + }) + .collect::>(); + return (vec![], commit_infos); + } + }; + + let mut join_set = JoinSet::new(); + let successes = Arc::< + Mutex)>>, + >::default(); + let failures = + Arc::, Vec)>>>::default(); + for ixs_chunk in ixs_chunks { + let authority = self.authority.insecure_clone(); + let rpc_client = self.magicblock_rpc_client.clone(); + let compute_budget = + self.compute_budget_config.buffer_process_and_close_budget(); + let successes = successes.clone(); + let failures = failures.clone(); + let owners = owners.clone(); + let table_mania = table_mania.cloned(); + join_set.spawn(process_ixs_chunk( + ixs_chunk, + compute_budget, + authority, + rpc_client, + successes, + failures, + table_mania, + owners, + latest_blockhash, + )); + } + join_set.join_all().await; + + if let Some(chunked_close_ixs) = chunked_close_ixs { + if log::log_enabled!(log::Level::Trace) { + let ix_count = + chunked_close_ixs.iter().map(|x| x.len()).sum::(); + trace!( + "Processing {} close instruction chunk(s) with a total of {} instructions", + chunked_close_ixs.len(), + ix_count + ); + } + let latest_blockhash = match self + .magicblock_rpc_client + .get_latest_blockhash() + .await + { + Ok(bh) => Some(bh), + Err(err) => { + // If we fail to close the buffers then the commits were processed and we + // should not retry them, however eventually we'd want to close those buffers + error!( + "Failed to get latest blockhash to close buffer: {:?}", + err + ); + let commit_infos = chunked_close_ixs + .iter() + .map(|ixs_chunk| { + ixs_chunk + .iter() + .map(|ixs| ixs.commit_info.clone()) + .collect::>() + }) + .collect::>(); + error!("Therefore failed to close buffers for the following committed accounts: {:#?}", commit_infos); + None + } + }; + + if let Some(latest_blockhash) = latest_blockhash { + let mut join_set = JoinSet::new(); + let failures = Arc::< + Mutex, Vec)>>, + >::default(); + for ixs_chunk in 
chunked_close_ixs { + let authority = self.authority.insecure_clone(); + let rpc_client = self.magicblock_rpc_client.clone(); + let table_mania = table_mania.cloned(); + let owners = owners.clone(); + let compute_budget = + self.compute_budget_config.buffer_close_budget(); + // We ignore close successes + let successes = Default::default(); + // We only log close failures since the commit was processed successfully + let failures = failures.clone(); + join_set.spawn(process_ixs_chunk( + ixs_chunk, + compute_budget, + authority, + rpc_client, + successes, + failures, + table_mania, + owners, + latest_blockhash, + )); + } + join_set.join_all().await; + if !failures + .lock() + .expect("close failures mutex poisoned") + .is_empty() + { + error!("Failed to to close some buffers: {:?}", failures); + } + } + } + + let successes = Arc::try_unwrap(successes) + .expect("successes mutex still has multiple owners") + .into_inner() + .expect("successes mutex was poisoned"); + let failures = Arc::try_unwrap(failures) + .expect("failures mutex still has multiple owners") + .into_inner() + .expect("failures mutex was poisoned"); + + (successes, failures) + } +} + +/// Processes a single chunk of instructions, sending them as a transaction. +/// Updates the shared success or failure lists based on the transaction outcome. 
+#[allow(clippy::type_complexity, clippy::too_many_arguments)] +pub(crate) async fn process_ixs_chunk( + ixs_chunk: Vec, + compute_budget: ComputeBudget, + authority: Keypair, + rpc_client: MagicblockRpcClient, + successes: Arc< + Mutex)>>, + >, + failures: Arc, Vec)>>>, + table_mania: Option, + owners: HashMap, + latest_blockhash: Hash, +) { + let mut ixs = vec![]; + let mut commit_infos = vec![]; + for ix_chunk in ixs_chunk.into_iter() { + ixs.extend(ix_chunk.instructions); + commit_infos.push((ix_chunk.commit_info, ix_chunk.kind)); + } + let ixs_len = ixs.len(); + let table_mania_setup = table_mania.as_ref().map(|table_mania| { + let committees = commit_infos + .iter() + .map(|(x, _)| x.pubkey()) + .collect::>(); + let keys_from_table = + lookup_table_keys(&authority, &committees, &owners); + (table_mania, keys_from_table) + }); + let compute_budget_ixs = compute_budget.instructions(commit_infos.len()); + match send_and_confirm( + rpc_client, + authority, + [compute_budget_ixs, ixs].concat(), + "process commitable and/or close pdas".to_string(), + Some(latest_blockhash), + MagicBlockSendTransactionConfig::ensure_committed(), + table_mania_setup, + ) + .await + { + Ok(sig) => { + successes + .lock() + .expect("ix successes mutex poisoned") + .push((sig, commit_infos)); + } + Err(err) => { + error!( + "Processing {} instructions for {} commit infos {:?}", + ixs_len, + commit_infos.len(), + err + ); + let commit_infos = commit_infos + .into_iter() + .map(|(commit_info, _)| commit_info) + .collect(); + failures + .lock() + .expect("ix failures mutex poisoned") + .push((err.signature(), commit_infos)); + } + } +} diff --git a/magicblock-committor-service/src/commit/common.rs b/magicblock-committor-service/src/commit/common.rs new file mode 100644 index 00000000..c1e4317c --- /dev/null +++ b/magicblock-committor-service/src/commit/common.rs @@ -0,0 +1,204 @@ +use log::*; +use magicblock_rpc_client::{ + MagicBlockSendTransactionConfig, MagicblockRpcClient, +}; +use 
magicblock_table_mania::TableMania; +use solana_sdk::{hash::Hash, message::v0::Message, signature::Signature}; +use std::{ + collections::{HashMap, HashSet}, + time::{Duration, Instant}, +}; + +use magicblock_committor_program::Changeset; +use solana_pubkey::Pubkey; +use solana_sdk::{ + instruction::Instruction, message::VersionedMessage, signature::Keypair, + signer::Signer, transaction::VersionedTransaction, +}; + +use crate::{ + error::{CommittorServiceError, CommittorServiceResult}, + pubkeys_provider::{provide_committee_pubkeys, provide_common_pubkeys}, +}; + +pub(crate) fn lookup_table_keys( + authority: &Keypair, + committees: &HashSet, + owners: &HashMap, +) -> HashSet { + committees + .iter() + .flat_map(|x| provide_committee_pubkeys(x, owners.get(x))) + .chain(provide_common_pubkeys(&authority.pubkey())) + .collect::>() +} + +/// Returns the pubkeys of the accounts that are marked for undelegation we finalized +/// the commits of those accounts. +/// If we didn't finalize the commits then we cannot yet undelegate those accounts. +/// Returns tuples of the account to undelegate and its original owner +pub(crate) fn get_accounts_to_undelegate( + changeset: &Changeset, + finalize: bool, +) -> Option> { + if finalize { + let vec = changeset.accounts_to_undelegate.iter().flat_map(|x| { + let Some(acc) = changeset.accounts.get(x) else { + warn!("Account ({}) marked for undelegation not found in changeset", x); + return None; + }; + Some((*x, acc.owner())) + }).collect::>(); + (!vec.is_empty()).then_some(vec) + } else { + // if we don't finalize then we can only _mark_ accounts for undelegation + // but cannot run the undelegation instruction itself + None + } +} + +/// Gets the latest blockhash and sends and confirms a transaction with +/// the provided instructions. +/// Uses the commitment provided via the [ChainConfig::commitment] option when checking +/// the status of the transction signature. 
+/// - **rpc_client** - the rpc client to use +/// - **authority** - the authority to sign the transaction +/// - **ixs** - the instructions to include in the transaction +/// - **task_desc** - a description of the task included in logs +/// - **latest_blockhash** - the latest blockhash to use for the transaction, +/// if not provided it will be queried +/// - **send_config** - the send transaction config to use +/// - **use_table_mania** - whether to use table mania to optimize the size increase due +/// to accounts in the transaction via the use of lookup tables +/// +/// Returns the signature of the transaction. +pub(crate) async fn send_and_confirm( + rpc_client: MagicblockRpcClient, + authority: Keypair, + ixs: Vec, + task_desc: String, + latest_blockhash: Option, + send_config: MagicBlockSendTransactionConfig, + table_mania_setup: Option<(&TableMania, HashSet)>, +) -> CommittorServiceResult { + use CommittorServiceError::*; + // When lots of txs are spawned in parallel we reuse the blockhash + // instead of getting it for each tx + let latest_blockhash = if let Some(blockhash) = latest_blockhash { + blockhash + } else { + rpc_client.get_latest_blockhash().await.inspect_err(|err| { + error!( + "Failed to get latest blockhash to '{}': {:?}", + task_desc, err + ) + })? 
+ }; + + let tables = + if let Some((table_mania, keys_from_tables)) = table_mania_setup { + let start = Instant::now(); + + // NOTE: we assume that all needed pubkeys were reserved earlier + let address_lookup_tables = table_mania + .try_get_active_address_lookup_table_accounts( + &keys_from_tables, + // enough time for init/extend lookup table transaction to complete + Duration::from_secs(50), + // enough time for lookup table to finalize + Duration::from_secs(50), + ) + .await?; + + if log_enabled!(Level::Trace) { + let tables = address_lookup_tables + .iter() + .map(|table| { + format!( + "\n {}: {} addresses", + table.key, + table.addresses.len() + ) + }) + .collect::>() + .join(", "); + trace!( + "Took {}ms to get finalized address lookup table(s) {}", + start.elapsed().as_millis(), + tables + ); + let all_accounts = ixs.iter().flat_map(|ix| { + ix.accounts.iter().map(|x| x.pubkey).clone() + }); + let keys_not_from_table = all_accounts + .filter(|x| !keys_from_tables.contains(x)) + .collect::>(); + trace!( + "{}/{} are provided from lookup tables", + keys_from_tables.len(), + keys_not_from_table.len() + keys_from_tables.len() + ); + trace!( + "The following keys are not:\n{}", + keys_not_from_table + .iter() + .map(|x| format!(" {}", x)) + .collect::>() + .join("\n") + ); + } + + address_lookup_tables + } else { + vec![] + }; + + let versioned_msg = match Message::try_compile( + &authority.pubkey(), + &ixs, + &tables, + latest_blockhash, + ) { + Ok(msg) => msg, + Err(err) => { + return Err( + CommittorServiceError::FailedToCompileTransactionMessage( + task_desc.clone(), + err, + ), + ); + } + }; + let tx = match VersionedTransaction::try_new( + VersionedMessage::V0(versioned_msg), + &[&authority], + ) { + Ok(tx) => tx, + Err(err) => { + return Err(CommittorServiceError::FailedToCreateTransaction( + task_desc.clone(), + err, + )); + } + }; + + let start = Instant::now(); + let res = rpc_client + .send_transaction(&tx, &send_config) + .await + .map_err(|err| { 
+ FailedToSendAndConfirmTransaction(task_desc.clone(), err) + })?; + + trace!( + "Took {}ms to send and confirm transaction with {} instructions", + start.elapsed().as_millis(), + ixs.len() + ); + + if let Some(err) = res.error() { + Err(EncounteredTransactionError(task_desc, err.clone())) + } else { + Ok(res.into_signature()) + } +} diff --git a/magicblock-committor-service/src/commit/mod.rs b/magicblock-committor-service/src/commit/mod.rs new file mode 100644 index 00000000..f14e26aa --- /dev/null +++ b/magicblock-committor-service/src/commit/mod.rs @@ -0,0 +1,6 @@ +mod commit_using_args; +mod commit_using_buffer; +mod committor_processor; +mod common; +mod process_buffers; +pub(super) use committor_processor::CommittorProcessor; diff --git a/magicblock-committor-service/src/commit/process_buffers.rs b/magicblock-committor-service/src/commit/process_buffers.rs new file mode 100644 index 00000000..40cb2583 --- /dev/null +++ b/magicblock-committor-service/src/commit/process_buffers.rs @@ -0,0 +1,239 @@ +use std::collections::HashMap; + +use dlp::args::CommitStateFromBufferArgs; +use log::*; +use solana_pubkey::Pubkey; + +use crate::{ + bundles::{bundle_chunks, bundle_chunks_ignoring_bundle_id}, + transactions::{ + close_buffers_ix, process_and_close_ixs, process_commits_ix, + MAX_CLOSE_PER_TX, MAX_CLOSE_PER_TX_USING_LOOKUP, + MAX_PROCESS_AND_CLOSE_PER_TX, + MAX_PROCESS_AND_CLOSE_PER_TX_USING_LOOKUP, MAX_PROCESS_PER_TX, + MAX_PROCESS_PER_TX_USING_LOOKUP, + }, + types::{InstructionsForCommitable, InstructionsKind}, + CommitInfo, +}; + +/// Returns instructions to process the commit/delegation request for a commitable. +/// Requires that the [CommitInfo::buffer_pda] holds all data to be committed. +/// It appends another instruction which closes both the [CommitInfo::buffer_pda] +/// and the [CommitInfo::chunks_pda]. 
+fn process_commitable_and_close_ixs( + validator_auth: Pubkey, + commit_info: CommitInfo, +) -> InstructionsForCommitable { + debug!("Processing commitable: {:?}", commit_info); + let CommitInfo::BufferedDataAccount { + pubkey, + delegated_account_owner, + slot, + ephemeral_blockhash, + undelegate, + buffer_pda, + lamports, + .. + } = &commit_info + else { + panic!("Only data accounts are supported for now"); + }; + + let commit_args = CommitStateFromBufferArgs { + slot: *slot, + lamports: *lamports, + allow_undelegation: *undelegate, + }; + + let instructions = process_and_close_ixs( + validator_auth, + pubkey, + delegated_account_owner, + buffer_pda, + ephemeral_blockhash, + commit_args, + ); + InstructionsForCommitable { + instructions, + commit_info, + kind: InstructionsKind::ProcessAndCloseBuffers, + } +} + +fn close_buffers_separate_ix( + validator_auth: Pubkey, + commit_info: CommitInfo, +) -> InstructionsForCommitable { + debug!("Processing commitable: {:?}", commit_info); + let CommitInfo::BufferedDataAccount { + pubkey, + ephemeral_blockhash, + .. + } = &commit_info + else { + panic!("Only data accounts are supported for now"); + }; + + let close_ix = + close_buffers_ix(validator_auth, pubkey, ephemeral_blockhash); + InstructionsForCommitable { + instructions: vec![close_ix], + commit_info, + kind: InstructionsKind::CloseBuffers, + } +} + +fn process_commitable_separate_ix( + validator_auth: Pubkey, + commit_info: CommitInfo, +) -> InstructionsForCommitable { + let CommitInfo::BufferedDataAccount { + pubkey, + delegated_account_owner, + slot, + undelegate, + buffer_pda, + lamports, + .. 
+ } = &commit_info + else { + panic!("Only data accounts are supported for now"); + }; + + let commit_args = CommitStateFromBufferArgs { + slot: *slot, + lamports: *lamports, + allow_undelegation: *undelegate, + }; + + let process_ix = process_commits_ix( + validator_auth, + pubkey, + delegated_account_owner, + buffer_pda, + commit_args, + ); + InstructionsForCommitable { + instructions: vec![process_ix], + commit_info: commit_info.clone(), + kind: InstructionsKind::Process, + } +} + +pub(crate) struct ChunkedIxsToProcessCommitablesAndClosePdasResult { + /// Chunked instructions to process buffers and possibly also close them + /// Since they are part of the same transaction and correctly ordered, each + /// chunk can run in parallel + pub chunked_ixs: Vec>, + /// Separate buffer close transactions. + /// Since the process transactions nee to complete first we need to run them + /// after the [Self::chunked_ixs] transactions + pub chunked_close_ixs: Option>>, + /// Commitables that could not be chunked and thus cannot be committed while + /// respecting the bundle + pub unchunked: HashMap>, +} + +/// Processes commits +/// Creates single instruction chunk for commmitables with matching bundle_id +pub(crate) fn chunked_ixs_to_process_commitables_and_close_pdas( + validator_auth: Pubkey, + commit_infos: Vec, + use_lookup: bool, +) -> ChunkedIxsToProcessCommitablesAndClosePdasResult { + // First try to combine process and close into a single transaction + let max_per_chunk = if use_lookup { + MAX_PROCESS_AND_CLOSE_PER_TX_USING_LOOKUP + } else { + MAX_PROCESS_AND_CLOSE_PER_TX + }; + let bundles_with_close = + bundle_chunks(commit_infos, max_per_chunk as usize); + + // Add instruction chunks that include process and close + let mut chunked_ixs: Vec<_> = bundles_with_close + .chunks + .into_iter() + .map(|chunk| { + chunk + .into_iter() + .map(|commit_info| { + process_commitable_and_close_ixs( + validator_auth, + commit_info, + ) + }) + .collect::>() + }) + .collect(); 
+ + // If all bundles can be handled combining process and close then we're done + let all_bundles_handled = bundles_with_close.unchunked.is_empty(); + if all_bundles_handled { + return ChunkedIxsToProcessCommitablesAndClosePdasResult { + chunked_ixs, + chunked_close_ixs: None, + unchunked: bundles_with_close.unchunked, + }; + } + + // If not all chunks fit when trying to close and process in one transaction + // then let's process them separately + let unbundled_commit_infos = bundles_with_close + .unchunked + .into_iter() + .flat_map(|(_, commit_infos)| commit_infos) + .collect::>(); + + // For the bundles that are too large to include the close instructions add them + // as separate instruction chunks, one for process (which is the only part + // that needs to run atomic for a bundle) and another chunk for the close buffer + // instructions + let close_bundles = { + let max_per_chunk = if use_lookup { + MAX_CLOSE_PER_TX_USING_LOOKUP + } else { + MAX_CLOSE_PER_TX + }; + bundle_chunks_ignoring_bundle_id( + &unbundled_commit_infos, + max_per_chunk as usize, + ) + }; + + let process_bundles_with_separate_close = { + let max_per_chunk = if use_lookup { + MAX_PROCESS_PER_TX_USING_LOOKUP + } else { + MAX_PROCESS_PER_TX + }; + bundle_chunks(unbundled_commit_infos, max_per_chunk as usize) + }; + for bundle in process_bundles_with_separate_close.chunks { + let mut process_ixs = Vec::new(); + for commit_info in bundle { + let process_ix = + process_commitable_separate_ix(validator_auth, commit_info); + process_ixs.push(process_ix); + } + chunked_ixs.push(process_ixs); + } + + let mut close_ixs_chunks = Vec::new(); + for bundle in close_bundles.chunks { + let mut close_ixs = Vec::new(); + for commit_info in bundle { + let close_ix = + close_buffers_separate_ix(validator_auth, commit_info); + close_ixs.push(close_ix); + } + close_ixs_chunks.push(close_ixs); + } + + ChunkedIxsToProcessCommitablesAndClosePdasResult { + chunked_ixs, + chunked_close_ixs: Some(close_ixs_chunks), 
+ unchunked: process_bundles_with_separate_close.unchunked, + } +} diff --git a/magicblock-committor-service/src/commit_info.rs b/magicblock-committor-service/src/commit_info.rs new file mode 100644 index 00000000..a669153b --- /dev/null +++ b/magicblock-committor-service/src/commit_info.rs @@ -0,0 +1,177 @@ +use dlp::pda::commit_state_pda_from_delegated_account; +use magicblock_committor_program::CommitableAccount; +use solana_pubkey::Pubkey; +use solana_sdk::{clock::Slot, hash::Hash}; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CommitInfo { + /// A commit for an account that has no data. In this case we are trying to + /// commit changes to its lamports. + EmptyAccount { + /// The on chain address of the delegated account + pubkey: Pubkey, + /// The original owner of the delegated account on chain + delegated_account_owner: Pubkey, + /// The ephemeral slot at which those changes were requested + slot: Slot, + /// The ephemeral blockhash at which those changes were requested + ephemeral_blockhash: Hash, + /// If we also undelegate the account after committing it + undelegate: bool, + /// Lamports of the account in the ephemeral + lamports: u64, + /// This id will be the same for accounts whose commits need to + /// be applied atomically in a single transaction + /// For single account commits it is also set for consistency + bundle_id: u64, + /// If `true` the account commit is finalized after it was processed + finalize: bool, + }, + /// A commit for an account that is part of a bundle whose data is small enough + /// to fit into a single process commit instruction. 
+ DataAccount { + /// The on chain address of the delegated account + pubkey: Pubkey, + /// The account where the delegated account state is committed and stored + /// until it is finalized + commit_state: Pubkey, + /// The original owner of the delegated account on chain + delegated_account_owner: Pubkey, + /// The ephemeral slot at which those changes were requested + slot: Slot, + /// The ephemeral blockhash at which those changes were requested + ephemeral_blockhash: Hash, + /// If we also undelegate the account after committing it + undelegate: bool, + /// Lamports of the account in the ephemeral + lamports: u64, + /// This id will be the same for accounts whose commits need to + /// be applied atomically in a single transaction + /// For single account commits it is also set for consistency + bundle_id: u64, + /// If `true` the account commit is finalized after it was processed + finalize: bool, + }, + /// A commit for an account that is part of a bundle whose total data is so large + /// that we send the data in chunks to a buffer account before processing the + /// commit. 
+ BufferedDataAccount { + /// The on chain address of the delegated account + pubkey: Pubkey, + /// The account where the delegated account state is committed and stored + /// until it is finalized + commit_state: Pubkey, + /// The original owner of the delegated account on chain + delegated_account_owner: Pubkey, + /// The ephemeral slot at which those changes were requested + slot: Slot, + /// The ephemeral blockhash at which those changes were requested + ephemeral_blockhash: Hash, + /// If we also undelegate the account after committing it + undelegate: bool, + /// The account that tracked that all chunks got written to the [CommitInfo::buffer_pda] + chunks_pda: Pubkey, + /// The temporary address where the data of the account is stored + buffer_pda: Pubkey, + /// Lamports of the account in the ephemeral + lamports: u64, + /// This id will be the same for accounts whose commits need to + /// be applied atomically in a single transaction + /// For single account commits it is also set for consistency + bundle_id: u64, + /// If `true` the account commit is finalized after it was processed + finalize: bool, + }, +} + +impl CommitInfo { + pub fn from_small_data_account( + commitable: CommitableAccount, + ephemeral_blockhash: Hash, + finalize: bool, + ) -> Self { + Self::DataAccount { + pubkey: commitable.pubkey, + delegated_account_owner: commitable.delegated_account_owner, + slot: commitable.slot, + ephemeral_blockhash, + undelegate: commitable.undelegate, + lamports: commitable.lamports, + bundle_id: commitable.bundle_id, + finalize, + commit_state: commit_state_pda_from_delegated_account( + &commitable.pubkey, + ), + } + } + pub fn has_data(&self) -> bool { + matches!(self, Self::BufferedDataAccount { .. }) + } + + pub fn pubkey(&self) -> Pubkey { + match self { + Self::EmptyAccount { pubkey, .. } => *pubkey, + Self::DataAccount { pubkey, .. } => *pubkey, + Self::BufferedDataAccount { pubkey, .. 
} => *pubkey, + } + } + + pub fn commit_state(&self) -> Option { + match self { + Self::BufferedDataAccount { commit_state, .. } => { + Some(*commit_state) + } + Self::DataAccount { commit_state, .. } => Some(*commit_state), + _ => None, + } + } + + pub fn lamports(&self) -> u64 { + match self { + Self::EmptyAccount { lamports, .. } => *lamports, + Self::DataAccount { lamports, .. } => *lamports, + Self::BufferedDataAccount { lamports, .. } => *lamports, + } + } + + pub fn bundle_id(&self) -> u64 { + match self { + Self::EmptyAccount { bundle_id, .. } => *bundle_id, + Self::DataAccount { bundle_id, .. } => *bundle_id, + Self::BufferedDataAccount { bundle_id, .. } => *bundle_id, + } + } + + pub fn undelegate(&self) -> bool { + match self { + Self::EmptyAccount { undelegate, .. } => *undelegate, + Self::DataAccount { undelegate, .. } => *undelegate, + Self::BufferedDataAccount { undelegate, .. } => *undelegate, + } + } + + pub fn chunks_pda(&self) -> Option { + match self { + Self::BufferedDataAccount { chunks_pda, .. } => Some(*chunks_pda), + _ => None, + } + } + + pub fn buffer_pda(&self) -> Option { + match self { + Self::BufferedDataAccount { buffer_pda, .. } => Some(*buffer_pda), + _ => None, + } + } + + pub fn pdas(&self) -> Option<(Pubkey, Pubkey)> { + match self { + Self::BufferedDataAccount { + chunks_pda, + buffer_pda, + .. 
+ } => Some((*chunks_pda, *buffer_pda)), + _ => None, + } + } +} diff --git a/magicblock-committor-service/src/commit_stage.rs b/magicblock-committor-service/src/commit_stage.rs new file mode 100644 index 00000000..fe8299c7 --- /dev/null +++ b/magicblock-committor-service/src/commit_stage.rs @@ -0,0 +1,340 @@ +use magicblock_committor_program::ChangedAccountMeta; +use solana_pubkey::Pubkey; +use solana_sdk::{clock::Slot, signature::Signature}; + +use crate::{ + error::CommitAccountError, + persist::{CommitStatus, CommitStatusSignatures, CommitStrategy}, + CommitInfo, +}; +use log::*; +use std::sync::Arc; + +#[derive(Debug, Clone)] +pub struct CommitSignatures { + /// The signature of the transaction processing the commit + pub process_signature: Signature, + /// The signature of the transaction finalizing the commit. + /// If the account was not finalized or it failed the this is `None`. + /// If the finalize instruction was part of the process transaction then + /// this signature is the same as [Self::process_signature]. + pub finalize_signature: Option, + /// The signature of the transaction undelegating the committed accounts + /// if so requested. + /// If the account was not undelegated or it failed the this is `None`. 
+ /// NOTE: this can be removed if we decide to perform the undelegation + /// step as part of the finalize instruction in the delegation program + pub undelegate_signature: Option, +} + +impl CommitSignatures { + pub fn process_only(process_signature: Signature) -> Self { + Self { + process_signature, + finalize_signature: None, + undelegate_signature: None, + } + } +} + +impl From for CommitStatusSignatures { + fn from(commit_signatures: CommitSignatures) -> Self { + Self { + process_signature: commit_signatures.process_signature, + finalize_signature: commit_signatures.finalize_signature, + undelegate_signature: commit_signatures.undelegate_signature, + } + } +} + +#[derive(Debug)] +pub enum CommitStage { + /// This account was part of a changeset that could not be split into + /// args only/args with lookup table or buffered changesets. + /// The commit for this account needs to be restarted from scratch. + SplittingChangesets((ChangedAccountMeta, Slot, bool)), + + /// This account was part of a changeset for which we could not obtain the + /// latest on chain blockhash when trying to commit them via args. + /// The commit for this account needs to be restarted from scratch. + GettingLatestBlockhash((ChangedAccountMeta, Slot, bool, CommitStrategy)), + + /// No part of the commit pipeline succeeded. + /// The commit for this account needs to be restarted from scratch. + Failed((CommitInfo, CommitStrategy)), + + /// The buffer and chunks account were initialized, but could either not + /// be retrieved or deserialized. It is recommended to fully re-initialize + /// them on retry. + BufferAndChunkPartiallyInitialized((CommitInfo, CommitStrategy)), + + /// The buffer and chunks accounts were initialized and all data was + /// written to them (for data accounts). + /// This means on retry we can skip that step and just try to process + /// these buffers to complete the commit. 
+ /// This stage is returned in the following scenarios: + /// - the commit could not be processed + /// - another account in the same bundle failed to fully initialize + /// the buffer and chunks accounts and thus the bundle could not be + /// processed + BufferAndChunkFullyInitialized((CommitInfo, CommitStrategy)), + + /// The commit is part of a bundle that contains too many commits to be included + /// in a single transaction. Thus we cannot commit any of them. + /// The max amount of accounts we can commit and process as part of a single + /// transaction is [crate::max_per_transaction::MAX_COMMIT_STATE_AND_CLOSE_PER_TRANSACTION]. + /// These commits were prepared, which means the buffer and chunk accounts were fully + /// initialized, but then this issue was detected. + PartOfTooLargeBundleToProcess(CommitInfo), + + /// The commmit was properly initialized and added to a chunk of instructions to process + /// commits via a transaction. For large commits the buffer and chunk accounts were properly + /// prepared and haven't been closed. + /// However that transaction failed. + FailedProcess((CommitInfo, CommitStrategy, Option)), + + /// The commit was properly processed but the finalize instructions didn't fit into a single + /// transaction. + /// This should never happen since otherwise the [CommitStage::PartOfTooLargeBundleToProcess] + /// would have been returned as the bundle would have been too large to process in the + /// first place. + PartOfTooLargeBundleToFinalize(CommitInfo), + + /// The commit was properly processed but the requested finalize transaction failed. + FailedFinalize((CommitInfo, CommitStrategy, CommitSignatures)), + + /// The commit was properly processed but the requested undelegation transaction failed. + FailedUndelegate((CommitInfo, CommitStrategy, CommitSignatures)), + + /// All stages of the commit pipeline for this account succeeded + /// and we don't have to retry any of them. 
+ /// This means the commit was processed and if so requested also finalized. + /// We are done committing this account. + Succeeded((CommitInfo, CommitStrategy, CommitSignatures)), +} + +impl From for CommitStage { + fn from(err: CommitAccountError) -> Self { + use CommitAccountError::*; + macro_rules! ci { + ($ci:ident) => { + Arc::::unwrap_or_clone($ci) + }; + } + + match err { + InitBufferAndChunkAccounts(err, commit_info, commit_strategy) => { + warn!("Init buffer and chunks accounts failed: {:?}", err); + Self::Failed((*commit_info, commit_strategy)) + } + GetChunksAccount(err, commit_info, commit_strategy) => { + warn!("Get chunks account failed: {:?}", err); + Self::BufferAndChunkPartiallyInitialized(( + ci!(commit_info), + commit_strategy, + )) + } + DeserializeChunksAccount(err, commit_info, commit_strategy) => { + warn!("Deserialize chunks account failed: {:?}", err); + Self::BufferAndChunkPartiallyInitialized(( + ci!(commit_info), + commit_strategy, + )) + } + WriteChunksRanOutOfRetries(err, commit_info, commit_strategy) => { + warn!("Write chunks ran out of retries: {:?}", err); + Self::BufferAndChunkPartiallyInitialized(( + ci!(commit_info), + commit_strategy, + )) + } + } + } +} + +pub enum CommitMetadata<'a> { + CommitInfo(&'a CommitInfo), + ChangedAccountMeta((&'a ChangedAccountMeta, Slot, bool)), +} + +impl<'a> From<&'a CommitInfo> for CommitMetadata<'a> { + fn from(commit_info: &'a CommitInfo) -> Self { + Self::CommitInfo(commit_info) + } +} + +impl CommitMetadata<'_> { + pub fn pubkey(&self) -> Pubkey { + use CommitMetadata::*; + match self { + CommitInfo(ci) => ci.pubkey(), + ChangedAccountMeta((cm, _, _)) => cm.pubkey, + } + } + + pub fn commit_state(&self) -> Option { + use CommitMetadata::*; + match self { + CommitInfo(ci) => ci.commit_state(), + ChangedAccountMeta((_, _, _)) => None, + } + } + + pub fn bundle_id(&self) -> u64 { + use CommitMetadata::*; + match self { + CommitInfo(ci) => ci.bundle_id(), + ChangedAccountMeta((cm, _, _)) => 
cm.bundle_id, + } + } +} + +impl CommitStage { + pub fn commit_metadata(&self) -> CommitMetadata<'_> { + use CommitStage::*; + match self { + SplittingChangesets((cm, slot, undelegate)) => { + CommitMetadata::ChangedAccountMeta((cm, *slot, *undelegate)) + } + GettingLatestBlockhash((cm, slot, undelegate, _)) => { + CommitMetadata::ChangedAccountMeta((cm, *slot, *undelegate)) + } + Failed((ci, _)) + | BufferAndChunkPartiallyInitialized((ci, _)) + | BufferAndChunkFullyInitialized((ci, _)) + | PartOfTooLargeBundleToProcess(ci) + | FailedProcess((ci, _, _)) + | PartOfTooLargeBundleToFinalize(ci) + | FailedFinalize((ci, _, _)) + | FailedUndelegate((ci, _, _)) + | Succeeded((ci, _, _)) => CommitMetadata::from(ci), + } + } + + pub fn commit_strategy(&self) -> CommitStrategy { + use CommitStage::*; + match self { + SplittingChangesets((_, _, _)) => CommitStrategy::Undetermined, + + // For the below two the only strategy that would possibly have worked is the one + // allowing most accounts per bundle, thus we return that as the assumed strategy + PartOfTooLargeBundleToProcess(_) + | PartOfTooLargeBundleToFinalize(_) => { + CommitStrategy::FromBufferWithLookupTable + } + + GettingLatestBlockhash((_, _, _, strategy)) + | Failed((_, strategy)) + | BufferAndChunkPartiallyInitialized((_, strategy)) + | BufferAndChunkFullyInitialized((_, strategy)) + | FailedProcess((_, strategy, _)) + | FailedFinalize((_, strategy, _)) + | FailedUndelegate((_, strategy, _)) + | Succeeded((_, strategy, _)) => *strategy, + } + } + + pub fn commit_status(&self) -> CommitStatus { + use CommitStage::*; + match self { + SplittingChangesets((meta, _, _)) + | GettingLatestBlockhash((meta, _, _, _)) => { + CommitStatus::Failed(meta.bundle_id) + } + Failed((ci, _)) => CommitStatus::Failed(ci.bundle_id()), + BufferAndChunkPartiallyInitialized((ci, _)) => { + CommitStatus::BufferAndChunkPartiallyInitialized(ci.bundle_id()) + } + BufferAndChunkFullyInitialized((ci, _)) => { + 
CommitStatus::BufferAndChunkFullyInitialized(ci.bundle_id()) + } + PartOfTooLargeBundleToProcess(ci) + // NOTE: the below cannot occur if the above didn't, so we can merge them + // here + | PartOfTooLargeBundleToFinalize(ci) => { + CommitStatus::PartOfTooLargeBundleToProcess(ci.bundle_id()) + } + FailedProcess((ci, strategy, sigs)) => CommitStatus::FailedProcess(( + ci.bundle_id(), + *strategy, + sigs.as_ref().cloned().map(CommitStatusSignatures::from), + )), + FailedFinalize((ci, strategy, sigs)) => CommitStatus::FailedFinalize(( + ci.bundle_id(), + *strategy, + CommitStatusSignatures::from(sigs.clone()), + )), + FailedUndelegate((ci, strategy, sigs)) => CommitStatus::FailedUndelegate(( + ci.bundle_id(), + *strategy, + CommitStatusSignatures::from(sigs.clone()), + )), + Succeeded((ci, strategy, sigs)) => CommitStatus::Succeeded(( + ci.bundle_id(), + *strategy, + CommitStatusSignatures::from(sigs.clone()), + )), + } + } + + pub fn commit_infos(commit_stages: &[Self]) -> Vec> { + commit_stages.iter().map(Self::commit_metadata).collect() + } + + /// Pubkey of the committed account + pub fn pubkey(&self) -> Pubkey { + self.commit_metadata().pubkey() + } + + /// Pubkey of the account holding the state we commit until the commit is finalized + pub fn commit_state(&self) -> Option { + self.commit_metadata().commit_state() + } + + /// Returns `true` if we need to init the chunks and buffer accounts when we + /// retry commiting this account + pub fn needs_accounts_init(&self) -> bool { + use CommitStage::*; + matches!(self, Failed(_) | BufferAndChunkPartiallyInitialized(_)) + } + + /// Returns `true` if we need to complete writing data to the buffer account + /// when we retry committing this account + pub fn needs_accounts_write(&self) -> bool { + use CommitStage::*; + self.needs_accounts_init() + || matches!(self, BufferAndChunkFullyInitialized(_)) + } + + /// Returns `true` if we need to process the buffer account in order to apply + /// the commit when we retry 
committing this account + pub fn needs_process(&self) -> bool { + use CommitStage::*; + self.needs_accounts_write() + || matches!( + self, + PartOfTooLargeBundleToProcess(_) | FailedProcess(_) + ) + } + + /// Returns `true` if we need to rerun the finalize transaction when we retry + /// committing this account + pub fn needs_finalize(&self) -> bool { + use CommitStage::*; + self.needs_process() + || matches!( + self, + PartOfTooLargeBundleToFinalize(_) | FailedFinalize(_) + ) + } + + /// Returns `true` if the commit was successfully processed and the account + /// was undelegated as part of the commit + pub fn is_successfully_undelegated(&self) -> bool { + use CommitStage::*; + match self { + Succeeded((ci, _, _)) => ci.undelegate(), + _ => false, + } + } +} diff --git a/magicblock-committor-service/src/commit_strategy.rs b/magicblock-committor-service/src/commit_strategy.rs new file mode 100644 index 00000000..32c9b795 --- /dev/null +++ b/magicblock-committor-service/src/commit_strategy.rs @@ -0,0 +1,635 @@ +use std::collections::HashSet; + +use magicblock_committor_program::{ChangedBundle, Changeset}; +use solana_pubkey::Pubkey; + +use crate::{ + error::{CommittorServiceError, CommittorServiceResult}, + transactions::{ + commit_tx_report, CommitTxReport, MAX_ENCODED_TRANSACTION_SIZE, + }, +}; + +/// These are the commit strategies we can use to commit a changeset in order +/// of preference. We use lookup tables only as last resort since they are +/// slow to prepare. +#[derive(Debug)] +pub enum CommitBundleStrategy { + ArgsIncludeFinalize(ChangedBundle), + Args(ChangedBundle), + FromBuffer(ChangedBundle), + ArgsIncludeFinalizeWithLookupTable(ChangedBundle), + ArgsWithLookupTable(ChangedBundle), + FromBufferWithLookupTable(ChangedBundle), +} + +impl TryFrom<(ChangedBundle, bool)> for CommitBundleStrategy { + type Error = CommittorServiceError; + + /// Try to find the fastest/efficient commit strategy for the given bundle. + /// Order of preference: + /// 1. 
[CommitBundleStrategy::ArgsIncludeFinalize] + /// 2. [CommitBundleStrategy::Args] + /// 3. [CommitBundleStrategy::FromBuffer] + /// 4. [CommitBundleStrategy::ArgsIncludeFinalizeWithLookupTable] + /// 5. [CommitBundleStrategy::ArgsWithLookupTable] + /// 6. [CommitBundleStrategy::FromBufferWithLookupTable] + fn try_from( + (bundle, finalize): (ChangedBundle, bool), + ) -> Result { + let CommitTxReport { + size_args_including_finalize, + size_args, + fits_buffer, + size_args_with_lookup_including_finalize, + size_args_with_lookup, + fits_buffer_using_lookup, + } = commit_tx_report(&bundle, finalize)?; + // Try to combine process and finalize if finalize is true + if let Some(size_including_finalize) = size_args_including_finalize { + if size_including_finalize < MAX_ENCODED_TRANSACTION_SIZE { + return Ok(CommitBundleStrategy::ArgsIncludeFinalize(bundle)); + } + } + // Next still using args but with separate finalize if needed + if size_args < MAX_ENCODED_TRANSACTION_SIZE { + return Ok(CommitBundleStrategy::Args(bundle)); + } + + // Last option to avoid lookup tables + if fits_buffer { + return Ok(CommitBundleStrategy::FromBuffer(bundle)); + } + + // All the below use lookup tables and will be a lot slower + + // Combining finalize and process + if let Some(size_with_lookup_including_finalize) = + size_args_with_lookup_including_finalize + { + if size_with_lookup_including_finalize + < MAX_ENCODED_TRANSACTION_SIZE + { + return Ok( + CommitBundleStrategy::ArgsIncludeFinalizeWithLookupTable( + bundle, + ), + ); + } + } + // Using lookup tables but separate finalize + if let Some(size_with_lookup) = size_args_with_lookup { + if size_with_lookup < MAX_ENCODED_TRANSACTION_SIZE { + return Ok(CommitBundleStrategy::ArgsWithLookupTable(bundle)); + } + } + + // Worst case try to use a buffer with lookup tables + if fits_buffer_using_lookup { + return Ok(CommitBundleStrategy::FromBufferWithLookupTable(bundle)); + } + + // If none of the strategies work then we need to error + let 
bundle_id = bundle + .first() + .map(|(_, acc)| acc.bundle_id()) + .unwrap_or_default(); + Err(CommittorServiceError::CouldNotFindCommitStrategyForBundle( + bundle_id, + )) + } +} + +#[derive(Debug)] +pub struct SplitChangesets { + /// This changeset can be committed in one processing step, passing account data as args + pub args_changeset: Changeset, + /// This changeset can be committed in one processing step, passing account data as args + /// and the finalize instruction fits into the same transaction + pub args_including_finalize_changeset: Changeset, + /// This changeset can be committed in one processing step, passing account data as args + /// but needs to use lookup tables for the accounts + pub args_with_lookup_changeset: Changeset, + /// This changeset can be committed in one processing step, passing account data as args + /// and the finalize instruction fits into the same transaction. + /// It needs to use lookup tables for the accounts. + pub args_including_finalize_with_lookup_changeset: Changeset, + /// This changeset needs to be committed in two steps: + /// 1. Prepare the buffer account + /// 2. Process the buffer account + pub from_buffer_changeset: Changeset, + /// This changeset needs to be committed in three steps: + /// 1. Prepare the buffer account + /// 2. Prepare lookup table + /// 3. 
Process the buffer account + pub from_buffer_with_lookup_changeset: Changeset, +} + +pub fn split_changesets_by_commit_strategy( + changeset: Changeset, + finalize: bool, +) -> CommittorServiceResult { + fn add_to_changeset( + changeset: &mut Changeset, + accounts_to_undelegate: &HashSet, + bundle: ChangedBundle, + ) { + for (pubkey, acc) in bundle { + changeset.add(pubkey, acc); + if accounts_to_undelegate.contains(&pubkey) { + changeset.accounts_to_undelegate.insert(pubkey); + } + } + } + + let mut args_changeset = Changeset { + slot: changeset.slot, + ..Default::default() + }; + let mut args_including_finalize_changeset = Changeset { + slot: changeset.slot, + ..Default::default() + }; + let mut args_with_lookup_changeset = Changeset { + slot: changeset.slot, + ..Default::default() + }; + let mut args_including_finalize_with_lookup_changeset = Changeset { + slot: changeset.slot, + ..Default::default() + }; + let mut from_buffer_changeset = Changeset { + slot: changeset.slot, + ..Default::default() + }; + let mut from_buffer_with_lookup_changeset = Changeset { + slot: changeset.slot, + ..Default::default() + }; + + let accounts_to_undelegate = changeset.accounts_to_undelegate.clone(); + let changeset_bundles = changeset.into_small_changeset_bundles(); + for bundle in changeset_bundles.bundles.into_iter() { + let commit_strategy = + CommitBundleStrategy::try_from((bundle, finalize)); + match commit_strategy { + Ok(CommitBundleStrategy::Args(bundle)) => { + add_to_changeset( + &mut args_changeset, + &accounts_to_undelegate, + bundle, + ); + } + Ok(CommitBundleStrategy::ArgsIncludeFinalize(bundle)) => { + add_to_changeset( + &mut args_including_finalize_changeset, + &accounts_to_undelegate, + bundle, + ); + } + Ok(CommitBundleStrategy::ArgsWithLookupTable(bundle)) => { + add_to_changeset( + &mut args_with_lookup_changeset, + &accounts_to_undelegate, + bundle, + ); + } + Ok(CommitBundleStrategy::ArgsIncludeFinalizeWithLookupTable( + bundle, + )) => { + 
add_to_changeset( + &mut args_including_finalize_with_lookup_changeset, + &accounts_to_undelegate, + bundle, + ); + } + Ok(CommitBundleStrategy::FromBuffer(bundle)) => { + add_to_changeset( + &mut from_buffer_changeset, + &accounts_to_undelegate, + bundle, + ); + } + Ok(CommitBundleStrategy::FromBufferWithLookupTable(bundle)) => { + add_to_changeset( + &mut from_buffer_with_lookup_changeset, + &accounts_to_undelegate, + bundle, + ); + } + Err(err) => { + return Err(err); + } + } + } + + Ok(SplitChangesets { + args_changeset, + args_including_finalize_changeset, + args_with_lookup_changeset, + args_including_finalize_with_lookup_changeset, + from_buffer_changeset, + from_buffer_with_lookup_changeset, + }) +} + +#[cfg(test)] +mod test { + use super::*; + use log::*; + use magicblock_committor_program::ChangedAccount; + use solana_sdk::pubkey::Pubkey; + + fn init_logger() { + let _ = env_logger::builder() + .format_timestamp(None) + .is_test(true) + .try_init(); + } + + fn add_changed_account( + changeset: &mut Changeset, + size: usize, + bundle_id: u64, + undelegate: bool, + ) -> Pubkey { + let pubkey = Pubkey::new_unique(); + changeset.add( + pubkey, + ChangedAccount::Full { + data: vec![1; size], + owner: Pubkey::new_unique(), + lamports: 0, + bundle_id, + }, + ); + if undelegate { + changeset.accounts_to_undelegate.insert(pubkey); + } + pubkey + } + + macro_rules! 
debug_counts { + ($label:expr, $changeset:ident, $split_changesets:ident) => { + debug!( + "{}: ({}) {{ +args_changeset: {} +args_including_finalize_changeset: {} +args_with_lookup_changeset: {} +args_including_finalize_with_lookup_changeset: {} +from_buffer_changeset: {} +from_buffer_with_lookup_changeset: {} +}}", + $label, + $changeset.accounts.len(), + $split_changesets.args_changeset.len(), + $split_changesets.args_including_finalize_changeset.len(), + $split_changesets.args_with_lookup_changeset.len(), + $split_changesets + .args_including_finalize_with_lookup_changeset + .len(), + $split_changesets.from_buffer_changeset.len(), + $split_changesets.from_buffer_with_lookup_changeset.len() + ); + }; + } + + macro_rules! assert_accounts_sum_matches { + ($changeset:ident, $split_changesets:ident) => { + assert_eq!( + $split_changesets.args_changeset.len() + + $split_changesets.args_including_finalize_changeset.len() + + $split_changesets.args_with_lookup_changeset.len() + + $split_changesets + .args_including_finalize_with_lookup_changeset + .len() + + $split_changesets.from_buffer_changeset.len() + + $split_changesets.from_buffer_with_lookup_changeset.len(), + $changeset.len() + ); + }; + } + + macro_rules! 
assert_undelegate_sum_matches { + ($changeset:ident, $split_changesets:ident) => { + assert_eq!( + $split_changesets + .args_changeset + .accounts_to_undelegate + .len() + + $split_changesets + .args_including_finalize_changeset + .accounts_to_undelegate + .len() + + $split_changesets + .args_with_lookup_changeset + .accounts_to_undelegate + .len() + + $split_changesets + .args_including_finalize_with_lookup_changeset + .accounts_to_undelegate + .len() + + $split_changesets + .from_buffer_changeset + .accounts_to_undelegate + .len() + + $split_changesets + .from_buffer_with_lookup_changeset + .accounts_to_undelegate + .len(), + $changeset.accounts_to_undelegate.len() + ); + }; + } + #[test] + fn test_split_small_changesets_by_commit_strategy() { + init_logger(); + + // Setup a changeset with different bundle/account sizes + let mut changeset = Changeset { + slot: 1, + ..Default::default() + }; + + let bundle_id = 1111; + + // 2 accounts bundle that can be handled via args + for idx in 1..=2 { + add_changed_account(&mut changeset, 10, bundle_id, idx % 2 == 0); + } + + // 8 accounts bundle that needs lookup + for idx in 1..=8 { + add_changed_account( + &mut changeset, + 10, + bundle_id * 10, + idx % 2 == 0, + ); + } + + // No Finalize + let split_changesets = + split_changesets_by_commit_strategy(changeset.clone(), false) + .unwrap(); + debug_counts!("No Finalize", changeset, split_changesets); + assert_accounts_sum_matches!(changeset, split_changesets); + assert_undelegate_sum_matches!(changeset, split_changesets); + + assert_eq!(split_changesets.args_changeset.len(), 2,); + assert_eq!(split_changesets.args_with_lookup_changeset.len(), 8,); + + // Finalize + let split_changesets = + split_changesets_by_commit_strategy(changeset.clone(), true) + .unwrap(); + + debug_counts!("Finalize", changeset, split_changesets); + assert_accounts_sum_matches!(changeset, split_changesets); + assert_undelegate_sum_matches!(changeset, split_changesets); + + 
assert_eq!(split_changesets.args_including_finalize_changeset.len(), 2,); + assert_eq!( + split_changesets + .args_including_finalize_with_lookup_changeset + .len(), + 8, + ); + } + + #[test] + fn test_split_medium_changesets_by_commit_strategy() { + init_logger(); + + // Setup a changeset with different bundle/account sizes + let mut changeset = Changeset { + slot: 1, + ..Default::default() + }; + let bundle_id = 2222; + + // 2 accounts bundle that can be handled via args and include the finalize instructions + for idx in 1..=2 { + add_changed_account(&mut changeset, 80, bundle_id, idx % 2 == 0); + } + + // 2 accounts bundle that can be handled via args, but cannot include finalize due + // to the size of the data + for idx in 1..=2 { + add_changed_account( + &mut changeset, + 100, + bundle_id + 1, + idx % 2 == 0, + ); + } + + // 3 accounts bundle that needs lookup buffer due to overall args size + for idx in 1..=3 { + add_changed_account( + &mut changeset, + 100, + bundle_id + 3, + idx % 2 == 0, + ); + } + + // No Finalize + let split_changesets = + split_changesets_by_commit_strategy(changeset.clone(), false) + .unwrap(); + debug_counts!("No Finalize", changeset, split_changesets); + assert_accounts_sum_matches!(changeset, split_changesets); + assert_undelegate_sum_matches!(changeset, split_changesets); + + assert_eq!(split_changesets.args_changeset.len(), 4,); + assert_eq!(split_changesets.from_buffer_changeset.len(), 3,); + + // Finalize + let split_changesets = + split_changesets_by_commit_strategy(changeset.clone(), true) + .unwrap(); + debug_counts!("Finalize", changeset, split_changesets); + assert_accounts_sum_matches!(changeset, split_changesets); + assert_undelegate_sum_matches!(changeset, split_changesets); + + assert_eq!(split_changesets.args_changeset.len(), 2,); + assert_eq!(split_changesets.args_including_finalize_changeset.len(), 2,); + assert_eq!(split_changesets.from_buffer_changeset.len(), 3,); + } + + #[test] + fn 
test_split_large_changesets_by_commit_strategy() { + init_logger(); + + // Setup a changeset with different bundle/account sizes + let mut changeset = Changeset { + slot: 1, + ..Default::default() + }; + + let bundle_id = 3333; + + // 5 accounts bundle that needs to be handled via lookup (buffer) + for idx in 1..=5 { + add_changed_account(&mut changeset, 400, bundle_id, idx % 2 == 0); + } + + // 2 accounts bundle that can be handled without lookup (buffer) + for idx in 1..=2 { + add_changed_account( + &mut changeset, + 600, + bundle_id * 10, + idx % 2 == 0, + ); + } + + // No Finalize + let split_changesets = + split_changesets_by_commit_strategy(changeset.clone(), false) + .unwrap(); + debug_counts!("No Finalize", changeset, split_changesets); + assert_accounts_sum_matches!(changeset, split_changesets); + assert_undelegate_sum_matches!(changeset, split_changesets); + + assert_eq!(split_changesets.from_buffer_changeset.len(), 2,); + assert_eq!(split_changesets.from_buffer_with_lookup_changeset.len(), 5,); + + // Finalize + let split_changesets = + split_changesets_by_commit_strategy(changeset.clone(), true) + .unwrap(); + debug_counts!("Finalize", changeset, split_changesets); + assert_accounts_sum_matches!(changeset, split_changesets); + assert_undelegate_sum_matches!(changeset, split_changesets); + + assert_eq!(split_changesets.from_buffer_changeset.len(), 2,); + assert_eq!(split_changesets.from_buffer_with_lookup_changeset.len(), 5,); + } + + #[test] + fn test_split_different_size_changesets_by_commit_strategy() { + // Combining the different changeset sizes we already test above into one changeset to + // split + init_logger(); + + // Setup a changeset with different bundle/account sizes + let mut changeset = Changeset { + slot: 1, + ..Default::default() + }; + + // Small sized bundles + { + let bundle_id = 1111; + + // 2 accounts bundle that can be handled via args + for idx in 1..=2 { + add_changed_account( + &mut changeset, + 10, + bundle_id, + idx % 2 == 0, 
+ ); + } + + // 8 accounts bundle that needs lookup + for idx in 1..=8 { + add_changed_account( + &mut changeset, + 10, + bundle_id * 10, + idx % 2 == 0, + ); + } + }; + + // Medium sized bundles + { + let bundle_id = 2222; + + // 2 accounts bundle that can be handled via args + for idx in 1..=2 { + add_changed_account( + &mut changeset, + 100, + bundle_id, + idx % 2 == 0, + ); + } + }; + + // Large sized bundles + { + let bundle_id = 3333; + + // 5 accounts bundle that needs to be handled via lookup (buffer) + for idx in 1..=5 { + add_changed_account( + &mut changeset, + 400, + bundle_id, + idx % 2 == 0, + ); + } + + // 2 accounts bundle that can be handled without lookup (buffer) + for idx in 1..=2 { + add_changed_account( + &mut changeset, + 600, + bundle_id * 10, + idx % 2 == 0, + ); + } + }; + + // No Finalize + { + let split_changesets = + split_changesets_by_commit_strategy(changeset.clone(), false) + .unwrap(); + + debug_counts!("No Finalize", changeset, split_changesets); + assert_accounts_sum_matches!(changeset, split_changesets); + assert_undelegate_sum_matches!(changeset, split_changesets); + + assert_eq!(split_changesets.args_changeset.len(), 4); + assert_eq!(split_changesets.args_with_lookup_changeset.len(), 8); + assert_eq!(split_changesets.from_buffer_changeset.len(), 2); + assert_eq!( + split_changesets.from_buffer_with_lookup_changeset.len(), + 5 + ); + } + + // Finalize + { + let split_changesets = + split_changesets_by_commit_strategy(changeset.clone(), true) + .unwrap(); + + debug_counts!("Finalize", changeset, split_changesets); + assert_accounts_sum_matches!(changeset, split_changesets); + assert_undelegate_sum_matches!(changeset, split_changesets); + + assert_eq!(split_changesets.args_changeset.len(), 2); + assert_eq!( + split_changesets.args_including_finalize_changeset.len(), + 2 + ); + assert_eq!( + split_changesets + .args_including_finalize_with_lookup_changeset + .len(), + 8 + ); + 
assert_eq!(split_changesets.from_buffer_changeset.len(), 2); + assert_eq!( + split_changesets.from_buffer_with_lookup_changeset.len(), + 5 + ); + } + } +} diff --git a/magicblock-committor-service/src/compute_budget.rs b/magicblock-committor-service/src/compute_budget.rs new file mode 100644 index 00000000..0b2aa312 --- /dev/null +++ b/magicblock-committor-service/src/compute_budget.rs @@ -0,0 +1,218 @@ +use solana_sdk::{ + compute_budget::ComputeBudgetInstruction, instruction::Instruction, +}; + +// ----------------- +// Budgets +// ----------------- +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct Budget { + base_budget: u32, + per_committee: u32, + compute_unit_price: u64, +} + +impl Default for Budget { + fn default() -> Self { + Self { + base_budget: 80_000, + per_committee: 45_000, + compute_unit_price: 1_000_000, + } + } +} + +#[derive(Debug, Clone)] +pub struct BufferWithReallocBudget { + base_budget: u32, + per_realloc_ix: u32, + compute_unit_price: u64, +} + +impl BufferWithReallocBudget { + fn total_budget(&self, realloc_ixs_count: u32) -> u32 { + self.base_budget + (self.per_realloc_ix * realloc_ixs_count) + } + + pub fn instructions(&self, realloc_ixs_count: usize) -> Vec { + let realloc_ixs_count = + u32::try_from(realloc_ixs_count).unwrap_or(u32::MAX); + + instructions( + self.total_budget(realloc_ixs_count), + self.compute_unit_price, + ) + } +} + +#[derive(Debug, Clone)] +pub struct BufferWriteChunkBudget { + base_budget: u32, + per_byte: usize, + compute_unit_price: u64, +} + +impl BufferWriteChunkBudget { + fn total_budget(&self, bytes_count: usize) -> u32 { + self.base_budget + (self.per_byte * bytes_count) as u32 + } + + pub fn instructions(&self, bytes_count: usize) -> Vec { + instructions(self.total_budget(bytes_count), self.compute_unit_price) + } +} + +// ----------------- +// ComputeBudgetConfig +// ----------------- +#[derive(Debug, Clone)] +pub struct ComputeBudgetConfig { + pub args_process: Budget, + pub finalize: Budget, + 
pub buffer_close: Budget, + /// The budget used for processing and process + closing a buffer. + /// Since we mix pure process and process + close instructions, we need to + /// assume the worst case and use the process + close budget for all. + pub buffer_process_and_close: Budget, + pub undelegate: Budget, + pub buffer_init: BufferWithReallocBudget, + pub buffer_realloc: BufferWithReallocBudget, + pub buffer_write: BufferWriteChunkBudget, +} + +impl ComputeBudgetConfig { + pub fn new(compute_unit_price: u64) -> Self { + Self { + args_process: Budget { + compute_unit_price, + base_budget: 80_000, + per_committee: 35_000, + }, + buffer_close: Budget { + compute_unit_price, + base_budget: 10_000, + per_committee: 25_000, + }, + buffer_process_and_close: Budget { + compute_unit_price, + base_budget: 40_000, + per_committee: 45_000, + }, + finalize: Budget { + compute_unit_price, + base_budget: 80_000, + per_committee: 25_000, + }, + undelegate: Budget { + compute_unit_price, + base_budget: 40_000, + per_committee: 35_000, + }, + buffer_init: BufferWithReallocBudget { + base_budget: 12_000, + per_realloc_ix: 6_000, + compute_unit_price: 1_000_000, + }, + buffer_realloc: BufferWithReallocBudget { + base_budget: 12_000, + per_realloc_ix: 6_000, + compute_unit_price: 1_000_000, + }, + buffer_write: BufferWriteChunkBudget { + base_budget: 10_000, + per_byte: 3, + compute_unit_price: 1_000_000, + }, + } + } +} + +impl ComputeBudgetConfig { + pub fn args_process_budget(&self) -> ComputeBudget { + ComputeBudget::Process(self.args_process) + } + pub fn buffer_close_budget(&self) -> ComputeBudget { + ComputeBudget::Close(self.buffer_close) + } + pub fn buffer_process_and_close_budget(&self) -> ComputeBudget { + ComputeBudget::ProcessAndClose(self.buffer_process_and_close) + } + pub fn finalize_budget(&self) -> ComputeBudget { + ComputeBudget::Finalize(self.finalize) + } + pub fn undelegate_budget(&self) -> ComputeBudget { + ComputeBudget::Undelegate(self.undelegate) + } +} + 
+// ----------------- +// ComputeBudget +// ----------------- +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum ComputeBudget { + Process(Budget), + Close(Budget), + ProcessAndClose(Budget), + Finalize(Budget), + Undelegate(Budget), +} + +impl ComputeBudget { + fn base_budget(&self) -> u32 { + use ComputeBudget::*; + match self { + Process(budget) => budget.base_budget, + Close(budget) => budget.base_budget, + ProcessAndClose(budget) => budget.base_budget, + Finalize(budget) => budget.base_budget, + Undelegate(budget) => budget.base_budget, + } + } + + fn per_committee(&self) -> u32 { + use ComputeBudget::*; + match self { + Process(budget) => budget.per_committee, + Close(budget) => budget.per_committee, + ProcessAndClose(budget) => budget.per_committee, + Finalize(budget) => budget.per_committee, + Undelegate(budget) => budget.per_committee, + } + } + + fn compute_unit_price(&self) -> u64 { + use ComputeBudget::*; + match self { + Process(budget) => budget.compute_unit_price, + Close(budget) => budget.compute_unit_price, + ProcessAndClose(budget) => budget.compute_unit_price, + Finalize(budget) => budget.compute_unit_price, + Undelegate(budget) => budget.compute_unit_price, + } + } + + fn total_budget(&self, committee_count: u32) -> u32 { + self.base_budget() + (self.per_committee() * committee_count) + } + + pub fn instructions(&self, committee_count: usize) -> Vec { + let committee_count = + u32::try_from(committee_count).unwrap_or(u32::MAX); + + instructions( + self.total_budget(committee_count), + self.compute_unit_price(), + ) + } +} + +fn instructions( + compute_budget: u32, + compute_unit_price: u64, +) -> Vec { + let compute_budget_ix = + ComputeBudgetInstruction::set_compute_unit_limit(compute_budget); + let compute_unit_price_ix = + ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price); + vec![compute_budget_ix, compute_unit_price_ix] +} diff --git a/magicblock-committor-service/src/config.rs b/magicblock-committor-service/src/config.rs 
new file mode 100644 index 00000000..8118ca5b --- /dev/null +++ b/magicblock-committor-service/src/config.rs @@ -0,0 +1,42 @@ +use solana_sdk::commitment_config::CommitmentLevel; + +use crate::compute_budget::ComputeBudgetConfig; + +#[derive(Debug, Clone)] +pub struct ChainConfig { + pub rpc_uri: String, + pub commitment: CommitmentLevel, + pub compute_budget_config: ComputeBudgetConfig, +} + +impl ChainConfig { + pub fn devnet(compute_budget_config: ComputeBudgetConfig) -> Self { + Self { + rpc_uri: "https://api.devnet.solana.com".to_string(), + commitment: CommitmentLevel::Confirmed, + compute_budget_config, + } + } + + pub fn mainnet(compute_budget_config: ComputeBudgetConfig) -> Self { + Self { + rpc_uri: "https://api.mainnet-beta.solana.com".to_string(), + commitment: CommitmentLevel::Confirmed, + compute_budget_config, + } + } + + pub fn local(compute_budget_config: ComputeBudgetConfig) -> Self { + Self { + rpc_uri: "http://localhost:7799".to_string(), + commitment: CommitmentLevel::Processed, + compute_budget_config, + } + } +} + +impl Default for ChainConfig { + fn default() -> Self { + Self::local(ComputeBudgetConfig::new(1_000_000)) + } +} diff --git a/magicblock-committor-service/src/consts.rs b/magicblock-committor-service/src/consts.rs new file mode 100644 index 00000000..3fb495cd --- /dev/null +++ b/magicblock-committor-service/src/consts.rs @@ -0,0 +1,15 @@ +// https://solana.com/docs/core/transactions#transaction-size + +use magicblock_committor_program::{ + consts::MAX_INSTRUCTION_DATA_SIZE, + instruction::IX_WRITE_SIZE_WITHOUT_CHUNKS, +}; + +const BUDGET_SET_COMPUTE_UNIT_PRICE_BYTES: u16 = (1 + 8) * 8; +const BUDGET_SET_COMPUTE_UNIT_LIMIT_BYTES: u16 = (1 + 4) * 8; + +/// The maximum size of a chunk that can be written as part of a single transaction +pub(super) const MAX_WRITE_CHUNK_SIZE: u16 = MAX_INSTRUCTION_DATA_SIZE + - IX_WRITE_SIZE_WITHOUT_CHUNKS + - BUDGET_SET_COMPUTE_UNIT_PRICE_BYTES + - BUDGET_SET_COMPUTE_UNIT_LIMIT_BYTES; diff --git 
a/magicblock-committor-service/src/error.rs b/magicblock-committor-service/src/error.rs new file mode 100644 index 00000000..d130cf1c --- /dev/null +++ b/magicblock-committor-service/src/error.rs @@ -0,0 +1,127 @@ +use std::sync::Arc; + +use crate::persist::CommitStrategy; +use magicblock_rpc_client::MagicBlockRpcClientError; +use solana_pubkey::Pubkey; +use solana_sdk::signature::Signature; +use thiserror::Error; + +use crate::CommitInfo; + +pub type CommittorServiceResult = + std::result::Result; + +#[derive(Error, Debug)] +pub enum CommittorServiceError { + #[error("CommittorError: {0} ({0:?})")] + CommittorError(#[from] magicblock_committor_program::error::CommittorError), + + #[error("CommitPersistError: {0} ({0:?})")] + CommitPersistError(#[from] crate::persist::error::CommitPersistError), + + #[error("MagicBlockRpcClientError: {0} ({0:?})")] + MagicBlockRpcClientError( + #[from] magicblock_rpc_client::MagicBlockRpcClientError, + ), + + #[error("TableManiaError: {0} ({0:?})")] + TableManiaError(#[from] magicblock_table_mania::error::TableManiaError), + + #[error( + "Failed send and confirm transaction to {0} on chain: {1} ({1:?})" + )] + FailedToSendAndConfirmTransaction( + String, + magicblock_rpc_client::MagicBlockRpcClientError, + ), + + #[error("The transaction to {0} was sent and confirmed, but encountered an error: {1} ({1:?})")] + EncounteredTransactionError( + String, + solana_sdk::transaction::TransactionError, + ), + + #[error("Failed to send init changeset account: {0} ({0:?})")] + FailedToSendInitChangesetAccount( + solana_rpc_client_api::client_error::Error, + ), + + #[error("Failed to confirm init changeset account: {0} ({0:?})")] + FailedToConfirmInitChangesetAccount( + solana_rpc_client_api::client_error::Error, + ), + #[error("Init transaction '{0}' was not confirmed")] + InitChangesetAccountNotConfirmed(String), + + #[error("Task {0} failed to compile transaction message: {1} ({1:?})")] + FailedToCompileTransactionMessage( + String, + 
solana_sdk::message::CompileError, + ), + + #[error("Task {0} failed to creqate transaction: {1} ({1:?})")] + FailedToCreateTransaction(String, solana_sdk::signer::SignerError), + + #[error("Could not find commit strategy for bundle {0}")] + CouldNotFindCommitStrategyForBundle(u64), + + #[error("Failed to fetch metadata account for {0}")] + FailedToFetchDelegationMetadata(Pubkey), + + #[error("Failed to deserialize metadata account for {0}, {1:?}")] + FailedToDeserializeDelegationMetadata( + Pubkey, + solana_sdk::program_error::ProgramError, + ), +} + +impl CommittorServiceError { + pub fn signature(&self) -> Option { + use CommittorServiceError::*; + match self { + MagicBlockRpcClientError(e) => e.signature(), + FailedToSendAndConfirmTransaction(_, e) => e.signature(), + _ => None, + } + } +} + +pub type CommitAccountResult = std::result::Result; +#[derive(Error, Debug)] +/// Specific error that always includes the commit info +pub enum CommitAccountError { + #[error("Failed to init buffer and chunk account: {0}")] + InitBufferAndChunkAccounts(String, Box, CommitStrategy), + + #[error("Failed to get chunks account: ({0:?})")] + GetChunksAccount( + Option, + Arc, + CommitStrategy, + ), + + #[error("Failed to deserialize chunks account: {0} ({0:?})")] + DeserializeChunksAccount(std::io::Error, Arc, CommitStrategy), + + #[error("Failed to write complete chunks of commit data after max retries. 
Last write error {0:?}")] + WriteChunksRanOutOfRetries( + Option, + Arc, + CommitStrategy, + ), +} + +impl CommitAccountError { + pub fn into_commit_info(self) -> CommitInfo { + use CommitAccountError::*; + let ci = match self { + InitBufferAndChunkAccounts(_, commit_info, _) => { + return *commit_info; + } + GetChunksAccount(_, commit_info, _) => commit_info, + DeserializeChunksAccount(_, commit_info, _) => commit_info, + WriteChunksRanOutOfRetries(_, commit_info, _) => commit_info, + }; + Arc::::unwrap_or_clone(ci) + } +} diff --git a/magicblock-committor-service/src/finalize.rs b/magicblock-committor-service/src/finalize.rs new file mode 100644 index 00000000..b6334140 --- /dev/null +++ b/magicblock-committor-service/src/finalize.rs @@ -0,0 +1,66 @@ +use std::collections::HashMap; + +use log::*; +use solana_pubkey::Pubkey; + +use crate::{ + bundles::bundle_chunks, + transactions::{ + finalize_ix, MAX_FINALIZE_PER_TX, MAX_FINALIZE_PER_TX_USING_LOOKUP, + }, + types::{InstructionsForCommitable, InstructionsKind}, + CommitInfo, +}; + +fn finalize_commitable( + validator_auth: Pubkey, + commit_info: CommitInfo, +) -> InstructionsForCommitable { + debug!("Finalizing commitable: {:?}", commit_info); + let CommitInfo::BufferedDataAccount { pubkey, .. 
} = &commit_info else { + panic!("Only data accounts are supported for now"); + }; + + let ix = finalize_ix(validator_auth, pubkey); + InstructionsForCommitable { + instructions: vec![ix], + commit_info, + kind: InstructionsKind::Finalize, + } +} + +pub(crate) struct ChunkedIxsToFinalizeCommitablesResult { + pub chunked_ixs: Vec>, + pub unchunked: HashMap>, +} + +/// Finalizes the previously processed commits +/// Ensures that commitables with matching bundle id are in a single chunk +pub(crate) fn chunked_ixs_to_finalize_commitables( + validator_auth: Pubkey, + commit_infos: Vec, + use_lookup: bool, +) -> ChunkedIxsToFinalizeCommitablesResult { + let max_per_chunk = if use_lookup { + MAX_FINALIZE_PER_TX_USING_LOOKUP + } else { + MAX_FINALIZE_PER_TX + }; + let bundles = bundle_chunks(commit_infos, max_per_chunk as usize); + let chunked_ixs: Vec<_> = bundles + .chunks + .into_iter() + .map(|chunk| { + chunk + .into_iter() + .map(|commit_info| { + finalize_commitable(validator_auth, commit_info) + }) + .collect::>() + }) + .collect(); + ChunkedIxsToFinalizeCommitablesResult { + chunked_ixs, + unchunked: bundles.unchunked, + } +} diff --git a/magicblock-committor-service/src/lib.rs b/magicblock-committor-service/src/lib.rs new file mode 100644 index 00000000..274db705 --- /dev/null +++ b/magicblock-committor-service/src/lib.rs @@ -0,0 +1,35 @@ +mod bundle_strategy; +mod bundles; +mod commit; +mod commit_info; +mod commit_stage; +mod commit_strategy; +mod compute_budget; +pub mod config; +mod consts; +pub mod error; +mod finalize; +pub mod persist; +mod pubkeys_provider; +mod service; +mod transactions; +mod types; +mod undelegate; + +#[cfg(feature = "dev-context-only-utils")] +pub mod stubs; + +pub use commit_info::CommitInfo; +pub use compute_budget::ComputeBudgetConfig; +pub use service::{ChangesetCommittor, CommittorService}; + +pub use commit_stage::CommitStage; +pub use magicblock_committor_program::{ + ChangedAccount, Changeset, ChangesetMeta, +}; +pub fn 
changeset_for_slot(slot: u64) -> Changeset { + Changeset { + slot, + ..Changeset::default() + } +} diff --git a/magicblock-committor-service/src/persist/commit_persister.rs b/magicblock-committor-service/src/persist/commit_persister.rs new file mode 100644 index 00000000..33ade0f2 --- /dev/null +++ b/magicblock-committor-service/src/persist/commit_persister.rs @@ -0,0 +1,254 @@ +use std::path::Path; +use std::sync::atomic::{AtomicU64, Ordering}; + +use solana_sdk::hash::Hash; +use solana_sdk::pubkey::Pubkey; + +use super::db::BundleSignatureRow; +use super::error::{CommitPersistError, CommitPersistResult}; +use super::utils::now; +use super::{db::CommitStatusRow, CommitStatus, CommitType, CommittorDb}; +use magicblock_committor_program::Changeset; + +pub struct CommitPersister { + db: CommittorDb, + request_id_counter: AtomicU64, +} + +impl CommitPersister { + pub fn try_new

(db_file: P) -> CommitPersistResult + where + P: AsRef, + { + let db = CommittorDb::new(db_file)?; + db.create_commit_status_table()?; + db.create_bundle_signature_table()?; + Ok(Self::for_db(db)) + } + + fn for_db(db: CommittorDb) -> Self { + Self { + db, + request_id_counter: AtomicU64::new(1), + } + } + + /// Generates a unique request ID for a changeset + fn generate_reqid(&self) -> String { + let id = self.request_id_counter.fetch_add(1, Ordering::SeqCst); + format!("req-{}", id) + } + + pub fn start_changeset( + &mut self, + changeset: &Changeset, + ephemeral_blockhash: Hash, + finalize: bool, + ) -> CommitPersistResult { + let reqid = self.generate_reqid(); + + let mut commit_rows = Vec::new(); + + for (pubkey, changed_account) in changeset.accounts.iter() { + let undelegate = changeset.accounts_to_undelegate.contains(pubkey); + let commit_type = if changed_account.data().is_empty() { + CommitType::EmptyAccount + } else { + CommitType::DataAccount + }; + + let data = if commit_type == CommitType::DataAccount { + Some(changed_account.data().to_vec()) + } else { + None + }; + + let now = now(); + + // Create a commit status row for this account + let commit_row = CommitStatusRow { + reqid: reqid.clone(), + pubkey: *pubkey, + delegated_account_owner: changed_account.owner(), + slot: changeset.slot, + ephemeral_blockhash, + undelegate, + lamports: changed_account.lamports(), + finalize, + data, + commit_type, + created_at: now, + commit_status: CommitStatus::Pending, + last_retried_at: now, + retries_count: 0, + }; + + commit_rows.push(commit_row); + } + + // Insert all commit rows into the database + self.db.insert_commit_status_rows(&commit_rows)?; + + Ok(reqid) + } + + pub fn update_status( + &mut self, + reqid: &str, + pubkey: &Pubkey, + status: CommitStatus, + ) -> Result<(), CommitPersistError> { + // NOTE: only Pending commits don't have a bundle id, but we should + // never update to Pending + let Some(bundle_id) = status.bundle_id() else { + return Err( 
+ CommitPersistError::CommitStatusUpdateRequiresStatusWithBundleId( + status.as_str().to_string(), + ), + ); + }; + + let bundle_signature = status.signatures().map(|sigs| { + BundleSignatureRow::new( + bundle_id, + sigs.process_signature, + sigs.finalize_signature, + sigs.undelegate_signature, + ) + }); + + self.db.update_commit_status_and_bundle_signature( + reqid, + pubkey, + &status, + bundle_signature, + ) + + // TODO(thlorenz): @@ once we see this works remove the succeeded commits + } + + pub fn get_commit_statuses_by_reqid( + &self, + reqid: &str, + ) -> CommitPersistResult> { + self.db.get_commit_statuses_by_reqid(reqid) + } + + pub fn get_commit_status( + &self, + reqid: &str, + pubkey: &Pubkey, + ) -> CommitPersistResult> { + self.db.get_commit_status(reqid, pubkey) + } + + pub fn get_signature( + &self, + bundle_id: u64, + ) -> CommitPersistResult> { + self.db.get_bundle_signature_by_bundle_id(bundle_id) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::persist::{CommitStatusSignatures, CommitStrategy}; + use magicblock_committor_program::ChangedAccount; + use solana_pubkey::Pubkey; + use solana_sdk::signature::Signature; + + #[test] + fn test_start_changeset_and_update_status() { + let mut persister = CommitPersister::try_new(":memory:").unwrap(); + + // Create a test changeset + let mut changeset = Changeset { + slot: 100, + ..Default::default() + }; + + let pubkey1 = Pubkey::new_unique(); + let pubkey2 = Pubkey::new_unique(); + let owner = Pubkey::new_unique(); + + // Add an empty account + changeset.add( + pubkey1, + ChangedAccount::Full { + lamports: 1000, + owner, + data: vec![], + bundle_id: 1, + }, + ); + + // Add a data account + changeset.add( + pubkey2, + ChangedAccount::Full { + lamports: 2000, + owner, + data: vec![1, 2, 3, 4, 5], + bundle_id: 42, + }, + ); + + changeset.request_undelegation(pubkey1); + + // Start tracking the changeset + let blockhash = Hash::new_unique(); + let reqid = persister + 
.start_changeset(&changeset, blockhash, true) + .unwrap(); + + // Verify the rows were inserted correctly + let rows = persister.db.get_commit_statuses_by_reqid(&reqid).unwrap(); + assert_eq!(rows.len(), 2); + + let empty_account_row = + rows.iter().find(|row| row.pubkey == pubkey1).unwrap(); + assert_eq!(empty_account_row.commit_type, CommitType::EmptyAccount); + assert!(empty_account_row.undelegate); + assert_eq!(empty_account_row.data, None); + assert_eq!(empty_account_row.commit_status, CommitStatus::Pending); + assert_eq!(empty_account_row.retries_count, 0); + + let data_account_row = + rows.iter().find(|row| row.pubkey == pubkey2).unwrap(); + assert_eq!(data_account_row.commit_type, CommitType::DataAccount); + assert!(!data_account_row.undelegate); + assert_eq!(data_account_row.data, Some(vec![1, 2, 3, 4, 5])); + assert_eq!(data_account_row.commit_status, CommitStatus::Pending); + + // Update status and verify commit status and the signatures + let process_signature = Signature::new_unique(); + let finalize_signature = Some(Signature::new_unique()); + let new_status = CommitStatus::FailedFinalize(( + 1, + CommitStrategy::Args, + CommitStatusSignatures { + process_signature, + finalize_signature, + undelegate_signature: None, + }, + )); + persister + .update_status(&reqid, &pubkey1, new_status.clone()) + .unwrap(); + + let updated_row = persister + .get_commit_status(&reqid, &pubkey1) + .unwrap() + .unwrap(); + + assert_eq!(updated_row.commit_status, new_status); + + let signatures = persister + .get_signature(new_status.bundle_id().unwrap()) + .unwrap() + .unwrap(); + assert_eq!(signatures.processed_signature, process_signature); + assert_eq!(signatures.finalized_signature, finalize_signature); + } +} diff --git a/magicblock-committor-service/src/persist/db.rs b/magicblock-committor-service/src/persist/db.rs new file mode 100644 index 00000000..8f00375f --- /dev/null +++ b/magicblock-committor-service/src/persist/db.rs @@ -0,0 +1,965 @@ +use std::{fmt, 
path::Path, str::FromStr}; + +use rusqlite::{params, Connection, Result, Transaction}; +use solana_pubkey::Pubkey; +use solana_sdk::{clock::Slot, hash::Hash, signature::Signature}; + +use super::{ + error::CommitPersistResult, + utils::{i64_into_u64, now, u64_into_i64}, + CommitStatus, CommitStatusSignatures, CommitStrategy, CommitType, +}; +// ----------------- +// CommitStatusRow +// ----------------- + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CommitStatusRow { + /// Request ID that is common for some accounts + pub reqid: String, + /// The on chain address of the delegated account + pub pubkey: Pubkey, + /// The original owner of the delegated account on chain + pub delegated_account_owner: Pubkey, + /// The ephemeral slot at which those changes were requested + pub slot: Slot, + /// The ephemeral blockhash at which those changes were requested + pub ephemeral_blockhash: Hash, + /// If we also undelegate the account after committing it + pub undelegate: bool, + /// Lamports of the account in the ephemeral + pub lamports: u64, + /// If `true` the account commit is finalized after it was processed + pub finalize: bool, + /// The account data in the ephemeral (only set if the commit is for a data account) + pub data: Option>, + /// The type of commit that was requested, i.e. 
lamports only or including data + pub commit_type: CommitType, + /// Time since epoch at which the commit was requested + pub created_at: u64, + /// The current status of the commit + /// Includes the bundle_id which will be the same for accounts whose commits + /// need to be applied atomically in a single transaction + /// For single accounts a bundle_id will be gnerated as well for consistency + /// For Pending commits the bundle_id is not set + pub commit_status: CommitStatus, + /// Time since epoch at which the commit was last retried + pub last_retried_at: u64, + /// Number of times the commit was retried + pub retries_count: u16, +} + +impl fmt::Display for CommitStatusRow { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "CommitStatusRow {{ + reqid: {} + pubkey: {}, + delegated_account_owner: {}, + slot: {}, + ephemeral_blockhash: {}, + undelegate: {}, + lamports: {}, + finalize: {}, + data.len: {}, + commit_type: {}, + created_at: {}, + commit_status: {}, + last_retried_at: {}, + retries_count: {} +}}", + self.reqid, + self.pubkey, + self.delegated_account_owner, + self.slot, + self.ephemeral_blockhash, + self.undelegate, + self.lamports, + self.finalize, + self.data.as_ref().map(|x| x.len()).unwrap_or_default(), + self.commit_type.as_str(), + self.created_at, + self.commit_status, + self.last_retried_at, + self.retries_count + ) + } +} + +const ALL_COMMIT_STATUS_COLUMNS: &str = r#" + reqid, + pubkey, + delegated_account_owner, + slot, + ephemeral_blockhash, + undelegate, + lamports, + finalize, + bundle_id, + data, + commit_type, + created_at, + commit_status, + commit_strategy, + processed_signature, + finalized_signature, + undelegated_signature, + last_retried_at, + retries_count +"#; + +const SELECT_ALL_COMMIT_STATUS_COLUMNS: &str = r#" +SELECT + reqid, + pubkey, + delegated_account_owner, + slot, + ephemeral_blockhash, + undelegate, + lamports, + finalize, + bundle_id, + data, + commit_type, + created_at, + 
commit_status, + commit_strategy, + processed_signature, + finalized_signature, + undelegated_signature, + last_retried_at, + retries_count +FROM commit_status +"#; + +// ----------------- +// Bundle Signature +// ----------------- +// The BundleSignature table exists to store mappings from bundle_id to the signatures used +// to process/finalize these bundles. +// The signatures are repeated in the commit_status table, however the rows in there have a +// different lifetime than the bundle signature rows. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BundleSignatureRow { + /// The id of the bundle that was commmitted + /// If an account was not part of a bundle it is treated as a single account bundle + /// for consistency. + /// The bundle_id is unique + pub bundle_id: u64, + /// The signature of the transaction on chain that processed the commit + pub processed_signature: Signature, + /// The signature of the transaction on chain that finalized the commit + /// if applicable + pub finalized_signature: Option, + /// The signature of the transaction on chain that undelegated the account(s) + /// if applicable + pub undelegate_signature: Option, + /// Time since epoch at which the bundle signature was created + pub created_at: u64, +} + +impl BundleSignatureRow { + pub fn new( + bundle_id: u64, + processed_signature: Signature, + finalized_signature: Option, + undelegate_signature: Option, + ) -> Self { + let created_at = now(); + Self { + bundle_id, + processed_signature, + finalized_signature, + undelegate_signature, + created_at, + } + } +} + +const ALL_BUNDLE_SIGNATURE_COLUMNS: &str = r#" + bundle_id, + processed_signature, + finalized_signature, + undelegate_signature, + created_at +"#; + +const SELECT_ALL_BUNDLE_SIGNATURE_COLUMNS: &str = r#" +SELECT + bundle_id, + processed_signature, + finalized_signature, + undelegate_signature, + created_at +FROM bundle_signature +"#; + +// ----------------- +// CommittorDb +// ----------------- +pub struct 
CommittorDb { + conn: Connection, +} + +impl CommittorDb { + pub fn new

(db_file: P) -> Result + where + P: AsRef, + { + let conn = Connection::open(db_file)?; + Ok(Self { conn }) + } + + pub fn path(&self) -> Option<&str> { + self.conn.path() + } + + // ----------------- + // Methods affecting both tables + // ----------------- + pub fn update_commit_status_and_bundle_signature( + &mut self, + reqid: &str, + pubkey: &Pubkey, + status: &CommitStatus, + bundle_signature: Option, + ) -> CommitPersistResult<()> { + let tx = self.conn.transaction()?; + Self::update_commit_status(&tx, reqid, pubkey, status)?; + if let Some(bundle_signature) = bundle_signature { + Self::insert_bundle_signature(&tx, &bundle_signature)?; + } + tx.commit()?; + Ok(()) + } + + // ----------------- + // Commit Status + // ----------------- + pub fn create_commit_status_table(&self) -> Result<()> { + // The bundle_id is NULL when we insert a pending commit + match self.conn.execute_batch( + " + BEGIN; + CREATE TABLE IF NOT EXISTS commit_status ( + reqid TEXT NOT NULL, + pubkey TEXT NOT NULL, + delegated_account_owner TEXT NOT NULL, + slot INTEGER NOT NULL, + ephemeral_blockhash TEXT NOT NULL, + undelegate INTEGER NOT NULL, + lamports INTEGER NOT NULL, + finalize INTEGER NOT NULL, + bundle_id INTEGER, + data BLOB, + commit_type TEXT NOT NULL, + created_at INTEGER NOT NULL, + commit_status TEXT NOT NULL, + commit_strategy TEXT NOT NULL, + processed_signature TEXT, + finalized_signature TEXT, + undelegated_signature TEXT, + last_retried_at INTEGER NOT NULL, + retries_count INTEGER NOT NULL + ); + CREATE INDEX IF NOT EXISTS idx_commits_pubkey ON commit_status (pubkey); + CREATE INDEX IF NOT EXISTS idx_commits_reqid ON commit_status (reqid); + COMMIT;", + ) { + Ok(_) => Ok(()), + Err(err) => { + eprintln!("Error creating commit_status table: {}", err); + Err(err) + } + } + } + + pub fn insert_commit_status_rows( + &mut self, + commit_rows: &[CommitStatusRow], + ) -> CommitPersistResult<()> { + let tx = self.conn.transaction()?; + for commit in commit_rows { + 
Self::insert_commit_status_row(&tx, commit)?; + } + tx.commit()?; + Ok(()) + } + + fn insert_commit_status_row( + tx: &Transaction<'_>, + commit: &CommitStatusRow, + ) -> CommitPersistResult<()> { + let (processed_signature, finalized_signature, undelegated_signature) = + match commit.commit_status.signatures() { + Some(sigs) => ( + Some(sigs.process_signature), + sigs.finalize_signature, + sigs.undelegate_signature, + ), + None => (None, None, None), + }; + tx.execute( + &format!( + "INSERT INTO commit_status ({ALL_COMMIT_STATUS_COLUMNS}) VALUES + (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19)", + ), + params![ + commit.reqid, + commit.pubkey.to_string(), + commit.delegated_account_owner.to_string(), + u64_into_i64(commit.slot), + commit.ephemeral_blockhash.to_string(), + if commit.undelegate { 1 } else { 0 }, + u64_into_i64(commit.lamports), + if commit.finalize { 1 } else { 0 }, + commit.commit_status.bundle_id().map(u64_into_i64), + commit.data.as_deref(), + commit.commit_type.as_str(), + u64_into_i64(commit.created_at), + commit.commit_status.as_str(), + commit.commit_status.commit_strategy().as_str(), + processed_signature + .as_ref() + .map(|s| s.to_string()), + finalized_signature + .as_ref() + .map(|s| s.to_string()), + undelegated_signature + .as_ref() + .map(|s| s.to_string()), + u64_into_i64(commit.last_retried_at), + commit.retries_count, + ], + )?; + Ok(()) + } + + fn update_commit_status( + tx: &Transaction<'_>, + reqid: &str, + pubkey: &Pubkey, + status: &CommitStatus, + ) -> CommitPersistResult<()> { + let query = "UPDATE commit_status + SET + commit_status = ?1, + bundle_id = ?2, + commit_strategy = ?3, + processed_signature = ?4, + finalized_signature = ?5, + undelegated_signature = ?6 + WHERE + pubkey = ?7 AND reqid = ?8"; + let stmt = &mut tx.prepare(query)?; + stmt.execute(params![ + status.as_str(), + status.bundle_id(), + status.commit_strategy().as_str(), + status.signatures().map(|s| 
s.process_signature.to_string()), + status + .signatures() + .and_then(|s| s.finalize_signature) + .map(|s| s.to_string()), + status + .signatures() + .and_then(|s| s.undelegate_signature) + .map(|s| s.to_string()), + pubkey.to_string(), + reqid + ])?; + Ok(()) + } + + #[cfg(test)] + fn get_commit_statuses_by_pubkey( + &self, + pubkey: &Pubkey, + ) -> CommitPersistResult> { + let query = + format!("{SELECT_ALL_COMMIT_STATUS_COLUMNS} WHERE pubkey = ?1"); + let stmt = &mut self.conn.prepare(&query)?; + let mut rows = stmt.query(params![pubkey.to_string()])?; + + extract_committor_rows(&mut rows) + } + + pub(crate) fn get_commit_statuses_by_reqid( + &self, + reqid: &str, + ) -> CommitPersistResult> { + let query = + format!("{SELECT_ALL_COMMIT_STATUS_COLUMNS} WHERE reqid = ?1"); + let stmt = &mut self.conn.prepare(&query)?; + let mut rows = stmt.query(params![reqid])?; + + extract_committor_rows(&mut rows) + } + + pub(crate) fn get_commit_status( + &self, + reqid: &str, + pubkey: &Pubkey, + ) -> CommitPersistResult> { + let query = format!( + "{SELECT_ALL_COMMIT_STATUS_COLUMNS} WHERE reqid = ?1 AND pubkey = ?2" + ); + let stmt = &mut self.conn.prepare(&query)?; + let mut rows = stmt.query(params![reqid, pubkey.to_string()])?; + + extract_committor_rows(&mut rows).map(|mut rows| rows.pop()) + } + + #[cfg(test)] + fn remove_commit_statuses_with_reqid( + &self, + reqid: &str, + ) -> CommitPersistResult<()> { + let query = "DELETE FROM commit_status WHERE reqid = ?1"; + let stmt = &mut self.conn.prepare(query)?; + stmt.execute(params![reqid])?; + Ok(()) + } + + // ----------------- + // Bundle Signature + // ----------------- + pub fn create_bundle_signature_table(&self) -> Result<()> { + match self.conn.execute_batch( + " + BEGIN; + CREATE TABLE IF NOT EXISTS bundle_signature ( + bundle_id INTEGER NOT NULL PRIMARY KEY, + processed_signature TEXT NOT NULL, + finalized_signature TEXT, + undelegate_signature TEXT, + created_at INTEGER NOT NULL + ); + CREATE INDEX IF NOT 
EXISTS idx_bundle_signature ON bundle_signature (bundle_id); + COMMIT;", + ) { + Ok(_) => Ok(()), + Err(err) => { + eprintln!("Error creating bundle_signature table: {}", err); + Err(err) + } + } + } + + fn insert_bundle_signature( + tx: &Transaction<'_>, + bundle_signature: &BundleSignatureRow, + ) -> CommitPersistResult<()> { + let query = if bundle_signature.finalized_signature.is_some() { + format!("INSERT OR REPLACE INTO bundle_signature ({ALL_BUNDLE_SIGNATURE_COLUMNS}) + VALUES (?1, ?2, ?3, ?4, ?5)") + } else { + format!("INSERT OR IGNORE INTO bundle_signature ({ALL_BUNDLE_SIGNATURE_COLUMNS}) + VALUES (?1, ?2, ?3, ?4, ?5)") + }; + tx.execute( + &query, + params![ + bundle_signature.bundle_id, + bundle_signature.processed_signature.to_string(), + bundle_signature + .finalized_signature + .as_ref() + .map(|s| s.to_string()), + bundle_signature + .undelegate_signature + .as_ref() + .map(|s| s.to_string()), + u64_into_i64(bundle_signature.created_at) + ], + )?; + Ok(()) + } + + pub fn get_bundle_signature_by_bundle_id( + &self, + bundle_id: u64, + ) -> CommitPersistResult> { + let query = format!( + "{SELECT_ALL_BUNDLE_SIGNATURE_COLUMNS} WHERE bundle_id = ?1" + ); + let stmt = &mut self.conn.prepare(&query)?; + let mut rows = stmt.query(params![bundle_id])?; + + if let Some(row) = rows.next()? { + let bundle_signature_row = extract_bundle_signature_row(row)?; + Ok(Some(bundle_signature_row)) + } else { + Ok(None) + } + } +} + +// ----------------- +// Commit Status Helpers +// ----------------- +fn extract_committor_rows( + rows: &mut rusqlite::Rows, +) -> CommitPersistResult> { + let mut commits = Vec::new(); + while let Some(row) = rows.next()? { + let commit_row = extract_committor_row(row)?; + commits.push(commit_row); + } + Ok(commits) +} + +fn extract_committor_row( + row: &rusqlite::Row, +) -> CommitPersistResult { + let reqid: String = row.get(0)?; + + let pubkey = { + let pubkey: String = row.get(1)?; + Pubkey::try_from(pubkey.as_str())? 
+ }; + let delegated_account_owner = { + let delegated_account_owner: String = row.get(2)?; + Pubkey::try_from(delegated_account_owner.as_str())? + }; + let slot: Slot = { + let slot: i64 = row.get(3)?; + i64_into_u64(slot) + }; + + let ephemeral_blockhash = { + let ephemeral_blockhash: String = row.get(4)?; + Hash::from_str(ephemeral_blockhash.as_str())? + }; + + let undelegate: bool = { + let undelegate: u8 = row.get(5)?; + undelegate == 1 + }; + + let lamports: u64 = { + let lamports: i64 = row.get(6)?; + i64_into_u64(lamports) + }; + + let finalize: bool = { + let finalize: u8 = row.get(7)?; + finalize == 1 + }; + + let bundle_id: Option = { + let bundle_id: Option = row.get(8)?; + bundle_id.map(i64_into_u64) + }; + + let data: Option> = row.get(9)?; + + let commit_type = { + let commit_type: String = row.get(10)?; + CommitType::try_from(commit_type.as_str())? + }; + let created_at: u64 = { + let created_at: i64 = row.get(11)?; + i64_into_u64(created_at) + }; + let commit_status = { + let commit_status: String = row.get(12)?; + let commit_strategy = { + let commit_strategy: String = row.get(13)?; + CommitStrategy::from(commit_strategy.as_str()) + }; + let processed_signature = { + let processed_signature: Option = row.get(14)?; + processed_signature + .map(|s| Signature::from_str(s.as_str())) + .transpose()? + }; + let finalized_signature = { + let finalized_signature: Option = row.get(15)?; + finalized_signature + .map(|s| Signature::from_str(s.as_str())) + .transpose()? + }; + let undelegated_signature = { + let undelegated_signature: Option = row.get(16)?; + undelegated_signature + .map(|s| Signature::from_str(s.as_str())) + .transpose()? + }; + let sigs = processed_signature.map(|s| CommitStatusSignatures { + process_signature: s, + finalize_signature: finalized_signature, + undelegate_signature: undelegated_signature, + }); + CommitStatus::try_from(( + commit_status.as_str(), + bundle_id, + commit_strategy, + sigs, + ))? 
+ }; + + let last_retried_at: u64 = { + let last_retried_at: i64 = row.get(17)?; + i64_into_u64(last_retried_at) + }; + let retries_count: u16 = { + let retries_count: i64 = row.get(18)?; + retries_count.try_into().unwrap_or_default() + }; + + Ok(CommitStatusRow { + reqid, + pubkey, + delegated_account_owner, + slot, + ephemeral_blockhash, + undelegate, + lamports, + finalize, + data, + commit_type, + created_at, + commit_status, + last_retried_at, + retries_count, + }) +} + +// ----------------- +// Bundle Signature Helpers +// ----------------- +fn extract_bundle_signature_row( + row: &rusqlite::Row, +) -> CommitPersistResult { + let bundle_id: u64 = { + let bundle_id: i64 = row.get(0)?; + i64_into_u64(bundle_id) + }; + let processed_signature = { + let processed_signature: String = row.get(1)?; + Signature::from_str(processed_signature.as_str())? + }; + let finalized_signature = { + let finalized_signature: Option = row.get(2)?; + finalized_signature + .map(|s| Signature::from_str(s.as_str())) + .transpose()? + }; + let undelegate_signature = { + let undelegate_signature: Option = row.get(3)?; + undelegate_signature + .map(|s| Signature::from_str(s.as_str())) + .transpose()? 
+ }; + let created_at: u64 = { + let created_at: i64 = row.get(4)?; + i64_into_u64(created_at) + }; + + Ok(BundleSignatureRow { + bundle_id, + processed_signature, + finalized_signature, + undelegate_signature, + created_at, + }) +} + +#[cfg(test)] +mod test { + use super::*; + + fn setup_db() -> CommittorDb { + let db = CommittorDb::new(":memory:").unwrap(); + db.create_commit_status_table().unwrap(); + db.create_bundle_signature_table().unwrap(); + db + } + + // ----------------- + // Commit Status + // ----------------- + fn create_commit_status_row(reqid: &str) -> CommitStatusRow { + CommitStatusRow { + reqid: reqid.to_string(), + pubkey: Pubkey::new_unique(), + delegated_account_owner: Pubkey::new_unique(), + slot: 100, + ephemeral_blockhash: Hash::new_unique(), + undelegate: false, + lamports: 100, + finalize: true, + data: None, + commit_type: CommitType::EmptyAccount, + created_at: 1000, + commit_status: CommitStatus::Pending, + last_retried_at: 1000, + retries_count: 0, + } + } + + #[test] + fn test_round_trip_commit_status_rows() { + let one_unbundled_commit_row_no_data = CommitStatusRow { + reqid: "req-123".to_string(), + pubkey: Pubkey::new_unique(), + delegated_account_owner: Pubkey::new_unique(), + slot: 100, + ephemeral_blockhash: Hash::new_unique(), + undelegate: false, + lamports: 100, + finalize: true, + data: None, + commit_type: CommitType::EmptyAccount, + created_at: 1000, + commit_status: CommitStatus::Pending, + last_retried_at: 1000, + retries_count: 0, + }; + + let two_bundled_commit_row_with_data = CommitStatusRow { + reqid: "req-123".to_string(), + pubkey: Pubkey::new_unique(), + delegated_account_owner: Pubkey::new_unique(), + slot: 100, + ephemeral_blockhash: Hash::new_unique(), + undelegate: false, + lamports: 2000, + finalize: true, + data: Some(vec![1, 2, 3]), + commit_type: CommitType::DataAccount, + created_at: 1000, + commit_status: CommitStatus::FailedProcess(( + 2, + CommitStrategy::Args, + None, + )), + last_retried_at: 1000, + 
retries_count: 0, + }; + + let mut db = setup_db(); + db.insert_commit_status_rows(&[ + one_unbundled_commit_row_no_data.clone(), + two_bundled_commit_row_with_data.clone(), + ]) + .unwrap(); + + let one = db + .get_commit_statuses_by_pubkey( + &one_unbundled_commit_row_no_data.pubkey, + ) + .unwrap(); + assert_eq!(one.len(), 1); + assert_eq!(one[0], one_unbundled_commit_row_no_data); + + let two = db + .get_commit_statuses_by_pubkey( + &two_bundled_commit_row_with_data.pubkey, + ) + .unwrap(); + assert_eq!(two.len(), 1); + assert_eq!(two[0], two_bundled_commit_row_with_data); + + let by_reqid = db + .get_commit_statuses_by_reqid( + &one_unbundled_commit_row_no_data.reqid, + ) + .unwrap(); + assert_eq!(by_reqid.len(), 2); + assert_eq!( + by_reqid, + [ + one_unbundled_commit_row_no_data, + two_bundled_commit_row_with_data + ] + ); + } + + #[test] + fn test_commits_with_reqid() { + let mut db = setup_db(); + const REQID_ONE: &str = "req-123"; + const REQID_TWO: &str = "req-456"; + + let commit_row_one = create_commit_status_row(REQID_ONE); + let commit_row_one_other = create_commit_status_row(REQID_ONE); + let commit_row_two = create_commit_status_row(REQID_TWO); + db.insert_commit_status_rows(&[ + commit_row_one.clone(), + commit_row_one_other.clone(), + commit_row_two.clone(), + ]) + .unwrap(); + + let commits_one = db.get_commit_statuses_by_reqid(REQID_ONE).unwrap(); + assert_eq!(commits_one.len(), 2); + assert_eq!(commits_one[0], commit_row_one); + assert_eq!(commits_one[1], commit_row_one_other); + + let commits_two = db.get_commit_statuses_by_reqid(REQID_TWO).unwrap(); + assert_eq!(commits_two.len(), 1); + assert_eq!(commits_two[0], commit_row_two); + + // Remove commits with REQID_ONE + db.remove_commit_statuses_with_reqid(REQID_ONE).unwrap(); + let commits_one_after_removal = + db.get_commit_statuses_by_reqid(REQID_ONE).unwrap(); + assert_eq!(commits_one_after_removal.len(), 0); + + let commits_two_after_removal = + 
db.get_commit_statuses_by_reqid(REQID_TWO).unwrap(); + assert_eq!(commits_two_after_removal.len(), 1); + } + + // ----------------- + // Bundle Signature and Commit Status Updates + // ----------------- + fn create_bundle_signature_row( + commit_status: &CommitStatus, + ) -> Option { + commit_status + .bundle_id() + .map(|bundle_id| BundleSignatureRow { + bundle_id, + processed_signature: Signature::new_unique(), + finalized_signature: None, + undelegate_signature: None, + created_at: 1000, + }) + } + + #[test] + fn test_upsert_bundle_signature() { + let mut db = setup_db(); + + let process_only = + BundleSignatureRow::new(1, Signature::new_unique(), None, None); + let process_finalize_and_undelegate = BundleSignatureRow::new( + 2, + Signature::new_unique(), + Some(Signature::new_unique()), + Some(Signature::new_unique()), + ); + + // Add two rows, one with finalize and undelegate signatures + { + let tx = db.conn.transaction().unwrap(); + CommittorDb::insert_bundle_signature(&tx, &process_only).unwrap(); + CommittorDb::insert_bundle_signature( + &tx, + &process_finalize_and_undelegate, + ) + .unwrap(); + tx.commit().unwrap(); + } + + // Ensure we update with finalized and undelegate sigs + let process_now_with_finalize_and_undelegate = { + let tx = db.conn.transaction().unwrap(); + let process_now_with_finalize = BundleSignatureRow::new( + process_only.bundle_id, + process_finalize_and_undelegate.processed_signature, + Some(Signature::new_unique()), + Some(Signature::new_unique()), + ); + CommittorDb::insert_bundle_signature( + &tx, + &process_now_with_finalize, + ) + .unwrap(); + tx.commit().unwrap(); + + process_now_with_finalize + }; + assert_eq!( + db.get_bundle_signature_by_bundle_id(1).unwrap().unwrap(), + process_now_with_finalize_and_undelegate + ); + + // Ensure we don't erase finalized/undelegate sigs + { + let tx = db.conn.transaction().unwrap(); + let finalizes_now_only_process = BundleSignatureRow::new( + process_finalize_and_undelegate.bundle_id, + 
process_finalize_and_undelegate.processed_signature, + None, + None, + ); + CommittorDb::insert_bundle_signature( + &tx, + &finalizes_now_only_process, + ) + .unwrap(); + tx.commit().unwrap(); + } + assert_eq!( + db.get_bundle_signature_by_bundle_id(2).unwrap().unwrap(), + process_finalize_and_undelegate + ); + } + + #[test] + fn test_update_commit_status() { + let mut db = setup_db(); + const REQID: &str = "req-123"; + + let failing_commit_row = create_commit_status_row(REQID); + let success_commit_row = create_commit_status_row(REQID); + db.insert_commit_status_rows(&[ + failing_commit_row.clone(), + success_commit_row.clone(), + ]) + .unwrap(); + + // Update the statuses + let new_failing_status = + CommitStatus::FailedProcess((22, CommitStrategy::FromBuffer, None)); + db.update_commit_status_and_bundle_signature( + &failing_commit_row.reqid, + &failing_commit_row.pubkey, + &new_failing_status, + None, + ) + .unwrap(); + let sigs = CommitStatusSignatures { + process_signature: Signature::new_unique(), + finalize_signature: None, + undelegate_signature: None, + }; + let new_success_status = + CommitStatus::Succeeded((33, CommitStrategy::Args, sigs)); + let success_signatures_row = + create_bundle_signature_row(&new_success_status); + let success_signatures = success_signatures_row.clone().unwrap(); + db.update_commit_status_and_bundle_signature( + &success_commit_row.reqid, + &success_commit_row.pubkey, + &new_success_status, + success_signatures_row, + ) + .unwrap(); + + // Verify the statuses were updated + let failed_commit_row = db + .get_commit_status(REQID, &failing_commit_row.pubkey) + .unwrap() + .unwrap(); + assert_eq!(failed_commit_row.commit_status, new_failing_status); + + let succeeded_commit_row = db + .get_commit_status(REQID, &success_commit_row.pubkey) + .unwrap() + .unwrap(); + assert_eq!(succeeded_commit_row.commit_status, new_success_status); + let signature_row = + db.get_bundle_signature_by_bundle_id(33).unwrap().unwrap(); + assert_eq!( + 
signature_row.processed_signature, + success_signatures.processed_signature, + ); + assert_eq!(signature_row.finalized_signature, None); + } +} diff --git a/magicblock-committor-service/src/persist/error.rs b/magicblock-committor-service/src/persist/error.rs new file mode 100644 index 00000000..4980225f --- /dev/null +++ b/magicblock-committor-service/src/persist/error.rs @@ -0,0 +1,38 @@ +use thiserror::Error; + +pub type CommitPersistResult = Result; + +#[derive(Error, Debug)] +pub enum CommitPersistError { + #[error("RusqliteError: '{0}' ({0:?})")] + RusqliteError(#[from] rusqlite::Error), + + #[error("ParsePubkeyError: '{0}' ({0:?})")] + ParsePubkeyError(#[from] solana_sdk::pubkey::ParsePubkeyError), + + #[error("ParseSignatureError: '{0}' ({0:?})")] + ParseSignatureError(#[from] solana_sdk::signature::ParseSignatureError), + + #[error("ParseHashError: '{0}' ({0:?})")] + ParseHahsError(#[from] solana_sdk::hash::ParseHashError), + + #[error("Invalid Commity Type: '{0}' ({0:?})")] + InvalidCommitType(String), + + #[error("Invalid Commit Status: '{0}' ({0:?})")] + InvalidCommitStatus(String), + + #[error( + "Commit Status update requires status with bundle id: '{0}' ({0:?})" + )] + CommitStatusUpdateRequiresStatusWithBundleId(String), + + #[error("Commit Status needs bundle id: '{0}' ({0:?})")] + CommitStatusNeedsBundleId(String), + + #[error("Commit Status needs signatures: '{0}' ({0:?})")] + CommitStatusNeedsSignatures(String), + + #[error("Commit Status needs commit strategy: '{0}' ({0:?})")] + CommitStatusNeedsStrategy(String), +} diff --git a/magicblock-committor-service/src/persist/mod.rs b/magicblock-committor-service/src/persist/mod.rs new file mode 100644 index 00000000..21a9005a --- /dev/null +++ b/magicblock-committor-service/src/persist/mod.rs @@ -0,0 +1,11 @@ +mod commit_persister; +mod db; +pub mod error; +mod types; +mod utils; + +pub use commit_persister::CommitPersister; +pub use db::{BundleSignatureRow, CommitStatusRow, CommittorDb}; +pub use 
types::{
+    CommitStatus, CommitStatusSignatures, CommitStrategy, CommitType,
+};
diff --git a/magicblock-committor-service/src/persist/types/commit_status.rs b/magicblock-committor-service/src/persist/types/commit_status.rs
new file mode 100644
index 00000000..0e6c74a3
--- /dev/null
+++ b/magicblock-committor-service/src/persist/types/commit_status.rs
@@ -0,0 +1,269 @@
+use std::fmt;
+
+use solana_sdk::signature::Signature;
+
+use crate::persist::error::CommitPersistError;
+
+use super::commit_strategy::CommitStrategy;
+
+/// The status of a committed account.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum CommitStatus {
+    /// We sent the request to commit this account, but haven't received a result yet.
+    Pending,
+    /// No part of the commit pipeline succeeded.
+    /// The commit for this account needs to be restarted from scratch.
+    Failed(u64),
+    /// The buffer and chunks accounts were initialized, but could either not
+    /// be retrieved or deserialized. It is recommended to fully re-initialize
+    /// them on retry.
+    BufferAndChunkPartiallyInitialized(u64),
+    /// The buffer and chunks accounts were initialized and could be
+    /// deserialized, however we did not complete writing to them.
+    /// We can reuse them on retry, but need to rewrite all chunks.
+    BufferAndChunkInitialized(u64),
+    /// The buffer and chunks accounts were initialized and all data was
+    /// written to them (for data accounts).
+    /// This means on retry we can skip that step and just try to process
+    /// these buffers to complete the commit.
+    BufferAndChunkFullyInitialized(u64),
+    /// The commit is part of a bundle that contains too many commits to be included
+    /// in a single transaction. Thus we cannot commit any of them.
+    PartOfTooLargeBundleToProcess(u64),
+    /// The commit was properly initialized and added to a chunk of instructions to process
+    /// commits via a transaction. For large commits the buffer and chunk accounts were properly
+    /// prepared and haven't been closed.
+ FailedProcess((u64, CommitStrategy, Option)), + /// The commit was properly processed but the requested finalize transaction failed. + FailedFinalize((u64, CommitStrategy, CommitStatusSignatures)), + /// The commit was properly processed and finalized but the requested undelegate transaction failed. + FailedUndelegate((u64, CommitStrategy, CommitStatusSignatures)), + /// The commit was successfully processed and finalized. + Succeeded((u64, CommitStrategy, CommitStatusSignatures)), +} + +impl fmt::Display for CommitStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + CommitStatus::Pending => write!(f, "Pending"), + CommitStatus::Failed(bundle_id) => { + write!(f, "Failed({})", bundle_id) + } + CommitStatus::BufferAndChunkPartiallyInitialized(bundle_id) => { + write!(f, "BufferAndChunkPartiallyInitialized({})", bundle_id) + } + CommitStatus::BufferAndChunkInitialized(bundle_id) => { + write!(f, "BufferAndChunkInitialized({})", bundle_id) + } + CommitStatus::BufferAndChunkFullyInitialized(bundle_id) => { + write!(f, "BufferAndChunkFullyInitialized({})", bundle_id) + } + CommitStatus::PartOfTooLargeBundleToProcess(bundle_id) => { + write!(f, "PartOfTooLargeBundleToProcess({})", bundle_id) + } + CommitStatus::FailedProcess((bundle_id, strategy, sigs)) => { + write!( + f, + "FailedProcess({}, {}, {:?})", + bundle_id, + strategy.as_str(), + sigs + ) + } + CommitStatus::FailedFinalize((bundle_id, strategy, sigs)) => { + write!( + f, + "FailedFinalize({}, {}, {:?})", + bundle_id, + strategy.as_str(), + sigs + ) + } + CommitStatus::FailedUndelegate((bundle_id, strategy, sigs)) => { + write!( + f, + "FailedUndelegate({}, {}, {:?})", + bundle_id, + strategy.as_str(), + sigs + ) + } + CommitStatus::Succeeded((bundle_id, strategy, sigs)) => { + write!( + f, + "Succeeded({}, {}, {:?})", + bundle_id, + strategy.as_str(), + sigs + ) + } + } + } +} + +impl + TryFrom<( + &str, + Option, + CommitStrategy, + Option, + )> for CommitStatus +{ + type 
Error = CommitPersistError; + + fn try_from( + (status, bundle_id, strategy, sigs): ( + &str, + Option, + CommitStrategy, + Option, + ), + ) -> Result { + macro_rules! get_bundle_id { + () => { + if let Some(bundle_id) = bundle_id { + bundle_id + } else { + return Err(CommitPersistError::CommitStatusNeedsBundleId( + status.to_string(), + )); + } + }; + } + macro_rules! get_sigs { + () => { + if let Some(sigs) = sigs { + sigs + } else { + return Err(CommitPersistError::CommitStatusNeedsBundleId( + status.to_string(), + )); + } + }; + } + + use CommitStatus::*; + match status { + "Pending" => Ok(Pending), + "Failed" => Ok(Failed(get_bundle_id!())), + "BufferAndChunkPartiallyInitialized" => { + Ok(BufferAndChunkPartiallyInitialized(get_bundle_id!())) + } + "BufferAndChunkInitialized" => { + Ok(BufferAndChunkInitialized(get_bundle_id!())) + } + "BufferAndChunkFullyInitialized" => { + Ok(BufferAndChunkFullyInitialized(get_bundle_id!())) + } + "PartOfTooLargeBundleToProcess" => { + Ok(PartOfTooLargeBundleToProcess(get_bundle_id!())) + } + "FailedProcess" => { + Ok(FailedProcess((get_bundle_id!(), strategy, sigs))) + } + "FailedFinalize" => { + Ok(FailedFinalize((get_bundle_id!(), strategy, get_sigs!()))) + } + "FailedUndelegate" => { + Ok(FailedUndelegate((get_bundle_id!(), strategy, get_sigs!()))) + } + "Succeeded" => { + Ok(Succeeded((get_bundle_id!(), strategy, get_sigs!()))) + } + _ => { + Err(CommitPersistError::InvalidCommitStatus(status.to_string())) + } + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CommitStatusSignatures { + /// The signature of the transaction processing the commit + pub process_signature: Signature, + /// The signature of the transaction finalizing the commit. + /// If the account was not finalized or it failed the this is `None`. + /// If the finalize instruction was part of the process transaction then + /// this signature is the same as [Self::process_signature]. 
+ pub finalize_signature: Option, + /// The signature of the transaction undelegating the committed accounts + /// if so requested. + /// If the account was not undelegated or it failed the this is `None`. + /// NOTE: this can be removed if we decide to perform the undelegation + /// step as part of the finalize instruction in the delegation program + pub undelegate_signature: Option, +} + +impl CommitStatus { + pub fn as_str(&self) -> &str { + use CommitStatus::*; + match self { + Pending => "Pending", + Failed(_) => "Failed", + BufferAndChunkPartiallyInitialized(_) => { + "BufferAndChunkPartiallyInitialized" + } + BufferAndChunkInitialized(_) => "BufferAndChunkInitialized", + BufferAndChunkFullyInitialized(_) => { + "BufferAndChunkFullyInitialized" + } + PartOfTooLargeBundleToProcess(_) => "PartOfTooLargeBundleToProcess", + FailedProcess(_) => "FailedProcess", + FailedFinalize(_) => "FailedFinalize", + FailedUndelegate(_) => "FailedUndelegate", + Succeeded(_) => "Succeeded", + } + } + + pub fn bundle_id(&self) -> Option { + use CommitStatus::*; + match self { + Failed(bundle_id) + | BufferAndChunkPartiallyInitialized(bundle_id) + | BufferAndChunkInitialized(bundle_id) + | BufferAndChunkFullyInitialized(bundle_id) + | PartOfTooLargeBundleToProcess(bundle_id) + | FailedProcess((bundle_id, _, _)) + | FailedFinalize((bundle_id, _, _)) + | FailedUndelegate((bundle_id, _, _)) + | Succeeded((bundle_id, _, _)) => Some(*bundle_id), + Pending => None, + } + } + + pub fn signatures(&self) -> Option { + use CommitStatus::*; + match self { + FailedProcess((_, _, sigs)) => sigs.as_ref().cloned(), + FailedFinalize((_, _, sigs)) => Some(sigs.clone()), + Succeeded((_, _, sigs)) => Some(sigs.clone()), + _ => None, + } + } + + pub fn commit_strategy(&self) -> CommitStrategy { + use CommitStatus::*; + match self { + Pending => CommitStrategy::Undetermined, + Failed(_) => CommitStrategy::Undetermined, + BufferAndChunkPartiallyInitialized(_) + | BufferAndChunkInitialized(_) + | 
BufferAndChunkFullyInitialized(_) => CommitStrategy::FromBuffer, + PartOfTooLargeBundleToProcess(_) => CommitStrategy::Undetermined, + FailedProcess((_, strategy, _)) => *strategy, + FailedFinalize((_, strategy, _)) => *strategy, + FailedUndelegate((_, strategy, _)) => *strategy, + Succeeded((_, strategy, _)) => *strategy, + } + } + + /// The commit fully succeeded and no retry is necessary. + pub fn is_complete(&self) -> bool { + use CommitStatus::*; + matches!(self, Succeeded(_)) + } + + pub fn all_completed(stages: &[Self]) -> bool { + stages.iter().all(Self::is_complete) + } +} diff --git a/magicblock-committor-service/src/persist/types/commit_strategy.rs b/magicblock-committor-service/src/persist/types/commit_strategy.rs new file mode 100644 index 00000000..8dc011d4 --- /dev/null +++ b/magicblock-committor-service/src/persist/types/commit_strategy.rs @@ -0,0 +1,54 @@ +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum CommitStrategy { + /// The commit strategy is not known yet + Undetermined, + /// Args without the use of a lookup table + Args, + /// Args with the use of a lookup table + ArgsWithLookupTable, + /// Buffer and chunks which has the most overhead + FromBuffer, + /// Buffer and chunks with the use of a lookup table + FromBufferWithLookupTable, +} + +impl CommitStrategy { + pub fn args(use_lookup: bool) -> Self { + if use_lookup { + Self::ArgsWithLookupTable + } else { + Self::Args + } + } + + pub fn as_str(&self) -> &str { + use CommitStrategy::*; + match self { + Undetermined => "Undetermined", + Args => "Args", + ArgsWithLookupTable => "ArgsWithLookupTable", + FromBuffer => "FromBuffer", + FromBufferWithLookupTable => "FromBufferWithLookupTable", + } + } + + pub fn uses_lookup(&self) -> bool { + matches!( + self, + CommitStrategy::ArgsWithLookupTable + | CommitStrategy::FromBufferWithLookupTable + ) + } +} + +impl From<&str> for CommitStrategy { + fn from(value: &str) -> Self { + match value { + "Args" => Self::Args, + 
"ArgsWithLookupTable" => Self::ArgsWithLookupTable, + "FromBuffer" => Self::FromBuffer, + "FromBufferWithLookupTable" => Self::FromBufferWithLookupTable, + _ => Self::Undetermined, + } + } +} diff --git a/magicblock-committor-service/src/persist/types/commit_type.rs b/magicblock-committor-service/src/persist/types/commit_type.rs new file mode 100644 index 00000000..96324456 --- /dev/null +++ b/magicblock-committor-service/src/persist/types/commit_type.rs @@ -0,0 +1,28 @@ +use crate::persist::error::CommitPersistError; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CommitType { + EmptyAccount, + DataAccount, +} + +impl TryFrom<&str> for CommitType { + type Error = CommitPersistError; + + fn try_from(value: &str) -> Result { + match value { + "EmptyAccount" => Ok(CommitType::EmptyAccount), + "DataAccount" => Ok(CommitType::DataAccount), + _ => Err(CommitPersistError::InvalidCommitType(value.to_string())), + } + } +} + +impl CommitType { + pub fn as_str(&self) -> &str { + match self { + CommitType::EmptyAccount => "EmptyAccount", + CommitType::DataAccount => "DataAccount", + } + } +} diff --git a/magicblock-committor-service/src/persist/types/mod.rs b/magicblock-committor-service/src/persist/types/mod.rs new file mode 100644 index 00000000..b0c68fa5 --- /dev/null +++ b/magicblock-committor-service/src/persist/types/mod.rs @@ -0,0 +1,7 @@ +mod commit_status; +mod commit_strategy; +mod commit_type; + +pub use commit_status::*; +pub use commit_strategy::*; +pub use commit_type::*; diff --git a/magicblock-committor-service/src/persist/utils.rs b/magicblock-committor-service/src/persist/utils.rs new file mode 100644 index 00000000..d5c3aaf6 --- /dev/null +++ b/magicblock-committor-service/src/persist/utils.rs @@ -0,0 +1,58 @@ +use std::time::{SystemTime, UNIX_EPOCH}; + +/// Fits a u64 into an i64, by mapping the range [0, i64::MAX] to itself, and +/// mapping the range [i64::MAX + 1, u64::MAX - 1] into the negative range of i64. 
+/// NOTE: this fails for u64::MAX +pub(crate) fn u64_into_i64(n: u64) -> i64 { + if n > i64::MAX as u64 { + -((n - i64::MAX as u64) as i64) + } else { + n as i64 + } +} + +/// Extracts a u64 that was fitted into an i64 by `u64_into_i64`. +pub(crate) fn i64_into_u64(n: i64) -> u64 { + if n < 0 { + n.unsigned_abs() + i64::MAX as u64 + } else { + n as u64 + } +} + +/// Gets the current timestamp in seconds since the Unix epoch +pub(crate) fn now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() +} + +#[cfg(test)] +mod tests { + use super::*; + + fn round_trip(u: u64) { + let i = u64_into_i64(u); + let u2 = i64_into_u64(i); + assert_eq!(u, u2); + } + + #[test] + fn test_u64_i64_conversion_via_round_trip() { + round_trip(0); + round_trip(1); + round_trip(i64::MAX as u64); + round_trip(i64::MAX as u64 + 1); + + // NOTE: the below which points out that we cannot round trip u64::MAX, + assert_eq!(i64::MAX as u64 * 2 + 1, u64::MAX); + + // This is the largest we can roundtrip + round_trip(u64::MAX - 1); + round_trip(i64::MAX as u64 * 2); + + // This would fail: + // round_trip(u64::MAX); + } +} diff --git a/magicblock-committor-service/src/pubkeys_provider.rs b/magicblock-committor-service/src/pubkeys_provider.rs new file mode 100644 index 00000000..595b5af2 --- /dev/null +++ b/magicblock-committor-service/src/pubkeys_provider.rs @@ -0,0 +1,75 @@ +use log::*; +use std::collections::HashSet; + +use dlp::pda; +use solana_pubkey::Pubkey; +use solana_sdk::system_program; + +/// Returns all accounts needed to process/finalize a commit for the account +/// with the provided `delegated_account`. 
+/// NOTE: that buffer and chunk accounts are different for each commit and +/// thus are not included +pub fn provide_committee_pubkeys( + committee: &Pubkey, + owner_program: Option<&Pubkey>, +) -> HashSet { + let mut set = HashSet::new(); + set.insert(*committee); + set.insert(pda::delegation_record_pda_from_delegated_account(committee)); + set.insert(pda::delegation_metadata_pda_from_delegated_account( + committee, + )); + set.insert(pda::commit_state_pda_from_delegated_account(committee)); + set.insert(pda::commit_record_pda_from_delegated_account(committee)); + set.insert(pda::undelegate_buffer_pda_from_delegated_account(committee)); + + // NOTE: ideally we'd also include the rent_fee_payer here, but that is + // not known to the validator at the time of cloning since it is + // stored inside the delegation metadata account instead of the + // delegation record + + if let Some(owner_program) = owner_program { + set.insert(pda::program_config_from_program_id(owner_program)); + } else { + warn!( + "No owner program provided for committee pubkey {}", + committee + ); + } + set +} + +/// Returns common accounts needed for process/finalize transactions, +/// namely the program ids used and the fees vaults and the validator itself. 
+pub fn provide_common_pubkeys(validator: &Pubkey) -> HashSet { + let mut set = HashSet::new(); + + let deleg_program = dlp::id(); + let protocol_fees_vault = pda::fees_vault_pda(); + let validator_fees_vault = + pda::validator_fees_vault_pda_from_validator(validator); + let committor_program = magicblock_committor_program::id(); + + trace!( + "Common pubkeys: + validator: {} + delegation program: {} + protoco fees vault: {} + validator fees vault: {} + committor program: {}", + validator, + deleg_program, + protocol_fees_vault, + validator_fees_vault, + committor_program + ); + + set.insert(*validator); + set.insert(system_program::id()); + set.insert(deleg_program); + set.insert(protocol_fees_vault); + set.insert(validator_fees_vault); + set.insert(committor_program); + + set +} diff --git a/magicblock-committor-service/src/service.rs b/magicblock-committor-service/src/service.rs new file mode 100644 index 00000000..1b74ba21 --- /dev/null +++ b/magicblock-committor-service/src/service.rs @@ -0,0 +1,367 @@ +use std::{fmt::Display, path::Path}; + +use log::*; +use magicblock_committor_program::Changeset; +use solana_pubkey::Pubkey; +use solana_sdk::hash::Hash; +use solana_sdk::signature::Keypair; +use tokio::{ + select, + sync::{ + mpsc::{self, error::TrySendError}, + oneshot, + }, +}; +use tokio_util::sync::CancellationToken; + +use crate::{ + commit::CommittorProcessor, + config::ChainConfig, + error::CommittorServiceResult, + persist::{BundleSignatureRow, CommitStatusRow}, + pubkeys_provider::{provide_committee_pubkeys, provide_common_pubkeys}, +}; + +#[derive(Debug)] +pub struct LookupTables { + pub active: Vec, + pub released: Vec, +} + +#[derive(Debug)] +pub enum CommittorMessage { + ReservePubkeysForCommittee { + /// Called once the pubkeys have been reserved + respond_to: oneshot::Sender>, + /// The comittee whose pubkeys to reserve in a lookup table + /// These pubkeys are used to process/finalize the commit + committee: Pubkey, + /// The owner program of 
the committee + owner: Pubkey, + }, + ReserveCommonPubkeys { + /// Called once the pubkeys have been reserved + respond_to: oneshot::Sender>, + }, + ReleaseCommonPubkeys { + /// Called once the pubkeys have been released + respond_to: oneshot::Sender<()>, + }, + CommitChangeset { + /// Called once the changeset has been committed + respond_to: oneshot::Sender>, + /// The changeset to commit + changeset: Changeset, + /// The blockhash in the ephemeral at the time the commit was requested + ephemeral_blockhash: Hash, + /// If `true`, account commits will be finalized after they were processed + finalize: bool, + }, + GetCommitStatuses { + respond_to: + oneshot::Sender>>, + reqid: String, + }, + GetBundleSignatures { + respond_to: + oneshot::Sender>>, + bundle_id: u64, + }, + GetLookupTables { + respond_to: oneshot::Sender, + }, +} + +// ----------------- +// CommittorActor +// ----------------- +struct CommittorActor { + receiver: mpsc::Receiver, + processor: CommittorProcessor, +} + +impl CommittorActor { + pub fn try_new

( + receiver: mpsc::Receiver, + authority: Keypair, + persist_file: P, + chain_config: ChainConfig, + ) -> CommittorServiceResult + where + P: AsRef, + { + let processor = + CommittorProcessor::try_new(authority, persist_file, chain_config)?; + Ok(Self { + receiver, + processor, + }) + } + + async fn handle_msg(&self, msg: CommittorMessage) { + use CommittorMessage::*; + match msg { + ReservePubkeysForCommittee { + respond_to, + committee, + owner, + } => { + let pubkeys = + provide_committee_pubkeys(&committee, Some(&owner)); + let reqid = self.processor.reserve_pubkeys(pubkeys).await; + if let Err(e) = respond_to.send(reqid) { + error!("Failed to send response {:?}", e); + } + } + ReserveCommonPubkeys { respond_to } => { + let pubkeys = + provide_common_pubkeys(&self.processor.auth_pubkey()); + let reqid = self.processor.reserve_pubkeys(pubkeys).await; + if let Err(e) = respond_to.send(reqid) { + error!("Failed to send response {:?}", e); + } + } + ReleaseCommonPubkeys { respond_to } => { + let pubkeys = + provide_common_pubkeys(&self.processor.auth_pubkey()); + self.processor.release_pubkeys(pubkeys).await; + if let Err(e) = respond_to.send(()) { + error!("Failed to send response {:?}", e); + } + } + CommitChangeset { + changeset, + ephemeral_blockhash, + respond_to, + finalize, + } => { + let reqid = self + .processor + .commit_changeset(changeset, finalize, ephemeral_blockhash) + .await; + if let Err(e) = respond_to.send(reqid) { + error!("Failed to send response {:?}", e); + } + } + GetCommitStatuses { reqid, respond_to } => { + let commit_statuses = + self.processor.get_commit_statuses(&reqid); + if let Err(e) = respond_to.send(commit_statuses) { + error!("Failed to send response {:?}", e); + } + } + GetBundleSignatures { + bundle_id, + respond_to, + } => { + let sig = self.processor.get_signature(bundle_id); + if let Err(e) = respond_to.send(sig) { + error!("Failed to send response {:?}", e); + } + } + GetLookupTables { respond_to } => { + let active_tables 
= self.processor.active_lookup_tables().await; + let released_tables = + self.processor.released_lookup_tables().await; + if let Err(e) = respond_to.send(LookupTables { + active: active_tables, + released: released_tables, + }) { + error!("Failed to send response {:?}", e); + } + } + } + } + + pub async fn run(&mut self, cancel_token: CancellationToken) { + loop { + select! { + msg = self.receiver.recv() => { + if let Some(msg) = msg { + self.handle_msg(msg).await; + } else { + break; + } + } + _ = cancel_token.cancelled() => { + break; + } + } + } + } +} + +// ----------------- +// CommittorService +// ----------------- +pub struct CommittorService { + sender: mpsc::Sender, + cancel_token: CancellationToken, +} + +impl CommittorService { + pub fn try_start

( + authority: Keypair, + persist_file: P, + chain_config: ChainConfig, + ) -> CommittorServiceResult + where + P: Display + AsRef, + { + debug!( + "Starting committor service with config: {:?}, persisting to: {}", + chain_config, persist_file + ); + let (sender, receiver) = mpsc::channel(1_000); + let cancel_token = CancellationToken::new(); + { + let cancel_token = cancel_token.clone(); + let mut actor = CommittorActor::try_new( + receiver, + authority, + persist_file, + chain_config, + )?; + tokio::spawn(async move { + actor.run(cancel_token).await; + }); + } + Ok(Self { + sender, + cancel_token, + }) + } + + pub fn reserve_common_pubkeys( + &self, + ) -> oneshot::Receiver> { + let (tx, rx) = oneshot::channel(); + self.try_send(CommittorMessage::ReserveCommonPubkeys { + respond_to: tx, + }); + rx + } + + pub fn release_common_pubkeys(&self) -> oneshot::Receiver<()> { + let (tx, rx) = oneshot::channel(); + self.try_send(CommittorMessage::ReleaseCommonPubkeys { + respond_to: tx, + }); + rx + } + + pub fn get_bundle_signatures( + &self, + bundle_id: u64, + ) -> oneshot::Receiver>> + { + let (tx, rx) = oneshot::channel(); + self.try_send(CommittorMessage::GetBundleSignatures { + respond_to: tx, + bundle_id, + }); + rx + } + + pub fn get_lookup_tables(&self) -> oneshot::Receiver { + let (tx, rx) = oneshot::channel(); + self.try_send(CommittorMessage::GetLookupTables { respond_to: tx }); + rx + } + + pub fn stop(&self) { + self.cancel_token.cancel(); + } + + fn try_send(&self, msg: CommittorMessage) { + if let Err(TrySendError::Full(msg)) = self.sender.try_send(msg) { + error!("Failed to send commit message {:?}", msg); + } + } +} + +impl ChangesetCommittor for CommittorService { + fn reserve_pubkeys_for_committee( + &self, + committee: Pubkey, + owner: Pubkey, + ) -> oneshot::Receiver> { + let (tx, rx) = oneshot::channel(); + self.try_send(CommittorMessage::ReservePubkeysForCommittee { + respond_to: tx, + committee, + owner, + }); + rx + } + + fn commit_changeset( + 
&self, + changeset: Changeset, + ephemeral_blockhash: Hash, + finalize: bool, + ) -> oneshot::Receiver> { + let (tx, rx) = oneshot::channel(); + self.try_send(CommittorMessage::CommitChangeset { + respond_to: tx, + changeset, + ephemeral_blockhash, + finalize, + }); + rx + } + + fn get_commit_statuses( + &self, + reqid: String, + ) -> oneshot::Receiver>> { + let (tx, rx) = oneshot::channel(); + self.try_send(CommittorMessage::GetCommitStatuses { + respond_to: tx, + reqid, + }); + rx + } + + fn get_bundle_signatures( + &self, + bundle_id: u64, + ) -> oneshot::Receiver>> + { + let (tx, rx) = oneshot::channel(); + self.try_send(CommittorMessage::GetBundleSignatures { + respond_to: tx, + bundle_id, + }); + rx + } +} + +pub trait ChangesetCommittor: Send + Sync + 'static { + /// Reserves pubkeys used in most commits in a lookup table + fn reserve_pubkeys_for_committee( + &self, + committee: Pubkey, + owner: Pubkey, + ) -> oneshot::Receiver>; + + /// Commits the changeset and returns the reqid + fn commit_changeset( + &self, + changeset: Changeset, + ephemeral_blockhash: Hash, + finalize: bool, + ) -> oneshot::Receiver>; + + /// Gets statuses of accounts that were committed as part of a request with provided reqid + fn get_commit_statuses( + &self, + reqid: String, + ) -> oneshot::Receiver>>; + + /// Gets signatures of commits processed as part of the bundle with the provided bundle_id + fn get_bundle_signatures( + &self, + bundle_id: u64, + ) -> oneshot::Receiver>>; +} diff --git a/magicblock-committor-service/src/stubs/changeset_committor_stub.rs b/magicblock-committor-service/src/stubs/changeset_committor_stub.rs new file mode 100644 index 00000000..a618ee90 --- /dev/null +++ b/magicblock-committor-service/src/stubs/changeset_committor_stub.rs @@ -0,0 +1,140 @@ +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, Mutex, + }, + time::{SystemTime, UNIX_EPOCH}, +}; + +use magicblock_committor_program::Changeset; +use 
solana_pubkey::Pubkey; +use tokio::sync::oneshot; + +use crate::{ + error::CommittorServiceResult, + persist::{ + BundleSignatureRow, CommitStatus, CommitStatusRow, + CommitStatusSignatures, CommitStrategy, CommitType, + }, + ChangesetCommittor, +}; +use solana_sdk::{hash::Hash, signature::Signature}; + +#[derive(Default)] +pub struct ChangesetCommittorStub { + reserved_pubkeys_for_committee: Arc>>, + #[allow(clippy::type_complexity)] + committed_changesets: Arc>>, +} + +impl ChangesetCommittor for ChangesetCommittorStub { + fn commit_changeset( + &self, + changeset: Changeset, + ephemeral_blockhash: Hash, + finalize: bool, + ) -> oneshot::Receiver> { + static REQ_ID: AtomicU64 = AtomicU64::new(0); + let reqid = REQ_ID.fetch_add(1, Ordering::Relaxed); + let (tx, rx) = tokio::sync::oneshot::channel(); + self.committed_changesets + .lock() + .unwrap() + .insert(reqid, (changeset, ephemeral_blockhash, finalize)); + tx.send(Some(reqid.to_string())).unwrap_or_else(|_| { + log::error!("Failed to send commit changeset response"); + }); + rx + } + + fn get_commit_statuses( + &self, + reqid: String, + ) -> oneshot::Receiver>> { + let reqid = reqid.parse::().unwrap(); + let commit = self.committed_changesets.lock().unwrap().remove(&reqid); + let (tx, rx) = tokio::sync::oneshot::channel(); + let Some((changeset, hash, finalize)) = commit else { + tx.send(Ok(vec![])).unwrap_or_else(|_| { + log::error!("Failed to send commit status response"); + }); + return rx; + }; + let status_rows = changeset + .accounts + .iter() + .map(|(pubkey, acc)| CommitStatusRow { + reqid: reqid.to_string(), + pubkey: *pubkey, + delegated_account_owner: acc.owner(), + slot: changeset.slot, + ephemeral_blockhash: hash, + undelegate: changeset.accounts_to_undelegate.contains(pubkey), + lamports: acc.lamports(), + finalize, + data: Some(acc.data().to_vec()), + commit_type: CommitType::DataAccount, + created_at: now(), + commit_status: CommitStatus::Succeeded(( + reqid, + CommitStrategy::FromBuffer, + 
CommitStatusSignatures { + process_signature: Signature::new_unique(), + finalize_signature: Some(Signature::new_unique()), + undelegate_signature: None, + }, + )), + last_retried_at: now(), + retries_count: 0, + }) + .collect(); + tx.send(Ok(status_rows)).unwrap_or_else(|_| { + log::error!("Failed to send commit status response"); + }); + rx + } + + fn get_bundle_signatures( + &self, + bundle_id: u64, + ) -> tokio::sync::oneshot::Receiver< + crate::error::CommittorServiceResult>, + > { + let (tx, rx) = tokio::sync::oneshot::channel(); + let bundle_signature = BundleSignatureRow { + bundle_id, + processed_signature: Signature::new_unique(), + finalized_signature: Some(Signature::new_unique()), + undelegate_signature: None, + created_at: now(), + }; + tx.send(Ok(Some(bundle_signature))).unwrap_or_else(|_| { + log::error!("Failed to send bundle signatures response"); + }); + rx + } + + fn reserve_pubkeys_for_committee( + &self, + committee: Pubkey, + owner: Pubkey, + ) -> oneshot::Receiver> { + let (tx, rx) = + tokio::sync::oneshot::channel::>(); + self.reserved_pubkeys_for_committee + .lock() + .unwrap() + .insert(committee, owner); + tx.send(Ok(())).unwrap_or_else(|_| { + log::error!("Failed to send response"); + }); + rx + } +} +fn now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() +} diff --git a/magicblock-committor-service/src/stubs/mod.rs b/magicblock-committor-service/src/stubs/mod.rs new file mode 100644 index 00000000..9cfb6e45 --- /dev/null +++ b/magicblock-committor-service/src/stubs/mod.rs @@ -0,0 +1,2 @@ +mod changeset_committor_stub; +pub use changeset_committor_stub::ChangesetCommittorStub; diff --git a/magicblock-committor-service/src/transactions.rs b/magicblock-committor-service/src/transactions.rs new file mode 100644 index 00000000..fa53f03f --- /dev/null +++ b/magicblock-committor-service/src/transactions.rs @@ -0,0 +1,778 @@ +use std::collections::HashSet; + +use 
base64::{prelude::BASE64_STANDARD, Engine}; +use dlp::args::{CommitStateArgs, CommitStateFromBufferArgs}; +use magicblock_committor_program::{ + instruction::{create_close_ix, CreateCloseIxArgs}, + ChangedBundle, +}; +use solana_pubkey::Pubkey; +use solana_rpc_client::rpc_client::SerializableTransaction; +use solana_sdk::hash::Hash; +use solana_sdk::instruction::Instruction; +use solana_sdk::message::v0::Message; +use solana_sdk::message::{AddressLookupTableAccount, VersionedMessage}; +use solana_sdk::signature::Keypair; +use solana_sdk::signer::Signer; +use solana_sdk::transaction::VersionedTransaction; +use static_assertions::const_assert; + +use crate::error::{CommittorServiceError, CommittorServiceResult}; + +/// From agave rpc/src/rpc.rs [MAX_BASE64_SIZE] +pub(crate) const MAX_ENCODED_TRANSACTION_SIZE: usize = 1644; + +/// How many process and commit buffer instructions fit into a single transaction +#[allow(unused)] // serves as documentation as well +pub(crate) const MAX_PROCESS_PER_TX: u8 = 3; + +/// How many process and commit buffer instructions fit into a single transaction +/// when using address lookup tables but not including the buffer account in the +/// lookup table +#[allow(unused)] // serves as documentation as well +pub(crate) const MAX_PROCESS_PER_TX_USING_LOOKUP: u8 = 12; + +/// How many close buffer instructions fit into a single transaction +#[allow(unused)] // serves as documentation as well +pub(crate) const MAX_CLOSE_PER_TX: u8 = 7; + +/// How many close buffer instructions fit into a single transaction +/// when using address lookup tables but not including the buffer account +/// nor chunk account in the lookup table +#[allow(unused)] // serves as documentation as well +pub(crate) const MAX_CLOSE_PER_TX_USING_LOOKUP: u8 = 7; + +/// How many process and commit buffer instructions combined with close buffer instructions +/// fit into a single transaction +#[allow(unused)] // serves as documentation as well +pub(crate) const 
MAX_PROCESS_AND_CLOSE_PER_TX: u8 = 2; + +/// How many process and commit buffer instructions combined with +/// close buffer instructions fit into a single transaction when +/// using lookup tables but not including the buffer account +#[allow(unused)] // serves as documentation as well +pub(crate) const MAX_PROCESS_AND_CLOSE_PER_TX_USING_LOOKUP: u8 = 4; + +/// How many finalize instructions fit into a single transaction +#[allow(unused)] // serves as documentation as well +pub(crate) const MAX_FINALIZE_PER_TX: u8 = 5; + +/// How many finalize instructions fit into a single transaction +/// when using address lookup tables +#[allow(unused)] // serves as documentation as well +pub(crate) const MAX_FINALIZE_PER_TX_USING_LOOKUP: u8 = 48; + +/// How many undelegate instructions fit into a single transaction +/// NOTE: that we assume the rent reimbursement account to be the delegated account +#[allow(unused)] // serves as documentation as well +pub(crate) const MAX_UNDELEGATE_PER_TX: u8 = 3; + +/// How many undelegate instructions fit into a single transaction +/// when using address lookup tables +/// NOTE: that we assume the rent reimbursement account to be the delegated account +#[allow(unused)] // serves as documentation as well +pub(crate) const MAX_UNDELEGATE_PER_TX_USING_LOOKUP: u8 = 16; + +// Allows us to run undelegate instructions without rechunking them since we know +// that we didn't process more than we also can undelegate +const_assert!(MAX_PROCESS_PER_TX <= MAX_UNDELEGATE_PER_TX,); + +// Allows us to run undelegate instructions using lookup tables without rechunking +// them since we know that we didn't process more than we also can undelegate +const_assert!( + MAX_PROCESS_PER_TX_USING_LOOKUP <= MAX_UNDELEGATE_PER_TX_USING_LOOKUP +); + +// ----------------- +// Process Commitables using Args or Buffer +// ----------------- +pub(crate) struct CommitTxReport { + /// Size of the transaction without lookup tables. 
+ pub size_args: usize, + + /// The size of the transaction including the finalize instruction + /// when not using lookup tables; set when the `finalize` param of + /// [commit_tx_report] is `true`. + pub size_args_including_finalize: Option, + + /// If the bundle fits into a single transaction using buffers without + /// using lookup tables. + /// This does not depend on the size of the data, but only the number of + /// accounts in the bundle. + pub fits_buffer: bool, + + /// If the bundle fits into a single transaction using buffers using lookup tables. + /// This does not depend on the size of the data, but only the number of + /// accounts in the bundle. + pub fits_buffer_using_lookup: bool, + + /// Size of the transaction when using lookup tables. + /// This is only determined if the [CommitTxReport::size_args] is larger than + /// [MAX_ENCODED_TRANSACTION_SIZE]. + pub size_args_with_lookup: Option, + + /// The size of the transaction including the finalize instruction + /// when using lookup tables + /// This is only determined if the [CommitTxReport::size_args_including_finalize] + /// is larger than [MAX_ENCODED_TRANSACTION_SIZE]. 
+ pub size_args_with_lookup_including_finalize: Option, +} + +pub(crate) fn commit_tx_report( + bundle: &ChangedBundle, + finalize: bool, +) -> CommittorServiceResult { + let auth = Keypair::new(); + + let ixs = bundle + .iter() + .map(|(_, account)| { + let args = CommitStateArgs { + // TODO(thlorenz): this is expensive, but seems unavoidable in order to reliably + // calculate the size of the transaction + data: account.data().to_vec(), + ..CommitStateArgs::default() + }; + dlp::instruction_builder::commit_state( + auth.pubkey(), + Pubkey::new_unique(), + Pubkey::new_unique(), + args, + ) + }) + .collect::>(); + + let size = encoded_tx_size(&auth, &ixs, &TransactionOpts::NoLookupTable)?; + let size_with_lookup = (size > MAX_ENCODED_TRANSACTION_SIZE) + .then(|| encoded_tx_size(&auth, &ixs, &TransactionOpts::UseLookupTable)) + .transpose()?; + + if finalize { + let mut ixs = ixs.clone(); + let finalize_ixs = bundle.iter().map(|(pubkey, _)| { + dlp::instruction_builder::finalize(auth.pubkey(), *pubkey) + }); + ixs.extend(finalize_ixs); + + let size_including_finalize = + encoded_tx_size(&auth, &ixs, &TransactionOpts::NoLookupTable)?; + let size_with_lookup_including_finalize = (size_including_finalize + > MAX_ENCODED_TRANSACTION_SIZE) + .then(|| { + encoded_tx_size(&auth, &ixs, &TransactionOpts::UseLookupTable) + }) + .transpose()?; + + Ok(CommitTxReport { + size_args: size, + fits_buffer: bundle.len() <= MAX_PROCESS_PER_TX as usize, + fits_buffer_using_lookup: bundle.len() + <= MAX_PROCESS_PER_TX_USING_LOOKUP as usize, + size_args_with_lookup: size_with_lookup, + size_args_including_finalize: Some(size_including_finalize), + size_args_with_lookup_including_finalize: + size_with_lookup_including_finalize, + }) + } else { + Ok(CommitTxReport { + size_args: size, + fits_buffer: bundle.len() <= MAX_PROCESS_PER_TX as usize, + fits_buffer_using_lookup: bundle.len() + <= MAX_PROCESS_PER_TX_USING_LOOKUP as usize, + size_args_including_finalize: None, + 
size_args_with_lookup: size_with_lookup, + size_args_with_lookup_including_finalize: None, + }) + } +} + +// ----------------- +// Process Commitables and Close Buffers +// ----------------- +pub(crate) fn process_commits_ix( + validator_auth: Pubkey, + pubkey: &Pubkey, + delegated_account_owner: &Pubkey, + buffer_pda: &Pubkey, + commit_args: CommitStateFromBufferArgs, +) -> Instruction { + dlp::instruction_builder::commit_state_from_buffer( + validator_auth, + *pubkey, + *delegated_account_owner, + *buffer_pda, + commit_args, + ) +} + +pub(crate) fn close_buffers_ix( + validator_auth: Pubkey, + pubkey: &Pubkey, + ephemeral_blockhash: &Hash, +) -> Instruction { + create_close_ix(CreateCloseIxArgs { + authority: validator_auth, + pubkey: *pubkey, + blockhash: *ephemeral_blockhash, + }) +} + +pub(crate) fn process_and_close_ixs( + validator_auth: Pubkey, + pubkey: &Pubkey, + delegated_account_owner: &Pubkey, + buffer_pda: &Pubkey, + ephemeral_blockhash: &Hash, + commit_args: CommitStateFromBufferArgs, +) -> Vec { + let process_ix = process_commits_ix( + validator_auth, + pubkey, + delegated_account_owner, + buffer_pda, + commit_args, + ); + let close_ix = + close_buffers_ix(validator_auth, pubkey, ephemeral_blockhash); + + vec![process_ix, close_ix] +} + +// ----------------- +// Finalize +// ----------------- +pub(crate) fn finalize_ix( + validator_auth: Pubkey, + pubkey: &Pubkey, +) -> Instruction { + dlp::instruction_builder::finalize(validator_auth, *pubkey) +} + +// ----------------- +// Helpers +// ----------------- +#[allow(clippy::enum_variant_names)] +enum TransactionOpts { + NoLookupTable, + UseLookupTable, +} +fn encoded_tx_size( + auth: &Keypair, + ixs: &[Instruction], + opts: &TransactionOpts, +) -> CommittorServiceResult { + use CommittorServiceError::*; + use TransactionOpts::*; + let lookup_tables = match opts { + NoLookupTable => vec![], + UseLookupTable => get_lookup_tables(ixs), + }; + + let versioned_msg = Message::try_compile( + &auth.pubkey(), + 
ixs, + &lookup_tables, + Hash::default(), + ) + .map_err(|err| { + FailedToCompileTransactionMessage( + "Calculating transaction size".to_string(), + err, + ) + })?; + let versioned_tx = VersionedTransaction::try_new( + VersionedMessage::V0(versioned_msg), + &[&auth], + ) + .map_err(|err| { + FailedToCreateTransaction( + "Calculating transaction size".to_string(), + err, + ) + })?; + + let encoded = serialize_and_encode_base64(&versioned_tx); + Ok(encoded.len()) +} + +fn serialize_and_encode_base64( + transaction: &impl SerializableTransaction, +) -> String { + // SAFETY: runs statically + let serialized = bincode::serialize(transaction).unwrap(); + BASE64_STANDARD.encode(serialized) +} + +fn get_lookup_tables(ixs: &[Instruction]) -> Vec { + let pubkeys = ixs + .iter() + .flat_map(|ix| ix.accounts.iter().map(|acc| acc.pubkey)) + .collect::>(); + + let lookup_table = AddressLookupTableAccount { + key: Pubkey::default(), + addresses: pubkeys.into_iter().collect(), + }; + vec![lookup_table] +} + +#[cfg(test)] +mod test { + use crate::{ + compute_budget::{Budget, ComputeBudget}, + pubkeys_provider::{provide_committee_pubkeys, provide_common_pubkeys}, + }; + + use super::*; + + use dlp::args::{CommitStateArgs, CommitStateFromBufferArgs}; + use lazy_static::lazy_static; + use solana_pubkey::Pubkey; + use solana_sdk::{ + address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES, + hash::Hash, + instruction::Instruction, + message::{v0::Message, AddressLookupTableAccount, VersionedMessage}, + signature::Keypair, + signer::Signer, + transaction::VersionedTransaction, + }; + + // These tests statically determine the optimal ix count to fit into a single + // transaction and assert that the const we export in prod match those numbers. + // Thus when an instruction changes and one of those numbers with it a failing + // test alerts us. + // This is less overhead than running those static functions each time at + // startup. 
+ + #[test] + fn test_max_process_per_tx() { + assert_eq!(super::MAX_PROCESS_PER_TX, *MAX_PROCESS_PER_TX); + assert_eq!( + super::MAX_PROCESS_PER_TX_USING_LOOKUP, + *MAX_PROCESS_PER_TX_USING_LOOKUP + ); + } + + #[test] + fn test_max_close_per_tx() { + assert_eq!(super::MAX_CLOSE_PER_TX, *MAX_CLOSE_PER_TX); + assert_eq!( + super::MAX_CLOSE_PER_TX_USING_LOOKUP, + *MAX_CLOSE_PER_TX_USING_LOOKUP + ); + } + + #[test] + fn test_max_process_and_closes_per_tx() { + assert_eq!( + super::MAX_PROCESS_AND_CLOSE_PER_TX, + *MAX_PROCESS_AND_CLOSE_PER_TX + ); + assert_eq!( + super::MAX_PROCESS_AND_CLOSE_PER_TX_USING_LOOKUP, + *MAX_PROCESS_AND_CLOSE_PER_TX_USING_LOOKUP + ); + } + + #[test] + fn test_max_finalize_per_tx() { + assert_eq!(super::MAX_FINALIZE_PER_TX, *MAX_FINALIZE_PER_TX); + assert_eq!( + super::MAX_FINALIZE_PER_TX_USING_LOOKUP, + *MAX_FINALIZE_PER_TX_USING_LOOKUP + ); + } + + #[test] + fn test_max_undelegate_per_tx() { + assert_eq!(super::MAX_UNDELEGATE_PER_TX, *MAX_UNDELEGATE_PER_TX); + assert_eq!( + super::MAX_UNDELEGATE_PER_TX_USING_LOOKUP, + *MAX_UNDELEGATE_PER_TX_USING_LOOKUP + ); + } + + // ----------------- + // Process Commitables using Args + // ----------------- + #[test] + fn test_log_commit_args_ix_sizes() { + // This test is used to investigate the size of the transaction related to + // the amount of committed accounts and their data size. 
+ fn run(auth: &Keypair, ixs: usize) { + let mut tx_lines = vec![]; + use TransactionOpts::*; + for tx_opts in [NoLookupTable, UseLookupTable] { + let mut tx_sizes = vec![]; + for size in [0, 10, 20, 50, 100, 200, 500, 1024] { + let ixs = (0..ixs) + .map(|_| make_ix(auth, size)) + .collect::>(); + + let tx_size = + encoded_tx_size(auth, &ixs, &tx_opts).unwrap(); + tx_sizes.push((size, tx_size)); + } + tx_lines.push(tx_sizes); + } + let sizes = tx_lines + .into_iter() + .map(|line| { + line.into_iter() + .map(|(size, len)| format!("{:4}:{:5}", size, len)) + .collect::>() + .join("|") + }) + .collect::>() + .join("\n"); + eprintln!("{:3} ixs:\n{}", ixs, sizes); + } + fn make_ix(auth: &Keypair, data_size: usize) -> Instruction { + let data = vec![1; data_size]; + let args = CommitStateArgs { + data, + ..CommitStateArgs::default() + }; + dlp::instruction_builder::commit_state( + auth.pubkey(), + Pubkey::new_unique(), + Pubkey::new_unique(), + args, + ) + } + + let auth = &Keypair::new(); + run(auth, 0); + run(auth, 1); + run(auth, 2); + run(auth, 5); + run(auth, 8); + run(auth, 10); + run(auth, 15); + run(auth, 20); + /* + 0 ixs: + 0: 184| 10: 184| 20: 184| 50: 184| 100: 184| 200: 184| 500: 184|1024: 184 + 0: 184| 10: 184| 20: 184| 50: 184| 100: 184| 200: 184| 500: 184|1024: 184 + 1 ixs: + 0: 620| 10: 636| 20: 648| 50: 688| 100: 756| 200: 888| 500: 1288|1024: 1988 + 0: 336| 10: 348| 20: 364| 50: 404| 100: 472| 200: 604| 500: 1004|1024: 1704 + 2 ixs: + 0: 932| 10: 960| 20: 984| 50: 1064| 100: 1200| 200: 1468| 500: 2268|1024: 3664 + 0: 400| 10: 424| 20: 452| 50: 532| 100: 668| 200: 936| 500: 1736|1024: 3132 + 5 ixs: + 0: 1864| 10: 1932| 20: 1996| 50: 2196| 100: 2536| 200: 3204| 500: 5204|1024: 8696 + 0: 588| 10: 652| 20: 720| 50: 920| 100: 1260| 200: 1928| 500: 3928|1024: 7420 + 8 ixs: + 0: 2796| 10: 2904| 20: 3008| 50: 3328| 100: 3872| 200: 4940| 500: 8140|1024:13728 + 0: 776| 10: 880| 20: 988| 50: 1308| 100: 1852| 200: 2920| 500: 6120|1024:11708 + 10 ixs: + 0: 3416| 
10: 3552| 20: 3684| 50: 4084| 100: 4764| 200: 6096| 500:10096|1024:17084 + 0: 900| 10: 1032| 20: 1168| 50: 1568| 100: 2248| 200: 3580| 500: 7580|1024:14568 + 15 ixs: + 0: 4972| 10: 5172| 20: 5372| 50: 5972| 100: 6992| 200: 8992| 500:14992|1024:25472 + 0: 1212| 10: 1412| 20: 1612| 50: 2212| 100: 3232| 200: 5232| 500:11232|1024:21712 + 20 ixs: + 0: 6524| 10: 6792| 20: 7056| 50: 7856| 100: 9216| 200:11884| 500:19884|1024:33856 + 0: 1528| 10: 1792| 20: 2060| 50: 2860| 100: 4220| 200: 6888| 500:14888|1024:28860 + + Legend: + + x ixs: + data size/ix: encoded size | ... + data size/ix: encoded size | ... (using lookup tables) + + Given that max transaction size is 1644 bytes, we can see that the max data size is: + + - 1 ixs: slightly larger than 500 bytes + - 2 ixs: slightly larger than 200 bytes + - 5 ixs: slightly larger than 100 bytes + - 8 ixs: slightly larger than 50 bytes + - 10 ixs: slightly larger than 20 bytes + - 15 ixs: slightly larger than 10 bytes + - 20 ixs: no data supported (only lamport changes) + + Also it is clear that using a lookup table makes a huge difference especially if we commit + lots of different accounts. + */ + } + + // ----------------- + // Process Commitables and Close Buffers + // ----------------- + lazy_static! 
{ + pub(crate) static ref MAX_PROCESS_PER_TX: u8 = { + max_chunks_per_transaction("Max process per tx", |auth_pubkey| { + let pubkey = Pubkey::new_unique(); + let delegated_account_owner = Pubkey::new_unique(); + let buffer_pda = Pubkey::new_unique(); + let commit_args = CommitStateFromBufferArgs::default(); + vec![super::process_commits_ix( + auth_pubkey, + &pubkey, + &delegated_account_owner, + &buffer_pda, + commit_args, + )] + }) + }; + pub(crate) static ref MAX_PROCESS_PER_TX_USING_LOOKUP: u8 = { + max_chunks_per_transaction_using_lookup_table( + "Max process per tx using lookup", + |auth_pubkey, committee, delegated_account_owner| { + let buffer_pda = Pubkey::new_unique(); + let commit_args = CommitStateFromBufferArgs::default(); + vec![super::process_commits_ix( + auth_pubkey, + &committee, + &delegated_account_owner, + &buffer_pda, + commit_args, + )] + }, + None, + ) + }; + pub(crate) static ref MAX_CLOSE_PER_TX: u8 = { + let ephemeral_blockhash = Hash::default(); + max_chunks_per_transaction("Max close per tx", |auth_pubkey| { + let pubkey = Pubkey::new_unique(); + vec![super::close_buffers_ix( + auth_pubkey, + &pubkey, + &ephemeral_blockhash, + )] + }) + }; + pub(crate) static ref MAX_CLOSE_PER_TX_USING_LOOKUP: u8 = { + let ephemeral_blockhash = Hash::default(); + max_chunks_per_transaction_using_lookup_table( + "Max close per tx using lookup", + |auth_pubkey, committee, _| { + vec![super::close_buffers_ix( + auth_pubkey, + &committee, + &ephemeral_blockhash, + )] + }, + None, + ) + }; + pub(crate) static ref MAX_PROCESS_AND_CLOSE_PER_TX: u8 = { + let ephemeral_blockhash = Hash::default(); + max_chunks_per_transaction( + "Max process and close per tx", + |auth_pubkey| { + let pubkey = Pubkey::new_unique(); + let delegated_account_owner = Pubkey::new_unique(); + let buffer_pda = Pubkey::new_unique(); + let commit_args = CommitStateFromBufferArgs::default(); + super::process_and_close_ixs( + auth_pubkey, + &pubkey, + &delegated_account_owner, + 
&buffer_pda, + &ephemeral_blockhash, + commit_args, + ) + }, + ) + }; + pub(crate) static ref MAX_PROCESS_AND_CLOSE_PER_TX_USING_LOOKUP: u8 = { + let ephemeral_blockhash = Hash::default(); + max_chunks_per_transaction_using_lookup_table( + "Max process and close per tx using lookup", + |auth_pubkey, committee, delegated_account_owner| { + let commit_args = CommitStateFromBufferArgs::default(); + let buffer_pda = Pubkey::new_unique(); + super::process_and_close_ixs( + auth_pubkey, + &committee, + &delegated_account_owner, + &buffer_pda, + &ephemeral_blockhash, + commit_args, + ) + }, + None, + ) + }; + } + + // ----------------- + // Finalize + // ----------------- + lazy_static! { + pub(crate) static ref MAX_FINALIZE_PER_TX: u8 = { + max_chunks_per_transaction("Max finalize per tx", |auth_pubkey| { + let pubkey = Pubkey::new_unique(); + vec![super::finalize_ix(auth_pubkey, &pubkey)] + }) + }; + pub(crate) static ref MAX_FINALIZE_PER_TX_USING_LOOKUP: u8 = { + max_chunks_per_transaction_using_lookup_table( + "Max finalize per tx using lookup", + |auth_pubkey, committee, _| { + vec![super::finalize_ix(auth_pubkey, &committee)] + }, + Some(40), + ) + }; + } + + // ----------------- + // Undelegate + // ----------------- + lazy_static! 
{ + pub(crate) static ref MAX_UNDELEGATE_PER_TX: u8 = { + max_chunks_per_transaction("Max undelegate per tx", |auth_pubkey| { + let pubkey = Pubkey::new_unique(); + let owner_program = Pubkey::new_unique(); + vec![dlp::instruction_builder::undelegate( + auth_pubkey, + pubkey, + owner_program, + auth_pubkey, + )] + }) + }; + pub(crate) static ref MAX_UNDELEGATE_PER_TX_USING_LOOKUP: u8 = { + max_chunks_per_transaction_using_lookup_table( + "Max undelegate per tx using lookup", + |auth_pubkey, committee, owner_program| { + vec![dlp::instruction_builder::undelegate( + auth_pubkey, + committee, + owner_program, + auth_pubkey, + )] + }, + None, + ) + }; + } + + // ----------------- + // Max Chunks Per Transaction + // ----------------- + + fn max_chunks_per_transaction Vec>( + label: &str, + create_ixs: F, + ) -> u8 { + eprintln!("{}", label); + + let auth = Keypair::new(); + let auth_pubkey = auth.pubkey(); + // NOTE: the size of the budget instructions is always the same, no matter + // which budget we provide + let mut ixs = ComputeBudget::Process(Budget::default()).instructions(1); + let mut chunks = 0_u8; + loop { + ixs.extend(create_ixs(auth_pubkey)); + chunks += 1; + + // SAFETY: runs statically + let versioned_msg = + Message::try_compile(&auth_pubkey, &ixs, &[], Hash::default()) + .unwrap(); + // SAFETY: runs statically + let versioned_tx = VersionedTransaction::try_new( + VersionedMessage::V0(versioned_msg), + &[&auth], + ) + .unwrap(); + let encoded = serialize_and_encode_base64(&versioned_tx); + eprintln!("{} ixs -> {} bytes", chunks, encoded.len()); + if encoded.len() > MAX_ENCODED_TRANSACTION_SIZE { + return chunks - 1; + } + } + } + + fn extend_lookup_table( + lookup_table: &mut AddressLookupTableAccount, + auth_pubkey: Pubkey, + committee: Pubkey, + owner: Option<&Pubkey>, + ) { + let keys = provide_committee_pubkeys(&committee, owner) + .into_iter() + .chain(provide_common_pubkeys(&auth_pubkey)) + .chain(lookup_table.addresses.iter().cloned()) + 
.collect::>(); + lookup_table.addresses = keys.into_iter().collect(); + assert!( + lookup_table.addresses.len() <= LOOKUP_TABLE_MAX_ADDRESSES, + "Lookup table has too many ({}) addresses", + lookup_table.addresses.len() + ); + } + + fn max_chunks_per_transaction_using_lookup_table< + FI: Fn(Pubkey, Pubkey, Pubkey) -> Vec, + >( + label: &str, + create_ixs: FI, + start_at: Option, + ) -> u8 { + eprintln!("{}", label); + let auth = Keypair::new(); + let auth_pubkey = auth.pubkey(); + let mut ixs = ComputeBudget::Process(Budget::default()).instructions(1); + let mut chunks = start_at.unwrap_or_default(); + let mut lookup_table = AddressLookupTableAccount { + key: Pubkey::default(), + addresses: vec![], + }; + // If we start at specific chunk size let's prep the ixs and assume + // we are using the same addresses to avoid blowing out the lookup table + if chunks > 0 { + let committee = Pubkey::new_unique(); + let owner_program = Pubkey::new_unique(); + extend_lookup_table( + &mut lookup_table, + auth_pubkey, + committee, + Some(&owner_program), + ); + for _ in 0..chunks { + ixs.extend(create_ixs(auth_pubkey, committee, owner_program)); + } + } + loop { + let committee = Pubkey::new_unique(); + let owner_program = Pubkey::new_unique(); + ixs.extend(create_ixs(auth_pubkey, committee, owner_program)); + + chunks += 1; + extend_lookup_table( + &mut lookup_table, + auth_pubkey, + committee, + Some(&owner_program), + ); + + // SAFETY: runs statically + let versioned_msg = Message::try_compile( + &auth_pubkey, + &ixs, + &[lookup_table.clone()], + Hash::default(), + ) + .unwrap(); + // SAFETY: runs statically + let versioned_tx = VersionedTransaction::try_new( + VersionedMessage::V0(versioned_msg), + &[&auth], + ) + .unwrap(); + let encoded = serialize_and_encode_base64(&versioned_tx); + eprintln!("{} ixs -> {} bytes", chunks, encoded.len()); + if encoded.len() > MAX_ENCODED_TRANSACTION_SIZE { + return chunks - 1; + } + } + } +} diff --git 
a/magicblock-committor-service/src/types.rs b/magicblock-committor-service/src/types.rs new file mode 100644 index 00000000..86b4e5d7 --- /dev/null +++ b/magicblock-committor-service/src/types.rs @@ -0,0 +1,57 @@ +use std::fmt; + +use solana_sdk::instruction::Instruction; + +use crate::CommitInfo; + +/// The kind of instructions included for the particular [CommitInfo] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum InstructionsKind { + /// The commit is processed only and may include the finalize instruction + Process, + /// The buffers to facilitate are closed, but processing occurred as part + /// of another set of instructions + CloseBuffers, + /// The commit is processed and the buffers closed all as part of this set + /// of instructions + ProcessAndCloseBuffers, + /// The commit is processed previously and only finalized by this set of + /// instructions + Finalize, + /// The commit is processed and finalized previously and the committee is + /// undelegated by this set of instructions + Undelegate, +} + +impl InstructionsKind { + pub fn is_processing(&self) -> bool { + matches!( + self, + InstructionsKind::Process + | InstructionsKind::ProcessAndCloseBuffers + ) + } +} + +#[derive(Debug)] +pub struct InstructionsForCommitable { + pub instructions: Vec, + pub commit_info: CommitInfo, + pub kind: InstructionsKind, +} + +impl fmt::Display for InstructionsForCommitable { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "InstructionsForCommitable {{ + instructions.len: {}, + commit_info: {} + kind: {:?} +}}", + self.instructions.len(), + self.commit_info.pubkey(), + self.kind + ) + } +} diff --git a/magicblock-committor-service/src/undelegate.rs b/magicblock-committor-service/src/undelegate.rs new file mode 100644 index 00000000..7064d516 --- /dev/null +++ b/magicblock-committor-service/src/undelegate.rs @@ -0,0 +1,103 @@ +use std::collections::HashMap; + +use dlp::state::DelegationMetadata; +use 
magicblock_rpc_client::MagicblockRpcClient; +use solana_account::ReadableAccount; +use solana_pubkey::Pubkey; +use solana_sdk::instruction::Instruction; + +use crate::{ + error::{CommittorServiceError, CommittorServiceResult}, + transactions::{MAX_UNDELEGATE_PER_TX, MAX_UNDELEGATE_PER_TX_USING_LOOKUP}, + types::{InstructionsForCommitable, InstructionsKind}, + CommitInfo, +}; + +pub(crate) async fn undelegate_commitables_ixs( + rpc_client: &MagicblockRpcClient, + validator_auth: Pubkey, + accs: Vec<(Pubkey, Pubkey)>, +) -> CommittorServiceResult> { + let delegation_metadata_pubkeys = accs + .iter() + .map(|(delegated_account, _)| { + dlp::pda::delegation_metadata_pda_from_delegated_account( + delegated_account, + ) + }) + .collect::>(); + let metadata_accs = rpc_client + .get_multiple_accounts(&delegation_metadata_pubkeys, None) + .await?; + + let mut ixs = HashMap::new(); + + for (metadata_acc, (committee, owner)) in + metadata_accs.iter().zip(accs.iter()) + { + let Some(metadata_acc) = metadata_acc else { + return Err( + CommittorServiceError::FailedToFetchDelegationMetadata( + *committee, + ), + ); + }; + let metadata = DelegationMetadata::try_from_bytes_with_discriminator( + metadata_acc.data(), + ) + .map_err(|err| { + CommittorServiceError::FailedToDeserializeDelegationMetadata( + *committee, err, + ) + })?; + + ixs.insert( + *committee, + dlp::instruction_builder::undelegate( + validator_auth, + *committee, + *owner, + metadata.rent_payer, + ), + ); + } + Ok(ixs) +} + +pub(crate) fn chunked_ixs_to_undelegate_commitables( + mut ixs: HashMap, + commit_infos: Vec, + use_lookup: bool, +) -> Vec> { + let max_per_chunk = if use_lookup { + MAX_UNDELEGATE_PER_TX_USING_LOOKUP + } else { + MAX_UNDELEGATE_PER_TX + }; + + let chunks = commit_infos + .chunks(max_per_chunk as usize) + .map(|chunk| { + chunk + .iter() + .flat_map(|commit_info| { + ixs.remove(&commit_info.pubkey()).map(|ix| { + InstructionsForCommitable { + instructions: vec![ix], + commit_info: 
commit_info.clone(), + kind: InstructionsKind::Undelegate, + } + }) + }) + .collect::>() + }) + .collect::>(); + + debug_assert!( + ixs.is_empty(), + "BUG: Some undelegate instructions {:?} were not matched with a commit_info: {:?}", + ixs, commit_infos + ); + + chunks +} diff --git a/magicblock-committor-service/todo-tests/ix_commit_local.rs b/magicblock-committor-service/todo-tests/ix_commit_local.rs new file mode 100644 index 00000000..b3227e3b --- /dev/null +++ b/magicblock-committor-service/todo-tests/ix_commit_local.rs @@ -0,0 +1,886 @@ +use log::*; +use magicblock_committor_service::{ChangesetCommittor, ComputeBudgetConfig}; +use magicblock_rpc_client::MagicblockRpcClient; +use std::collections::{HashMap, HashSet}; +use std::time::{Duration, Instant}; +use tokio::task::JoinSet; +use utils::transactions::tx_logs_contain; + +use magicblock_committor_program::{ChangedAccount, Changeset}; +use magicblock_committor_service::{ + changeset_for_slot, + config::ChainConfig, + persist::{CommitStatus, CommitStrategy}, + CommittorService, +}; +use solana_account::{Account, AccountSharedData, ReadableAccount}; +use solana_pubkey::Pubkey; +use solana_rpc_client::nonblocking::rpc_client::RpcClient; +use solana_rpc_client_api::config::RpcSendTransactionConfig; +use solana_sdk::commitment_config::CommitmentConfig; +use solana_sdk::hash::Hash; +use solana_sdk::transaction::Transaction; +use solana_sdk::{ + native_token::LAMPORTS_PER_SOL, signature::Keypair, signer::Signer, +}; +use utils::instructions::{ + init_account_and_delegate_ixs, init_validator_fees_vault_ix, + InitAccountAndDelegateIxs, +}; + +mod utils; + +// ----------------- +// Utilities and Setup +// ----------------- +type ExpectedStrategies = HashMap; + +fn expect_strategies( + strategies: &[(CommitStrategy, u8)], +) -> ExpectedStrategies { + let mut expected_strategies = HashMap::new(); + for (strategy, count) in strategies { + *expected_strategies.entry(*strategy).or_insert(0) += count; + } + 
expected_strategies +} + +fn uses_lookup(expected: &ExpectedStrategies) -> bool { + expected.iter().any(|(strategy, _)| strategy.uses_lookup()) +} + +macro_rules! get_account { + ($rpc_client:ident, $pubkey:expr, $label:literal, $predicate:expr) => {{ + const GET_ACCOUNT_RETRIES: u8 = 12; + + let mut remaining_tries = GET_ACCOUNT_RETRIES; + loop { + let acc = $rpc_client + .get_account_with_commitment( + &$pubkey, + CommitmentConfig::confirmed(), + ) + .await + .ok() + .and_then(|acc| acc.value); + if let Some(acc) = acc { + if $predicate(&acc, remaining_tries) { + break acc; + } + remaining_tries -= 1; + if remaining_tries == 0 { + panic!( + "{} account ({}) does not match condition after {} retries", + $label, $pubkey, GET_ACCOUNT_RETRIES + ); + } + utils::sleep_millis(800).await; + } else { + remaining_tries -= 1; + if remaining_tries == 0 { + panic!( + "Unable to get {} account ({}) matching condition after {} retries", + $label, $pubkey, GET_ACCOUNT_RETRIES + ); + } + if remaining_tries % 10 == 0 { + debug!( + "Waiting for {} account ({}) to become available", + $label, $pubkey + ); + } + utils::sleep_millis(800).await; + } + } + }}; + ($rpc_client:ident, $pubkey:expr, $label:literal) => {{ + get_account!($rpc_client, $pubkey, $label, |_: &Account, _: u8| true) + }}; +} + +/// This needs to be run once for all tests +async fn fund_validator_auth_and_ensure_validator_fees_vault( + validator_auth: &Keypair, +) { + let rpc_client = RpcClient::new("http://localhost:7799".to_string()); + rpc_client + .request_airdrop(&validator_auth.pubkey(), 777 * LAMPORTS_PER_SOL) + .await + .unwrap(); + debug!("Airdropped to validator: {} ", validator_auth.pubkey(),); + + let validator_fees_vault_exists = rpc_client + .get_account(&validator_auth.pubkey()) + .await + .is_ok(); + + if !validator_fees_vault_exists { + let latest_block_hash = + rpc_client.get_latest_blockhash().await.unwrap(); + let init_validator_fees_vault_ix = + 
init_validator_fees_vault_ix(validator_auth.pubkey()); + // If this fails it might be due to a race condition where another test + // already initialized it, so we can safely ignore the error + let _ = rpc_client + .send_and_confirm_transaction_with_spinner_and_config( + &Transaction::new_signed_with_payer( + &[init_validator_fees_vault_ix], + Some(&validator_auth.pubkey()), + &[&validator_auth], + latest_block_hash, + ), + CommitmentConfig::confirmed(), + RpcSendTransactionConfig { + skip_preflight: true, + ..Default::default() + }, + ) + .await + .map_err(|err| { + error!("Failed to init validator fees vault: {}", err); + }); + } +} + +/// This needs to be run for each test that required a new counter to be delegated +async fn init_and_delegate_account_on_chain( + counter_auth: &Keypair, + bytes: u64, +) -> (Pubkey, Account) { + let rpc_client = RpcClient::new("http://localhost:7799".to_string()); + + rpc_client + .request_airdrop(&counter_auth.pubkey(), 777 * LAMPORTS_PER_SOL) + .await + .unwrap(); + debug!("Airdropped to counter auth: {} SOL", 777 * LAMPORTS_PER_SOL); + + let InitAccountAndDelegateIxs { + init: init_counter_ix, + reallocs: realloc_ixs, + delegate: delegate_ix, + pda, + rent_excempt, + } = init_account_and_delegate_ixs(counter_auth.pubkey(), bytes); + + let latest_block_hash = rpc_client.get_latest_blockhash().await.unwrap(); + // 1. Init account + rpc_client + .send_and_confirm_transaction_with_spinner_and_config( + &Transaction::new_signed_with_payer( + &[init_counter_ix], + Some(&counter_auth.pubkey()), + &[&counter_auth], + latest_block_hash, + ), + CommitmentConfig::confirmed(), + RpcSendTransactionConfig { + skip_preflight: true, + ..Default::default() + }, + ) + .await + .expect("Failed to init account"); + debug!("Init account: {:?}", pda); + + // 2. 
Airdrop to account for extra rent needed for reallocs
    rpc_client
        .request_airdrop(&pda, rent_excempt)
        .await
        .unwrap();

    debug!(
        "Airdropped to account: {:4} {}SOL to pay rent for {} bytes",
        pda,
        rent_excempt as f64 / LAMPORTS_PER_SOL as f64,
        bytes
    );

    // 3. Run reallocs
    // Batch up to 10 realloc instructions per transaction.
    // NOTE(review): every chunk (and the delegate tx below) reuses the
    // blockhash fetched before step 1 — for very large accounts with many
    // chunks this could expire mid-loop; confirm whether a refresh per chunk
    // is needed.
    for realloc_ix_chunk in realloc_ixs.chunks(10) {
        let tx = Transaction::new_signed_with_payer(
            realloc_ix_chunk,
            Some(&counter_auth.pubkey()),
            &[&counter_auth],
            latest_block_hash,
        );
        rpc_client
            .send_and_confirm_transaction_with_spinner_and_config(
                &tx,
                CommitmentConfig::confirmed(),
                RpcSendTransactionConfig {
                    skip_preflight: true,
                    ..Default::default()
                },
            )
            .await
            .expect("Failed to realloc");
    }
    debug!("Reallocs done");

    // 4. Delegate account
    rpc_client
        .send_and_confirm_transaction_with_spinner_and_config(
            &Transaction::new_signed_with_payer(
                &[delegate_ix],
                Some(&counter_auth.pubkey()),
                &[&counter_auth],
                latest_block_hash,
            ),
            CommitmentConfig::confirmed(),
            RpcSendTransactionConfig {
                skip_preflight: true,
                ..Default::default()
            },
        )
        .await
        .expect("Failed to delegate");
    debug!("Delegated account: {:?}", pda);
    // Wait (via the retry macro above) until the delegated PDA is visible.
    let pda_acc = get_account!(rpc_client, pda, "pda");

    (pda, pda_acc)
}

// -----------------
// +++++ Tests +++++
// -----------------

// -----------------
// Single Account Commits
// -----------------
#[tokio::test]
async fn test_ix_commit_single_account_100_bytes() {
    // 100 bytes fits into instruction args directly — no buffer needed.
    commit_single_account(100, CommitStrategy::Args, false).await;
}

#[tokio::test]
async fn test_ix_commit_single_account_100_bytes_and_undelegate() {
    commit_single_account(100, CommitStrategy::Args, true).await;
}

#[tokio::test]
async fn test_ix_commit_single_account_800_bytes() {
    // 800 bytes exceeds the args limit — expects the buffer strategy.
    commit_single_account(800, CommitStrategy::FromBuffer, false).await;
}

#[tokio::test]
async fn test_ix_commit_single_account_800_bytes_and_undelegate() {
    commit_single_account(800, CommitStrategy::FromBuffer,
true).await; +} + +#[tokio::test] +async fn test_ix_commit_single_account_one_kb() { + commit_single_account(1024, CommitStrategy::FromBuffer, false).await; +} +#[tokio::test] +async fn test_ix_commit_single_account_ten_kb() { + commit_single_account(10 * 1024, CommitStrategy::FromBuffer, false).await; +} + +async fn commit_single_account( + bytes: usize, + expected_strategy: CommitStrategy, + undelegate: bool, +) { + utils::init_logger_target(); + let slot = 10; + let validator_auth = utils::get_validator_auth(); + + fund_validator_auth_and_ensure_validator_fees_vault(&validator_auth).await; + + // Run each test with and without finalizing + for (idx, finalize) in [false, true].into_iter().enumerate() { + let service = CommittorService::try_start( + validator_auth.insecure_clone(), + ":memory:", + ChainConfig::local(ComputeBudgetConfig::new(1_000_000)), + ) + .unwrap(); + + let (changeset, chain_lamports) = { + let mut changeset = changeset_for_slot(slot); + let mut chain_lamports = HashMap::new(); + let counter_auth = Keypair::new(); + let (pda, pda_acc) = + init_and_delegate_account_on_chain(&counter_auth, bytes as u64) + .await; + let account = Account { + lamports: LAMPORTS_PER_SOL, + data: vec![8; bytes], + owner: program_flexi_counter::id(), + ..Account::default() + }; + let account_shared = AccountSharedData::from(account); + let bundle_id = idx as u64; + changeset.add(pda, (account_shared, bundle_id)); + if undelegate { + changeset.request_undelegation(pda); + } + chain_lamports.insert(pda, pda_acc.lamports()); + (changeset, chain_lamports) + }; + + ix_commit_local( + service, + changeset.clone(), + chain_lamports.clone(), + finalize, + expect_strategies(&[(expected_strategy, 1)]), + ) + .await; + } +} + +// TODO(thlorenz): once delegation program supports larger commits +// add 1MB and 10MB tests + +// ----------------- +// Multiple Account Commits +// ----------------- +#[tokio::test] +async fn test_ix_commit_two_accounts_1kb_2kb() { + 
utils::init_logger(); + commit_multiple_accounts( + &[1024, 2048], + 1, + expect_strategies(&[(CommitStrategy::FromBuffer, 2)]), + false, + ) + .await; +} + +#[tokio::test] +async fn test_ix_commit_four_accounts_1kb_2kb_5kb_10kb_single_bundle() { + utils::init_logger(); + commit_multiple_accounts( + &[1024, 2 * 1024, 5 * 1024, 10 * 1024], + 1, + expect_strategies(&[(CommitStrategy::FromBuffer, 4)]), + false, + ) + .await; +} + +#[tokio::test] +async fn test_commit_20_accounts_1kb_bundle_size_2() { + commit_20_accounts_1kb( + 2, + expect_strategies(&[(CommitStrategy::FromBuffer, 20)]), + ) + .await; +} + +#[tokio::test] +async fn test_commit_5_accounts_1kb_bundle_size_3() { + commit_5_accounts_1kb( + 3, + expect_strategies(&[(CommitStrategy::FromBuffer, 5)]), + false, + ) + .await; +} + +#[tokio::test] +async fn test_commit_5_accounts_1kb_bundle_size_3_undelegate_all() { + commit_5_accounts_1kb( + 3, + expect_strategies(&[(CommitStrategy::FromBuffer, 5)]), + true, + ) + .await; +} + +#[tokio::test] +async fn test_commit_5_accounts_1kb_bundle_size_4() { + commit_5_accounts_1kb( + 4, + expect_strategies(&[ + (CommitStrategy::FromBuffer, 1), + (CommitStrategy::FromBufferWithLookupTable, 4), + ]), + false, + ) + .await; +} + +#[tokio::test] +async fn test_commit_5_accounts_1kb_bundle_size_4_undelegate_all() { + commit_5_accounts_1kb( + 4, + expect_strategies(&[ + (CommitStrategy::FromBuffer, 1), + (CommitStrategy::FromBufferWithLookupTable, 4), + ]), + true, + ) + .await; +} + +#[tokio::test] +async fn test_commit_20_accounts_1kb_bundle_size_3() { + commit_20_accounts_1kb( + 3, + expect_strategies(&[(CommitStrategy::FromBuffer, 20)]), + ) + .await; +} + +#[tokio::test] +async fn test_commit_20_accounts_1kb_bundle_size_4() { + commit_20_accounts_1kb( + 4, + expect_strategies(&[(CommitStrategy::FromBufferWithLookupTable, 20)]), + ) + .await; +} + +#[tokio::test] +async fn test_commit_20_accounts_1kb_bundle_size_6() { + commit_20_accounts_1kb( + 6, + expect_strategies(&[ + 
(CommitStrategy::FromBufferWithLookupTable, 18), + // Two accounts don't make it into the bundles of size 6 + (CommitStrategy::FromBuffer, 2), + ]), + ) + .await; +} + +#[tokio::test] +async fn test_commit_8_accounts_1kb_bundle_size_8() { + commit_8_accounts_1kb( + 8, + expect_strategies(&[ + // Four accounts don't make it into the bundles of size 8, but + // that bundle also needs lookup tables + (CommitStrategy::FromBufferWithLookupTable, 8), + ]), + ) + .await; +} +#[tokio::test] +async fn test_commit_20_accounts_1kb_bundle_size_8() { + commit_20_accounts_1kb( + 8, + expect_strategies(&[ + // Four accounts don't make it into the bundles of size 8, but + // that bundle also needs lookup tables + (CommitStrategy::FromBufferWithLookupTable, 20), + ]), + ) + .await; +} + +async fn commit_5_accounts_1kb( + bundle_size: usize, + expected_strategies: ExpectedStrategies, + undelegate_all: bool, +) { + utils::init_logger(); + let accs = (0..5).map(|_| 1024).collect::>(); + commit_multiple_accounts( + &accs, + bundle_size, + expected_strategies, + undelegate_all, + ) + .await; +} + +async fn commit_8_accounts_1kb( + bundle_size: usize, + expected_strategies: ExpectedStrategies, +) { + utils::init_logger(); + let accs = (0..8).map(|_| 1024).collect::>(); + commit_multiple_accounts(&accs, bundle_size, expected_strategies, false) + .await; +} + +async fn commit_20_accounts_1kb( + bundle_size: usize, + expected_strategies: ExpectedStrategies, +) { + utils::init_logger(); + let accs = (0..20).map(|_| 1024).collect::>(); + commit_multiple_accounts(&accs, bundle_size, expected_strategies, false) + .await; +} + +async fn commit_multiple_accounts( + bytess: &[usize], + bundle_size: usize, + expected_strategies: ExpectedStrategies, + undelegate_all: bool, +) { + utils::init_logger(); + let slot = 10; + let validator_auth = utils::get_validator_auth(); + + fund_validator_auth_and_ensure_validator_fees_vault(&validator_auth).await; + + for finalize in [false, true] { + let mut 
changeset = changeset_for_slot(slot); + + let service = CommittorService::try_start( + validator_auth.insecure_clone(), + ":memory:", + ChainConfig::local(ComputeBudgetConfig::new(1_000_000)), + ) + .unwrap(); + + let committees = + bytess.iter().map(|_| Keypair::new()).collect::>(); + + let mut chain_lamports = HashMap::new(); + let expected_strategies = expected_strategies.clone(); + + let mut join_set = JoinSet::new(); + let mut bundle_id = 0; + + for (idx, (bytes, counter_auth)) in + bytess.iter().zip(committees.into_iter()).enumerate() + { + if idx % bundle_size == 0 { + bundle_id += 1; + } + + let bytes = *bytes; + join_set.spawn(async move { + let (pda, pda_acc) = init_and_delegate_account_on_chain( + &counter_auth, + bytes as u64, + ) + .await; + + let account = Account { + lamports: LAMPORTS_PER_SOL, + data: vec![idx as u8; bytes], + owner: program_flexi_counter::id(), + ..Account::default() + }; + let account_shared = AccountSharedData::from(account); + let changed_account = + ChangedAccount::from((account_shared, bundle_id as u64)); + + // We can only undelegate accounts that are finalized + let request_undelegation = + finalize && (undelegate_all || idx % 2 == 0); + ( + pda, + pda_acc, + changed_account, + counter_auth.pubkey(), + request_undelegation, + ) + }); + } + + for ( + pda, + pda_acc, + changed_account, + counter_pubkey, + request_undelegation, + ) in join_set.join_all().await + { + changeset.add(pda, changed_account); + if request_undelegation { + changeset.request_undelegation(counter_pubkey); + } + chain_lamports.insert(pda, pda_acc.lamports()); + } + + if uses_lookup(&expected_strategies) { + let mut join_set = JoinSet::new(); + join_set.spawn(service.reserve_common_pubkeys()); + let owners = changeset.owners(); + for committee in changeset.account_keys().iter() { + join_set.spawn(service.reserve_pubkeys_for_committee( + **committee, + *owners.get(committee).unwrap(), + )); + } + debug!( + "Registering lookup tables for {} committees", + 
changeset.account_keys().len() + ); + join_set.join_all().await; + } + + ix_commit_local( + service, + changeset.clone(), + chain_lamports.clone(), + finalize, + expected_strategies, + ) + .await; + } +} + +// TODO(thlorenz): once delegation program supports larger commits add the following +// tests +// +// ## Scenario 1 +// +// All realloc instructions still fit into the same transaction as the init instruction +// of each account + +// ## Scenario 2 +// +// Max size that is allowed on solana (10MB) +// https://solana.com/docs/core/accounts +// 9,996,760 bytes 9.53MB requiring 69.57 SOL to be rent exempt + +// This requires a chunk tracking account of 1.30KB which can be fully allocated +// as part of the init instruction. Since no larger buffers are possible this +// chunk account size suffices and we don't have to worry about reallocs +// of that tracking account + +// This test pushes the validator to the max, sending >10K transactions in +// order to allocate enough space and write the chunks. +// It shows that committing buffers in that size range is not practically +// feasible, but still we ensure here that it is handled. 
+ +// ----------------- +// Test Executor +// ----------------- +async fn ix_commit_local( + service: CommittorService, + changeset: Changeset, + chain_lamports: HashMap, + finalize: bool, + expected_strategies: ExpectedStrategies, +) { + let rpc_client = RpcClient::new("http://localhost:7799".to_string()); + + let ephemeral_blockhash = Hash::default(); + let reqid = service + .commit_changeset(changeset.clone(), ephemeral_blockhash, finalize) + .await + .unwrap() + .unwrap(); + let statuses = service.get_commit_statuses(reqid).await.unwrap().unwrap(); + service.release_common_pubkeys().await.unwrap(); + + debug!( + "{}", + statuses + .iter() + .map(|x| x.to_string()) + .collect::>() + .join("\n") + ); + assert_eq!(statuses.len(), changeset.accounts.len()); + assert!(CommitStatus::all_completed( + &statuses + .iter() + .map(|x| x.commit_status.clone()) + .collect::>() + )); + let mut strategies = ExpectedStrategies::new(); + for res in statuses { + let change = changeset.accounts.get(&res.pubkey).cloned().unwrap(); + let lamports = if finalize { + change.lamports() + } else { + // The commit state account will hold only the lamports needed + // to be rent exempt and debit the delegated account to reach the + // lamports of the account as changed in the ephemeral + change.lamports() - chain_lamports[&res.pubkey] + }; + + // Track the strategy used + let strategy = res.commit_status.commit_strategy(); + let strategy_count = strategies.entry(strategy).or_insert(0); + *strategy_count += 1; + + // Ensure that the signatures are pointing to the correct transactions + let signatures = + res.commit_status.signatures().expect("Missing signatures"); + + assert!( + tx_logs_contain( + &rpc_client, + &signatures.process_signature, + "CommitState" + ) + .await + ); + + // If we finalized the commit then the delegate account should have the + // committed state, otherwise it is still held in the commit state account + // NOTE: that we verify data/lamports via the get_account! 
condition + if finalize { + assert!( + signatures.finalize_signature.is_some(), + "Missing finalize signature" + ); + assert!( + tx_logs_contain( + &rpc_client, + &signatures.finalize_signature.unwrap(), + "Finalize" + ) + .await + ); + if res.undelegate { + assert!( + signatures.undelegate_signature.is_some(), + "Missing undelegate signature" + ); + assert!( + tx_logs_contain( + &rpc_client, + &signatures.undelegate_signature.unwrap(), + "Undelegate" + ) + .await + ); + } + get_account!( + rpc_client, + res.pubkey, + "delegated state", + |acc: &Account, remaining_tries: u8| { + let matches_data = acc.data() == change.data() + && acc.lamports() == lamports; + // When we finalize it is possible to also undelegate the account + let expected_owner = if res.undelegate { + program_flexi_counter::id() + } else { + dlp::id() + }; + let matches_undelegation = acc.owner().eq(&expected_owner); + let matches_all = matches_data && matches_undelegation; + + if !matches_all && remaining_tries % 4 == 0 { + if !matches_data { + trace!( + "Account ({}) data {} != {} || {} != {}", + res.pubkey, + acc.data().len(), + change.data().len(), + acc.lamports(), + lamports + ); + } + if !matches_undelegation { + trace!( + "Account ({}) is {} but should be. Owner {} != {}", + res.pubkey, + if res.undelegate { + "not undelegated" + } else { + "undelegated" + }, + acc.owner(), + expected_owner, + ); + } + } + matches_all + } + ) + } else { + let commit_state_pda = + dlp::pda::commit_state_pda_from_delegated_account(&res.pubkey); + get_account!( + rpc_client, + commit_state_pda, + "commit state", + |acc: &Account, remaining_tries: u8| { + if remaining_tries % 4 == 0 { + trace!( + "Commit state ({}) {} == {}? 
{} == {}?", + commit_state_pda, + acc.data().len(), + change.data().len(), + acc.lamports(), + lamports + ); + } + acc.data() == change.data() && acc.lamports() == lamports + } + ) + }; + } + + // Compare the strategies used with the expected ones + debug!("Strategies used: {:?}", strategies); + assert_eq!( + strategies, expected_strategies, + "Strategies used do not match expected ones" + ); + + let expect_empty_lookup_tables = false; + // changeset.accounts.len() == changeset.accounts_to_undelegate.len(); + if expect_empty_lookup_tables { + let lookup_tables = service.get_lookup_tables().await.unwrap(); + assert!(lookup_tables.active.is_empty()); + + if utils::TEST_TABLE_CLOSE { + let mut closing_tables = lookup_tables.released; + + // Tables deactivate after ~2.5 mins (150secs), but most times + // it takes a lot longer so we allow double the time + const MAX_TIME_TO_CLOSE: Duration = Duration::from_secs(300); + info!( + "Waiting for lookup tables close for up to {} secs", + MAX_TIME_TO_CLOSE.as_secs() + ); + + let start = Instant::now(); + let rpc_client = MagicblockRpcClient::from(rpc_client); + loop { + let accs = rpc_client + .get_multiple_accounts_with_commitment( + &closing_tables, + CommitmentConfig::confirmed(), + None, + ) + .await + .unwrap(); + let closed_pubkeys = accs + .into_iter() + .zip(closing_tables.iter()) + .filter_map(|(acc, pubkey)| { + if acc.is_none() { + Some(*pubkey) + } else { + None + } + }) + .collect::>(); + closing_tables.retain(|pubkey| { + if closed_pubkeys.contains(pubkey) { + debug!("Table {} closed", pubkey); + false + } else { + true + } + }); + if closing_tables.is_empty() { + break; + } + debug!( + "Still waiting for {} released table(s) to close", + closing_tables.len() + ); + if Instant::now() - start > MAX_TIME_TO_CLOSE { + panic!( + "Timed out waiting for tables close. 
Still open: {}", + closing_tables + .iter() + .map(|x| x.to_string()) + .collect::>() + .join(", ") + ); + } + utils::sleep_millis(10_000).await; + } + } + } +} diff --git a/magicblock-committor-service/todo-tests/utils/instructions.rs b/magicblock-committor-service/todo-tests/utils/instructions.rs new file mode 100644 index 00000000..148ae6ce --- /dev/null +++ b/magicblock-committor-service/todo-tests/utils/instructions.rs @@ -0,0 +1,50 @@ +use solana_pubkey::Pubkey; +use solana_sdk::{instruction::Instruction, rent::Rent}; + +pub fn init_validator_fees_vault_ix(validator_auth: Pubkey) -> Instruction { + dlp::instruction_builder::init_validator_fees_vault( + validator_auth, + validator_auth, + validator_auth, + ) +} + +pub struct InitAccountAndDelegateIxs { + pub init: Instruction, + pub reallocs: Vec, + pub delegate: Instruction, + pub pda: Pubkey, + pub rent_excempt: u64, +} + +pub fn init_account_and_delegate_ixs( + payer: Pubkey, + bytes: u64, +) -> InitAccountAndDelegateIxs { + use program_flexi_counter::instruction::*; + use program_flexi_counter::state::*; + let init_counter_ix = create_init_ix(payer, "COUNTER".to_string()); + let rent_exempt = Rent::default().minimum_balance(bytes as usize); + let mut realloc_ixs = vec![]; + if bytes + > magicblock_committor_program::consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE + as u64 + { + // TODO: we may have to chunk those + let reallocs = bytes + / magicblock_committor_program::consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE + as u64; + for i in 0..reallocs { + realloc_ixs.push(create_realloc_ix(payer, bytes, i as u16)); + } + } + let delegate_ix = create_delegate_ix(payer); + let pda = FlexiCounter::pda(&payer).0; + InitAccountAndDelegateIxs { + init: init_counter_ix, + reallocs: realloc_ixs, + delegate: delegate_ix, + pda, + rent_excempt: rent_exempt, + } +} diff --git a/magicblock-committor-service/todo-tests/utils/mod.rs b/magicblock-committor-service/todo-tests/utils/mod.rs new file mode 100644 index 
00000000..0b943374 --- /dev/null +++ b/magicblock-committor-service/todo-tests/utils/mod.rs @@ -0,0 +1,51 @@ +use std::env; + +use env_logger::Target; +use solana_sdk::signature::Keypair; + +pub mod instructions; +pub mod transactions; +pub const TEST_TABLE_CLOSE: bool = cfg!(feature = "test_table_close"); + +pub async fn sleep_millis(millis: u64) { + tokio::time::sleep(tokio::time::Duration::from_millis(millis)).await; +} + +pub fn init_logger() { + let mut builder = env_logger::builder(); + builder + .format_timestamp(None) + .format_module_path(false) + .format_target(false) + .format_source_path(true) + .is_test(true); + + if let Ok(path) = env::var("TEST_LOG_FILE") { + builder.target(Target::Pipe(Box::new( + std::fs::File::create(path).unwrap(), + ))); + } + let _ = builder.try_init(); +} + +pub fn init_logger_target() { + let _ = env_logger::builder() + .format_timestamp(None) + .is_test(true) + .try_init(); +} + +/// This is the test authority used in the delegation program +/// https://github.com/magicblock-labs/delegation-program/blob/7fc0ae9a59e48bea5b046b173ea0e34fd433c3c7/tests/fixtures/accounts.rs#L46 +/// It is compiled in as the authority for the validator vault when we build via +/// `cargo build-sbf --features=unit_test_config` +pub fn get_validator_auth() -> Keypair { + const VALIDATOR_AUTHORITY: [u8; 64] = [ + 251, 62, 129, 184, 107, 49, 62, 184, 1, 147, 178, 128, 185, 157, 247, + 92, 56, 158, 145, 53, 51, 226, 202, 96, 178, 248, 195, 133, 133, 237, + 237, 146, 13, 32, 77, 204, 244, 56, 166, 172, 66, 113, 150, 218, 112, + 42, 110, 181, 98, 158, 222, 194, 130, 93, 175, 100, 190, 106, 9, 69, + 156, 80, 96, 72, + ]; + Keypair::from_bytes(&VALIDATOR_AUTHORITY).unwrap() +} diff --git a/magicblock-committor-service/todo-tests/utils/transactions.rs b/magicblock-committor-service/todo-tests/utils/transactions.rs new file mode 100644 index 00000000..f9e2edc0 --- /dev/null +++ b/magicblock-committor-service/todo-tests/utils/transactions.rs @@ -0,0 +1,58 @@ 
+use solana_rpc_client::nonblocking::rpc_client::RpcClient; +use solana_rpc_client_api::config::RpcTransactionConfig; +use solana_sdk::{commitment_config::CommitmentConfig, signature::Signature}; + +pub async fn tx_logs_contain( + rpc_client: &RpcClient, + signature: &Signature, + needle: &str, +) -> bool { + // NOTE: we encountered the following error a few times which makes tests fail for the + // wrong reason: + // Error { + // request: Some(GetTransaction), + // kind: SerdeJson( Error( + // "invalid type: null, + // expected struct EncodedConfirmedTransactionWithStatusMeta", + // line: 0, column: 0)) + // } + // Therefore we retry a few times. + const MAX_RETRIES: usize = 5; + let mut retries = MAX_RETRIES; + let tx = loop { + match rpc_client + .get_transaction_with_config( + signature, + RpcTransactionConfig { + commitment: Some(CommitmentConfig::confirmed()), + max_supported_transaction_version: Some(0), + ..Default::default() + }, + ) + .await + { + Ok(tx) => break tx, + Err(err) => { + log::error!("Failed to get transaction: {}", err); + retries -= 1; + if retries == 0 { + panic!( + "Failed to get transaction after {} retries", + MAX_RETRIES + ); + } + tokio::time::sleep(tokio::time::Duration::from_millis(100)) + .await; + } + }; + }; + let logs = tx + .transaction + .meta + .as_ref() + .unwrap() + .log_messages + .clone() + .unwrap_or_else(Vec::new); + logs.iter().any(|log| log.contains(needle)) +} diff --git a/magicblock-rpc-client/Cargo.toml b/magicblock-rpc-client/Cargo.toml new file mode 100644 index 00000000..2bc7430a --- /dev/null +++ b/magicblock-rpc-client/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "magicblock-rpc-client" +version.workspace = true +authors.workspace = true +repository.workspace = true +homepage.workspace = true +license.workspace = true +edition.workspace = true + +[dependencies] +log = { workspace = true } +solana-rpc-client = { workspace = true } +solana-rpc-client-api = { workspace = true } +solana-sdk = { workspace = true 
} +solana-transaction-status-client-types = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true } + +[dev-dependencies] +env_logger = { workspace = true } +tokio = { workspace = true, features = ["rt", "macros"] } diff --git a/magicblock-rpc-client/src/lib.rs b/magicblock-rpc-client/src/lib.rs new file mode 100644 index 00000000..f710fb3a --- /dev/null +++ b/magicblock-rpc-client/src/lib.rs @@ -0,0 +1,512 @@ +use log::*; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +use solana_rpc_client::{ + nonblocking::rpc_client::RpcClient, rpc_client::SerializableTransaction, +}; +use solana_rpc_client_api::client_error::ErrorKind as RpcClientErrorKind; +use solana_rpc_client_api::{ + config::RpcSendTransactionConfig, request::RpcError, +}; +use solana_sdk::{ + account::Account, + address_lookup_table::state::{AddressLookupTable, LookupTableMeta}, + clock::Slot, + commitment_config::{CommitmentConfig, CommitmentLevel}, + hash::Hash, + pubkey::Pubkey, + signature::Signature, + transaction::TransactionError, +}; +use solana_transaction_status_client_types::UiTransactionEncoding; +use tokio::task::JoinSet; + +/// The encoding to use when sending transactions +pub const SEND_TRANSACTION_ENCODING: UiTransactionEncoding = + UiTransactionEncoding::Base64; + +/// The configuration to use when sending transactions +pub const SEND_TRANSACTION_CONFIG: RpcSendTransactionConfig = + RpcSendTransactionConfig { + preflight_commitment: None, + skip_preflight: true, + encoding: Some(SEND_TRANSACTION_ENCODING), + max_retries: None, + min_context_slot: None, + }; + +// ----------------- +// MagicBlockRpcClientError +// ----------------- +#[derive(Debug, thiserror::Error)] +pub enum MagicBlockRpcClientError { + #[error("RPC Client error: {0}")] + RpcClientError(#[from] solana_rpc_client_api::client_error::Error), + + #[error("Error getting blockhash: {0} ({0:?})")] + GetLatestBlockhash(solana_rpc_client_api::client_error::Error), + + #[error("Error 
getting slot: {0} ({0:?})")] + GetSlot(solana_rpc_client_api::client_error::Error), + + #[error("Error deserializing lookup table: {0}")] + LookupTableDeserialize(solana_sdk::instruction::InstructionError), + + #[error("Error sending transaction: {0} ({0:?})")] + SendTransaction(solana_rpc_client_api::client_error::Error), + + #[error("Error getting signature status for: {0} {1}")] + CannotGetTransactionSignatureStatus(Signature, String), + + #[error( + "Error confirming signature status of {0} at desired commitment level {1}" + )] + CannotConfirmTransactionSignatureStatus(Signature, CommitmentLevel), + + #[error("Sent transaction {1} but got error: {0:?}")] + SentTransactionError(TransactionError, Signature), +} + +impl MagicBlockRpcClientError { + /// Returns the signature of the transaction that caused the error + /// if available. + pub fn signature(&self) -> Option { + use MagicBlockRpcClientError::*; + match self { + CannotGetTransactionSignatureStatus(sig, _) + | SentTransactionError(_, sig) + | CannotConfirmTransactionSignatureStatus(sig, _) => Some(*sig), + _ => None, + } + } +} + +pub type MagicBlockRpcClientResult = + std::result::Result; + +// ----------------- +// SendAndConfirmTransaction Config and Outcome +// ----------------- +pub enum MagicBlockSendTransactionConfig { + /// Just send the transaction and return the signature. + Send, + /// Send a transaction and confirm it with the given parameters. + SendAndConfirm { + /// If provided we will wait for the given blockhash to become valid if + /// getting the signature status fails due to `BlockhashNotFound`. + wait_for_blockhash_to_become_valid: Option, + /// If provided we will try multiple time so find the signature status + /// of the transaction at the 'processed' level even if the recent blockhash + /// already became valid. + wait_for_processed_level: Option, + /// How long to wait in between checks for processed commitment level. 
+ check_for_processed_interval: Option, + /// If provided it will wait for the transaction to be committed at the given + /// commitment level. If not we just wait for the transaction to be processed and + /// return the processed status. + wait_for_commitment_level: Option, + /// How long to wait in between checks for desired commitment level. + check_for_commitment_interval: Option, + }, +} + +// This seems rather large, but if we pick a lower value then test fail locally running +// against a (busy) solana test validator +// I verified that it actually takes this long for the transaction to become available +// in the explorer. Power settings on my machine actually affect this behavior. +const DEFAULT_MAX_TIME_TO_PROCESSED: Duration = Duration::from_millis(50_000); + +impl MagicBlockSendTransactionConfig { + // This will be used if we change the strategy for reallocs or writes + #[allow(dead_code)] + pub fn ensure_sent() -> Self { + Self::Send + } + + pub fn ensure_processed() -> Self { + Self::SendAndConfirm { + wait_for_blockhash_to_become_valid: Some(Duration::from_millis( + 2_000, + )), + wait_for_processed_level: Some(DEFAULT_MAX_TIME_TO_PROCESSED), + check_for_processed_interval: Some(Duration::from_millis(400)), + wait_for_commitment_level: None, + check_for_commitment_interval: None, + } + } + + pub fn ensure_committed() -> Self { + Self::SendAndConfirm { + wait_for_blockhash_to_become_valid: Some(Duration::from_millis( + 2_000, + )), + wait_for_processed_level: Some(DEFAULT_MAX_TIME_TO_PROCESSED), + check_for_processed_interval: Some(Duration::from_millis(400)), + // NOTE: that this time is after we already verified that the transaction was + // processed + wait_for_commitment_level: Some(Duration::from_millis(8_000)), + check_for_commitment_interval: Some(Duration::from_millis(400)), + } + } + + pub fn ensures_committed(&self) -> bool { + use MagicBlockSendTransactionConfig::*; + match self { + Send => false, + SendAndConfirm { + 
wait_for_commitment_level, + .. + } => wait_for_commitment_level.is_some(), + } + } +} + +pub struct MagicBlockSendTransactionOutcome { + signature: Signature, + processed_err: Option, + confirmed_err: Option, +} + +impl MagicBlockSendTransactionOutcome { + pub fn into_signature(self) -> Signature { + self.signature + } + + pub fn into_signature_and_error( + self, + ) -> (Signature, Option) { + (self.signature, self.confirmed_err.or(self.processed_err)) + } + + /// Returns the error that occurred when processing the transaction. + /// NOTE: this is never set if we use the [MagicBlockSendConfig::Send] option. + pub fn error(&self) -> Option<&TransactionError> { + self.confirmed_err.as_ref().or(self.processed_err.as_ref()) + } +} + +// ----------------- +// MagicBlockRpcClient +// ----------------- + +// Derived from error from helius RPC: Failed to download accounts: Error { request: Some(GetMultipleAccounts), kind: RpcError(RpcResponseError { code: -32602, message: "Too many inputs provided; max 100", data: Empty }) } +const MAX_MULTIPLE_ACCOUNTS: usize = 100; + +/// Wraps a [RpcClient] to provide improved functionality, specifically +/// for sending transactions. +#[derive(Clone)] +pub struct MagicblockRpcClient { + client: Arc, +} + +impl From for MagicblockRpcClient { + fn from(client: RpcClient) -> Self { + Self::new(Arc::new(client)) + } +} + +impl MagicblockRpcClient { + /// Create a new [MagicBlockRpcClient] from an existing [RpcClient]. 
+ pub fn new(client: Arc) -> Self { + Self { client } + } + + pub async fn get_latest_blockhash( + &self, + ) -> MagicBlockRpcClientResult { + self.client + .get_latest_blockhash() + .await + .map_err(MagicBlockRpcClientError::GetLatestBlockhash) + } + + pub async fn get_slot(&self) -> MagicBlockRpcClientResult { + self.client + .get_slot() + .await + .map_err(MagicBlockRpcClientError::GetSlot) + } + + pub async fn get_account( + &self, + pubkey: &Pubkey, + ) -> MagicBlockRpcClientResult> { + let err = match self.client.get_account(pubkey).await { + Ok(acc) => return Ok(Some(acc)), + Err(err) => match err.kind() { + RpcClientErrorKind::RpcError(rpc_err) => { + if let RpcError::ForUser(msg) = rpc_err { + if msg.starts_with("AccountNotFound") { + return Ok(None); + } + } + err + } + _ => err, + }, + }; + Err(MagicBlockRpcClientError::RpcClientError(err)) + } + pub async fn get_multiple_accounts( + &self, + pubkeys: &[Pubkey], + max_per_fetch: Option, + ) -> MagicBlockRpcClientResult>> { + self.get_multiple_accounts_with_commitment( + pubkeys, + self.commitment(), + max_per_fetch, + ) + .await + } + + pub async fn get_multiple_accounts_with_commitment( + &self, + pubkeys: &[Pubkey], + commitment: CommitmentConfig, + max_per_fetch: Option, + ) -> MagicBlockRpcClientResult>> { + let max_per_fetch = max_per_fetch.unwrap_or(MAX_MULTIPLE_ACCOUNTS); + + let mut join_set = JoinSet::new(); + for pubkey_chunk in pubkeys.chunks(max_per_fetch) { + let client = self.client.clone(); + let pubkeys = pubkey_chunk.to_vec(); + join_set.spawn(async move { + client + .get_multiple_accounts_with_commitment(&pubkeys, commitment) + .await + }); + } + let chunked_results = join_set.join_all().await; + let mut results = Vec::new(); + for result in chunked_results { + match result { + Ok(accs) => results.extend(accs.value), + Err(err) => { + return Err(MagicBlockRpcClientError::RpcClientError(err)) + } + } + } + Ok(results) + } + + pub async fn get_lookup_table_meta( + &self, + pubkey: 
&Pubkey, + ) -> MagicBlockRpcClientResult> { + let acc = self.get_account(pubkey).await?; + let Some(acc) = acc else { return Ok(None) }; + + let table = + AddressLookupTable::deserialize(&acc.data).map_err(|err| { + MagicBlockRpcClientError::LookupTableDeserialize(err) + })?; + Ok(Some(table.meta)) + } + + pub async fn get_lookup_table_addresses( + &self, + pubkey: &Pubkey, + ) -> MagicBlockRpcClientResult>> { + let acc = self.get_account(pubkey).await?; + let Some(acc) = acc else { return Ok(None) }; + + let table = + AddressLookupTable::deserialize(&acc.data).map_err(|err| { + MagicBlockRpcClientError::LookupTableDeserialize(err) + })?; + Ok(Some(table.addresses.to_vec())) + } + + pub async fn request_airdrop( + &self, + pubkey: &Pubkey, + lamports: u64, + ) -> MagicBlockRpcClientResult { + self.client + .request_airdrop(pubkey, lamports) + .await + .map_err(MagicBlockRpcClientError::RpcClientError) + } + + pub fn commitment(&self) -> CommitmentConfig { + self.client.commitment() + } + + pub fn commitment_level(&self) -> CommitmentLevel { + self.commitment().commitment + } + + pub async fn wait_for_next_slot(&self) -> MagicBlockRpcClientResult { + let slot = self.get_slot().await?; + self.wait_for_higher_slot(slot).await + } + + pub async fn wait_for_higher_slot( + &self, + slot: Slot, + ) -> MagicBlockRpcClientResult { + let higher_slot = loop { + let next_slot = self.get_slot().await?; + if next_slot > slot { + break next_slot; + } + tokio::time::sleep(Duration::from_millis(100)).await; + }; + + Ok(higher_slot) + } + + /// Sends a transaction skipping preflight checks and then attempts to confirm + /// it if so configured + /// To confirm a transaction it uses the `client.commitment()` when requesting + /// `get_signature_status_with_commitment` + /// + /// Does not support: + /// - durable nonce transactions + pub async fn send_transaction( + &self, + tx: &impl SerializableTransaction, + config: &MagicBlockSendTransactionConfig, + ) -> 
MagicBlockRpcClientResult { + let sig = self + .client + .send_transaction_with_config(tx, SEND_TRANSACTION_CONFIG) + .await + .map_err(MagicBlockRpcClientError::SendTransaction)?; + + let MagicBlockSendTransactionConfig::SendAndConfirm { + wait_for_processed_level, + check_for_processed_interval, + wait_for_blockhash_to_become_valid, + wait_for_commitment_level, + check_for_commitment_interval, + } = config + else { + return Ok(MagicBlockSendTransactionOutcome { + signature: sig, + processed_err: None, + confirmed_err: None, + }); + }; + + // 1. Get Signature Processed Status to Fail early on failed transactions + let start = Instant::now(); + let recent_blockhash = tx.get_recent_blockhash(); + debug_assert!( + recent_blockhash != &Hash::default(), + "BUG: recent blockhash is not set for the transaction" + ); + let processed_status = loop { + let status = self + .client + .get_signature_status_with_commitment( + &sig, + CommitmentConfig::processed(), + ) + .await?; + + let check_for_processed_interval = check_for_processed_interval + .unwrap_or_else(|| Duration::from_millis(200)); + match status { + Some(status) => break status, + None => { + if let Some(wait_for_blockhash_to_become_valid) = + wait_for_blockhash_to_become_valid + { + let blockhash_found = self + .client + .is_blockhash_valid( + recent_blockhash, + CommitmentConfig::processed(), + ) + .await?; + if !blockhash_found + && &start.elapsed() + < wait_for_blockhash_to_become_valid + { + trace!( + "Waiting for blockhash {} to become valid", + recent_blockhash + ); + tokio::time::sleep(Duration::from_millis(400)) + .await; + continue; + } else if start.elapsed() + < wait_for_processed_level.unwrap_or_default() + { + tokio::time::sleep(check_for_processed_interval) + .await; + continue; + } else { + return Err(MagicBlockRpcClientError::CannotGetTransactionSignatureStatus( + sig, + format!("blockhash {} found", if blockhash_found { + "was" + } else { + "was not" + }), + )); + } + } else { + return 
Err(MagicBlockRpcClientError::CannotGetTransactionSignatureStatus( + sig, + "timed out finding blockhash".to_string() + )); + } + } + } + }; + + if let Err(err) = processed_status { + return Err(MagicBlockRpcClientError::SentTransactionError( + err, sig, + )); + } + + // 2. At this point we know the transaction isn't failing + // and just wait for desired status + let confirmed_status = if let Some(wait_for_commitment_level) = + wait_for_commitment_level + { + let now = Instant::now(); + let check_for_commitment_interval = check_for_commitment_interval + .unwrap_or_else(|| Duration::from_millis(200)); + loop { + let confirmed_status = self + .client + .get_signature_status_with_commitment( + &sig, + self.client.commitment(), + ) + .await?; + + if let Some(confirmed_status) = confirmed_status { + break Some(confirmed_status); + } + + if &now.elapsed() < wait_for_commitment_level { + tokio::time::sleep(check_for_commitment_interval).await; + continue; + } else { + return Err(MagicBlockRpcClientError::CannotConfirmTransactionSignatureStatus( + sig, + self.client.commitment().commitment, + )); + } + } + } else { + None + }; + + Ok(MagicBlockSendTransactionOutcome { + signature: sig, + processed_err: processed_status.err(), + confirmed_err: confirmed_status.and_then(|status| status.err()), + }) + } +} diff --git a/magicblock-table-mania/Cargo.toml b/magicblock-table-mania/Cargo.toml new file mode 100644 index 00000000..c9866425 --- /dev/null +++ b/magicblock-table-mania/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "magicblock-table-mania" +version.workspace = true +authors.workspace = true +repository.workspace = true +homepage.workspace = true +license.workspace = true +edition.workspace = true + +[dependencies] +ed25519-dalek = { workspace = true } +log = { workspace = true } +magicblock-rpc-client = { workspace = true } +rand = { workspace = true } +sha3 = { workspace = true } +solana-pubkey = { workspace = true } +solana-rpc-client = { workspace = true } 
+solana-rpc-client-api = { workspace = true } +solana-sdk = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true } + +[dev-dependencies] +env_logger = { workspace = true } +paste = { workspace = true } +tokio = { workspace = true, features = ["rt", "macros"] } + +[features] +default = [] +test_table_close = [] +# Needed to allow multiple tests to run in parallel without trying to +# use the same lookup table address +randomize_lookup_table_slot = [] diff --git a/magicblock-table-mania/src/derive_keypair.rs b/magicblock-table-mania/src/derive_keypair.rs new file mode 100644 index 00000000..be3315ea --- /dev/null +++ b/magicblock-table-mania/src/derive_keypair.rs @@ -0,0 +1,60 @@ +use ed25519_dalek::{PublicKey, SecretKey}; +use solana_sdk::{clock::Slot, signature::Keypair, signer::Signer}; + +pub fn derive_keypair( + authority: &Keypair, + slot: Slot, + sub_slot: Slot, +) -> Keypair { + let mut seeds = authority.pubkey().to_bytes().to_vec(); + seeds.extend_from_slice(&slot.to_le_bytes()); + seeds.extend_from_slice(&sub_slot.to_le_bytes()); + derive_from_keypair(authority, &seeds) +} + +fn derive_from_keypair(keypair: &Keypair, message: &[u8]) -> Keypair { + let sig = keypair.sign_message(message); + derive_insecure(sig.as_ref()) +} + +fn derive_insecure(message: &[u8]) -> Keypair { + let hash = ::digest(message); + let seed = &hash.as_slice()[0..32]; + + // Create a keypair using the seed bytes + let secret = SecretKey::from_bytes(seed).unwrap(); + let public = PublicKey::from(&secret); + + // Convert to Solana Keypair format + let mut keypair_bytes = [0u8; 64]; + keypair_bytes[0..32].copy_from_slice(secret.as_bytes()); + keypair_bytes[32..64].copy_from_slice(&public.to_bytes()); + + Keypair::from_bytes(&keypair_bytes).unwrap() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_derive_keypair_is_deterministic() { + let authority = Keypair::new(); + let mut first = vec![]; + for slot in 0..100 { + for sub_slot in 0..100 
{ + let keypair = derive_keypair(&authority, slot, sub_slot); + first.push(keypair.to_bytes()); + } + } + let mut second = vec![]; + for slot in 0..100 { + for sub_slot in 0..100 { + let keypair = derive_keypair(&authority, slot, sub_slot); + second.push(keypair.to_bytes()); + } + } + + assert_eq!(first, second); + } +} diff --git a/magicblock-table-mania/src/error.rs b/magicblock-table-mania/src/error.rs new file mode 100644 index 00000000..dee39652 --- /dev/null +++ b/magicblock-table-mania/src/error.rs @@ -0,0 +1,27 @@ +use solana_pubkey::Pubkey; +use thiserror::Error; + +pub type TableManiaResult = std::result::Result; + +#[derive(Error, Debug)] +pub enum TableManiaError { + #[error("MagicBlockRpcClientError: {0} ({0:?})")] + MagicBlockRpcClientError( + #[from] magicblock_rpc_client::MagicBlockRpcClientError, + ), + + #[error("Cannot extend deactivated table {0}.")] + CannotExtendDeactivatedTable(Pubkey), + + #[error("Can only use one authority for a TableMania instance. {0} does not match {1}.")] + InvalidAuthority(Pubkey, Pubkey), + + #[error("Can only extend by {0} pubkeys at a time, but was provided {1}")] + MaxExtendPubkeysExceeded(usize, usize), + + #[error("Timed out waiting for remote tables to update: {0}")] + TimedOutWaitingForRemoteTablesToUpdate(String), + + #[error("Timed out waiting for local tables to update: {0}")] + TimedOutWaitingForLocalTablesToUpdate(String), +} diff --git a/magicblock-table-mania/src/find_tables.rs b/magicblock-table-mania/src/find_tables.rs new file mode 100644 index 00000000..75e0c9b2 --- /dev/null +++ b/magicblock-table-mania/src/find_tables.rs @@ -0,0 +1,47 @@ +use magicblock_rpc_client::{MagicBlockRpcClientResult, MagicblockRpcClient}; +use solana_pubkey::Pubkey; +use solana_sdk::{ + address_lookup_table::instruction::derive_lookup_table_address, + clock::Slot, signature::Keypair, signer::Signer, +}; + +use crate::LookupTable; + +pub struct FindOpenTablesOutcome { + pub addresses_searched: Vec, + pub tables: Vec, +} + 
+pub async fn find_open_tables( + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + min_slot: Slot, + max_slot: Slot, + sub_slots_per_slot: u64, +) -> MagicBlockRpcClientResult { + let addresses_searched = + (min_slot..max_slot).fold(Vec::new(), |mut addresses, slot| { + for sub_slot in 0..sub_slots_per_slot { + let derived_auth = + LookupTable::derive_keypair(authority, slot, sub_slot); + let (table_address, _) = + derive_lookup_table_address(&derived_auth.pubkey(), slot); + addresses.push(table_address); + } + addresses + }); + + let mut tables = Vec::new(); + let accounts = rpc_client + .get_multiple_accounts(&addresses_searched, None) + .await?; + for (pubkey, account) in addresses_searched.iter().zip(accounts.iter()) { + if account.is_some() { + tables.push(*pubkey); + } + } + Ok(FindOpenTablesOutcome { + addresses_searched, + tables, + }) +} diff --git a/magicblock-table-mania/src/lib.rs b/magicblock-table-mania/src/lib.rs new file mode 100644 index 00000000..f88fb6c9 --- /dev/null +++ b/magicblock-table-mania/src/lib.rs @@ -0,0 +1,10 @@ +mod derive_keypair; +pub mod error; +mod find_tables; +mod lookup_table; +mod lookup_table_rc; +mod manager; + +pub use find_tables::find_open_tables; +pub use lookup_table::LookupTable; +pub use manager::*; diff --git a/magicblock-table-mania/src/lookup_table.rs b/magicblock-table-mania/src/lookup_table.rs new file mode 100644 index 00000000..3c1e5406 --- /dev/null +++ b/magicblock-table-mania/src/lookup_table.rs @@ -0,0 +1,535 @@ +use log::*; +use std::fmt; +use std::sync::Mutex; + +use crate::derive_keypair; +use crate::error::{TableManiaError, TableManiaResult}; +use magicblock_rpc_client::MagicBlockRpcClientError; +use magicblock_rpc_client::{ + MagicBlockSendTransactionConfig, MagicblockRpcClient, +}; +use solana_pubkey::Pubkey; +use solana_sdk::address_lookup_table::state::{ + LookupTableMeta, LOOKUP_TABLE_MAX_ADDRESSES, +}; +use solana_sdk::commitment_config::CommitmentLevel; +use 
solana_sdk::slot_hashes::MAX_ENTRIES; +use solana_sdk::{ + address_lookup_table as alt, + clock::Slot, + signature::{Keypair, Signature}, + signer::Signer, + transaction::Transaction, +}; + +/// Determined via trial and error. The keys themselves take up +/// 27 * 32 bytes = 864 bytes. +pub const MAX_ENTRIES_AS_PART_OF_EXTEND: u64 = 27; + +#[derive(Debug)] +pub enum LookupTable { + Active { + derived_auth: Keypair, + table_address: Pubkey, + pubkeys: Mutex>, + creation_slot: u64, + creation_sub_slot: u64, + init_signature: Signature, + extend_signatures: Vec, + }, + Deactivated { + derived_auth: Keypair, + table_address: Pubkey, + deactivation_slot: u64, + deactivate_signature: Signature, + }, +} + +impl fmt::Display for LookupTable { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Active { + derived_auth, + table_address, + pubkeys, + creation_slot, + creation_sub_slot, + init_signature, + extend_signatures, + } => { + let comma_separated_pubkeys = pubkeys + .lock() + .expect("pubkeys mutex poisoned") + .iter() + .map(|x| x.to_string()) + .collect::>() + .join(", "); + let comma_separated_sigs = extend_signatures + .iter() + .map(|x| x.to_string()) + .collect::>() + .join(", "); + write!( + f, + "LookupTable: Active {{ + derived_auth: {} + table_address: {} + pubkeys: {} + creation_slot: {} + creation_sub_slot: {} + init_signature: {} + extend_signatures: {} +}}", + derived_auth.pubkey(), + table_address, + comma_separated_pubkeys, + creation_slot, + creation_sub_slot, + init_signature, + comma_separated_sigs + ) + } + Self::Deactivated { + derived_auth, + table_address, + deactivation_slot, + deactivate_signature, + } => { + write!( + f, + "LookupTable: Deactivated {{ derived_auth: {}, table_address: {}, deactivation_slot: {}, deactivate_signature: {} }}", + derived_auth.pubkey(), + table_address, + deactivation_slot, + deactivate_signature, + ) + } + } + } +} + +impl LookupTable { + pub fn derived_auth(&self) -> &Keypair { + 
match self { + Self::Active { derived_auth, .. } => derived_auth, + Self::Deactivated { derived_auth, .. } => derived_auth, + } + } + pub fn table_address(&self) -> &Pubkey { + match self { + Self::Active { table_address, .. } => table_address, + Self::Deactivated { table_address, .. } => table_address, + } + } + + /// All pubkeys requested, no matter of the `reqid`. + /// The same pubkey might be included twice if requested with different `reqid`. + pub fn pubkeys(&self) -> Option> { + match self { + Self::Active { pubkeys, .. } => { + Some(pubkeys.lock().expect("pubkeys mutex poisoned").to_vec()) + } + Self::Deactivated { .. } => None, + } + } + + pub fn creation_slot(&self) -> Option { + match self { + Self::Active { creation_slot, .. } => Some(*creation_slot), + Self::Deactivated { .. } => None, + } + } + + pub fn has_more_capacity(&self) -> bool { + self.pubkeys() + .is_some_and(|x| x.len() < LOOKUP_TABLE_MAX_ADDRESSES) + } + + pub fn contains(&self, pubkey: &Pubkey, _reqid: u64) -> bool { + match self { + Self::Active { pubkeys, .. } => pubkeys + .lock() + .expect("pubkeys mutex poisoned") + .contains(pubkey), + Self::Deactivated { .. } => false, + } + } + + /// Returns `true` if the we requested to deactivate this table. + /// NOTE: this doesn't mean that the deactivation perios passed, thus + /// the table could still be considered _deactivating_ on chain. + pub fn deactivate_triggered(&self) -> bool { + use LookupTable::*; + matches!(self, Deactivated { .. }) + } + + pub fn is_active(&self) -> bool { + use LookupTable::*; + matches!(self, Active { .. }) + } + + pub fn derive_keypair( + authority: &Keypair, + slot: Slot, + sub_slot: Slot, + ) -> Keypair { + derive_keypair::derive_keypair(authority, slot, sub_slot) + } + + /// Initializes an address lookup table deriving its authority from the provided + /// [authority] keypair. The table is extended with the provided [pubkeys]. + /// The [authority] keypair pays for the transaction. 
+ /// + /// - **rpc_client**: RPC client to use for sending transactions + /// - **authority**: Keypair to derive the authority of the lookup table + /// - **latest_slot**: the on chain slot at which we are creating the table + /// - **sub_slot**: a bump to allow creating multiple lookup tables with the same authority + /// at the same slot + /// - **pubkeys**: to extend the lookup table respecting respecting + /// solana_sdk::address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES] + /// after it is initialized + /// - **reqid**: id of the request adding the pubkeys + pub async fn init( + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + latest_slot: Slot, + sub_slot: Slot, + pubkeys: &[Pubkey], + _reqid: u64, + ) -> TableManiaResult { + check_max_pubkeys(pubkeys)?; + + let derived_auth = + Self::derive_keypair(authority, latest_slot, sub_slot); + + let (create_ix, table_address) = alt::instruction::create_lookup_table( + derived_auth.pubkey(), + authority.pubkey(), + latest_slot, + ); + + let end = pubkeys.len().min(LOOKUP_TABLE_MAX_ADDRESSES); + let extend_ix = alt::instruction::extend_lookup_table( + table_address, + derived_auth.pubkey(), + Some(authority.pubkey()), + pubkeys[..end].to_vec(), + ); + + let ixs = vec![create_ix, extend_ix]; + let latest_blockhash = rpc_client.get_latest_blockhash().await?; + let tx = Transaction::new_signed_with_payer( + &ixs, + Some(&authority.pubkey()), + &[authority, &derived_auth], + latest_blockhash, + ); + + let outcome = rpc_client + .send_transaction(&tx, &Self::get_commitment(rpc_client)) + .await?; + let (signature, error) = outcome.into_signature_and_error(); + if let Some(error) = &error { + error!( + "Error initializing lookup table: {:?} ({})", + error, signature + ); + return Err(MagicBlockRpcClientError::SentTransactionError( + error.clone(), + signature, + ) + .into()); + } + + Ok(Self::Active { + derived_auth, + table_address, + pubkeys: Mutex::new(pubkeys.to_vec()), + creation_slot: latest_slot, + 
creation_sub_slot: sub_slot, + init_signature: signature, + extend_signatures: vec![], + }) + } + + fn get_commitment( + rpc_client: &MagicblockRpcClient, + ) -> MagicBlockSendTransactionConfig { + use CommitmentLevel::*; + match rpc_client.commitment_level() { + Processed => MagicBlockSendTransactionConfig::ensure_processed(), + Confirmed | Finalized => { + MagicBlockSendTransactionConfig::ensure_committed() + } + } + } + + /// Extends this lookup table with the provided [pubkeys]. + /// The transaction is signed with the [Self::derived_auth]. + /// + /// - **rpc_client**: RPC client to use for sending the extend transaction + /// - **authority**: payer for the the extend transaction + /// - **pubkeys**: to extend the lookup table with + /// - **reqid**: id of the request adding the pubkeys + pub async fn extend( + &self, + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + extra_pubkeys: &[Pubkey], + _reqid: u64, + ) -> TableManiaResult<()> { + use LookupTable::*; + + check_max_pubkeys(extra_pubkeys)?; + + let pubkeys = match self { + Active { pubkeys, .. } => pubkeys, + Deactivated { .. 
} => { + return Err(TableManiaError::CannotExtendDeactivatedTable( + *self.table_address(), + )); + } + }; + let extend_ix = alt::instruction::extend_lookup_table( + *self.table_address(), + self.derived_auth().pubkey(), + Some(authority.pubkey()), + extra_pubkeys.to_vec(), + ); + + let ixs = vec![extend_ix]; + let latest_blockhash = rpc_client.get_latest_blockhash().await?; + let tx = Transaction::new_signed_with_payer( + &ixs, + Some(&authority.pubkey()), + &[authority, self.derived_auth()], + latest_blockhash, + ); + + let outcome = rpc_client + .send_transaction(&tx, &Self::get_commitment(rpc_client)) + .await?; + let (signature, error) = outcome.into_signature_and_error(); + if let Some(error) = &error { + error!("Error extending lookup table: {:?} ({})", error, signature); + return Err(MagicBlockRpcClientError::SentTransactionError( + error.clone(), + signature, + ) + .into()); + } else { + pubkeys + .lock() + .expect("pubkeys mutex poisoned") + .extend(extra_pubkeys); + } + + Ok(()) + } + + /// Extends this lookup table with the portion of the provided [pubkeys] that + /// fits into the table respecting [solana_sdk::address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES]. + /// + /// The transaction is signed with the [Self::derived_auth]. 
+ /// + /// - **rpc_client**: RPC client to use for sending the extend transaction + /// - **authority**: payer for the the extend transaction + /// - **pubkeys**: to extend the lookup table with + /// - **reqid**: id of the request adding the pubkeys + /// + /// Returns: the pubkeys that were added to the table + pub async fn extend_respecting_capacity( + &self, + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + pubkeys: &[Pubkey], + reqid: u64, + ) -> TableManiaResult> { + let Some(len) = self.pubkeys().map(|x| x.len()) else { + return Err(TableManiaError::CannotExtendDeactivatedTable( + *self.table_address(), + )); + }; + let remaining_capacity = LOOKUP_TABLE_MAX_ADDRESSES.saturating_sub(len); + if remaining_capacity == 0 { + return Ok(vec![]); + } + + let storing = if pubkeys.len() >= remaining_capacity { + let (storing, _) = pubkeys.split_at(remaining_capacity); + storing + } else { + pubkeys + }; + + let res = self.extend(rpc_client, authority, storing, reqid).await; + res.map(|_| storing.to_vec()) + } + + /// Deactivates this lookup table. 
+ /// + /// - **rpc_client**: RPC client to use for sending the deactivate transaction + /// - **authority**: pays for the the deactivate transaction + pub async fn deactivate( + &mut self, + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + ) -> TableManiaResult<()> { + let deactivate_ix = alt::instruction::deactivate_lookup_table( + *self.table_address(), + self.derived_auth().pubkey(), + ); + let ixs = vec![deactivate_ix]; + let latest_blockhash = rpc_client.get_latest_blockhash().await?; + let tx = Transaction::new_signed_with_payer( + &ixs, + Some(&authority.pubkey()), + &[authority, self.derived_auth()], + latest_blockhash, + ); + + let outcome = rpc_client + .send_transaction(&tx, &Self::get_commitment(rpc_client)) + .await?; + let (signature, error) = outcome.into_signature_and_error(); + if let Some(error) = &error { + error!( + "Error deactivating lookup table: {:?} ({})", + error, signature + ); + } + + let slot = rpc_client.get_slot().await?; + *self = Self::Deactivated { + derived_auth: self.derived_auth().insecure_clone(), + table_address: *self.table_address(), + deactivation_slot: slot, + deactivate_signature: signature, + }; + + Ok(()) + } + + /// Checks if this lookup table is deactivated via the following: + /// + /// 1. was [Self::deactivate] called + /// 2. is the [LookupTable::Deactivated::deactivation_slot] far enough in the past + pub async fn is_deactivated( + &self, + rpc_client: &MagicblockRpcClient, + current_slot: Option, + ) -> bool { + let Self::Deactivated { + deactivation_slot, .. 
+ } = self + else { + return false; + }; + let slot = { + if let Some(slot) = current_slot { + slot + } else { + let Ok(slot) = rpc_client.get_slot().await else { + return false; + }; + slot + } + }; + // NOTE: the solana exporer will show an account as _deactivated_ once we deactivate it + // even though it is actually _deactivating_ + // I tried to shorten the wait here but found that this is the minimum time needed + // for the table to be considered fully _deactivated_ + let deactivated_slot = deactivation_slot + MAX_ENTRIES as u64; + trace!( + "'{}' deactivates in {} slots", + self.table_address(), + deactivated_slot.saturating_sub(slot), + ); + deactivated_slot <= slot + } + + pub async fn is_closed( + &self, + rpc_client: &MagicblockRpcClient, + ) -> TableManiaResult { + let acc = rpc_client.get_account(self.table_address()).await?; + Ok(acc.is_none()) + } + + /// Checks if the table was deactivated and if so closes the table account. + /// + /// - **rpc_client**: RPC client to use for sending the close transaction + /// - **authority**: pays for the the close transaction and is refunded the + /// table account rent + /// - **current_slot**: the current slot to use for checking deactivation + pub async fn close( + &self, + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + current_slot: Option, + ) -> TableManiaResult { + if !self.is_deactivated(rpc_client, current_slot).await { + return Ok(false); + } + + let close_ix = alt::instruction::close_lookup_table( + *self.table_address(), + self.derived_auth().pubkey(), + authority.pubkey(), + ); + let ixs = vec![close_ix]; + let latest_blockhash = rpc_client.get_latest_blockhash().await?; + let tx = Transaction::new_signed_with_payer( + &ixs, + Some(&authority.pubkey()), + &[authority, self.derived_auth()], + latest_blockhash, + ); + + let outcome = rpc_client + .send_transaction(&tx, &Self::get_commitment(rpc_client)) + .await?; + + let (signature, error) = outcome.into_signature_and_error(); + if let 
Some(error) = &error { + debug!( + "Error closing lookup table: {:?} ({}) - may need longer deactivation time", + error, signature + ); + } + self.is_closed(rpc_client).await + } + + pub async fn get_meta( + &self, + rpc_client: &MagicblockRpcClient, + ) -> TableManiaResult> { + Ok(rpc_client + .get_lookup_table_meta(self.table_address()) + .await?) + } + + pub async fn get_chain_pubkeys( + &self, + rpc_client: &MagicblockRpcClient, + ) -> TableManiaResult>> { + Self::get_chain_pubkeys_for(rpc_client, self.table_address()).await + } + + pub async fn get_chain_pubkeys_for( + rpc_client: &MagicblockRpcClient, + table_address: &Pubkey, + ) -> TableManiaResult>> { + Ok(rpc_client.get_lookup_table_addresses(table_address).await?) + } +} + +fn check_max_pubkeys(pubkeys: &[Pubkey]) -> TableManiaResult<()> { + if pubkeys.len() > MAX_ENTRIES_AS_PART_OF_EXTEND as usize { + return Err(TableManiaError::MaxExtendPubkeysExceeded( + MAX_ENTRIES_AS_PART_OF_EXTEND as usize, + pubkeys.len(), + )); + } + Ok(()) +} diff --git a/magicblock-table-mania/src/lookup_table_rc.rs b/magicblock-table-mania/src/lookup_table_rc.rs new file mode 100644 index 00000000..94e298cd --- /dev/null +++ b/magicblock-table-mania/src/lookup_table_rc.rs @@ -0,0 +1,708 @@ +use log::*; +use magicblock_rpc_client::{ + MagicBlockRpcClientError, MagicBlockSendTransactionConfig, + MagicblockRpcClient, +}; +use solana_sdk::{ + address_lookup_table::{ + self as alt, + state::{LookupTableMeta, LOOKUP_TABLE_MAX_ADDRESSES}, + }, + clock::Slot, + commitment_config::CommitmentLevel, + signature::{Keypair, Signature}, + signer::Signer, + slot_hashes::MAX_ENTRIES, + transaction::Transaction, +}; +use std::{ + collections::{HashMap, HashSet}, + fmt, + ops::Deref, + sync::{ + atomic::{AtomicUsize, Ordering}, + RwLock, RwLockReadGuard, RwLockWriteGuard, + }, +}; + +use solana_pubkey::Pubkey; + +use crate::{ + derive_keypair, + error::{TableManiaError, TableManiaResult}, +}; + +// ----------------- +// RefcountedPubkeys +// 
----------------- + +/// A map of reference counted pubkeys that can be used to track the number of +/// reservations that exist for a pubkey in a lookup table +pub struct RefcountedPubkeys { + pubkeys: HashMap, +} + +impl RefcountedPubkeys { + fn new(pubkeys: &[Pubkey]) -> Self { + Self { + pubkeys: pubkeys + .iter() + .map(|pubkey| (*pubkey, AtomicUsize::new(1))) + .collect(), + } + } + + /// This should only be called for pubkeys that are not already in this table. + /// It is called when extending a lookup table with pubkeys that were not + /// found in any other table. + fn insert_many(&mut self, pubkeys: &[Pubkey]) { + for pubkey in pubkeys { + debug_assert!( + !self.pubkeys.contains_key(pubkey), + "Pubkey {} already exists in the table", + pubkey + ); + self.pubkeys.insert(*pubkey, AtomicUsize::new(1)); + } + } + + /// Add a reservation to the pubkey if it is part of this table + /// - *pubkey* to reserve + /// - *returns* `true` if the pubkey could be reserved + fn reserve(&self, pubkey: &Pubkey) -> bool { + if let Some(count) = self.pubkeys.get(pubkey) { + count.fetch_add(1, Ordering::SeqCst); + true + } else { + false + } + } + + /// Called when we are done with a pubkey + /// Will decrement the ref count of it or do nothing if the pubkey was + /// not found + /// - *pubkey* to release + /// - *returns* `true` if the pubkey was released + fn release(&self, pubkey: &Pubkey) -> bool { + if let Some(count) = self.pubkeys.get(pubkey) { + count + .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| { + if x == 0 { + None + } else { + Some(x - 1) + } + }) + .is_ok() + } else { + false + } + } + + /// Returns `true` if any of the pubkeys is still in use + fn has_reservations(&self) -> bool { + self.pubkeys + .values() + .any(|rc_pubkey| rc_pubkey.load(Ordering::SeqCst) > 0) + } +} + +impl Deref for RefcountedPubkeys { + type Target = HashMap; + + fn deref(&self) -> &Self::Target { + &self.pubkeys + } +} + +/// Determined via trial and error. 
The keys themselves take up +/// 27 * 32 bytes = 864 bytes. +pub const MAX_ENTRIES_AS_PART_OF_EXTEND: u64 = 27; + +// ----------------- +// LookupTableRc +// ----------------- +pub enum LookupTableRc { + Active { + derived_auth: Keypair, + table_address: Pubkey, + /// Reference counted pubkeys stored inside the [Self::table]. + /// When someone _checks out_ a pubkey the ref count is incremented + /// When it is _returned_ the ref count is decremented. + /// When all pubkeys have ref count 0 the table can be deactivated + pubkeys: RwLock, + creation_slot: u64, + creation_sub_slot: u64, + init_signature: Signature, + extend_signatures: Vec, + }, + Deactivated { + derived_auth: Keypair, + table_address: Pubkey, + deactivation_slot: u64, + deactivate_signature: Signature, + }, +} + +impl fmt::Display for LookupTableRc { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Active { + derived_auth, + table_address, + pubkeys, + creation_slot, + creation_sub_slot, + init_signature, + extend_signatures, + } => { + let comma_separated_pubkeys = pubkeys + .read() + .expect("pubkeys mutex poisoned") + .iter() + .map(|(key, _)| key.to_string()) + .collect::>() + .join(", "); + let comma_separated_sigs = extend_signatures + .iter() + .map(|x| x.to_string()) + .collect::>() + .join(", "); + write!( + f, + "LookupTable: Active {{ + derived_auth: {} + table_address: {} + pubkeys: {} + creation_slot: {} + creation_sub_slot: {} + init_signature: {} + extend_signatures: {} +}}", + derived_auth.pubkey(), + table_address, + comma_separated_pubkeys, + creation_slot, + creation_sub_slot, + init_signature, + comma_separated_sigs + ) + } + Self::Deactivated { + derived_auth, + table_address, + deactivation_slot, + deactivate_signature, + } => { + write!( + f, + "LookupTable: Deactivated {{ derived_auth: {}, table_address: {}, deactivation_slot: {}, deactivate_signature: {} }}", + derived_auth.pubkey(), + table_address, + deactivation_slot, + 
deactivate_signature, + ) + } + } + } +} + +impl LookupTableRc { + pub fn derived_auth(&self) -> &Keypair { + match self { + Self::Active { derived_auth, .. } => derived_auth, + Self::Deactivated { derived_auth, .. } => derived_auth, + } + } + + pub fn table_address(&self) -> &Pubkey { + match self { + Self::Active { table_address, .. } => table_address, + Self::Deactivated { table_address, .. } => table_address, + } + } + + pub fn pubkeys(&self) -> Option> { + match self { + Self::Active { pubkeys, .. } => { + Some(pubkeys.read().expect("pubkeys mutex poisoned")) + } + Self::Deactivated { .. } => None, + } + } + + pub fn pubkeys_mut( + &self, + ) -> Option> { + match self { + Self::Active { pubkeys, .. } => { + Some(pubkeys.write().expect("pubkeys mutex poisoned")) + } + Self::Deactivated { .. } => None, + } + } + + pub fn creation_slot(&self) -> Option { + match self { + Self::Active { creation_slot, .. } => Some(*creation_slot), + Self::Deactivated { .. } => None, + } + } + + /// Returns `true` if the table has more capacity to add pubkeys + pub fn has_more_capacity(&self) -> bool { + self.pubkeys() + .is_some_and(|x| x.len() < LOOKUP_TABLE_MAX_ADDRESSES) + } + + pub fn is_full(&self) -> bool { + !self.has_more_capacity() + } + + pub fn contains_key(&self, pubkey: &Pubkey) -> bool { + self.pubkeys() + .is_some_and(|pubkeys| pubkeys.contains_key(pubkey)) + } + + /// Returns `true` if the table is active and any of the its pubkeys + /// is still in use + pub fn has_reservations(&self) -> bool { + self.pubkeys() + .is_some_and(|pubkeys| pubkeys.has_reservations()) + } + + pub fn provides(&self, pubkey: &Pubkey) -> bool { + self.pubkeys().is_some_and(|pubkeys| { + pubkeys + .get(pubkey) + .is_some_and(|count| count.load(Ordering::SeqCst) > 0) + }) + } + + /// Returns `true` if the we requested to deactivate this table. + /// NOTE: this doesn't mean that the deactivation perios passed, thus + /// the table could still be considered _deactivating_ on chain. 
+ pub fn deactivate_triggered(&self) -> bool { + use LookupTableRc::*; + matches!(self, Deactivated { .. }) + } + + pub fn is_active(&self) -> bool { + use LookupTableRc::*; + matches!(self, Active { .. }) + } + + pub fn derive_keypair( + authority: &Keypair, + slot: Slot, + sub_slot: Slot, + ) -> Keypair { + derive_keypair::derive_keypair(authority, slot, sub_slot) + } + + /// Reserves the pubkey if it is part of this table. + /// - *pubkey* to reserve + /// - *returns* `true` if the pubkey could be reserved + pub fn reserve_pubkey(&self, pubkey: &Pubkey) -> bool { + self.pubkeys() + .is_some_and(|pubkeys| pubkeys.reserve(pubkey)) + } + + /// Releases one reservation for the given pubkey if it is part of this table + /// and has at least one reservation. + /// - *pubkey* to release + /// - *returns* `true` if the pubkey was released + pub fn release_pubkey(&self, pubkey: &Pubkey) -> bool { + self.pubkeys() + .is_some_and(|pubkeys| pubkeys.release(pubkey)) + } + + /// Matches pubkeys from the given set against the pubkeys it has reserved. + /// NOTE: the caller is responsible to hold a reservation to each pubkey it + /// is requesting to match against + pub fn match_pubkeys( + &self, + requested_pubkeys: &HashSet, + ) -> HashSet { + match self.pubkeys() { + Some(pubkeys) => requested_pubkeys + .iter() + .filter(|pubkey| pubkeys.contains_key(pubkey)) + .cloned() + .collect::>(), + None => HashSet::new(), + } + } + + /// Initializes an address lookup table deriving its authority from the provided + /// [authority] keypair. The table is extended with the provided [pubkeys]. + /// The [authority] keypair pays for the transaction. + /// + /// It is expectected that the provided pubkeys were not found in any other lookup + /// table nor in this one. + /// They are automatically reserved for one requestor. 
+ /// + /// - **rpc_client**: RPC client to use for sending transactions + /// - **authority**: Keypair to derive the authority of the lookup table + /// - **latest_slot**: the on chain slot at which we are creating the table + /// - **sub_slot**: a bump to allow creating multiple lookup tables with the same authority + /// the same slot + /// - **pubkeys**: to extend the lookup table respecting respecting + /// [solana_sdk::address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES] + /// after it is initialized + pub async fn init( + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + latest_slot: Slot, + sub_slot: Slot, + pubkeys: &[Pubkey], + ) -> TableManiaResult { + check_max_pubkeys(pubkeys)?; + + let derived_auth = + Self::derive_keypair(authority, latest_slot, sub_slot); + + let (create_ix, table_address) = alt::instruction::create_lookup_table( + derived_auth.pubkey(), + authority.pubkey(), + latest_slot, + ); + trace!("Initializing lookup table {}", table_address); + + let end = pubkeys.len().min(LOOKUP_TABLE_MAX_ADDRESSES); + let extend_ix = alt::instruction::extend_lookup_table( + table_address, + derived_auth.pubkey(), + Some(authority.pubkey()), + pubkeys[..end].to_vec(), + ); + + let ixs = vec![create_ix, extend_ix]; + let latest_blockhash = rpc_client.get_latest_blockhash().await?; + let tx = Transaction::new_signed_with_payer( + &ixs, + Some(&authority.pubkey()), + &[authority, &derived_auth], + latest_blockhash, + ); + + let outcome = rpc_client + .send_transaction( + &tx, + &Self::get_send_transaction_config(rpc_client), + ) + .await?; + let (signature, error) = outcome.into_signature_and_error(); + if let Some(error) = &error { + error!( + "Error initializing lookup table: {:?} ({})", + error, signature + ); + return Err(MagicBlockRpcClientError::SentTransactionError( + error.clone(), + signature, + ) + .into()); + } + + Ok(Self::Active { + derived_auth, + table_address, + pubkeys: RwLock::new(RefcountedPubkeys::new(pubkeys)), + 
creation_slot: latest_slot, + creation_sub_slot: sub_slot, + init_signature: signature, + extend_signatures: vec![], + }) + } + + /// Extends this lookup table with the provided [pubkeys]. + /// The transaction is signed with the [Self::derived_auth]. + /// + /// It is expectected that the provided pubkeys were not found in any other lookup + /// table nor in this one. + /// They are automatically reserved for one requestor. + /// + /// - **rpc_client**: RPC client to use for sending the extend transaction + /// - **authority**: payer for the the extend transaction + /// - **pubkeys**: to extend the lookup table with + pub async fn extend( + &self, + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + extra_pubkeys: &[Pubkey], + ) -> TableManiaResult<()> { + use LookupTableRc::*; + + check_max_pubkeys(extra_pubkeys)?; + + let pubkeys = match self { + Active { pubkeys, .. } => pubkeys, + Deactivated { .. } => { + return Err(TableManiaError::CannotExtendDeactivatedTable( + *self.table_address(), + )); + } + }; + let extend_ix = alt::instruction::extend_lookup_table( + *self.table_address(), + self.derived_auth().pubkey(), + Some(authority.pubkey()), + extra_pubkeys.to_vec(), + ); + + let ixs = vec![extend_ix]; + let latest_blockhash = rpc_client.get_latest_blockhash().await?; + let tx = Transaction::new_signed_with_payer( + &ixs, + Some(&authority.pubkey()), + &[authority, self.derived_auth()], + latest_blockhash, + ); + + let outcome = rpc_client + .send_transaction( + &tx, + &Self::get_send_transaction_config(rpc_client), + ) + .await?; + let (signature, error) = outcome.into_signature_and_error(); + if let Some(error) = &error { + error!("Error extending lookup table: {:?} ({})", error, signature); + return Err(MagicBlockRpcClientError::SentTransactionError( + error.clone(), + signature, + ) + .into()); + } else { + pubkeys + .write() + .expect("pubkeys rwlock poisoned") + .insert_many(extra_pubkeys); + } + + Ok(()) + } + + /// Extends this lookup table 
with the portion of the provided [pubkeys] that + /// fits into the table respecting [solana_sdk::address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES]. + /// + /// The transaction is signed with the [Self::derived_auth]. + /// + /// - **rpc_client**: RPC client to use for sending the extend transaction + /// - **authority**: payer for the the extend transaction + /// - **pubkeys**: to extend the lookup table with + /// + /// Returns: the pubkeys that were added to the table + pub async fn extend_respecting_capacity( + &self, + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + pubkeys: &[Pubkey], + ) -> TableManiaResult> { + let Some(len) = self.pubkeys().map(|x| x.len()) else { + return Err(TableManiaError::CannotExtendDeactivatedTable( + *self.table_address(), + )); + }; + let remaining_capacity = LOOKUP_TABLE_MAX_ADDRESSES.saturating_sub(len); + if remaining_capacity == 0 { + return Ok(vec![]); + } + + let storing = if pubkeys.len() >= remaining_capacity { + let (storing, _) = pubkeys.split_at(remaining_capacity); + storing + } else { + pubkeys + }; + + let res = self.extend(rpc_client, authority, storing).await; + res.map(|_| storing.to_vec()) + } + + /// Deactivates this lookup table. 
+ /// + /// - **rpc_client**: RPC client to use for sending the deactivate transaction + /// - **authority**: pays for the the deactivate transaction + pub async fn deactivate( + &mut self, + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + ) -> TableManiaResult<()> { + let deactivate_ix = alt::instruction::deactivate_lookup_table( + *self.table_address(), + self.derived_auth().pubkey(), + ); + let ixs = vec![deactivate_ix]; + let latest_blockhash = rpc_client.get_latest_blockhash().await?; + let tx = Transaction::new_signed_with_payer( + &ixs, + Some(&authority.pubkey()), + &[authority, self.derived_auth()], + latest_blockhash, + ); + + let outcome = rpc_client + .send_transaction( + &tx, + &Self::get_send_transaction_config(rpc_client), + ) + .await?; + let (signature, error) = outcome.into_signature_and_error(); + if let Some(error) = &error { + error!( + "Error deactivating lookup table: {:?} ({})", + error, signature + ); + } + + let slot = rpc_client.get_slot().await?; + *self = Self::Deactivated { + derived_auth: self.derived_auth().insecure_clone(), + table_address: *self.table_address(), + deactivation_slot: slot, + deactivate_signature: signature, + }; + + Ok(()) + } + + /// Checks if this lookup table is deactivated via the following: + /// + /// 1. was [Self::deactivate] called + /// 2. is the [LookupTable::Deactivated::deactivation_slot] far enough in the past + pub async fn is_deactivated( + &self, + rpc_client: &MagicblockRpcClient, + current_slot: Option, + ) -> bool { + let Self::Deactivated { + deactivation_slot, .. 
+ } = self + else { + return false; + }; + let slot = { + if let Some(slot) = current_slot { + slot + } else { + let Ok(slot) = rpc_client.get_slot().await else { + return false; + }; + slot + } + }; + // NOTE: the solana exporer will show an account as _deactivated_ once we deactivate it + // even though it is actually _deactivating_ + // I tried to shorten the wait here but found that this is the minimum time needed + // for the table to be considered fully _deactivated_ + let deactivated_slot = deactivation_slot + MAX_ENTRIES as u64; + trace!( + "'{}' deactivates in {} slots", + self.table_address(), + deactivated_slot.saturating_sub(slot), + ); + deactivated_slot <= slot + } + + pub async fn is_closed( + &self, + rpc_client: &MagicblockRpcClient, + ) -> TableManiaResult { + let acc = rpc_client.get_account(self.table_address()).await?; + Ok(acc.is_none()) + } + + /// Checks if the table was deactivated and if so closes the table account. + /// + /// - **rpc_client**: RPC client to use for sending the close transaction + /// - **authority**: pays for the the close transaction and is refunded the + /// table account rent + /// - **current_slot**: the current slot to use for checking deactivation + pub async fn close( + &self, + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + current_slot: Option, + ) -> TableManiaResult { + if !self.is_deactivated(rpc_client, current_slot).await { + return Ok(false); + } + + let close_ix = alt::instruction::close_lookup_table( + *self.table_address(), + self.derived_auth().pubkey(), + authority.pubkey(), + ); + let ixs = vec![close_ix]; + let latest_blockhash = rpc_client.get_latest_blockhash().await?; + let tx = Transaction::new_signed_with_payer( + &ixs, + Some(&authority.pubkey()), + &[authority, self.derived_auth()], + latest_blockhash, + ); + + let outcome = rpc_client + .send_transaction( + &tx, + &Self::get_send_transaction_config(rpc_client), + ) + .await?; + + let (signature, error) = 
outcome.into_signature_and_error(); + if let Some(error) = &error { + debug!( + "Error closing lookup table: {:?} ({}) - may need longer deactivation time", + error, signature + ); + } + self.is_closed(rpc_client).await + } + + pub async fn get_meta( + &self, + rpc_client: &MagicblockRpcClient, + ) -> TableManiaResult> { + Ok(rpc_client + .get_lookup_table_meta(self.table_address()) + .await?) + } + + pub async fn get_chain_pubkeys( + &self, + rpc_client: &MagicblockRpcClient, + ) -> TableManiaResult>> { + Self::get_chain_pubkeys_for(rpc_client, self.table_address()).await + } + + pub async fn get_chain_pubkeys_for( + rpc_client: &MagicblockRpcClient, + table_address: &Pubkey, + ) -> TableManiaResult>> { + Ok(rpc_client.get_lookup_table_addresses(table_address).await?) + } + + fn get_send_transaction_config( + rpc_client: &MagicblockRpcClient, + ) -> MagicBlockSendTransactionConfig { + use CommitmentLevel::*; + match rpc_client.commitment_level() { + Processed => MagicBlockSendTransactionConfig::ensure_processed(), + Confirmed | Finalized => { + MagicBlockSendTransactionConfig::ensure_committed() + } + } + } +} + +fn check_max_pubkeys(pubkeys: &[Pubkey]) -> TableManiaResult<()> { + if pubkeys.len() > MAX_ENTRIES_AS_PART_OF_EXTEND as usize { + return Err(TableManiaError::MaxExtendPubkeysExceeded( + MAX_ENTRIES_AS_PART_OF_EXTEND as usize, + pubkeys.len(), + )); + } + Ok(()) +} diff --git a/magicblock-table-mania/src/manager.rs b/magicblock-table-mania/src/manager.rs new file mode 100644 index 00000000..e9eb2f9d --- /dev/null +++ b/magicblock-table-mania/src/manager.rs @@ -0,0 +1,702 @@ +use log::*; +use std::{ + collections::{HashMap, HashSet}, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; + +use magicblock_rpc_client::MagicblockRpcClient; +use solana_pubkey::Pubkey; +use solana_sdk::{ + address_lookup_table::state::AddressLookupTable, + commitment_config::CommitmentConfig, message::AddressLookupTableAccount, + 
signature::Keypair, signer::Signer, +}; +use tokio::{ + sync::{Mutex, RwLock}, + time::sleep, +}; + +use crate::{ + error::{TableManiaError, TableManiaResult}, + lookup_table_rc::{LookupTableRc, MAX_ENTRIES_AS_PART_OF_EXTEND}, +}; + +// ----------------- +// GarbageCollectorConfig +// ----------------- + +/// Configures the Garbage Collector which deactivates and then closes +/// lookup tables whose pubkeys have been released. +#[derive(Debug, Clone)] +pub struct GarbageCollectorConfig { + /// The interval at which to check for tables to deactivate. + pub deactivate_interval_ms: u64, + /// The interval at which to check for deactivated tables to close. + pub close_interval_ms: u64, +} + +impl Default for GarbageCollectorConfig { + fn default() -> Self { + Self { + deactivate_interval_ms: 1_000, + close_interval_ms: 5_000, + } + } +} + +#[derive(Clone)] +pub struct TableMania { + pub active_tables: Arc>>, + released_tables: Arc>>, + authority_pubkey: Pubkey, + pub rpc_client: MagicblockRpcClient, + randomize_lookup_table_slot: bool, +} + +impl TableMania { + pub fn new( + rpc_client: MagicblockRpcClient, + authority: &Keypair, + garbage_collector_config: Option, + ) -> Self { + let me = Self { + active_tables: Arc::>>::default(), + released_tables: Arc::>>::default(), + authority_pubkey: authority.pubkey(), + rpc_client, + randomize_lookup_table_slot: randomize_lookup_table_slot(), + }; + if let Some(config) = garbage_collector_config { + Self::launch_garbage_collector( + &me.rpc_client, + authority, + me.released_tables.clone(), + config, + ); + } + me + } + + /// Returns the number of currently active tables + pub async fn active_tables_count(&self) -> usize { + self.active_tables.read().await.len() + } + + /// Returns the number of released tables + pub async fn released_tables_count(&self) -> usize { + self.released_tables.lock().await.len() + } + + /// Returns the addresses of all tables currently active + pub async fn active_table_addresses(&self) -> Vec { + 
let mut addresses = Vec::new(); + + for table in self.active_tables.read().await.iter() { + addresses.push(*table.table_address()); + } + + addresses + } + + /// Returns the addresses of all released tables + pub async fn released_table_addresses(&self) -> Vec { + self.released_tables + .lock() + .await + .iter() + .map(|table| *table.table_address()) + .collect() + } + + /// Returns the addresses stored accross all active tables + pub async fn active_table_pubkeys(&self) -> Vec { + let mut pubkeys = Vec::new(); + for table in self.active_tables.read().await.iter() { + if let Some(pks) = table.pubkeys() { + pubkeys.extend(pks.keys()); + } + } + pubkeys + } + + // ----------------- + // Reserve + // ----------------- + pub async fn reserve_pubkeys( + &self, + authority: &Keypair, + pubkeys: &HashSet, + ) -> TableManiaResult<()> { + let mut remaining = HashSet::new(); + // 1. Add reservations for pubkeys that are already in one of the tables + for pubkey in pubkeys { + if !self.reserve_pubkey(pubkey).await { + remaining.insert(*pubkey); + } + } + + // 2. Add new reservations for pubkeys that are not in any table + self.reserve_new_pubkeys(authority, &remaining).await + } + + /// Tries to find a table that holds this pubkey already and reserves it. + /// - *pubkey* to reserve + /// - *returns* `true` if the pubkey could be reserved + async fn reserve_pubkey(&self, pubkey: &Pubkey) -> bool { + for table in self.active_tables.read().await.iter() { + if table.reserve_pubkey(pubkey) { + trace!( + "Added reservation for pubkey {} to table {}", + pubkey, + table.table_address() + ); + return true; + } + } + trace!("No table found for which we can reserve pubkey {}", pubkey); + false + } + + /// Reserves pubkeys that haven't been found in any of the active tables. + /// Thus this is considered the first reservation for these pubkeys and thus includes + /// initializing/extending actual lookup tables on chain. 
+ async fn reserve_new_pubkeys( + &self, + authority: &Keypair, + pubkeys: &HashSet, + ) -> TableManiaResult<()> { + self.check_authority(authority)?; + + let mut remaining = pubkeys.iter().cloned().collect::>(); + let mut tables_used = HashSet::new(); + + // Keep trying to store pubkeys until we're done + while !remaining.is_empty() { + // First try to use existing tables + let mut stored_in_existing = false; + { + // Taking a write lock here to prevent multiple tasks from + // updating tables at the same time + let active_tables_write_lock = self.active_tables.write().await; + + // Try to use the last table if it's not full + if let Some(table) = active_tables_write_lock.last() { + if !table.is_full() { + self.extend_table( + table, + authority, + &mut remaining, + &mut tables_used, + ) + .await; + stored_in_existing = true; + } + } + } + + // If we couldn't use existing tables, we need to create a new one + if !stored_in_existing && !remaining.is_empty() { + // We write lock the active tables to ensure that while we create a new + // table the requests looking for an existing table to extend are blocked + let mut active_tables_write_lock = + self.active_tables.write().await; + + // Double-check if a new table was created while we were waiting for the lock + if let Some(table) = active_tables_write_lock.last() { + if !table.is_full() { + // Another task created a table we can use, so drop the write lock + // and try again with the read lock + drop(active_tables_write_lock); + continue; + } + } + + // Create a new table and add it to active_tables + let table = self + .create_new_table_and_extend(authority, &mut remaining) + .await?; + + tables_used.insert(*table.table_address()); + active_tables_write_lock.push(table); + } + + // If we've stored all pubkeys, we're done + if remaining.is_empty() { + break; + } + } + + Ok(()) + } + + /// Extends the table to store as many of the provided pubkeys as possile. 
+ /// The stored pubkeys are removed from the `remaining` vector. + /// If successful the table addres is added to the `tables_used` set. + /// Returns `true` if the table is full after adding the pubkeys + async fn extend_table( + &self, + table: &LookupTableRc, + authority: &Keypair, + remaining: &mut Vec, + tables_used: &mut HashSet, + ) { + let remaining_len = remaining.len(); + let storing_len = + remaining_len.min(MAX_ENTRIES_AS_PART_OF_EXTEND as usize); + trace!( + "Adding {}/{} pubkeys to existing table {}", + storing_len, + remaining_len, + table.table_address() + ); + let table_addresses_count = table.pubkeys().unwrap().len(); + + let storing = remaining[..storing_len].to_vec(); + match table + .extend_respecting_capacity(&self.rpc_client, authority, &storing) + .await + { + Ok(stored) => { + trace!("Stored {}", stored.len()); + tables_used.insert(*table.table_address()); + remaining.retain(|pk| !stored.contains(pk)); + } + // TODO: this could cause us to loop forever as remaining + // is never updated, possibly we need to return an error + // here instead + Err(err) => error!( + "Error extending table {}: {:?}", + table.table_address(), + err + ), + } + let stored_count = remaining_len - remaining.len(); + trace!("Stored {}, remaining: {}", stored_count, remaining.len()); + + debug_assert_eq!( + table_addresses_count + stored_count, + table.pubkeys().unwrap().len() + ); + } + + async fn create_new_table_and_extend( + &self, + authority: &Keypair, + pubkeys: &mut Vec, + ) -> TableManiaResult { + static SUB_SLOT: AtomicU64 = AtomicU64::new(0); + + let pubkeys_len = pubkeys.len(); + let slot = self.rpc_client.get_slot().await?; + + if self.randomize_lookup_table_slot { + use rand::Rng; + let mut rng = rand::thread_rng(); + let random_slot = rng.gen_range(0..=u64::MAX); + SUB_SLOT.store(random_slot, Ordering::Relaxed); + } else { + static LAST_SLOT: AtomicU64 = AtomicU64::new(0); + let prev_last_slot = LAST_SLOT.swap(slot, Ordering::Relaxed); + if 
prev_last_slot != slot { + SUB_SLOT.store(0, Ordering::Relaxed); + } else { + SUB_SLOT.fetch_add(1, Ordering::Relaxed); + } + } + + let len = pubkeys_len.min(MAX_ENTRIES_AS_PART_OF_EXTEND as usize); + let table = LookupTableRc::init( + &self.rpc_client, + authority, + slot, + SUB_SLOT.load(Ordering::Relaxed), + &pubkeys[..len], + ) + .await?; + pubkeys.retain_mut(|pk| !table.contains_key(pk)); + + trace!( + "Created new table and stored {}/{} pubkeys. {}", + len, + pubkeys_len, + table.table_address() + ); + Ok(table) + } + + // ----------------- + // Release + // ----------------- + pub async fn release_pubkeys(&self, pubkeys: &HashSet) { + for pubkey in pubkeys { + self.release_pubkey(pubkey).await; + } + // While we hold the write lock on the active tables no one can make + // a reservation on any of them until we mark them for deactivation. + let mut active_tables = self.active_tables.write().await; + let mut still_active = Vec::new(); + for table in active_tables.drain(..) { + if table.has_reservations() { + still_active.push(table); + } else { + self.released_tables.lock().await.push(table); + } + } + for table in still_active.into_iter() { + active_tables.push(table); + } + } + + async fn release_pubkey(&self, pubkey: &Pubkey) { + for table in self.active_tables.read().await.iter() { + if table.release_pubkey(pubkey) { + trace!( + "Removed reservation for pubkey {} from table {}", + pubkey, + table.table_address() + ); + return; + } + } + trace!("No table found for which we can release pubkey {}", pubkey); + } + + // ----------------- + // Tables for Reserved Pubkeys + // ----------------- + + /// Attempts to find a table that holds each of the pubkeys. + /// It only returns once the needed pubkeys are also present remotely in the + /// finalized table accounts. 
+ /// + /// - *pubkeys* to find tables for + /// - *wait_for_local_table_match* how long to wait for local tables to match which + /// means the [Self::reserve_pubkeys] was completed including any transactions that were sent + /// - *wait_for_remote_table_match* how long to wait for remote tables to include the + /// matched pubkeys + pub async fn try_get_active_address_lookup_table_accounts( + &self, + pubkeys: &HashSet, + wait_for_local_table_match: Duration, + wait_for_remote_table_match: Duration, + ) -> TableManiaResult> { + // 1. Wait until all keys are present in a local table + let matching_tables = { + let start = Instant::now(); + loop { + { + let active_local_tables = self.active_tables.read().await; + let mut keys_to_match = pubkeys.clone(); + let mut matching_tables = HashMap::new(); + for table in active_local_tables.iter() { + let matching_keys = table.match_pubkeys(&keys_to_match); + if !matching_keys.is_empty() { + keys_to_match + .retain(|pk| !matching_keys.contains(pk)); + matching_tables + .insert(*table.table_address(), matching_keys); + } + } + if keys_to_match.is_empty() { + break matching_tables; + } + trace!( + "Matched {}/{} pubkeys", + pubkeys.len() - keys_to_match.len(), + pubkeys.len() + ); + } + if start.elapsed() > wait_for_local_table_match { + error!( + "Timed out waiting for local tables to match requested keys: {:?} for {:?}", + pubkeys, + wait_for_local_table_match, + + ); + return Err( + TableManiaError::TimedOutWaitingForRemoteTablesToUpdate( + format!("{:?}", pubkeys), + ), + ); + } + + sleep(Duration::from_millis(200)).await; + } + }; + + // 2. 
Ensure that all matching keys are also present remotely and have been finalized + let remote_tables = { + let mut last_slot = self.rpc_client.get_slot().await?; + + let matching_table_keys = + matching_tables.keys().cloned().collect::>(); + + let start = Instant::now(); + let table_keys_str = matching_table_keys + .iter() + .map(|x| x.to_string()) + .collect::>() + .join(", "); + + loop { + // Fetch the tables from chain + let remote_table_accs = self + .rpc_client + .get_multiple_accounts_with_commitment( + &matching_table_keys, + // For lookup tables to be useful in a transaction all create/extend + // transactions on the table need to be finalized + CommitmentConfig::finalized(), + None, + ) + .await?; + + let remote_tables = remote_table_accs + .into_iter() + .enumerate() + .flat_map(|(idx, acc)| { + acc.and_then( + |acc| match AddressLookupTable::deserialize( + &acc.data, + ) { + Ok(table) => Some(( + matching_table_keys[idx], + table.addresses.to_vec(), + )), + Err(err) => { + error!( + "Failed to deserialize table {}: {:?}", + matching_table_keys[idx], err + ); + None + } + }, + ) + }) + .collect::>(); + + // Ensure we got the same amount of tables + if remote_tables.len() == matching_tables.len() { + // And that all locally matched keys are in the finalized remote table + let all_matches_are_remote = + matching_tables.iter().all(|(address, local_keys)| { + remote_tables.get(address).is_some_and( + |remote_keys| { + local_keys + .iter() + .all(|pk| remote_keys.contains(pk)) + }, + ) + }); + if all_matches_are_remote { + break remote_tables; + } + } + + if start.elapsed() > wait_for_remote_table_match { + error!( + "Timed out waiting for remote tables to match local tables for {:?}. 
\ + Local: {:#?}\nRemote: {:#?}", + wait_for_remote_table_match, matching_tables, remote_tables + ); + return Err( + TableManiaError::TimedOutWaitingForRemoteTablesToUpdate( + table_keys_str, + ), + ); + } + + if let Ok(slot) = self.rpc_client.wait_for_next_slot().await { + if slot - last_slot > 20 { + debug!( + "Waiting for remote tables {} to match local tables.", + table_keys_str + ); + } + last_slot = slot; + } + } + }; + + Ok(matching_tables + .into_keys() + .map(|address| AddressLookupTableAccount { + key: address, + // SAFETY: we confirmed above that we have a remote table for all matching + // tables and that they contain the addresses we need + addresses: remote_tables.get(&address).unwrap().to_vec(), + }) + .collect()) + } + + // ----------------- + // Garbage Collector + // ----------------- + + // For deactivate/close operations running as part of the garbage collector task + // we only log errors since there is no reasonable way to handle them. + // The next cycle will try the operation again so in case chain was congested + // the problem should resolve itself. + // Otherwise we can run a tool later to manually deactivate + close tables. 
+ + fn launch_garbage_collector( + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + released_tables: Arc>>, + config: GarbageCollectorConfig, + ) -> tokio::task::JoinHandle<()> { + let rpc_client = rpc_client.clone(); + let authority = authority.insecure_clone(); + + tokio::spawn(async move { + let mut last_deactivate = tokio::time::Instant::now(); + let mut last_close = tokio::time::Instant::now(); + let mut sleep_ms = + config.deactivate_interval_ms.min(config.close_interval_ms); + loop { + let now = tokio::time::Instant::now(); + if now + .duration_since(last_deactivate) + .as_millis() + .try_into() + .unwrap_or(u64::MAX) + >= config.deactivate_interval_ms + { + Self::deactivate_tables( + &rpc_client, + &authority, + &released_tables, + ) + .await; + last_deactivate = now; + sleep_ms = sleep_ms.min(config.deactivate_interval_ms); + } + if now + .duration_since(last_close) + .as_millis() + .try_into() + .unwrap_or(u64::MAX) + >= config.close_interval_ms + { + Self::close_tables( + &rpc_client, + &authority, + &released_tables, + ) + .await; + last_close = now; + sleep_ms = sleep_ms.min(config.close_interval_ms); + } + + tokio::time::sleep(tokio::time::Duration::from_millis( + sleep_ms, + )) + .await; + } + }) + } + + /// Deactivates tables that were previously released + async fn deactivate_tables( + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + released_tables: &Mutex>, + ) { + for table in released_tables + .lock() + .await + .iter_mut() + .filter(|x| !x.deactivate_triggered()) + { + // We don't bubble errors as there is no reasonable way to handle them. + // Instead the next GC cycle will try again to deactivate the table. + let _ = table.deactivate(rpc_client, authority).await.inspect_err( + |err| { + error!( + "Error deactivating table {}: {:?}", + table.table_address(), + err + ) + }, + ); + } + } + + /// Closes tables that were previously released and deactivated. 
+ async fn close_tables( + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + released_tables: &Mutex>, + ) { + let Ok(latest_slot) = rpc_client + .get_slot() + .await + .inspect_err(|err| error!("Error getting latest slot: {:?}", err)) + else { + return; + }; + + let mut closed_tables = vec![]; + { + for deactivated_table in released_tables + .lock() + .await + .iter_mut() + .filter(|x| x.deactivate_triggered()) + { + // NOTE: [LookupTable::close] will only close the table if it was deactivated + // according to the provided slot + // We don't bubble errors as there is no reasonable way to handle them. + // Instead the next GC cycle will try again to close the table. + match deactivated_table + .close(rpc_client, authority, Some(latest_slot)) + .await + { + Ok(closed) if closed => { + closed_tables.push(*deactivated_table.table_address()) + } + Ok(_) => { + // Table not ready to be closed" + } + Err(err) => error!( + "Error closing table {}: {:?}", + deactivated_table.table_address(), + err + ), + }; + } + } + released_tables + .lock() + .await + .retain(|x| !closed_tables.contains(x.table_address())); + } + + // ----------------- + // Checks + // ----------------- + fn check_authority(&self, authority: &Keypair) -> TableManiaResult<()> { + if authority.pubkey() != self.authority_pubkey { + return Err(TableManiaError::InvalidAuthority( + authority.pubkey(), + self.authority_pubkey, + )); + } + Ok(()) + } +} + +fn randomize_lookup_table_slot() -> bool { + #[cfg(feature = "randomize_lookup_table_slot")] + { + true + } + #[cfg(not(feature = "randomize_lookup_table_slot"))] + { + std::env::var("RANDOMIZE_LOOKUP_TABLE_SLOT").is_ok() + } +} diff --git a/magicblock-table-mania/tests/ix_lookup_table.rs b/magicblock-table-mania/tests/ix_lookup_table.rs new file mode 100644 index 00000000..8511491d --- /dev/null +++ b/magicblock-table-mania/tests/ix_lookup_table.rs @@ -0,0 +1,163 @@ +use log::*; + +use magicblock_rpc_client::MagicblockRpcClient; +use 
magicblock_table_mania::{find_open_tables, LookupTable}; +use solana_pubkey::Pubkey; +use solana_rpc_client::nonblocking::rpc_client::RpcClient; +use solana_sdk::{ + address_lookup_table::state::LookupTableMeta, clock::Slot, + commitment_config::CommitmentConfig, native_token::LAMPORTS_PER_SOL, + signature::Keypair, signer::Signer, +}; + +mod utils; + +pub async fn setup_lookup_table( + validator_auth: &Keypair, + pubkeys: &[Pubkey], +) -> (MagicblockRpcClient, LookupTable) { + let rpc_client = { + let client = RpcClient::new_with_commitment( + "http://localhost:7799".to_string(), + CommitmentConfig::confirmed(), + ); + MagicblockRpcClient::from(client) + }; + rpc_client + .request_airdrop(&validator_auth.pubkey(), 777 * LAMPORTS_PER_SOL) + .await + .unwrap(); + + let latest_slot = rpc_client.get_slot().await.unwrap(); + let sub_slot = 0; + let reqid = 0; + let lookup_table = LookupTable::init( + &rpc_client, + validator_auth, + latest_slot, + sub_slot, + pubkeys, + reqid, + ) + .await + .unwrap(); + (rpc_client, lookup_table) +} + +async fn get_table_meta( + rpc_client: &MagicblockRpcClient, + lookup_table: &LookupTable, +) -> LookupTableMeta { + lookup_table + .get_meta(rpc_client) + .await + .unwrap() + .expect("Table not found") +} + +async fn get_table_addresses( + rpc_client: &MagicblockRpcClient, + lookup_table: &LookupTable, +) -> Vec { + lookup_table + .get_chain_pubkeys(rpc_client) + .await + .unwrap() + .expect("Table not found") +} + +async fn get_open_tables( + rpc_client: &MagicblockRpcClient, + authority: &Keypair, + start_slot: Slot, +) -> Vec { + let end_slot = rpc_client.get_slot().await.unwrap(); + find_open_tables(rpc_client, authority, start_slot, end_slot, 10) + .await + .unwrap() + .tables +} + +#[tokio::test] +async fn test_create_fetch_and_close_lookup_table() { + utils::init_logger(); + + let validator_auth = Keypair::new(); + let pubkeys = vec![0; 10] + .into_iter() + .map(|_| Pubkey::new_unique()) + .collect::>(); + + // Init table + let 
(rpc_client, mut lookup_table) = + setup_lookup_table(&validator_auth, &pubkeys[0..5]).await; + let creation_slot = lookup_table.creation_slot().unwrap(); + let meta = get_table_meta(&rpc_client, &lookup_table).await; + + assert_eq!(meta.authority, Some(lookup_table.derived_auth().pubkey())); + assert_eq!(meta.deactivation_slot, u64::MAX); + assert_eq!(lookup_table.pubkeys().unwrap(), pubkeys[0..5]); + assert_eq!( + get_table_addresses(&rpc_client, &lookup_table).await, + pubkeys[0..5] + ); + debug!("{}", lookup_table); + + // Extend table + let reqid = 0; + debug!("Extending table ..."); + lookup_table + .extend(&rpc_client, &validator_auth, &pubkeys[5..10], reqid) + .await + .unwrap(); + assert_eq!(lookup_table.pubkeys().unwrap(), pubkeys[0..10]); + assert_eq!( + get_table_addresses(&rpc_client, &lookup_table).await, + pubkeys[0..10] + ); + + // Deactivate table + debug!("Deactivating table ..."); + lookup_table + .deactivate(&rpc_client, &validator_auth) + .await + .unwrap(); + + let meta = get_table_meta(&rpc_client, &lookup_table).await; + assert_eq!(meta.authority, Some(lookup_table.derived_auth().pubkey())); + assert_ne!(meta.deactivation_slot, u64::MAX); + assert!(!lookup_table.is_deactivated(&rpc_client, None).await); + + assert_eq!( + get_open_tables(&rpc_client, &validator_auth, creation_slot) + .await + .len(), + 1 + ); + + #[cfg(not(feature = "test_table_close"))] + eprintln!("SKIP: close table"); + + #[cfg(feature = "test_table_close")] + { + // Wait for deactivation and close table + debug!("{}", lookup_table); + + eprintln!("Waiting for table to deactivate for about 2.5 min ..."); + while !lookup_table.is_deactivated(&rpc_client, None).await { + utils::sleep_millis(5_000).await; + } + lookup_table + .close(&rpc_client, &validator_auth, None) + .await + .unwrap(); + assert!(lookup_table.is_closed(&rpc_client).await.unwrap()); + + assert_eq!( + get_open_tables(&rpc_client, &validator_auth, creation_slot) + .await + .len(), + 0 + ); + } +} diff --git 
a/magicblock-table-mania/tests/ix_release_pubkeys.rs b/magicblock-table-mania/tests/ix_release_pubkeys.rs new file mode 100644 index 00000000..33fc27f0 --- /dev/null +++ b/magicblock-table-mania/tests/ix_release_pubkeys.rs @@ -0,0 +1,106 @@ +use std::collections::HashSet; + +use solana_pubkey::Pubkey; +use solana_sdk::signature::Keypair; +mod utils; + +#[tokio::test] +async fn test_single_table_two_requests_with_overlapping_pubkeys() { + utils::init_logger(); + + let authority = Keypair::new(); + let table_mania = utils::setup_table_mania(&authority).await; + + let pubkeys_req1 = (0..10) + .map(|idx| Pubkey::from([idx; 32])) + .collect::>(); + let pubkeys_req2 = (6..10) + .map(|idx| Pubkey::from([idx; 32])) + .collect::>(); + + table_mania + .reserve_pubkeys(&authority, &pubkeys_req1) + .await + .unwrap(); + table_mania + .reserve_pubkeys(&authority, &pubkeys_req2) + .await + .unwrap(); + + utils::log_active_table_addresses(&table_mania).await; + + assert_eq!(table_mania.active_tables_count().await, 1); + assert_eq!(table_mania.released_tables_count().await, 0); + + // All of req2 pubkeys are also contained in req1 + // However when we release all req1 pubkeys the table should not be released + // yet since req2 still needs them + + table_mania.release_pubkeys(&pubkeys_req1).await; + assert_eq!(table_mania.active_tables_count().await, 1); + assert_eq!(table_mania.released_tables_count().await, 0); + + // Now releasing req2 pubkeys should release the table + table_mania.release_pubkeys(&pubkeys_req2).await; + assert_eq!(table_mania.active_tables_count().await, 0); + assert_eq!(table_mania.released_tables_count().await, 1); + + utils::close_released_tables(&table_mania).await +} + +#[tokio::test] +async fn test_two_table_three_requests_with_one_overlapping_pubkey() { + utils::init_logger(); + + let authority = Keypair::new(); + let table_mania = utils::setup_table_mania(&authority).await; + + let common_pubkey = Pubkey::new_unique(); + let mut pubkeys_req1 = (0..300) 
+ .map(|_| Pubkey::new_unique()) + .collect::>(); + + // The common pubkey will be stored in the second table + pubkeys_req1.insert(common_pubkey); + + let pubkeys_req2 = HashSet::from([common_pubkey]); + let pubkeys_req3 = HashSet::from([common_pubkey]); + + table_mania + .reserve_pubkeys(&authority, &pubkeys_req1) + .await + .unwrap(); + table_mania + .reserve_pubkeys(&authority, &pubkeys_req2) + .await + .unwrap(); + table_mania + .reserve_pubkeys(&authority, &pubkeys_req3) + .await + .unwrap(); + + utils::log_active_table_addresses(&table_mania).await; + + assert_eq!(table_mania.active_tables_count().await, 2); + assert_eq!(table_mania.released_tables_count().await, 0); + + // Releasing req2 should not release any table since it only + // has the common pubkey + table_mania.release_pubkeys(&pubkeys_req2).await; + assert_eq!(table_mania.active_tables_count().await, 2); + assert_eq!(table_mania.released_tables_count().await, 0); + + // Releasing req1 should only release the first table since the + // second table has the common pubkey + table_mania.release_pubkeys(&pubkeys_req1).await; + assert_eq!(table_mania.active_tables_count().await, 1); + assert_eq!(table_mania.released_tables_count().await, 1); + + // Releasing req3 frees the common pubkey and thus allows the + // second table to be released + table_mania.release_pubkeys(&pubkeys_req3).await; + assert_eq!(table_mania.active_tables_count().await, 0); + assert_eq!(table_mania.released_tables_count().await, 2); + + utils::close_released_tables(&table_mania).await +} diff --git a/magicblock-table-mania/tests/ix_reserve_pubkeys.rs b/magicblock-table-mania/tests/ix_reserve_pubkeys.rs new file mode 100644 index 00000000..94d35f94 --- /dev/null +++ b/magicblock-table-mania/tests/ix_reserve_pubkeys.rs @@ -0,0 +1,132 @@ +use std::collections::HashSet; + +use log::*; +use solana_pubkey::Pubkey; +use solana_sdk::{ + address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES, signature::Keypair, +}; +use 
tokio::task::JoinSet; +mod utils; + +// ----------------- +// Fitting into single table different chunk sizes +// ----------------- +macro_rules! reserve_pubkeys_in_one_table { + ($chunk_size:expr) => { + ::paste::paste! { + #[tokio::test] + async fn []() { + reserve_pubkeys_in_one_table_in_chunks($chunk_size).await; + } + } + }; +} + +reserve_pubkeys_in_one_table!(8); +reserve_pubkeys_in_one_table!(32); +reserve_pubkeys_in_one_table!(80); +reserve_pubkeys_in_one_table!(100); +reserve_pubkeys_in_one_table!(256); + +async fn reserve_pubkeys_in_one_table_in_chunks(chunk_size: usize) { + utils::init_logger(); + let authority = Keypair::new(); + + let mut pubkeys = (0..LOOKUP_TABLE_MAX_ADDRESSES) + .map(|_| Pubkey::new_unique()) + .collect::>(); + pubkeys.sort(); + + let table_mania = utils::setup_table_mania(&authority).await; + + for chunk in pubkeys.chunks(chunk_size) { + debug!("Storing chunk of size: {}", chunk.len()); + let chunk_hashset = HashSet::from_iter(chunk.iter().cloned()); + table_mania + .reserve_pubkeys(&authority, &chunk_hashset) + .await + .unwrap(); + } + + utils::log_active_table_addresses(&table_mania).await; + + let mut active_table_pubkeys = table_mania.active_table_pubkeys().await; + active_table_pubkeys.sort(); + + assert_eq!(table_mania.active_tables_count().await, 1); + assert_eq!(active_table_pubkeys, pubkeys); + + let mut first_table_pubkeys = table_mania.active_tables.read().await[0] + .get_chain_pubkeys(&table_mania.rpc_client) + .await + .unwrap() + .unwrap(); + + first_table_pubkeys.sort(); + + assert_eq!(first_table_pubkeys, pubkeys); +} + +// ----------------- +// Fitting into multiple tables different chunk sizes +// ----------------- +macro_rules! reserve_pubkeys_in_multiple_tables { + ($amount:expr, $chunk_size:expr) => { + ::paste::paste! 
{ + #[tokio::test] + async fn []() { + reserve_pubkeys_in_multiple_tables_in_chunks($amount, $chunk_size).await; + } + } + }; +} + +reserve_pubkeys_in_multiple_tables!(257, 100); +reserve_pubkeys_in_multiple_tables!(512, 100); +reserve_pubkeys_in_multiple_tables!(1_000, 20); +reserve_pubkeys_in_multiple_tables!(2_100, 10); + +async fn reserve_pubkeys_in_multiple_tables_in_chunks( + amount: usize, + chunk_size: usize, +) { + utils::init_logger(); + let authority = Keypair::new(); + + let pubkeys = (0..amount) + .map(|_| Pubkey::new_unique()) + .collect::>(); + + let table_mania = utils::setup_table_mania(&authority).await; + + let mut join_set = JoinSet::new(); + for chunk in pubkeys.chunks(chunk_size) { + debug!("Reserving chunk of size: {}", chunk.len()); + let chunk_hashset = HashSet::from_iter(chunk.iter().cloned()); + let table_mania = table_mania.clone(); + let authority = authority.insecure_clone(); + join_set.spawn(async move { + table_mania + .reserve_pubkeys(&authority, &chunk_hashset) + .await + }); + } + join_set + .join_all() + .await + .into_iter() + .collect::, _>>() + .unwrap(); + + utils::log_active_table_addresses(&table_mania).await; + let expected_tables_count = + (amount as f32 / LOOKUP_TABLE_MAX_ADDRESSES as f32).ceil() as usize; + assert_eq!( + table_mania.active_tables_count().await, + expected_tables_count + ); + assert_eq!( + table_mania.active_table_pubkeys().await.len(), + pubkeys.len() + ); +} diff --git a/magicblock-table-mania/tests/utils/mod.rs b/magicblock-table-mania/tests/utils/mod.rs new file mode 100644 index 00000000..385b2068 --- /dev/null +++ b/magicblock-table-mania/tests/utils/mod.rs @@ -0,0 +1,116 @@ +#![allow(dead_code)] + +use log::*; +use magicblock_rpc_client::MagicblockRpcClient; +use magicblock_table_mania::{GarbageCollectorConfig, TableMania}; +use solana_rpc_client::nonblocking::rpc_client::RpcClient; +use solana_sdk::commitment_config::CommitmentConfig; +use solana_sdk::native_token::LAMPORTS_PER_SOL; +use 
solana_sdk::signature::Keypair; +use solana_sdk::signer::Signer; +use std::time::{Duration, Instant}; + +pub const TEST_TABLE_CLOSE: bool = cfg!(feature = "test_table_close"); + +pub async fn sleep_millis(millis: u64) { + tokio::time::sleep(tokio::time::Duration::from_millis(millis)).await; +} + +pub fn init_logger_file_path() { + let _ = env_logger::builder() + .format_timestamp(None) + .format_module_path(false) + .format_target(false) + .format_source_path(true) + .is_test(true) + .try_init(); +} + +pub fn init_logger() { + let _ = env_logger::builder() + .format_timestamp(None) + .is_test(true) + .try_init(); +} + +pub async fn setup_table_mania(validator_auth: &Keypair) -> TableMania { + let rpc_client = { + let client = RpcClient::new_with_commitment( + "http://localhost:7799".to_string(), + CommitmentConfig::processed(), + ); + MagicblockRpcClient::from(client) + }; + rpc_client + .request_airdrop(&validator_auth.pubkey(), 777 * LAMPORTS_PER_SOL) + .await + .unwrap(); + + if TEST_TABLE_CLOSE { + TableMania::new( + rpc_client, + validator_auth, + Some(GarbageCollectorConfig::default()), + ) + } else { + TableMania::new(rpc_client, validator_auth, None) + } +} + +pub async fn close_released_tables(table_mania: &TableMania) { + if TEST_TABLE_CLOSE { + // Tables deactivate after ~2.5 mins (150secs), but most times + // it takes a lot longer so we allow double the time + const MAX_TIME_TO_CLOSE: Duration = Duration::from_secs(300); + + info!( + "Waiting for table close for up to {} secs", + MAX_TIME_TO_CLOSE.as_secs() + ); + let start = Instant::now(); + let mut count = 0; + let releasing_pubkeys = table_mania.released_table_addresses().await; + + while table_mania.released_tables_count().await > 0 { + if Instant::now() - start > MAX_TIME_TO_CLOSE { + panic!("Timed out waiting for table close"); + } + count += 1; + if count % 10 == 0 { + debug!( + "Still waiting to close, {} released tables", + table_mania.released_tables_count().await + ); + } + 
sleep_millis(10_000).await; + } + + for released_pubkey in releasing_pubkeys { + let table = table_mania + .rpc_client + .get_account(&released_pubkey) + .await + .expect("Failed to get table account"); + assert!( + table.is_none(), + "Table {} not closed on chain", + released_pubkey + ); + } + } else { + info!("Skipping table close wait"); + } +} + +pub async fn log_active_table_addresses(table_mania: &TableMania) { + debug!( + "Active Tables: {}", + table_mania + .active_table_addresses() + .await + .into_iter() + .map(|x| x.to_string()) + .collect::>() + .join(", ") + ); +} From e6f1edd31d3195329a9b7f7f92c335a5aeebc34e Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 14 May 2025 18:26:48 +0545 Subject: [PATCH 24/58] chore: move committor service integration tests --- Cargo.toml | 2 +- magicblock-committor-service/Cargo.toml | 1 - test-integration/Cargo.lock | 42 ++++++++++++++----- test-integration/Cargo.toml | 8 ++++ .../committor-service/Cargo.toml | 28 +++++++++++++ .../committor-service/src/lib.rs | 2 + .../tests}/ix_commit_local.rs | 15 +++---- .../tests}/utils/instructions.rs | 0 .../committor-service/tests}/utils/mod.rs | 27 ------------ .../tests}/utils/transactions.rs | 0 10 files changed, 78 insertions(+), 47 deletions(-) create mode 100644 test-integration/schedulecommit/committor-service/Cargo.toml create mode 100644 test-integration/schedulecommit/committor-service/src/lib.rs rename {magicblock-committor-service/todo-tests => test-integration/schedulecommit/committor-service/tests}/ix_commit_local.rs (99%) rename {magicblock-committor-service/todo-tests => test-integration/schedulecommit/committor-service/tests}/utils/instructions.rs (100%) rename {magicblock-committor-service/todo-tests => test-integration/schedulecommit/committor-service/tests}/utils/mod.rs (63%) rename {magicblock-committor-service/todo-tests => test-integration/schedulecommit/committor-service/tests}/utils/transactions.rs (100%) diff --git a/Cargo.toml b/Cargo.toml 
index ffc2e12a..1c2aac32 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,10 +102,10 @@ magicblock-accounts-api = { path = "./magicblock-accounts-api" } magicblock-accounts-db = { path = "./magicblock-accounts-db" } magicblock-api = { path = "./magicblock-api" } magicblock-bank = { path = "./magicblock-bank" } -magicblock-committor-service = { path = "./magicblock-committor-service" } magicblock-committor-program = { path = "./magicblock-committor-program", features = [ "no-entrypoint", ] } +magicblock-committor-service = { path = "./magicblock-committor-service" } magicblock-config = { path = "./magicblock-config" } magicblock-core = { path = "./magicblock-core" } magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", rev = "4af7f1c" } diff --git a/magicblock-committor-service/Cargo.toml b/magicblock-committor-service/Cargo.toml index bd00cb15..25e82451 100644 --- a/magicblock-committor-service/Cargo.toml +++ b/magicblock-committor-service/Cargo.toml @@ -43,5 +43,4 @@ tokio = { workspace = true, features = ["rt", "macros"] } [features] default = [] -test_table_close = [] dev-context-only-utils = [] diff --git a/test-integration/Cargo.lock b/test-integration/Cargo.lock index f479b90c..02cb15d4 100644 --- a/test-integration/Cargo.lock +++ b/test-integration/Cargo.lock @@ -2937,7 +2937,6 @@ dependencies = [ "log", "magicblock-config", "magicblock-core", - "magicblock-delegation-program 1.0.0", "rayon", "serde", "solana-rpc-client", @@ -3659,7 +3658,7 @@ dependencies = [ [[package]] name = "magicblock-committor-program" -version = "0.0.0" +version = "0.1.1" dependencies = [ "borsh 1.5.7", "borsh-derive 1.5.7", @@ -3668,19 +3667,19 @@ dependencies = [ "solana-account", "solana-program", "solana-pubkey", - "thiserror 2.0.11", + "thiserror 1.0.69", ] [[package]] name = "magicblock-committor-service" -version = "0.0.0" +version = "0.1.1" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "bincode", "borsh 1.5.7", "log", 
"magicblock-committor-program", - "magicblock-delegation-program 1.0.0", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", "magicblock-rpc-client", "magicblock-table-mania", "rusqlite", @@ -3690,7 +3689,8 @@ dependencies = [ "solana-rpc-client-api", "solana-sdk", "solana-transaction-status-client-types", - "thiserror 2.0.11", + "static_assertions", + "thiserror 1.0.69", "tokio", "tokio-util 0.7.13", ] @@ -3938,30 +3938,31 @@ dependencies = [ [[package]] name = "magicblock-rpc-client" -version = "0.0.0" +version = "0.1.1" dependencies = [ "log", "solana-rpc-client", "solana-rpc-client-api", "solana-sdk", "solana-transaction-status-client-types", - "thiserror 2.0.11", + "thiserror 1.0.69", "tokio", ] [[package]] name = "magicblock-table-mania" -version = "0.0.0" +version = "0.1.1" dependencies = [ "ed25519-dalek", "log", "magicblock-rpc-client", + "rand 0.8.5", "sha3", "solana-pubkey", "solana-rpc-client", "solana-rpc-client-api", "solana-sdk", - "thiserror 2.0.11", + "thiserror 1.0.69", "tokio", ] @@ -5690,6 +5691,25 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "schedulecommit-committor-service" +version = "0.0.0" +dependencies = [ + "log", + "magicblock-committor-program", + "magicblock-committor-service", + "magicblock-delegation-program 1.0.0", + "magicblock-rpc-client", + "program-flexi-counter", + "solana-account", + "solana-pubkey", + "solana-rpc-client", + "solana-rpc-client-api", + "solana-sdk", + "test-tools-core", + "tokio", +] + [[package]] name = "schedulecommit-test-scenarios" version = "0.0.0" diff --git a/test-integration/Cargo.toml b/test-integration/Cargo.toml index 7edd2579..f20c8b98 100644 --- a/test-integration/Cargo.toml +++ b/test-integration/Cargo.toml @@ -5,6 +5,7 @@ members = [ "programs/schedulecommit-security", "programs/sysvars", "schedulecommit/client", + "schedulecommit/committor-service", "schedulecommit/test-scenarios", "schedulecommit/test-security", 
"test-cloning", @@ -34,15 +35,22 @@ magicblock-accounts-db = { path = "../magicblock-accounts-db", features = [ magic-domain-program = { git = "https://github.com/magicblock-labs/magic-domain-program.git", rev = "eba7644", default-features = false } magicblock-config = { path = "../magicblock-config" } magicblock-core = { path = "../magicblock-core" } +magicblock-committor-program = { path = "../magicblock-committor-program", features = [ + "no-entrypoint", +] } magicblock-delegation-program = { path = "../../delegation-program" } +magicblock-committor-service = { path = "../magicblock-committor-service" } +magicblock-rpc-client = { path = "../magicblock-rpc-client" } program-flexi-counter = { path = "./programs/flexi-counter" } program-schedulecommit = { path = "programs/schedulecommit" } program-schedulecommit-security = { path = "programs/schedulecommit-security" } rayon = "1.10.0" schedulecommit-client = { path = "schedulecommit/client" } serde = "1.0.217" +solana-account = { git = "https://github.com/magicblock-labs/solana-account.git", rev = "7bdfefc" } solana-program = "2.2" solana-program-test = "2.2" +solana-pubkey = { version = "2.2" } solana-rpc-client = "2.2" solana-rpc-client-api = "2.2" solana-sdk = "2.2" diff --git a/test-integration/schedulecommit/committor-service/Cargo.toml b/test-integration/schedulecommit/committor-service/Cargo.toml new file mode 100644 index 00000000..7667e32c --- /dev/null +++ b/test-integration/schedulecommit/committor-service/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "schedulecommit-committor-service" +version.workspace = true +edition.workspace = true + +[dev-dependencies] +log = { workspace = true } +magicblock-committor-program = { workspace = true, features = [ + "no-entrypoint", +] } +magicblock-delegation-program = { workspace = true, features = [ + "no-entrypoint", +] } +magicblock-committor-service = { workspace = true, features = [ + "dev-context-only-utils", +] } +magicblock-rpc-client = { workspace = true } 
+program-flexi-counter = { workspace = true, features = ["no-entrypoint"] } +solana-account = { workspace = true } +solana-pubkey = { workspace = true } +solana-rpc-client = { workspace = true } +solana-rpc-client-api = { workspace = true } +solana-sdk = { workspace = true } +test-tools-core = { workspace = true } +tokio = { workspace = true } + +[features] +test_table_close = [] diff --git a/test-integration/schedulecommit/committor-service/src/lib.rs b/test-integration/schedulecommit/committor-service/src/lib.rs new file mode 100644 index 00000000..10f55cb1 --- /dev/null +++ b/test-integration/schedulecommit/committor-service/src/lib.rs @@ -0,0 +1,2 @@ +#[allow(unused)] +pub const HELLO: &str = "world"; diff --git a/magicblock-committor-service/todo-tests/ix_commit_local.rs b/test-integration/schedulecommit/committor-service/tests/ix_commit_local.rs similarity index 99% rename from magicblock-committor-service/todo-tests/ix_commit_local.rs rename to test-integration/schedulecommit/committor-service/tests/ix_commit_local.rs index b3227e3b..5ba085e1 100644 --- a/magicblock-committor-service/todo-tests/ix_commit_local.rs +++ b/test-integration/schedulecommit/committor-service/tests/ix_commit_local.rs @@ -3,6 +3,7 @@ use magicblock_committor_service::{ChangesetCommittor, ComputeBudgetConfig}; use magicblock_rpc_client::MagicblockRpcClient; use std::collections::{HashMap, HashSet}; use std::time::{Duration, Instant}; +use test_tools_core::init_logger; use tokio::task::JoinSet; use utils::transactions::tx_logs_contain; @@ -282,7 +283,7 @@ async fn commit_single_account( expected_strategy: CommitStrategy, undelegate: bool, ) { - utils::init_logger_target(); + init_logger!(); let slot = 10; let validator_auth = utils::get_validator_auth(); @@ -339,7 +340,7 @@ async fn commit_single_account( // ----------------- #[tokio::test] async fn test_ix_commit_two_accounts_1kb_2kb() { - utils::init_logger(); + init_logger!(); commit_multiple_accounts( &[1024, 2048], 1, @@ -351,7 
+352,7 @@ async fn test_ix_commit_two_accounts_1kb_2kb() { #[tokio::test] async fn test_ix_commit_four_accounts_1kb_2kb_5kb_10kb_single_bundle() { - utils::init_logger(); + init_logger!(); commit_multiple_accounts( &[1024, 2 * 1024, 5 * 1024, 10 * 1024], 1, @@ -477,7 +478,7 @@ async fn commit_5_accounts_1kb( expected_strategies: ExpectedStrategies, undelegate_all: bool, ) { - utils::init_logger(); + init_logger!(); let accs = (0..5).map(|_| 1024).collect::>(); commit_multiple_accounts( &accs, @@ -492,7 +493,7 @@ async fn commit_8_accounts_1kb( bundle_size: usize, expected_strategies: ExpectedStrategies, ) { - utils::init_logger(); + init_logger!(); let accs = (0..8).map(|_| 1024).collect::>(); commit_multiple_accounts(&accs, bundle_size, expected_strategies, false) .await; @@ -502,7 +503,7 @@ async fn commit_20_accounts_1kb( bundle_size: usize, expected_strategies: ExpectedStrategies, ) { - utils::init_logger(); + init_logger!(); let accs = (0..20).map(|_| 1024).collect::>(); commit_multiple_accounts(&accs, bundle_size, expected_strategies, false) .await; @@ -514,7 +515,7 @@ async fn commit_multiple_accounts( expected_strategies: ExpectedStrategies, undelegate_all: bool, ) { - utils::init_logger(); + init_logger!(); let slot = 10; let validator_auth = utils::get_validator_auth(); diff --git a/magicblock-committor-service/todo-tests/utils/instructions.rs b/test-integration/schedulecommit/committor-service/tests/utils/instructions.rs similarity index 100% rename from magicblock-committor-service/todo-tests/utils/instructions.rs rename to test-integration/schedulecommit/committor-service/tests/utils/instructions.rs diff --git a/magicblock-committor-service/todo-tests/utils/mod.rs b/test-integration/schedulecommit/committor-service/tests/utils/mod.rs similarity index 63% rename from magicblock-committor-service/todo-tests/utils/mod.rs rename to test-integration/schedulecommit/committor-service/tests/utils/mod.rs index 0b943374..9cad5484 100644 --- 
a/magicblock-committor-service/todo-tests/utils/mod.rs +++ b/test-integration/schedulecommit/committor-service/tests/utils/mod.rs @@ -1,6 +1,3 @@ -use std::env; - -use env_logger::Target; use solana_sdk::signature::Keypair; pub mod instructions; @@ -11,30 +8,6 @@ pub async fn sleep_millis(millis: u64) { tokio::time::sleep(tokio::time::Duration::from_millis(millis)).await; } -pub fn init_logger() { - let mut builder = env_logger::builder(); - builder - .format_timestamp(None) - .format_module_path(false) - .format_target(false) - .format_source_path(true) - .is_test(true); - - if let Ok(path) = env::var("TEST_LOG_FILE") { - builder.target(Target::Pipe(Box::new( - std::fs::File::create(path).unwrap(), - ))); - } - let _ = builder.try_init(); -} - -pub fn init_logger_target() { - let _ = env_logger::builder() - .format_timestamp(None) - .is_test(true) - .try_init(); -} - /// This is the test authority used in the delegation program /// https://github.com/magicblock-labs/delegation-program/blob/7fc0ae9a59e48bea5b046b173ea0e34fd433c3c7/tests/fixtures/accounts.rs#L46 /// It is compiled in as the authority for the validator vault when we build via diff --git a/magicblock-committor-service/todo-tests/utils/transactions.rs b/test-integration/schedulecommit/committor-service/tests/utils/transactions.rs similarity index 100% rename from magicblock-committor-service/todo-tests/utils/transactions.rs rename to test-integration/schedulecommit/committor-service/tests/utils/transactions.rs From 1c4dd7895556f419c60dcfaac0d7d1f7b5ec6d76 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Wed, 14 May 2025 18:32:51 +0545 Subject: [PATCH 25/58] chore: noting escrow/fee payer related test requirements --- .../test-scenarios/tests/03_commits_fee_payer.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs b/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs index 
f04c8bd7..8d606114 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/03_commits_fee_payer.rs @@ -23,6 +23,10 @@ mod utils; #[test] fn test_committing_fee_payer_without_escrowing_lamports() { + // NOTE: this test requires the following config + // [validator] + // base_fees = 1000 + // see ../../../configs/schedulecommit-conf-fees.ephem.toml run_test!({ let ctx = get_context_with_delegated_committees_without_payer_escrow(2); @@ -64,9 +68,8 @@ fn test_committing_fee_payer_without_escrowing_lamports() { }, ); info!("{} '{:?}'", sig, res); - assert!(!res.is_ok()); - // Should fail because the fee payer was not escrowed + assert!(res.is_err()); assert!(res .err() .unwrap() From 8c620feffa0a0e31873a7e48cd9ff7c1a5a302e4 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 11:00:41 +0545 Subject: [PATCH 26/58] chore: minor cleanup in test runner --- test-integration/test-runner/bin/run_tests.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs index 5a85a936..a9c322a6 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -1,9 +1,9 @@ use integration_test_tools::validator::start_test_validator_with_config; use integration_test_tools::{ - toml_to_args::{config_to_args, rpc_port_from_config, ProgramLoader}, + toml_to_args::ProgramLoader, validator::{ resolve_workspace_dir, start_magic_block_validator_with_config, - wait_for_validator, TestRunnerPaths, + TestRunnerPaths, }, }; use std::{ @@ -21,7 +21,7 @@ pub fn main() { let Ok((security_output, scenarios_output)) = run_schedule_commit_tests(&manifest_dir) else { - // TODO: why we don't report Error case lower? 
+ // If any test fails or cannot run we bail immediately return; }; From f3e17d13444741dc2af9532dca49e84f6fd9d487 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 11:11:03 +0545 Subject: [PATCH 27/58] ix: load committor program for schedule commits --- test-integration/Makefile | 16 ++++++++++------ .../configs/schedulecommit-conf.devnet.toml | 11 ++++++++--- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/test-integration/Makefile b/test-integration/Makefile index 614c938d..153858bd 100644 --- a/test-integration/Makefile +++ b/test-integration/Makefile @@ -1,21 +1,25 @@ DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) DEPLOY_DIR := $(DIR)target/deploy +ROOT_DEPLOY_DIR := $(DIR)../target/deploy RUST_LOG ?= 'warn,geyser_plugin=warn,magicblock=trace,rpc=trace,bank=trace,banking_stage=warn,solana_geyser_plugin_manager=warn,solana_svm=warn,test_tools=trace,schedulecommit_test=trace,' \ FLEXI_COUNTER_DIR := $(DIR)programs/flexi-counter SCHEDULECOMMIT_DIR := $(DIR)programs/schedulecommit SCHEDULECOMMIT_SECURITY_DIR := $(DIR)programs/schedulecommit-security +COMMITTOR_PROGRAM_DIR := $(DIR)../magicblock-committor-program/ FLEXI_COUNTER_SRC := $(shell find $(FLEXI_COUNTER_DIR) -name '*.rs' -o -name '*.toml') SCHEDULECOMMIT_SRC := $(shell find $(SCHEDULECOMMIT_DIR) -name '*.rs' -o -name '*.toml') SCHEDULECOMMIT_SECURITY_SRC := $(shell find $(SCHEDULECOMMIT_SECURITY_DIR) -name '*.rs' -o -name '*.toml') +COMMITTOR_PROGRAM_SRC := $(shell find $(COMMITTOR_PROGRAM_DIR) -name '*.rs' -o -name '*.toml') FLEXI_COUNTER_SO := $(DEPLOY_DIR)/program_flexi_counter.so SCHEDULECOMMIT_SO := $(DEPLOY_DIR)/program_schedulecommit.so SCHEDULECOMMIT_SECURITY_SO := $(DEPLOY_DIR)/program_schedulecommit_security.so +COMMITTOR_PROGRAM_SO := $(ROOT_DEPLOY_DIR)/magicblock_committor_program.so -PROGRAMS_SO := $(FLEXI_COUNTER_SO) $(SCHEDULECOMMIT_SO) $(SCHEDULECOMMIT_SECURITY_SO) +PROGRAMS_SO := $(FLEXI_COUNTER_SO) $(SCHEDULECOMMIT_SO) 
$(SCHEDULECOMMIT_SECURITY_SO) $(COMMITTOR_PROGRAM_SO) list-tasks: @cat Makefile | grep "^[a-z].*:" | sed 's/:.*//g' @@ -25,19 +29,19 @@ list-programs: test: $(PROGRAMS_SO) RUST_BACKTRACE=1 \ RUST_LOG=$(RUST_LOG) \ - cargo run --package test-runner --bin run-tests + cargo run --package test-runner --bin run-tests test-force-mb: $(PROGRAMS_SO) test-ledger-restore RUST_LOG=$(RUST_LOG) \ FORCE_MAGIC_BLOCK_VALIDATOR=1 \ - cargo run --package test-runner --bin run-tests + cargo run --package test-runner --bin run-tests $(FLEXI_COUNTER_SO): $(FLEXI_COUNTER_SRC) - cargo build-sbf --manifest-path $(FLEXI_COUNTER_DIR)/Cargo.toml + cargo build-sbf --manifest-path $(FLEXI_COUNTER_DIR)/Cargo.toml $(SCHEDULECOMMIT_SO): $(SCHEDULECOMMIT_SRC) - cargo build-sbf --manifest-path $(SCHEDULECOMMIT_DIR)/Cargo.toml + cargo build-sbf --manifest-path $(SCHEDULECOMMIT_DIR)/Cargo.toml $(SCHEDULECOMMIT_SECURITY_SO): $(SCHEDULECOMMIT_SECURITY_SRC) - cargo build-sbf --manifest-path $(SCHEDULECOMMIT_SECURITY_DIR)/Cargo.toml + cargo build-sbf --manifest-path $(SCHEDULECOMMIT_SECURITY_DIR)/Cargo.toml deploy-flexi-counter: $(FLEXI_COUNTER_SO) solana program deploy \ diff --git a/test-integration/configs/schedulecommit-conf.devnet.toml b/test-integration/configs/schedulecommit-conf.devnet.toml index de18df07..e6b44cb2 100644 --- a/test-integration/configs/schedulecommit-conf.devnet.toml +++ b/test-integration/configs/schedulecommit-conf.devnet.toml @@ -6,14 +6,14 @@ commit = { frequency_millis = 9_000_000_000_000, compute_unit_price = 1_000_000 [accounts.db] # size of the main storage, we have to preallocate in advance -# it's advised to set this value based on formula 1KB * N * 3, -# where N is the number of accounts expected to be stored in +# it's advised to set this value based on formula 1KB * N * 3, +# where N is the number of accounts expected to be stored in # database, e.g. 
for million accounts this would be 3GB db-size = 1048576000 # 1GB # minimal indivisible unit of addressing in main storage # offsets are calculated in terms of blocks block-size = "block256" # possible values block128 | block256 | block512 -# size of index file, we have to preallocate, +# size of index file, we have to preallocate, # can be as low as 1% of main storage size, but setting it to higher values won't hurt index-map-size = 2048576 # max number of snapshots to keep around @@ -33,6 +33,11 @@ path = "../schedulecommit/elfs/dlp.so" id = "42Y73BJyGCXh2XUrqyz59WCk2DsBtqrFrt38t9ogB5sD" path = "../schedulecommit/elfs/mdp.so" +# NOTE: `cargo build-sbf` needs to run from the root to build the program +[[program]] +id = "corabpNrkBEqbTZP7xfJgSWTdBmVdLf1PARWXZbcMcS" +path = "../../target/deploy/magicblock_committor_program.so" + [[program]] id = "9hgprgZiRWmy8KkfvUuaVkDGrqo9GzeXMohwq6BazgUY" path = "../target/deploy/program_schedulecommit.so" From 6d109f933703015115f1eb7a1576048640203fc0 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 11:36:46 +0545 Subject: [PATCH 28/58] ix: allow configuring loaded accounts --- test-integration/Cargo.lock | 2 + .../tests/test_domain_registry.rs | 9 ++- test-integration/test-runner/bin/run_tests.rs | 10 +++ test-integration/test-tools/Cargo.toml | 4 ++ test-integration/test-tools/src/lib.rs | 1 + .../test-tools/src/loaded_accounts.rs | 63 +++++++++++++++++++ test-integration/test-tools/src/validator.rs | 18 +++--- 7 files changed, 97 insertions(+), 10 deletions(-) create mode 100644 test-integration/test-tools/src/loaded_accounts.rs diff --git a/test-integration/Cargo.lock b/test-integration/Cargo.lock index 02cb15d4..b030e9d6 100644 --- a/test-integration/Cargo.lock +++ b/test-integration/Cargo.lock @@ -2937,8 +2937,10 @@ dependencies = [ "log", "magicblock-config", "magicblock-core", + "magicblock-delegation-program 1.0.0", "rayon", "serde", + "solana-pubkey", "solana-rpc-client", "solana-rpc-client-api", 
"solana-sdk", diff --git a/test-integration/test-magicblock-api/tests/test_domain_registry.rs b/test-integration/test-magicblock-api/tests/test_domain_registry.rs index 2c55e65e..180d2a32 100644 --- a/test-integration/test-magicblock-api/tests/test_domain_registry.rs +++ b/test-integration/test-magicblock-api/tests/test_domain_registry.rs @@ -109,8 +109,13 @@ impl TestValidator { root_dir, workspace_dir, }; - let process = start_test_validator_with_config(&paths, None, "CHAIN") - .expect("Failed to start devnet process"); + let process = start_test_validator_with_config( + &paths, + None, + Default::default(), + "CHAIN", + ) + .expect("Failed to start devnet process"); Self { process } } diff --git a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs index a9c322a6..e48f03c1 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -1,3 +1,4 @@ +use integration_test_tools::loaded_accounts::LoadedAccounts; use integration_test_tools::validator::start_test_validator_with_config; use integration_test_tools::{ toml_to_args::ProgramLoader, @@ -65,6 +66,7 @@ fn run_restore_ledger_tests( let mut devnet_validator = match start_validator( "restore-ledger-conf.devnet.toml", ValidatorCluster::Chain(None), + Default::default(), ) { Some(validator) => validator, None => { @@ -100,6 +102,7 @@ fn run_schedule_commit_tests( let mut devnet_validator = match start_validator( "schedulecommit-conf.devnet.toml", ValidatorCluster::Chain(None), + LoadedAccounts::with_delegation_program_test_authority(), ) { Some(validator) => validator, None => { @@ -115,6 +118,7 @@ fn run_schedule_commit_tests( let mut ephem_validator = match start_validator( "schedulecommit-conf-fees.ephem.toml", ValidatorCluster::Ephem, + LoadedAccounts::with_delegation_program_test_authority(), ) { Some(validator) => validator, None => { @@ -163,6 +167,7 @@ fn run_issues_frequent_commmits_tests( let mut devnet_validator 
= match start_validator( "schedulecommit-conf.devnet.toml", ValidatorCluster::Chain(None), + LoadedAccounts::with_delegation_program_test_authority(), ) { Some(validator) => validator, None => { @@ -172,6 +177,7 @@ fn run_issues_frequent_commmits_tests( let mut ephem_validator = match start_validator( "schedulecommit-conf.ephem.frequent-commits.toml", ValidatorCluster::Ephem, + LoadedAccounts::with_delegation_program_test_authority(), ) { Some(validator) => validator, None => { @@ -205,6 +211,7 @@ fn run_cloning_tests(manifest_dir: &str) -> Result> { let mut devnet_validator = match start_validator( "cloning-conf.devnet.toml", ValidatorCluster::Chain(Some(ProgramLoader::BpfProgram)), + LoadedAccounts::with_delegation_program_test_authority(), ) { Some(validator) => validator, None => { @@ -214,6 +221,7 @@ fn run_cloning_tests(manifest_dir: &str) -> Result> { let mut ephem_validator = match start_validator( "cloning-conf.ephem.toml", ValidatorCluster::Ephem, + LoadedAccounts::with_delegation_program_test_authority(), ) { Some(validator) => validator, None => { @@ -332,6 +340,7 @@ impl ValidatorCluster { fn start_validator( config_file: &str, cluster: ValidatorCluster, + loaded_accounts: LoadedAccounts, ) -> Option { let log_suffix = cluster.log_suffix(); let test_runner_paths = resolve_paths(config_file); @@ -343,6 +352,7 @@ fn start_validator( start_test_validator_with_config( &test_runner_paths, program_loader, + loaded_accounts, log_suffix, ) } diff --git a/test-integration/test-tools/Cargo.toml b/test-integration/test-tools/Cargo.toml index 50c3719a..b3ea8bc7 100644 --- a/test-integration/test-tools/Cargo.toml +++ b/test-integration/test-tools/Cargo.toml @@ -11,6 +11,10 @@ rayon = { workspace = true } serde = { workspace = true } magicblock-core = { workspace = true } magicblock-config = { workspace = true } +magicblock-delegation-program = { workspace = true, features = [ + "no-entrypoint", +] } +solana-pubkey = { workspace = true } solana-rpc-client = { 
workspace = true } solana-rpc-client-api = { workspace = true } solana-sdk = { workspace = true } diff --git a/test-integration/test-tools/src/lib.rs b/test-integration/test-tools/src/lib.rs index 52f0abb3..10c4704d 100644 --- a/test-integration/test-tools/src/lib.rs +++ b/test-integration/test-tools/src/lib.rs @@ -1,5 +1,6 @@ pub mod conversions; mod integration_test_context; +pub mod loaded_accounts; mod run_test; pub mod scheduled_commits; pub mod tmpdir; diff --git a/test-integration/test-tools/src/loaded_accounts.rs b/test-integration/test-tools/src/loaded_accounts.rs new file mode 100644 index 00000000..ea1238da --- /dev/null +++ b/test-integration/test-tools/src/loaded_accounts.rs @@ -0,0 +1,63 @@ +use solana_pubkey::pubkey; +use solana_sdk::pubkey::Pubkey; + +pub struct LoadedAccounts { + validator_authority: Pubkey, + luzid_authority: Pubkey, +} + +impl Default for LoadedAccounts { + fn default() -> Self { + Self { + validator_authority: pubkey!( + "mAGicPQYBMvcYveUZA5F5UNNwyHvfYh5xkLS2Fr1mev" + ), + luzid_authority: pubkey!( + "LUzidNSiPNjYNkxZcUm5hYHwnWPwsUfh2US1cpWwaBm" + ), + } + } +} + +impl LoadedAccounts { + pub fn new(validator_authority: Pubkey, luzid_authority: Pubkey) -> Self { + Self { + validator_authority, + luzid_authority, + } + } + + /// This use the test authority used in the delegation program as the validator + /// authority. 
+ /// https://github.com/magicblock-labs/delegation-program/blob/7fc0ae9a59e48bea5b046b173ea0e34fd433c3c7/tests/fixtures/accounts.rs#L46 + /// It is compiled in as the authority for the validator vault when we build + /// the delegation program via: + /// `cargo build-sbf --features=unit_test_config` + pub fn with_delegation_program_test_authority() -> Self { + Self { + validator_authority: pubkey!( + "tEsT3eV6RFCWs1BZ7AXTzasHqTtMnMLCB2tjQ42TDXD" + ), + luzid_authority: pubkey!( + "LUzidNSiPNjYNkxZcUm5hYHwnWPwsUfh2US1cpWwaBm" + ), + } + } + + pub fn validator_authority(&self) -> Pubkey { + self.validator_authority + } + + pub fn luzid_authority(&self) -> Pubkey { + self.luzid_authority + } + + pub fn validator_fees_vault(&self) -> Pubkey { + dlp::pda::validator_fees_vault_pda_from_validator( + &self.validator_authority, + ) + } + pub fn protocol_fees_vault(&self) -> Pubkey { + dlp::pda::fees_vault_pda() + } +} diff --git a/test-integration/test-tools/src/validator.rs b/test-integration/test-tools/src/validator.rs index c2f89766..7999adbd 100644 --- a/test-integration/test-tools/src/validator.rs +++ b/test-integration/test-tools/src/validator.rs @@ -6,8 +6,9 @@ use std::{ time::Duration, }; -use crate::toml_to_args::{ - config_to_args, rpc_port_from_config, ProgramLoader, +use crate::{ + loaded_accounts::LoadedAccounts, + toml_to_args::{config_to_args, rpc_port_from_config, ProgramLoader}, }; pub fn start_magic_block_validator_with_config( @@ -31,7 +32,7 @@ pub fn start_magic_block_validator_with_config( } let build_res = command.current_dir(root_dir.clone()).output(); - if build_res.map_or(false, |output| !output.status.success()) { + if build_res.is_ok_and(|output| !output.status.success()) { eprintln!("Failed to build validator"); return None; } @@ -57,6 +58,7 @@ pub fn start_magic_block_validator_with_config( pub fn start_test_validator_with_config( test_runner_paths: &TestRunnerPaths, program_loader: Option, + loaded_accounts: LoadedAccounts, log_suffix: &str, 
) -> Option { let TestRunnerPaths { @@ -71,19 +73,19 @@ pub fn start_test_validator_with_config( let accounts_dir = workspace_dir.join("configs").join("accounts"); let accounts = [ ( - "mAGicPQYBMvcYveUZA5F5UNNwyHvfYh5xkLS2Fr1mev", + loaded_accounts.validator_authority().to_string(), "validator-authority.json", ), ( - "LUzidNSiPNjYNkxZcUm5hYHwnWPwsUfh2US1cpWwaBm", + loaded_accounts.luzid_authority().to_string(), "luzid-authority.json", ), ( - "EpJnX7ueXk7fKojBymqmVuCuwyhDQsYcLVL1XMsBbvDX", + loaded_accounts.validator_fees_vault().to_string(), "validator-fees-vault.json", ), ( - "7JrkjmZPprHwtuvtuGTXp9hwfGYFAQLnLeFM52kqAgXg", + loaded_accounts.protocol_fees_vault().to_string(), "protocol-fees-vault.json", ), ]; @@ -94,7 +96,7 @@ pub fn start_test_validator_with_config( let account_path = accounts_dir.join(file).canonicalize().unwrap(); vec![ "--account".to_string(), - account.to_string(), + account.clone(), account_path.to_str().unwrap().to_string(), ] }) From 0dceb8b46edbad91d7e6349af877bede4142019f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 12:21:03 +0545 Subject: [PATCH 29/58] ix: match ephem validator keypair to the one we use on chain --- .../test-ledger-restore/src/lib.rs | 20 ++++++-- .../tests/test_domain_registry.rs | 2 +- test-integration/test-runner/bin/run_tests.rs | 5 +- .../test-tools/src/loaded_accounts.rs | 51 ++++++++++++------- .../test-tools/src/scheduled_commits.rs | 1 + test-integration/test-tools/src/validator.rs | 10 +++- 6 files changed, 63 insertions(+), 26 deletions(-) diff --git a/test-integration/test-ledger-restore/src/lib.rs b/test-integration/test-ledger-restore/src/lib.rs index 7ce9aaed..1f9ff556 100644 --- a/test-integration/test-ledger-restore/src/lib.rs +++ b/test-integration/test-ledger-restore/src/lib.rs @@ -3,6 +3,7 @@ use std::{fs, path::Path, process, process::Child}; use integration_test_tools::{ expect, + loaded_accounts::LoadedAccounts, tmpdir::resolve_tmp_dir, validator::{ resolve_workspace_dir, 
start_magic_block_validator_with_config, @@ -11,7 +12,10 @@ use integration_test_tools::{ workspace_paths::path_relative_to_workspace, IntegrationTestContext, }; -use magicblock_config::{AccountsConfig, EphemeralConfig, LedgerConfig, LifecycleMode, ProgramConfig, RemoteConfig, ValidatorConfig, DEFAULT_LEDGER_SIZE_BYTES}; +use magicblock_config::{ + AccountsConfig, EphemeralConfig, LedgerConfig, LifecycleMode, + ProgramConfig, RemoteConfig, ValidatorConfig, DEFAULT_LEDGER_SIZE_BYTES, +}; use program_flexi_counter::state::FlexiCounter; use solana_sdk::{ clock::Slot, @@ -36,6 +40,7 @@ pub const FLEXI_COUNTER_PUBKEY: Pubkey = /// Then uses that config to start the validator. pub fn start_validator_with_config( config: EphemeralConfig, + loaded_chain_accounts: &LoadedAccounts, ) -> (TempDir, Option) { let workspace_dir = resolve_workspace_dir(); let (default_tmpdir, temp_dir) = resolve_tmp_dir(TMP_DIR_CONFIG); @@ -56,7 +61,12 @@ pub fn start_validator_with_config( }; ( default_tmpdir, - start_magic_block_validator_with_config(&paths, "TEST", release), + start_magic_block_validator_with_config( + &paths, + "TEST", + loaded_chain_accounts, + release, + ), ) } @@ -104,7 +114,7 @@ pub fn setup_offline_validator( ledger: LedgerConfig { reset, path: Some(ledger_path.display().to_string()), - size: DEFAULT_LEDGER_SIZE_BYTES + size: DEFAULT_LEDGER_SIZE_BYTES, }, accounts: accounts_config.clone(), programs, @@ -112,7 +122,7 @@ pub fn setup_offline_validator( ..Default::default() }; let (default_tmpdir_config, Some(mut validator)) = - start_validator_with_config(config) + start_validator_with_config(config, &Default::default()) else { panic!("validator should set up correctly"); }; @@ -153,7 +163,7 @@ pub fn setup_validator_with_local_remote( }; let (default_tmpdir_config, Some(mut validator)) = - start_validator_with_config(config) + start_validator_with_config(config, &Default::default()) else { panic!("validator should set up correctly"); }; diff --git 
a/test-integration/test-magicblock-api/tests/test_domain_registry.rs b/test-integration/test-magicblock-api/tests/test_domain_registry.rs index 180d2a32..a8119c6c 100644 --- a/test-integration/test-magicblock-api/tests/test_domain_registry.rs +++ b/test-integration/test-magicblock-api/tests/test_domain_registry.rs @@ -112,7 +112,7 @@ impl TestValidator { let process = start_test_validator_with_config( &paths, None, - Default::default(), + &Default::default(), "CHAIN", ) .expect("Failed to start devnet process"); diff --git a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs index e48f03c1..71757c21 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -340,7 +340,7 @@ impl ValidatorCluster { fn start_validator( config_file: &str, cluster: ValidatorCluster, - loaded_accounts: LoadedAccounts, + loaded_chain_accounts: LoadedAccounts, ) -> Option { let log_suffix = cluster.log_suffix(); let test_runner_paths = resolve_paths(config_file); @@ -352,13 +352,14 @@ fn start_validator( start_test_validator_with_config( &test_runner_paths, program_loader, - loaded_accounts, + &loaded_chain_accounts, log_suffix, ) } _ => start_magic_block_validator_with_config( &test_runner_paths, log_suffix, + &loaded_chain_accounts, false, ), } diff --git a/test-integration/test-tools/src/loaded_accounts.rs b/test-integration/test-tools/src/loaded_accounts.rs index ea1238da..5c9203b0 100644 --- a/test-integration/test-tools/src/loaded_accounts.rs +++ b/test-integration/test-tools/src/loaded_accounts.rs @@ -1,17 +1,32 @@ use solana_pubkey::pubkey; -use solana_sdk::pubkey::Pubkey; +use solana_sdk::{pubkey::Pubkey, signature::Keypair, signer::Signer}; + +// mAGicPQYBMvcYveUZA5F5UNNwyHvfYh5xkLS2Fr1mev +const TEST_KEYPAIR_BYTES: [u8; 64] = [ + 7, 83, 184, 55, 200, 223, 238, 137, 166, 244, 107, 126, 189, 16, 194, 36, + 228, 68, 43, 143, 13, 91, 3, 81, 53, 253, 26, 36, 50, 198, 40, 159, 11, 80, + 
9, 208, 183, 189, 108, 200, 89, 77, 168, 76, 233, 197, 132, 22, 21, 186, + 202, 240, 105, 168, 157, 64, 233, 249, 100, 104, 210, 41, 83, 87, +]; +// tEsT3eV6RFCWs1BZ7AXTzasHqTtMnMLCB2tjQ42TDXD +// 62LxqpAW6SWhp7iKBjCQneapn1w6btAhW7xHeREWSpPzw3xZbHCfAFesSR4R76ejQXCLWrndn37cKCCLFvx6Swps +pub const DLP_TEST_AUTHORITY_BYTES: [u8; 64] = [ + 251, 62, 129, 184, 107, 49, 62, 184, 1, 147, 178, 128, 185, 157, 247, 92, + 56, 158, 145, 53, 51, 226, 202, 96, 178, 248, 195, 133, 133, 237, 237, 146, + 13, 32, 77, 204, 244, 56, 166, 172, 66, 113, 150, 218, 112, 42, 110, 181, + 98, 158, 222, 194, 130, 93, 175, 100, 190, 106, 9, 69, 156, 80, 96, 72, +]; pub struct LoadedAccounts { - validator_authority: Pubkey, + validator_authority_kp: Keypair, luzid_authority: Pubkey, } impl Default for LoadedAccounts { fn default() -> Self { Self { - validator_authority: pubkey!( - "mAGicPQYBMvcYveUZA5F5UNNwyHvfYh5xkLS2Fr1mev" - ), + validator_authority_kp: Keypair::from_bytes(&TEST_KEYPAIR_BYTES) + .expect("Failed to create validator authority keypair"), luzid_authority: pubkey!( "LUzidNSiPNjYNkxZcUm5hYHwnWPwsUfh2US1cpWwaBm" ), @@ -20,13 +35,6 @@ impl Default for LoadedAccounts { } impl LoadedAccounts { - pub fn new(validator_authority: Pubkey, luzid_authority: Pubkey) -> Self { - Self { - validator_authority, - luzid_authority, - } - } - /// This use the test authority used in the delegation program as the validator /// authority. 
/// https://github.com/magicblock-labs/delegation-program/blob/7fc0ae9a59e48bea5b046b173ea0e34fd433c3c7/tests/fixtures/accounts.rs#L46 @@ -35,17 +43,26 @@ impl LoadedAccounts { /// `cargo build-sbf --features=unit_test_config` pub fn with_delegation_program_test_authority() -> Self { Self { - validator_authority: pubkey!( - "tEsT3eV6RFCWs1BZ7AXTzasHqTtMnMLCB2tjQ42TDXD" - ), + validator_authority_kp: Keypair::from_bytes( + &DLP_TEST_AUTHORITY_BYTES, + ) + .expect("Failed to create validator authority keypair"), luzid_authority: pubkey!( "LUzidNSiPNjYNkxZcUm5hYHwnWPwsUfh2US1cpWwaBm" ), } } + pub fn validator_authority_keypair(&self) -> &Keypair { + &self.validator_authority_kp + } + + pub fn validator_authority_base58(&self) -> String { + self.validator_authority_kp.to_base58_string() + } + pub fn validator_authority(&self) -> Pubkey { - self.validator_authority + self.validator_authority_kp.pubkey() } pub fn luzid_authority(&self) -> Pubkey { @@ -54,7 +71,7 @@ impl LoadedAccounts { pub fn validator_fees_vault(&self) -> Pubkey { dlp::pda::validator_fees_vault_pda_from_validator( - &self.validator_authority, + &self.validator_authority(), ) } pub fn protocol_fees_vault(&self) -> Pubkey { diff --git a/test-integration/test-tools/src/scheduled_commits.rs b/test-integration/test-tools/src/scheduled_commits.rs index e4c11ef3..4c1418ad 100644 --- a/test-integration/test-tools/src/scheduled_commits.rs +++ b/test-integration/test-tools/src/scheduled_commits.rs @@ -24,6 +24,7 @@ pub fn extract_scheduled_commit_sent_signature_from_logs( None } +#[allow(clippy::type_complexity)] pub fn extract_sent_commit_info_from_logs( logs: &[String], ) -> ( diff --git a/test-integration/test-tools/src/validator.rs b/test-integration/test-tools/src/validator.rs index 7999adbd..5fceb548 100644 --- a/test-integration/test-tools/src/validator.rs +++ b/test-integration/test-tools/src/validator.rs @@ -14,6 +14,7 @@ use crate::{ pub fn start_magic_block_validator_with_config( test_runner_paths: 
&TestRunnerPaths, log_suffix: &str, + loaded_chain_accounts: &LoadedAccounts, release: bool, ) -> Option { let TestRunnerPaths { @@ -26,6 +27,7 @@ pub fn start_magic_block_validator_with_config( // First build so that the validator can start fast let mut command = process::Command::new("cargo"); + let keypair_base58 = loaded_chain_accounts.validator_authority_base58(); command.arg("build"); if release { command.arg("--release"); @@ -47,9 +49,15 @@ pub fn start_magic_block_validator_with_config( .arg("--") .arg(config_path) .env("RUST_LOG_STYLE", log_suffix) + .env("VALIDATOR_KEYPAIR", keypair_base58.clone()) .current_dir(root_dir); eprintln!("Starting validator with {:?}", command); + eprintln!( + "Setting validator keypair to {} ({})", + loaded_chain_accounts.validator_authority(), + keypair_base58 + ); let validator = command.spawn().expect("Failed to start validator"); wait_for_validator(validator, port) @@ -58,7 +66,7 @@ pub fn start_magic_block_validator_with_config( pub fn start_test_validator_with_config( test_runner_paths: &TestRunnerPaths, program_loader: Option, - loaded_accounts: LoadedAccounts, + loaded_accounts: &LoadedAccounts, log_suffix: &str, ) -> Option { let TestRunnerPaths { From 7fdc7956bb263e5b36fe818ae6500c239a2baf12 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 12:32:09 +0545 Subject: [PATCH 30/58] ix: ensure we always pass same loaded chain accounts to chain + ephenm setup --- .../test-ledger-restore/src/lib.rs | 5 ++- test-integration/test-runner/bin/run_tests.rs | 31 ++++++++++++------- .../src/integration_test_context.rs | 2 +- 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/test-integration/test-ledger-restore/src/lib.rs b/test-integration/test-ledger-restore/src/lib.rs index 1f9ff556..7d528fa9 100644 --- a/test-integration/test-ledger-restore/src/lib.rs +++ b/test-integration/test-ledger-restore/src/lib.rs @@ -163,7 +163,10 @@ pub fn setup_validator_with_local_remote( }; let 
(default_tmpdir_config, Some(mut validator)) = - start_validator_with_config(config, &Default::default()) + start_validator_with_config( + config, + &LoadedAccounts::with_delegation_program_test_authority(), + ) else { panic!("validator should set up correctly"); }; diff --git a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs index 71757c21..bfc45009 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -18,7 +18,6 @@ use test_runner::cleanup::{cleanup_devnet_only, cleanup_validators}; pub fn main() { let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap(); - let Ok((security_output, scenarios_output)) = run_schedule_commit_tests(&manifest_dir) else { @@ -61,12 +60,14 @@ fn run_restore_ledger_tests( manifest_dir: &str, ) -> Result> { eprintln!("======== RUNNING RESTORE LEDGER TESTS ========"); + let loaded_chain_accounts = + LoadedAccounts::with_delegation_program_test_authority(); // The ledger tests manage their own ephem validator so all we start up here // is devnet let mut devnet_validator = match start_validator( "restore-ledger-conf.devnet.toml", ValidatorCluster::Chain(None), - Default::default(), + &loaded_chain_accounts, ) { Some(validator) => validator, None => { @@ -98,11 +99,14 @@ fn run_schedule_commit_tests( "======== Starting DEVNET Validator for Scenarios + Security ========" ); + let loaded_chain_accounts = + LoadedAccounts::with_delegation_program_test_authority(); + // Start validators via `cargo run --release -- let mut devnet_validator = match start_validator( "schedulecommit-conf.devnet.toml", ValidatorCluster::Chain(None), - LoadedAccounts::with_delegation_program_test_authority(), + &loaded_chain_accounts, ) { Some(validator) => validator, None => { @@ -118,7 +122,7 @@ fn run_schedule_commit_tests( let mut ephem_validator = match start_validator( "schedulecommit-conf-fees.ephem.toml", ValidatorCluster::Ephem, - 
LoadedAccounts::with_delegation_program_test_authority(), + &loaded_chain_accounts, ) { Some(validator) => validator, None => { @@ -164,10 +168,12 @@ fn run_issues_frequent_commmits_tests( manifest_dir: &str, ) -> Result> { eprintln!("======== RUNNING ISSUES TESTS - Frequent Commits ========"); + let loaded_chain_accounts = + LoadedAccounts::with_delegation_program_test_authority(); let mut devnet_validator = match start_validator( "schedulecommit-conf.devnet.toml", ValidatorCluster::Chain(None), - LoadedAccounts::with_delegation_program_test_authority(), + &loaded_chain_accounts, ) { Some(validator) => validator, None => { @@ -177,7 +183,7 @@ fn run_issues_frequent_commmits_tests( let mut ephem_validator = match start_validator( "schedulecommit-conf.ephem.frequent-commits.toml", ValidatorCluster::Ephem, - LoadedAccounts::with_delegation_program_test_authority(), + &loaded_chain_accounts, ) { Some(validator) => validator, None => { @@ -208,10 +214,13 @@ fn run_issues_frequent_commmits_tests( fn run_cloning_tests(manifest_dir: &str) -> Result> { eprintln!("======== RUNNING CLONING TESTS ========"); + let loaded_chain_accounts = + LoadedAccounts::with_delegation_program_test_authority(); + let mut devnet_validator = match start_validator( "cloning-conf.devnet.toml", ValidatorCluster::Chain(Some(ProgramLoader::BpfProgram)), - LoadedAccounts::with_delegation_program_test_authority(), + &loaded_chain_accounts, ) { Some(validator) => validator, None => { @@ -221,7 +230,7 @@ fn run_cloning_tests(manifest_dir: &str) -> Result> { let mut ephem_validator = match start_validator( "cloning-conf.ephem.toml", ValidatorCluster::Ephem, - LoadedAccounts::with_delegation_program_test_authority(), + &loaded_chain_accounts, ) { Some(validator) => validator, None => { @@ -340,7 +349,7 @@ impl ValidatorCluster { fn start_validator( config_file: &str, cluster: ValidatorCluster, - loaded_chain_accounts: LoadedAccounts, + loaded_chain_accounts: &LoadedAccounts, ) -> Option { let log_suffix 
= cluster.log_suffix(); let test_runner_paths = resolve_paths(config_file); @@ -352,14 +361,14 @@ fn start_validator( start_test_validator_with_config( &test_runner_paths, program_loader, - &loaded_chain_accounts, + loaded_chain_accounts, log_suffix, ) } _ => start_magic_block_validator_with_config( &test_runner_paths, log_suffix, - &loaded_chain_accounts, + loaded_chain_accounts, false, ), } diff --git a/test-integration/test-tools/src/integration_test_context.rs b/test-integration/test-tools/src/integration_test_context.rs index 4afd9ccc..a6efa7c2 100644 --- a/test-integration/test-tools/src/integration_test_context.rs +++ b/test-integration/test-tools/src/integration_test_context.rs @@ -450,7 +450,7 @@ impl IntegrationTestContext { const MILLIS_UNTIL_RETRY: u64 = 200; let mut failure_count = 0; - // Allow transactions to take up to 20 seconds to confirm + // Allow transactions to take up to 40 seconds to confirm const MAX_UNCONFIRMED_COUNT: u64 = 40; const MILLIS_UNTIL_RECONFIRM: u64 = 500; let mut unconfirmed_count = 0; From 47ccee42ecdc01aec7a1552ab03494745e0b3190 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 14:10:23 +0545 Subject: [PATCH 31/58] ix: update dlp binary --- test-integration/schedulecommit/elfs/dlp.so | Bin 351544 -> 321056 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/test-integration/schedulecommit/elfs/dlp.so b/test-integration/schedulecommit/elfs/dlp.so index 8de09c66e427ee2806426b75b5ea5094c29ab7fa..71a5dd1efcde0c3e06963ed7a27e5760694b5c27 100755 GIT binary patch literal 321056 zcmeFadwg9-buYeU=fqBU$gy*5M;Bo!j$=hdMRs0}NVtNHN}cT z;_(I|I}jZIxZs2kOn$W0k^GP;y>)>$ZZECe{(4(Op>?4T18uJ+&3et6HM3_wbmJ>-n%2~m_%%KGc7oKvFHO^|UVCuj zi|5&|BNSEU{Q-W3c0TEAnxPNoOGl=&NU})o zZ?c@^(7q+2MT-eCC`=lN&kN^h|C&qJvs zDJB1M2KS+5OviB0=X!8{P#4bsA)M<_D@kHF=wCfJ->(bj?+K@yREgn$WA)&CuP&Uw zBAlzq=NJy;tsb23)`jzDgmV>YNxfBDl#eky$VEMP->D1l4-Fn_0p5lhc#w>G@cy$d 
zyr&gjddGmm8M=ycm9AP_0~egH2lsRY_i5?oJe0d*MC;hsNTy>muvpe!Cjh;;qGYw_w@>QkJYCA2mA2JOC5+nuob)Qd2BNYka{VQZ&b(?gYmR_;aC&IdIew{y5bJH6V@VXIHM z3Zq9fT}mF%c4B%9yUxI_fIp?=N%;l3|E|^VV*8cf({$R>q4yY%qYdzQvEp&m;5Z(~ zG+j#WZivT)ipSdxkB?h@$Kw;4PFvda9^-Mm0Uj?@JdPV2$K!;iOUdmG@pz8nalhg5 zS*!1Od`{D8%Y41Zc${p2$MY1AlLp7}IHl=QvZ5g#&s99u86ID?`i{rfG@Z80(tC`@ z=>~XwPXG>8P8%G@BhmIt$K!)uQojXvw+8U43C*Lc+3>}isfyq;068uy5ccY z+bcIY91w6V9k2CB~WG5XDA*JwTf#GpQ0gq0@<5UeE z%NpQuSn*g^z@yjj_(nrKE>b+6Z+Ki+z@x|TI9Y?o$_98GP&`%^@K|kl{Cz__W-A`r zjL7*d1w2+69w%z>Sla-PCl!yi1v~}}k54wlV~*nSPx2S~_Sym-cNiYWYw+060FSpT z9vcdHY%x6kXG1*T@|BX$8y*`Acx*B}j@96?wE-UYD;`@5c#IexHcr)%)3W063Bw~4 z@YrT}9Ie4)X9GOeDIPlucRh{q+0$0LTv?-ua*J;P(729Ki+@K~UD94+8+%<$-Ih)0X! z@g~FL<5quzr9WYKjMm_Bya681S3Hgv@Hk<3yu2YE|4s3@%i8^{)qkC(KWBLCslnr9 z13bPj4$V;IWC4#;hR2l+@pysa!HMut<*Qcz7Zi^$)~of_i#rQXKUCIg`c*K(nqJ1| zGAdx4%Jb>xu9tkspXaX^C$tUe$K{V)q0*7nhsW*ZLWTYx>UUS=(;q|rvM7J3@-@YK zcy62I0xsz`Ja>Vm$jhUb#-p+Y}w zcrHd|Jg0v+JQrM$dDP|L7VI3OG1l9#1?#oFu7nlkFn!rG#{z zPqgqz<0~F7nOv$_$qac8y?NR|VBQPVyOvr%*!`rh#PsB4tOvL!AL4IdSf0*?)(D)_ z(8C+7oIGR?*?!kj_6PIj1X$8cq{6dd{wLc$p z9N`n>mn|kZY4NAY6@0c}9q(ud7vO8VPX85b0Q|s}(f?Ye(J%Jg=sL`f6!A)jSboy) z)-LEhx?SkQX0(ui%=+Q<$m9bLCDTyV zcW|c5;(6sJ2(IwI{mdD?UrO$TOEo+B?{Y)=Vmsw&S7`y)$8Zt(P;Tnf{K+=Gsh;Ck z$)dc7e+e3Ap5|lGlMekgc{MX8iB8ft4bd5)zv+S7sYxIsS9v-gcsg9VMIei8Nk0|7 z!Eks*J{m{ewv%Xp9ERoh=;?K>Q^|5-arto?SDbNTY|ps&gBwsnX6IqNxn{>#u| zj?v*K&WA0=hYkNfAA|zor_F~s#)qG{9E=+O_wfD`Z~vIJfApu+zPe+=|Iv14?iV)=ynl)IxhFSJm2r|mnPrx5AQ^oY{C^2QIE?(?SpTW_7VEoz z;Qr>M*?G6m*Ec$`!-=cT-Yuum4_;Uo~(<;Up+<(Bh3(ugrtkn{X&nKju!%r{t zcnG5VqUYlr&(mw>Tbl8K^cou%#~D9Jui**uhHU37$rQk(%HN>n(T_Iy^`~~f`@GL# zSU(f)XZds$=^yTAd+93DKip3|(^aH@xL?7^dNJuA?y|Jg{Xn|^rc327$`RkZoY5^ShuOZg zqkmDJ{msi0pB(NyExBI)w#olLwfp2F@lY^w+6V{)VyoV>Ozbmc3hHtvk=Yf?aogMd2U~ zI+v0!GcD!y`JRtuX+CGXGqWp_x%*}*Y!~d%&&qGie|K)p^Wn!X5I@%A6Pdm0Bwxx+ zFH;CEXC<3`hP~uN`MjUg=ZNRQQ*3V6WbXgEd|t%)1?D5bf8%VM9~j-%i=058C&@nf zO}c}=ZITcBfPT)9a-e7Vyz?}TxCsbRKCeUmN`0Fzl#;J2eah!Eju*D-<4n(kjms`K 
zt#2buo)&&7eOGcqh&0-jbv*?Twi2&QZ_S@~IQ?(?wzeC#vV307($Y+?K^_UpGrGza)Pk!NNxRR;KZr+7`$!?iAyWPv;WE`fm z`DqrX5&Uq?7e4QPD}MuSO`Pl|KC6l6tou!U;Pa_)83_{>8UHS22do=f7Vy&n{V8!y zK1pB0^65>i@8cEp1N7N^Ipr7e4P5Cp1IU(f-s;)BDjh^_KL1Z!)>1w$GPo@Rnof7E z)_bO373Psm zQ=V@6C5-~H@^spxNu#^#*W?58P_EY1jPp-!XS#VU@8J>NGrPCY(gL?vezvY7!!y0R zrh(o)LVD$JENyW5*O}fuLi*?BES;4r=-ngia9-ZhS^0wAy@&WeD9_vE=hMO$T+lzz zv2Qy5GL3ttcT!&U?#aJYK2JW3r{Ke1XgXZMc0(KS4U5=6mt(}`u)idXQGU9H*p95@ zE1c^eprboEsL5o#JXU+UgXw0HHA#CYSChlc_ju}h!B5M#GavHRCO>8W6!Ddq9Io`j zm72!5(f*L&GfsG6l=+ipD8fP3ll2`fpV{~Agws68d-@proi?%G>8i(>auba10 zj>9P7m6B96QRpmq0`GL!GHu8Em)_CMwBhUh=o*v?a+Q*c^|n`iQ2XZ?Zg{bS=3> z^GgZ+zasrI{HP|$=1UB}OMX)Laoj8=ixtlw20!eZ&FOdC=;8D`W@)G2QA<1hj##=# zKdy(B658z_20!dO&hgu0?KytCE$#U2w6x<3V|&l;ciDH_Q}8z--S1iNCT-9hGO{=j`qyFcK5$l3e=@Nc2u zosHu|2FKusv0mce&hb2qt+cdMlq-yNTNOx5@cz_*-ebNU z5x&LyLa_f0^7|!eBOD-|Cf6zWj8AUoGCi?4Ve=&{=X#a)tkm|pmH}(v+^zS?JN1?z z4uXF8z5$PiOzz6$N3!i{lp&i;A%X0!icocMY^>Z3*AIVyOT^sNv0sh>E5{+G{R zto~|h`~iO~H_zkv0(&N+kX-)=`Qi4^<77qoKzm!|e<``2{A(cZQ=!v41dh{Z z;{(K}?|hk%0AHumJhRhIr+F?vMyGi$Uq+`aE>~7Kjz92jlbl%2RQ_$8fqc9^mScmH z$#K?>$#I5<$+6)Xw>KhfP8xjp8K7I-KKO)oUx{{|uY-!V#!aMn;(SWqhuX9=VkXWM z;rjgB*Bxev=>uOkub^E^hX=Gi@KC!iDP;7|Jx#g%QQQ53y`MBYRX&e?Un%L;cIPWr z=o$6C-n_Qk;&LcjoX<|K)~Z;=d#Uf!#JO>kxa~Q(SFsub`DCD z&Y%;{T4 z=#kpv%ZL}`4ETEgvT+52tz6BEcn@QyCyU(gGCf)(gq16dnLc|wY0UK5>id2}w@W@= z`TTA3Ek@7lNY7I83We)-y~e*ZdGv8}@-8i&kFyV3TIj0$SjF+CYh}ScT~e@5mso#l z#=mMiUP=~f1F_v!d*uFVJKN93;~SVxJE=Em59REc#^pbHGwf(}yO}+^gz{B8{urOE zUqyQr+c%Yi*~IJY<1+ECH!lAM@Em7@S3lY9POx|&;K;uuokjjjTFVpNO?oNJYad(Pu7I()w2Q+8nh}owL4W593JbJr` zyF+4J-)9J%Hc4Y&A^N>W{=o40zQA-9;dtDtb@xfNi$0HQevcHED_qO_--!|KMY`S6 z0uSldA=)U+i!ttX&7;5}jdrv3u4^gJIUGwHocUP@W_0^D$q4?z8>EzAyMstsd+%`giX-u5Y=VmXc#iH$2BR-12Ts zhf(siPorESXSSZ6wvf*0t`nqph4mnxm|w*64a^%S$L+Z&*}El;ac-xKd0})w>wDZR zTPJy#<$YW!%jgNzfFJIzXPMu5CH2$GyPovV;(XKlU2^=qrz*m$@BeztY)04xJ+Y zzTeZ{i}Cl>LxuhFeUm=!2Aj_VKc&0XAF5E^Ty9GV-VT-ZBb_C2p8es1KhiGG;g7Tx 
z=Glukfgc-4Pg`e)?}hdpk8~R8YviWuA{}f8?N|Aa9l)Z#|M-U23Vvo+Hr{FWwv+N# z>_@tT@V)<=JFI*M=@2fU-I{!y{n!en?cG6DD74X%#`%*y9{YK9L*dzz%Lr z1+PuC7n`3X{WAS&IYK#h{c53okoTFWe=W2J@;;NLkL!KoBgTh8>!0*P{^>kJ>2@dO z2mRPk=*NW6DU4HI7TEaNYU_KYg!{|ai!cI07$e@z_pu$11B0Hh$gr=tyWimSLBC%s z{dT$Y_?-JEVITS9@#O386hLyh|9Ac6jFWr&sp(@{re23J(k)%oruBl^)pQXRDvXhC z>7s6XPP(OwmT5XIn|uOMxiWjOP?LFo?q|B$=VdFHP9LWJY+gwFi}4xs@4B4ly7+iq zO1`B0^7n_rJmR-;*!X8~n{D2Z$^8wve0Hy*?}C=`{7SiluX_r7eVfQ<>hfcDe52|6 z^&4_{>k9A|8@xjWc-tWFkD^uCGbO)qZGN11Z@!uLw3BpoJ;>}qVg8Z!kRO?Tm^^{} z*NB|B{ee7zjtfYp%n$PMq!k-wH-!G|G+lHvnn5d_EA2{+a!=aWJ!g_8w4X|7Nqk+rRY}5RVK$v$w(CiwKvo{owQ9 zORavp-ETGdUe0ol6PC96P2J3&)~5Hq zY1HF#6VtvwvfQ*lpNH{W4n%}C9uCNpTlKN7i@=^tn!JGyDCcp?=Eqph=d<9;H^3M3 z&%jrd&*)}!bvxy7Ce7}l{vT_7;?>M;2~}-E*Yx!Qy}$gV~8EQA_0Oa^7KI)XuNV^*kMyUbYu}pX|#G{*88jyXjTQ@Ul3Q+b_2VlRj=-#(J5aUB>!h%=FCl*!9f! z@422?ec%7+^JcfV3vM9%IIcHUf94N_f9a}2DyMN=EVh4{J)1B)Z+ZZ|=~ulWeVvc4 zmu^pEeOI{Ki7)Cui262;hxTrL5H4-gdu~Twtm#t1doVo8Of%+*k zc@Gv(_VQ_ujSEq~ZVVyPsT&xc)JGW)f30L>(u8`-am z_3=5`uUUon1cxD4ww@E(JJ6h@iSNdX%pP=553>E5;*;WeqB4Lmm43We4t(M$M=0f|E5X#u%GnE?A$oztZT^vZOF%~1;!_**9<{U zKqRemayg%>U7s$r^!TCE!{4z_0Z?dY3FKf*Wcq!J6Bp7To!v^>mymeeE)*^jR%Ans*mfmc&2y0zX0@XlSiQA zQ$k0`m&%9l$Fld8oKI_no_M2vQPZHlVIQ`Wj^(B;+JKMGfGb6ljSrKqebZ?eCwsM6 zvT+XOv2Qxwe&INP@*WqRFNxF9x}4iVA1~4t%B$PMc>NvmD&LnA=Ob>4xYNvne|TWM zw7cqv@(c2e{`)zg>&0Y4o+iyNY&QQY+zu&}{@MI;jnYlZy$QLZuXpqA@5ekXNgWSX zYc;Wbmui7@*J1K=AKOpwpq!?6SpQa057QpA>z%YGPLJ!K#H^U}%r6!E;ddclknbpr zFHcB+!yIUZtYaA78_oZnG`zi^8U8{C#s7B4pAMhMz2cu}c<&rU%nhj0C<^l*{QeN%JW2wL*WUNT(Q2;1&1lRf3=QE63xX z*}loOjCYFI#&+u6qNcCte$OzT%RT7ia+*GMRLgrmGC$M&n{);H>v1L6*{X485$Ba* ze3sH_;}W(T_MK9=zW%>PY+woX*-lr8$w>x2x(Yf>o+P_I|N6Y5F8yC~w zq<660z8Q2pHr{Ub=Da0Z4|W9ob$_qc-W*lDb9-}A(`m~Iy~q5Zy*XZ!e^mHX&)!@j z`iF5x9-r~~m)n`Jk9fQOxt;O+v~z@b^z5|zR^B%a@P5Zy-nVVwebpAdZ(Pa#`~0%` zVI@Q6$98pt8qmY{>pk6-Xg&8MJig@qt;CCzk7>(HE!WlEqV+(>srtFoVTW#~9dds) z+eg?rLHsxE;r))?diQn2blWKNR~^>-B;EAP?~(T+6~B!yw|-lH(^bUR{ZoIxQqCDw 
zI5{2ON;+(!yl#9g;iNq$6t2e$d|y&Fp5Lm~>crPhDExdJKW=Fs$B$W>^|N@uFw@yM zZsU5|b5ijrj^pda-ovl<_rYMler>DJxx)T=Ki3PNwA`JBuleV4-kIW&W@+M;ucs03 zH1o6BFJF&yJK^+5HytOQgVs(v?_r#LNPBp9ye0miC1&S_3jVd|uYgIqg8A2G-(Ux! z*PG412?uFMVXx(%?0cg;b-nrxEqq{ zBfX)6_Z@9|_j!h|xBGj@VJYF1WgS`ACFe`YfGU^UN%s?c|48{f`fWbmqW{0&2V21Q z%GtT_-(`QURB-y92H-o~u$1}XN^5`mC7R*u%lBTjK-0y2gW=_@Hy^hV{)V1bNa5eA1AD6FZ0}tTSW#{-gUVRWmxh7wuw`9J+A%?UEU3bHa6~CBd(%)!5y-R;4 zlh@i^5zOs`zn2Mmeda+aZ|6^>_C7gm2(FAz%bAYpyiMp_o+gOP6>c=Tm0o0N0W4Qv zi5=~mP5^GdO35014u0bD^GHg`I!zn8`WyW*cQu)h=0-GI?O|D|CjBBB$>bJ3ThPsW zMrT1!=?fzu7v7`Am7R}g{FrRh?BpH1C2z2N;8R8Sft6aKivLze=|8}PpEKb0d9uUu zZ?OAycE62x@S`I9@Nvi2OMIO0_W=C;-%?`l?G^Z+;RU%HA#C84$>ojAo4lR(1y}K& z(Mys_FPk4d&BwBDg!J?A*!_)nSo?3di3K5#RrI>+QhiX;Q~GM<^}imzXVEK*_ae>< zN%#4*@3Sl0cTv*2Ne|y&?DFpK-}}6w5u9xO-T5Q>0uH3@gq!K-T&A5LzP{rAezs12 z1NrCk*fk;)$myH@-xPrR2`;zmMX&I@>t@!QyqWDR61xQc!cOTO!X=mcj|T)#_ze%r z{biD$onN?)Qjyv^llXmVmmR;f=ai=7aqSq!*}V#0_b;AA+Wbi9%Nr$~$K{ELub2A% zFO>U|$hxP~2M8~nOMHHGbZk?=Q>CMiFFvjzz7btByM~QlLPuPnJ5&vH$9=uzql(i5 z{tfcWO;CL4?_KQQdI5v{jP7<`586iw-+y>Ex`U3(r%3PLIR|=&yMV3qV+HT-CuZ_1 z-2o-ae(nLgm#ne;zEZ1JbotEW_F9wMYYDdjozFWPon!i<>GF9*APgFPg^tMa^J{z` zr~7lhjye_kmZwPHUFSmIb?m>#0by@InHu_f^o zk8->)dzf}|K96#TqquO|c_=G~qG{)0%9rm;7(*+ZKc5hN^L@QS7I^u-qIz+)LsAcN*CEAyyh(c|3@`S}*9%-P<9OcjxPP`@fO2Tv*L{5- zTFgJ~pY8w9xM18r8;9fa+CP&6l%Ms&h%dJPBQC!O^r#a@^!{EL_1E`N`FXSM4-L1| zPW$_vu>S(~Om_b4rKGRdFHh(DAzm(A#^*yf8Xs&P>2}V)CpLU5s3mq!ZfMclbv!b^ z%={_zpB6;MUMUa${jXbOoEiiap?eeUe-4lN&fiHXk1Kos+VO;jII?g$Ipi6o}hd(f)eb9#~a-5^tgw|X)^y`@Bw|gmgBF` zOVYTq^RtXTz!P};It#`N#E)zI|J%8b1oZR^bWJV3m|l3Cdf@&b$tvgV*tl>9_08wS zex3v3XdtoAmz++Mg}9~9qt;8KAc^ne@p(1)`fqQ_`_a6ISbF>A8O=$XWXbmr6wd|Q z`P|19-=`ZcxBM&Ef$JaR`EaqmsQ$8`M(SF`-4|15Dcea`pgdO@E za-{I|ugH(hqf?h7IcEsZOUZvIJwqGyAza?3fQA-#@ZQ0VAw!q)Jfh)tyKiUy@DR(T zgRGY}5l-s+*KGe~x?I6!cGtgm>GOTpPv|k^qigo7wSu_Yaf}E+O+QY747R31H7w}K-W6H;^ zjPILHD7lo%gVe8>pYnL5^%DBWzTeRO&;I$`mmb5#d^TH0 zUCj2-&WA<+-Cqvlhp86_NH2ds0PT)M{t)W@-Y*EvXqp=?=DyJ9PaX+K0{X)${KY+RW1)$%@ 
zg?{zq;=yN>3wdc*t{VL{KDj^S^F6dPjPY?mt7YpZ**wDH3F(E@6Sp^EANe1~P5+;w z-j&pAm3<}a3Ge#%y;gttIgkA}{S4DIGf6hzvk_e1-B6VUl4p>xhrylmfn+%BFBxAVFCy*}Sex1DCYR{#3D zI1syh`93F%N26OI?7V=_zyFf(-RQeuJ@dP^vg7_3bNYba*eL)yeeK_{?j{VE%jGcEFUcNhZRh)!$VY$e8tHF; zrT&cVk7SGldk-wL!?G@hD_h?aAWAojW75tE9UItnm-o0G*qO}EAu%HV<-*Sze*}$5 zUo(H?29%WaDD7-EUw{%wIm8!HWs!XE1b5I2cR#mxL7ODxa=d-N0_x$)-s?mqxgej$ zYWg8_UXtbO5BUSUQ}dsEKJNB7-9$a`cpSzR$f4_dNq2<_er5-o?A@H>E# z!}IwB<{xd6|DfHSPFlxi;^Cq;kbLn5kKeXAGClk{LDhjWPb)83=Z>Sg;a zXisy0f&Nuqe?sf~xQG6IUFLSa?$-8%a* z2mXFhh{kzvHXH9uZu~nvWnV`o`;&W@T(9lM^n$!*`UkT-qD89pugtuL>vaNO@sMJ; zcb#S3u4aCI7w1()ywX8y*W%~dcoB`4&k=i@ZZbaFIFoK8AB*t3pUywTZ!jLiF5q%| z;eL6I9WefY>s8;kE1rAX259t)Xpw0bx@Ljev^TILXcw@+?{VRG5#JQVl*{Meem+R{ zp71PMBe(adz`^xi-m3hxCm^?r_`u_3kZVAQO5}T*R`T{T&a>bBt{4uKV8n9KYngdO?*h&Q8`;e0@d)T5Fc72_Ok9gavdG8 z8oCb>CtpuZH&LEocR}KG8|BUS#rS(azON7bJS;MrK56#;5yDF!VZE@A`G602**sF{ zit(U4!{Trku%F@82Imgm!&<^C&m=q_zwX`6dd2>@+=D-L>3uQb`TEy_hneAe*RJ^`CGHo^iO@9@qHQDdvkWqMA}aLGCAMDH0XY<$a#84 zqVUq5S-cnaR}L*_`952xbo%)@nrTZ1>$SA$J@c!UU!oPgTv&dQrCB~K7lz4|TD@TN zuC!&o!a+MCLYcmS9>4PnFojpFmu-^e>!q;oNQeE_AG3Gy_<=ZhyiRZe>Ag}(F8Jf~ zC5`zeOdaSna5L=6A?xRAz31nbthDr~J@2*j9!oFNJNko}p~wBvpEpEZqVQS=vc*IW*E7y7%i(BF|lf46EH`1%Dq?)IiQ zuDV^vxC?uH)^S$k;q*g<47l0&a0TYEF{31acK$bY21||5c*j% zKf&enLBG&0E`aV@a!`L4>FnPR$ndxK8U6hb%@^JGb-ma=Kre#rV+MV`D87x)W6Lv% zPbqn~wkzLFuHt)`p!~=5l!7w9e3a!6OptzJ?9h)g0uge>_Dc26_~Y>i;PHSQ^c^hD z8TOOz%^S6P_I|Cex5V{6ai7F<9$-252aEJ_xpX>%Z?mEQCKnn1>|7M*U)az7XY)b# z1EpKD>&fS0;!z`?^T5q&`TT~Y(eHJTPm*2Bprn7FH&nWnp8lRzsB~DGbPkm^OOr35 zGT+j)OTG`>*BMejkJQIgKabSUVYYJ!Lc5}pr1N-(JT=Mx&GXtc!}odnx=<4qSH5)Z$T=_j4u}@3Hg%>o4AI z>9v;Lsp-x0CMdms2b_Rh zeNyxyn=j1m(0bW?VQ!nHPY}L+haGZQHLlL>)^f{|gIKFNh_w!w{9+UQLRrrA0rf|1pX{B4wfTq#TSF74ttNFt%?_znOm;94)U_{gD zrVUxWD*p6zs2l)Z6+e60vivIk_sq}otK`M}Zov8XD!gW$LWj{Gc+P3hd`-)HPkN8X zhgrgBdB2AFBvEyfj|6yyClC|f4cqk`4Q5fx6gl|w`<>$>u1_x{g!+IDdnuc z6+#C6z*nD7p?+0=TlhYXtY49T^1LlVSp00wkLfU~az%c}`Jih|XQ$7@N}uD(AKz~g 
z(+7HJ*ND8gqV1*a%<=iD(>09S^MyiSxx(1TS$^RKnjU`7Ouc*j*w1nFeG`4tKSDgL ze&6)pqTGPUas~VDTi>+*q4~aFwUjXJ;|1{lj)VfsiAvziYc9v+XHgF1eY~3X@cpSG{U=El)z^FA7w+g*eB*YB=Ssp`r;ig~cR@ct@p94s z{me(41^mzAh-Md1?n459@_>~Srm8#)8N405quc>0hdBIzWW{)F0oY4Sxj4 ziZ|>^47ZfLSMx*Vu)+^xRxgX+A7a0=b4!Sa$M5U4f3vh-O5RC2nXTxXzF2UkU!dc+ zeg6jISM~T!eZ(T@^V!s_B>x zCsdw~Wc91ZgVUPt=k59X_CGZ|EKZDmR*y?3Yx)Jh*5iM1f7eQWY!@L1;QJxjUlHsa z^I-E3ACERxKCJC;Zh1oQLzR!}eYoXGy@yJ*e1u9-em<_{{QEvb z6@Kq=*uE<@RN=U^M*1!PvbasWZVdjevvp(8C%VL+;P>hVE5A+r?7W!a&IgI#*iN?J z`9@2doOZ6a^j3S$_kxGU*stNvJ2Zde82i1sbFHS=wY-k?%-;L?FJYX|TbBY``f)(D zWZqA3yE8tZ_4}9F{v@|g;~OlWa_N3xxQO%LFusM)mwZd_zK&3y*3R~fF7D5PPFrHX z(e`1-@-ij;h~zT)IQxbMeH|!<2l;ckL>yZ6a@%3WH{DjNpOE8v^7{B`*!@wh7se*^ z?((|M=7Y6zz;a>sZqh^SaU1aLAw6BsLD%OZ-_8|=o&4K)-eLQmkzY06vHkeSubS`J zxuIG^}r+(_Jvd>NOS->%h@W^_)juG#$I`z_a4zx!rhsn5fEEN)`)p|Zt6 zOUY7w4!%ma&ZO=y+xMbV5L2$OL=ck;c%r3r4%6YWqnaNqo%_L^n(zBL{k&HAVYpH@ zE9dkXTxnn7TAa-L4f>4~#^GzkpN{zhK4yF{_#RILJ--FI*|^=9A2#l0AJN;@$vof7$3weoGCXgVw|{inXZS1o3&UH?7Q?>C;b;G zH*^d6lHNf%#=6!;QZ!w)Tk#%#>VV$Sp0FtCdaPXmA&txJnD%dL-oMn|PeC~pb-%;c zZ(_clf;`%}hJbVW1u-1jrMUb7Eq_Y!isu&-k{{d2BS_o$qxvRG=W+VNZ@_P*Afesj zzL;BXk@D!YTt2@6oEb08_X9}0Me*=>LyV`kKgRm7L%RhZ)c=5dSk=#wL%Y#666_qf zev6lv686*WjK2qpdcR#buW_~j0{l7Juak1IaHXmuYjppLzJHoy32ejrY5@ zGVSZN78ePXS(@+rsL-Dg!6EJ3Qj@=0^EcGww`o4d^%%}6!6-GqAdl^FM%EFMt;jOmXG++2KgA2hjM{#n5UGIKh|=fZ|r}&zgJ3*5RUnOVKMy< z$hqo`%^y+!t?{^J@*RfEPr;(4T!^p6<6i3uA*@^;_bMgN)3onDLispe*uR43ySQD( z^LX6tU%~e=+#b81q;R73;{N6Ts{QJl{;O=y&eN^wm#E9T<9^xs#6v4+hsx9cQ0vQi zxZlSQIZurD&ck{Smx3E&r)4h(F4*rjxu>frFa8~F;0yYvJ!{DiNms3d+xM0*pRDpf zdN^N2eoP066YIPd>)|o|(x*7j$NW3yL%L}<%L|-YuM#BflKl^-@KfS%de7}(?ff6{ z7mpLfcHt=Y^NkZA{EXF*!+lK4dy+~g&;^nJx$IhqaqXz)&ljU#g#*-sJf8Cw zYtP`%UtJfz$G;%Yv3~^p9gXx5`-q+|5FjtRw0~?jZ|{xu;Xkg~TJK{LhYI7=ldzY1 z5h|t!B8*jX*A6a0?l$XP&e0{^R=_I}{;k^0^bMM<^5Z}^;Q9MYAlDveqrByDILe#H z!BUgcu;1`u^8_xW$~ zW@$mj>HDNC@ya+)K8zjLJLoQ8G@cQK^=sl8Lt|ENob|$9v*TA8J;(o2;iXr>tB`oN 
zowx4u2!9VSw~vkKTD^YpBcki7A0J&epM|b6we)MGl>C*BUzuEI@@R5DkNV7Cc$_skkbKoI8`q2DrjP4sr}0|~SH0ifX?kYbq=~rwI<@>uJLozfrQ~Ot*)~e}x@~{1g?8?ci_){+bplm(6LOamNt`Oh!sbe+$ zggvU)&s(&gv^T-{?REnAKS%IbXL1TZA-Y7q`!qRp(FVN_x3K?m9yR5qou$L2Ccn?X z2hcsnub!U-y#C)8Hy&s6@$PzUIOa3pOPoyc0-R6wh<)5^Z8dN-1{@BTaVsr%pY&!C6uI1Q zNEd5Tp7VQvz)KNe_K4f>4E}OH0N*k0=Hufjg)i|UrC+#otrnE?b2aU8KId=P%Y6SX zMdp`Y(x&yo{zHUwC)*3};l0QYS)bB!kpGJWw>W;H^I@AO_I*mleW8KSt4y zmMscrGV&k4fpQ0}oVW#QS06Kc#yCy^-d9m>zm=1Dm+Ji>({X&STe498ckdNEqxP{r zE&Il_edwW=`)HxuDcBRkFE00+g>uUj9_ixWiGW`ZzM}p2i;98IvAqvt%hXQv&t9&G z$M~T?sJHi@Am_&4OIB*Ru)SCBZr8(ho2Qwd7`CG~u=|wXu)V|5v`cY2s$b*!SmV`7 z$A}-Jmth^#^^D^l<`Ho}T9-=45!Q!wNvF)7+qhp!dX)b4>K%QCdYOHg z-KKzL9HSk-i0!~`;Yl%G1HNc{tcCmq9p4|X&zYZ#{Hl13%~O$I6(5V{sa5eY_fy4L zsfv$nqdfwgFGg_)*~fGWm;*mupYu4G*m=p4dB@b_WIBFC_D%Vo#mQ)|YnPX+-aDH=y_Pq(2^){9);g z_p2e$d0cV>$J_Jrxa8k1!1!{UcG3I+Usq_&==>)jvyPwDa@ei*uvQK`vixc}Y|HYi z+CqDs>l5uX@T%4)+Uwz6Z_s+NoX z@;F0Z(+30#$UpT6_U#|V6)7hNfgZ+Dr626vh@@X^cFyN9DF0UYQRd(IIvMi+NJ5SN z-iyDd@AZ7Y2Fl%z`V1ds^CRS6C;9h2sc0nrT@tU4bzLF1%G{YV|FQz*k4t&%qd@-M zN?n1w9=)jU-=9eTx@P<6oz8DhT33FT;<^gw%`dzrwW;B)Ll#CYcT z#eVZU!Ivrc!|y=3{RU6;PW`Px!)picvH!Z}m!$oQm6zVYAFk);zFNwK$`+S%?T4?w z%6Gplf34-yUdC`?e|`NTm)lzV-G=^~U3ULH_M?Sm>PKHIQ`|7oro4lGRoP`^$W=-X z>yH=@m*-1l9IfF~G(OA+9gJ_Xe?t2Oeyw~4euc4I{$Ddie%*SG{A!;fzh0pHT3O)J zPmVv+r^KJPHRR8izXpHxg~~^dmt}sd?>8Cytnxqg{n-zjAC19OF8PkN!k=n=p9c;v zEW+UncY~D=shqECxlP+0tQ@p7?dzb$Ki9R)()_^+%dKmfqv@gj6MAnfuisR>@%+a- zz7_mJQK~j+x!yQNza$^Ws{Imic9Zm(e<_CY`EjALJwIQdk@u@v>|jAH|me=g(L z87bd*9LxLj&oZue&G@?ZC+uhb*~c;35sV{irT^%6(=*t`cpUo(_%MZW?0=x#IgDd( zm8HX&zm3MRAm#FLjCQ?t9HTtfi$ngX$FX0$dMe}CZzzA_{<-}2ik+PLI7YsKUvGS= z&}%})ThB*~{SMi$oXPuJrpT|Y=g6;>Q{>kQik{{CSIur=8~4TRop=6ypfKZXxGp7vLSS^n7jD z-y`z(x_v!7rW5?*Sl)gc^Na%?cVj%l<4nbPV5^-k!@F6{Mjw} z=Q94h7~_wSP}qAWn@8PcbT~ji+4pVLtT)(v(dP@XyudH7x1R@cB=cXTr_Ue2xA$FN zZRd1;v7LCtek_-eiw{jv9^W-Zd3?mxPiUhjCd5ye|EmP&l6m(AP?UMUFz7|f0X=lvA5d=kH+@a{Q6-f z8@_+8^l?8Q^D_AH=WcK5hq*mKf4=$>=}+WWHnO)%rzqcDQ{7~k$iu% 
zP_B`De?!Xs`0cIHyNno>XZ$(Jtswd~i<)B50m#Lh?UW*Il*NFQ&rDTid`962} zPZ05992w(KujE4>{!HmIqVkr`U9I(HAEu>OYC6V;{vhou>JMXnzTf;od*3hf2gP1j z#r?8yH*XXpFNJ|u0gwY{JDmWH2qC|YksdoR8de%=p1-u!o^=PufnA#FB!_T$Yn z>u-^MzMlg9`G+a$SZ zlKWUcTaP4P{ao4*tv{?0Iu-r1tlimGZ_wnz+nHpm@@1M1+ohg^v`2HEqMiOozJ%(&w)$q2$`(;Dz3J>EOF5kz1{vO_HPM_uBE+8wKU}_R2Es9dh72gl$#iLdnS4R_Wu$3(u|1z#{T(xGQj&Y;(ug% zC;X_QXFIWX{Tdp5uHrGcZzl2FoAB=6nQyUi+`i8s@1ZFDWWRyo&-t{!cP#tM^*QVk z^w#%VqCeH|19DsoLo|%8N6@!;e?#p#38at5^H4tCC(*yu-Uqbzt?@kG=hMH`-Uk%A z;L6?y1f2MNz$JVi(9eU!durWMzC3NA*30elETsd|QbhVoc|m`x-e(`-7$Uo$!TXfasFpSYZK)#?= z^S+Fn=RWBMaq_o3{b%&K)78I+fo&+DYW2Pq`Oe?Mt4pW= zdie$Y@wmH&OGfwv`Tm|j&3FE+zQ-rB^Te0)0n)g#@BEp(0#D$-M*4wxI@&Xns(RdY zS$kdl(sl)xt)F5X1I7J2@Q`c3@$Z#|@@bWeK(@$ttyn*O4f9bS97H`7!TAHwH~+4= z@3RkY8qkX2R=o*5$RG4W{)xSpz^+{3R`#N=>3eL~z8lxqM7!wx3Aa;j`p)A%{;;2X zP48$^{+8`Kkl|LBubq@*{|;Tth^5({+j+;&&#?!8W4kKn`)EDn$97rHk;?L8d+u;i zJ{}i*zF3ss_YOB_zvNfZztiY1`Bm@L+c`;%Ofp5b<*iRM4jn$yxJqF0+p7 z`FnsDD%>KxFxIPb0(h}s4tS7Pz#9{I(2p2j`ECZ`XMU9?wOkc{pY1+-X|IaE(Z1m} z!AtD5HYn%1DE?xvHJ#gmqnfU@1Gdgzpdzx${kVL$P5e)lz@q~LJ} z`xh#-1E_bi#0eLR+?|l;<>a|q&gbWN{$+8}g3%@3$9%;mk$(&>a(Q|8vrzA2QZK#m zgvzUYH;i&S$nw7L8RdRW;j`SmcHX1I^>(ZML+)$7ego|f{2>{)6~3>_r<<4#``J&& z5A9unxTxVF{L^|ezUsY8;zPJfi8uvvW#0p0cI~>V?bq;r*Q~3WUdH^Q92tJ&Y#(?{ z?~(TQYqRR_jYz*7Z(k2bxi1&^&w1QY_G2vP>xn`+&O?q_InHwc@AHLnw0Fm~obM-y z*WrKt9>KF>cnV$G1W!4yNz2FO_7}>n)^cn=E>|g(TPE@x#+*LH19k^~KJfj2x`luC zeqqu6xS!oWn=2DBo~|eF{c^Sb98&pVJ7J9c0Dc43UQ*x-=COJuqxV}1^dY@#>9|$ON!(EFme@VzNA9n-Sw8vXerrr0;>&XYBfE79oQ}(>T$(>2 zekll^lod`sam&shu$Y6L2gxP5AVG<@+A&ubuCQ_Nvc2 zrN0})rM%US|EgzWfX?}p>%+I@{nJp{$@#mTQww<)^fV5~2&h&YDJAbxe0+YH(J?!Z z(ax#4`~s~oe2vZ5v$*?9wf{Xqc|Ox#-p5%yKP2@U`4@QpggkdY!{xxom3wVn z-QhozpV2M;3G@Z@b^l`WF5(r2kLZtJ<2>3KkeqCuR?)}mr|nihulD^Q%9YtOv#Y@W zsb$snOXXpZ@M3@9i*it9EKl?YXiwtt=^qN^8riwe7RsH?&K{#pEsJZ?N&84}~; zdb;yVLWl8g;=i{|?|zQJVC6B>AJ>;BEN%Ky*{x~!YhB;nU#-{PYo$Gps|{ATzbn_r zSEwFl`e^Ur=kR-Jm(3rp2mhDRpHUk}_EBGpkNXdd2v}8u8CyTJv*uY4u#svpA61VV`%E60XM$j*-sm+HJq{82PZS 
z9kW~6qtj&gSiCefy(=Xjpuc19&v-i_+uv*BRb{Q>?d{6`9!uBSd6$=uK<;dQ-q&R7 z$39;AJjKQr*oWOR-%Br~TzQ<#W$SYzmgacn@m4=SQTE5%bJAVjYqIol zy~}&NmL}iioDNMFlQ?lQZBA4T$IeufEi57~FsI#08l@89V> zrD>;s_I~8pRa-OJ6=_5tsGD0k4x$ut3ccD&sWjpwuL#V*DChd=21PvUZSs{PucaxLH6()RrO z9{GD6^DTcw{h=5x@O3{M@(x1zx>Q_lcjWi^Jbm&#>`y!ozFCIcFtJ%{ey4mIx3t)8jpq(B?R*1$*G`dN_e_yrgXhSvB~#?ri-Yp0Hl{uwo~81X&xh?jjRW*68p~Pzd4;U6WcKlc zuonm1Pbv6QV*IP*H`_-p0w-6#kDPY3wcY%zF{a@kZHVF|kkfl!AH6SV{?EF(=0E%U zf^#oY_`?sdo%Bg~F`}1`=uPzJYx=|esnpBW#2voOa(@2b64sCH+~?LwKPodbJ9V?r zqfe~>^i&?oRitmN-}Tp8ZYtx;A45;fKF8}IGp9JdJSFw(jV}+WUeS)9%lOiY@#Vb2 z`0|8yN6wEwRn;%Ild;``J*sCf=|>PBpD(-JgdDs>eC&AKQ2V-_?fQ6ta>pc2HI;Gb z&rmKJUvwNYI)|}*9QqTKi|nh8LuOCwjW17%2*mx>ap*qc8IMDc70Ru~c=dQ~xp%6( z`FOQc$D@3ldiLYf)@Xe4aUnh*X1bsPdJcDCeA;@J@kxY9uDIVmK0ToJ?@cx1D4qvf z??pQcy3vl=m4gMla(=a4=^q@Z+g=#U^>RAkW%IvY)z^HUbxQk_pC|h|h3ES>gMFW} zb>RlKV|D=cX{2DEJ}p%TD;0&8zaO{P(r$N0H60G_(R)2Qbt|1-K{}aSH+;`!dz0ej zamnKPUksmm#qrbiDU4IkK=+5Is8@GSQLhH3s8_E$N4@%)g1$7;tIG@J8tGMgp&azV z_chhigXcVh9@KlU#pQpl^7TpO`&r*>Ik3~@WTc^-JXhh(pdIjajz@%%a%JOQ7Dwke z=;K}(rygW+16vPCFW2hXImwvsK*Can%aTU#$AqzN>Qy`IXZ1L&%XoO>cksmbQTsdt za`&%yO1|lTi0X?O%-(N_&vlIPDJ65LCzx!?1$gi675lSHliBx^eO)QNV?xWPt43L` z!g9cOH|F(YmKJ!U(mvoJ#$)a`AByS+J3qU1_B_pt^{k@+XF}4j_Z?Ca@IHh79wZ*+ z3og?7p+bCyn3Q?Mi27ME++!FQBK*)!4ELzOEno0_oGo6ZW_>flUxQ$@LpbhPc>$UB1A4&Su7KImvglTeNypU#P|0LHxV!8T?@K3%kt>sa!P0RUu@Db)q zLOC98`ASxyMCF>GygW|3YO#`6d3U`3a+zW*k$#hhk~YgN6*3g50AT}{lfP@vT`CCQjYYB=?=PjoF4V7 z*Qv?xbX(5f>i0y*-xwb3a6SGO?IH9Qdhqp^fse(Gx$?&A8dcFc;a!l{=dsrn^=vU3~rrel*Fs;Bh-gq`z_wqL#z+@8g`z ziChkp@7tNKH}Ct@Amq7C=_v2F86Ql(vwfy(E&rIxUEDtK_4#+spW1x@Tcn)Nx8?jO zg*W*y@rvV=AC^~XYS&%2XuJLG3;@LKJAdDMLg+9=c=`ERJGEYZe%6SkMM30}aggcE zFJe2+pU_Ud%hoyDg}|t1`$Bx(&i#teu8-w8<-zqgZQ=Kq)0Qu2dufZEgW2+zns&Mk zw~)RPuQ7Vl{)P(m%+67DmP2z$mGKO7nB<>l`<(^6 z59z!w*VCPNen5c91-PoGF27@>7x>mK&jD|3fp64%(km`^TcO-3wGX6MT<#YN<=PZK z(kCu=OQGCqEl0Y<U`o z7?(lE%f-Idr9;3bW4S<&Ho40_rHmesU;ln3@U4D7kMfc3%KH=3 z-~A5Mi}~mN2l8XS$MPTRm-{0q|HHng2YGdWPu`Ezej?6rMEV))lgjzHWXc8q0s7_k 
zIqjTJ`iQ(iA0g+fFNXY|Bwxi?$M&_=&i5-NZ&r9kI;Xp4X}y?^)%#0GXVl|96zVJN ze-*tu53_yAucCM7p)9|O-koM|qW@I{UmK_OBO4 zbk6B|tI{=8$WM=>PMW<2|JsChal4A2y~mjLBuXE&BgK@T|DVQ-NQbf| zx^85DkjB+_UXy;0*vFT$p3<8nZ(#TJ+yT_+oAyeUXLX?myfo>{*eTL+{51J;jdVn= zFwS#H(gyEEaF-SA2=z7hzo@6V|3!Hz$?i#oTUo|-%zp@#ZcDQq=v!rvviCoE?nD2q zYZSj&t{}&*f8fJzVMrJ!USWdxcwD}*A4)$c-F8ytGVS4dVzBQfgdN&!l{}+ewD;Jh zpo8_h<)rn~=+bh^(zJK7kB;dBd4CC6|BkfIBZEqb0F_JRlk~~_QNqjV%l6zKEhYC_ zy=SJg%c0L3GW@f4h@ZFP>q(GfTtzzex9w9rOUcLd-q*zUHcH8d*zKFWNP!}Jk8m+@coW4><%-^YlLj*F6p9&|_+`0{IS5Px9XTCL~fo%;vw|D`P( z?77j|{E4_7@Hvdl*K&aSa>&;K;^TA+W5!Quze;|WIzPJ=QD2V+p6FTELdu<=M3B?HZA9NvOJCShP=YGmXa0vJliK!SSKy%=@uf_QeIf&R~A?Hco*=Mf7yA05??0$ z+F2!gKAY)MatHZ2PC7SQrGN{NH>PF(2J1QB%+G0ED2$fN z`Bk#jlf1m)KZp1Jg?8Rn2G8)ec~}3!!g*W1-`?@Ph6(5%&VIpP#)bAi zpzWQlU$i@o-~rCVRd~-*{HJb5O^=LU9$#o(_(H|g&kKNmzUR$`yy5@SHQOq20QR?eEZTXX6Lxd*BzP zpJ%aqXM^waEWrC_r^q3KZ*s+c|0(1@#{6c}f456rwol*f;2O~b6zQ9`Q7g#)BI4gc zy$d^}A-QrpxL(t!ulBlw&%?vye`@wHY-fHpe$2OWK2N-8gTe`B@ACK6gh+Cw?N)v^ z;re_awRwQA%fL?p{#kk3m*@S5{fW=R1KsWuId7x~-cGDv-BZ%9Wr}yqpUEozha?TLcg=yag2h3 zH{CnYJF{nz-m2VWe$C(0?-?_@k-Z0QcBt_FS|fO+ys_op2SI{3DE#yD8 ziupSW`R|ha@EWd%hCVxgLiz-}`|rkI8SnM4+{I~zK6br^`IB&>R{Un#%}be|-e=EEzH&R|_1I6e*Aw-p+2F#D1L|E{ z*a4(*t+D*Rrhg>gD$IwSML*(wCF`{xr-|3N;kUPbzgjP21DW4?CE=#Sy!-kK;$SG5 z8b8u2_}uBA(ZS;{*GUIa{tEIVH97NmOL~btzs{cD;_!G5pzn|Dn?9oR7|;Qn_V3?9 z&#L$1t=9hL>rHP_fN5v1>ZPA^jQHGuJc;FCH`X1EZ|4$6Tqry)elI%M_vQ2N8NZ8o zZ0pYKLsk6v*`E6){iuo`N8fw?ohYsse_#7T!NkvZEB4>x^LRf``Mk^Q&lu?kIqojd zZA9cQkIye-|3pq!N8ekBC z$Jf_te!6Y7-iKQrWH~mYZ3v3lzF*fbw?mNg6R@WXSpoFG)yL0+X6IjFU*(C1_zdvM z(*!}epr@xn7rHMx4>u4h$6edm;dp+A`ssG9uH|F;d3+*uzs%wgzW*8c1JiUq8;tq- zpvZM<{#Tq2d58V^hs8pNu~DUu@BerCEjQh$2<6|idsz9Ee-D-X8?<%nb@pBC!3xW* zYx#MFJ9J9@0uz{~o=h ztJimTXXU^-SqIZ}Oee^qw_hh_fn-}7-xTc9u&s8KQoJ; z*TkEy)p~AUWgn8Z1Nr(z86)}<4(N+7komE6(fxBg|6aeOhuCl5&yD=v0m+y0xZvO5 z@_IghLAg&$eL3$z+wKEHU+@rn&Hh?``~&t}=nT1Ye#m@O%fS!tmLe$s=dZ8E zPv4_H8Y4EQy`0oF98@vv^`}$n|zNOdm?^b%foeD2Z zjOe`{9{(VCxIG!HuwH)t0O77Ho 
z-9Jz5yy#)EQMgLBj|TmV=TD`C{Q*C5{j}yy9&h}q;bGq$1ATtf^S2j?{!DfLRx)pO zs`Iu|BAdD7+E{s9Uth|?S#|VG8^Is_MV{AX|{EDSbt~>v?rKtyL=iiz> zuz$mypVsuoG4_9R=P^yMYxxZ8ZC5n2cu%nJI@EqAi`wDw7TIs0_4}9FcS-E~Si!$L zvh=jtEuWA2e&{fMlJ%B+i+0WYq4Kl^8NKks^$l{jC5o5H`#DOV*sjn&7}b1~uN#Mm zxPA5O#uR-$mCwi3nR32UBl%wU07eto zAAW}}%-^kPA79eWJ^z=xcY(9|s_w;q0~|u=1442L8J%#3fj~x21@gkN(Md=QVy)yc z5J|@{%n)9G+nk&2vT_E3LJqwbr!0xVF9J z_F9WxAGBI?*LQu_`u+C#%{e?Y+V<`b%C-mnh ze|NPpBNh?w6uE9{;l}aMUt? z&`G5q#<_-hZnjVBi&9F)H7VRkFV6ob(rXdr3;7%9RmsvD=`}~wbAGPUc)DvvmQKF* zX5}0Cx=hoX-P;$-yXzM@$ENvX`Yi*UqwifPKg^Hk_ol`9d2b)(DNbJ}aN>O=F5kGX zHqPhiH^Clt|62QT>Y2mD&;4qBZ68+p`#qwt1HLI^ayj{wrZ1H4u~UBeJR9%#-3PdY z7yH9L+b{XpowEF(I|9npq+B}vnBoz`TP^ANxN3OX{ZW43b2{DX+d+7c3lT$=E%%Fk z+2A1E{bG)%oTst&Q(oixKP&AG2krgigrD{h&uR(Wf_^aj?oPgM>2J7``MuradzpWN zzu_^a$9$ta5f9MoSXJoR%Fkfsl^@{$VZwu5s`EpmQy67?k?%bPIpKWsF!^DA0p#0V z$ftV3&bc?6cdc97s4wDqb37luuaNJI$a5HF{c*n4g?#ig7=Ol&D!#Fwp?ALUuf_dJ z{d4|N2@})V`^W45TIxSc`h+o)%MqiaurcPxIH$?F-VYH6oqoN*A02<(uMqL)fdW01 zJ`ugw7x)MKTt1u8d#TVT9C=XnOuCPLOF3uGxbeyT4RO24@AH2rLgnWm=@3Rq_h$Zu zIZAiVi!k1|vk3kUcOXGRDWL$zcsTw4_4!7<)j;oPyqTkNLjHj6*MjcWuj2j@(?6E` z8_z@krFef4euhax|8$qro$~mi)4Pd(;UBvo=jWq$s&sEg?>`ZGXZI+Mol@PI&(G}K zwx2(kuzvCjDqk1A&NkEX`#PK7<3&3t9HCsbmK!k`BoyD55`M>iV1)2;KLYEU;753d z15$pE;p2)|vvF-4{i?~hHo5=YZ`tIuZeUa#Nd0D=X zrdM)PUo^}k+D|Ba=#hqbSM;5ihIvH$u`GYXJRe|TgEUd)ZIaBt!U**{?BsUCRUX6nJpixLR6pm} z?EE0`M){(j@GAnpX8WPbZQ?wVAoB;MTiX5(eTET!_uc33VZ^@s)4omL=idop{(Kx7 zCL9}wCi)3`wLb8((N9SGJlD7E*NCGZ#^;S2_g5U%`jAhWD*4!sBK`e5KI(1sYn&y% zw*MdR8~t;q^?eL)ugU}Y6Q|z`K96eibbb!{Lw_8*5%jfwJ-SKZAzfZi*k4gfUZMbM zqpUaBzPg;gc3+E+^X6yI{d7D(wzd7;T2F1{*Y#O%e@350K2GE_?|t%lZEb}2z|`BR zZ)zjIs&Li_o#fw(;HTsF$!qpKrFysTzu&3#q}@M3yhly0x;I*!a+r4CukqR_+gI=A z`Jv6DZ2#8od-eTb`+fS%)^BH#PeF7$LO8&D9m^p_GG2?&p?xKj($GaaBpe_g`bU%oTI_h`f)u}!G3+4&a8t$ZIs^J5h0S9)kWmT>HVK+D1CZ?>{f^eV-22>xX~#OWFJiyxevI+Q!myu*OsB7!&>rCLdKvJ(e*yaIQO#G>zx|yY=)xG`%(ZnIu{*8Z zq+6KBn)CAnjKe<%Ow&%rT^>>IhtW~)AfmeH>H%kvpHR&dG7xd{IYhASvwCjr&GS)R|HBzxQ-~LkMr4bzpwam 
z5K2j^`1roE^hOdUeXYNzp73=;&-*SmkRKJN-N!V4y373N(tR83zn;CMtE4yBe?7ai z^alH{hvNq5)L{Sh3}^Yj8vFfwW}ZPqp|39kK0WliH}kg-Yy0VciT&;Cq+HCelh7w) zk|+Uoney)-X3R6YS7>_9XZqLEUCXj`@_VtyC-Qrdrq}nqIm-`( z((YAEZ}J@-rLXOSMZXf#$#_M1is`#Z=fS%bPTEcR^L3p#-^KdjpOyQ8*dmK;T_oXgrr|s)Y*FLZ9&G+-&%=YZiN7B;2 zUbweC9HLxL@UPpr6Z3=e!FAX1yIBwL`e?V%Bl2HV(Er?z8;xJT4E`Qwxru)EoPURr z?~vsa=4pR6e#!pmjL&0D}=l&J+%f$i;c-G|{YTTYye)bsb3;F10 zCw;;g{o=8nxw}*NcVwIP8;n!&xX^#qdX?iCKbQYE|CQKvhe)4rkbIBxHSpyw(hYbb zpxqxS@CV~aGya4Ef0XVfPchvezDT;?R-jWWy1z#9<$fmm=kj|1H&Nf%_uIY`f3eD^ zjR#?LBEO&#K)3nIuS#ZD#Qe(l8F#eg+f?COOFoS$dvtu7Ci3OqsYSgv6!@ilXwsjS z7x)0W#(a7W-bejN$3fS7PqF@3o@Xfii*!Gx{p~xUx5`_A-tIR+|8RPLt%CkR`=r=E za(ZKjdt*QLe)9Y$;Gh3xZ5Yb8x`%$L<~y48LmB=(MCVO@Mc-o_MDTqa#c>h!H2On0 zE+QY=^&j!P$urlZpFOW~E%KsI$epN#3H}h@*HcQ~rS!s>o@VySyP-e5-$wSz^YGt` zeS>f=du5B1Pp8|sVR{7oexs!Oxi^sq?dRD!nu2`;`x5ejx_rOD7f@dMFVcO#!1&vd z1O51{!V6EYW8Cyjy4~#e5lzb=Uw_}u)Bt}9h8r8kzoYz>oM>(qOiGJzTS^~5nYOWy$|mrIa0p5 z96d(7V!poY0{MDPAzww>EBCf)e~8CDm}qGC%*w=dTEC~Gf0h`)65@6^{a+&VFY2RQ z?{fcSc3)kluNSFY<@%cZ_j1kZ`9U2oKc@KS``}$qUa#;5+g(pC)_6F?dP>QaivPuz zn`-5rRJ}N8`&I`=e4Whe+o|uvp<#VC!~bi43A=kV4U{`4m`gZEK| z_iD}O{x95@jD2tz_b-*F+pXYF<9*5O*SSAR1i;E&Rw&naUoy*`|Gwl!geSr!Rm-?S z#~<)$Zs+`0=0|1;FQ%P?{@eOo?IxvP+D&=`zs7Z+iS(!bTSdB$5MNk>Ckpr4d;fsX zGmMyhKT17_{w8Rh!_L{Gfc<`>iT5Y}CFR5JQJHvuGWnF*C3cUO!fCia`J-%y_$mdsye9XJ+?FzZF*K zIR1@kIXydeU=$F)a^_#4{t344D7~I~KaBl3>3BW!*XIim2`P_#Wa}U=7To}+!7p$7 zs*&DoUxw2$ma|wM(ck2s^f$Xlw_PLGQQvZriz1ca8bM@y~FXBwT>&5(jpC@#@F4Eb@5u~f0YtQxE*mse>vHtsde2&HRAMlZF zMj1WNN5>G+?TnO*>3yMe+Z)lXb${)r-7_MUNPzu@{u4$yKA|7Jv{UGRct7p>5&D<1 zbDDIMOTX{Y@_vpB_7*f}wTt_h{9a#Q=kA;QO1qbMS+Y*@#p{PCw?_J*pda0XD8`C} zaQPyA0#E#MI)!W3X}as1j;#|p`%roc_BDXT`%r9OPIfLg;)n8kln9|8fj{8O9iJ9H zox`4v?Ki(?tTxL25XNXHOdS8-r1DTo=IS$y(Y{L`mkK4McAswAMSSFbWz7dYB~A7) z?E6R4SxoZ#Wx+St`+kp-?B8ZRPl4;;FQ5HgCoG-y`n{F?T_4tT@Ky6aJ%`UL`0T%) z`}FerGZ~Nj59OPB8gx8fk$SUpMGva}4h1;}KjU$sf2q}L_-5Z>i1_Aw`h+mT;W>Zv z`ivjhxn$$ZbCf&KH44KWcFwq@xAp_B<+pPUsh^h|A>4G9(bexyH-40oS1JXazu^Ge 
zoi1<1=Z`4eV>+HL%=3<7+@*U#`xW{p==-(1MQ*mxt__Yy`$?m-;o1JY#n~R&f2Hx- z$TaPjeZHXr(TuwLR!Dy&#!cf^{d$dazEV!IR;B zYP(v#x9`+KKF-d?yb7xD(i`?4Mf;x`?9b@j?<-eD#MbG=oZALRSbPih5z{~_d{fj)LmAJQA>WA_~) zy@5XM%ZzV_6+iIz*Mz^aUz74+-}`NLjyLWv;Ah;wV!D71eva4mllukYeB3uQvEQr% z9Y>9>Qi=DQnV*sL5lff)J$<|b`kQhh{vfvFIkP8ip4Zp*Yg$mowXEH5mv)!AOeOt| z?FXNSCFd)yVLiTY5p=5DgL()*-cK}ybpQ?{3G_08uj9GCLwOyqrd#hcj&Lp!JoiieUQZa!>HBi<>yYIWJXIfByX8Be86Orl;j4e=)aTpr`pnnw zYQ$I9-5z2+alS80zI@%SSL<_seSQv}D5r2W7%Y8Gx9dE)n$1mpF?MtpL< z6#13&X-i8!JuZCm?|TIsPkp}{>e+|>=->5hry>d?!zQQA$E94(j%vOzwwuobv{L~0 z^U@aR{diuH&8ufAx-zf+9c{SV8EzLq&JoIOZ&A3VWL)FYuT^f{u220u#wh1?X&3#P zVRXNitJ{7K|BgTocfG;=b%mSn*SK5Z=l9WaTo^FBez1M9j#~pG%s1G6oyJ?+R_JIi`;=4!mdnbZ-AZF8+8W?l&XdobQ9P=zty=A$tjm zq<{n;mwdjLuGi%3yl%N|k@3^;@_GFr%NOVMxg2y_Id;Gc8Gjc?<77PkKHMjIO~x05 zbB(`KrF`0C{QY05Z|+rmE@u3FQs6kXm4TNA|2Q}{lf#Bu>W-E3T; zUMF68zbAje&o9q!)aN=54X}P+w?MvE74$jAYw;H-K5;*|v5=2?pY_G#^^%L!`)y72 zKI@P5{x|Pz9M{iiKV|)KzOPHZd|dbW@g~&=-^nnrNnaj}gM@_Xj~N#ilUeH=d}1eSekje4~%6-X%dn_zs!yjcDvzK+@C=?*#qd&Jn|~WOUcji{Vc{^Uj(y%K(7yiUp2nZ z{B-8eY1J;lU*Xo<_bYv3`mtY7U!(mPx82_o=bNehivBLx$x1hyXZrW|;{4VxCe(+1 z&}=^P)@U9t{u|Qa@F|v$>|^jdF0X#q0n!6V+jxlw;Ay+Kya~-=_uKPlj@z?vVaUw~+M5 zbqXKpe~t7Vgzw)is-gl3zJGCz@uTmOQBC)Gc6R=AsQ{9Y`&;*G9P=q@)(e^aaXtN| zzK$A}uw2$pZ2yMi9WL9eaNO^kpW}H})3fgnUF`R93iB=4i}Cs(=<;JS-pBH$amDR2bv9 zfqFNbV|;dbN4`4>_XkrC^%~r~U)@^B2fgI;K7ExY zJdWHc`NK%B>dDrA4B{#C6m2K?d`kQv>GZSz26#4n3&|HPN#8`E1XkKzA4{>*4oJ@w4-`C{^oqc_tO#lK0VZ*{FEj&(yNPUz}rkQ(rL+m z(HbVSX9@GCJ*11yA4IjL?Is@rN7$z{Lr&RGqV1@b?^1Y!ce6a?85%j=LVA}=uhti? zUlx``W+VmQ=K_VpHhryf6vMC1c`fZ?V}AI#MeGU)pnY}7WW;|i|37tW2=SMh5I$a`s!1v_J+5Mb|dny zPw1CI-)(PWog#ACJw4;%Yezu+LK+QtUoGRW&`Z+gpX#rj@FMrMI^N`Psh7e4>j}0GA(($L3>n_H+w%s) zdx60%vz+QVA^6sEMH6Ix&32UMS)6oA%iPx?^C^43)ZXv5c#p=Fe+rKsQ|{s6`vtc# z|ESSv!H-y+bW9iAV{y_sUGQTX-&&?(690+PDJ{R1`FvkX`4Nj7oytFMandI(KcI1K zr~KMlreg4ZEB*q~-};f<$7b=<#JhbC;|D4l54Vt>Zij{Cq-S9Bl637^&G!M3f7Od? 
z&k)Yqg!r$l@Y%D7&+f&1wlC9XFn_blAJ$!koY{Rn-ap*_^LF7L9^^xR!8j70CclH} zOSjk1pOMe)3h&>(9)Wos-j@=7pFK>m_T3_h63z#w_8!Vfy7surYwbPPYPodn35zeX z_({t1dny)x-r^>&S7@?)x7qReJJRP;Yu6sWKOa8+J>7`s)zrIj@rE8BV9OqjpIbr-?MR;Xvr|G%#wsZSV zg_myG&8PM6yxqU6@YCtTd~e^wN>|>?`bJzo8NU}AeMPTA9$+`C+yD`a#LPX&oH}!V)CsJTS7c*ONoE&I?74C=U=s4z58GIY(J~d zbk`X^SDxl``zd{f5yK<9FO>S*{qW+aSN!Vje{A_lr+Pc@zpJmMJji(;!kvE1@=*_o z99o?6C;PZ8ewytz|FEpvSv~vg{eFwT*W#4#di!3BAGPwkEpGDCzEk7rK7+q+*xplq zWc;-FPAmVQ#gAKkTP%LU;+rjg(&C#e{=Ai6XYpeeznAeNn;5ryoWu`j?|b?F@M7xQ zT`R2oOvdednPFs(#SiNk^UP)AXxYkK3hL`918%S$X?jwD{q)-SN1faD0Am-vbLa z-evHX6yW(jT>B14D|qKE-%0#4JWY>g<;)Js-xH5;OQD>P7f!!h3iw`O|vo>+3aU-)H5`&dlIn$@*c(id)aau&gsGKl1g2r}drdRmja>NP%qMlkp$v zpOti|>AiB$LDV!hx+UWR9|7;*B6h#W<8|TxDq~9Kr?U3A9e{GTp&Zf8{8mW6R?Ta}wMgBi(&$4m_^dw`#fkg4HuJ{YZckbIIqXeJd28u7* zqxrJ$Vm`-w@E54QG(H2*!A{}(VS6v~q3g{%829-w^4*U4?;*=40uO!d^5%X%=srBzf0ohvhw=W1y%%`~KiseA`Wf=JSh4`#_X{0kcn>1K*)4ItOA7gPKEIyz zxSs~&fq+)HUlQF>lq;W?^bhbnOmi~ zE!9_w$4v2OI{#Yhr?;4VKB#m|?R(qVenA|dl_2Y#_MY&4-3$5xIf8u;>wNpS-^1cb z7@J0THlHc3|CbWlWm&$uFib)*UuYrkrR2+O*G2XdvEKX<+S4I?aTKSoFe|& zxsc~Au4p#!#rb<&_*+Uor*OfK;(EuvSJ59*T<@6Q!}Q{MN2jE_J)?SFWDs(F|I0)! 
z?jRm%&q=M%_w|K0*}A~KvuxkJwr*hdZY)pIon9C}KsY$Ze+F^$Q=y(ON}n(K*G0}1 zZhz-xS}?VG+@3xk{Nj0SxyOw7NA-SA>h*f^^W5weML91CClTZxMTO({zWP1r`F$b8 z+xMBcoI2gomAhHq?*SG5iF)@i?B9#*n_ST}A4h#&fH*Mxk96PoPT{BD&m8k*FZevl zreWTDRN~-wx5$;Ob1NN+_b`L*=r-pe=Rx7%}}J!kV7cG$W0_DzJp!`8Rj zZNBh!%59A2qrlVjU3_oj8#)Eg9h7Syr$U$2bAb2z_AkFq6SDmm2i~Uj_Ag&->C#{c z;Q;TU?_Vy?N(pYS_`O`=z|XPVt(;f(P3F9^R%bl(%h-C5+he~nOTk6!f+Wm^$j#1Z zpQFzgQBGJT@^wP-iRpAo`Fvc{!Ka4vls@0xekOb0cwY(oO$=|H!aFsgo>p@6w?dM0 z$&HU^g8pzRnWuySods3ty`Ru}E`%<}{*%$=_<868d)LB*{!|-zmi!W?$9gEeh9&Cu zot{!czs7ir{wL=i6i#^3@0GCm_!HLuY`rTq>3+5Tyf4gte8}z-~snCy0#8m6c?CLH0zC7)n`mEJV zKeX-A_=U*xzjr^s*zw_k^T=~!KYxI95cy?4r{1~{x}5%Z?&qs6QvWSK4_)GZUP_i1 z=)&)ATnJsZy;Y1f3;!PD!o|u<`T2v{e__9k1En9PVSvh{qOg@vih&TO*4eY4G-Ht z)qnjhnx5H%Z#TGhUu!=$sY%H9+giNs&6@7xfv?wM=pV72U+ZG|T)*5(cx;}?*-6D0 z6PWs&rQbNFGzJ?3G;yfNEOH5KkG#`)yfz z^51?-2e;WDoc|YT`-55f=Vz8x`G0-z|9_d^-v)V{bogs~q>AK6ca9wOdq!eCcd{_< ztU&w#N{Ek1_zVAo_g9QNGv8FA><%-&0{7agS{P8^${hj8w-ogHk_woG|{hi!@&3{~(O@^8Mx94^ozWPzZ95ji$%+(0=fczE=G1k?|_nc{}%iyMG+=6ORx1`DycW z+B!M#LF=ox(vJ3f&NBN3C)Q8 zw`f@@S;u(tq(0GqzaiyoOUZ+DCE;M7I86GqhjIaUDDUHACePh-wBGWh-&T0xpv6o7 zqWK}89t334Z2#+Nso(hm_$YGT{pey|oPHZ218%tu(!_HeX9b?mpHM$75!!pzD}O=r z0e@%=)QcVu`K^jvr%$l|`?`dGHxK!NSKW*%w^INg^&>2h|A6y-BDcQopV2A$PUEt- zk=7^^kAr}Ju6~dH*t*}dKgRM}(flhW$T9RsJdf~xUD19`{>%MLS}yC?Bri}#7l zE1gUL9YN3R+=Vm-aogUE-HMC`VmH`xE}%U*8Rx*o>@QI`5&|&QIC}pevh?TKX`vf z-VaI%4fG;?V>yIeLf+1PSmKwsN@kW=oB;68{!TTiKt{FU;n-p=`JZRC>{r@ZI( z#xYIL?UCb*kCESL&+9bq-@ldnxh#H$`OLrbs4#7r;8E{6s_D>&=n`r7OpVL;>6rh} z5q)O+1RkPXj#FHcH72+5c^uhZ@dD%l@T%AI`{CubUnIQF7xU#K=lI@fy+DExu36Z#zN z*{{#Ce3un+UDDGwa?jWp^VM&p^*b=ee1kUvYZ(XZyQ2Pm(lAE8q&KpkZ=GGye5rlE zCXA5I_1Q*;5%MXs3rxw&y`5UFsCN-Z&8}b3{y9zba4etSiN4Lx`=3_&#Bf{b-v<{8 zT}C!({t5cmAU(POlf@&Y4?Qs)s7iuIp}5YcTYC-I|@X}M$Z`kR^@jzl%Js$ z)JC{o=>2rSzBBFpIE;Kw@pC^+82JZ_Qyw;t{4?osi2b`Z%6{+t&h=>Dlv$dul)PV` zML79_`EV7j~~=>KK{GC+OZVSMQ_MMP!>HfUE{fZzD>*ZciH!}Gy8nl($DJC z?ZA9JaVOtj{uO+FEjmPyWUq%&uolk*p;K!H5wIug|Ykr?UxZUO7(*~YV7%nCB 
ztLJh``lb8M5D&>uIVHX*-zcZV7wL_1dRWUNy-`kcdNs-^>5Tl1a!P(6eM^gb!?Bgv z@55|*Z@AKR;<|^@ag5Z!JO%vpdq%STf+F{j1JLmkGC}h1S!2H0FfYsQzejGthi*QR zUox^bYJ3?xsdUN5E6xwo8)d+hP``%bYSGU+4x>SW&qv(9lk@F)!revqNXLj7>zBX% zPo`h~?4|#A=$B3Zm*|%b-Gw{*y!~!itPFCl@C<$d zzaNyR>lefknw>L&9`^Gn6ZG{(=$W!OQqHeu;8l_SQP}@dp2YDoA3x20`VRGvkH3AB zIDWcctZx$c;}*x;zBbxh(A%+|biL+!t|YTvv}4@XU0Uk3?p&{-g|S|1{4U7xY}{!a zk0^H&$D=cO{;BhnyVtM$vuDp|Hv!HCeHTRX0cbOMq5Uv%Tsn+#D3TW)rw)QjLZ@5# z^m!h}rAaz2*?3zRm-@4D38MkN?{ie!J8|ChtD1h^ap)rWbh*?Q^QrOsBHuecUC8%F z(67JSeEOL7>+cPp(v@ew1%9VJiSpI$2AL-?eeE`Vj?+z)ER@FUxg-!l&Lu&vJ|%OT zX6KPQUoYttr|kTX_kWc8HI$b` zkzPEH6pmOs_pm(b>lDM#?{CZQ$r8NLMAwg1I}hdfXY)Yw-;cw%l#rd1*~WOR-z$QT zpF>G|c&-5Te@hf!_(>8X7@gsNNBzD(B)dmL(!Y-KXlHm(ena=*{7PyAY_x0HOy@Eq3svVVzB z`;JZ+FnZYeE_@dZ1{M%3Qxsvk^zi-&rA!Hp}+m)Y}CcXT;bQmGM>gIuYHz`P(D?{I&>?SInP$ zpDo~h<_?LA9>T~HMoCYM9|&%z^|ejW_mFSSJoWXk-`C>nqj>KDRr(e?YA_|E5*H;Mf> zBuN-Q@l5xfQ22fyAM8#_wypC&^q2xbx(C#gKEznMk-5{2jQhRS6X68S&S?zVIaT`?+ZF^@Ebf^Sd2Cqm%jEo~X_Q^+^vr*GM~& z;{5k>mPNh?^C!3*my#bt@gzKDdcp1o&G#|#J^0RXNf1Q-u15PX*u7eA2WI_fC*it1 zkM>Ti8jrjIWzC-c)w(&f~emU3V!TmVE_vZxPj1Ok-o{taVIm)Np z!_R(@?JI&5a=eNAqkqpWjI#f@JrE|`L!Iw0Q-1h;t`~X_wcVF7@&4%rir2S$|FoaG zLO&_qJN*L%{dOAq?IEo$JHP4r1N8o=(0k%|d|K()F^g0SBWE@4`*h?UAd8z^`2Is% z+i`sjy@(v(A0!#|{Zbd}ONK?sT0r{7rA$8^`GJu>Wa%dh=>wAP`l?)_!t-@tpRc2T zfN!;4R#czP@waU24Sj+Yx&-i)DMV7e6Ix&5m*^u2EAUqAK_SD}LB~6p?)wr!x8Hbu z-VU@=$f@nDd+IE>F3S^TVI!>%#GU6x-15QEf)HFVW=>?RbaqGmd}e4angvqyB=OWEtZu@b4B44Bbl5{|N<9Zs$ zYt7%_zs%{|xSmG+g#69EpACE=aqjPq>3CAFudz12WoYTn!^VY=zCoZq_ zKB&!0#-HL7`x{$*PkZCPzz@ESaw0+#&y%9>7Ac@~*E-D?4ik^?7~x+cophc2jPXD_ z{Qgns3u&^-^;Gqv8hu*UH>7~$ap*5E)@zoPNd&_T_5rQx%YK{xzCgB9nof}--`0HQ`5b@sp+k_UFhF&zl7YNpS|H@ z`MQYPA4r$idOnF7UF*yC$Cr|KX6>W~EWRf`z#qZsnYSnJNB1_IPop5hzKyH0ubXgp-K$Su?}j-HzTG5G^gQ3k zJa-4s=6D9yWp>mO#2D8ngi?arv%X(0?ODZo_V7LEbszc{iznHAR&E~~vDU~FS;ArB z*>{Ny))M@?zA;}^&zPPA{CA+g8(o6WS8YGQ9O=3Z`fWK8DJ3sg_(6Dy;PVcjS7APm zoN1@N&ivh|?-sNNgTDm1=SSOH-9x|F1Uw&OK9qY%@uYmi4vX>X@8r1e@aEe5bV9ov 
zUu%CWe0`r9>U|mT6+R=#eQJvD3{DTS`Dt~Sbn*VWMxsc;y_^WXZxHXXUv|*s4gD($ zk-SgSdR$I@pLWV+Yk@aK-{<@?xxP^lmXPJo;^%m{e&&&QcpZ}Ii)3hDP*UB#bJ%QKEKHN8} z(LMwxOUZ3YV9*B`$oD%`J^=q@!#)J|gWNv+eJvL{*XdKnoqxlR)b3Fo@6u%T^B$L< zG2K89^ylL%ay~&Gmx-NKGrxLY8y&fu5B#F?uQv8ReQq9kN}pTX*D-zM-TJJzuh(Z7 zIiSzfzRR0`53yI%p`Ybnw*H59=3zLg9o(b&*2o}^SK%=4eJq!#UpETM6-nPHa=uue zZ(J^aBn*taSIfQ8`wj=~yVn^#J6V3fzWY);!v5=gStCM=hN6CGlsE2S{yCB!j~5+#B!h%fvQwYF zPKt4>BCm?_iS=FGZ)5kTV}20(0WePPkM;q=kBR({T$yh`24v1 z4@XE}Uw4#!#VXIO@%mBLYwJsHj}+?x{AWc$J6xa7_o5aj6~%Y(MsCaqW7KEqjXW^E0D!Wi+% z=RLN+!1w7|`P|N$%yM?WNVtJ~?!TSqSAu_^$-e8*fBT5$hukY(cCUfo7tr6ub30|b zFTIp7j`6!e|8y@V|0KOZ|Je7TG=Hprx=BBzH|QVxz7)pkiXfDBuTr>S)a{EE8pnIp zlij^py$yP@dzq%!+wa$BEC;c?L4E~PCckmMWg;K`J&;`Ad4HQIclT@liE>xG_uJ*J zB7TIpK2QHMnJ{GXVs=&VaWAuv{s`=9m}k9Pwpf;vo25lE9?JL!xsBIhmfi-qq+{$i zd_-(;pPzYt&=&#y;8H0k>wG#+`Fo^)aXIY-;%j)2%)qmjy2R70UR2 zs$Bvf`>~Mj`>DbaE4KqViN`i+U-%iu)3tnGwS6e13K$xo80p30z zAF_G5@3W%0*DzjDZj26h6A#egE}3)1_X1a>9gc^rqbog-@A?A2si$~Pz1t(m_liP3 z=q9QfcoB8;0x&Oe2MAZR-hy4P5jec{H{Pu_ixBOyYao#z|;Ms&FKEo0-Z>A zqff-2Ka_m=cbKjI>^l&flx{8uzCIE2lXy4xi^us?KF!W@e#L(1Nk+e4QhrsmuVUPd z`PFfivP{*O%l{}_CT5}5Z_cw@)BCIvE^fE?ll#$6?E4|%FzHm%)iTk~ z&j?{9__^}jUideqXFgAUT=_jPazy2Au$|-CfZ2(I?K3rfV1)Sw+b`F6ZH(h0#`9QC z9G{MBU0$%NBl;dfCL!&fM!D`jt9XLvc-JbJxy{RS@ZdR|FoO-qMrxI_Df!- z#bi99Jr&dG?7xY;jPiYK|NWKvU+6#_7-4$`XLBGJ7-4${XA^MU{B1stX7qf9a7)R@ zwH;!ID;;HiM7-VZZci*uy2|-H=C|eu&^ef|qgi`Wpg&Ss(KE?K-|9ni& zt%d{a2jL*=-&jIf`8KZnfrT2EgZ%h7<$3^cR{X2b>oL?SYb|0%Hu9D9?cdRus1P=p`zQTZC_6X+S#ze?I#>_=Js z*}6ON%jZYL)A{Q2DYwi0oG93V7Wg^y?0l&0H@#4JB^@0F&Le^_;1%roxZP+c+I9Fi z{Dn`b-1zv^%-;LTtr$yfe>;j~_z32Blh9k6>!qaC^ME3FMBdgE-3Nc+Uel+t#3=4d> zOZ_}0+VLCGwd3;8&qccl`M&kh#_@&YEA2nmuiih9|7;=u8SPj3c=Q##Kf?B4U0Ur1 z>-VxQru>ZUs}J4Mh$qK4)*t5^mwdV1VDs3KP1LJ)A4uF@w&P;$1e2qujUSO+_nb_L z!;$mZ3D7!??eTHz$nkvK5olHyztHA^F&`cNI)U$c+vT#9{2IDKffg6mHR2n$(@?*JNNSwkHNLJ|NkKI zOxHf8^sF6ZJL+p6)A;6tZ0FXs!x|s#`B{B#?dJU{gDYuoP4x4>i}~B%q0dL9F%@aS z*7pBb2(cD2SqrxnA}xh=RIg?yl=`}w|YKi)3dMaqvq 
z0z?KM?_;^4o+7?}UMt;qRQVt0gZ_@~0q+;5q)Fa?!st4s2jow(zu^3KIVs9VDf!Ab zf!W2cZ&b9NiR&9C59eFoIB0f~tvmVs$))7;ichfbsfE#))!#K0SQs)&1*vPhp?!Ck(5(-T=O#=IVN2 z1AM5zVQ=J8g?qgjL%N{gVL0vNPv0*W)^I$`_9-vmd#nQj(eYJ`qZZsR2RjJsVby7( z@Fbue=rvWkF$A9zNKX9w-M+8h*M0CkD_JIb0pIgRJL=tsNDnqg`q?RsLoPQp^hff^ z`>mgA^?i(Szc?fCeclQ<$GUUM_`M);{zIDojMf{cW4}+_zht;kJM>}gUq>~67(J#> z?_aeMwr4}z56};3cHD-xcI8O9O^i?p<+eAfJjL|^zK_wLWp58p8VuzkfHvwlY5 zloD3p{+A7HbCf^jwx3b@mfPN->)WmB8Gl&8B;y(!!fRS)2v`24xxZZ){u4DM{5ua(^%H_Wk3z@T(Y{>P6%CCGn58Znrq=B=~oA&V}FFzdIKk z&JX;axmIwR@%xj1buPFfu!L=Z7TJY<@0$g=9L)7AW!c-p?)FFpTV_tMa(%7e1GEL0OMHaNBQ(I z;sN?_gv8nu?8nmzFZJ&oo8JTZMD6Q4K&sAuD;~lxj)UP!jFJ=EwV;jnU;4SOX6+b` z+u`e?MyCsHM|mpy_d*%BK}n3;Xkp-SD)lop;D6NvKv&M)pC$b|HJbRo&$%2nk_D5e zpOX)Mo*MkvDD4>Ma3^*=`53%C2H*LVZehLS%USOXt8dWe??pbjoZP&Ja#f_Ke}{5> zHQ}kg0X{Zfil370^zY@`cOHxM9ly)yV0etbHi!4zYs6j}V%*y|V=v<_PoO*KFZT*) zeXyfW3*q~w{weE!j`C6)c&QfbYqNfD^y%Lr1tb*P2e}s!RJreog`Fi}Bp`y*#eZ$~hnXd(JL5&PU&;QaAp3{TZF4nH^H^T$7gt za!>5s7I;)79sGPrj7O*SH*3GQI}Dm0S;A)+WIs=R{xQgMsn1ge1-Jy?4+=ahh?{=! 
z`HAa=u%G(D?Q*{_#P@{;JMRU(2t8Asb-DbN{wT!9z0q#J4=mE;?7SlU{i>H1GJV4M z@(|NOhy8yfa7;hTx~Iw&_VYwxIKTgz`$2r4uKRCG3GF!Owc>ux!$P`rD(QsxQHb^f zlW-w0U-{D4Xg2X9DL!e>gZdmdLgD-r^iQZ0T8-)|yB~G@2Bsq)s!VBCCH0lG)5}xD zn3sTk9(Z3$SOD{yPWfNm{R;UjbQbmSIjJXomUxXX)?~HukuRmVC*daks4eEWRiA!B z@f}#j{Daf?YkYGbxFY_FwcMZL=WWCO59<5c4HbPKUTN}n9p$aQeYd_(d$#du-<7Yg z<-9nZzDLs^mD0=Pzn^~zV_t5PzOTQQdScM-$qr+rtM9jQe)@XZzzFdkoQ>WE{{Zpc zIvcGQy=42_>$6{Gan_U0?y|VyJNxApA13~@$*`KO6V+$`u)cS@s+k|GzU`F4 zL%+go&*z6I-*~?5c$QCINxtVb9`}pl{IPLAH1%(szhVA(NYU4E^03HDasC+ZfBq$5 zK^$-UJ=#@-pntP|;r%Al4^+2F?K0-x&~~ZH;)b>v+J4x1zqg7qh8)n_klb?HE44tm ztxM${`92)wbNS!U_I);wY`1cqI*&yDU%#gjE+n*+{E))k&^BFuqn@jEd`JG*M)^z0 zqAVXQ-*Q`r&hL=#zeo9iFPc_LC_m-4m*~6=`D%rHFdh9oWLf6fuoRK+66<3U)%L?w zbUPdGA1}Z`k0`0N#_~%F@1gNZiWT3#FM98OA;A6NC>`y9zr@$c@cvJt_x`L8b$bcfMNe<4uf@o1Hj!GvjVArF&Rkx`XLn zZ?(qxZ)X46cY7Rf&o}-I%VqnTE&{)8F@DbyPJUjT^hj;Ib^PF;+xuuF_eQ7co+lN6 z<6FC|SL1cNCm4JL1#$lnT8ubCtQTTBrajb0pyQz6>H9PN9L`(_v4o6{1w6}_KC1Nq zU#m(b+VS&!1kj7ZKIJ{&RcAejM?UYk%=qK_@mhVI40cohfe)ZYGda=p3y_mLVm{dT zJ=f$YYu}PaIVtkN*IP5Y=%F===t9AddI`MO#pPqX_e&g$S#LutZEa@hP**V7n$t=O=W$w3d{gyt)_eHr}Am@nZ$}8Uo zjsDi?Kcqb7eh>Cmm(%g}gbO*FBm<@Eb)OfwUJpx|FT9fS;Pbn=dzkL}9sUAXrrs|1 z7egPRhoPSBe<*hJn2iSoxW1p-?x%( zPtdbhlChaS#s(t^VTnEx=Ue9AvGkZ<=pVrMwu``butxZx3(7lx%9qgoUMRu=y@z=! zFb_{K{{;MJ+xRSpeUQWP_VwAhPqEwrxd#^#?Q|~UKmI!ALpxq3?HGSI;9A(R^OoYmty;zo;MFj!zC2)@%}%42kQquR{@`g8TWB0l#NeUfjR=`PQG`! z^Lt@4cpQe5Pg|f@evW!U?K8R=F{&(Ozr!MVSsc0=O(=5%NeABR#qc)t#BWGFhH) zi+%=-Q$V@8W3j?T93fusY&^ePoTqZ!&g_yAmWSQ&9nDvtGdLngP3Nlvd=EZ+LIyBD zUyJz5eh2&U$UmnK_+9k-Y!(v?=?#1_zc$jNkbS?#?*Uk2#j^W8Qo9$vZ;DhT0q_t! zJ%g8>3%o+#qj!RC8-*_L7tNTZ$+=x_?M`>mkKy}k(JzOE4&%ej@84~7x&*t=!NXgKnp|9Vqu%>-*~R-)8@jW>%!0QgTe=zD@ys9Lu-wd-C_bjsdxO zlQ`ns4nV(coR3jYr6z}^g!{OVZ)bEr8sNVdLi9@>|1zWZc9}k!VdY0qz{;#leVuQUEAJ? 
zY&!1h5BUSW`FA^Ujz1Ze{}Pby_n7;A>uGl{=`QsS%lly8&vU(&-4`nIfcHLM-6;G( ze;&VG{z#a+fX}QyNEGy_-j2T_H~LrFtEp1X?CP}Vq_z`yl0w9@dd2GiUi}_cRPlGd zrCaVL-c~5xwMpaty;a{&nd_-DS`X|#gs`9O_IrQa?g$5%Z-(`g&4YrNgst|y)qz2c zCWA})3}dTU?^0+wSucG^2qi)GrCK`2W8Y_-F697PGyA;0bgt%4*Ajo9mtlT$O85WQ@snL722^*>k0N#h5@v#93D35ya zTOM!28Vo`Vw?V!{4pcDYLHYW!V&L>FC1)7F2T&ki~0PuKtD$-km}1)iO?ZcD`O4f`pdt>n)43+R4)nJ9Qa2cCd)-tQ5M-0XfUsY|(q zAV=RLzJ~8mKYO<6rP<^P=nvI{dNJYw@0VO9e2bWdpXngsxL({ko8xmjdmYe}`LLbO zvhRHRI&M0h`Yr1ZM!(E{IzYJ44?*Nj?h#Nrr`->dUxF7}1bv6~EVu6*5C4w)_-4XM zS5iJp3H6irU)bM3bnCUm+wC~_^Ef`z4)S^IFrNpG@_CT`BfX1?#PLaQ6@p1fmr{Yd zUeENI-{0NNcGX6=DLnW~fKQRm;Q;YpBlsdCzR!gAhg(>$uPX&R7n03SZn64p-&VLq z3Q6$!dU?{-$|pH*qUnBLTF1c5D?V;tW1Oh~KfljY_7fwdSMV9y z48JS*3~h$L?|;(PBfl0CJOkTp`$;-xkpQlzd|fn*?67>)&;7IP`;sG1TKY+S_RnIw zU0%Z|_dB@V-`~aWWA(KyQw03GEm^xA-*(oMwa3nj1#hpPKi_Y3n#S=m>@a%R`d-*! z^zeD@j)L6mARSz8vhq$(FYom9@=i}L-;$nYKMUPKdbi_Bx_V51Zbi~_`8`g0-r{%~ z9WwbRo%8kFE{HNPmQH(-q-0}Zx-LX{&f4(@yp;(C;Y6RIlugVYUh{vEmP;0?b~$u_j!`9!}&eP zKA-e^iG9A3e^-TklKX+QotyXAe6RdQ(v$2)CWCsJ#-8lSMvMuVMwDKbi!7 zpOoJiH*pnu>VAODoIg6Bwq9l9pmT%#P5N7<{~F1! 
zbhh=9lFV9A)8?m0FSj$Q3pgJGoc+;#jG%8t@`E0ywy{s4JmhiLBAK6dAttnx_fj<3 z`bxsh=DqLM*U60ee2RUp^{4Y~{2V=MaSBctJ#2Ay=rGFuo?7{FqZi;ym5KXD9DaHQ z`LlJ2@nH}7;C6C2M7i|&VR_o$sC>D8fZYbTGsH$k2=664^c%=sdD>rU{!+s4QMsH4 zJ5OA06Q_rS4@iEf5MLh%N7&Cwx~e7Vmuohq8)|U92s;(^_&jc-0MT|XX8swg`1JPA z=wdwFWKWYzw{O#xn@CT)&okY(PUDz2qn>o%PQKqq{hn?g*7)Wc`BTSloIJ3W}m!+^5x%0alM)Q>3{X#P=B^xU0DRaJt2QZ^YdO7$UTC8@6q+M zuZzjJZ~Qw!IGdQiI%^^0U5sb?L-0rN@2L1Z*!>f8U4K?Io9-tANvmBeHNo#A{&al~LG*{}atb8ksM_yu4t7st4o8Gog2U;*{_B*}%PD7hzQu)n zJP+jKQn;M*nde_t$WMDQ)(QVioY=-~eEaA1|{cdSD%jtEKPNVY>&1>c44&je7~jaJ z((;=qE_@GH{54c|l^qBTa zNjLwJ%TEkf%RNPSxgUxB+5Mb;55L>@Ztp<<1JUs(SiU;I`I7S|-FI5$rIh@rX7K%J z=>Yp_@cW1c1W^g$5mF%?;4?g;$w@lEC)f_U&!Cvz5QXN!t}8f`kZ0>&irK~v=a^qJf^99pF17s&l(kOx1P87dNy z;QA&!V{su%ul($oirT5)fM0c8i*cwzy612#ZgBdi+Bj4pzw&$*&+-+(4Bg}JZ%x>|1stVKjr!?@dHyHU}tDOHm}a?X|pT*9EG1J_58jr z6S_`mJ8oWL^b~pt$fP{&^WVbL_fyR<`_SRE#N$20bH)Nb!(H~Yb&-5rgSVRU2tBx9 znt$i%<0^-4FU+`Jvn546G{f2(t~*XXJi&a`ZRVjdD$EE#rQk;-rpF?bvYOu;z1pj(HIB`}w_K-?z@@k-dcD z<9YvL+EZaUf}y9KRd6U=RGD@)z=7 zk@zKoH=6E#!)jr?c72uChf;Zc9gF#W6yOJWs7|Kbx?NUEzNqx}bI!rm^?W?VJO}mr zI%v9<^N{M@NR|EtqDp{ukCeVif2VOd@1*pv-ufAhyS;<_C{kV8tM6T2AGPwCyg|4n zpnu?JIIx%eyhoFh8DvA&Kf*!!S!P(dS}*Uvs;y>w>qCs!ON`fg*d2k5aacdmFxYgmACXE#_J_3*UNjnYO5`Mi1B*K%Jow3 z)K**i5aacdmFuNGsjas3A;#+^E7wbZYi+fq4>4XZS-D>BPphrA^dZLUB`ep<_0!sF zOCMsqUb1q%+?QHgZRtad*GpEem;3W-t1W$q@p{S1^?u9fW9dVT*GpEemwSb3t1W$q z@p{S1^->OMt1W$q@p{S1^-?}-W`Ea*7_VpY0V$My)T=Fhi1B*K%JqtZ(f5`<#CYA- zV`{w|Uuvr@eTeaT$;$PzqS|UpA7Z>-vU0uBAr-!*4>4Ys{o9(}D+Yyx+Gh!-_uWBTW#q>jMqz6u9x<5ZMCHjFs6?20H{2KjRt&pWSnu;VFechjQCzl+F{=|3=egT-RsW?gh;9iMXPC-!$rl@PM^v z>NU!blFUv~bLJCqMAV>Um-~xA{d`}x-!mQ_ zqWp#~eXZ+;$nWbseUqgU3I2U`%+vXv`qA%IcfFBz5>DSF+oxjZB=YwxS4#euwjccT zfS>-|NA=SV^9|DRy=esZd-yumT$|UqUkLUqa#roTqVCtv`oE1^u23K?YxXJoM%pk`CHrBX6>BKH1Dr- zZ(%%Zr|=g!s{_oR?Jwl8m-zRY{hYx&09?ZT=HCE54ehjZn?*d+0hX)Y!+L!E*zt0E z9P-#Bu*cUiztbbNeKk&h-=Bqg4&O=IqJ-}ob3Nhv>3rW^EdRxGcKpM3 zoonQENSo*S`Q!c@XrKGMHrTn`{u?M?Zm)#f%%5ZDc>8bXyaoCW?R7oeH`)B$)<0{P 
zbt*!AaxV`;ESJ#l?#}^VUoWtHf1KQNuJnW6jK`P!zA$$1R(knd^{kV6pm$@qu(t#q zxvz}v_Hz^+%T12_p1=bpU$$Q_wR;28>$Q0@F3me{J9{sxo#uD2aUb-C{HG-AIqBIa zd7HHpINjJEgN^4|zp;DO!U6UV@D*QB^!_U6Ewmo-BkD7C zI$qEI=6*omALM#H{x0zI!YAJ+;rnVbds*aVk+cu>{^JcIZ?+zSIQZ-O9Cj~i>=TY5 z#OHMtegx;I1diMB?iT|*43NMB^h}=sl)RmeH}vFEN%#AE;&DfkdtF~6-(H#OoXZYs zjz8%ZMk%+c>pPQIKdqlNce8f=y~qvTYrAaBasK&zE^haGJIm8J4|M;0xs3uF zZ2x_q?2|_*UqU_Nd~tsv==mvG7%8iD2YQz^zvJom$2ndlJzXSne2Zqw)>r(V1;G1) z!lNSc?@gAWP$e|0|D%$>&u%x*z9aJl^XK(aUfd4xeU(}Msd;|XlNRY`@i2t9g2!?` zF>Zq1-d?xw+#dAtaE(-te0`;>wV>ZKfpr3;S8u{!fqRoa)E|cw=+R$#|NGyMRC!Et z|GD>@1=1m4rJ!ASZWI}iBH8^l3nj8x;^ir9zu()iP%0 zsUTNoFZuZi_(2u#EgXNs81?^*L6-OZ&VGN7pTkf6d!ROM`MBWtyIxDLSfqf!Z>{J0 zipFP{eV<~JvIK8OrsrU2NLVQR0llF|gpV0N<=maNw>sz%cJwr<;Cp+|6Yvm6M?-1{Km1{4lCUH<=tAZk1zGh zueJCwd%wuyHa=Wl(Rg|{>Eic*q;<4L=+RnKVnt#!Wv5 zWAk9|cd*+4v3fmv3-k}`4~LEan>0D8?cmezRV%l>A_3ynS!^)+*|A~tAO_I8qf7Ya zbYb^NW>5^1u!imMc`C*wVBmIJW-k+b67D5j-*<`qHpq{9Fg}Dg$qxy^*742<&%cl6 za7wa^3FT{dl23kqpmrzuIU>EGmAxU*t$qgx;O<>!R@XZ?=CjrG7O!Pn2R zr)&4>d$i*|w8QSd2&1&CT|UEp%B!Di^zyYGBv6Lm4iR_>KHsYC0F`olA=ed&$Nl2_ zRR0r>>o2@VKH&9o(K}gxm`OS#F8`8rCi$2$Ik(^C zwT$;oCLO)M`@JGzjC}WTDP3dq@bSsvo4lvfncvT+yZrh-EyyWEJH3+hNuRAKW;324 zfD~kQ?(i%@|-gKcDV$k;@|qJ3$^JIh=kDIV|csm&=KI zkL}L&9_i|OuYNaLf$@y|OI_bRZgi-VuC6~*+eho?SW3wd`M!hgjptJz7vGEDi|u|4U&2eq8rNnvz9 z>%WG0E);&vlb;yJ{y+j!&=?M+Z*gYGr&Q;ZofMokpFi}KPl^p81%EGhYV(jgTd*VjpS`!xL#07ya@Cf#dKqLk>tcU1IQui1EFdNpn1x>|ZC{eS5# z|Dg4zOFpMh)Qk2(K0hmRL05c`&+PS&^8;XOphw zsgzr{3*NHnRr(%rlxs$x2bDfIYQAuoaDmTa3wr|Tx zKDVDB-gX|>^=vjCjF1mmzuQ5$?iUL?NOzy_Og#5!@^9xN<9>P+{c?1t*6;V}hXa(? 
zaBx`Dfw$7x=68^@c)lM-Nq6^e`aCF%Fg>2+ID%ot!no~_&x!* zbF%y8ZQp!YY<%gI3MDu`px4Qw9aBNQ%-6bJA&7)9Q!-14;hiqPgQ`0}JezM^CYdGp zc$V2kn%Y=TvZK-dnp)xfhbxglMQ;MRMC7LH*kLL``ZTQaXr)L<#QKmwrszQ>u;aGyMFJJ zeW?hppHZKgct{>zFiDibBEi$o^)EfEIv`zK!TTh&eaSc9KsbFF9O2gC;93pCvZxgQcuDNo_JuIpwJM_~GZ>Yq4W zREdO#^Q*Kg^T$~@QFD0*+zi8Lxy#Ig74ZQc?`WfCqo=VAU z@l*26(wjlbQ?An+2V!{OTq(Il^OdE2XgBZ_QYLXefR^vyTE45bAA=6kQyTK2AC;1u zEZ-uiSEnQ!ZhvINe>kJKgR|2e|;CIem?^Ib>JNYditBL!ymYT_UQlZ=c0}bzsLJ| z`&LV@wGUZ+YM`I}znb(|JoPuj;r3ON&)Ls`_W9ob+u!Z|zx`J4|Lwc2{>0;acDW}W z9}z4&@O(P(bhtifztQyp@KCOq-{mIc9{HV9%Ku}L|E;bc+Sj>$Xm6T+V1Ab`9{^cC z!{Jc=ZT54Z{q3$V+LxQYU_O_>x}`H8!{Lz6`8CkK$n{7264M{d=kn?MXw1iOE8j=$ zXRUp;>y!3Prcap9<&IJYIYmB(!=b!Q_OsT0h3l90A2t2Le9V!{J@PpQ14q6Mk#B+N z8^U`(aqKLI;c$RoZ$E489j1S3kQ3H~_GQ8ClYQCW<*gz8M|^Hc8Z_{&wjYd-p_yUC zUzx`j$M>r3UU~g}M8sQm+AhgagX5qiRaGru3{0-F~pNUKMnq*&p4f0=ja6ytg+6e5D({Z z)Y~kr5%ygT?-(1f3Cm|rrr(NmwUh3FO|L>C9Ou~ux9{hpQ?mUi-}$*G(8GG_9>M(FOYnC9_rBWr zG27p3;(FN?fUnK&GW50C^IYB#@A}5)Cj+zFUC$#wim-Ebe4DY|cLVD?QW z*IC?`UYq@?3dc$Ad(yS&)_0*lYxOL)2m6KS5x;dVg6pewq?g-C=NhPA(4<)B_4XEj zhaW{G9P=?j=g69tPjFzNmJuCVN_$XW;OhBDNjwy?a@^e5^|wO_fNR6q5nWtE=^`T26t`#HR5H=UgHO_3AV zx9_$1sqz)?2bb$psK>R_)FVB7!sy_7B>P2CE-At@)l2I+Gs@q7WQHrtUv2iqZFR)! 
zUW;5F!YCBKwhsBZz%$8%&aF@`mY{w*x6_h8KR5k)_{A&WW@`8T?WhFYnc7?YO>bOb z@cDa3rFNQ5f1bngqkUALaz8KYQT222QF52wxau{R|F@8|vn^lzl^l^Tlqb>i`sL+B zr4Bsb4|(}dsNYQTB7LrUNspY(`14!j+iCO#nlv?|+H>yC1~DNj~-7r1qQg{{05@{ZaCB27Q0l{o&{Oax%^`*Y~VzK^#UwjZ<+RxOuT+F`)T~g;>I|d!E`b*~?59j^w#?=sr)rrgl|*UW@&5Ir(*(@fgXuz7k($*Oc|w z>+c7PzHj`x@O1oo4SM~pXW$dpoyvMWm%qaIi~y6{xA-!jAEbw0vU!f~3(BscHH~(; z4g)J(c1@4@x~C|+Ch}cp(MVtA>G9p2?g7fK@$^v#=&xx-jKh@;c?=nYOKNs`;(X{!VoHn1{VbXd_QBVIK%PEa3)#T5EV9EyWsQOz{0{gMV>!g zT?o^j+KMeronL48*U)ro55*6~W@;1f!_nzH6!+Wy9S=2xEAsuVOR2AEg+SkfKKujy zer~MzT^7&N7xDetH26sO!7|;zU#7Ef33qWopL+>WQSzGcJy|anSLz6JKb%hYJFwor z*kBj{a11{M23Cz~4ChB*{(AfPCtY5Zf#Yx6+KYjlFEq-NdR^{>k zxKQtG&B8qXWTD>Kn)w#5`!u*;Lk{cvS%x{U$rjZOF27%+i>0G{wAYh`a@bykr<{xx z;#vPcT{5-4Z-l=;SK{xx-#wYXTM!n1b-qRNr*ptMFGKonFSP#~q3g*KU2iMVbwhzJ z(buBO*lPUqU-ozE`8@EK@{ylE1m78!oN}ClnAm$J94Z<&c!Q-E5Q_5Z0|?%4varqp zFl#&Okc4m$|G|!eIL{J&z$~Qq^mu2-Zj^|vA_Lh zd@tNYIdA6o!k;pHu1BRGwT~?RXnY_aZUFrMs_%u%_t8?H(6L?kQ}^03OxGLnO`M~m zOQSs{pMww=%XgC_(tZrT178;ZVc zjqh_7?SbjO1J->*CXigc~lwalET0?=ZO6!q*=v;i0}H-J45vZ#24Hf4&y{T_yZ#w}U18?=$>tE$C;j zu8r%TE#;>QWIO+-i(2FAYoX_+5-RdtI!CvUks~TY~tWJr(?Z%K4oRMLX#`@}Vea z(R0q;|M&i`!2j1F-wS5oKRHABzWjfoe3MV!62_@Fl~2E1 z4anYx!^~(-Km_SuUsti`Je2l*R$?$s*B@0XT@*M zy2DF_`8w}Cs^UtSY;uY5&ejHmqZv%(AxRpW?d6!AbCMd|l+fm+FO~&F1yr zEyS~)zh0;(!|%ixpVzyGRKng-Kk?}@_HRzuIFs#tIeedQWbu7F^&b=Rdkzm98v73T zMULA~M;P`Og86!x?lISW5Tu9cHTwz&{SSEY4|>(hW4}9D zc=4(H9RcaUvXarkkx%y`(*DN3q*ztVhQ^h=hASSX*p&@}2dfs~UEcaSyRE z{D}EQ`|cR=oo~^}(Dm-u=6E|4@v_{By8~{E`-5nFwtvS459__+t3em*qx(LVqmwY) z`J}@Qf8Kq)A3*U3ddQ>b^m#&UmY>Jf{in4#@3i)sxW)75<@9)XIXo*j=<6hLa1*2E z+1%6k7xft47x5r~B{x3e@?TB-p8aK&Pulvp^qStG8eSLqz-Q*;{JRc+VzbBV{(r60 zC|{V1_{NO6*dFn{j^y;1iy{}U2UzDS=oHGAEAiwrkTlwEdbhzio z`JIk?|B)TWd~fM$>B|n;UBN)fcTtY5U1L3eXuY9NmbG;y+;Soxiv3dWM-1J(&j5#) zI=t?sSbVA4$K{aaF{W1e!&CJ2ea{H^X5mAv{MJvRKk0oBt=kS=>2O0m?(?iW-EhdW z;6w+%A7Xpg?CTB6i+>-b_S5*;SnzLL&lK8mVX56m&<>Jg=4U@)dv7SU_g8Snatgl- zc(<3}9|yeTyeLO|N^pZHKiBJePd@HPI^BNNOme=eP(NE=DTFCMb0Fv9kLs;)km+l{ 
zSB)FJXhbq_pfW$&+2yk}F1r3_zk9Jzj>b#E(dk|~)kk_?_skUg3)dStKE7PQN4h=) ze%3$-lHkA7RywAmqvyd^|9>;+*j&IzI@W@Y>B>D>p!dxvHz}3-o*BwrSb%#o%3V>w z$NF~8Q10lPT;Jyk_*m|ImW!sO9iaCN^8FtydRqJdr3>kG^PmTOVB>XE|DrGsbnNkT z**SXWf#WMhbm)Mm*XG__p}aTaJrKe(eOdda3daGRn-~fa(>fi`SNvPZEx%ja2)M=` zAD@_?VZE!GuPcoq5{~xFx0sKFz%f7TrFAUo4LZGdOnu9A@`p>?>7h~6Z=E|0EZ<+J zek5MqV@*E(C_vO78+&4W?ecs&Z?pE%F;+Lxee{YSI@jSyFYDP0p9MG0H*nO?^HJYS z;(N{W4Sm@CqTOWlea7R51CD$o#u^G@J*&x|TK)8X_t4mHTX;EuR{ULCGBo}z52L>G zzy6OCy&Ecd7#ef^M0lnTjsN=`p7{koH0JMF5N_!YL(j*0qV`dT#=f1GYw6%W-O!lZ zVJ&+6y^Q50dWOdQzOgd?p?L3s@f>&Za+ex<;QyVxTuaZ({k_TYT-1=THZ=CTmOcUb zpc7nn>kAe?H0JLT=l1s4ALaEke&=-jaUO5{$m9L{wvGsMJc9c>`-?w1ccS-av!{+4 zo|S6n8y#|@$Ng;HbCx~`KD-lAdMCy3cJ)2yP{4b$|H|=%JTFH*SN#5-n{s<t!Cfb~lg4k&zic0I-89MCmfaUQyUH%S%?mHpBMz8E# z-f@+eM0tC;a>KCBg%2I_@9*{AEbZa%Z20Q0iLUf%@3)%Qtz6|Pg!|mVW51EZ9gg|S z29H^}+WiicN2mPD@q75^|7L~yr|#P(om-y%&YTYTbf$kE`n#&WC-1=89v%DT;J2Rn zB@Y2_kN;X3`cn5aGXK_lF-wbhTZj4N_&-%X-4{3!~+ zcDv~(Zutx0$@AafG5Q{!<=?dW);l_)Uz6@Dzx`AHInOuXzg9;1THl$7c@x|9nZNkj zjU6#B()-gKcmL<_{q5IsJlg}$X7^d{(K_@Pd^)Ym=v~ketV7fh-|=OpLO!2%{YO3a zPrr@$D39g*{s;f`*BuIQ&$=F+IOs7eRUYtt^{R8WbUf+tYCq;b@K-1Ec1oZ1eo)RY zc-`ZJcLKe?A^+?@{?pd|0?&keea6cfL3$lO#qWtPdOYhf_^V5jjx8S6IGMvM{_~!m z$A^B``#E`h@I&h_dHhGCJb2$lTTKj<@CAR!2bfV-Cqgtfd&9`Sq&-NH*FBfR>mYD& z!$S^7yO6(zyDYl3ep>S|>0uWezSYMK$s^A3utGc06uJ6yqJwE5?iAH9k&`pz?-ijoUFgN6L7^v&QX! 
zV!Yv5<95UFBW@3j0NC(6;&zSfl@=biJBsm+=Y-SK?e@_K_-%N)-Hw}BAACl<(bMgA zUoqbBbh}+NywUB%5y*t$+35D0)^*5#!?Vck(_*~gS>$%?@a=A=j$j-xJhz8kMPYzz z;cRq|c0+&-n+cjCXp84f6f8dG_9^ z+4IXhtar#dmU>w0r$bE-H{(6U=Bzo%rHGmXq^v@%ZB9~$Urk!;OJG?dzH+^X@Vakj zIA4$Yn$tP%a+h^s@(7oXd%I^{K7mQco$px}6qKcVy|XT7pz2>uo-lkJ_ZpbpGsF6+ zT_zs$c#d~FOY`MhF2pHA9Qn~|>ljpDk;9{@k+bzL}6sQE&zLT+=O;qu7z zZ{j}MV!ji|mo8u{La$%!zRr7Q`VKkXJ9>LQo4noI@3Mk*K2`S-Kkv} zi{s}jh4S@13*ne`y8Rcs@2Ie!Gyk2GFn87DsQn;)KZIWc?eF({b=rT-&iQJ+jP?H7 z=2kxT3-({~sTUK5YVvI0c{_eXuG@do&V{9MFSzd0)%lZbjgPnG`m%l-KHn;5YW;lt zJDYkg{5_W?5>`!qJm~9)`{tgodyI_!G^iz5s0N~QNXHoR1|1s|FtF$k{i-V zr~8+rud+4HC-%p=MSiY1mg_Z>mo=_ui~9;DFY#^}^RNETEj~_nUi~@rN1daT99NS* zqpQ}w#&*_KFUrw3dnfamA=dM)R?qF0uLxf)@pWFn&o%g4r@-G^g8$)w=TYdY-QooO zaxQDn@qcwNMnL}*2EhV~HOSb248{HQh-NkxdQG!={{u_g@Pl11+1TVh4Xz+^+zVBS8 zvHvx+=Za##DD2NtGR{W7Z2tXLd*BecKB1;E?=#@;;XLsqWS~>>U-K*lB#DzMpiD-;{PZdwu`nTUv6e_Q>|S z-Jk9V{`b{MSW@Oc=^Oyxb+2u9yX%GFIRE#*n%<(lLpdtbJDnZ!LC~*z9Eajvye95f zv-00?Qp@Y*tX1-n*>zAUh*8bP0SdSK6KJVPrW!aNnASeFe zcqc^XAoab%M8Cta-YX0Jai|vbLA5YH_c7=<^_U$hf${j=@h9~?1+&af94^%ERJ8+=VZF*s#@_&96XE-NPu1j63rk+A z$w3Pb;C=uU2Yhe0`PTq;mud*tDj)azcPqMo_X~M`rdN{#79Oblu$>nks9c>uOptH8 ztyA+H0mIegeuHDV9^P-^fy&#=-W#aA*X)p5Wx(xQX|fEzNZ%6k zKa?yrzhd#akC^2Vu9~<&lV9kD$hY3^!zFzTSCc#O7ww7v9=^lE1C^C-PgZUL4D(ei zTupXbI^o%atI42+3D5pjt88{V67URHlO2{$cn|kkxK>%~_YGGr^ZSPZPmN!-+c2FT z4}U!1uSqbS0si;c7Z0R|?N&{Ep@;ZA+|1!Q!q?EAR0V`z0l%7TwsgXKc$0-|mFrB- zh`$Rlgue)W)piY?9uL1i;N9M>Rj#o46XDqfs>$`1PW&FeF5rK}<}tO(D!*SE@NYHv z_vP>&UK{XNn;lR3=OTvqsS$J^G4Xr&>VUu8?^CXAO#IBL_wpEKdsdA~ho^m}``DRIIo17P40E1TwOTuTyM=xIf$6lrbxw$3@>%zK zF-#Lh=WiLVTbNvTd~FtXz07plJ-UybVW#LjEW<3hIBs2nu*NOwYxY0sf9l_D7?||F zMickW(n-$~j_`#0ECh}5pZPS-qsMtg+AllncYTU^>bvf|JBxFRj32nI{$q?ExO4E+ zI={&H=A8q37=QObKjQhz@vg95w zc{j@&EAZ=da6eI~{}1ddzjvUT?4=mN4*Y?A1u-&v!|kab*jJQ`TIC1!)&I`+74_rG zXh-Q`u18aY_F|mkxH^FM)@@#icfmM<546R+vh7A2*LkjB=ZEnpT&?X_>=)x7FRfd+ zoo6Nl@WlMGZIRC}+b;F_W!qCWzhwS-pJ?GFU2aD{XZQ!&e#qySZA*QA+4j>mzhu4x 
zrF?ErKO6ZL`uwu(a-UzeJz?`p=DYCYt@5hLc)<6VJyC1x^LZucTZ$CI|4FGFx8uJW z`QBl6Xszu*e`f{oJtcUbS9~epm;1c3?IDbnj{h4a{<+u|NU<<{P{whcBPA>0Q__49d*&(mvd%Y0r4xugX| z_`fOfuZDQd(`#)@d|udgz0GF`{~t>Dy?u6&9MH4O=2^A2tv0hGd~GZKglqBp6Bfht zLnS`=Ja$W-enBan{Y(2D%>S`cy64}V=RaJkpUcMw^7M;J_3`>|$kTtXl;7ul*XQY< zE#>!lW>21eyhNYle{Y`t-BP;aU!A8PnAGl9=JB1Sd_JFb1DAXmETuc%MxKAgB)nM6 zhGV{~OZ9PjSu0O}rNj^C_eFX7cqu3>Tpzw1L%>G^-TM8Efca?jI0Udr$Fo1Lfs$3po!zrb?mmE@f3ZhF@+TW8tx z@gu*d$Jsh*op~WZi}U4A{nyjammkI$G`x>m4&~kL^O|hC1+A}F@bilv#Jk4+L-Q=o z3pwu@sC4@Lv*Pllbj}AcJv^oFJ&No1yl*XA6!&=nZu3K6Lbz;^-{-~oJN2~g;o>^M zdkcKZ7RCKuh(B4Fzh{g5J667%K>d~7?)O~~A4}0aUplu^+)uiyfS>w?EywoxzY6=a z9MA8>0!TIa3{s(^Dwq1adbtG?p3AA@r}#kl9hevo&wGi(eKPS3|6+{>aO8vFv<}Yo zh<{!vkL%l{n|Qv6a*N-Qy;R80d__JGzKRa0c|F4e2hkvukX8`?Dv3&~le5oLZ zr0Z(XMY*GB3@`cB7TPly-kbgzYf?`HieoNzoRh@h2YKIkJ#mj9jqGP(aVp8A1s zOrM3{q;L3BpZT%=SSMpYyR$xQ!rk9LFr3A`7L32^?ty!dHR=2GuDi6JmcO&K*Uv-i zJLn?+igG?vd$XPAm)e=%0*F7P_Ys8CxSzAuMyW(N?zyJCv3}wU;ZLCc6Wa;Z(qr1c z)xM9uSEM}-6tJ7;x;|^W#PX#Jd`~w$mr{xH&a0a*y`k?4eiE_FUrn}KdadnZlcy07 zRYwfz`NPt=t$l2NluO6-&q7a-&s+X$L?S+m`%{Yg^Eu?xcSLMII(=Wq{6`DpbMbpH zf@&XsI=#&MZM3hOj`_Jioli=~_L=Xg5fE`d;9u6{@JwO0NO7d_xiT_oAYD3{&vqa z{f%<)Tc`s4e+ysdF_@nghrTNtsKh+D67%57yKMXzsBEzQNWJlN!TzgNjv=?}%@+#t zPP`}z^7Jc~K6RhGtxX*);@A*TcXeH-LC}vB|aan900GJk3UtS%hwYg&*90}8t~hE82JBoT6k(g!mUi| z=6~4d!+<|gqQ~v$hY1e&M(oRO^7$~}x0m3(UG@ij%!{%A{tm>Le3am6*=s+$R*89W z|zdWs6?h5#eV&C)z zpC1FhyF@=t?rPGX=_uS)oQKJ&3W{p*GPr2TB>`^jnZ z-JIw9V2KYdCm+hwzfnqeK5fj?f3}qFblsSz&nuPh?YKTq|Lan^%Y~0Kr2m2vUp#$H zp8wyK@_V~qm8bWX(tX|FvON7OrF5tNJ$d>kOZ9X7F;514zf(&0{?FF+cKx|hx|e@Z zp8k$f`L1`k>+Si!IjLQ_I_L2{rF>2|wSuSbE~Pu)*h@YA7fW<`I&0_YpDm?(yAjIM z|4S*|%Xhnq?e@KC%ZGxD{9UDXrQVuYXUR6c9K{6qiypKvhR1wgmg5h@WuN2bq<6G1z7zgF%&%0w{Il6-`|-bM(J8x<`1^`BzZe}p-;Z8Ote9(G;lAu5 z*$4lByzll>R8{wNobJBcEe7|p*-MbIox#w|Ax%7c{1Zk z?|1(k{XXJrW)-k6y51BXwJXESSB+QO({+Il{g?524}^Gs82!G5m#NH0tq8u}>+}kr z@aN}(96UdVTASknA$*+Y6NKgb@g(~FJ8+%L$!0sBTJ^Bvb3N9?c`iEQxd-w=pYC+` 
z$GiviN|!jG)-m-CV%FvPbNOH5VSTSuJXi1#MEY#tctvLpN8<=i@{ryWa)5m>I-S?% zx*DD8fnzkL2h-IaukXeg&zkVuEc?gAgPz`O^Y5wCdr&x>?t2$r=|A>U=2v?(KJWZw zm`?TOxj8!Kqq`%%t=Q&fN5cEW2*Gs<`Z|yFJbp8*_RHm?6ko%;hIG7dmhO%(Tx7oP zpDo@KPke0WpMl)e>~tgewBNw39{i>E-?MqIxPFW8vx<7)1o%}RCw0!5_`ibnoTgdL z;6tqANG|n`l;pm3FP_tpxA!3rvz^#J9M2ywj+@}utF}^c8|R@S za8;ZAwcvXZeJ$c|g1-;`@FKqlK=?)wPYuHK6PE7fkAM%e5vKfDe)#*C-tG4laGiys zr*oF+AeaG{_L-M#z0i4;uUD_|c=C<*k@%(YLifh#`()kgBE6>bo}zndy__!5eG}+C zh9ZZ%gP))mQJ~lD)0y%3`km6rAL9f3v*F|E=%Rf~eu9*Q^qnlxM~p$v(J+{Zo$sY3Z^Zw$~oJ8*nqu%7)lJNz8* z5C8J$&91+)-tUkfdnd(!N7fNQW;*6Hw<9K%c_8<=FtT*e& z_WvqO$YTEb4EaA$%1>;}|3oQ&&kXrLQOeJ@W`3TpESA4-hWvX<`MG|;{0|~O&lR(w zb9=H6VK~wRT@u(mP1(AIzxloQm<3)9K>L)#>8N+3Dg*-*oZyLi`cb6YBdE?XD&q zlFTpO`$0OlMgQ*n2qDl;fWI7$zv=7VyKV1h4EjItvL4&6vtZJ3jr$XiyU%piNAc9t z9dJygzb)VyUx8yh{gn~VdvI1GE?czO+Jp4S5t!{rzoB_J!lY+cyt_KoI^;3@xj;wHEI*+1os zzI!}E`ha#%9RU4{e$46FyVHF3Q-|kY$H$zUZ>)lkchdfW@kM&>#=8sC(evNxo}ZY= zUpd3yKND>uF7t^N^NJ+1|&)AE+y|JI_n|2^q!#X z$zI4oxjfc`{5nSa_=xwfVtHF=506Lr$WQtx7o&(`zN_ny>o~8Fj)y#jo!eLrg5fk@ z$hu#!{|7AKDv{xNTya$QC=KHk2h-vEz$0vKN z7sDT8WS^^DXvfwOuYS4pexLUfA6+&1j@f~|_|5#X|I%ljPfd#@9CtftS6yoS)cFh9 z$wmH=U$T?gzlpbhiDkF%r;FuieKLK<`=RW$##Mv@J(qeu;<*Gr)5ETx8w)(WnjA+8 z^d6W_{eL`s_6sIt8y#NqNx6m?o)Vsx{ps_FTBVEVCY{%!-0FVMzNJ2oEy}6Ymy=4k zzGX4b#cxu$^#=Y$``KTYLll3iaf9ExB|D*-_}=W48x`cXCc7((Db=rjI@f5#QR z&Q|Q->d%C)&58NOFhic7?WKF=i|f#q&!E8Huse7!93`>?K3#vzL*{@* zPv`gbboo3o%CC`uaJ9-WIv+l5!B)Qwxa`)c_67B3tu5-a#Pd-uW@=Z$t6#k7?Hc9h z?b`O#^JM@?IT%nw~99K_+cZ=aYIt4HJO1vMK0q=8X!23bNyV>-=$|t>OGQ+!$cn|qF zosI`w=;N$=kI8RiiOW$s=Hc|&m=9IXH-2k;O~+loXWh?PzI5FAlyx77u-7l^K4@XC zYhD66^nK*A$~?n2u@jMS<#m_ZtW_RAZl10CQ*US;M(2jv56G8}8v+jE&VJF?biLhI zcYkr!@3|#E=+;)>V}7mT^EUQllb=HtZR5W2`*4JTm*wa7!fhVEX>%(bAT4_V4jl2;&X8vuzp@BZ<2g`#`ACTWen5TCLj1CIbj7SwmV!S zzd!Lpi|D)2u;}JPJV8ZoXmloP{B-<6Ca0KauVTOPTu>5<;3OyGw4H5W_b)0GXg{|YLUtAIWq8j~TX|xBzbe#8DzgQam;v*iP_X|#<;A$1` zNVJnyBYZskJNA9b(yPhm&DT0M>t}G`-!Xryr$;}Sn(pf_MLU!I|9K}jw@1^Xu7~n< 
zhc>qxb?-|)Prt&7n`9RozL2NBn7=Fsn5{e19lk&KLOYr@9>ISmvA=NK(>_Ug{YCVM zj+*5L-`WoQzHjIu>Rnbza9W2c)|+~*HaFHE_5&DB@}l=?iuS&(r@Y>JftX44igRH3 zdPkltYvM@b6gl zWQ^}k!(SX%B$q6j>mcvK-j3E8(lMV;Y91}U!uciF6^idfu0WXUtL1YcyTR(>I`6Nb zUlqrZUj(BhUp0IOMW_2L^-hxZaV39qKrH#&cfElpeO|3(@h$!yPU8j;0#{A0Hecgv zHM!El>0!4=bbcQDD5fWMZcFD+CT=wFMEeOEf3yy%^TT>4riOb=sL!P570>?5_@Rp& zU#;!cQ`g|9_9yf8+R*P>Zy5@GNxPTrGrZmfg!M-gB(irJLyHWt?5uP=>eFS>`Fik? zphNlDjxV1BLZ63=Gfkse3q4Az6$BwZ}@|I zTJ*DAw%~OAXA$7DTSu+`XKRj{pOQ$p=3E%MaKty$xslbS{fqg)mo9OBG}~MGXF4~s zxs-n{^VglP#d9k&ofp|z%Fho1Cw#ch_Z?)fHMUTy&<^$&_8NS~G?ND{rtyF;5YgW1 zLCmgr??LOx9N*Z_YPd&;PUGAN+JSgBU1rgf;*n+Ac(%@T@Z{Wd@#M&K@%2Ldk)V(5 zJn3A>G62DiK-@{^Hdj+g=gk&vaxv*#2n`3gZLZ#IUy}9EdFhTwmwp;SBI~1go|l+( zE+pU?Kj~aZ#E;DJ@g!Sxz~yk`ljd_>bQSQjK60d=@=Vuwz<;QHNJky<$0J|99}D7` zukir?B0s{++5d>2C&)I&`GW3Wv2rJkUl<5f9`!PpzNil--huyP#D?({Xm7)MQxWc~ zh$q~?09^KzucP-rwf(z<*ZO$p9)@rZ0*yZHIf#XxUMz?9alYRie7IwW*Gu!zB0Sfr z9`8QC zjF37Xr*{>Jzm9m>-JTX5t&=`jF|qu^T|$l&_k?|uh`3Qqe2oKM#g&h-`K zljU>#rN8w_{`D~ZUXa5Wov)vGn6Xp$x3)g)?}lmLqnh}=4zX`PA-*JuJTI z-7K~P%j;hn=?E|^K4=}JaSvk=ANr>ye;!{=e%|C$a>o1L41R`k%lcXUP4d)tuCHtK zUGM(lb-vHpIN$BvvRsfJx>|e8t79Ig{-bj-W&O-{pnJ{x`Y4ZbpP$z#&#R;Sd|$u) zL)I?&d3We-lB|BA^L;x1(|UK_+c}@F243}_nZ~R1u-mhP5PGWAL~bGa&v zXCLCe{+Rc>Y*Cym2zbdY$C-5(0`6#xm(K)#^fTM%?wc9^df?9z9t}{K0nP zv(7tY-8&=f6P_csXos{)kt$VTmF$L!)JU^KMr~L-nMT|wRRuxCPqCS>eSA|bc9_|q}SF_V;yl0>c##neba{!F)W@H=PTHFaM>ES{~A3Q zNZ_hAi${B+f6?I_U(g|XecxUBiT#f$MZ3)8sUQa&=X8#-e}kb-^6?CE!lFp8`9Nm) zSW&pf0w=T=zZu_*?}V$#Z!(zlMSYJTGpc)v{kx9%k?{K-A{54-8{OA^Y-PCTkxYbJ zZ!@&5`DE0u<%?x6OSiaA;<_{i7Yu*#0P(bm#3)Lg0F% zJ{=Ewcw&S5Guc5~H^ljiy#3?+MZ3=BU@-$uc2T*#n>6s@s>$83`7PsFz==ek0VNw2i@$#;xlWqm^VV9V$FWQp??c9`j# z#gF}tMc_Sn3u|73d+sbudx#^9&ShxaYFtGr!8j21eKq+zUnd*4Xq&&X-DF3cah@|8 z`QTYTDnYm@=do|{_!-Y*qq3Ao-7C`C=ZSOan&(h(;cD%_VCx(GF;AqOLVl@Un&;%> z_qfj&(nEWUKaINGspS#>E|5&Bf+;1vviIF-*Ubk@-WM%{L4P8 zRec=A;h!2G1_4G6={qjUFWa*=%M0iQoiuc7 zv#zjms);i)|GuQaN6Jz8JWkP`Ccm_Q*mr~5&$*p=rPIqW9ovKUXXDCv_ua(@bo%;N 
zddS;-?KN&^OMbL2$@Q}nrS-#qeJwW+W3sofyS`Y$_j$rS1|)%JUnzY*(z!lylJ#6DwBB72VO(`>g!oQKqU zb#KHstFG7^4}8orVq7pjEW>wCq(tvVYCgq#g$l0TxS>Vo|G~j59NU{K4{Yzz5}oe_ zox3h_`%ULSIL@3vKU^5&9nl!wH}Ws)wamO^qR0I~cy;`5D7^;(IE`nN`;)|nvRV%J z4zVM_2~Oi9_vKjMJU!@`xWT|}9DJ4dJgC2w#`eG$aK-fsbhRW;zs<80>xY>6vcE|t z$Hlxpn;l-|b03P_&C{d2;`jSV?ooZ_fPQ?YajJ1k(5LzYUh8QTl&53XB%HIdD-24%zUk9boP7tVCb{s;SaR?`5DUf@0aeeT8u1|msJn5 zrtUaS{taj+{7n0P3m=&be=^`_`rb1s!T(vn&-A_Lfs*|G5#aHCh1Itg^j-^6C*JS$ z-gS-p6QTI;i+%p=))x(a@eSt~fb3}9Us>FbcoBS_w7^&mFuXbc z?+-d++{?Q6S^9knKjQ6`bq787yL{y5F%cX^d7XWZr#wC^M><7?-RQZGR7|gb2!d|* z42E*bRUQ9zul;erF+76*^r;s<48PHaiomhGSCshsBI_gIQ&FC^36MFc(m`xe#E4a@HOrbuf>NyakJ-NxxoDqlxOK^KKj{HqmDQ1 z;+0Q0T(U>dqW!)auef(3}VU^=u2|YkJ5nY`_O&wwJW2W{J-jSdO=Ua^4l{I1mJ_;jT zxm=tx^>*xDVZP?k*~Yqu*Ll9Umt>!ZFfZ%!Uus|RP4b`1UGOnI9Q3UBd|CH=FZb3} zUQWniHgBPaAs2M2kNAl1{49M}ji)Y9FB+F{*oU+Dl|gT|XoH2bHHrB-+##2vm61>T z#n~f!9PR&!gsTVNAG0Zhkit-eX_C~ zP+!eo4!rZc{Os1%mcJ3_4w>Gwz5{&T9`R0y-dCOxZ;a>6Kl%{tk_DcR_8I;q=@FL; zj<+novA_pph6$g)KaKpi5q?VfJZ*TIdZ7@OxD5NoV0C`quJkU;p(HJR4HpYZzrnDEq8#KUtvMLgtFv0aMwBp$-2V=lkyAK987lZ*7(kh>Ts zwmk@VxOB|>V|wUrgVQ-->>riF-Vc=CiMbvf<)5w>bTfnO$@GxRX=m6UTF1$5Jz@Pc zd+HVQbx(!vm&zXTeA^x{v^ihqpd1#ZeKgIlN5H2#%U$m9SA!4aEcr;kv8=}^#CJ+o zpT!G&Ax8PjE;PI?4hmPUUpE~bX-Urfnbk~Z& zf4}=WpWV3RZj0DD{|^j5<$@IwKctt^r$;S4J>vY!?~`2Z_*DP3U5kPrlj&WK@r?Y^ zd35O|+VNJ89gb()r`=cinh!4rU6w!EZ8>Dx_bWxYaQ2k9^F*X;{LA+43H}KGFIa|j)Zshpk$;|t z*UtL7r1a4SxQ@@bpWS-E;!Q6h zO7jirEuDu^`(b_C;Mbt(bwV20EybAanJ|IboAgPK1dJBo|K$4 z=J>>ra?9}+_Xk$*mF=Mu{Xe%@{2bbP1zxx9{VDTNaYue>bs9opmho+;T^&usR5{SKF{aX9G@ zt+VQ0Mcp%%j&GP$-bCn?j{6;t=%SqAU((4xaGHOm&#tokosWBb8vRf8l--+-u5h}? 
z=T0gw?}v5IKcyc=yzFJ2XUzLyw6od|_t6;siS>?8^+-qEPQ|{CmydR(6CWOqu=u8Z z+iaeL{8fKRp9%ciJzry$4~QJ+te-`H7>@5N(?bWGp1|`wvRS{lI?lT|8jK6R&YO-p zpVrQzHgbD{@Xac%A?TanZ>}d|Ty50fW9gC;jVFzKAEMjR)2E%U(tjMU&PPf*y4~n( ztoH_4j{3Ze3U!3tvi1gVaM?L}m#=ECNTEG982Uu~RsT+7Uaj+tf|EWd!>?_lb%%O~ z{%Fp2g|79ij$tpSv1-2kko;-fYT!IQo!&R#Iy|b_vdfqJu%)Mm!tO#kwyRDxE^~fs zJWt2nKHDAZz}flEf9wMopR|uAex&2hKkYZ@-BqP$WHQ_r-Gkf){`kH}?;i?(i@$zO z4(A`OT%9}Mdd*9v^_s_EyQn`f{gqPsPa&P_cQ2IUKYAYY%B?Ru{`Wh9y5~;wwzSLR zn-%XL#52=(P^(JodUvs0&{J^r#(6C~zsW^}aMX88@tg1)N_aj6u;w|fd_PypM-rKD zdnq56H+LN|H_q&eN~(9BN`X-vxmWVMmtdRq4`5U*|r{spBW})7N(xD_L1MjmxsbRt|Z5Szfii z*b#D+uhaE7{K~uCS2-Ohk8b5H?rYy;1ae(RRL6%sp7P7E@af#duBEOgb#5@^Z=dHv z{_FhL^N@#QY*#iATu!GC>&dRAu8+0P+6(&0N6q{0QvVD&x$i1N1N-PDYbW*}@?ZKZ zi+z!8D;%!Lk1OGpo%5pE{hGf~?kT6#7L)GLXCz#o?qS$&!Cb!79*wqNwFKF7#8*em zNcbH8Z5?aPi$-Uoey+hY-qu^{-T+m-hZ5~GBR;eX14U&nfPPRDKd)yV01hsB?+d{Qj@%i7u|tz#*k<*-GUwbk=<=F_|DGva6I zT*u1k`4VGnzQU0p$B*B(o>gzv8?0!hqkQM@e8hTwU(b50g(uE}&%~UU2jsTCRp2wb zSe}mZjE~E`Jz0Ok>w6>E#nxWE$ipA9*Vr9a;eXiU_(fMqG#@yQ5Yy8`F;Bw~Gg1r&-K)FoF-|N}< ziowkRKK4WQTekn=x7}+5Xq{_<41g=oD<^hEJj}s5ermtw+OX%fuCX%mcRE_E2fS+K zc3kNBxvs`~4S(6gi_h=0AG+^7J>>K+KF`l(&OyCcHR)&3Cp|0ueRasQzK7EMn0C>g zlKs(*u`?Zi$>_?u>nBM-w#MI!&*s^4F3Hu%NB-ztd6hqca_Wdr$6R0BRYL$SyVc*N z*LNa%w}{><=sS4r1J40pnPWJ{xovwad&2I_c90yThh2^rpYM8lVyVO3cbogQSuQ`%lXy7T z_1?~5FXivIp*)U^xqcup*R>1!3ZmLd2fXXOy@rqagYea4kA++9=yH(D)g#XDnc8)x zc96WNzT8J9A2eUl{;bAH+Svz7a?Y*fV*1fi`uD)wbW6zHW~Y1Y+;^fB1%s^ zTxfhjc-T6i?+fN7od@05y4>39VjNlL@tQx7Psd7p_+=bDqdug|y4)9^*IxHQi;!Jk z#B;KQ=j*_e-+3-z+>o;` zzgK$rZjYb1$$jx}4&aDq?MQ9z?>4KRMkW_V}#Z z^?~Tl)_8xPKs(cA8(+5mNxHuZ64G&p*SmA+A%{;7I~?^V>6v_<3*-Gq4PAbJ!`drc zk*80)+*3}O?|ArkeAMN0?RAKNn@)aSF3HaT|vZub7q z{A4xr-&)FlQRH7;%D>I>yWU8fp)W#yX?KvGdqI&15L7fJO04+ zf^_y6&9|$`a*J2~Z2Jle^PJgd3v_1N>p7g`W9h`}VtKnz-h`8vpDz`Ctp8qYkOorQj!EqcN965*`f7rFg6-MPJ2Fz{rH+)gB(6Ay-RM)|ouA433+b{@wmy*E5j zh<^d`k(FRtU}`L6U|dXH=b-JYD?6n2m1Yr`9T zoZr3Czpo;m6KB9fJt92f6X{|+$EyzbvI|G>n;L@Vuf+!+IM1b=aK3Nxt`_Xx5_ARo 
zuGiw(=x`&@o3ylg;WK_Aglg{1EYTh9nqs(PAmRZ(DgL2|2T_yao&5>Vk57t6m~J8d zPm1qhEa(AKNssnHDR-Ykc(~i!V(2kKG3@W!4zUp|?C-%2g&gP|*rEHC{`c0FLpv1q zcVmWbR@nDzsCRkr+hf7^kdtjoJ%;T<{^xRH(BgEm9;Ml%AXFd{g zPxIO8WSGUNQ1_m+*o=gBt4UkiT@ ze3t(_@^PGKEIEq5s7IH1=+8f4Kb{913qGxWS;hYUGiMKvwyN4~WS|G-vm>_dZ9Qms z@_xVEf;nB(@9)FF)VfEQT*mkH_p@Yv#C+Qt_Qn+m)4{HBS>pY(AMmHSyS?^vGdI#~7^v*PR z$SL6weWZ)=Y!A~H%ZMHNg8Be{FMP(IwZ5qX7>;_2BuH;f(Kje`l0G02a8vZeK8sEk zo~^!NfpAmwS?F{2gV&^Qa(!`@`iA`mZn!)2&3c7r&^I9;+m<*S+r#vYsvHwzpe>udT3@%U%oDVn~CboKjQB$~bfJd0#L(>Gm?M|RZ+;@MO>x6ORK zH*q<9(>L%<-@rG01K;!w@|nI_;0^se!n9l{|F3YvI~@D@ZX^`H=lB~`NawaG|2$bV zd>IhHG5m?oAkSqMOm>YsuXbNdE^=P){Uf{B(^c;9#Xdh7zQFCLD(0(nOh2REgKpa0 zW#FxO#~}}Mzt-qo;^F5}HKX^spci4I_c~ACbtve)H0V7P^hW!$exz{t(x5l&J-l0D zW0v+2X+NL4tHnpw<6X!{etsIh-mgqAt|J7#EMMXNr`h9>-e;ceXS}zF2j$bw$Tl9Z z@-#1K&8I$Y8Pi7{kMWQ2v5&{`^+3_??jAKfgac~89knptRrLCd8#)`O_}z}q2}%Aq zeo?Pz|AP1rE^gIZ_n&F~r%ADf({Tl3Od9e*`4I;1IXS? ze#8KkPQ3F={6o`Vy?ZkMn#mQGFMX6$VH~V@$5uNP@ohLOeBFkpcz+l9r+bIB-b;B} z1xV8W^0}>cSG{!K1=Be<(Y{~$Wka8&kA|F{7jiS^@tmI>WO?^^m~z7QrJT^|dkgYs z&$?$!OAo#M$N%}(F0VK4f3fyw`W;(;<9C?n-JAcse{q~s(0zfDyVuSi@89Wg&>wUP zPq}?&n!ii#G!JFJd!Z=bi(I~FkKkXjdy&r%G+!>xTVDph$miexFR;hJYriL}aW5lD$0QZ9Tq)yLi*9RGeolo7X+~3$^etNT)lXY+K@JC}Gndk76*ws+M0s;yqb z?f~4@0^f8FkntZc#25MLa$e@=@8y%gp2PHvqcV{Z47>|@$y=7^OE7S@+a}d>qEHLqDP2l za{XVOLT_2WOs@AUQ{ZQk%ja0{n_ZsRkFRC;Y7cWAolf=!?Gq$}FD}RF0v~U(?w75+ zbb;WVAL+TS7}OtH{POzfJ*Iqr>X654zTbI`dN@w8#*Hi9 z>-oYi=lKEZE3QwOKE2J7lg_2?*Jd#ldI^}X+XkMZ!n*Rh4alH>^LO}W{&)59skbUI)2^KbL8zBkF?cX|A#FSyTg z_rWnle_Q0Mo%1JFPrdJ?b1I9kyTmeT-%9ICXXbOqBt8po<0C%LmEEZL#TN#D)qc^g z9k*EV$lE!i$6C|gNd#!0O4nAvq zQU8+M3J>Lh`E}2a+E?vT!#QhW(!ML<>C(7IT<01m;yl#iAs2*fp39xi0}Ww}rPF&a zGxb~A$KURS5QgJ8%}}`?i|%H;A0xe@b4QCW`Ha_l%=J{qqmEGb0}-C}uC?8LiuF_e zYI3LL>qQ*Xzli^I>4mQ6(l}R?E@UqO-}ajaed6<1tvd~Z?mCi2!q3aYK0vX&;pjK% zFri!X70oa79ZFi7$Lc$~GxF;(Lz|yF7T;=>KeKwOemV!C^-0cOS^uHfhkPCig5hRm z*bw60S<64P0}JhNY4p>+M}6L>d3$fPU$*g}!Rx!VX8Q`ur}Y`uO;D|RHBP~ 
z6kWpKjQ5k&j>CH=tliSb>Q3L|(Qbz=Xye5M$C=W-Nm;Ceq#=Ll<8wX#6W(vqFM2)u z4z9BJp|7kqfA}dMXZk~b^e^)B9@P6sC)rhQ-{^ahd|nvmnPs08?LN1c+14TI zDYYA-@^+IvsQrd7_Wn40uk(9iyO*ziJ{0!a(0LBmZ1(|6{ZRU$S@D6DVY>YMAby)3 zXyYms^p*7C0t+U6UEcqD0Ytssthim1cGV4Fy8ZpHPE$XvSDr54KPu%TU6N;wgToc) zLs_4TZs|4Yuc1Dd&t_$d)4S92_w5h8=GC_SDy!ctX8`v_f2KfB?Hsle^cUib`b&CK z-vLv9k?z_#{9Djx`|AuQ>HRC*U$)SE?VqupY&pr3zFTi5>l|L+8QpI|;|u$HHF3LC z<>}l@y1#CK>4Q;T=ugo}IsDl|{ZtR>U-4c2DIIrx-%M5-Uhyy2Cl5w>pZD@6Mh(nt zWYZ_{-FJ5DQ6D!pj(Wd((BW{ed4#=vG`^(!y#3P8JHPrKTxA)C4y-nRIPClWjc%Wu zNxzi!faWo|-kJB3(-HTr$PU+fpVo!aE28}lMLp3N)CcN!TGtUD-z>c_e6RCE_c~-> z^75zXh4X@M(T|&Lp%*T7_?BK+W)UsDup|$g9Ovg8IxRlEz}u&Z?=hG@w7?C)W}Dkf z>5jU?^@rSvPoY=DAIjg0rE@&QCw{5FS8eu!?_+j^9zs6rr)_`b?KD$)7aAG)ek{uy zE6BV0X{HeA1{o%tE=Vvd{NT>8~wz22$37@T5Wxnh<+{5GHFEiD@Lts9R zd)!}Q9KKRYwgsJxM|8w@s7-;t ze9oHVTqk6M;|#0ZjPGqV|Lui58@-M`P@lYS;-LTB*5iJ2uFDniAWO2iC+1BEGixo* zSyv4k!!D4Vud!UVOUunq7IR|`j_Fg&??Jj}(D_`h`4f)z_;29d(>y=IzsdKV`wU&e za}9)3zsxqSw)WAu#Cg{@0k3iepY3%x$)ob;>*Vo$6z*dQIq-3w>ntoUyWQ;oj!Sgf zXRWm@a69*q%T4P%k@qj2@4T8s!$ z51;!a{~hvA{B(Edz9hs`!d6BZ z++WDAd#ngYQg+8ZfDGdwX!#)idVP!O>A35GTHE!Ou6y}ZpWWSioG-{U>3nS5uU3t5 zegySpzu|pW#v>p5`?n|Fw_)w&S3ELs> zx<^*uF;FkO9}8N8z-R3femb_@=-2$bSkAH6S*Z_w z-F4WVjrcz&&}l5(}v#m zBQN1QdShFx->2i7L!bCKSe75PW3%m7^8WGg3~~pO@_tloKd1X;%dhWIv%N2R`1Yvh zxYNge%yz;3Q$`ot_1__`HEz{C-V$V$1OLYYL&NJyO;Gy zv45VR9y|nOaC3lk6T*}STFC5w{{#bEOOKKYjwdd6uT}r)JMQV|e{JJgx6wg7Y$1(z zy2q#1*5&C)B*m|NTw84Ui+rPAdM$i={4c<_(BsnU(*H%gt{;8>ne_qd3wk~D-86cA zHR*ZU$h3C4^KaQsJtp_*_-gZW`2^lKMQ`8!qT$W<9y7nl57*nShbcFUzY0C=u|d?7txQNr@g2VLBMtIhs^!KIIzhx5Sx z9%?oD3zwH`R#-yqoSV!i9@cz0@T`WP*IVzJ$2l_XLr|Y1OI}~?!{&C;ex|zpYP}*h5`91YzxoXw-dIK!hm-MlG)tmQJ*Uov5!KM9XwaU++O_|7Z|$YWU;+rj&yWoZihy z&+~lRzn{`?#!gr{t^VSSecsWu_<}I&B|V{g3De&AUOMc9W?S$Vunqk0yTI3LBwzWx zR!HNBsC1prklog7TW=7p^F>Z4^$}eWo>8s+k{CyQc_C@G#s0~{6_zi(ux`H23u(VV z-`5uBKhFYE^W&z~hV6Q>xW*oGh4DTQ&7N{S*c<(~He2cI3{LNm550}O4Ek~2 z%f^45pBp%5i}AU3&h3^y@dfkp^N3Zu5vG-o_DvlijmP*G@zJkzUO@XzdM8-lK~wK{ 
zfJU9CP&qmupz}uQ>)_k{prge%6atq%=>1%L*F6TfCvB4a={tS$kDQy>&fG1zqg=iV z|I;I$uO(Oi!th9LYv*tjME>Ycx@SasLgz0yKVmsN9|1LYsyPdWH=OPT)jo{AgGood zy{gGSv+{;qtnA$tw&-z)1%0g_4b1(kQxm|;FrCgn_bpgu;atCf4#xC%dHm3VkdGJ_ z#LwYAk53oWJ%6-!`Zgy-=LTix=k3qI4K97!<&*9IL4=#hZ&^JgmmH^zAMnYK+Fbl# zzQ^nCkGUSE+>z4OIO6)Qic>HItex{#vm>j?5euhJES!{oruJ`Qwn{+BVH)Gwa97kj z>_72|e4(o*|J?FPjyW$-J?L{i#ljKa;=k+fbioS4BmPNGi0>T#jUORbIwz5ySGRmE zxpsUlzC2@eRq+};0b6j+r!?-_Ne{p1^)9pzO&^utOupvn!GDdnEqtyY%l)aDe97{O zPdZnJLiA6LlXZZM zgkNi;+?@-(!C3Gu>_GKr@|Su)U(ZH9I`)6k$$ber=>xsDr|~P-NB0<5G7)x-@Zw!O z=XVVq1uk_jzt?EfEuN3>nbBqQTpvk(vNbLTwJH`V;nHWky;V+n*wgd-%3@qgYu-<4 z)mVo>HBKPE?Cc!RPGUv<-|zn7)o#d4Jmg`u*U&x>7vEE0PdbkB=<~Z>YsW%9a!&A# z^=N2@3+YrPTg1@PFb;Pv`v; zMrZf~)i$G(dh`T98Ua_Ekq8^(`lc<)~gZL`vF7CEZOeIDz-oEKKAQ@ z>VNg|ltKoV&$rIN-*UZL{B0N>+K-fEotqSWBVY^5rT(X{aRvA4n|_d-_bpsyc+v|( zzTQ^1Fw@DW8V6Rmw7+hCuD9;DU_OrOJZrAU;=D!`UuO|Ay)F3nsOOtWZl>aK1(xfP zIPWSuVX@w|I|F^HZ$1u0{WK1VZpnYX?&jp$ca`L$^sv@3CeDxgtaHCs*=#@3bF51|#K##G@lmz2U-G_3-A|uBiAXq|4`zPD1AM$UqdD6NNynUy#{MM+Uyb+U zbx+o=gWis{j3i zHh=$d_l4owE?cKl7sH%kN|9t227iJL@M7ccvp^hYVMVZAIp@kJRA_tUXGt@I8{I=1gj_*tUtQ?hrd9Y{ZA zRYQk^N3{D9+5f&dk4yOhIy#T>eSwb$B&PT8=zgE} zYh*bxT&g_k-Z8RE+Eu-%+%#WJ)ayZ!0k#{u_Cq+@U>rNH5Pthdp<%!+g~DP8tp{nJ zkoI#u;_Y&X=T-Y9tUOc>+;6_&N;%drU3%|w^75UJY(kG<{#H5FxyY=!J4AS`n$I#E z(tyG9(hRVY&R;`3a;R4{C3Q3V@?=LHBP|s`u@XF792OnIZW= z7_hcT7x&uElnW8me^Rf^5qzYVP)}|Gito_RMgLpt*x9mQsJy%#F}(tahxI^9Zel+# z0xGm4RC@sAf3f$ilLdf1#t)_3L40g4EGPD$kKR`YI*?GUeXprAh>fOc|Z8k40(nL?M2iFbYEP2pH7|I zV*P91r^a-ty(aHpSs&VuA$`$4%f#Xsmc6akRa6d45Brb!>AeeUdO; z@iDaysFddv+N;z(bRqd3rv1HYd~T6wUGO$O=h`dp%LMy$3^Q)8%8`D^FMNuJdiG7= zZ&dpbjYnj8?EAjhBPD7a(|D(IKZs>GYHP?x-S7K<%K73_RX(e~&K1S9?vteR#gp|H z?N8A2ed@nx5;PpRj_ak~qW;A3itUks53a!>Mi*T&L--wPJ!h4N%TGYCS^FdHH#0f9 z*%S;`IY@4leLSHY^!;Ee$8y=fWVhO%v!Xpm>r2|#K=~+w^DvF-o?v?3P~yN*0^?_a z@Z$S44*6U#E+^8yw&cVq^=odo4=p`4%M=Ync0KhQ^*_CD#oH6>`J`=L zj?Yh6n|Qk;-w#m9D~EiDu-}frhe{|POCOScT4#zF?RiqZI_CAs`Rx=*Cq0ewdpdB^ 
zIs?m(^9-)@u}MmdpTXa_zRXB^Twu~KbWW4rmnQk7_n~QhK>G&tot=)4b&K-Q^ZB$} zM6z&Bp;yr9eN)>1ruPxa4$sQCUqomumhou*t%gG55PMSjA;)4lpQ(8ehb>eYp1Vl< zyYT|7rM$1^9PsJ6QYkb-HsVzn3ePDa63+8QQVqgcMThYcU^>r%`C@qE9!wq5gHFQq ztb3652(ox?f$dB0$4W=e(OLOih^at6ke$e9fq>$C z5*885Z@?K9y03@cE5`X9<%ia%l%KVaVrwUC8EU8FvRZ~+pS%M4#CjeA`QejY`kpe% z=h&MtPpR{Kjd#iKg=IJ%I~jv?8}EAUBzQ*S5ozb)nPzlY9}LKF9zTisx8E)0!FQ`v zoEhpnNWSf|Kv`8%KYP?YCzf1p!qgyrsg^~%+2sP|egMi}Khr1pGE!uF`fgh+a8@nm zj{Un#__QBI@2%2vIC`JedhV<20UBSV4@wLv0LR-zaz^kjdG4T6Jx6a>?L*H4OCft~ z2h^W2@NxeEefqv=<56|*++A`#xKZsx(0oVhq3D$|e6b^2JaM<%YiM84x}x!PNE{rs zAMIC=KA?R_x`)PN{7Lj5@mue8PSV~X$1lV_Ao0_>`uG-=@2%1o_i23sB~&_;qZQCK zjjEoFM`XR>dyRsD6|w9uVz;(EY26+q^Dp1SC|)%$ow#!57I6rhMe!Hzp3Bw zy~r3O>49AoOd0zRUFB8M4h$ZV@6&Z$CFe=nAHi}9xxcqZw(kuxUVE?9m#AN3K%#Ng ze)oXj^KB}X`9OU5U)r}YG<=)(Sag(dsh{CHM4K+K=-3(_IQ0gerT1xnNAkz%?O&AR zF~cR>(6{y!jAnh;IIEJz%^X2b%M_lGt?Z)7#tDrtvPmOWO5zKJ0n=-WlDmPvO`;=RnkR!N>HX^W=SV z6y6GSv~MtcM1;$7w1S+X-Gugvh!1`Y!#Bu#5K)fde~^zw@E6AY0uw#k{nQeZokn_t zzK0#%AoCI90peNT|IWiyAm0h)mibb-&V|eefX|)|3`s~w8Z1=5iRsAw6bi@sV+^eS z7^cVi0mDT*Oel|RM=TGvUn%&LwG(!o{k|x?FO=4A_Vc5s+Vl{ebFqJK6{jj(_uzg` zmR~(5^aBB$5AJhDcf3G&aK1pD@591_!|{L|^jw(k_iZc@!PdIG6l4qqG3&Z@B7p1_ z`fi7>LY}|qQ1%h!hh;odXXHS;=KgCM+Ldi zkKd=Fo_k=$!O{7TtUYqvXRCEZ+HM(sOnL@=Zx7Fmp=+#`KCKg}y|QrO0gm(-W`m9A zbIbPkh|venhvYOI?_agAVcn0f^d#veoOjUCdamO=?t3$B_8zTMk3k`DoYA_H&Uf19 zxe;i0dauY%m-n_wy9(cD`$zA4>k;XVpcrZMX&=x@zhJ$l!hG^U>R0f;tbVv|`ylnJ z_+M7PXQ6)YCzmLHIPZ}Bp&Z5Dvz~(!=OgjzfO*9Fy*A;`#Zk^~Vt`g0+81`&5l{1lh9dsv(mA zke*b2eY!lyb^HR!mmQRTDTHIaOB@-3uyH%0iRrhi^xM_>?;@oaX?~!4mo~1QmN5Qg z?NOgD^P_pvS1H$#be}lV#XMFepPSQqGS2@iA)og9#4Gnp-H!#~{gL4?4GhhI{aPFc z^cr7gARRa?^>K|Y9dn!M9K8yDM$R0*{g#QiSDfk!dseQtybB45TqAF#t6j7NZw`q5q#QL!SzWtV6rbTU$mRz-|gB2XkvYE{u==Ks-JVN2=~x_O;AAb zUM`lD>Jcw@kzn}8%ROD#e;LWMj5i=AwijAhrQltGVEz3N?wccJg7(vRx*xgkbO@JnLG3a_SBI?j{&i}s~Uq4Jn6 z%_F#8LzguNsUS~k|0rA8^Hd)Cu6x?21e>LQO1vqe-q!aYH_33i_uzQ3#HHYgdZ_ZJ zDV0d&py!~Cs-9>ri}^zN6XXEaM<~BE9!URYB*o7!Va^^yo?l9J%^st|Be*>bQ zqs1Meo6?_q2wO@IajF)z;il3w8 
z@CJ#k_>ixN|GxQWiirPg{@C6oq;UC6IX}`lCHk&)yE?B+`;#cA*dMgM&6eV9os*|? zjP%|P?nj*znqB(k>qK$Ice0^s*7rBkRQrhatLQQ8H;0j{=-7@Z0wkZb9?oz|eM0g| z&s{T|*!yxGJOO?+WQOHNeJ}vN@8$*Y7cfL00H4mUS>Ib!>(UI@naF0vqk3EC6+u1+ zAiU!eIi9PPyo!A!6%UFIcILA}U)jg|2ndGN-m*B>c_)u?zu>2OWjL2ZDxlwI2~XS^ zk2QP}5=S5P1yl{r$E)#&l?6xRhU^U*Ul~qQgnQI^MfyH1?z^EYg?7Ots+EuSzkS@b zS0ERZAL@$^$Eg@M@LR|&_(JZLo++1NZhf~I^#b-kp0^bGN97y84@{ny#C+}YL(k#L z)$_7eNJGdGOJ9`NE=MXH`Tg)OtpNUjBmHiz_s$E5c87bBtoFy<6rU*tLi7jV#@RCQ zYN&_k2jz#7rbztPKLw{bT9h4!A1p6X8sdk6l}JYel>?88`z z;<;t|N)G7#2(0JJ&_Tyd5l!gR$~u%#>3ndm93AP+W6+7%#TFeq6K_3zPn0})BsOfyFJzjx`H#8xifS?ap>FN6^a@|YoWP5skh)2)I=^i3_9@4QP zBGOCL9tF1Vv~ zU*P=$b&ho7LMc$RuTT0)naSeqQ88|y?n2MO_!xln>eYLy@%F5c`QqRPXPwhmqP@{P zUDCG*&#>?J!ZDKgz5t|;BI@(wUpWrYFQNQYdiMCpxv@YhLOQQUdJ5;ih^;@d{$Ga- zt#P~^Q-|`dQ@*l0)6}@lu2gi0hmPL!#PNykK<~BVIb(n)t?v^9FXC!wUeSKauRkEq zA<=tW87`TTZ`D$fe%3-+Z;yCu73|N2vfZrbhpHS@ziQCiB7NnDoJ(Xm)IC^eFPsA@ zlm01%_RB`B>W%si{x^!2i0p0WN=_35OdrD=XTuM0r2sI!qP+rIb_doI*SWO6Tcqq{ z)L&yj_rZDwot54US?^M)u1HVKCurZ|Pb{Z6Ke8G9c)Ii+JME8Pf1da=^rxDa)AA)h z-LuW@<&gVl^7-l_$%pY#PtbeBs1L9QDzB_Ni5xf|$H4dPg^GZ~aBK-NPs7CL+baqx z=vX5;f5UUCy_kyRM-I|A)Q=tW+GP5YP1DC?ljviV@PA(ajEVdv>Ys5zPjp_d0NRD_ z?Z9}0*T_D@dL`1|QvbyDoc9Gg|HS>tI+6ZAuHPzv-(ysY^3waA?H9;%t8{LqU44%| z!?g^HY2BA(t#|VT8Okr~-Cicd(|WHRpKlqELo4`b*9dz9eCsir;kUh2zJ`*p=K zU0UB}sdL|rOJxU>okrgorhC@W9z=P{%9DI#5BiF6RD*n7w@UbAchh@eR4!Z(L$g@# zH{f`}25Kx6(X4e{R;Cnq>%C$s@7*J!J==RnrLWHEW-0wa@3mllSVB6Vp5ajEU-rs; zh)(Bu(0;`ykhG4b`=oIH7Sn00lJRKWG(k^Bu~lH)tLHp)Z&lV}xo?U3;UI+L_{M%} z49WJR_iWL=#T2r4z#njEN1Nb3seHN*@hH$R=H*KgpSKj_h0PxgsBbQSNZ{zbHJoR9 z7~hZJIj8m9{6fi}i0@_KqyD6G2B;@Zh|;QjY)8btg-hYDluIl((y<+U3uXVlSA49g zZ=u|$k^SNXjH#fFDfK;`_cR-zQI+dhuI@zw`uBjfW4Rn}sOKI7o{r^e{M;iWV0?Fd zJU-^L0)p_E#AP-*ouftm3mfD7_WXP-KU3g|`Mn&D$H)9~RDP=>aXRYxb{Hq65Qa6T zbqCEG$-w$V;X5Z$o=gaDRX+6#ri<~8f&Aipi1kP8+lGH7M*nyQ{)KfF@LiDy>49$o z#`PKcXgA;+#>XIi5&t3;A22$yOHh7~%z*gdn?P?>KF+5{K!&j#B0g?pf!Wy3FLd~xGE_;tM~kGQHAt1?3v;0JphZfKOPun36_Pj9V=sFkKk!Z9x6AFz 
zP&VH}IWO4xAO6FMg#1_)-$MDGik-jsg~a^DFQx*&ycdV~OQ9Si(BHf~c&>BgUZ-`= z75VV~w~2h;I(b^ZQ|!AN@GcNGm#CNWNgty8jydF+4JbCeM&QFajR zbD>;g)-*q*DSL_bQ&3JZJWa_jof}Dp@QM0v*Z95_zU$NDXUf4F~xf%6zUg&$OMRJlk%F;9u~DIb+Do_E6h>3I@;Uj*luY>0~XmRyrL@wAInSUtq(Q}vh{p0!;P~T+(GNi5$Te>Vx=#uXZ26zHa$L; z3;qXrkaiy3&x-wp^o@&Thts||^*6N-^*i-5_AmA;l>^hqapFtI)UY2jg&$V?-ocCH zIVXB9b5JcH>HK%4O)rXmhDO7FrSq1)&GP&m?h|5rQvcBWnRZBIXPqmfd@GebNACrB z3}t67ko?&1SdT>G>0Njrjq-!#crU)c0Uy1WN#mIGtbMCY?k6kf_u&564%*l87K(JJosPE%Xrx^y{S5V8YihT+T;vEngwG4m zQ8>w0huTNN_CQD9)8pgGevT3AOW)_Bdvxd=YF24Xq(kSvaDD($wC*QQhH_w5LA6dq zI~Kz;lHL;K>_`nr{}R9SuPT(~_lRUcJ_-b;_fy-ySS-UKQEW%L-`Hc^AR+|U$#}Wd z(hqKuKFK}V<&}FR9mfrp2g{A+#PvO5n#ZBILJww$P#h0(+_m?}a?|=e!z0h9Q9094 zxWQpN>*Z_`bexBf#eN^CXy=6@*r?nseY$^@-dlnDghlq+m9$YQtd(g zN%H0~q@Bg}1kJBmrPw<-9+cc4k@?~{!g93VE5|9#6LvlM8{i>1?wH*pA|9`h@o65; zDwXfq(YOpoWjM8WhD*MmLHm!`FOV(Xw=KsBNpx$2~)2H@M#J2|d zW-30YKDGeukDz_$x7l&@K2yB?q~4(Y7o?+T)4q#++zdfIa6Xxe`7M_D(LIg9-T8u0 z3iZYOVP8bx40X>%`yM%8)4CPYc?kA_>3k*8ab1EA!wMbWqY&f7qXYW5>|`2E^5FGz8!KbfN?Q(=kVSeASe^&&G14R+-eg~YN``^^=IJHv0{CBi_=fA1l_a|ug z;H2`d^xHou?>LW|*iOap2V5)sGvVJqFZWV!Oq6?h{*L69Zec8XM{m=Yg+Wxxy z|Lg6a^KWYZDGA!2?lGLG@1+6uLG?Y#J!)Rx9%*Qj{J+1Rm+Qlc@-E-s{a57u?{Gf_ z#sB|P&%c;pe*HhOy>qGXG-Avyy!|{QZ~Vl z-$NtqD6|`J9!ERrbKrZ7?eG`u3!KE!hjfH}1bQFSqx-ZbjF%;X-H6Us_7TKKdzIGb zt&oWrFP?>hk)G4j^GeGu3d(fO1%5yL^i{F(qQdE3qyM>bs&S2#2KPbADddcmlkBF6 z^O($sp2JPfhpi|3B?f*XJMSu##|6j*cU3|Ap_|msQ+!mO4*D*{Ju)8Y5t5Jhvhyzg zK=Ozy-9+*zpTp5T?vyXe<$1vGF$zUI>hBM%Ur0X5ekb`X7Rd@bM#$%$MS}3(rO*H8 z%lpsIKMD0AlZ&{$_1`C_i~mh>>b1!!)@v#BG(K+<_nT#yqTi8@j-G$5gK>o6v`@Me z)TzKqZn7%`yQUw+Jz~IDh3Qnw@TDO3h_fS#|ETnFz5g7pFVB^@8sg#5#rJWJ+tyWS zH8Q;Rdg<@4mU^@rAo7>OKYYU#&oyiD0Hdp|5T3C=d9nCG&p#G>;1u7;n;yhVA!IQ; zr>F$qdM{V48%m*_M7_Wl^-}f6IHF!y2yoTli}Ju9Q68Xar{)-S`*lCBG+&`A!{Q_hmSb(XbUF#O{lYt*=nc;>&aKSCY6X&3CsJ6mC^l5yRIOhunDlf@d znkq+zYqkg{IYd1(WN$~n=qNqZM-ljj(sPx? 
zj!dy%iuw~Bjt^Y1`_|wrATj#1ZUB)ow@2=$ zU!8|kNUutGhIPJRvBD@k;OHKP=pKnNJxr(mlyTW^6_QTvPtOTZA3*g^dJMI{Pwk=h zT~TgCdnmoV6xv6$hm!ln?$bqkY}~s@WI)g1vET6hhpY4DxiZ>U$NFJ@)IK@JBE9;4JFZzS>Rpmx~hV?-C#Bz~*({pk< zH%#qD_pWqoQTc!zVLa3m!iRRjd_#`EpTu^i`n+HJV)@X$uiO*}w)p#&=Sn<5Zd83O zxw$~eO}^SM3(9zDn^bxvH`^q(O z+3I;G%HKJF51Q}?9F-q;$*>%P2!C+?Ck*~Udfs2I>>ISR;ttANDF-syx#g-~d{wgk zbk7vkCqvy&N&9-`lFbOgcLy~Wc0ZNl z{rWS*ahAx3>QDA3$z#zf5p1njFdRi75gkM;Asxq8B7U6W6VXi@o$NTYLrTCwknpJ= zP@iJW%TXA>Wp9=~+MBDvAwu8khb=N3^$pgeT=A2gXxB$rAEKiirh&uQ=$9|VUs66b z{j2EMP5}rHD|-O#U@RiFE9Q^#P3wScKLBu4-)g87sy=(apj=bF*)}@^>0~eAd=Pg- zTZm5MfWGTQ=QL@ZhWVKg5yut!6V?qNj)JcA53M8Up6Fa9-*ldd-p6f)v_w2Pofx@u zfdI~W&$UrvdT-n}7YYE*Iv1w+>AXC>7nOFqiU;Xp47yJM&wrw$djLA35>vhKoSF%w zA{hfyD z%9Xu@^tgUbGs=~Ir}xInRezB@;CdFv7oKZ93IE6*#&gMn4!)pYC%Q|n7r-DzJIM*Y z?}DWwfaHhHF$()ogv){5apkC(LvS4?<|h=sxQo$!$ol+|ZWL8XCj0)<7*y~W=mm6S z$KZVYaiFK&An7FM^qnMX&+&1O&)C7D49o@6EtbAVe1#G8AdI_sf5CjJ#z(nQJUks=hU`FkZ?asCtN3|d z8Lr3k8bwOaP&|*ZQ4|F0;neCedJ^k_?ThuSkIINRkHpIfa;4_CYT!jVDOyVPl0PQo z5B-DAW2_W@RtBbySlTo6oJ^?~5e6%tgBgw-@mo)C>>$}a*-B3>hJHkSPV{&?n<9Re zb>2B!rYHW=AIQfe&)XN#x}!pdQGa4PVft8K6MWhSqWQE)HoFmSmho`j5`4-(sOBBw zJ4p7yCdqG|M~+Bbq~wI#ada-!vWI~kTawlhmoEST28ZJr>GYl}&0`rJxxUIUej~;s zja$8a(+!`x&z$aw#&K2v>1DXjfWIKes@ym~U_SKzuq9`mtbB0A0tiRdUo ziRh+{epJaR)^HJU(^NR^%VRqTyGNyq z`D=EMjjq`}ijL(%y9fINM@06;vRtS~F{@JWQU1bvBpuVYk4InnZrPtu{{mpiaH@V< zDEYDcSZ}gh=sdq&&xw9g{br|&eo8{DBNV0p{2^5C*B zkUp+Iw0=^0jn*I74_J?K#h<3aX@A@5CqzNi)6es6;AK*V>l*B zbhL*wJ)-z0+)u0K9n>EM5Fg8ja+={>2!CPRFB6{i9ZR}ruBcnm>AjCuh%eeX0UgVg zh+njG0)ElX3FxLRoud#L^_V?>G47!p=(1V-a-|p1J{9d)Ci&7nD*fZ1mOkoJ(T==&~=AzHi|&@#A<*V*0)=*#{xd4AEY=A0^styW}ecIl=Tv&x9)^ z9oqp*O7XL?Ac%n<9ons!&M}6Gzvz0&XMOLdMq*!dmiPf?hKQfxMr{ZAW2b;4d%&_& zKtEzW87_<=?I=;MHUW)n)sOUEO8sGZzXshWM(?|qLcOt`RGx#IW&CoPtWi`gF}+{e zu|PI>$012q;vt^TV0n-V-obJ;Uy)r)?TGs!aW{65>|)<$EIrg0>V?j?Nnr_%BES4W z`QiFxo9($NVvIrIx_u4nfJHJs?a$D844My-Kkm|FbncSQ>1RTqED>_0$9xq%_`|v{ zg!p_Fc;NWks86xH6#s*=-5sHh<^;iapY!SEAzqew+4Qef-m&db&_5lk$y>%G;oeVpA`Tp 
zL)~w4T#Y|m7m5B+`h@O}p>seOj`d1JhoU5;Ls1gaMgJUtKydc-O~91CHIGBSTL08w zEa)FdT=Y+y#Mw&UVSj4U`Vf7>Jg)eWuJw^ld zfpF13s-CzG75xLg=pWTD56zV2LVMQcV)T3klSP$*c0=67j3O1E_I1EeS z?qjN*jzM_}faJUFY7s!|p&~WzLr!|n+Z5r}J@;h4W242L!_sSuB{9Q;ttRCUt3vC` zqCe^Ui8W9maK3!ma2_Knu$~_5 zpUe_u$ge(ec!ms*?>oqbqkA23K0%k^*(ZEl*T?neLF;^%Z?Ss*sq6vV@5YkRIwDF|m zwHyl%&RVxAe!6df)~nXK%_irXzO&J_@ucY3jyRr@!N=$6*^5NB;vOp#^9MTmVI_yu z-e}KYk?PfaPxqaWy@=~`>_^LPRPA8djS!CU>3c0`Kcb`OEv1k?j^l$11lcH3;{^3G zI{R}EF@9Bk7zVqY)^m{ZVx)jw1@UlREmHC6`C2Iy0*C%$$1I_j$S%cu8IaXu$N_`p z!Sbdpfi5d_Av(2lu)z#A zH3i$v@IJFH)KJ}AW3FDcqoFF)W`vh|RV$mK2ZBwt!G>nD zR`B`Fvo@Nw=2>QbZ9{XYsUcWrhMJnfO&iP*;!u^@($EwNR@Maf)`iTgKY5?5P=G?m4y-mTU_T0*FQz#d!60NJPgkRyNW~fdu3h6f2h8s+oI~Kgk zJbR-VZ4Ndyn@g4m43&fin#dd8w=WuMHWzF({m81I97fh=s`+jy)GYe9ZXupG!9k zJe731BmcZ-Ztq`{^!poMo!kA?-j8H{Dt7ej|KX~FuqE!`s_M|nnow)8-$wNiZVhQk(y9^MshHem#sd)u2sQ5fC9s#-PNVbHgPHkd0+OD|9eL`CIGdZ8#H z6cX1AE6qJHHuu#wMVm$HW<97kb8iTAPq@jini22`?NH)HU%R&%PLGz zZlLNbwly_Xn4%D<8)_S(%}p&rIh5~hsjaI5wOSRb3n}fOeuCGA-e6s@ zp)wQ+w!@SWsR`AGnqVSP)Cq)m`@tjv)epBeK)v_2o6+`YbEw{oG=;01g7quS>TvT0 zGgwt$+pvu^u|Q^|W$*q_`vw!{oo3|1R;h)GK{H~}075aT{8eVKQVfDPlOpK(#e-oi zgZ6BIbX#j+9;>T`VpV~n5V}>B4#ri(zS?TpemL2{e5aCCG})TiRS=~z6om;36goC{ zxnKv~Hlc_nWL*=Cf&!1ipJ06iCb=kN3*C=;J>HcdnAlfPsH)mZ$eWVFiI9336N7pL z>rjZ>VIsG(+g%5B!!Vd+q1i%JE1=Os2WrDDQ8S8R5VX?Vy}vdRscoo+s$!|PN_han z2Gi;fD@n6>>n1b`pg&hw-9n+tK2Q+LD2s0+P@(nqq6BPh6|zNAePQ+7uCVF}S&)6T zV9(TE6{@n;g=j1Fj4X0fG%i8*&gC_ZS3<~dld3zkIQ59=F(nctPgWnGIb|i;1lEHo z29HX`Vk65+FKj#*+J%%3<%FXNh5-x(=>JVp^38@&s0!prbWgOl8abk&#+DEaTF`gf z(MsG?TOX<_g7K6iHQ!b>{I{z{+zLVjcFBI2B!y2&FVhl7!w>65rHYD-u{0AYs>B;` zC^bkk2jl|mvA7nmz{$W|2ICNvGunM9juq6Jlo>Q%EzIONVE18FYQmsbZFXy|Qs7{) zf)R+ysi8U~>_TZlN$WapCKZR8>T9D>=;E!RhT2e71Bz9+DF~6Gkzi$LQ&m+HG~1>F zU%y$?=dD_uzh>>a!200cO0b~yCTnS^ZK!Rom6A%W z6z@f=l}ImA?L%#$$`;Ys8?449TM7VT+ajhe$?yVYVAMU5?-$SKE`2+U$tFe^~3xLz!l z69S5eZ5rQTZV`%hH%=xcmbTsj(g6`fLv4rIO_UUg7ty3=Q7y`rx?pWRl1pge|Z9sIyE} zD^%L5@J3u7sJymbeeJMwE;-D3cG$f*q0zS?>itFgKcV 
zckbA-ZP)H?E9`lv(w3TF6C>>jRug4wT%T-eXa`li zyCs6AQmAS-sFXL!pM2`d#6M5UyjeRU{`gj1=-c;qdNyu;AY_?RabAcH{?g zhQsM{dyJtY^FmR)>_oT;j7YOma1-w=*8 z!lI?+KxZElZ%`5~5k^}&c@6I*!{sCM?6TOzO{r+|z=Xr(`5nKm#fqiv(vUi(_2 zVC{sOaeK~gK-rjrd&M|ITT4+xRb)=U3JDS*ge*ZaktoqBFl_cVh4+Iw7paZFpc0lF zEm1;ku+)l%p^#!T&1?;~fb}61vPjLWs)cC)OtE%c*Tj)U7B-;R*TFT9FWgYqj@y(W zwbFyxnG88Fc57SeZDDoca0I7s5NeSL&J}g2Nz9zk%IcM{IIXX%ZP*V&7mh50c?;T@ zDutfPRtWIsVu zEE|bElB1AXOFh~=xa75lSiC>PURhI11Qab4m2_@Pe4`ZiUSXFM%Lf(2pY@@7=&mrV zUYq33a!UiQSF6E}f`yqR6-Dd88ii>Mb~wUKQb!AYZ03ZclEqxM#abj>7!q=7C%~ei z5RCqk_+IrcDaAHQoPU#)VVE}*EmVaj{f-9MqgL^@f}Cy^a#{>xx=Ajfw+ZnqMWGaC z1thR(D@o*zX#BTD1deUE%XlG3V-X1Bc1srHzjlH^fyq}#Sz}0{5GFM}?y&7SqX}M$ zo9mZ=*zA^zlI<;Zb&%0+B|7nxFF>K$rep@md%|I%7-5S^NX%vsm~GZF0y&Vf1;r&4 z-74fI3UST{y|@$QLlp=aQFt~faoGvdf=Poq-hsQRQbJ1N5)$7^-xl5nYs{j3=L@kY zLa`{Z3xtq>4N7WoA1(~ifUWeU?5_Pee|Ya zYx#jtB}~>qTp7a-rC6T99wiL#+IrBJDD^UrWQgtF}Z#9lRS9oOUlJ(F6tcTpH+V*m>HiUxUBOiQRJqZ7X;Gv(n3!d3ea~ZK~PlE^D@PU%={eFsZ z>l%1e``z%LuU{w@r-xp@|GG;z)%#vk&kd32$iDJ;)gJ>5f4u$Z^Kbm^oZq?V6Iy;dCB!oAR{gJmF9(ic=MT`dS7rq_I%%uADMe_^}Vho*}vPRoZ}{n>O<#C!_=en!nNJk|YJa5Ryl2veKmD_Y#;-2v+xNr+i`HgjUGme-XHDO8 zvT`6=^3AD$rY!imnHo#uiB!Q8-$--c(Q*Gu$C!W8ltbF&5bIZ5xICsaM z-3fDn60EGO303X~+dSH`HwOxZ882H|8!eB5@tCvheWSs23DK}fwpg<1U)nZrqwAS@ z|M}gwpAyeAfB3{5PtJKaa^DMYKPzz23y(Z{;w!Bu-+KE6fv5g_|EAY(zUa{zCteZw ztP=;j9>^a0#EKJt5crxqO7e0pyWpqWPrM=U!#}w9ft&7c{9N$F34wp~@z)l%p8nbY z>Nw#R&v)y0w-wyH4u9puEP>Zu^!E8{Z%zN+k50@NxTw1@ z5dN>*y8iRTB7vXWaCq^Lu512<@y-f?-?;tw?LU6z`kqtY$rt$5`DbS^jo);H$;&Y!&#-bDsE*m!2KGcK!jJ))ybY?wvgX-}ZyW-+uZF z`#;t5&Se5$d(Ymh7uDVW%Wu6?CGcC%mrpC(ci$I(_Ky6%)RI4a|5u+HfB7%3ztb$j z`(r1+-21@852wESVSzt>-iPZv<$G^f{O+{^@AiK;zvAR0KRD;z8wD;|@~xY1yl&TR zm%Mwcz~A}EvdE&=zrED_?q>zQssGjfFMQ{vdvAL8Zh>#Ty1Db2@-K~l>D~JUe&){J zn?4oU`pw7Q{kp*WGt!Qak9j}#(z}lc{EfW4P|4!HAOG#$#{|CrV8_rCFQ4<7Stp+o zIO)tYmOqw$*()ngJ}dA~Z;94^X3?TA?>PB_z|((dT=VCLZ+NHj9kN>{y0hiERhH-7mt>bO$&pC9OF<(UeT1N5@I-b6LAYjOY36abGtK;caAGv9_ z_$rNI)Wm*w-`#=#_*uO;{QzezPrqZ%lBGpo=rrVex=Y`F?Ag2{m47^9Y!&JCJ}`J= 
zAoQ!g@2Kxs{4#Xkyiay%fOxLe@GH?IF>{`c;@=Y->afiL?)_ZOS)c;ckM< z)Bil@FDE*8Iv*4Gp0>+=dgs&TQ~R7x3H;LT1s#96WOnx<=d%KD@Mqut!oxi;-|2io z;M>0To6C0p@S7=Yenk-uZ^W^FCYfjn_YU z-^Y_&Cj?$`ZRx$~uV)XLE_u*%S>DaRd-d-0JI{1Y62A@EHnw_cI^+3HUYxa2{u zP5n21_H%PqKl{AvED`>tQ-AaITYfqB^KZJg3jAE_OW$fZe%tTU+&cyS$L5UcrrK!B zdrcWpXXngLD6n$Wk&$I9S8fi2Y2GUAE&22OmS%xy=>{e>Fode$ft`BZ zR4+ECVL@$$Y@n?GxwPevIL!*&Ur~=ZBf*ARoL0?XvzaO$DB%MzSe(J(7Fhgk+_+$) zc+iZ`Xz_V6?Y_f9V|-??0U`-H{!kRy!|i~FQ}8Sro)nu`=j5cWNLXnw)j)jwZGOK$ z&%esQ+Mn-V<6rAv=MVVT=lS#U@>b=o&dbkRleac+U0xt>{VM;eyj81KtzMPCYR#&( ztJbXwtXjX?zdCRADmYA+zk1E;wX4^y4y<0E@6XT6UzNW)KR{@VO?`GNfPYy4~S z)~s5ydQJYCHEY(cS+^#zX8l_K+Pt-^)~;Tgzjn>qwQJX{4XjOg*AO<-+cT_6xxzaEOX9yh*?x_!Yq*uEFf zOkmv$hiYUmkSQ)h@DYzVn1hQx5GQ;dF0temF=5dT&mHlhEi4oTP%Br-siMJS%c?N+ zYXfW`$ioEi`~VDgX`kz?c#QDh>4e_=m;N~4)47;)qqF6A>B^NWFW(!ghOHOar^MxV zPKDTUQhfwN1)DJ__R{2WFgQzfritsGD0F1B7M@Tn!#)#F)zqS09vc(X2_pn-bEOt0 z1i_{h?yJG`ZrCWmT?`mTMv}wfa=4wI*n6% z)HBcCS9{YfGs5M^Uwi$My>Fa2xqHtYcdpD`np5(*p1Zzy_tD-5zV*msNmHg~WNkQS z+XeR?ed@(sO*xr!T?dNa@5w%%3gP4Un6EO4FbaTEmHAM)h7Q@6wc(DVxt; zo^+-sHR*yRhbP_TiG6(UX@%a@*u7VrzHLfs(zH`HB&Duh;hq!w=2=y{rxvE9Zr^rl zp?CMR9m%P&x3;Hdxz63O&NbbenzTMS_0Zb6$!EIeU*t$zHSL-^_q9xkJ@&DkmD8^E zr_a9W%ZJas>zjwyCogwjmb4^wduop7l*4_OhCbw8pFE=gMdJ2x@3lW)o^s^3hgPRK zvXZ8|y@zi6sC&O>nkywa{g#SzQ<~3;y_FjEMl!ZvHEZgusii4%V>cW+*LB_Iw2W(u zvy+lyKRd&7_5w%5=bGns9xBM5vBBdwG_?Hif5!g0Y^OWb?Yw5j)}0$;-#aVG;V$u< zn(sU`eTBPf>P4xs2LoBtR=87=ozs(IcU&{zxj)Dao-PEl#~Q$#A$lo}?saa*{VWWk%}!DRZaJo0dL(YMMLUHFM@EDYG4O z+?kHKu6fC)I_5jGXPd4wT)rtQ9e#J7bCu($^GnVzyYKV<-TAiX9p}5QlPM3hwSW8* zNBpH_AHT6{{)^MoKD6`g6DxDix$N@tm#_WACvU#xOJDicw;zAv2S5DPD{w=hTL|>J z4QFoLao*+Eei8z|{;h96@x!N|dF55Z68AGv-mj<%U3>GLpZmen&rF-KY{Qw`c9dRv z*%jqgp-`hBwn)}-84dHbg&)K}K>*n3nErUN8`pNKfzlFP6 z&GOR^zvMo=)qAQtX~v=Zr^oK|WTzZD)iu}aaOb-7-N`OTa#He))Z(<6$tB4y_x#ip zm)Di-a)KV6>h`#%Bsr#Mcy=Y9np~RfOqw&b*uBN&1N}53DQ)Toch=%^v)+B>;@F_) zaGz^l(&2Yp7bnk7$xK0syD}*?X<~=aJcfOtZ>gun&OJx4?($k=euHe zd(U#Exz0)sc+c=0J~<=Pn>)kjT9CFNEq0^(@a=P_WOUx@$@QEGqL!Hwd-U|?sj;8W 
zJCyE;y)@;|pLeZGIds{qSid*+BTs7PnXc5NfOorhYEtu*(_ELhFHVVFlQ}|;sy-92@VJMS*{q34$*PxW|W_og3uGudG-PlC`-xMPpHPIaYCn^Zq5Jzb8w(V(Iy z@P(qVr-sUVLJ00+?S=Cr!a|y8gf@hwu{SZKsurGvM!{IlPhgG8-C^9Ufyb$^U5XFW zt7;G6)C8x(aDyCXJg}#)aht}?o(kjgQ+kYlRJ>^}73}2iNCj-nHI8KUnye)9%_?kRL1_yYj9J zioy#n{M;jVU1&TL+#UMKUAv9v7wj=ceqHk5%fZq=yt3fpq0zf8HjURV9&;SL#E8K0 zd>@z_PWaDJIK`il?g)VxIh_voLdR*RUOHt%N{S=X?MQ(s&vS?7IL+yDOhN1c2FEPtY?wKL8`AbVl3l6J(;R0) z+^G;V2a<u^p< zaloK)v^Y<99B{dvDUKx9FQE~jw8@yF)0>p)bojIL+TL6c~>0FECu6a8#H^QmxZ)J5o(&u@m$wlzpz#6{An%5|)URGm%_)O)$p>v#>D%>gebq^GCDD`1Y79iN8FH=vg8 z99Y}l1L+&iV%PR5dF~F!+O%a*&s0|)q?zp4=vw4)c+YlBb>^qQq~R!cVN*h*I6mia zc{4=IIvled)017E?|ZQ_bFfvRuaM8-{5_N{3H(!?C0?XliR}TQAs2Kiyb$4Vz6m`K z65;5A{M-&RH77~*RFc!R5*i#XEd=fhXG2jSm8+5<188%QHq6RlK=0;zJow9zlxBb? zF&yW(;TjO9vC=sQUQ&Z+#9ps6`84;fE@Pd0mDe%dG27!vgH&gT6g^dr9*DEi4K0vd zpKMgb#-ueld|KS96pL*I2h}@ME!*Ad?GqW$He0?ggb3eNBG;)d_nj=fPlXfT@BwMg zj!^#DGNlT|NAz(O{%I(O{r!*_lmlL;5YAqX(O=6A?Vka^yfp^it5JODjRAeW!gFl& zspq}lEBVM* z^9B?S+{M68eT!pFzF}?{KL)zIO;WHKN$0zbbL3C)_dLM%@f-w9ZG#LC0=C!Vn}C;2Bn^ zVLT0ZstrF6*ghWr3fSHb`9*GHuHrumKB-eX{Q;npF8rL0|06)BvOER!r79itcs{(I ze*yj#Hva5lx7B8e;-3fMREBQ=1JxnK;vEDmB|#>B2(W!#I&4b^Io^-|Mc}u~8Rvf$ z!f8xndT#?(ulXT--}I+l5UI|=x70^sNGc3V0C$$VUwL-=AF zKM%hJ!f7lw0w2k{o!$U+sD)C*Iymo|@DJVnP#&1pEcZ2A`H=Wy zz%X4|?m58r_Wv_r`#7DuN7px}mBj123{s&!4gj5W1CCpwUj+1e0zm)%MREP|0MJPn z|JX*~0en;@JN*ivldiPW_u1&NuQ;voXdjo|56e;SERE;?D#Y`_KXh%A#4CPK#+xiW zhDOVK#7}>p@JQc##~*p0@N_8md*xrF%KseHn{0?7Xm`?;cKT0%PP)QQe+uX{*V*ac z1iC45H;jsb#O*$=KK?LS_{gKO{F8-us`x)Ea~pXoFZ5;rrm<56Nti0kPG1FrPIOG4 z>SmXV9-yP+@ldwW)u?d%4b`<=Btg79$dAgHcdk0=eh3_KF;Ph9{+TRZ@0^MxDUeZ`ZDJaPRqWs%L%P3 zCkyB6&$8=au>h_N(nHs#d|H1JJ)-Diui@r_bmSw70Ug~q_|z`sBfZU~wE63a%k2j` zx>1OqNd66#K890$$;Ws(wtNwj+jP0!t~&wS%T<%=O4yF$vt)UDGNc~>K6LxwGCjMU z+6tKFOq>hI#;s8N7{{Jpx1v7*@yI@IhpL;%2<~m5FOWaQsSna;b~>(&i9SE*w$?H& zK&Sle^aGR7;c#6-{;MXTe;4S9^8Gu|XM>&-;<{SeSkPryGN=!wcd%|s#;z>5o4w{UZ>cY|~muh06W_v^&xFK?SG}?DTS=pRRI#>Po%ceg&Aqkfoc| 
zlZPAob-QdfV3G-pzYs9#62t+(XW$Rz2BEz6_Nl3JTYJ99@;KPz_IbXLmFEEP+vB$a zw%aj>0WYDE{nC37`0aKm=YI^ssr_;PitNajfWBDef_*}I-A+fl zm)hoFliRX=i-De_;@at#0gcLX6!@vH13;%Xu+wqrO7R+Jxi}*TZ^AY=wTn0HjVQbkkMC!f zH@7t#;!QPpNw%)ME?iwMen9AfP`P;P6W%WYU(4B3{j4 zr@@O{@bX%@xI+Q1=7DkuR(Kr+UO|$XKxxYH)yQ)7Mr65qA&wX;BkhtABGtn?D;B7% zkCa1=VXdG9oAhwE5^M`Ag+_RY;a(-s6ketV9pMPWed=KVh4-x%#jxhDL54dw=;1vK z4>3H-aOM_0zMtX3ZF+bY!@Uegw(I=S_!x4si z8184dLK#_JnXJ*qaDTC$-w?yaN*_}E8ioUV^!UXL$1c*thZ*iG)x!rF9=k*jFT7O8 z0h~C%QTd7)?v}&A3h!gMr(TcW&+rJtISo308N&k%d&R5~FJFM+!2^1FBW*g)>CkcE zhjkn{sN>A*bv(*&W~Uxre3OoA7;a;DoZ*TtJ^nbu>9^?NWej^irH2Z_Axxf@ZfzqzxRF} z=P*3&2*aJn^zcE3)4!>QcQV}dEj@gk;fhD}@SN}KIQ@q@ z9{;J1$9|#XfHD}Z_GWmD;gMhJ@%vuTanFl79%0z{6$@v0jN#&!bpCFJGhfle{S0Tm zs)u(m-1(n+c=~U3JkD_8s2*bqN1z8r=t)OQOhyqjTjo}OL> z!!d??Pu2NH8TQK8$>QzBa3D*MZ=R;(UWP~11=m#mF^1C@=<#D`=y=Sh<5;eaYy3Kn zG2G4Y&}yAON8M{q{T&YyFhj$^n{4UXC`6EEJj;U0!_%JlFqhAS@C z!+RJWV7TBioj<~G7sK7V9cz5AT&ae}v&ahKCsLYSQBm zGF%qb!@C*oXLz((=g(=;aUsL$_&~$fpIr>Mwd&zx3HOyP4Bw>Vp4)Uha)*w`8BV`b z4Jn8#?wg?CsaX3mL9q*!-r>U%_yU;jwS){QgIDT+Fcd zJ9>B_!!-<>kLvst496JGIj-~9Fx<(o_j@{jA;TjK=X_u1A7VKDF+JSRaNr3&JjQSj z!)18kzHNMWGu+Q`!4Gx*PKJ9KF8qbUSN9mg2%W4QOPI{zrc-f=yA@Pv-j-_da=!+i{=zpL{{ z7&aa9c>>Mn@V<;bUKmby>fwcM9d|O^%kV&w&hJ$p#HIX;8ICaAeX1URgyFKKdU($= z9gi?i~| z`1SBuo{kGw>9{kX;~|EN*X!Zk8+1H!mW~I`(Xm;e;{e0mTlMg=Z8|pKMKSw)kjZf1 zLOr~g;j-O&crU{>_`w};B+oI1Gt2bwf-7{~#qco0IpsS4AiMx)&#!yGj{6xNVtABc zvrdm+$Z&c@4=-cb_^=*6c&&~zuhVgi;h|gf@XSx?IQ>B#=P(>#xRc>whQ}E0e@IV% zh~ZI&1ARJwF~gbP(8K)<*D&1A@DRiFK~9pd9Q8pU!V!i$86IMIl;QM#y}T6+w=rD& zEuFvb+dB4tN5^IAY%rDAP-lS&7c*SLaGN@#OZ-C&k1}k0SI@tg;Tnbm-_!YH3`f4N zhYvrl<1vPfC-iVX!`>(L@NtICK|Q>f;WmZ`7#?PLl;PqZ=;`+`Jj`&;4|V=xhI<$u zWH|Fjdi(;0yBHo}xa`Mz{KDsTT*GjoI+INDGRUy^1wDQN!@UfTF>Jo5$FE>`oZ-N) zbpB3;`xzc(xZou{evIKBhQ}EWjOg*p817`apW*ah>+y>j9$>ipWu1S7;X*tp2#)%v zi{T-Ly|3u}-aqKLSG^NR=?yS!ys7gC7>+R9!|)Kp##?%NeugU;?qax~;ZcUu|Ei~7 zz;J}&9)<@P9%0xR*V8u{E?~HV;TXd`3=c3o!tgl5nSayE6JWTQ;RwTB4EHfS#PAry 
z>3`Sr_cL6~aD?Gbj^EbP>tlG3;q((ae-6Xl3=c7!`HmjHnBf|RI~nd}*mze@FTijy z!(9ybGTe4jPp^yNZuKS`jo)F0E1WJ(U-U3M#BiT_mx9t8WZ3K0(<@-Ojp06qM;K05 z-G|a_T+DC{!@~u7{4s{T zoAhv#;Xa0k86IQU+^nZpQKI9>MGTkexSQbthDR74XE^<0J-tGPD;Vx%xQF3EhDR8V zT%zaK$#BnQdU!v>Lky2HY+SC#&t%xoa3RCJSLpFa7%q$G;cX0eF+9xh7{g|xo?d|A zGKM1z_eJ&i!wmPg=;34SIv)P8j>j1`KB9*gGTa%{!-p9jV>t7W&hKZqjNu5w#$i2v zH^Vj8=;7XLb==SJIKzSKbp9B_BMj$Uuk(*}>e%?Gjx!ndGhE1U1;?Mz(~B_N$#4(D z{R|H=+;)?m{y>+Gy*KN)jNyKU#~3cWMUUUjaQ~XzNm-yGu(Z*9zMiy#XWj>8^Zfz>59S<=)(5r|0zog@l`*dvFuVa(p z9)`USFn)#$8SZCz;2}MJ_g8h?&+stAWna_zGrz9mE{5A4*29f&=s5C}j{ASCfxQEI_~<5j?>@JafIOkhRrv1{usmk43E8~^Y{Es$Hw1v9ALPH;ckZg@96Q%7>+S) zzN_(8TwU=^;igCD?_oG|iXL9TaD?H^X*z!=!(G$$ za3f8}>FGL-G2Asn4>x8qe2R`E9M96jM;JEd=;0BDd*|xmne%i!!f@uPdU!9x>GSn) zf3}WC8E!N6@W28cw=vwqaId-(llp&v;X#J!4o%`8W8vcrr?=|m$z<4MIEP_B!vTg1 z7%pVEnBfYB+Zc{9+|6(w!-EWuFg(t1dYj%JISdytT*h#O;ZBBo8183ykl_)A#~Jpv z>*X~W4lrEIa1FyThPxT=V|b9^5r)SZPQOYoZw|u+43{w+VYrjw9)|lF9%Oi!;ZcUi z8TNMQ< z!!d@t8SZ0vfZ-vAM;IPs*tlA+Upm7%31W|X439Ew z9Af>$u%F=o!^I5OFdSjHli@Cgdl~Lyc#z>ChDRA5W7vCGuWvfTISl(5E@ZfZ;Wmc5 z817}bkKsXvhZr7Zc#L8AP=mI=mC3N5;X;Os8LnYC#&9RYJq-6TJjn1c!($8^*RuLB zoWpPd!^I5OFdSpJo8dl&`xzc!c#z>?hDRA5XV|OmM5OVY$#4$C0fq}1raKlXy$B0$ zW4MdqZiagp?q#@-;eLh(7#?JJh~ZI&#~Airugga|!#NE587^eFnBf|RV+?mO+{17` z!vhQtGCaia2*cwHr{AF0Ka*iU!-Wi2FxoeGCsWJi_oe!|5N@%bUY+0mEesM;Pv8xQF3>hKCp)W!U(bUcO9*{R|f}T)}V~ z!(9ybGCaWWFvDXEdq1w1*JL=ra52L*47V}d&2TTn{R|H=Jk0PY!{ZF6->BColi?hO z0}K~3T*hz>!)*+AGu+GY0K3H<@EF71Pw4eE8TK<=z;H3c6%0ohjxpTDa1X8LnVB!f+?UJq-6VJjC!Q!-l$lkL-a=hW!kW zrO5sGRIn=X&9~yHFfYYD-J)ggmG8r0IO5D#blmn~c|MWC$8G6}`06|sh4-oZju3m` z-;lb`h;WU%Ux;v~x*v#eK;3skxJ%t{L%2=dUqiS;-Csj^NZnUMIHw;90giBux_^c6 zxVmqJ@R+*4gm9O-&x7#jym(=SL=R8DT*ra^ zIyTk&{S?1Xz0XfLa!}_lyhg|WUuoB_(=ZSO-2g%2;nD>~govoHsHoil`2@-!63Iw} zH%RFs(ZCrv{kr0d*M^^Z zZuv{VjKG+f*Tykz+3 zK49ztj}HMa8y-Iej6P-^-~Uj@l~BKfI&yeUHiLf!{;hyl4Ue}2UNan}w*yNMnf6EL zcABl(>Kw$uS!xFjrq^c8;oi}(vd6>IlPJB(E!xhtwU`S~aHoG2%pxwlBb2sJ>z~c` 
zecbL1dJ5spDxff3WYjzGc-g~UMY`2~tJZa^fbY9&6zP8_*&0*|sy;$d}FeKEOv+d<_e034M#NysffKdu0(`css!=Z#~wV_{u9Q ftGu#U(2 literal 351544 zcmeFa3!GI~bw7Rva>E3x43G=tR>}$}!D z=bn2fkc1?*=6n*)to>Mf@3q%!uf5N{Z@%(ZubR}<6!|kb`hEmx)zL|s)MXhb*B0!r zDC&xqMQ6#s=S0m~7CEhkBL8mr=vi9N-%$(Ep!nJH|DwNSe*S&(r7Y*~Y(3OCI+^{c zd%uSDj@sMnVKiuOq1DU353yS0?`DG^uy>fzrGFJCU83(Pg77hU;x?v-7S;$eG&?Fz zB0-8mDLlnVOZ6RQ&f$9o2~miq5^f;$x#nY!Q!g4 z;cx-_RV=((2^iX#UY^N!+-dYQc#8A~1>+^mufB?Jq$__dF?^G|6<>L#zM|^6R!)Wj z(!?vQ{;SjU{{(-ou=;1MVEuDR$m(n>H-+;RZ?*cX)AS#s{#L6$rIYn%OULA?a$2G( zk_~FcJ2W|}uHhT$KSKQ-R=;@(>(3$WtLIrcVNc9oe7n{EjWqp-sDHcFKc}7b&m+TC z$p(>hU*$UHu%c>@)tA=~P=AuukLI&}3$%c=H}wShI)ewe8sYhm8a(G29+U>2<8^q@ zVk0~!Yw)xf9`-MudaMo)B)1Ws@7Lg&Vt6R$@zkSrcp#aL@O-ZZ&wom(C@Qy5j^n8h z)!}*G1bDt%gXg=32cSj$X{}#wL22MQREK98(i`CUW(}Ud*ZSoalm?#hIy_*>MtDxt z;CWE%m(6a(Q%CCXOrHSHqcwP{h6kn5pS^W>VA2}(=aCvbf2Q@zEhr51&N@6$eU0#Zy#~*I!$UZn2+wh>Yv^%w@4$sdby@7mvxdzYmTEEEvN}9JHvyh6)ZppX z`sEgs2A)NAc;+F!0ewDSgJ-$cpCV@Tn536mP)q7vLpua((h8Vj91WfgD2hUb?G4Ui z`B33F24}H9p+fo$&f+*jg>)aBMf!yb=W}ot=Ot9gCxf#%-=RYO8=OTx3KjD8;4JcI zs0`?M2WOG*LuJTd%2TKe8%#M3m7NAt{zGNA!PG0Eve#e^G*m_mrXCEHaf9g~gvud< zskcMru)(wkp>o7v+L=&!&|uoHPgYB23>s2nqxc05#$8_anLl@kWj?-;zB ze$3!o=?@LwOuuUID*9)Gmyr=d<)qb9Hz9g}FY&op5B@G|M9~E@&+vm6UN7^1Hrqvc zR2d|h6aP9aAYJ0c*1!4S5~=)neueZ zUC=xuKjy^x`U<9|@__eS5TYv1Ygu+(!s<#MBR zUYDjjUxgW@Q?}geU!RuCrkCS1J-Z$S-8v={PsgR?z6UtGC~j)izg(ZjZOn+THveon z=hOS^{NZ#5KcDzUs$b#~>F@I%mu!AZoL`jpm>XJ(OPuE{9UlU{h3?Idrg%Z056JlN zmh8v8KmPXkSJnEzTH28*i(Ky@u7S7&^opR4I+jww7n|5dXnRcOE6gW-!hG~DihfW2 zNBMj^XxgS?>sPXA)lLM>Rml;x{7uvJlcGT>q$B&9j~Bp`XRGK5KNh{wL|5*7Slzb5O51 znFx~lHqPG3j7x_3HXhS6g^1}sZ{WL$;&Kn>;%nP7d=@NT>h?A)MQ=ohTR-EKgnQ4j z{w*axr1{1l3s^3VuUS1G*8;0&<4WxMj4mx0rZ3~$>}Z0=>}jv#F~11{+=!AKRP477Ueq!h_)9CPw&%=Pt!^Y?4K`0QO?Rl7K^YD!G!HCWOUcR64 z_8+qLAAFYDS2s-N|KzjO{vtI*VfsS8pCx^m?h7dy692&UCH$A=Z+_pWkbC*!p03#R zE7HGu{fFLy^jlvz<2T*k@_N~QF6iMa&c6voqbn8+>f7_N-vtk5+PRvn_UM?DFPL8& z4$w}=tLJEeaKQYq)$~)s0rR(4Q}2fZ=5Mbyd}0tm^cTbkH~Suzoz1cta1{ 
z_kEqv!+!hzOz2Vc5;r3~!hpd}rvWQpuz46Td zbUbKul>Q$5bWF+{9h36274HUdFI(lW zzqWT(%Hz5IocAg{!v-6VjVo-h@!Gh<2KF~Lx&&G4;EDV6rE6!*1LBCFgU2oAzN(^r zVwK^kXo|dbz7856dcT_uwtmmO?O(JW#)%Gg4EqKaOoZ989kjCwED0nXl|jM0B0W+( z1{19`IFNywmsw?Q;fLu4DbZS|=>f zR}{`?Zn(ho+I$YchM|RkKkj4s_(JByuQB~_73;?quY~;t$+lcb{!QfPvOc9`a$dvY z#Bo3II{o6(Zl!Z7S34DsOMCg=HLUMc&X}Iana~COowkv9Q#oU~u+!vB3Spj|?wf8i zdAo`Auug`&c{^@zG5;7yy?jwWdEB-Tksqr!6W`n`DQDHU5uWx6zQb<56Fb;uur$&o zf6lcy$OAGi^tZ$IQvstNiQMhxJfwIGHazQ^Ozw7@+(}*FuamnblRLqWC+y~Yr+5uc z@S5C7`Bo`k?MJOBl{64`ccRzv{I%6f z^`9qyc9hWy=<>7rzT#$<_kL}VY}KPXG&`C`H74>*{;1NOi{`Co{sx(EE#F2M{M0Ic z#%qVjkG5}%_}44nF7z5RJ{uWezYnlnqQAEj&$NENWm=06huS0Qp0I!PiLdxR(1#)lCA(QW^Y{HUHoraT?z>oAi2C)dZ-mCTEpEqzmuHK1KTvuSg z^Oh?Z&0S3Df?h3uo#I0~3#AC!8PBy-2x&X;dvyFeh;P~oz7x4Jx}#rz`9Fm2dyRf? zV>#akPWVG`^5Det~6P4q%w|9$`1_gj2_ zIW8SCI*=Z5Y20AaD=v*F?0g@WdYHeRe2;m?6SuSdFhaT)o48L8Kg1uEBP&-GeS)VD z{hYpiA0XaAyjyN(JRABHaymuI%b!ul-9NDYQ7+Q)_&oY}j%&GeJjV>?c*0_~~LN$c)xxB0{vYCe@zCf{cVF~6g`8%%TtIxt#4mj_of5I@O2xBhly94(0)J)M4jpeLO{>9@25H7*pZ>Nc6d+)#!8U zC-PCx{olBg^XmJ9S-tJ!mExzRw-;so(x;%e9}+@9j{bZ@s!#mfM{ItT^KG5Jz8k$2 zf8<)e6TN-4>6eN0wv|uxwyl@Wyx!L7mN{5OzV<o7i8Mhk2$q=Gy*yA^J5f@BUGu zr>uk|Su6MRv>w3fG=7{3;b0(*&KdaZV-Ay+eUHh&7GCYhB{HS(nJ-AI^ z;bXwC8%J|?W5q4$JkMLD#iO|h=YFa~ zEMHJ*mv(0m27hTk?jY>`Ls&vM-b($oWd`*J=!UL!%z8xW;Ck8D8L2%ys`;_U73`dm z@AvvSq}cX-{hX5T_inn?=J6Hmhx1{_ZwP_paX%=LkL@hybaDSJ;lCZ0Z{za)$ayQ7 zo|JnHVa&sqWFD&56ZUyX=)BzO2{FJ?RmX31^>bLUeK+Cfv|`^cv~yg(kLKsReBZ5l z8}Y-wf?xdH5co&_i>lU7_{o5;xR&R?lKC`$CZUJTFUG0!y_NWs%e(@ez=|4jR}-TJkhZ^+k>{4a4PEf@Ch z)_3n@NtDO^1c+xNeedV{R!hE~(-30OlVp1xZNyWEmMh-;`oZTTj16mkycn}V{ekzE zHu7oQW^%HCauUxZ9L9EPz3jO%Ac!sA5uXbz(1+9?a~H687|)K6*ZR4Oq#X3svYfse ze?zLTHqB&zAeXS~9ZOPt$N#DpU#sEk$>Dof2A}H*@SXDEQmG^l)=?P8mgQ_Ok)NVw zN7F9lJB%GB-7e)iBaG2+iZ7)chp}95V3jiN3_1LUlP5TqbK?U{o?+0d=2?%%eCYy z*VO<$Rxw?VSOFWx@vY^%U1 zuj4j3ua7gfODU0qZuYMrr)2@p-EHy5TL6jv_j$gY@MlCFb;+Nh&0y&h;JIb$rX;>N zdxjRMi!WYI`AP8@Y?=x@G`>jn zTOVIsZ}CO5(;i=Rzi+Vg4y9)p;r#a6zHQun!uW>tj&~hXID0;BT;@M}KJggfG3{Oi 
z{q+ukA#eLcP<&llh&r`EcooZST0%Z4inYXe&@a4QjuJo7_KUnc_!8yTwNOrcJW1TV zbGP;vdPBPLSlm8x%Kq=3p#MA2e>>lv`kAJ;>|9~ucO`!2xY3Vv3zee^htUW1?dRZ} zE*&%d-26lA=K|8l{ob&&55tX$XORzm{F|0ge$hO%aVDJ;>)4z`h)2Wxi{+jM_CHwBt{VJ!U`%k|AQoWG$3bPmLADafrkAA*k z+I7rFeO=eBprE1O%X0C-OQb0JeInQI?U{Zj>rC_4H!Wm)S8Po2Zpq=j$nZAj&IjI{ z!JF9YP3-TM+lhC}%lVGm$$xcrKj**uxn>FU_Cb8! z=OUmj59oBdan#pl#^dL$oo}6A!zjn$e2ew1_S44U{uB6Qw)8iQP~O5M>SgE^ln>~< zJic!6@6Kb7PaEIH9-lV8PvX+uM>RP$xeKEvr*_UKj8gx2JE3WjA_|jQ_1$ans$vsiKfh6Iny2|; zjQYs^0_b&UAYbRh4^n^K_|E+=|K4HR9z~O^TQM(ZV_r<3VqOKM=#ScY1%D}C8@Ky$ zsQ;OrsLy^)n??Mt|J{EIJD8r2E1O zA>XYzIV@!OHkco~$>P-2yHQ*C%=AneA4=up_ME&gk@C9!%C&Ryri}hO7rhZbYo+Or z%zEx)Iegylo*ceMGWfP^Fg?+ESm`itfua|`pYpTC&S&_3f3p8DoqX-`IgRWch3)(! zv6tIfE{V%*$1Gr7O@0mAtvx@7-$oeo1ikL(=Up$w-8TQWP7GHPU+A)SOyBvwQ+S!- zUuExw*1u*ezlQZ)zqy{M-pKMvJk;gye3Ro*lRuYlmp?ye;PQ7q@#OcNTp!O{!Th;x zDPNqf-h6*%9^>vqI)90s7rxEv$3)+alYSmgf?Sn%Lar!>KA)cN`ZkP_AABBM&Qbq= zp#JC)Z9lXv(gB6}t@=*&47eM8;yYMBimsPz-R}A((SM7zn#wKog6kdE8~&Y&*NZ#Z z5AQ$v*DL*V*!54NaX}wVk$=Du+422OhCaonsoJ5sIF#vY@a>TNpRD&RzKL~@BuDxE zpkyD&^ljKrIrR8tvFUl{_i%pZSv)7vd$Ozn-bC*$=>r&fJhe{movYbTNbhaT>5tcE z_|EOxrr)3*sqRMW;;-6wiJ^6&!kZ$~S6wNee?6zKeqTyL{|)EbTa#%o?i^P>^ZmZK zoAh!!f%#KQ_nmu31F z+FG@S?>7ds$H{r0i!?h57Ybv_v*||bSNBei%gz~iyutNEqLCB&sQuILd!EvdAY} z^V6||>)%af@{ij~-yej2kgi1GKF)7ANWKr_dzD_n;+P$6MQzaieb7zEXZ>)x0YB*B z^ujn5&d?L}rpQ0&C&qgp=$V;!S z4EsmOA1g`kc)boc@&9A{uK6w|i(f)KA<-Y?7uO%b&S!1%^DpKnOffy|cG&mx{d_#; zL-ot@DcTbJ%+u8`houeB6LOa76Pwoy*zvFsTqpYEHLg!M?k(mgy8TP!;%e55*FLE9 z@p*o#>!G-leCF#?pHJ7%E_X?smF_^IZw?BRs6MfI^7Cq54|I5AMov?GV(l6}m&@eb z%nXh<+4ntdeR4?YlhDoNJfWBEKY0J#4?;h$g1lEqUzeXO9Uu=Ige~#*`SE_&>l4Z? z+Ch2Xe=YFuC;lrok-w*{;y9rvRG(}o94|M0vd8p^RF51coo#$mScM^6z zjrm7Ar_?J`%>UYI66A6Ij(e{ zn&V3MsW~o>-vzrLC7!WJ8;oyX$oJM(efzuwvzNuDK27)a9QaGR7;PHly!TF?uKB)? 
z**lpNS-nq_)eaw#0ay1MEP7qfVfGWo{_Vz0oDb`I;P>yOLAO?^;QLA_Ka?qt{1bzz zJ_nzJz8-hnAni#~`aRQ?Y&X0b)>giA(D{(@*u90*Ze_mjk3S%w+L<0LE^|Ty5pO%L z{1WdwVeeyncOT{3?=5JjK5_r}@#6lVlaIe3C$sQ`nb10!ep>t>tCy5sGEVL1TfCqA z-Y4#ypr6~UpSk`)FX8cXkEhdLln?v+bljMa4RX${5Lx@DphrT#gr1~hwq2!*@g49l zk^30p&4;zULPYuSae{t$JpLNz#+S6W&*sbM@ObV0iL{reU%c){c=^;~zDK|58P zM8)R%-u<{XJe?YfC5xxxf58HeV z>ASay^$XEFtq?Atyw&yF?j>=$yL{A*V=d)vlb;u~d2~Ny;>As93*coEs+?$6Y#( z0UO6geHZi;XKFVdkAGfW!#^J|Jbn6hzAQwG6)xJjokH|Og)x5lPu49x3J;q7O!mhX zu>Z+DruLmeSiE5p2qgq|gym^`S zk&=9$wVa6Ez9sij(cRO19wUX?t&TGHGxPoN?+*O@p!;tgX8?VUi7kuYDu2imzm-`1 z{Hxn}l)F;sh&SZh=Qm5|Ycl!%9ih)}agsQK@`U9!zfGKvu#)ZePG)}ZloeXezvC@L zzoc+%-`P0*<5|Sx-x24>oy-H+XIN1vosxN=*^H`JGjCp*Z|5)AbId@;)iw{zj(Rm! z`~ha&(z})h@&e9R5s~Pu9+J}kG-}yN- zpWpaG*7p7sqVp90PyRT4UcR97(r7#=5!?Qm%azMVR^OwX-{MB^a%Z= zlYa-jLBt379T(wW;hT%}63b5(VV+*o+uMs$@+x!;vL5^jJpLWZMN$qd zy+P73)qZb`_sjQ}+^&S{P&%k}lR^1~APZ}a2L59IEN$`9rV^|JHyWaZ}tPt!cD>~AbTS7*|n{5&mC z!cHhh4dw~-fu6(L<1YtA56JKzdxN}RD(Mr;38X^&llA(lp*uJtkq=uZpGt4J{)9gH z9Q<(G=RK49Yi5SdSvhITq(cwx&D>uYw{ty%^qn>7Tqn5x_qZ6!wR{Vl$R}>+x(Mau zKlyHNC+(fZ&9dt$+>7Y(GvH~pJwIUb^>()WfR^Vv59Jqi!Q$|V+qsVS{l_f*hov6q zyiF#`?W&&x^7BHWm5!PJZUeqM2A>q|`$4pqd73-yoDT4Qr7z`kzjqJv45}0-(V)Yg z`h!3H9)`MgfUT1}?v(gp3s`$1|I5FVPSydY?^a7E^!o>C6yw2z_B&R-SpUwB3+ev5 z4gP(q-(TbQ*!}K!+ew{Q_lG@x;{K?Q-`6o7XU@jiK_B0*t6rtu(e=YMZ;|xt2v6I~ zcf5=3;ky_ic~qP-Eozmw-$OiaJIgtrPuprRml098n4`a95APkx@(26~@3;34^Ln|Y zhy7fKhjF%Fhzowzk_z(*O%cR^Z$bFL!un$)Uo{n z9T4Ka_#KOIKF8O8CExzL7Kq27yw78?aJLrlI2Z7Ng8m(pe^1oWuNn6JW{b$*LpqKL z{c6a=FmREdl68}<cw&q9fj?!+^-;dP7+<$= zJpv5b_2papQaYmFhco&MbpETgf>-Vzz(YC+Z1gVJVCv-Ib}gRlm%9G*^(*l9zeW17 z+sfUUCeW%GmjE=25*?^8HG#=BUqy2{#ljkROxcW;j<-M0sEK@yZRgYiZ;kRqQv!uCdLB<7PfuN>-l{USKM{JR+wk?5_z_D_Z53M-*xhO z4cl@0Wc9~)#h$wU$d5OSm^|A(X#PFWG;7c8R>$p(mtY?b{Z8in4z}z38Ly>1@_W26 z5A%fP;Q-Tpex2UaOab`*XSQ8n9&96i-#2i2#{0-mRjYr+O=oMtBwl0mfSqZ{?Q488 zb8o5Rn`iUcv6u7c@j9nZLYFf8(<}FMp=5al;iR9!HlR;z{NwRp=bu7!SozWC)z1gS 
z*9f8H@%^P3`b3^W#P1m)pJ>nJGnH5JX*`Sd{rgztpDX1dzmI0`*%Er-rU1;#_40!* zjwk7d=_~(T9@o#VzuZ3!O>7r@aF*cfnDHw4S9FuKtNPgb-@ibUqqv3Tr&#&8)zarN2Y)8|)qEuMF_{r2m?l<^%Eb%tl8@Chw`#Obax>of4 zaF2goB;C~U7+#NyCwygb;`my&3%iMtd3)LOFRhaP2Fb)bjF{ZocOibidm*|=a)o{s zed)Zb-yn3w1O93jIS6Ca_u)S3nc~^J2gvVTbU%OH9Nr(7#i#L&9mMDM#ecsF_&*}? zcK@!+*AL+!=@!PR_vQWvmA6Lr@5`cNC$@hS1jq+C_tbKS-ILUg*2$C6-|w$-eV3M_ z{5m~c5BNFq$Mcuo_PW&0`#I!Lw?FVao$q9SK-8rSE#Y_QiQ4^v3v@i$`Ox`YqRBeH zXt#c!falbceS%y&r`{)^KuxFzplpWOhZ;QqWhalzeX>tLvyjU3MEeBh_n=*BDq9!y zU#@ub{fYZEIl2EY*+&^mST-hy5O1vL5ntXEu+}8OPqtK9iml;MMYiK6AZ~_C>|%gK`}$m;P@`=Zg9=EVSsQkUbbbv#J`-yQ2^`Fa6+=3CL z>sYiuE7LyO0miIfydabB>)vpwqS5a?QQr46isG-~0gT7j88OYB>~Bm`{AgFmrR!?E zll%L6SP!&Qye1Dtg#@qhXY78z-1mxdh3LCFeqV=&3tJUY`J#ndSPX63bm0~Y;iTV@t!-M(7Jp1=WPS1JQXm;|uFK-aG z0B893nR8!tnHB&aT%E?xdpp|5XQ11$-%Ryau75u{yqAG_R$Y;1mXwEuN9W- zMS?sYH+aJ9coA1jQMkt1w{>4wqiCaIGxeYI)eYOL%IEoUKH+)YPW_)f85!^cl*iz1m?KZtZ;cb+v#nhfcv=P(jt~G zEwp#5z7zjoIXmd(!gA`p*vg0H)QfS6^@7>8xHL!cpdFcF=?Ccz`o8ZI)Ym-;{GlG{h{`|ON5}cNg1vx-g&p z@N)$4_ps3R@pddAVF2Sveh+LhVYQFH%fEGe_WLhRr@HaYtREi?JI43Jb*a71&qr+S zCF8U26cm>F{=Jc(m(S`E$WN$5I?rh5o?n)E&{Jtr&wuxHjAOv~Q6BAIl;La4!_P|* z>?8DK_F?9T`W?u?!2iQC#rLT~Wud|2q*tZYV4Ii9 z9D}I`<$H6rx5@V=v*|_oy+_HtLw2uc?DrD2v3jV3|8JSysu^Ldh3y+%;)~`p z-Qy@1(ccbBWT5iI7qMViVt)2Tyhmiq>=mr%dTjQ~4CZ*^*}rNq#~aUPd^xd~vtO(E zE>Bx#UuWfeNQc>PGI*82Hyhk#@NXNu%;486toU@ibQt}9jQFAvmbZJj;*0hg+{gMC z?KXIg!8;Y+GJD+8cU$^L4c=++2Mr!J_)iTUGWgvJZ<+mn^zCt6#1lU$DG5Ide`6-V zU$5Q{zv2Yz&swPUl6CW}R)ddOKIaAeTVuy(Ez)vGud(~HmL=&me!{G-B)!IN&RV7E zpeyWz$7B86UH*KapEvS&YO%0V>*HMLB>AflU8!){)^WuqgO_V^6t?o6z0c-hk$e9h z9`>YFiuiZNz8?%aYX8~~DP3TnzL3$MaXa~Nu=KFz!=IKR$-Sr#vAh5^<1U>{(rf5c z;=I(+HRF%g%!92r^ZGO{MJF)Qcwe)=v-dh+=Cb8?%J{SN7zca+wNt+y`cO!t{PTJd z*?#*STG)GVNI|3HQr`3v`0Nv}l=l7iskZ|g>wjF*!zlUL&oxd1vE=bMsjo+R&nYP# zL63=kU+vJhsY_8G?f&fze9JJBIKA@|_>6qQ-?IP=|F{sn1l2XZf_W}Pj9-;U->&o2 zKek`r>o0qumJ4HlsBnB)(crrbe!juKXRsJhc?K_J9}Cfom_OLex9P#&NwB_@SM)o! 
z-=PglDSxrW>B3?epgfRo`7iSC8$+3L=fC5co|8Ub%P03S+jmLeb38HCuYAvLc!fBM z*X|whI63%zJahja=zM2yEgvc0>>ysp?|v1^e@5oL-_|cF-(WUKck&VG(m(n!()&WA z_vnX6&kIS2F#5j@X8bsezQ@}Vs$pCJFiT>jxqeiBDFzt;5+ z_y;}ixO$;B=;wS_OFI1LV$%hhUo4!@_{9qLE7&vT1|9EYe+GXs2O_!o^ z_cDbCcOCgLYC8XRY^NOsj7R)KEr)R|%wZR*A^Jl_RNg~9P;AW^7eQY=z@9j_!Q)$Hh#l?#oO2~kZE-`oU7`&Hn;$>-g7t5!bR^pz^j+M(CYcXZ1mAyC@tU4X{J%>TZ}z*~ z;fDBn6d&|M-k?r(nAU>bQ3TM{HwPc?EckT45mGC z|EdsCuKGurAI97cQoqSPysY;c+J*QkA&@-rW$efL#4qN)T|FMFe--P8{iZi3z~lXL zz6fpFochbq*M1(L5H*=Tr@i-eMmiU&#(35DPI_~R-B50|eCjPfe+YlIM_S6Z zqxNV_i|c(iXxH^C+W)ZXDcX&+zlh$+xwgu=%2&lEt|zm2^LSpE(O11qQ*@^}j9dTB zUp*cDbZ$fa!~@{2=j6Kr@=2lxPa16URf<$jQ#(GQF!a^lO#IjV0F=W7xxe7^kR31h z*XILx#{Wo|+#Zn|#b3>=8}LA9gnu2#u%zf8WrSNi!rBIuF#({9hkhe~JI&k*%lek4e2I_Y~@MmHb?mk@>A>AA5GG; z>oK_pAW5(F>mN+gYxt&gMAOrG=~4Mh`FoYZDSuOL(VtrWrrd@K_55Jz%?;()^+CMN z{HBTySM4+P1^F25$$!EpuAk^X%HNc$Y&z^sKK=kc-6cODl`ZFb!W;&j5k>SL~eDw9nGC!Z;3x_#7p|os_9w!LT`srmY4hT zG#~hmUnBJ@Ebo3d{2!g~Hbtl71DyXpxwolH%N30N|BH0 zUoI-__uBN2en|VbKJkbDx59Da55LD?_lNfzT|vwB3_iTa*vVckI&kWFP5A_@jKs+YEll;GG5^Raoj9e2nltLkfp6 z%3%y6C{Nno{R)RYyRE;AwB6J0?-|UA}Xi@4N2jd-uDUUokwT zBg%KK@8v#3gGmqBmo}L67%aWpVA5l-bicu*$6#sPVA8|m+3_~h|GSKzO>em0Df`}( zLyL3AQ;#b>2d92o@yT~ngxCHH;qH@!yN?p?j99<(A;Rs=gxeofSoV_*{}HX1IbUSy zhb_HNvC z>528Y+a=he4_*CK?9p;{1PZ#E1|HhE$MXlqcQp0~UbFr2{ekbkv9Uk!$CCb3{DB*# zKj16OU*Zq!rylaShmXtoIZI#0H*8&z;cKP4@vX1J;!esJ=zb08zK8tce#PnQGn4y7 zPfl>2e2+9FkNYR5kH3}jo#1!7@on16JLvcL_Zp|N7v($K*^h4Or}CYQ_sMy0)gR%` zR?DY;3U|_A#oeZ#>iWU^U&2$igZw@qD7aKNjvZJJ?OVU%u1qU^n$; z`A)Ng-DVf>G&|T$J0SJ79r+H@=s-Idcbi=_K8(-TV*0yCP4Z_U`Y7k&$=bvHX2{lDfa9Bb4&k+IZ*XlhVdwYmh;U+$Pulq1 z9&glqIp=HbJ#+RK78CmGZNJvY{(cqgDR#i<E_s?XR3K<9v;p96ZD0|4E3K zKxNCiLg|41)W)%v89y_=bEViCWjWu+ z!##y)xe(C~hQ!~aeb3|baU^(^3#A_uu8-eOy5-_rr;E3IhxvV-v~%S<%xXSkzP z3FUrqxMPmN^c%t$=f8g4KFWSQW&iJ;#B&GZAgAlkrhcFMb?*1ke@Xp5`Y+|g?=!#d z&S8$z{NZrtkiqm1>c?wwAM^L}@&G;rA0%>MeCcw6^ZMkInBzT7|8CIymO=AZGJcBr zC-h5Xe@*eq_b0?}ew};|YB1@N`hBEJ>i3Z@gQcTZo_){ z(Egf3*(%h%2ELQQ_0{S4Pj$4O6kw8 
z`q9^`)I+J>a{c0Zi~1$iTdq$YH2uPU$?p#sZ1(&oBz`RGrXOL((m3y7gP*zeC0r8d zzYpvAki7n5JmtT?{(HIdM;&>NB!19^l`?|e(L9g-|ZD5e%}@Lj&Odq>r2~hXuCVW~K6!uOv)Z0~ zFU)qHv_J5VhUeM#2T0F!KiSt!zMo8brtxgj)7LXkbU!)W|K<8E@eA%BQF_Mvo_W9E zgK{3H{=8kYMl$~U`vqo)X3)Om_nour%~S3B{#e$R;Yr8Weq`~rvdTsFyw}&?Byli{ z17^>A?R%r7+d9VcJ&p!{? zu2`e^`Q31qj!$fVw^ii={k|LYP2zjjUQYiY-Uy=(P&Wv*64^j`^ zE)B>N7U)aQX{(<0I1rCN>$p)OIq#6ffjWujj=hQ?@YsLk_m28^cm8{${@q=9kHzy! z)Cc7~7SA(1ANI5=KG*YMkNHI<>LtI=6!&0qJQuScdq>!hx9IDX{fJkwT#Bbv@s#hO z-6`*;y(!;CI};N7!gyVHXU@LZKB$l1?>9@BM z?BH5o*OG3&u8sF`JzKy3O8!sdZ{(BIUb+2pdqw+{+AG>Kw^yNJeB;k@00quoZ$Do_&&dV|L^CulY3&E-`c2u{Ckiv+NW|9 zlJpVEU-J9t;|5c2C-RZBYw@|tUXw@4m-sUVGhURnOWaplruncpAH5QM#`CGYP5)%y z)3n+-0XxTfE)8t3^E7#U*>iY)e)qGNr+Cvi^tY6M^Yz2%s-!=MGW|*Geb@M&{gU(T zT0Y!w{ky~ZcRT5u$B%Jd{SrA}day^wRh0cNIDUobJBD|mrpKio(%ZgQj@wre9%DOk zXBXl9q?g|_5Zm|jvHgC%f2S8;#r_l`p8Ixx=b6|i{Ic=PQJzF(JvmRL&ns{RrBX)Q_OONc{-fi`0*xeF%OIXEEm` z-QSwWe(fF6jBpF{Pt~7zSsu?U#WSHlapxi8-$Oq_;<;wOyU5QbU*2x^elyU;?qFYe#S(81^FoT zE8Nf6tNoDsF04P4ucu=_?_cHoNBz3>QS#Aq=cP#hr|rDd|D;?^ux^&`Q<(o#9}m1x z^;aR9Pd&Cn*)ur@d;0r%zis-*^kHt@8=GB9)>n3Zrd}UjZ+M<+{O+|mefVtc=lS}_ zzXzcnO3x!u|D@}oA)N=;TTeWGN4@BAyJsU__p*k1Z|6^m-WycCR)~Iw`t7G)zc9cx z@p*+4q|?*3kGD(xH2EEM`fJb42jy_W^CM69e5|qgc(uO!?R(Z>>$I@n)~^R_U1#^G z6r$JZL8#~UQx_TiUS#yM?|_r@SCsJey=YrR{hXld9}ey70%+tIlC~* zxJeezBEi!;rB(Y8Dg)Z?U}+Kc%^ppT!tIPd#O;g^$UX9P_;p`pl;yGiqUZX?43>Ul zTU&e8Iv_Gi#wv~D zT!a>su=0}FnjXG|p2gdoWo49X; z9SG+`KcDX3tNA_7VRSF+8@?pp{MP(;)`jSmC()7oeZ0MspT~Q0*B(-Qard~sv-5pK z_#?~jhxFJ&W)8 ziuB_gMH^N7?QL;4^ygvm+wNj=a-WySoxoq&do<=Q5kZj0&j}PFyJx`GTPUBsk7M1E zAE^9Je6Qm7G~f3JC%#wln_ABAO-H!yv*{yY49S0=}-7RZn^TP<+ob?BL>eg_!|bd82ojEn+^V& z!I8mVHTb03!*b={4L)J;af2BbD_8!_V6MZ;m9H4gby~UdWrMknDp$T_u*EGaUo@EU zzjEaZ1|PQeK5y_LgUN?+Y24t4Eq|xMpEG#3!Tf$j;^YeVSB_f#h{E!{q{BLX`JU3? 
zVa@k@yI`kUg{QLo3_eslk-OLU?H9rRP%e6#X7IZv+m{HozZsU3zrzZP*RK@Dl_&1r z&3?QLS{JbOH@RQH`$zow{(%4g;-%1C=AYcyr+C5v;w?n$6z(6Te)M}d!stJ8+=-v_ zNrT9NRFGkm@(KY!UIyQIe-=Q-*wOY#K-et6JTLJvNsTmwM^a{r7 zzr%%b?97ZK-|x6h3ZuOFN3e^S>Fn=`C_bW(Lh*#sHK33@vC$=d)k#f9eQ*Z!G>Txq z9r~{KEanHB?>t|GG15Qo9#XzWzpoJcB;N-czaKIFcX>E$u=Cp?gUNRye}s*%g6LvA zbK6onNmu3fTwb8*pc^b&L2hG1Lb%%aew29E&9L7uyqW`deZFpn*)bqPy=y2JVT}D< zx8Pff5BpP&4)Uw=RpI!GO&?*p;Axe=oNn@6Ch4~=r5}1I z`>>i0`qkbmO!^@`ckh_;8`5+4jwR_?KgazEl+XH0e*QmCf4^VC>FD>R$nR|Eyg`p2 zk$z|E!7s_)SEv4l-8&JtlP=hY!#qN6-gg1?JN=Y&U-q4ni+k5@)cJ`^ecB%S(JJXq z$2dbjY)xh9S9`DUPZ<3Ul73lxctZMr`gB}FdSvMM)`oN(5UfJStqb*CoD>L`*AQ-c zgTCYT9>rHaNP5@v8PBcM^I4zHkCxB=-dJf(l3vSaJ(`}*<09qL^jyJ0g^^ErXifOI zRvzY9J@SRer?d0}-?@Jbe5yC>d%bMAIpTLX|D&AzCw%03obs40Kd$A;PucWgNpIv= zzgO&xZ)Z856Hq`$@JvpSD{ zjuH9)SEgF-4b*wfo_makgwML$2dO8#e`s&9jMw#iTw=RnjQ#7F{3-2##8I@r zVfusGANk&&{jVsPtbgQOh^E7y^hp-*<9m2H-_82wzd*kMaoooG&L7G725lAkpTC4E z-*;=l^%t_d+#{`gS{|ib^^d(>`9HlE?^cD=d-2|Au&r0eZZf!_2-AC{rzdiPDUsh1 zpgfF{Z^Ib*M}9|K)B6w5-<03ufq9Z~I-MR;yt4i=x+!?dbN2GxxR$T~F8ITWAdDIQ z#S4`}KF{uVdOQPshXU?zI3I?kt*qC*NZTnd{VMy}9U1)Z1~(gg+~5|4!_s30&oTI@ z!L5Y%_vzbz$E7#9hmM9ST(U^pKi%)I>*;W>wwKU(#xmkNw@csU;wrvdUcvY5Zhgm{ z%h<2ZNax+i-6OI&Blc}s)2&XoIj4L)x5Nssh9Xwt*=ot*P! 
z`^o(_T?V`US!J;Kt=o>Oz8c(CBA-q@MtJIZgx4M?ymk)Z?h^{f-Sk`I_LGF$n>k*) z=QG~6(fT{AZ}D4LuGFV+R-g8%KIS=TmrwUovgOn-8P#dg{bTh@#taszP=Axtujhnd zFQ6Aba%Cg`t=l7zaRGux1XhNo}N8I0N1miW7Ddq zqFM`$U@gc1juGZHn?P`o5xjd;o z`iEM4({uK8Hp}BXdvK)dKb%9wlklyLJFGCf+D~7UpurZDE5bOrL6t*_4ij`50iZb>dQj(1DkjA=SI!H8?$xQ{$unT-(*FEqP@Ie3L@i=^NKf&~Bc03yA zXg51(b{h8nKd;K#`##b!r)P_{f8gzVzY;xede+y)sXq7h$$;T?dry6O+V*(N^lzQL z=e}ODe#))qPai+~S6*uNzMFcnywvP{H}#^dH^|*vY`-((>vZwksjrfCyvq^wLAuU&dwjy^NWM+&KlyfI`%gYj?LYZAwf`n} ziT&^6JSO&^`o!a+S$%Ul>uBgPx37P@vXOoL4`qbs>?`^1>9enWx}P-hy4cBw-J8&?hP|JIcWaz8E=172T+LKWqe8g_wz-k z9|!jJ&I?U$DOXP(&rHKZd42MDrWzh!uecs_z2vzU&F*7#J(%h-*K59xalLlj`gXKuZLfFb;{ zr49Ar+l=pA4*$Ex6K~adEkxTnzd!YQ2Ub%2^a;jirTI(jjHCa$nSBg-}^dSZ=~lHnJ&L$ul^DA zcK;PoG`Q{E3j6o=a;~5C+jROP`ED+kzc-i9a|3d|&;0F^T3&vKLEqjV`Mru9p79)> zKeqCWZ-~7!nEsladoY-OocR3)lOJSVpl~{lLpl8XeYG_Ia4!G7x%x+P`G1nje=wK- zzFhu8xqLf!v7K}cqepZ3|1($rST6rhbNR<}`Tr}Ie*x%`$~KF@`v?a#^Oe;`-CHJAUvT>ipb{-5Xa7v=Ill*?b1%l~jLzblvj zkzD?&Tz)l|-;>KflFMI{%m0g9eqS#Cqq+Qzx%|J(=jrqA5s4Z`yZ%Uh;G*kzFzk8_O7qOUcIXXTPc?fkyBgWp+@u0`>#Nc-XYND{wLSmGM=-;(%}R*S+C4ph#QF@K3P z1U!D<$r3@Va2N4(l5Roz!t`aDK3H1AeE%LQ`CXZX#9J9wdP#hS<@U|dcQ#IuA2-f< zWzQu>0)q}O)!%(3y?dK}Tj{Om*{*$)4(5*GgTEY?h7>>2YtLB@BN@AxZPR~`3RBvJrL+e?fJ%G!~2l!M;%l+OAo|tPUk$_ zu%4$NJ>p$Q+0R|HD_ML8wH)nFHXY~V8l8jt_p#V7NTVu2z=__PiVMN&S$XQiO;`%AIoqV|A6*CoTtN)`;3%~ zr;bY-f2DEhhgIGjUwKAJ=SR*t>wJ`FoM$lYOL+z)7y8ij-kcWB=j>*_?Y!m_pBIli z{a@?PGLFyoZ=QJk>SKoI*^U>Hj%mDzbWG#kq@(Y9$GeVEUu`q~+h_b}{GP^zC{G?2 z3YEjGSL)MwlX4cPAGdV+A5vb^v-<9c>eW-9Kf}45tX|YO`k3*ZEW_1a9Uz=NAOA=f z>~xQ|oB9KvGk&r0J$L>r%G0w??2zj}=)J`&pNii5lztH--=UjaoZ9Zpoxtw=&DMLeeqf2?JN^A!liRxe;$NuI!~Pv_Xfyc|X@S7tyf@yEBRqid zB)?yk+-rM&ExwtwvvuczWuL1CpTKy+0os>xkL^28rT!@QT&(%=RO*?~(`qpFTnzs@!LwOrAp_Dc+OO}_-jlbm->?z{E--K@VwvtMg_Xur;*{VKPR z!9vmO)f}@|2m5Gea{Ide9Xt56rXK?1NzZM4pYg+kX0M(!o=5CHCAU{Rr<9(@;{LCn z#~LiHGX3uR8(jwb{>Czc*)Q48H<;s{_hjZIL!TB^8-+>>=-4`N@CglflqaJg6n4GH;bU@(eYdZEQI)%X}6P>Hd_8)f3 z?Nu0~d|({CuvflbnC^Cqa+UYfi+*n$i=iIntPQ;oT%tD7vqH4S^p&kcW=KQwga!I~ 
z68ES7S}nfmwd=yuy$99DSuUF0ayw8oyS0}3C=^?@qw!j^W7ExUmCTO09$U=ur1zn| zq_&^F{<`Bd$NiE#z8BZvE4R>om5XM#&NaI=^Gw^VR<`?;?bdAT|Fdhi2B-2oz~EZ5 zSKVf>I$LO;++OXYz4G`$dS1!xiQBCn<@?lbxgGO;{=D5HAEoD&+>Yh#mdR6cPN~*z zW#zf&G}c?$b4o9HX(KzM?$&d5reglj({>JcT=`|YLQxroM51`^m?fT;I8W zP5t<6#Fs{Yxsl%cbMn`Jd;gmIB!y^<>#d)9{lb9Af7SCyeh*z3-AQ|4dJOwRP+cD9 zDMXij4-6ijn{Uebvv&UMuhd=_qQ9X1e#-kvRkQzGSK<4J-I(Xmi1K~E#S3g*lHC{k z$QFsqn!k|Uhx_p|S-06eUs?YW@p`|f5$nYBWo_d7_PtG)avw8)ADa0kc|6q}It{wM zeO?An8t-4qd9wS|+)oOFTxiZ+AWB>w|K14e=cBpx^X%)=_49Llf3xx3)-%L+C8hsU z-49Lm*T^aKm)#FCZv1Tfe7;`{e!NcjIeSm2`oCPJi#$9&e)xSTn)GLn4lju>$LA3) zMAs<&sv;wJ^7oI%YsWP|?jB*e3d=zs@6O#rIU*fIzo&=~Tdwf@S+9JbCa$gYm+Q+y zG*1!8cYPZ7jJFNxyWj4ulr$HNZ%eGjN2a~pN!f#guV|8z2bJBQ$)E|$%@;Lkk3ko^^N($ z7KYq2rZD*A3!g~);rD0`ZhJV%uf4BrnaZ{D*QIBF4XwZZ%jfNt{e>jG_8if|B)#?= zQLCoc(LFPsam@dIv><(!E;=6y>6EW768`b~UL}sB{Z04#P8hsb+mZ9A3YQNaQrPz$ zC0@gFclM!~D6G(z+*`O#&Js*#*!zDk{;`3d^zc_rmujO zl>9N$Z{32=YrgL<79zqPuZG_1lk#zCjneH@^C#!#SZ)lh$UMsA9R;6u_h>r!e*SUs zLz44xb2uN#@3yrXOnLKro(8u~QF>(C2Y+PGqk&J7-$S!|zU24k>gr9=dgMo+Z@#-2N3#skyJtX z-`WiMoI}2~dotu4w~kxxd9w6H+P<9Y)AVfn=%?@7r}NT$0zJx>8-Tq!zp4ddIfx+wHM0g#urlx^w+?bKgjX%F+B%Dc?ov^3(DP)E7zj@*vEdDokzKixpG}v zj`YgP*Lsx8_y@Zi@UiPv;9Zw1e#U$mXD^L19% zL7J>wxW4*_e--%=eGN^O`UQ7tyr6&7?rThbzvL|{@2Q{hy9T?T!SAH=zhBa*-L2AY zA-c`m(f+3X%v-466MyD+4NlrIxYnO()XoFi&Ngi)zdq}oe2)76BdvllIw&!%ey%14xgO2dy3ei`YFEHk%5FJ<8;}*b|wcG2K@Lp;^w}t%d zIs0`>c>fptay%3N{=zqOK0J;CxmzUV>wkYC^%H7;e}Q(&^||;voZnKt|FBB+$6n2c zy{P^Dg&x-1wTk_zXt#C!97(;b9^NT1`1a(Lkef)$htU>&BVDpHzrRpbL?K{QLVszu zRo=1PJK^;AdnVAY+iTV*#2@b?ezkKo-#MiAGE0B)1GPW(c9HZ!dX_)rI|VJDCg;WdfnTA zUO$O++Wmy+#C`cJogS8umD4HJlcK!kc|3YSzO(eIy_cE%^^-`iBhv5F=d0s&eD#5b z^jaxc1@EA~bbWRLaK&KZFXj7vJjdYvL)^*pd64hxUWj?1yrlE+u+$WNU~=K#iNHT; zm7F^KcCN2#=V&m`KmhzVs66EH|AXRp`wh9v$6L^E=;86)?{SnG;^Xc3p`}`cDpC6&VNza2&&xDzx(B(6}yhHE|?(3>65B`(5iH7ORs(i615X{X(X{X*-LFuVVa`+}C*e{a^O;jr#v0!%IEm=jol^VScOT&z-^c zq2Do*V$*U>hrad)9%UhVxq?rXznouP4}>0o&4=s>!xTksVv 
zpVEP_>^Et;e-{>}uhM?R?M_#R`Q2Be?_i*B+)wqmfW(P8-*zuQ^bw}O?SF661$A*X z>Os~+`Ik4;E94isuR{Cn-}$Ec%^qK-WSDnbh%0G>vk;|{NC0H=nzJ2+?o6)sp)h&mEQ9BHtd+v*Y<%=W#_Wz zO70PQ`FG{Peg_fbx)bT!Io@gLlsvv~*40rl=i9^5&13@LG9s6Etpf=-X*J z@ZbCDRDMD*`MX^*$omvdbor7((fk?dzhZs@$;-)d85`3GYJ>vdpbOY zs88Rq#qE7x0RAMNcnRMg|G~VYW>S8f`p@Zt{qp>}fTr(b_YDC)KNuuP+J_^%>;wSWe#h zuSw+%`48su*JbjPb2N63bXd*(x^Ol9jE>oq<+)`@2k6E1VM@Q3YP$F-s$b{Xy;cSN z6-fDZxUAVxbs^vKdzf0TYUNzMpeH7SzZnjWI__bGtb8341>t@J%3rASdra}A`uGBE zXkK7@p>jg=MZfYr-}-U8<ahU%wQU6WP__{OS|3dUPI$l%X+6Q2CKe-T*(EX!dB3;IeKKqSc z2Tp4Gsn+w(uXkw+LLae_c&f%vy_2`i;=HrkXhOKGRdV z{w8r|S&T^2QM}pqy^T+NUm!cq5%9Ccm7T9K?wuL`6Z#DJz$be}X8P?MSUUdK==jSO z`i0P&HREolACB~zakn2KolrhI?socJ@jmLG!O}l#xqQE6ya_$C{q^6Eknxj$Hj|Dr zFA8Vz{Ikrn>_^w7AH_c3s18f{1N?yTf9ms+o})X`&lS7ie9XueIYS zf3wK(MKZ5DrCc#ut_}Hl8RUOQ&Z+siH$Ml5^wG_dZu;o~Zbv2e^SFP6a$Vxm#8dZ@ zZW;aYz1PEEW%>=XFsp(uhX?ZnKL~k`J@)8I;5#Dysk_H#6G}+^?R=x$|0m-LZzUXW zW;-3{{ERjfhL&kYA^KZ|p{KC2cKIztFILY8=~HUbU!>{%qd%j4O4mJSDV*+0+V65F z=b!9%vM;3lNZXmO_{z49>L2@%;tBgHx9gW$oMP<##J{wt{SIUAHQ3{ojNimdEna!P zwR5f(7b{iNcETw6FpL>L>?c3;AF%uW$Jws@UaOYR^1=U_fDih?2hD89>7SkdNao-9 z#Pcyfe8phh2EOFY$E_(uOH?|r~ISMyItH^j}>MPSe z&*z2Er5V|E8iq6>UAl+09gh#D_dES7`Fz&^^EVG!e>M{TRr-pih~7~A@(elFAU@;e@U zcdlann+I6GUtg!3SO5L+D!Y;RDJfkJYkQ$QWc?b}cloBBd=I>h?>Fqy_u#f|T7Gb9 z;Ct;5``bRicWEQvoBR0gUZd|&>ESp^^shsO^cpO6TR!PESn~TlNU!vM52Ht^%gP&F z{C6MrEwXgdDZSUj=DXBt`8%~Aey@kbu?*(-(z2gq@Ej|@*ZSRJ@UX#Vhwb+r<6S2; zUB0Kb{1XNrH~6I0KkE4kXZ6Pk(Lb(Nz;9r=M*3t}%4O}i>X$K{9`0Mh&YS-Oy#@sg zJ@c;1Vb6!P-PF(F4{WFB6>J;}Qhd;h$vV^Q$s7v#+-`{`&6WX7Brf-NgW6z#Pd|@N;-L0=DU~j1JQhP%^m)e^i z)hp$#YZS5Ue^dW#rM;2wEeu{|`5UQ+%->Dz&-a)j!?;C*{J?#*s@h7?Zi&alA~*`1xNpZx9+=fltI z#8V^dkI^~#UEu+%cS7G;c|RSyk+l~OUDn86+^q`WbnV5{r6-^5`;wk*I}ygoZ`t`( zeSWQqkKTs|6%D(8J@okwrYG^Ng;q{x6BFv|k%{kz7*u`=H+E^ou$=YXE=;HoZQYpq zo12MeXe04#KRrC*ramnguO)w%Z!*7oE#<0wllk3iDOcs2Ob@Q59EAZYrf}0L;;GZm zt0^}Y?+Rf+%boVR<2)^%#4XbC{wl}2b%^+G9@cj`?Bsi>*1r}##eO9B)QuRt(Bz%t zPk+zsRC-Hssq|;%-Y3Fq&7bWy{g&KkM?RC^i_>x*XG!8w+ZJm2;5Pd4 
z@vcRLchQfJ_bns5Zx!|R<}QWf&D5uUpLpE9jN?0aocY^M629-Kl|P|ymfxQ)e_32Y zm78S^^`8Wlt)Yg=O;p82QTYi%;~@0+#pnPhKB| z{p8PZfP5Q9)+n8No6a}C;&XbQK8%=swDW-Jxw@UoNBsxhV*GWIbe7l>9^5ma^Kdia zUJyo}u)*k*+2-m~{-+t%B)*DItbk8-u0y{Ku{V~pK#q>=QH!yvZ z#0{1y-BbU~^iATwt*M zNj(>De&+Sty?VZ9So?`}9ZX^&`aNZc)75VtUrzMG0^@UGdzAw4o4u~Asx7Q znHUs#;#K6k^=l;HCC_?hN4|gh+`KATqEqwL(><>bD!&w>kCAWAke&XoFK^s;D?}Bg zOZ|TTd7_);JBYAQC$e}B7aZmC8p2IH_>uh%C{jmx6|7DHl`@E?g;Q5MBUZnV4 ze`6p0`K_<~Boe=~?7sKRUBU%kvi_Sy#hyJM0e?C@AHjV__#0CtUG@ioO6|Y6RsvIw z&(ywXBL8Mz(Ek%sDd|h(x{B{e#(Of33atT5GShzn;Bg{z>ia;JgF;RDRewj%?%@@soJvi=Ed1 z+(~@tIHLJxI==z=mZHLMj{Ayq;y&?d)+hPoANG03ZWaxeP{oacCv!n;Pg=U(|$+<*Uo*#1ys z`Qo{bEpL{5?9MU2_anf)aI>`A*6FhmE5BQ-iTex4_rXnoKcx7@_c08v*9$&jT#3?t zzTX(vZQiJH_I^}j?jOW(# z@53G?U#~B-+!)VoXV?3Ai+5N$w0r)2SSi_}@iU#XkbA5|W68OjDfsfhG<;bx17H4J z=$+aANRck@lz7k+&iNR|NSC)PXTpS~`}<~Iek%T~1%E^z6!{az2`^OlX+2>`J*^ik zcfC}U?|o-;?qUz&WZi@hx$2bl9lv+)Y|l63=MB?qZPr zdV56)koB@3#d8;aUV`FT*sr493)@GvTv(;vjOPvFdLoAf1$dvOJnuM8c(oZw=(jPxPjVxFDfs~5ZhKM_!r)2u)1g!Jt6!wQ^GoW-^A_E< z?`7}zW%C9Vj=WDs-@bp%HrRa3)=BbxFVZRB_adG0eJ|1}UpFD0@_jF(ldqff+Pu8B zk?r(-FS8r@`ZVP+-}f5S{MASq+c(5`IPa~#l%e(vJ` z-t`qVPyZL3w+Kpq|6VPgw-7F&-*#Syzq#Fm-Gb}0d)X&U-?_ai%Gapy(a#~fy@XtR z^u@yeL0C2M34Uh%D&^o%FZIb!P`t??X z)rzIF--qf#hMVlo<*Yv)WxBo7>fb-lxX;+Vp8aQRi2dC5FLFA~BmAn_AKPb{5pFN_ zUf6z&c=b}xq}$E@^e)r-((Pt{db=!a_Q%dagzZPHd=K&5ZuX~l^)&d}Ioj#$kH{bR zAMKCHV>a(%!W&sg-}XbZHmyF%-*2IQ3~qiZPfJK9#Kx4?Gfcw-bXgPDPKNM%g-~qJueLA-XWH?v)|b0Ar_^?#a~ zxbBeKfmaj0>%F0qT0vSpMSsWS@pt2SjO@U&27OSm@H6Wbxkpm#&Cg-)T-GFuIp9+`U@s^K%;6NDspuD_Osv!#I43 z;Sn1@A9|AEU8gmFQQn`c@w2E#?Kg9#)PFLl7MG1s?|#4Q!6H8z?FrULTpvLXy?TY{ zAz$AT+QRv~Hfv|t^-&yOKskvESw7^NKdwY1xK^z-L-{^HOX!W$xde9x!z zo(CE3n=1-bTo{rBad|q!VU9-U=Qaizj^{Q|KwLc6;p1&TcX7!2^LelTSG3+z@?Q17 z?Q<6oE0~;59A@jf7+BNDr>1ik_WecUI);x6r{i}Hr*7vj7An8;bp+Eh@%)A9nfNYi z7xhLwf3ecSD^2gMwQv{p(0S9Z)T>!M*T!*LevWOg)q7g~P@^6Z{YpH(3wrK8nUgfH z6TFl1^Y4D0;764YvL8r(jFMk|zsa=cxBlJg57T?o>aUILKZk6db>8*YGuNM8%>h<+ 
zy%-d6*)7zwoA2aARNkR@3h`%s?&1f0BtOqPA9+tnDdBg8Kj6nak=JU{u7)K8YfBl|T-#?W1n6y2M+Nd9$o+YIg_fN6(iT%*&-U*uhDcmQ{@AOd~ zCuH*P{PTFwJCFa*TJJ#ph3e<`2`tfYexJbeER0hQqMz%RS(th|hsS;ruCsA=o#FmA z`!4%BA%K=YqUHPB{#t$DtMSX({xE)&={Nob)1}?m`R;2WT}%n;F5+kJz4Xoc-Gk8MhL8&*?*?*C~66uF6UC68QyW8+E{Q&vxk|^Z=O#fv0?|~k%_7(Ge zx*^}7&L^p_e7&l;UU`vJAn%K4f9z{{vz{|4;tjk||8Iz#gCCHTT<%`4{K#w_J?rLt zA*~(Lct3aG=ZWO~5v@nw9WkHv7S~sQR`AU_PHH;JSERmp|6{$!%2BT%UG-SuewRsl z%s7&2c#>=I+&)H zlM@<$jmXe=%~sb`**FVw$M;vH_| zc_4=~)p?Uyf=|22IaeWu|V5arm z#(8=e>(YArS}wKqlTT}Xa*w3O=X(5p4d;6N=Pc}ce7l7w>+vGK&i5PCzWjsc4}PwG zjC>E{p7tDVcTK)Lr2t4x04QOmFwkcuyu6%PC1)*M%P=id8@Vm$vaVOM_fl?yQ{6; zmcCZgtF14!FzHaW^BSRMbco*-Qto{GmTgD~e}l;-U}q!86>p)Z;uY*Z z@n?N*<5I&&J1*X*fvv45RE9eJy{inx>TexVCF5`}5{aQr{Hz=Q7xzxmqmtSJzj-^S|^L z?2g$7@7GAbN7DWN3&`|BC2e|{^W9+}c|3|@xDhc58mKo1Mn4I>_dXCRMZofwf5LS^6GvSr);J7^7 zKz*0)xK;~hw*M7&R4mMX6n0Qi$9qfSIU1YSoa=Yr8%!_vQXeO4*#XLJ*lu?3 zMr)ra9Z1X*GVOsM#|NAS$@s_QKHbN5v{H|zsoB3)>d`datLY&OGv5;RlD?9k6Z!=8 z!?saP2uqoc_u#aC32Ob4A1fc8i3k1%_--r$(MjrW8YKjS`z+rJ*mPkGDjZ>7QhuD9??%TM{s&#AaQUuNm# zbAE1;_SEMIp=NZC=TXdF+J1%KJ1zUc3a_Xqo-2DPCQI$*>qG@N+sk1Ur1NGc7f{}? 
zPpNjYjbYbo;qhfk@0q_RH9J{1J8As{@9HXC>z94A(K^wSOQl>Dux`#dS0bChO*VX%YnovxjBkD}X0*ef`UZ{gy($DqNcbrD~e zgxxvVC2*hhy;9eaZaV~!xNsZmo5?;j&p)icZM%nXOpYQPVLMtD$NLn6B#rkLBw_exO@#CxUu{yvkJK3N9_{8DlS+cPvsJa(%$^Ss06KR(~c=NUgjc){$8jXPqy zjIXg>5{C2dEV6F1D^;^A-DX#+do`czn^LZ-!xlDu)pUM`_m%p*s&T&=^JS&4$q&MC z**vo|8%*!n{2^;&dQP{~+Rx*Bs(M5-#QBt+KT7TUfa*N6GaJm#^zK(WW!+|H>^)og z9*FS=tY4n9ogsbP&d7Nb#Z%Vr$(IIulZ}`jvhjUp?+fPhEVnb1x7^OS-SK%|qn)Ar z<#xvC8qY(zU8xveZdVps*zMBA8ZO$Avzbp7*8%r-PGuLqq5L{;c43D3Q=IR7kItLq zTo>s+ZgO`MUt=HH%IUi?l zG`&E3Ap0B^rrz=Ii=L}>s;rmxTIO?FZ#F{t%etwbWL{o@HhecrzBaXI|n8UMcTDYUSZ{1)Zz`(Ce@-k$ax&oJfp&=CFeuGgNq z{+Z74-}n4Dq;x1HW9sMcqgfU)&i$X{POHp4Fg{|e|Hz>XePgp zeoXbVj~}wu;~LKF+)Z)ZzfjkiPiQ*eOnx8T<{S3ibFti@3=E6=(b(UHy}Se8-PZ<5 zhdT8i;xUc{K9>vLIi3HR?byKiRo0yl&-*wp3fG|3V#f3Mv6m9-mh5Tp=_wlIxLBCofXAcKzl=slouf)iS3x{YoLyh*;-^ca&d?S3otxMGoH|LYVb2tks zg#XTs^dC_#S+v~vDRKh+QJ%G06TnB<)lwpdmBpp?=V?0z##X2w#;F(bccMBqK8x=} zEw!-UTeQf+vVkbB+VD=4pLm>$NAU*pEVgHN*DosbUEAGZjj z;9t*;t?)grV0Koc(C_L z!=+so&v(ImKgZvL4VSEB`gKPYp07)l5{4V)RNm!Py$gB$Cs`YFdF_*T#6h8K*Dr2;@EbH-Ru1*;1QGEN`zeRr7{fc31wZ)&&_$Y^$kPl&O zEz>XMJgF@2BEw-kPR&u>3=g6%wHde-C*aArw+%gH|}AbICZ+pGOTVt&HxiR&fg|M`C5_WwC@Ar}_iMqd)$pqk4(I{7mhVnAe;6a30PlBYo!{|x zeHiz3+E^a)A^)b>*Li`)my)k&MwdgMzXAS#Q}{NYY2~^=z7Nwp4}=_efAeu^7&}3{ z$B(EFxW6nkbNih4yAMf^So1fI=bFcjHm-}~MjO|y z@NuJ!>!x{Mv~SK)tM9*PeWA{NB<-SHk5jG(4su*oN|rI-?oq)O{qhXk4<#!O{6sq- zm&%uh^XXxX`SW`negE!vwS0IGk|^{sxts}}&!58z%yI>v-a*ADU18_DdxtE{^Ke=3 zu!WBzyX4YFy$}2=rF_GCpA5@{+gfvcudp70~O*spurj2@Aoq2aNe&4-EM{j z%Lwn%F7?yPR?;uOl>Wut>Sy;Qs@Jn!XUL}-+wJ$0gc{pjZT*s^v)$EJdk3t>c2`?Z zSvuS8>zDcYHZ?%T2^ID1LhP4cDxRT-C^$hneWVm}H z%lkfK$2|;p%+>TzqnyfjGK_CMZt;f|fBAmW;*V+kB>gS=#n1D)zL;vAXRYKb+Ji=W zTkQWuJ3DYK`ukd~H}CHkD4*m!B`_OK3BE3ke`wrYUNI41LrTY$xd}yTSu;^3Q z`_P-?FZg!!{Wt7<`is~v&%)mBgI%?LTeP>Y_-Tp1kbLs=qMiNxx`LfGd+7Plj)x_^ z@qOe_KdkZ@&#OE9XRyB;1V6|bKPSH5tI9s@G zPtp|~3ODPe9!gh8Cl}}TC9J4ecu46MUIeQy>D|K&kCESb``ymjJ@vz*n*VIS$7=qb z>c3O}z1{33m#gykEoBU%_SfH2mM~myXUtx9P;aIS%wBd-zorY!UUsb3_M{8UUUpEA 
zhXqFzp1ga>_FiK4vUQ_|v(1{E^oaon9&&Ft@pXG?{RaCBNO!$lhQYTm{Oqmxi*b>| zd4l@=e#VDe&0gMY_Hvck%NxyJzKZs;+S{w;svWPUZ|BUj4LuspcCDm;Ul;vuo`aF| zfoxBF&zSVd?=vKQ{60fD53l+C-Z8mH*UDe4aOE9C3)8OU?@F4y#QO~QTE5d}pYJia zZqI4&b9;Wq(%qg9S^1Mze%Qip&-Yu{?fIyM%}&Mh@0{=C_P5dQe&_5h2AE~^xKbq`q$>8n>c(LP$l}~32x7prq)bhE#rQXZ! zE%jb*Z--dlwD$HZ)PLW$y*0i1ZQI)}wlCV-l@@k;+hbw3x2xH{Xm8gt9PRCT4L94{ zKV!SUZF~C(g){LTv$v#A{#_R7liOR;C%3nxPi}8XAHUZjx4$#l+x?_tyw9F;n9rXn zhtu2JK`Za}cF4kRZ-*`H_I9s@r?$66{je5#r>@;gzpHMXM?ud*&$xYj@S3UY<2Mz+ z59}kyckvzuu^Djj{Qq)><97__{J!E{%2(f)!Fna^M%G3>UP>reVbs?J?xFr1+em+W zz4`+O?_l^blaIo9S>gG(7>J8I*Y{FqewQw>`p;00ocH>FUGXj@ze4~WfjbVL{D|efFFJj?ZoD_oH%S|bktvcEqlI&e5K@V@Z`SF z?{_;g6P?Z8KWFcK^PbOA@)6a4Go4!rqsI5O#`hlMdzbOOG9BMsw^8v;BQJj=xrnl^Sp3ug4UysWC+?nkU zhfA35>r|!WB?@2Ou_xVE7~EBa>-%h3n`TSGss{M>9Z%!=6YPRCo@0OCGND_IaDbok zYZJqM4}!1HfzE$bnTk*BKysd#_@2jeBCi(OHp>NhhyD+WG@cVFC3|!tf4=oiDcPxT zAwMS@G29^xr6?PuxlRX1xpl4ZkK{%5c2*nQV^o47mLj8Ta`8d%M=M zob8Ju{q>UW=byYEApTCoTR!g>zJA$xg&-=<<*853H6rF*`o|l4@^*(h@$&m=!i|D} zxV~BZ06%PU{n4ZNhb^XGHkm%L?}fr9o+}bP!+e{lSF(8**x!9lW9h>Y@;^Xw6s=vLUp}+M= zL;SqeRQ;`5f&BNVUeER7mBho|&j6pVlziE|)7t;QKX(}U`8qS;BWc3y9p>+6r2ftY zEa3IyUK6+po7bZqXMRDWbp6cfHL;0t*@2bH7t}k4QJSxF0qwWjY48n{2fpj%K12B6 z<3Rll+P=-pd_2H)ootzp2VSG;*)kswyh_8R zDM!c;x_g;yg!Z_z?YJVm+3XhN9w=w`aa>VudASyupjl3?k=*DZF2D0luB1Np+x_GV zlKNAk>JT%jM!tjGw5|Upi}^%2Ux!X}__2kXzt6aSQWs{=LL} zgB#mz=od6fb!FJOEQm-CSvF8f!Aa83VooKpJ~B3zTp0fV=R?FzP_6YerP+-&}fg^=R1rPh9a zrloQU25&7_G+oYhvK{5CEIe#so>z+R)RoC_nXg%Tx8_%UtMJ$nPDxf zudy)cm@U1-!pAKC8#KJNOhqK?+S(tp^3OA$orlcI+^BH zR0Q6Cd>vfoudFxT$H@K??`s@isOiDx5k5Zi`7_h<|T| zzMcQcI=UE+_c||QJtik{pP)_AkLz@9|GZs&Ez_KTdxU&H_j_S|--GWdgxMxvtB8k> z-_Tz{S|8WA-bf#{^KBbWsNANH+W9uS4=#OlnF7i-oTOYoTCuRv`B9VG%QU)h{_by- zUg08Z*F#L737_fiaZOtcPditTwiurFT~XR%c>4ElEfuX$^a1f!DfI`#9U@dguykr|J0_+v(rGTtT|z=N9>$V7ANljSmbf{&FuX{a&uS`2EUZq49f| zaG+knXSWAs4Ka)y*?rvK_WN4X1xDA7L4}_#AYIcdNzdT=<_hC8t1G-`{Fu?f^ybAz z2XrcN7)L5TGTu=6GdmRL8yy?juI_v2_h>O$*K@h%J%Td6A>URIk8piOGp5%O&vd!r 
z{UWxf+Wk$%tJ?7|^!CXz7Kx#sm4uhl6}(9fq49<#8FpLBeEkYV%}h%Cl2 z_q0P#tR-GPzLYq)^L0KT*f=#DTB+?j=lg5VGd?rdZ*pw!5ryw~{U?nd)=$rS{c_ob zxarSpaNz2E!~F6BEs^M#(M9=$+(C}MvP9q+e+TS7!9KBL&|t2Ak?-FY$+%qfBb={e zA^wM+pCs2v+`+#kxAJl>OU_6qSo zC-Hcv1fgefZ_SO>33dE z_`WW)&hiC&_r=#~O8R;g*X>v`(R-#!3CE2C_3he^^7WOU(Qv-La+igDeTCn3XUov* z<=!Kj71>XHqlUxSY1M~|Y(7;=)@gi^&(I%e_rD7Nn%A#{7~;a?rblf4A14QBTn z;e~od@dVtjJ_B6mLpWsc$2k88%L%{#>Nl}|oA3F26>u*RKKpzM;}r1A_qB@ek!w7c z;gT%^ts?(-n(yO^QnFrR1m5@=rB~R zFbq6jD|6A}yIMog-@BRa^Kj%lj(PVXgClaU-}9PW`??MCeH8g>maipAMx}wl-j6`~ z1kxY3bm<4+udj3Zz7FKHOR^yU#Q@gZ@`6i4mus_fzfV=N1 z$kTeIQ@$Q5lWcLt`dt2&e}wmUsa~lkTF$>GEvZ$T+;1Qu!SZD`e|c1)OTBfjJIZ|l z)VJd%UkBGSJWBZ4S?*hx65c0pzTr+Mr={fYh^O)YY~{PSZUNO7#r0kAWu|Yo{g-aaIpBc4*-Mx&RA~@=T|I1Le4OWB!*J*BFDg8r&n#NF zOv8)Ko)r0gM*l(D7@}LwJ<@m+=bR+0*f(nm_7+ z7a1LGpSExIA8I=AP`cejdIX!FWlJ1O**ox4Ax8Jte#x#Xwc+Z7H|eqGG_;||;XHqCB01 zJUy({#eM*Oz=2+Kb^O3_b8bi8rs=ZYO}ThTgPQ+0L0>j&Qa$@n&mQLUcA%a{J@6S> z8Z6>BEagIdpULh1Y324kYayrXufY3P1rO8%{sp_Iy-^QL-~WGx?Th`LhTz%v@3$yC z>=(d2Q~y@`e-?E5?FKq+R5}eBorb2RQ>t{z`}a*e?O?vf;am&`29x69lLMh`y?~>>s7=*_Gi|2{`>PQwVvmqKi_hW{rQ)L zP6M^Z{wy6u9Oi2}{0rb5|l`%XSi z#5iQS^F?PG&V^k6^2y_u!LOT=61>6xyJbrPzH|c}-%JPh;)ZfBXn;Fixn&LImNb-G z-;n;g27G!N($^HyC)O+ZlZl(nzsCH_>4*C{zA5U(k7FKVc3|Rl%!l?bYM1_O<5vIv zA&h-o$McJpi$M_Q{i6BauTN>d{^gDLe*F)NKcoCac{sn{IE?)z)32jE^v&YH&iCgW zkMb-LK5=>dZ48SX!1;Gtkc+MY|7D$y^-4Ul`aX{D*A(f}CH(jA9{^|aea9;)kGs0m zFP1w6de#*U{oVtD1bSYt_eh-5_W8XRE@$X3xEUwEhwv^1;Ojqqv-qyI+v&bp?01dy z>udSE;?p-vm?tjmwflmMkErkSKPKZ4Z9V1{D>Ywv?(OQQDF^0#Eo(LG>n=`DguybW z8|c#WjPzaDM4$6b*B_dJuFyz1U0*t7yU>o|XKcq@^1Jo9Z_ig`%Rtusr#a;Cffk9x0B-@UV0-{IZt zsPAr4@+HM{u|I#+RCQ;~nba@7d-T~h1AWl%ONj_4+P9l}y)66paIUw@EwqE453W<>%ZChG1XAw-~H!uXXBUIGU*iux^be13kNhI2j) zYkvaW_f3*Z_A}%IzH)D<_1mGQ{F~@FnKkx~0D_0)A8^N#2Pd zT@Rd4AMx1Pk$dPO9ZpQT-}P9GpM1aTnHWF$p3~DYe)2u1r(*o%cLq*se6e5dZ{Y7S zgvZ$~oQpUqVa(IfGr-^Xw~3yq9Z@*>zW!kg?^WN|^PJyuZyM7(hM4a22e(@@&KoE< z&G!YfzW#R7C+honUohplKi(G%sVvB&>4TE)mN32|>2gmb@jXEL=6uWPI+<^zEBfbT 
zzL8FdpUgMXOY={9CuCIXMf_yGk$#PQ^YaDq9e^5IC;PBcUimm?VQDt_7{CA7#(3yC z=j&%>{3G9YX#M#*oasY>2R-HbFs*+~^9_uBPW}Ae_W!2g?0PEHF!m=F_WM2gE>U(p z?NPPiUi8?%RuaGX9ft8K?^h;&BcvnZ(N8lwrvmzYZc@J@{gmFDeqILyj|?Arg?#^| z_#u8We~!oa$^DA-Y39#S&0pltUxMCswgc;@+RqC8>S#m1THc0!^$pT1o|pNuh4-o- zYJYFxVGDDd6VJ;8*5~_9!QK(>U+|-v5PUtj&hq^W>^#9kHh!wJo_@P`2y}%D4=c&! z9{zn=Z@=vqm0M^JJ)hqf8qY6Fd4MP-Jm=*6MSjN9?jc-}d~)uU?fV|%YdS;)|LySi zpQV90U&y!qw!a`BBVWkh{x*(JL*xtPpug=&&F}Qi`9itrU;b&Pi@c%7^|hG3lQ4ey zK1I1jI=(ocHaFE@!VRz{y;n$^>AMc$34IslH~T( z`+q6ndBO=BmrZL&JJ??V2d1NFM<>5Sl=sug`uL2tv$?;Yj`5TG`>7Z|Ss$O&_@X>O zzq&pKexT!2=ek*@dIO|U#^FLN8@un z{F@ebJ$%r@&#Z^D-qSj7mG_<1cYW;l?q(YX8Gjq`zeXCZ=YY_7IR}LC!oNtGubcb& zKjH^uX(^s38en;s8>HXzQ<6?Gi|26=|I};5PG4mCQIEK$^FIb}s`Ed8Aoxso{wF+c z?Rd~k!8P8a7SERqF})~170AUN#>;!#N>|iB zt4;LA4iX|5oqeAVb{fw2b>n;H7fy42dr$z1JJ)+W<@;5|CqMto{v6`>dkS{O`=rzD z)c0BMF@=|Ir(Vr^k6U<|0?K+%XgF-ISolc`(+>H*XO54p1H9MTm!2IyS?^JW>*tDc zc+|7m$Pw0im(hP!16+|qa3J!*;uZAg2rUO4vkv!o3wOpWaQp(~WFT`ewGXdY_$do@eKqH&Fj(Hm=S(s9!~&upPam zd;AW>#%FS0x3(ug-%NV>`DVFy-_nmOocP^IEA4>n&yp^!M_JD9Cg6Oqr>p#hQ$4$Pz!}S;W3ciK0L6r-)6X4(9 zOP6YX|Kye0E+03A`bjO9pPRc`(+BF$WBbM|J>KVWna1b)r$20AtU?IA>a#6u<3Z!| zd6OF(H)8&yd~VZf#V#7ZY@U&)o1V4x1^M2K?P_PaD1XZh&IJbNobSDxseY4}y2;CV zuixfl)3o2mcLVibV*8u;dcX0F1|igc&ce>u?G~QQSD%0RIL^n}MLWU0;V04lx`hR)9)xY2f_ChX zJ*r~3F75X5vY$sszHbRBoR3&9h9sAgUIm<{w8P~VhO>DKW4kBMgV@gIdC=NC|D<&( zqn~uoN%NqQo~HJrUDM8kw&m^blAlF7D}8JpB=<`Y4{6+V@0Q*#-~BhAGwIX{{MjB^4e4N{N0p(<3i1B!?eIwE(UqyUh z|CR5%h-WMLn#<2AjfY&E-8%5I>~8{pQh)Pv(KSI*9Oi+LpK1GBA)m_Cn3WTHL9XI@ z@E*n&*BusW|2n4aEVo?D@noI)F6&sP@qT{VzdH_QFSCx7nlAUtG2a-&+4VMW80*sb z?0QjX;=&l=P5)lzfadpkIP~Dp$hRqw#pIr^K8^VhTo_$QxTg1t`QI%0!x+=OT;~NF zQB2;^C7=ENpKHXJLGE87{$1kxy%38ZLT-^0kr&kH^Qg}2Sij4c%OmDZaDE>`);*W> zs8PS69j_C-e0^lT!H?&Jr5?C-R$t%TS7>;GWSpcwAqVnZj=~4rRNyv$?<0r8#JRqR z_flM?(Me`<;OD9GbH7Xvj~kr~Pt@}=;BR<_8tLojDLY$Uqy?Mw-NS^3@^6?D&!w8- z+2UEWqt{HrkMy5!^h19JhdPIifAo@d7T+gw57&gh*)ib%swwgR5iR#@@dsW&v6TF} z@)LT-o!YOsZ}o5ghWMGjTge*Ur7a~7v!B>`bJPb-TuL6&_~!AU7#(rxtJO=Y2dEcN 
zZk9xKKE!l{;rxCJ;Jxg0alACD_#z*&`h6CD|3#6`&`*932H;E{cby`hHLWi0clkO( z@jEM=GbxU{Msdz3WjXNgmaB!og>dzQ7;mEYPd(nml2l1|n`G=-x`=u}@SABI`!0d& z<2WDJq5cocyuCP{!#3cqlxWSJ9@H_o&|E%=(`@E38>DT_^BS_!V z|66zUw|!O9o&VuRb}-!k^!xW+bTjh(>e`K~`mM$HeoIQF_-LPhy5dKl@e6%Xw%ANjxy5}jQB;U$0O<9x*b^~L2^ zApRSh|I-VK=UHYQ{^y4f|JuE^BSPmU`BJ&5BZtsT+P@KUR2Zj0&V2j?z8)36hWHNL z8Pc!j>piC}%yp}*mGT_w>>pXjNv7{+efc;|Di-H<$K@m+FA+}jJ~``~exIE9<@@9| zkK^!6=VQXkWb1|GYi8e}WcHot=3CguH(#&lC7bW$d~Bzbub|wW&({QbFL`RZ?qNC5 z?Xc*!ficpd?}9FlI}Vd>zRnjeqnv^csJZjTdli7wY2d|#=XM+8B;<>H?O{4(UxqnP zXY*DnUU!$S)&5*A(N31--Y_XD{b!jLC?{`FALk;zCFRl=v)x%Q%VT{4B+I%fKjqSO zELZwjg`;w*F8=bL~d{F6G zF3nZ@P%d4r^`Rd5msDF#&Nop0i+UgZ4E5jjG3a5^uQ9!-&(ZIY|Mjw=p4LomT1KQ6 z>3?;-;c18VgKF_{TFm`Id!^pwn%3dSDyVmoj-#AUz2*%<*>%W?*ji} z`6K^ly^iS}%e4OTtQ)?k<)D=OzVZ!n0y@Bn7^pn_*7L>wgL&foemGyZ1Kl83o$XxL z^8JvKQcL(Si~V(?O^fUJ3h;|^FKpNKY!Shv)82peed^HmwBi-6qV$HV2%vKS8;v{g(QrThk`sD7v&V*L07!-i+pvCzWA*YD{}M;~MUgVt~Bl*fs7 zwj!;wo@z%g^VM0;R=YoNbL%f^e0AQ4`dc^bRX^-8x(gGO?o^ZV{VEAkO5}1xaRYVc zPY)7)b;C!Lj`tp1ZSXjr+}y$atgXFvE~3u%WF7W?pxrx=b^Iy$x`6fXl={K)Y6tJT zars0%II^|l zwoLP77gHVp2bRJ<_Y=8%kCqSDF+JG4y1(r!77r~7{dGda{cZoGKHzBlate5`@0Rb_h>bG4|C)9o$vi7*4?OQ(a(WNNpAoV zy*Eom>sne^=?V z`D%W@6zacCdR^`7`@dmf%4MjtA7>5c=Ss<2HJ^{;{l2^?SF2pEXb+p@%Es+sjCRxI z3gZIUwZ0b4LlB1kb~!J%m|h-)G)cVBTm2nR(!ZVIMGMdiIG#rqw-5O_q&E_OJ69HN zXaDG5VCRf%J|1poJNg%JUK0DoEbAAqqu<}od5Pb{0ef{!>J5h}r>-x{Ek7=?&HVuU zyi+E2&GNom^ZU3$<`=}<_Mv?r($`TMY1!{h03+eYYaa5mbJdz z;@4U{`IWW4%Hr2E-t<9eQ?NQuRy}QcvwYDeuu;C}>ten39eCD3c)s46b=dgH_N%fE z;^*Vn%*J!YcCN#(*DoNSQdIN|=~m2lO!JLudE}FSlF#``{Z-8WpwKnWV``FF9L7C+r=fT2M}-fC ze9nhA$dK0Mudl`CS#f@9@_g9%TwAF4L2iFfR8ra2>9Yid~$;^?Jc4$T`yd-lq!% zFVu(lc<<95G%G2UciTm!_aZ&uTxG-$sZ^J1E!BWI z1$$ow@-!x&=7sHZnZ8Or?Y9sh$R&KAH+jFv$R0T_G0k~C8&H1R_p$CUxxVe}@Ja_e zG-EdJl+ryNyw<|Jk1PyUG#rL#hvT_K*$+WG^ZSoZu$H9sx^C}va@e%!akFmAS-unx+MD>*HKWaWXFQ<4_ zTfa&DW%HnF>%Ul-bgRa9!;Nn79n-l6@08^?zP6rZ{K%6QPyOrf_xOFNaz4<~DTi`C z(868nS6h!;m~!L$6f%w@J^dc;n$g$p;dXoD_FLXrS2#ZI@$-ptFSyZRucZ%bxTq(F 
zMbVtie(g!gR~$d7eiQR z+KAW8=eo#8*(W9++Vl26|N6Z{@f_fJKi73J`BR@u|Gd}#Rqa=${`G;&8rB@sQt&hrM_5groVf&_G5$D4?j;eV*RekZjc_++YPoqw;QBG zHm%(-KAPR|@t>RnWBcO0%_l5OxqJ5Z!}uKShtWCO52JT{zhScdDE9BO(fdVxkFyrd z`u=m2k00oJ>V@Y_--{s@=lid@Jv2RR^TcqN^>7&HII2mn<1mA`d|Ww5c&7hk z-<;vI)UWAw(|f(9-?yJq`0{-j_4M|W7G}Q<2?{?FFtF+k6^MwsvTF&Q_ z&35Y!!u4_Uj)rmbpy|D#R;9eWOGQ0u?^EUDLDw${>ua4(zbw=Ae0)ke)f z$n(SDkjd>3;p056&IcZ3xR1{kCSizrHBV<@AKXn#zFqR-FIu= zZ~J||Pnfl?r~R^hHUAD)_Vu3v!NRsKrCU*NVm}M|aY)W_hJ!4>Mdn@dFS%9zUn{9upl>F7V zkp2TcJ~JJ|=yT0SzBg7%{*dFj^K_2I?-%y&R$NPk&EALsIm=OGrY5QQ%e>q;2P`#VPdemL?M&sFrV;JpsP-u*^; z@jOQV3Oko$>)C#Pg~BP^ztz8-=iT~d*y%CxlR6v^Y19+&8gD}qdmwUHX$VTxt%^vpMXG1FL-f+lkPaL2vkc6&tLjIinuRB z%=6QD_Xqe?J9a8v^Krxx4P$k5Z{hxJzZcKvNtxXb;ooBx<@lJumw9@GGxT;~Oy~C- z74z@Z{KqtZF@8wmi~B|UBn*4~rNz?z+K}cCcE1%Cldlc47Xo<|6E%5m04LE5~E^_3EyAHBQf3gt_=w|r3Gd^^JKiE7(DjRncxAR*MFv1{tH(A@L9L#>!*t4KWF9HuV<>?-$yU@ zqroo};r*Gx74&@S_pq*H>pl*5iRE8K-{05UJisu9qbQ&4As*1H=x1=y5Pn`b z+jUa;5_abKpwBd;{OPb7ySNC#GtT@jJHb zCc>L@YxmlG**) zVSwd)pB;EsB+Bv3$5*Gww^8~&zH9tGIi!RSI|TjFxquxN_8;H(`F$V*JXVloS1>=; zIaJ?VLwt&Qbr|CvTfYGve&_e0&-M}@d+({-@_a4m>(b@9+&IhES$Z^oc3`!YO;Ra-ly}Vun_r$G)eVx_K%F+Ldx+Au%>i^lk~S}1v&pl6)(^4=c7>XCAV?F@z z@p6$L$AmsUu15LE--935^2PjHH2*QiAJXRP`u{x=UmPbZpG|Lr&-V*Ui{FF)fBzZt zOODqdr++Es(^Xt=^LJpm^I9(+QP{X5^p92QrlZCDSx9+pBY-Vfzis8rmWQ6~g&i*I+gchbglzWUBe)W7U zpW62-azCEs#Zay19;V;S?+?>e)aUZ~uym%vT`vK+qI^lzszk}r`ZZco!3AavswuAgxLp6bSj)|9)XFC$gk>I24 zSjF-aJ`Nk5%XEomJ1*96lYCpb>;UI^u|0xEMau3pKjKlNe%ji4zrx#W_g|*(Py3=1C>f0N&I{GE)Kb^`Cz?DLu%B_J-_!1>ZTsYmH({nz;d z{Fh0-bd2rEy2-!TPfT9Y8u?MR_g^8m6)CsVe3xVIS844*#cPd?|FYKCYdEd_y88Z3 zR$4QD+xhCW_6|!wqWnv1q+7HLwYOV3`&C-oW8uA)f47DAF+4V`;k5RCi>F+qHCtz~ zbK7a{S1kRc!Qp+U@?E5r=Y6M9@7DHPe#%{X@V%Cg^!9VrY3)(Vx8LB}dX9ZBmOlQ7 zm9zV3Y=1Yc@g5pk|FUvEZaQJ%|3}ODyCd;?wQj4b@Ce-x5$6e^+~qV=_)>)_Al|nDEWo_N>}M`aNhsCzq=mUDd`nScm25L zA>@|z-Rn7i+ImZyhQk=;*!h>+OX{u2XY21_oN^uMJf_W1`>uGcP_RjM`3niZMtxYl z{N)4dL3>m#h%WWC&j)O3AgFes{|cE1Yu6CAQ9bnXa=cyoPcZ3Hy6g<(5lry|k?B63D-~AwNcHrG(?> 
za?5459(skYha&%9HROkhD49UA)KXuq9Gmf;PW`lGd}v_e80l)VO=WwAgJL9kst8Q-vhwD z$^OFr0?LoCr7ccR5^oK?DBQ|4`qN2K;}iP~Q`RuVjhm+51X!v^{+SZ@t9N5xkWi?Gg}& zeh)hJT_6eKGFu-39K~yuM#udiUoY_W!>}CCq`sF*gT(pzhu`lf?;WT<#QF&8aXJ*= zu?7Ut<4y8Cv*;tu-v~DdkKz2@Tfg5Ac)nTUGs6@6On{d?GBrNI)?b&l%$*URpDN;G z^*7?xh)*^EEQO{Gc%*Ob(fIy{Ilpu|KpsTowEeG~sy?Ty&1<~f)j~Ze@9mB2xo>6r zF~5ggYvuB%^YVWE8P{`tz1r^2aK6R$TpM>4`6VP*e6KwRd^Z>HMR~`!d|{X3bB&M% z^5*LW{@#0B&z-hCH);7Wq+a6pfcU*7iyvnEQn@!rNIj+ed3)wh)gHfB#QAVL;mtR= zn>R5Geiz&2bi84Jdd~Unbe!1Ca*+E3_4;~lnepLDlivou$Ms+<@82^5&L>|f?R$rn zw|+GLUWT3CIJXHp$MxJRY(2Mu|H0OEvHn|;{BeDA3VA>}>i>J3=kosR{l(}u|53># z?%Q796gpP~j<*N;TH8}Ur81u1=MHY5os5Tl0{`JIlmEc?Loz2Zv=ZOf&)-ovt?&R} z^^3h@7IrI$B-lC!?11Xiw=%wSn-ma-cSJP4&Uolu*pUgF9~b%mDERge^SRtlaN8wW z?EKpgXwq&pR-D({x$QL?_WO`!9<6x$IRW2i!gv3Q&wDg`(z$;IJWVeY@f-%8Da%JZ z-^=jgE%fDHeXT#+X80~({6e9}iNby3o%?@D6CB_4svZqjZC?!d0z=$K3rK!%L20iH!Uar{EN02T*1nxs$Ls~R8k zU8TWz4~E}^yV>j@_y)+2?E;K1}Rq zeU4Y>w%2RKH9~Y$E$4wW?D_)jFY>V@FI~gQdXLf@c%xi?56E$i_jR6PJFY*E?T~dW z)-QO=_VLr5p$BLs<+)d@-^^Fy8?K#(uh*{Ba^9|Rl?J6KSjI#%ld9!)?Pm^6;>eLOHn(xJyNjs*oeFXR15m`@@c&=JL99`R+#m)gE%*1ewcE|py( z>6z_YRY$mw<#=@N=KcY~aK-aclkXEAR(QyVtiHeW-P}KVxoi!2e*oX^{VnVtaX+HC zKQ%0LDcXg#5=Q=I7*~z%)pmur&xqMPQCguue1tjlycI3*a6RW+2hx0AzwL9R+ZeG$n4#@SMF_;9yykDyV3Ka* zf;RFgy?4;S`o5I!C-^#4sF7Zf6J0-FML52H9crX|On=DJ8Sn2<#Pr>s?tERW5x`UN z*-m}$^p)=_m0z*^bzYwFY07>P%Ws3#68_n^!Qb&nw{gD@?XF0AnzBEl+%n{ElRx6p zD&;%fCIc#Q{tihxq(Pyd6wvsEl%s&&Bjt9Y7swc{m+SdqobB^-I6WL>I{*jt zU$Y*$rGFo+5T3+$Nf`ZNrTA-Jhg8ZH;O&%nqK(Ft! 
za6d)v57hLsoF}VD{wIXIkX(${rh5c9Z+^6$<+0(<^k4tNNt)3yPm-}I2$1P0$`~E-pF8{=Sv){=0NAvZyuwNk^ zE>9=F(0?b%wdv1L8`Scy=e&PMJz{z)?thCMtNbr#J@Q>W@xOkdM#%br!i(RdT!`G# zPi!9{yIzA5CjQmydo-ePKR@d46`r5;ft}F$-)`}v!&+bULe2v=UtZA!tb1zzx{3L= zUcE$rgc|u0>ieIu-JlmBK_6(pwQ(={H{#Ks;jgiIncue_=`8vH6v^$w5v|YnqZ;WQ z&jSeDB7GwtZWCbQ!WiWfcz~Z_{7JUM?Omi7_o2eU6HIr#WqPTRK8@}2_Pf5yuD*u# z7xwwJe%lX2d$he~Ph?$&?KM3Nermf`DH!=)iT53cv1KYh)$3oW39-Fx%vU_uq4kYX zPXJ%7uif)e9~A9_)^{V**9m?q4<@&b_9eF0?cvoj2@qFG-l6R{n|+6q4fBXcU!2b) z&@b;2cvI~=te1GVTNr;LjD3Jccpomnn`@VyC`Mu1=`3Tpg`27v$ z6&L?+A&xmY=XM%D!EcNp{r)n)f6V8{u-iZWCwV#W42l7C z_(ixtLY%*clG%N`nY|M%?>B4tIKSS_{C@AOpC5<3O@J=@E#2th`yGDHHhYliOE0{D z^Xmq>_`b30&0_z7e4(C?{=c&CYVRI^pA`W)Ve8?Y1OJQd6?!4QlzdUcVrSKFXU_WI@kxbZlWAPFR zh?4?dA9c#ztJQ_rH-J^5o;U zb%LOdf3|2g$#+!C#e7i4;zEt%#%uvvL_4nK9?_trkH&JqKRjmR(S~;TcsM-HaWCf6 zijU1FAomJ)fN+6FMSkuy-^ZhVE-;MQI9Hge^*%mV`4#8sFD0BXX5-GsP7_bFZ)-MN z`eP?GzPd>g#BJWj_=#KSH=PGPbTR7@awwl}wfy&6{=*5=rE%Imi(fa}+P9tgFdiz> zaqA|@ATC?Lb+b^bD4gno#TxJJ-znuP665O?e%>xy&IyV4Bkbdz6g@G=`kUNGrSM#C z8|mxko;w$C9WmHU z=WVAIUa^0G0_d;68;Rom!kgMOTXR3Y%Ho9{$uNK8XL;72YW@Dbaw+*_#HS+ZodciJ z%-)XF-emwiAV=veTQ8JS%3tfJshkUg^Zt?AJl6Z^I-$4n^J>i|UwhcPnZT_`R65Jn z%>?c;3Hv%5`a9Bhn!hHNmk4pFuS@(C8~F%}w^IByWVh7A9}%xvR2&XJm2+fTPT)Wu z(+k)j*8^EMo(IEN!G`)xoUGr+DX9w8-)7jQ9C0Xfu(epJ!p3n4vun~Cf6uxzd zzh(>Jq_f%2(u=r`pI*py?`$6JM{4K#vR${Z-yR};quzdyaQr=*aER@SdL`g&D$*}ODC|~KpS2JGNsr;#})^Pk@#MW=K z?$>I3)^Vr$kxnj0^`9afp_jrLdkx{V8y!UHfR1R_GKos-gax#8EX?$GYtdTzP`>EQ^mVT7|Z~RpnP9M98e(gp}-^cW^Rg6E>W9j{t zZvD6R1`CrP>EWNYd>bs@`fZKp^8H@ew6@;TX=l^gIt$bOrjL`pX|2!Vhlx+^4h!$M z@arthaZFmf-NGDKq_x{DJZkyZTKI^C*I4+lh1q}ndoVvA)L;I%rWfZSC&l1QSU<={ z77|YHAoGEl;xN8EDfg!M`I9it^l+H@{hXbD#~2RM?v|1dD4ePG9sjGSFxX4juTZ~7 z^Z9(l-#3O_BHhQ^n3rR}5%W#?DeuSa&~oLL?dpda_Z@#$^DT|{B~$MS{u=Le0DP%h z+x^SGDf1fZH)w}D#hYq)(thYZLD1*BK2L3Yk1_Z?f{&5^P46>`-rV=|tO@lrz0Y_J z(}0g;Rfl|?S5R+*uIRVGV^DsBe;dVQ`aD7BK`f^9&8iHb9Ng9`?$ck{6&vZ_M?C_& z1d3(zO#bGvKRLhVo1S%kq5a^~#9=@cTm`8C;a}FleuH%iIIq|HyPpeldGzz!4i|C& 
z#I{~bJfT;Bm!CV#=NY3a&tdE^{qZC84<4nQ-wy35?YR+JRs4o?L}8Te46&cD)N<+g zM)fh?1Rjm_^!bk33&8DQx$^8EQhJ2lR&P9SXZ8Dc{ky?EDd+FJ?$)4$Pinmr6kB;; zauKSRwsYLIsPbbP_H>McE0UgVuy$xF;1W)-d33C&r2!8c*ZBIMpWpQJXnyWQz9Ut< zT+jGDG`U_lrs+=zE*NPBo6jz~`Hd11zTKf-GDqa4B0sZU!}m6$(?a^;Hp9>E0SLDl zo$OpqF!}TIgP}G+y!@TM1(3#k{v6-Kt=((+j;r6lfa@WlmRda9*T3M5hFxAmJurRj zNtSE>I@3))8|9*}g#)UHXDjIv+tJGUV>_L$PKQ?36An_I7TvL2+Y?qb$kVC@KCWtz zr&U@kmjf^FboTO2XD{z`_VTP&=EIG2<~SO3l7DfX#rv`IIqY^kj1E!mjSjoL9VTC~ z-;fTA?%=q-BwtwrQfT7imo2`LkN-eE0XdXw;^QezFSmS2eaF}3vXQUQvjFAmir(KE z^SPYIe&qDFdnYodx1HzBoZfaXiOaRym(0#{R690WJ4r`B|0L@e8t?1mp+-8&{;%aH z9ev&W-iN49J1^$=!`Fwx`!*`2$~G_X`E{PZUFld#zDc;(X>gL=?t)!3z2)|`a{(EE zFr2UBPAp}3p-ex!rm}+q2m22bueW>xrf3Hn>E3x|vt2wPWnd5ACR0bZi`gTDmjzhO z+%9ds((K`tq{G%L?$Pw@y$t*L2dB%#dkxNp3h94e7wOW)c(<2x1d5Iq+zy%?b=ths z`HXwj;d~w0*B4;_Cyf)-y2yH!z*G7Rus*CW6zS91MtH(6j(gTJAMyxUpZ5JZuYYTs zwZF~e!0si9=ap^U&fkrnzP?W>9UA%L^<@J_cfl8~vEHp$8{ID_9vDxcB~F*lVQ-hM zCs94d^O&Hw%Atq@aylI_PJxSZDEC&u`MNOZJV_2`{f)L`oye`y+2p+>t2iik@57`Q z;KAN?wv+mRbLtPQGKmL$E8>G5YftC#kjF1woy(=K>&%gSz-;SF2_N$;Ksj+6CXQ$*lZ@(GBpW$|m^PW#rS=>x>@{Z3J5K{U*oJ z;W3kUpC61bQ+&#$h)-oUsG@aOwEjQ0^P`Vr)?T>5j(UrPRk?XiB8?;o*$m1n;I z)$6*u(&McfOeR*-ce|6_*Q0QX<4BcvJ6DSF959IO-pumz+v!JqP2RJS)e0}Xnd$zn zn%ozx@sLl2KQc)CM>f*G?;iE<9Uvb&%Upk(u=SSm+)EUor}wp-()^I0@#DFiKz=|v z$j_0l74ZlPenu48Em z{d~G}S1UGx>`B|;E!CD8`(J>KUj6W1aZ;~?q{m#_KPFh0t&<0;^PXSs#<$VB{O{AabElI)rR zg3q6Qojdb)Ssxb0iu3PO&~9i1pJ$hncRhvl@snb+}>P}bI7ARv?z1A z*~i#*;z3_)E#h}xJac{f8|r&Wp}x%OLwiAKzyHSf4PkFiN%vy@uxLE z*MahJC)a&kZr$!?2aXv(NE6@Bnz)DU?JSe7-|d?75Avw}-qu0px3Rph zhh*I=RSp)j`^I}weSY7$go-xU`s?R}_&ow5Tvky)pA+;DUbKREVjL*Ve7f_d1)7d9 zFfAz+rQD^WIK|~~mN9H_`d55|a%tx!@_bzl`M#?8eB2o>r9I5^uWZQAdBGOJry?mI zH{bQqTVD4uB`CsReRihmG)kF`(o^vM7I zF3$(4D$t?F+9z>6^1r`}8w9?N5^ic}Uyrpf=C}Qh3$1KEAhEuN8Xb5*53^0gD%-~LKHq>IM%IZ37~eUt zR_X2hcRgE5)@TO*9zPpk{|xq>cQzml6BpiU^$gGtHh#?pG+EEmK(EX<{9soq624(M z<%v*}VTn@rCS(@3Q+#NW`Rg^9bl%T__TqMiAum@6eS*JtX5Vjk`+Yw?Yh!*tXD#2u 
zY5()@E&W{nA;AmsD(@I8J?5J|fV_advhG3qWAcRY5hwsWUL<(9K8SpB`8|{)i_Jo9DN%<`BX5tB|SYS{b`TkXZc;;L;Fc>?+w>|h3yu)NtR^k+!pm^ z-c7kOdprL!!f|-h;$!_}{!-=-x0`Qv#?O8EJui??Gy(O(mwU2V?-G`Cd++;XF87>8 zi2T3knBpJx#!dF#u+5J<+t4bZ!~KK@`>WsAt>%2t$JPE_1N3H6_})Bg?IC>-FHY_= z)UeM-k-zJs-~?UPYI>{2F!V}~@C|lgWrIEXBWa`CqvCiKRfRFqKO14c?_Btu$9HW5 zzQ0|>_eQoa$|1XbGJhH4!yZP!&VV`-!e_YoW~bcFg-wjl?yD$W!<$(y>zJ$I`ByW& zbAUJDWxEoM_xWq*owtEpvcIv6d>NrW4DVn$pLc`rfSmPG&raOM^u;FE{*Ec|g$TQz z_4QDk2SdDn&m7OO-fD1cJmTZ}Y(tmgbHlpVY6jnbi*VXl&ihmGoa}~|K(BKi;`566 zwvHO_f!J;3AEu9a0|?>k18=#blXPmx_m=G)8V`Mm_Jncnch5I^xcx3Aw%)gtdN;d+ zeppI9o!vn{EH!@JK|fqYeB!#7;ph7+HX@^oz*HM}R$Wx?I!4f|KfhPWnxc z#D^{m%Ws4iRXqRmB_!z!xSo^ex3Hx3$p8Kd?Q~dhx&f|*V?GK*v_rN(mBZzFgxjV5 z3di+6zw!Cq#|7_K3e1U${1LZB{;OOO?c|W8!+xOuXneb7i}NENx8Gg*6tX?-cB7R1 ziQ*sa)GXqUFhI+@*P5R79;Y6?hH(9RXop*rw;zVQ+5VTWBSm=&R{l2g8|7`3a{iXt zf64k9^7(f&!S1hn+w9M3zFhvO=c4=>d_T`8^BB$V^WyTX|HIz9fLC=~_u~6V9Kf`R zF!r{IYN8{95LR-6>|ldQi3J!N(BiH27TbiW3PQTuiHf@vk-}d_Uk?TD9|9ldr<_IW&53jkRy@f(IW!av|8yTKL}Tp)jYs2QTg=FU!H3d;q?L6t_=&-)dm`8$}LgVDMvlHZNR& z0NjLm;S!Cn*P|p6UXOWQf35O?@+dduX(k^Chw?GDM)4#^biUU_Cc@GFAY3x4^PP0F z&Wp;oFjmv)b2`VEO2aQP$n|mIfBC9km9WoOh!6jw@~W#O-QQCi%F5C5eg2a?c}Uyw zYC!`!6mOP($*#Iy2TG@xYlX; zx>es)r(67hhHIUcuUpsgtJ5uhK*P08%h#>@ht=s8KcL}Sr{(L`efjEiiyzQ%t<&;# z>w0K)y2TG@xYlX;x^>;MI^E(2G+gVneBIx(`dj>fhHIUcubZddaNcdY#Sdt>hD{}j z?^ZdePPh004c9s?U$@F-)%s&?K*M%z2J%7aBYuk?&~UBO@^#bdWV*!sC`MS}u zCEel&G+gVneBIh{s?#lgK*P08%h#=hRi|70fQD%5{m-Qou{TQk6fK@@dFyJby~h|)qmCL7C)fjTBqgfRy(5#JxEs@&~UBO@^z~|u1>f3 z0S(tWHB5b3LOybX_QVV{8}**Q(;PR9eium?2lk0P(O#cw9sFc&zi0dWz&_tbEV|yc zBWD-?r?&-geG2Dx0N0`N@LK}-v(fWRuAGe_Ju1GL-eozZ9Q>}J9AC%wd#{thuM6Pa z-U!b%VmnLrwVp#+$x|nVpIA3k-}Xb3;TJVLdP@9sxqb%>?=8PX&gAiB_ZMr%x9#e4 zUbN|7>htzKy9XE-en|FP>gO<2&%s{S^8CElu+o*C!}5ECe%@Mc#pi9Ko zPdUG*r1^V640j?t1KpDHRWW_?xT1;tdwIzoqX%t8$8nwTylY8UX+#8^uM5=EwZDen z0hWBN0*-Ok&fVC(L6>uXce5Djy*9S1JN``Gt|3{U+^@eGA7lNK&hJ@ye`&c$q9b1~ zal5j-THzU{^L-P)XXW?K4yxLW!uu9lGh5e~qk3_|zS11UJ4U9%IbYrP^D8cwGEY}f 
zSvSb;qfu?{*957j?~nPsvrp4nR;r)X6Ya|ymiw%pR48!1{#v1+!1=j|gU08r*GRP7 zj~iFdKGm;zI@3OdcYDh1uxy>x#yiU6$6*7F^(wsc0qI4*wJ4^L1&gYVz1H5p*Ze(o@Y-!1Uk-@^F4h!5XWI)pImF$-;*4DXe4laYtS zXFQV6c)b{2gC!0RdpmnQYt1^|kBurnyx%w;KmWt_WJ~!vySHzKcfRI#c}eD=U^ovG z-*a!N)8X-b^VRV7{?Y?6X3okq?GQbXhZ4fxuUu~s-|r)<@5eLzYtRLLuax1xMA+%` z@~YMbya!L`>m}?rfB47RhZs*gn<3(Px`TLiT&B2^4}Q*c>?on3j{q#^Yg#o-e7`;k zzPYH@+;7y&A^x{c!WYKB>x~oX|9<#I9*6w8@$KXJwFlumzkHp9a>0J+?+1tS!g{C6 z7rn%qQ%}Aa)GuA$nZ=(D_>nH3o5epj4qor$`?y9qCFlKKKEu=iIDakqvvDrq`{#pl z*p7s!p8nGyp8Wk3+SThjwh%yY6Z`iw(yy|8?1ydqdw60$BBe!zau zy~N6Lj)^~pI?y$(QNOxD{dlI@o4(GR9Mkfvwm!mjJ=UX1)qXT~Nb`{!tdE}`Z%6qI zx4ci&Yadge_5~?%`y$TvS#+Km+j(5aGwrY*!Q)e?;2H_f;Iu-&ts~ zdRITLcEhN z!h51?@iUXR*wSy_FF$G*YnXN(>oso?|58qM-GKdsUATO}Ad zZ;TH2)`f4WpG&uQK0JTgDqxxZ>l@Z{5B+wozi;?&B!7SN=ftntx$d~|Z)eD##NL;! z*}Ncor}^R2TJF9V#dkUKdl!9n&bz-^<-c!O@%K0Dx=tuxtnb<6YwrK9%N^xwSr8^) zmRw$!FW+BWvFB4lm#@cjyxI}SDf4~gtwFv6$|u5|%)$K}cu07*ALWwzfN=YBaEAjp zm-Ddv!#Vgz1Nac`nH=0b0iEBEUGTiv0U#WX^|?N0N8K6NQI88$ecViJ-jwML-A>ES ze-Pgu=ncgwM^pi0@0NUszr#SKi7Xe`xoG6y=X}+p>nI0wG7lW=?Ud^Q* z){3Qe?u7c7F17accWRQA8t;10zne;YZGunR(eInZb`ICu$MrbjrwP0kRFtp4pgceK z@}S8Zk_nN0uU*kcWBZkk4Bp-ysMD?KF8|IqAAbl(ys_Pz@O(WMUnwZV{X3)nJ>x-? 
z&wS-}{zZGVix;(FK%rLTd1dh-cZ8r``h`z^@{@#u*HEr$J|9ngf2tjDoq!ocKK#Tv zSpEe*1&4k7lJpbjQ?gx>Ba*>8 zA0sxdnO&0l`6H8m=PT!1K&+EYdFTtd8K8O7O>mc681=ssNZ)d)r59ezaX}vjqP_LfP zy!3Eje}?nYZwLA>oR_{l4u0ailr7-%P@kVpHXr@_06+CBSLit({YDVa`h68$-Q_vC z1QBqF%?rZz^M0}w`Z>1!gLL1^6Z#k7!gV9g)2RpM2K8dO)8}JcGkxOYTV=Z1!;_V} z6Yv?GHs1TX$^gDMjUu9f#**}xxUo&@!TzrL!00?G(UY#5ygA?t`NgKDeHXT0FB>sOSopy+(yeED=)4i8j@(6M(M9VMY7EdRb-K2EKDT`h$BnH(HdF6HG$ z$f=Lh>U z?kQh|4gu%-h2nH z{_9m=w@3V(@0g0E=xcHyDJUN*h0739J5BgITjRNINj@gU5|g8OSL=I5Nvi~-z6;e( z^7BhAHa~OwhWW{_?0%FsPgKFVQaaZcgkyhh<9uon>0+rEAFouskam_dJX_;eT&?*l z({#U@ARly; zGrH`(lldCP}R(&1T z?Vw~t?QdTfOXn+I|L#j_^IEsBQadNj^+)3Ob3#q`8lCe?@*}-yk@#MopG!_xE1dI> zdTrc1qEGb{`EVE$N9Tv%FKCCnb^?XhkNQRGY44@x=Vjz8(~0M1JhYh{HTz#ica?9c z#~!7Na8Cr~rpC{7mfH7uetWxl`=+<)x^^+rbqAN{cGQz}kC`0!xm91!s$B$Q6fUv- z@7hH$#w7fl$U)8cneCrnRI>DwlAhZ5;onV5`&lcre@(ooZ&=R(^)2Gg zXt9KS{+M>SJiI7)eVp?4PVBwj@|Bcwj@!Ska=KE2k+-*hUo~B>c;dp_BLe!n2gT^4 zlEL?jGyZ7vN7+4$g<6lKU-3IV)cd5s_wg8}OVk|0`{Ej|;9WSvxg5Fg@7`wR*mt6m zM>U`GgY7Ei_|8E829{92^ zZTY;v+vj4?=i~flNxruxqRmJr^|0%kJh?fe=6VV z`cwJi`ZIk@>*ej3>CKw9hwJ0i?%}4UcZ$*bg`c#mkLT>yrvm%c_kI0*kl*J`E;jrZ zj_=RuI;AgNZgOGm#&PkYQ>N6=4z54Oj8y8{zYF2ya{Phwagg>DK(p_0#tr0xmQUZ;>u+y;yC8Nw;(CnjwE*pvjGoqZjr3h| zmQUyFx%mE>q^GmKuKsOmmkrFg5OrdEZIyv2VXEbcv^qaljV)m-jUv6D11i1cfvihZc zMi=-%yv6803BLwr+$8dwKB0OSaw#t!`#HLL{rtY)lN0z%`FA?~zLlR#$n)L%0mmbX zLY~grBXv5N?(J2Ti;6`qr~LGtBJ#=pO1xmmq_MdUZ0(T@|q_4?HN z=imgnQ9Zj^_&!nI&c8o>iFBN!KlS&5D7bUg-}z5IQ%=frHVNH%InDPcm(x5ylWSA~ z@w?jRE0@0&*2nleRr$_0&WC7GuzsZ5>G5%4%Kml$z;MjRhVXpx^lueP2>Ur<|NgLj zjxp#DcD`a5z;Lu5Wq-i*lJD<^HEQR*tPsxuwK3{+E^E0I9ZY(V}9n^AMzx#Mi z{nCmgA9r58{#Z~b0nh89cTaNOY3rwTe(HGTcC**()r?!uhwXKiaSN>a59|*+P&)gA z+S_e~;aA-s{tNkeub_)2_J@y3!fV+d4u$;9>{aa#_4Yb{d;3A+J4-upJaoO~c11Bd zCH1P8cmDS6-jM#Ylq>sI)HiTmuJ@}6{dvNCch;o+`MmAiWnEHka!sfBZ8$H?@$PK; zigqgX!>z2}RCewGK{vs!&DUqe0bNL%s=QDiz2_|DO{TxT^0u?MrX_2 zN{{Q|4Yp*QUK%UiN;st@}~`Q)2St&#NB?`lAx? 
zy&mJh4Aygsap1LY?_U&#y^{9+(Ep+K{=09z^7hWRZ@&N5^~U+nlm8TSWb!tZ{^%1n zX7@rVZ_5ysr{~qnTl~7o+i#N3Q^?ztcCP6I)~|qix?ZYGzf|aSzWcs@+^|a+lpK9g z?L&J%Gre}L#;>M!1D)&!{vSgBl=^?E(h;she0Wm+erZxXldZcw5XxU}TrNg$`z}k&(=!>K zpFAs`jpvQ0Mm!Vfcx2N2ldb3N3hBt~q$6j|AC9a2%%A1^$?r{8|H;a|b<%SGdGh=! zCfR(;$GdR8wGsFx*q1iW*6F)s(suTJ4X1N5{5PBz{&y7nSdjO0`g|J1!^GG5C$v!2rJMD=2j7=YH`}`p zPii{z5x@7t>^s8Cb)Uxf`~BQN(uxdl$wtMKcIdty-W7a_@oC2);Y(boFo=CWomcvK z4W9qF9P!S7-;YW=bbll{qI#+wFrCc*q|(8AjW2>9e$F%8-y%hRo+UNC!*PHbhI*8K zdYi7xr*^JA8PW3QwRa&E^q9Rwy(;vn8jJkBIX}1N@1OfQmE(mB2zo{{+U;UnS~qyHv;p3Y}Yg#CVt@6)**%5`WdXQh^#JZ5xX zrF6#)zpZ%fT!oK|shyj3d#)(dq27M)v03Ff89uCZ`a3DsU)>o=76#>iv4Z)@S)%tJg!y zpPQ{8+qnRIx9KJ3^L_uUe_Oxj_(qATf1m!4;E4+=Kh%4F_y>RR2gCudF?mYU%fWfS zCm^$~ubj_`ig)(Dbi4IK)-_b_4#6x^8YdlC^hUqQa*&|kC@N(4BVqZmYyIXwrtxV_ z;ru>N+NgL^dsoWuC;RzH%K7EM#A=HV{W3RwTx-l_8X z-rlF*r}2K|?MeQ0fRAHZy(pg9DR`(4`ODX9{C!A&AAtG3@mBO7+c--0AObF_DWb&r z^Q6h?W|eEaE2na}+2nrn2@OAdQhhroklyy9hS!}|-}HB;@9q0?p&j-N_=E2r3w$!V zU;U#y)PHif_@rC-D7zba?tv|n*n24T??^Tm|mEf)v~j z#3gUIxi%l?Z;gMS$VV8@niiw?5(N2wSZ-Dh;Jd?bL^y8fkaII}!&~(pQh*O9w~=o> zvmswB`;&wx2;m_@s|9@c@5?{0QNBqP-X`oDfg`7wvmgAw2n7j7k&W+4KBdatKep z`u7>a_=_c87m&huQttPQ!g$J8_8qK14j`I{C$;0-Gx&B za;c$gXZ-sXt7J3HxSwvjeX1>4gt#Zb6_RnJU0fGaM z-f$i$v}!*rT&?}EaESos)(uhPE@ZPLF9AIcfwHGH+W%NtMJ+{wI4H`de85} zGpzlPPzq1Jct57x-Ers89sRQb$aI7&ckDt~g18^Abozaq)bG6moF(>Oqj33tnRe*- zOMG;GPM!UA{5e*&D+otWzfY5$s~wi?a*pK$9~wQh2iZbwXV#1LC?Gv+ zk1j;QxxV!Mu5{nm#jZ;CnVn|uxTO0|Nj&+$AGKz)KaUC;xyQ?Stlxj;JfAOYyMFa~ zzMlhk{S=E7D}R9+?W}ep<>BREJ;mRTN%y^E^eLT$8@Em}$vc#!i|+%>d|CT6DQ5Yf z27{cA>afQio@TBBW^C5cGx*Z8zc^<8p5r3&y}9pWeV8~(1VENta>Etl}F&Q;cgXQ`c^%}4hgmUusxpLQI#u$61)?%htv?q6i*^-l;q<(011tokHe z-(&v{-$B@ma5B7G@=;!n2j9OWpWCsXnDw8fiZ@$_G`+y@%1Jxn4>=XK=g<`GNjT#F zy|6tG82|i!%mL%C-Lpv!7$5AOO>#i#Nfk`(0?@E)c1aRa=kF87_a~I^KHp25pH}`> zt(@jz3tKtOAGffT)BK2pYfqdXUtKRv#@FwBGvMouia*~^GChSl<@?FIrr@jhlaGdc zeI&=%M{<09B*)iBtiDt5wa$08BKlv}F4ixycC&twwX60Ekxyx-Rg&M^iSZ4H$?ktN 
zYkRpIr`A8tCC_K$`yYjTe--jPZwkJ3G1W#Pmzz_ipX}xqq)CQ8B=}+G*(@>1G=@*4enV{AG!++WRwp@1;7T@2{mD zrz~B+H{k2_)e-$3XWF59tvaIT%v|qPNB&mJGrd?H`8x}%-mH%3{bbj#)e)o1*8Qs^ zx(?;*{?!rvp19j<)e$>K)uD1&9ntR(rX8xct0VgT)wDzPXLZEBm(j7q>Z9)rruO~s zs@;o7J2qLmz6Y1u_id{q`kjEZW38p@ea*C^%fh;To^~v=u)bH4c63@;-}(3NI#rL` zyZ*NCRej3d=|5uc@jt2S_|>C&AHF(j@92-%d-+e>{o6-cL?COeYS-7UoTuTHYS-6p z#-xiXS&v_*2>ux!&>iJD`yQ1 zD_?4@YX2uA8w?+3OH5b3)>^H9jr3YNXAw--cB!@M{2&?8cB{4OeSBYctF?00LwIfX zS}SK)3_oP}bpDWx9I`NHAxuAPVe8K$M=T7bAnC^~tP2l`y>nh`oLX<=$fhEG`-ic`WbT3CX0d(6h^@LnGGqqxpW=j+iNx9MoFEP~JR`B|uRPj|bk zBC~G9_X)VnM~Et%M+p7^&rQJN>mn2KS-Hgjcet10>%ZxPia)bI+}=QSIkMDj{WM(n zKy;VC1I6*=Fq$^o=NwjiocC~_vtA!-zu}jKf9WNrukBt<>hBWSy-|Ocjra+fe~-cI z7p}K{4)vO>9wp=_-^oAHbMwEAw|^PWbiy(HKSOk!?zESwNCzEsc0Dv}_hddYz@=9z zT&Aa6B|3xib)X^ODItmLKEgmi%-8Q<79%wrm`=E2 zbV$+%aKD5agX=)#*FfPaTc3Ept+y0xp8g?$XMSpgV)T%N2MTWxyK7jDk2pQKqnT|e1p`8wr>Wn6f-tX#%;Uv&Xe zSY8|aVsuxQPs26Kx7gO3)&WN0+0%>B9T~iaZ_nVVG2_Aw@?|07XSf*c$lx_Rn8C9} zBXo|!g1j`v8Uofm#t6fJ9$O9g{kexcE!D6gI{6mMemgLBGSY5 z@^y9cSHl&9e@9fpuek7|QLFd`X{TbeLDC6-IsW_ldR(~H*8i60*8kRJ@YKw{z8)8r z%8GJaSQfP-p7bn|^y@Qt4X>7P4Edy{jj_HsQ~ZLh54|&kr#u&<6%yuq4;;DT!jgz1 z8Szt2i_tPkC;b{;D&ZLN$B~BLS?35uc=nT`Xd~sPhOZQO=GS#XU6*5+BWN)yXXzYC z{QYRA&zJBOSvuv;-|u2N<;w5>GR%?I*Vh@=^)j}G(gW3jboL4jLnR~3T4|UnwJ2QB z@TC&g@tgQc5@xKX6SIF$m+87b$XaVUM^ay3XZk`3>-fts+p#FEukdUyf0vu-Y&YLu zW|-~c=ieA+JNY=mFx$i5!C{!~;_u-zOisGqrJiJe3g@Sz|BCrAJ0)DjzPm#>{1Z6B zQ{Vn081H(Q=^um&*k$7c$DbXgJAa%wN;~h^?c)XG2ey}fg7E`)4BqAA2IDJt4D4n6 z?ty;9^B2lPJo{HT9{eom3-^V|AEpzI^n5u-7wJuHzMQ>B(Thkp;-@_NxWw}mUAgt4 zg@|*0ozMHMkApskCtY-DtNI@AKIg$J@A>5~Fm>sb8@l{^BaHutf#>gM`~#QG`9MCu z&x8HD={;Zq<#=FbqqI|u`^|LKUX?SKm$5;K#(wZj(StrN4}lLQB$0n5*a0pt)m}uv z_254Zfh<3Aw=ujMf8e}6%>OCm_xCw`pPBXr-{m4d$da;s_b;Al?_BRtyE@rP>4zQn zsCd~v^@kB;KA!&pnls6xTEA>x$kw&IJkqh9a@?nQTt32e&|aix?={U=IB#EnXNh>W zK~;GDeE*2;b2uj_Z$rm)IQQ%CAzm!~cmVGJN;@^K(spXxA_XV5uZ8ape2)>7OFEp_ z%EGKa*1aXn{_^#pe5`wCaLkH#UL+i@cb`H7V4Xe7f8u_Qbn1%HC*%j|qy8Fbyk6~s 
z#?3Nb`1&pDz4vqVe9q5eG-COip=&S>G~O%rQTC3;g(Stx9qXmz#zDy!H@;Kt7_=KDG|aa% z$miw6fMfo+@dmL+l8dzd$;B$Sp5Nc$!uPYRoHZinE)S%GE?sVNfUwBHa+3puXNLS* zZgPNlk%Q$X2cwFI=jcQZmYWWdz^($oa|dQ_hbpuggP3<7W|i-79rXz0-rK?VtvDY15==$6 zw>+J4|6s1()T4wS0lbd~{+@KE7ipZqjjdFl@Sh0alLfSDkbae>a~x!CGJG^9;l>mU z>&_Y`Jsc+)f_Kl>)_T1D;@_9|j;|=#_qB-U)Sj>%4hwwxu&x({>mGtn*czv1g`J5af zO62H$;6p`ff%Z}}X@dRb=aKv!c$W*d7g&k!uj#s7*iJ8jUfM@*L%QEz8QZS)A^$(q zqVhrh`TJfm&ZjZ#a^v=;hy6Q$ZvXmug1E6k%Herq<}WX#wE%wHr{f~w8DDAStQ7cJ zc{s=K>uyc08t?X%%VXU5uV+{fr^DZCT5s?@Ku-L%xmuq07e7}To`ZZ#&=0Gz(TDbB zex7Hh@89S1^UeM);vlM3Li&*Taj_uv_VIlEY9zwdd>x9DHaPOT09EPzJJD%ml<4#s z-y1Z#->w~=`T!(l@NS<@X2)I-;5{4MzXovM&-X+wZfCoZI<42OZ48rZ8vYQ%UeBp~ zuSaI*8=uHWVj{o3_P^@$t$MvE?U2`iuYVu3=h@)C7}Wm<>wtcaO6^mw8=~2;&hvwH zK=!~NtOG7(H-kL&eki$QA?SC}r{>NO5hgW>IZr{ZX&jT83a)>&SvuCK1i_vqEKG0zMAPps1zZ<}LY;rJe z*d*(6%)j)Db-biY`TC6I|6y5AAHcbAm?_9tk(mbbeJYnv*G-?cd~)VF_#r7ymJH-v}mh}e0e>9+<^yvD#)+26sm#iNU zKazx=YlC(xMh^(yxZypz?*Y0mHMkFrgWGR#H;bJUgCCTCmj9I;y~+@9#>l@_D-+u;w%oU?+vV5y^<#HzA`$^3Au3WmVuh8mOexAyuD_tE~`c32H7)Gk) zTc4|s@^OBazJ1bkY8@@NBj7Xpov!Clx-@-hE?xIm$yH6CnZvL7*(#bI=hC%ay1q}o zJUD5+b-u^+kL1!7zfL5X{&WsMmloW9XZj!K(w8CK_tE{ml*dUdoWCRJ?<$A$P})tL z*KsDpai@X_=-Kvjejc!d!!-3wZ9jg%rMq<=l=ezc`g{RDv+}iBqr~=+e16UOANAfq zp+)8o+~=h8bna8IoT<)7q|J+7B0SeKULLnDqTe^>ypwv~zXwIQok6>&&Gx-x#2+4) zU%xBG{SCHgdYhi(B|fko`1CxlpR4wHFxLzE1N@XbjyG(tXM%ND^6xN2G5a17_IZ&t z8Uk<$zSo~{eOULZ&C+u;V^1Lwj&$*Sh{O4OnfqnG7?i_#FzF?p6DTiS*EtpBXTGrh zgkORV=ieD;_-7FIeb}+%LWbO%6gm!QIHcnPAs;{l+ktpikdEz^AMuFF_La!LkWb_v z>F2%sJbm|LAaMGauO}=I`Ph@Z9Oi#9XkXItlRAVc0+sQvf{f1=;^ec2E1 zDAhg)(5SzEpyvC)jK6dDz#hi;-L>;hKR4~~uw?fP?7i!dk70k9N**W&&*k{|4%U~% z!;#+KVA$lKzP|Q5!e77FPPt?j!hZ&QC`P}j{9h<&vULu>cS5@zSzsT|R6WGwhIZ8#Ac8qa$6w(5R=AE>V*8_9I;IankC4A7 z*AbJtw7E;^9@a4R?FQsao0myE<%`b0zs~$S0{;1WGT{j7>&|{YI;3Zr;MH~PWO$L_ z_jmA<;gW@Q9XZrfyIGGuAw%l%3=Hz@-oAb3*4LH7^}?UItlo|j`ZI_v0cC&l60PT1 zTw*&djR&;)|UFLoZ%EVmQE);>u+C$q}+k^Q} z-52K9b@{OUmPq^c3Vk9+KNiZ7uAhhS+TQ1G&%4o{_5P;yjvGEB 
ze}3>5oi_viAH(qn<)~f$l;!hv!;N-+u%h#4Cam}KR?(l<2mc|+qrUd zJ>laSyv(0*u95RF!jA^^VEuI-^H>H?e#V9MI)4WI!W_Kfr$!{)dvyK`{6Ho6pUBap z^Vfq0zroHAZq#`+@P9amU)$q=!P|VgVDo9fzahsTou{P+->vg$oc|=>Sl>6~@GC!M z=mY%~J3qKd=hJ}i&f!=1I}Lx2&Zi+ylxO1q_@wf<)8OBy^XbB^I-dso7jpFIJh0#3 z->>uO!UuFd4fviMysjT?5qOq=doEq)!|%(|j|ct8*JqjUuX63AeBYSmE9Lm0>F>$X z-!f_Xx-9+a9Dc3m4O#l(9Nmh4RhHhIOV@h6GfUr>!>@9=JWK!ST)N7k&8t!WhsN>c z?ODG2bLHxMdvTWjFS&H3>n&OO_j2i4ZhMygjT~JnU-Pr{#$0~Y8!cJ->Ph(yqZW?s z`kq`m?f#%IwXaCu(p){9n|S@|9tlCcK>s*Z`Z07B>qDoonRB&ljZPRRgOP9|F}T&{r~a%+|&%O z$M?B=jo*|5|9&L-e@77S{P%r)x5xc`FSp}+Ab0qO@0V+TpYprt?Sz8dtJ;w7*K@q# ze2~7+bB{qjNY@bj>k$7R@m+LkhS!7dR{}5joaa01>G9dRnyqWa4b-NfXC=8r`$YAk zze87Ppf=I@wD|Z#PRBE_$qnc0NSsGAp5=V?pD_QaN-}Io#qaM?_&X2YzJ7k&+uz@H zOIB)r+FQiy?`jeMzl_7rRQ7RdRm>ai1^}GnbNtzPcm}5J$CYFE z1nu5Z_D+T2^YiAeU$XLpcL??dde7e>$oSyjac%zk8PY?28|tz2EbkFK6yPO)ehvS# z_SRu4J8$aez?fcy{>*adf@5_g9pAPk_^+WIH$sUuhhAUV- zybmUxoP*&n|E?m>ml87J;xJs(TJ;$ZQsg~B%Je4wCmz=$Mc0_WN~xed|>k?lC#9;GHVEK@iIv&*H!MnjPokIZ9*w zWM+f(NAmG^KzGLP1CpH4!}y=1yjgrjWZBC$7?^|MS3^I(4S)6gMa6^*Ov^R7P-#%lknUdij(!8-w(%c@SBz< z*Q1XFa?W+J6G2$`3Ys$h=H$=&cm6yM`LQ4P%X3tIoIm{?dVg2KXyfvn5}@jOVlq6G zfckfCwL^KiJ&+6P;Tz%ahhJ;f`@o()gm`K&rk|E{Eq@5(7nk;W;7fk^`Em*`5KM*h`&CZw#QG>bPVbGN z>jd)E_8A`$W>@p?!A*@v*B3pVe3AOYKOg>`jV|^`@)7?c()Uu1K8RS9?9lo;|5^?p z5^hD+#)&=+dw&f3!>6!bS;xo{NJna>4RE+rf+rnm3@D4 zOx018Bt~bI+7fp(AJP0P`o@oElfBQ#bIJAmny<<7XZ7mWc+&Sg>C@?XlzvRhrCy

Xnc8@u3JU7{Yr#x7U1}=z);1eJVKDLFaP7c4bZD!u1lL^c@kO{gWd~dCnpU z`+j4V4)K@tifCnWceW;bdaGm`$VV4S0mYMgQX@Al9j^5LK4`-#7#q^N6}uYMnu z6x^5&3(+7z2=D9RO{*k1`@W{H?^<(3i?7!7rU%sT-=IFZz|#C2DED7Pel}@(JPnM2 zD@OWGfsdncQE6-|+$ifb{Tmg22Edua;nHi4NJ26Cg0xS)-fAE^+#vWhu0A#%dV7xz zDE_`%)Ng0;fA#OhCM6^4U!(Cfw-_~qKPC7a*QGvP2*CMwy(iRXnj&f|l(jy_&#CGY z$_<(na)W~5Tn}*IK;EwtRLI|uVLDT`AdkOEdTyoeqv+(KhNdwnj-(9Tz-{&me6;wP?+;jRFQMxF`fUB>K}=+r zkNqVnNw&-$p#6H!ivfR8F19zM|IGY$J)Ze@g?JXJKC9q+m~_PRpL2L{3|H`!a(K1{ zc+%xNzQz1WI|_iS@ZnlG;(IQK&*@s0!?!8GH>TnfHGNRfh`az*RNlb9b$hV~a>STQ z;}1(hdFCAP@jYdsCtcSoejEBNDZ{;AN%!~6nrg%bd>2Gf7ku^)@;7eO^Cx3Vn5zAU z8T}l17*9E#ES_ASES?;nES~gD7GDbDj~RWm&nj?85`WC}8TA(F+`5J-7^k|`C;Z#t z7@|LRt;TP?N&O+~k6Tx3xXIGT9;>IbJ|0hf-UP=~`dba2@dY@>(_d`yL*VZ+#E}kn z1fm|GUpBq!dSU0nxdK*g#UKHP{V8qF<-Ol#xPR6O3A4Y@m8a>CA^a}kzwc}Icl@Ei zg?9Eyya(p_Sl(g4vtL|wIp`6}L8swGlG1mtM)-Vzc2Wsx{mnn2^gO&neC8*L^1b}4 z_?UALMpY;Lw0Y)t7$4HJ8{fT|jGmIwLrmnWJHcNYkN*Y3?|8~Jqj!(y8bYvypZy&d zOZol#W{Z#er4#6xivhy%g#39U=n3h5J_q*+;3pq1Alx=Bx8+gw+fn|ich7z|^}+rP z7hTWTi}2sSKj3`Be(!gg*5CY`(B*XFy&-LHt#)+#B==%Y@VgOKF6sC<6jw;^4t`%K zNPj$+J{$G%dtwJ^1jBi|Ie$ZZ&jCKf_tU_aJgxdBc}nT^@ox}xv;AK%pYMTC&v-dK zD32Cfh(BMSldvPh^n88(Fqf|NQNfuEf1dv5X{SD={VObI6X*=>Y0^cxID-0+&p+M@ zxxVdXv1jn^w#t+7xqOK>n4fcKYgjEO8{&oJc}-zud+U4M>U>y z8R_CW1D(Hn+<(7@*B?@!aQG*BU3}Azem||eQ48t;3@OdlTdW=3PELlEzKTdK%ezbp z=wqY8_1&O;lgW+W^XGa+2}widXYULmj^%CH!N1TG@>lH$#`yeX>_JVS9G39Y&lzob zP~RCNKDuJ0^ZmAlb&c%^dp$Bc@~D8xd!OW6w*HQGV0ryE&iOpl^QZRiS#n(arO#K& z%am?k-(q`S0X${<{)V4VDMnvH&qu!)RsBtRI1apEKHig*c6WQ<=M$s@&5dzb%klR` zVw_{A3*~}xoXrcC==?WKm-^0^Y*F9UHa|v?n76D^{LX*Y1I3;xuhDnU+)gM)`c4}4 z*Rn5DlGIU8a>_{kd@^Uxyb}f@`v>tcUm=%ospWenmv3n<-Vdy_>X+QsynjMjh z=y+V0UzLYqbWHSFNI&(h^VRiEjQ9EJ!h3`i_$I5z*b%1c99#06Uh6YG(GFkgf49Q* z_o`1Yww(8O&f}#1gPP9!iFDb#tD z>chbq?riNzc<(o_d3##a=WOBlxB%5ijM0c<-MA?=$DX zJ0y7R{FIeXyr`1kO^A0?$8&#Y(7*GB_a3Ex`}c+Ley)U*NZ%sFQ(xxSgFcC^E7zB4v(im?&X;^$ z#OY6tYB}ZA+7FUp3t#H}qDb~mSRaa~buu~{J0hN}eNitpb!phoYqP(xU7OYz9QeTg 
z*H_W;qYojb%q!~ql`D#i1u!n?Ig3nBAeIz`dPL;qNdb>o4$JrZ*8Mi$U2pn<>1+bm zhcm!8!Pl$#%U61RFO>+lLyjRo$DO?0(6>qRHSJJ;?4bICXs;4}4w?Tf@)PrxgNmPF zx*q&zOcUVf#`dZ|!OlUnrejpdGP%d;L>~g_zjvarZT&Uiv z%Y}tAxwz;1mJ6{%*JyuWe|@d=g^XK2kc(F+7oFquMP~n)U6C$-M#rtuL*mbS?BfET z4DXO|)0Nha5GIZowN?!$PpRF6cW(vG=Q-tVl094RE@yJJL=t>`j{2MtgZM)^&E`7@ zQ(j_8WxNs^7i5WT+`SwbujVK{eV0$1@6gWl@8$Hp-R3)xN0z(AzBl+Rl_wnaGRKMX ze4FodF_z`)e8=^+?{f=0($iVbN&VB!hm{Y*l1<)MIoEnYZwu`b|&{QIn#Lrkd{)A2~w)OFKTO{PFhb0ey^V`l#ZY zYW-o$4t=kj_~?q!UrN8ix=f)RVXu$d{h{6Rs@4sTs-E?C)5_W%viFgaqbf(`cC8P- z?E)eCG%2DRAcBzUYXiTR)hX{s~P#DxTPdGOtE{I`1FJliI)H;#`Rz>lUv* zpR@Iz(B2h1X6N;7Pic1=XU83ewP?V{n_4;W) zAKR$qdcE3;e>T28u1^du?U;NX?)o{Kk6)~KDOYT2-`@`FC-d=FTR-3(*M9kZZYw{l zSK)^5Q8xNb?tERz?do)$&BtvV#XI$)uP4kmkLvi*hWi|gfG=HlNXL_0eKY%fMEO8{ zAmiXF4TpT-ym?Cdyfl8^#`z`PisBNP_s4}!@wvXk77q1^%*T%kc-HTH{*sL&)^Ec3 z8dxjm=mekjpK`0qkM>VrM?MBvj>m)WOZe~otM5hx;Ietw-5So%?>HV1^On06Kf`oA zR-dN#YS`a}oiOh+xGjS=Z!>y*-sb$O&)?o4{OSQ(mgwUi&Z$W}*F9JL1393M`J3@` zvibjE9MSjrKgS=@i{{_HtYY?`Y({C0bd@K*r7JVz_Y zFy(JDx;}&<#_0;zL7(S-(BkI`mWdX zKCNCf?-qS$x4Z_3;rzV?x5J9j5{XZas=ey>5Aa=A=|4?3OLFAjZO3;VG=8DR`#9nH z%GYb-hD$~M-0t#t){k;oy}%9IbPl_`+jzMl^TrBw^Xa2oevv(M97Z9a(3j(`O%=i`MO#$`f0Jhyj_dY zhcs;MQH&01_)*1A`uFGXUK!v`Hy`;9A=Bl@#ZTDD;KK7JQ=MBmox^)^fH#@1^{+I} z5PZ}#Q=MCRIfv(mLOdljJn4~@K=gm$nd-dC?C;d|80DoQz%%xMpvm?zTo0Ew0U=zb zhw+;+mFRv(N9Fq-7N2^v3%~r_gYUC(oMXS^_j&2uJ|6<#iD&&)5MCaxg-757j)(O^oO?( z=_n!o10ugpXH z4-*^oDbQ}mI%Ej<3B(ib%YaLt(DnKLCwjHN?vQLz%U<={(dd(&$53-*TsfRaW&790 zhudwv!`ClDy10&cP}SYU^Ap>&e9}4aCe-IX4RgFF-(N7_`I|hg?K04)=lVVVpwSV| z2V|Y>nBvR!f2tZDz&jwhqG?-TG>)-m>Ie!s`V_Betl=DU%6 z-lzMc&!Rmz;)nLYEiBLUWZA|M-~VZwu~^qLAQ!Yl9M03Tcb|ZFs{Q8Ya{6dD2q0ao zC-vA=`^`sk`E~~R#@GmOO~6Api2hrG@B5YO2N(-_&5m5kcK+-W@96J^57%zt7K5Xl z(!Qw_^ga*u89uLmrVaO?E&%)>!fZ;?yCFCy>F4A7Y#gsln?v*hm*nh8o~LAg&F?3@ zC#Y9q-$g24j0(ZA-z=IU-;cpob3Voe{l1#-_YQ(i(p!vl{~{Twx4+nvtxN@fTEzpL zeh-cELFe~nyxzVp*wm)!#9zWspMS5nd}G!g*L5kJ&%+m6xW(YeU*UIy^4sg}_Hf<~ 
zZYzA{4Ef;pV7gD|ZTvnA9o~Bu{p9OKp?ph!J1Sse-?6_qpC+8g>8-;-ka6a~z?+<)EF%F};_a$0_`r?8AFKsOKmlq24;{x*7^+`JUg`OUrit&+kJ} zzC`}6H2E_-6W@7|d6SnLH}cf-)aB>(xZB?|KsV*8??RnF_g!!E=#{qrxst7lcEz|N z^tMV6@_(89h#M6?FF*Bup@y~h8}Dbnjvexga_8;yiq9Q@IMyROM`HKSvU3zhcXsZe z;eC3J!q%tQzrynk0qb^9^j0!}qvk|M{50XXE4nE7!)maGbjhP3Y(P z{Cg&T4%X{SeYp#^Lvq~4(WeYQ`h%>S^i7Tb1Hhj?Z08sZj(91_#M?UsUKyXyg11}n z+WDmbFPcK|R)E*_UdSKLqX1{bSzzhYE!Ebx`7C5A}71 zc$$7k)935SsM6`@39w!#<)*VXjHAE2P|%`Z&sTcqvlQqmBx6)+p3VB3H)+T=W^~_V0c5uhsbVz4dhRm2`^_WP*gUHb$}MwlxlzxTGE7&Dz9Q|9bJ@}_$WN$#oa5DYaXns_FYOm~`Lb{(PhXbxLZ9Ed zJjIPFFk?qp>e=N;)_sj{=aQop)6~9+8{Q&#GkNgwYgD48T}6K#u`oQ92i6wzxi0ad z^@HlO*+mZW@~~Ia>-SYJm56h;v+?_!?QHG2#oL*L!0|g{Y`>=aB^ZsZRevfwXvdCm3-x+MAs@&ftdcv#8gWrYY%US60i<4R7i(*ErLF~zn%5NCpDh?DU#2|BVQM3M=~wHre1~f^$@}f-w<8G zfA7E9zI|?A+v#^Yvi37yJ3H(4Hp0e1EZTjn2C>JMe0y zmti`#2kp!9)q37*jLd^8M*olW%VbpBeZ^V!uO7;+Cw_HKef?}+qc&XD|Frn1#rR73 zV*lejuK^2rULNkBh+OTm_ciMKUD}U*zbpIBG4K&}#pd%qPgp;nwSm22>Arqk@%z11 zO=o)uAKs({7NcHCz&uar@cvwFRrnr&5z_I+g_0!rI_`=+luo1@ey&e;<@ofw0iT>d ze&4L=Zpog_Pv-cWFI{U&^t1hwb*c<~ko%B+O0uDT$wqT07211H+{?wVS=ZsUCC8_AM!oRAuoD zzRm*q1UiPj9coq&!clHwEMTxi$H#VL*w@9J?vUP3fnLw=?dN=1sD16CBr5SpJGc=S$xHE*F%8O*uLEDb&f=n+HQI3wgLO3=O9QnKm{$%U%cLeZU??2yjA(bHC*iBLl z_LHAc{lvoH&SyXQ>pA+@1$dKoG&u3F0pUpBRQt)%cTGtGj_t5A#A9+E?n_U#pFB5* zXIX&9*Z-QX7IH*hDOcHkvXRgOpYfGxJpx!hxJCT73;exxczH<=e zQ!=u2gz3WiAvk+%jYNx`Kszm~kHtIQLGXuM&eDx8@;Pu_ULUp&jXtb!f$o_}u zLxkxM%(zVOvER)8?*@!`%F#Udtlx{@s>ie6{5jgx^%(O_wawto%lGLZALl36v0k8;{JW=$0G!Wfw%(`Vbooi)---jY?iHTCze5=Avz!E; z@Lbt*4382T;|ch73Ssi;uL3&K&AZ376YO>gr^~hdd>zHlPoy12Pj+9-&iA7pM}R+F zcU?LeHK3$OL!iUiC>AG4Nc+?%+PP+h=LVVab;qRes!gm*#W(e5|XE zr-YwFMrX26%Nx590XXlUiy<#`>Ap=u*Vt|i zJHMPiO(l&V0uT5zy{%W_w`shWn|9bfNvp<_AB6XM_`bd0%O7+)&G-4)V$1LOhR`0w zlP*`j`~Lo7TklA3+adJue2kQ5ev?jrqp|yu8IJm`1b@i+ro&RS*z^+514%k$hrWML zc=1657~Xe8%K_g?2>blE$?zmk8ecXMI*JyrRe!4TeO~SJyTuC(ewF%p{xq2!;M|_j zu`@eor~Sj%5A1!I{fY(lcu9YGem|GA^D6ECX-B8F=SGF|{<`=o4f{DnoIez}#kSv= 
zE?+C*WK`>&JgM#D={}AoqbgtZcGR%Hi-7eI!Q*o4a^&ghaxK@-`6MIS-VX2dVSh{T zc)nyr+soTKT{l$s6!`{xB+?O=J_4|Vb#nS`-I{bVVa?GeZ ztskZx4+%W=B;#qvL!F&TJN8RF_KDPQJ}&+}Hs6@4e>@Aho!t)lHqX|44=)qHyg&Ny+u{vvkBu{t>NjQNB(9xO3H? zmB;&Z`Eu)Yt0H0f5CwU+Z~sRRN99EC7I@dy_n?RX-os#n`&Nk>S(s5eGHTq0%wL>`wzcxs6bdS-=_7b|SUR!Ov^>X~(6Q0kL{0B9^ zuRoWmVBw09-UH3-fSSU!*m&gn*Y!Ep>7-uetJtfw{pi=zO`e}td2YFaso>W(^%p;? zzPEpq#it#;f;Ty8_?LW609$q{eENj)ZN=j!B?9+x1%84ZIJ!gejLg;a`I9jrZ|1UA|Z7$nLY6 zUhw@xoTr!cOfMLJTpm|!drACckIodSeLsp#(Ur z{|S9}EISP1iWw4vBB^e4}$U zZ0SuI9VN|g;TB8BJ!*+hMos=L-Pcc>z!y3nx4m4-0a@MBFUgP>t-sre^?WKHp6?*^ z4_$hj#`|{y@D7;LXL`%q%g4DD*I>~W&gItkiEyt(^W7-PQOhdz%l!Bwoa<$uSNQno z_{;rDkjt_6!!h)0y5yMQxA|vznVx^(IH7W`?O!vwPL3-dnoJ*%Pa@~{>p2SVKgCGb zjhjr5`#E|K=jGVzksO<;?e*j$@mJif^tc}GL8Gv|3ce#q=X%!reVGahu4RvSQOltE z3`Wl=3#+M@mMPNx8JMvCVX*yim`P`8bn|bV~ZJw%LL>jg=0ZAC&ks z*7DqLo@~8)Z*KkN7<53kP6YA~<AYXD+`{{}`dOkL> zc}<<4%4feXhkXsHkN2X%oc5IE;+GNSF~`&W-E8031l;B&j3;|3+T zyixJ8*lGmf_naM=GvJg zGylx*DbGna<>uW%{+;u6<<|L3IX(@(=HI0)U#$FfIA7lz1pX56X6YBP6!bF|4#$2V z>(vJIZe+jn)ia6OnUm8CDyT)uus`eeNtht@q9`_?7ApqI|Y^mTvK3{g6D{ z&)IR@ll5w^?|j2&cn48FxtZ11;wcY->l(B1WCql!tXJFpW{>xEgh)i-_oaH*0U2oUogKN{sx4ReZub}IM!!<1-7LNf8f|1I`QAun;8XH$kv-*K#1wdsEy~= z4=87hf5H6Qv-Z;g#`R!JD}dm9J*LIRANFU-Z+_X%#f*JK;d@YD&g{~C&q@DseeK`F zrrpGZ?7K(nzAkVxfRFu<{g&-N?@jvzp}#XfMh3ufoG8I}eKxj}v51G=U4oyj?<9-` z{U1|*vE^^MTf@n*m!;gMOEsLW?-=~Ni|0#%zlWcUYCFuk=kh**!?7FQd!! 
z_foVc>r>uCXv*>5NVyy*r?N*&IXh(;@u$tRC7dpwss1ea ztWHMX5a=iYE*U;4<=<)ccDi}4!27)x|2~x8Uv&O+-IuNytr0&NHaeP>j$~Nn4DZ;# zs`3ZXNlU=&`&@V0_}=uO6o+}C)^930i*g#MuJ^rtlBZM-X@5|@df;CNl*y>_AvtzJ z_~G|BRy?8uOLElY<6*#XVPR{0#856F+OWXn3p52ir}KGdhl{oX@*h_4wEV zg}-OJ`tdZCuX=w`JHqD!{vA~62acSXeqna5F9-SzU7?;1c-4z{OFfyN@WtpZ3D?_E z<-p}CeOUQDRl82r4lXZ=)t7S16I+ktKiy>Wx@9?e|3~o7<=w|y+V2~3a9<1IZZP~y z&A-BYHS42fqsi&bTF(`8S_HuDcVGYUcZiDe^<&6MxAEZ?lk0UV7s&?gU;eI<>qmdb z%=ZC8dmddi8nb@avc~8$IiOx;`ieFBURj+E_HO8XRYy^a**)bep$Oqt41&RMV}0Uf z-`6C+cI5c=Yj3FY)#WTT{&@SW7+g6ezQZ|uPY3v1|2LUD`+A1A_q=Pg;y!=z^<4H3 z_Djw;SpNy<6s=xUo>SbW_4akO8ITF$S+T8E5{OsuBa-nPU)W#MS=!$yXTJf`e4H)H zD?|!MR>#wI+$X;nu5r--&hf<4X9-@fr}uYXSNHP0ywt`!mU|54Iv(#YPRAh1Af4mq zQyAZokM0HhXZnh}Avkb;pL-B=GX4egZ=bF9>k2AZxXI*Xf398c2-GpC3B~+AH8J9rrz0 z17Dx;`5fup4tjn4du%_U>*G54xA7l7`@4*Ld2r{Vv{m`Q_`lur9bk>3Cm;En3)5Eq zs^?2uOAJQIkYf005@5 z+>p+PUMD)c&OztSDd_xYNN2n9!RhsSaa<}PX~_Hmyn{!((BHA-_Z?}s^8OMno=hJC zo(g5xC29v{{ooQr!ZCd79fbc;2@e$BEGJLm!lm-XYvQ{cDcL%S*-t&hix!9rx_=~i z&s7c(6(hYLcwX?GgjYlKyx{L5{Cug$dBN|g*DEf(QAQB*^HyNsIvA@MH>yl~J5Zkf z2>rMc?U!=G_!BTaE0T+Nb>h=+_66x-4u9va7^$7=_r`iy82-nFU>rBz`vna6|ad{?dd062BKe z;CL<%O)B@fCH{jFE$^$b1$&@p7(W-C>ocEaiRx%Kh2f@diwFF;_y;T=<&29bk#N-3 zPG}-zL`TA1`*E+NftB)#NtwX=dTwUd1{jPBp@N_l%E_C%? 
z4}V)^pt{k+`aSaMRUX#;NVXR@A~Y6!pQPPcHE(x;M~1V(lm8BX48jI+IOFpiwlDWR zh7eCVWIy41cr7NMzVE|$zI#WxAc@S!u%9b+d`xF6dOY*-eYbAJ5?3KEX}i53a<=;;QHV?4*#UFc|; z-8RBl^h3aZ1O3p?q0tUZ5nuE3aR}cE-M!27)y@&sIlC6BqD@A0yh&{y;pNvB>U^eV z_7UFI5?$r{S;@i@i^unO7pb49eeLT2)NfDZ>|~BJw3BI%`F_B&_zhKpeSeNXz;{6L zkPbROpGtfi0y`4#90@&yLlptHSHgY{E4<%w1pLX))0uuG-#Cu>z8vv)-CrNa{QgSX z@toE>$Va`Lc042T%(v`;FyC>dqxp#D8wD^N*K2lz4(E@L*KE(XpqVG~<7&y59AIg* zf7EWmJ$l94C(*J`^Hh${An7c8?vRY(`%~nPzpv%z!YD_bfFyk{qwaaS{d^tM`RVmy z)FOD3gW{nbO};KNxfs@XjvKu!Z;ys44{Se<7u)~muSn#9OP>F(-v{_*<2%!TFMabL z5#SRiM}99E?G--!vW>Ir=S$Y?JbC>cCQ4nMezw;u^0n{V1E?u^#(FId0bVc*c*PZ@;MRJS^nPc+2xRON!Mw4={m(vdum*{>rM%LYWC=4^VJi;>*rKR&tD--AKn>1f01_N`aw_+ z%A4b5y-$SwuK8sx-{-?YzT=Wl=?~}0&#>I1#wXP0G{aR5v%QZ5{AT;Ge{h~mpLBm5 zc^p2RC;wOA@p|R?;rn5v|E1ubq3@STy@GPnW?SDIF*;E=+b5)(=Wcxe<5}RP<2Z04 zz(abNl+m?(nYCA^`s=#HPc~|KX~$X(zt{E~HtKpK?KmdHg}1U8_-_-ur2BUReS+_r zOMcP=6C3MBn-pKGWXs+^z$Kxr6#-So2dKbOA1HJ|TEn{@2IJgUU6& z*DL8^zD~(!dKme7--L!Vhj#-7KDGN@#5+z;Hy@sWZ-d}Fr18g$K2-V?>k-J;A%+9_ znhovej`buJlrQ=|?`HYG74Wqv(x-aw`5->zH|298zrQ$vZslK{599f#d~<$GZ@bRfUv5B>>%=u?ib0Hd6rVgGwC`0R4-_XGXDW!kLmZcy~qDQw2+NfoL`tdZTD*x$^mG~EP+zskmvthOTonn7;|DF6O-=g#V zwE3j)#n+{%w_XytQ28ofe3>9({b>0#^-2&D0D|FII_c(l6#BleR=zk!2;u1_r#mDl z_RuUihUovYvOnTG0%yMWJ$Wk$-uId|Df}_i8|0kXYWWj{>HHkaFWdfN)B7|&gWsv) z^&e87{bxTML-e;=zW9Q#Y5o12i=FqFx4K<2`aY2F6H(6Trsi|gI6jXV-g39Dr}#U* z9zX9=TTgYo)~^2Dy;c;y1ZT8?)Ua~y)tj@(768L>qym$ zbxn+LtY&*^5BOrQ$`&o7iixkN=2Cew)7-+$-g0T+nx|vidDzsVLXhn>l|a zU#hmh_be0z!<9mfV7TXUSol-jrgE3;l<27MktOoO*BN_2G+PqpyB*V%Nr>hdJRlIMvc70TWl8^1~aDGm*7_Ab3 z7;;4C_f#@I14St=h_WXS-ZJBc>C2BYJaTVt^6MA1u$IJ&#ixZ zzxMuHvG+)**R`Lkf^$7zwEg;ex*B%LHbEctwQ9fbL1y-1zb})tmIPq3{qAp1Qm>+% zUIstTlJ8r&e5BXqxHetu-GeyBXZ2oy09>Zu0LFCJTh%_5%L=}~N0$sLp1!K-GqYoR zzz&fiM(7Ae5_QhV6+!=94TQB!>3 z{Nt?p!RXD_vF_4#NIq!#VRxtCtqz%f*!RlxL%tvTyyIMY!~5yf{aD-KHS5PJZc*() ztv}CaQ!>(zO7){D`?0+Xne-`KrJy>}$JP3{{bm7Rn9l83Pp^+7%J;<1zf~{}la6Yi z8g`X}jrRvih($lSccT1Uk&_?hclmLB<@a6t4&?QQ-Qzn@Laxd2M}Ng0sE+a(;*TT! 
z4sGpj3jBnOYyuKNIvg-*x-KTiG-TfS~%ctk7^IsX@>lS{Wzb*%6 zzdurv`XrOs6NR(1gO``-3$qLVziv;wYJK7S9}#xR{+EvTud$x6{_Fg$U88b2)p+mY zb=B5o{64g=%OtJVzLT6sJKH$x{pkC_-*^`Jas27JW%BR|-hUF`-^WOsyEJV2aS-bY z)C05=x&O~rz%9CHtJyn@N3_K^W%p#ZUP?&V1&7t=`4VcR9{6X#>v*Q>8s@LN*^$rx z*BgF65AFSqcHv|A&-otvamC&T^7$O?2%;SS{^Cpb__^|}w`ge3RC>jD$ar zu+Lj5cg&fko8EN2K{&dm2h{iPu8wWeaCZL5@;Tmoe!j2sM<{pK!#dul%U_bVb3519 z9n;&?POdfUdYGU8=m8zfKzQ=azc<117*3v4d5R0VUPt}=oxiM)E0v=1hw_UG>g?fCz)_a^XFRoDLbxl<;F1mrSF%!y0^ zxg>#*geVe5!6J!5430OKt6N!FdE-ejMwW&cqX?!7m8`D&_oa+cgf z+jm6KIqMm6mHTv%JM>icEv>z}ACUdZy(&4ShqchNyBUUlJ@t3>ohns+dhbv9_B|I$ z*Cj74r#ku$BOLT1FiibW5VbX4hkP$O(KD~!$U4mjt&ic-v-Wel?wfp!>qYOg^nLUR zhV?$XlMmINcBta2eJ(t`Fa0gb6?Bvz`Z67IA(}Tl_eJPacJ_Wy-`OVTS5?III*Zfq zn_R-M?q6ZoAV=im>Zd0%J@BLkDh{0ahpWe(+ zjZQyZKmB=Hx{-Eo_tQH$UOkmx@e3nsyEOfD1`{A1bP4~*`splaE7_kbl{d7X&H{Jb zZ>R02C7hT7rMI7zdnNKcB)Ql7zWZsCzv`#;ynNVxTE7$VLHp^jkI#F_dogwPdM6LO za9_yPAG5fOwww6}!kzs(`Z37CUeD6?CG89P_b>Gels+8tb&eZC>eXMyzZbvGcrtFN zC%(`q!oHKZ9_ju7O6awp&USvB+L5zAt|1uvl$goGPFB!>LhQGEFGAJ@KB(Q7`v#)_ zPI-Qi{w`$9Chk)0QTkzdr!MR-dn!g{e{L$*4}DWPI-4q?%P6N->turdT{Cm z^X2I;?$G_kM~|p{ob-^7wU_NU^yW;Gz4RCIeh$VF&h>fFO(l`qbhTQ(ia^T#YY|Qu$H7!)m0>c$6vrnk}?fH#9hlg=KT;F9pzUsS->&dD#8Lz(sQJ2N~ZI!aT!oDpk z+}d}D^=AglubG%Zsil9+)%~NT`)#a0q6R|hQ~-_Jy!#V3^Sn&3UrO1p--Sk`Rk0$iU9Kw$Orj?L+o?I&%c`2->h2{8Qt&U^}&$A@3vbDS>EC7 zwTb}qMigZxrBg}1ytihJ<#NlpSxe8`kqOo% zdv3#)3 zaiR+~&hC=+{9Dqoc-m)Jyv$7PRNEu{K9RM9+l6%n%VBu_9#?XbbVJ6Y3jW~yJ?u)y z%U{U&7T1S<&(9je!B{6e2>UFN$Zc^UmDWR^tp;u`y#=U6MHG^ zKC%uc_GO@2ks0>6_zU!V4tKD9tJ7!rVHYDrD)umT4Mx@64!M4*fskYSIc5l*oKvVL zV$j#=^%Uz|ea=(QOJLvNob*7+KED0_#12J9>FD+6>xvY@y4qB}oOh9RHtR0dhXFl4 zhn+>&345vd(f7>doki+?t*Cgu$A@$l3%g;poDsEp9AD_#Tb6fJRrF{1o<=9H1mwV~fKOu)JDZi8) zxE`%rVI8R+V#=fQG{;zv?_#|WJ(GO`$ORJd?w>#(P9wbNjdd@_haCfb$l!fJ=Q%v& zDEqHCZv-dj&179v_9x~0$fypahy8Q(v!b`TyHs>K7s&D)pnTzEpDbj2LzSzO&77Jy zp#~o(rGqc)=keg{g9DVVtmDbMkg**lNRGC zgTK=m&K|Ai4fZ)PeZDJo?ttYvoLQT<6zUquy6<*{d?J-}U<9om5knu!V*Dqkm 
zL+q^Z85w`IE5DF&rGn-Bvi-f5SxUTizajPs{ijJmz1rvYJBH3%0ZZ*}hw==$e_`7Z zqKA|I3H!+c!CrMB5d&>JVl5WW0^O$!3hKeC4;~06TsAj!F)4s@j^~1g| zq4yKQq2&rM@1x1NUU|Pw-a$sYf|GN1;h;YE$!3+_N9Ot;u$V4?A)m{T^(v{SPF+y> zepNG(qXj*#{W#rPRrPl}r-OLNTIjJqbDZEvh% z?&Vp8l23XV~?v`$v(Z+`p50O35*z_`rA(nl0zog)eVEBK=I?9jw=)_p$3r zR65o*TFs`H^&0AGA>)#{$8V2MARehf{A z?U;-o<-D_;>&V~8dh}lWNqOylL6=YZ8L`KbzP&%HxTgJY@=-3a%d*ZRcGX(Kgkj%# z>W8(2`I2=QYaWMNm73p$TrR7e?+$JUeb;k6_08sSWanA1HZ+f2#`P6j!UhKO z45Zjs#p8{>D(;u%{Y|-V8206>_|_7W{bfb$m+5-BLV@=AnmS{nLJYSagz+DojCZA9 zmG>o+oy;%n5jd7{n5-i-mU1~Pd7o6zcMEo?WbO045&c~{>2Jio^;PjWLe{@U&UjA= zE}X&V&Sz3Rql6e2AqafqeO07~85PoNf=}iK@7>DxG_2i(Bq#Sa5MQN3>EV5!x(td) zPR{d3cJO(!kiiQn@=j#SE^YVqJ%TW`R*25p&5VYOKdW&^%P!upkNDW`p&a-X3G#PW z0F&9HfBO4;1S6e@pU# zJMFb&kqM9r>9zVx=}P^|JY{^P3bACKGM?Mx5Ie_Y_PsV)FSf2=e!~IHw|6~MmtW>( z;2-)t)V^<3eAs;Ib-A#m0J)DE4vS9z~|cbN@~CEAJD9v-G-& zmp?tv!#eiw9Otg&-pug)HmHom9@u>Ab*cPG7pw3VtxvLFhj*q>BSY4+a3~7+n?;Un z|A)`NR?*A%n8c32t}UUE$PWH~h@H;_NDcXNy591_{#8>r@}s7arhb+ZmcocVj2owC z{cW(C;E?eXwp)5%QTl`AWd0sVqh41{$${-!*f&baLG&Bz8IXY7vvuMj5n2BPpJ+Of zlfv|u?KkWp_Ti6GY;kCB;|T{J?Y!j{s$gd0*%ih1eaLcK}9_>u8<=C-e0W z%RLl`8T{FDKObC@+|Sbf|0%gQYPmOQxyQBK4{|8|>VR8nqm-MmP53fI!Dp_NIr=4{F(bIl!G8KW%hqPBc-x|V)oD(9x;F7bsJ<5E? z*^V(E@XphrA9O`}uyY@`XR*IS?xRqr#_g8(KV+UO<4eFQ+|LcfI>7eDzK60`+X=ac z(rGGwtu*(T-t`^|I#+V`JrsShW^lyU;7X{Lf=ySEX+Ahd`bN)#ORD{kFjwkaGtmlLDaqY`GO58VHjU%O=FkYpi z)4B)Gmpk?P1lq}B!Vf?QR|4K3P|i{Ny`@ct4GbLVBV3<6^kZ<-J7qQqG-ld-AFA`Wtlq zu5&McA0_O2bg-Y&>D>G3AbCdTK3)frcQe5+0>b_ygUHX?cTmwquB!k!-6Q4kdqmGq z@@KIEg$&;J5d9OqllBJrIQ0PhVcyer(i~1VO9{k??3||HeEl72c|TqHf!v!_up#rw zf?F7t`L^sYN_&#_U#HJS#`f~Mu*{<@Ef<-8L*Gzf^aK6mWBm=jl&|xEe$R0)ub(XH z)cY^Gf5iM1a!cO9;nRHu{NCR_LXyL}3dTt?&+a^x@xrgH+ey2Zd0U;ne;YFXOVx)h z=N%k>I623C$JDyG(93s3vX6` zxJG?`IJS@b-M-7X-q*L&aM6F!ll)sbAxT%h z!!%s`kam@7CtPo$kEp*>DaRptsr!wAe#K5|{J4x^n=f52wmr~x8g^UR1MLg{asTh! 
z`nr_U6TOALzCuzI2mSd4rD5yQG)=Gbk^V*G1-g~a`S3)hx8L(DEK&$-lBs;B9A*** za-ynN^$SVYDeF%A2RjKM=N!L6kCh!JLgagvvcodY3@E)4`+|B#yOH%6CHL+?k{U z`79=iv&io!KRKD-y_N7@!ed;6@Z&0+*B5)qc)`-tlds~F-;eMrekUCIO#hR7rqN%r z3xy^;$9E3-Kb;)G59C)*{%~?axt;G*;5~iWUzPPl*oCjr`EJplff9;D z4r+{lSl_`qVu8lX`NOb!?Va=@NDPu2s8*gjmw{hl^(r~(33nUzIii6DOqUyHzq5}0 z0XBPlADK}3S5>J1*%y&~BMp4MOU{`|zTg%1E)vRD@#GyV%u5FeGm!}J4m1Mc7xVUn z!aLt{ll5wh$4NAF{zUh8Jj^oW{WjT0d|)$Dr1I}zU(UN8pf*G73`R9x*7I%p>omQ^ zzD=+3HF`XUeOkN&2P*if_u%yYAPP=S);Z-oRsMRdkJT#JD5zy$z6T)lE_p9jzQ@s>kL+o#`cmiKF&{nnIFY{`{?8~eP2-Qi`a8devwAzKVT&J2(TNK0`RK;YT|PxGQUjGKd}V2^ZZKxNX`_+kou_eb+CQJQnGy76Mk>q!Eetb_$#=+1-Ne3|^Cjr8EYLoJ1 zTp;(hI(I0v;jO1|rm4nJ;mp&FzYq!X#mZPc`N-I-KhrZWWPU092x3A@YHMAH4B zg*)Y>`bn(^i%OY)Io~7e0Z0dQow_}E>r?a!>$C$%zgZPe)vMA!Nxz~{1=#IQr-$@G zH(dKkr9VlfA2PN=TuNWwA>Uihy3BFLtH9fGM{9tdj#0jsK2Gnre!;`HQ!bGt30%eJ zf~4bL$%m64qVFmGeVYGE75~;f$lFN)2>H!V?g%{@_k&4r;tNr-PJ0*Pu~w6 z;AW-1lZ14o9|1jF{sdqtJ-u%z-~E<-4!O6Ndy67d`hxP-Wo4^!%RQRhB^)01v;CHH z=k`<#evsP$i0CL^ zq=WTMif-Qr>ZN`bqL6dXxPR}Q*MyAKh;DSAr9Aarb=9Ad*x;l8MY{1!eIJNmIJpmG z6)`+;4g0+m480`!%Ba!yOWwQAUB&d)G$J4;`~P-)Di6xhOTLP)TEX;?3nUjUV>nmKN6lMRjiKIrMzO|&9(G*xQ|7^8KP#HF8##X;_VWa` zo-%oh;x&^GzF`lJkRQ3}RQk($dxU&@J!c)TR6ac)4F}Fpu*8#f7JJ-C@;Cx|-QF>6 zRQV9UaEOPWaz7smDvU5^CdH3hN&*$rQ2xiDh`;qa7 zw6n0^RN?YIxXg>Ot_W98;zbh8+Q+!b+5W5sP!a!BZaB18)qd!=(tq-m{?{Uu>YLTw z$TbzH>;v=w`#8!D>3mb~n)10X)Yq6VA?0FRQ1qTRl@Z_<;3brWcma}fO#?^cII(r%>*htr2ROJ&IU*z zphE(kc`6z8^*T3Qj!lPtxib%cN&fWDO^2J9oZj0M)|rRD>FB6#h%Ry-D_s6`7I4Vm zf{EV3iah#jt+ezzavswG?|ioe;{g;kavr2a`i;sZ*LUG6me1sB_AlGOeon3OBU1|% zU1XZc{xXgqGR{`8bKRqs_?7ouQGf35sJx={Ii`GPylNHi=n#CR!XHt1q~~5oc|xam zcu2ew74Hz^4=6TO|JqM@=Q`sdzgGyI`$VaUV)uJIpL!neT)t;i(H)RMfbk$STEA;(feJD zq56KG>%P&zddh^HoF_$o*HSrT9ZJq~FPh25soBX{@WqXWjvT$uj7qlU;4`e-H*!tiS!4kHGD;O995_|^GCu9p^(5nB@4jW+ zU(EcC<#aF}SxhMm=zhDN%2NQiL?x^2CQ4RgGCZBucV3{M@(v!-M|fu*{U8VNA-x&~ zje!ba$*xklF;5*Z0a17d`>=l}=x3y!*WvB-#tWNL2p6;iHoJX0b$n11wyp8rg 
z+3}7c`eWS7m-}<5_qEP;_@Q?34TVp4zOkCh;p|sBUgSDL{&MffnGf5=l>I%l6dbZkXLnmw`tRTFHYoZ- zT)qRGF6Nj0gnzgGW3DgoXFPXvf_j(P>czHNGh5oAF(_bB?1$f(zFgu3WdC>3n(ErHs4yE*ma<4TV z{UHav)W4jcfL%6;_g?Ko?tmjxXVPERW3U7~diyS~}n{$T&UOQe0XxqSkDglPI^>-KgxM?m~dFQwvxzd00y_h}z?;N^Ze z=>P4N6umRQVo!br3Lcu@^53T7Bflb@->nWj>M=>}%Bv5uZ-RF9*s8Y&p?@aD$NU|> z)CR5P2#Ip`>1D>6Z&|<$7f0iq;rbj zRD6gY*zd^9Kq!@e0{a+0phJtqsDjAB&RqO^U8w{^Uohf=9mosl$|4wgd79JCY$hZ* ztmA>oO6~zj`+9udN5=269*6p&qzze*l65ZBqr&&7==OQ{OyY~gGYI{}c@yY8e3TD8 zEY?BMej+6(5O}yq`LdoS{Xt}wgmcFe(|XZ3O_8g9N~JIP=zMV=3;D}_m%PV={-uDT zLXIZ+B1bvLJDu)@AV+D3qOUSvgM8tXJgH1p*B%Au?o=LqPmvY9Zzkit??}mbLgX`@ z-VcMNg`ObkK7B4!_D5neKWXOlui+C3p%$K%{{0o^b9HNRPH{Cx=r7!pLq&(Ih z9>>Uhr&0IoPW~VV86VY?T~PU5rIJy;PbUL{^ zbUu0>lGw;_Vl$IFqdRVu#7Z$Rsroa+f2 zH@MP=Ueo`iH$_VB*bjq~{B1qg>tV{CQMi0xSK6WIsq;KG%AG6o!s}GBhRn-!Z# z&a*)esCnAoxyU3wz*lUmB2(*C2oGoQd8NLbO2u#Ag(d7?wu<>Dy8>tJsaCMuPq*%3 z`G9}ufqXZB-kDPLu?poG#k$?bmM|>$du2S|xSsK_kKhMMeOej41FOWFjIZ$$*s)CJ1y?!9?QOmstYwre?zm&Ug zHpA8dmYa1q%Td~m*aaE)+kBBdfRpz$q(AAqp35cm)_1+h@^4V&syz5*>&-2UxAkT( z!_Y@n9zB1P?~Ta*X1>Y$A!1)Z54jJl*71oy)JII~G3o)~VY(*|ioV&b=W-t!<1U34 zJ+jw*wLX!smA>e88riRpb#c7no$SZT*w0<5{Tb{xHnK1JA?*$GIi)X@zeyz0?}lkz z2hP@)eOg}%6`JlBxa%vN!TMwA_m}1Sp<*}W-JFo>#Z&#(5dArc(-(fNyn zuC-6=k(P(lr|4VActpwBYbU@DTz=*nSJVq-OZV+{xk~jp)hdQ%p8kINkxXa5FrD+Y zbUzha#|lzU`9T7-o}mcBK4<~K>#;*YdVH?(RR8c7S{IXZnS#gs01o}f%YROt|An03 zWZe<+$Grg1BPY2>n%h7CIp_Nv{jk&Jj8A54S3g89&i!>%tgLUq&ViMPUT>B8JH{P2 zzk_jO3BqH1?+9=`{gZbxob9)t@Y8*H27JL^!)ikIg6Eme{#5a+?E~5q;`PZp<#!W6 z&RJhb3jKb;ufQ=o$OS41InJf~1)@LD(|*E(eh+ag4s`qe;TR|UUm$OEX&**;)6+}u zOWxgS!Mf_-zn*jdp3-x^Cn@?Ydip=9=Rcxz zrSC`2SNvZpch>h0pWNxn;QNyM_%w32V7$o<)ps6N%`oWIaYmDZ4(r9p`yj zO21j(KTP`F^FHPLJ34m_dyalz`pplcoLQegd~#m(KIQz7L(VeZip}G3kjw+?NeQ9i zb$T38PyMe+uzUx|{@x1V(T}6vZYMvazgH;ZO7!PyyhC;y>B)Pfa-Mt0ct?-VljHSx zhth{W$h^0gGE?m%q}vA)B`5Eo+v6OfDj{?eyNp9p_P@yJM>>V{hunLP zb2OdAA2QC-;~>a^vVPZb&KOU4bb$R!Yw8cR`+CGrp zas3X}yE!UJ`@J)V{+&f3vhObX_fOjo(L)(`iyqc;iF(OXdibx}=^^r*^*^SEN1XE( 
zDlgoJQGV}FAAk0q`gpQKAJN`sQhBj2gZE7%VH3sI_j1WD=<#g@HD-j%dg4r)S1DNQ zML~m#=CViX-8!Ocq98eUKQoizf(}i8fPKt|pCx;jdltjkPg3@S)=df>^Q7F(9Nuy@ z`&*i)svpht7xb`oxFL-5%&vF@!?iRh&)AYVUHy=Kh3TOe2M3S;H3WDjg`j;ml5gL~ z)pDpOIjM4yugay%k2rv__XpQZzT$`eD1Hd%;wMj$sr?76UpwcMN)AgnyoEhu3lbw| z&$}sq^e>SxuUkldxW4YnNArzxy=y*oDtTjzp6|=PH{z*&=xz?jx{%^;g@%c*68f3c z`3zSM=_fF5gscj5`Rw_$PG81vYF+$il(}}$Q#jbuvmNbK`f<6RA^i^M_7mfu=4gh{ z#l|tBnbI@q=i}Os(?=auIW=F>56bsq?DaWqcjI&(AM|2xk>73U@*BhXIr+LN4V|pZ z$T$RiEvBf_FF&C961g4FcDa{)&>{b;)1{jx{~ZoGORuxZ_jv4gXLLXQNlF`{7dcA* zBR=x|DJhnO3;*8l^oaaG|C*!zM1C0e7NWe4`hjLkJ@x8(f}H}Ld>1rK-;wkYjPVZ) zUtPu|<;!_;S$C6jY_jiIz-EKa86!5*^1(N`Nbomc-~LW^$aqP?SocB zPAa}gK2E-{dgp{?UIzIUP#EO>Gm02PBILj)nsAuTnVICvx-sbCVvW!OqN8|B#|4Lc z!M~h$mi2czzZhGgqN#hG_`bO8Gl>3*oWp_fim*W2AK0;pf8ab3kgNF+I{E|vbB)RGIo!Xxgk8&PH@bX?!u9lM_WN%7FG;77*N4|18r3T0(PmhriH zIp2={26$Wl3ke_xy@q^Y$DZ(As1){s|_@p+2z{5m|uVgJJlxZY}$*vVSQ1$QBfYTwe$KnUo&VL4Sax zBkOsc`f~jpLi@WDx;=}2$o`d_n@p}xrH6FdPI`meK?CD$xyBfV{eZrO483kH^3dgl zJ}7zUd}oq;lsw8f{`BCfN*;?gPE!ck=dC9?+>^MxjL&%qzbGH_6ZudxX1IdW8-NhV z^g^Z!iLm&i}r z$&h+pfK;sy%v(@ydGA-w=R{}e_!Ow}(R}5u*70xA;S!&Z z`>1hxE;)q4k7|6hlIdXwT;qdPj4#&d_Ga_Fue#7|6(H;Qa^AC-uqcn{rJS3qfwBW% z%RcD9cRD=En+~t)IZZmMzS7`LM>+>6T*=*$Klq$T<#FmubQdAX6-asw3N&PVY|EX( zmE5OuxV(!tpba6$DJX}H4>O&}+bd7pcdjN9oJ&SkSbO<=Qk{<#O5|xB)OO%1P7nE@ z95ovlmh)u5EBU6uEBU6uEBU6uEBU6un~r=BP-G=v4`1kCUGj}Hp9PYhmW#X}Cw4*F zQM8ch<^3Bcos#cF4j26`Ud6D;5$EWt3AN}trSkT@dha|O>!yo|E~I9&q^F&njN>X*#(fd>NP82Hv*oW8t=0z90xpX)!eUPZUZ z=L1@9`FcDm=l$z-dXjIQ|A>lMpyh%5o%wq6D=6Z83o6(T8HZJTk#{RoUXePZi5w7JAeE?h|dZNd#i&qkooXi`B zq(j3hlfrwoFX`6wAQr`2$Npk%hX?8zmh|j=3mK2~DwBTJlxw;!_N6?c9JP4DQEwFkQh-?0e-4JuoRKb?$JvBJ8`YM8(ItTs2_|ZdYh^E`B@x zbdEFnsX&rTz%YQeQzb^Q)VhHmvq-3Yt>^pm+!auItsZ;KB{F3UM^+xms^^>5yrM~3;qE~M-DSt3R`y(oSnWu{0^j&Bw zeBaqc98T@3n&=B|qd(;2J#5)`t0z6*4*Xsftkxfn6MucPxjy=;hU(*m#HW0xT-vjj zPir5SzgM@jMTNW{<;-V~Qx5lNJ#1t?ocg~8`R(1K$`kf|O%W`f#B?F!tLlffm*pXN zYcJPJfgYFFlN`L|JWBZw*KSZBcaWf+{2oa|Z)v-1-Oc!j 
zfws>Dy4}kDS}&0!hCQ#5e!(&CK$g;9$agFY&O+)GZ$10ypU@<0Rxs?u!vdzmlPJ>S zO$Xi{XFBo$zZj;t#zER1$a`QizQp+3Ij@MyxSQ-G$`#f8+T+Sw2}w@MBjbOh;~MAd zbcT%YC4CxyR#G~30X?qoR>`V#mArMonNBWB-f86c|D}ARA5wYBxZc{&_Dm)M8Y_=oS?$oFRD9*W$jgx!Eczk;6+(m(i-)jA*g4VTNq z5~Bc0O3q%FqIhsJpD57f!8$IPwLy6YP52KPmneFt{oC~}c2ndFJLo4xx8<>Smy)ma z&oVxUbZWk-Pk^)b?N)S>zoo|y_IpWMZ}atjuDt(KPw|mbU4YXI89XqTcf(~KAm0%Z zJ1_ey!l%eJQla_M@{;Yr8D( zrpR}^L+aK#*<-!_Aaa!c!}osQ6penGF0PB=s7r}+`Rly)usjeM`wI>>S!xSILR)#Dwzo^*XjHtTeC z{cw1w%4wf&Lx#R5I9k$c|tRDbB#1U@`JFF$A64GRsi$(Q`qAT!?Mpd+l#?}T zni-aNpZf|~;C(w7FLuaT-_;aUr|>rk8n3Um?SN`h1A+iSYpBB(R)olJV06dfwvQ&y4DP`l?@e9E{^?3B!lv947{q?jKM(!YU;91rbJ;mKF{s~DdNeWL>v&_h4@ zky;iQ>=K-;(+l17&}S4q*2kfTpdoL&oU4uMg6nJ%Opf+X_4^AVhv+RF60yfIoFVpU zQZCEhKazLx-EyOFT@SMF6*7LPqUVP=A=wvq^Yg~`aNr3==T6V)mwlNB^!#<8fw>78 z-{o|@{Bp4VOX-Hbqka!bSM&Kmj7d}cLh@wyL)s5~L&5fVS=WPGE_p`eTq)l?MUQbx zHPQ*5#__ej_7WbHvaVDk;{dIP2=`NXIy^KX9Ud(-9bWYxY0@_x^aqF;=neLtUHx>J zh{%cki!NZ;d4A5-Piy{VzaH~{lq0Igm9nnqTpvJr1P^(-`e|vGgm>+8IpkDH@zE|| zIScgo6m}A<)|0R74jLIbq>pmqH=TAeBI7;vO(knSaYWUEojYg+mXKZjCMn)-Mr`b4flr;A8FK{sZHJcL-;n zA90oM#P%9AR&sLB2J!u__GHl>L6YI= z^o|nvMn3RCZ<3Gx3cj4Hk@EnFZbFi4ruUfklxk(nmiccB7z#0CG}Z zw0n#bob>`bDC7PDoi5rB@N#b5J?=ZYOz_eUob#S)idPrnd0kk4|JoUjbhn>Jf5;7) z7ioSI)hgJ22YLHChUFfD*hiTkZP)S_IZHn{-2BL%4^7~7AJ^p-d;P#z#usQ`_UUZ8 zMh=&Gl&mXy`(yOK2kG7G2evZ3%-h5c$@u#k4mR30LhGo0pv2@vzTWwqyZ<9QqxDww z)t(2TvdOvooxFZI=OpjUVSK?kgF3H2l0k|^PVBnSMM_MCcc(}5D!p$k=ak+0Li3}r zie{g$qW4BscJki%u<^8=7Cz{^#VVfcQ_H+c>dVcS(Jyk$*Zb)-9#HXhcfd>;Z)~UU z(kQ&NoBXmI1&2fIOFH3TCeuyjsB->A+LH|zayaBF{R76|vThk|((=&tjD8dCxSsy0 zd9k)@XwM2yKJaqBCuA@I#(C8g$DVKLeoN*r14}88oV?p9=erS3Nv6faKBc2q_Vzra zMtU)weWZgTq@y<-c(Fg|A6@gUnK~Uku8Beb$n(KL7}8Q)-(`6PS)8Fj`F3$t9ohxwVPhmQ-eag(oyx425%CnqkFd;9d2k z@sN|MCyJ-)seyiw!+waWC-PN2>3VrU`cKLfmaduV5x#q#)o;{jdO3fUukVlz6d`re zQwW>fgIX>R((h{e7wKn(lzCd5&hJ64^8G70KPLN<7*{x)&(@IaeiP>o>xcUZ%k- zy-b5wdYJ~V^fC?Jq%0-h1H_EdOUl=!mxxRb`U8HWwScJn0LVd)T=qlvLs3cx4suj_ 
zsp)fdxSVH2xYA3FAF^KKz`OKP;0OH5{XSU-80aS?xog<7_gAESCy+Qfv>R1k?(P)~yv!%Dj%^aW{VHII zPc08gCq!RG0AAXq@cC&V$q5|R-{%g|ttjB5WAPW|;C+mi_N}qjmL@aS)toG@OUApB z#Z7H3-CZrky@{?Z#mz0<#T~7&?oElVwn%p((V8qy^mG?@^(4ECTU$1E#k#EG#za@V zIN8-$ys53bxGm8%w=222y(zxUBuX=}$=sOeX>Uq`pee4yx5m0!V(r~#i=rzr&sb!( zm}i(}E$!X$uJ%}~8Sm;!bXA#g!0{%tr@bp4Yup^$*cvw*6HW0=v6fascPGqEEv>C) z@8$#rZ0y;zDc+R|>g{Ujj=RGC)dD74TNPMGua*M>NaQ0P%sIhHQqjyHnC|_ zGTv<#E;36%szIi%Ymaa1h&OfvRo2=> zw@Gcx+-P;jlhkNqO=6yv8L2Yon09lQ_RZ#JNDH59uBXPfsiiB~t&%g_$Yh%v<77h; zU1q5muk+%`o>mfw+jzB`4J*33wDCeqY-vw+clD_D9^Kf}(%Ka7l6DD|Pc_lhuZT2O z)ruocEt@tqCfcZLi7=uoPW?s`+idC|xRJ*r+K9*F$;j3iv9>j`$^P9DX-u?l+8Aq% zwKr1i_I9>FRCcnDDe*U;7R^~?`AMoU*)8qOW&?~)JINQl0{Rg$hAq^ZfZBu(YIB0* ze?`msFCdo`KkGHqPo7xj( z-4k6gij?e#HO7}THFZ(_F4;<5m+Hrq?ljmv!lu>)89Cy#BS9_PY$z$6H@|E_c}3;I z*v3Zc@lXvNU5UnclKLO&b0Ny85+zlDJ5Yo(CF?**dzqy`_64DdqXlPXOm*1Qnyowy`#5S!=AN4e0vKG#F`ZNjiuV zUAhqK6CHIOD>^pEp=GU0S-(YKUi`AwL^AG0qGU=JL@~H^>3Zf6trh4;?NH4C>u{`t zB(^o)6eVNB5eLmKVc!;6S6V=-mD zk?4|?Xz`lW%T}yiw_kSqO_eK<94Wj;Eu~?i2eoW-tjmL1 zA8QuOax%I!h=(~@(r!_+U)R&oK_laM(>g+}X+gWuHsKuVdRE1@#@DD3>N0{7Z79n} z$dYcMf9n!$arBSW#gJL%E-*pv9fbUj2tyOH*avv*ptSE2ggsCIBD|9Q}RzKFqxq6 z)Tz^^&zM;>YxZe_b0Tw#Q>I&m+lG9}(q+q6tUPPg*&kVR&f3~@&s(>??)>@<7hK2& zzIk)YN4K=KCptQ*&+ggUyUn_ctG&C+QhkDM%?<;lF&#Y{x5O<1>e^M$qNYVO!HAh# zTNFYowi%;7h|C<#l-fGTV#k{d1Cpc3Usue+L?IrduD<~z%w{tA4K(ATp%gU(vw`}5 zSjQeRcRZ)j;|{7CvitGgZmWYjIyFdWi*;x+HOZ4eJ?HE0>7coq1~`I1b4v`P!|q1S zCe@Ep^H25jn|hMev&OqIH+6Q_4x-pbrG)6$H8$QP)|NCK4SWpa!Edffa2;njNy<_vq+)K4wKuiKTlBP0%@NHGs$UwXQA5^f%&xl8bz}0*6X?t0UDV^m zw_sY^(~e(Cr;w(DyUw0OcZ}rINLm$dQW?^6f!Rvy3N(Xf8%cVXv{F5_D!9TmQVz)o zS$3LUQ<_Q@Kv35(LlayLd8;#ruqS2blAkp5Ost+|c>gwqrD@3e{LF!3O zk}>b588Z0ObJMnX8&y|=rlwuI>d@2PNZoyNqKnK}6QgR9ZPaYY;M1r$(ZvQ#SsSw` zkz^8c)-qdB&xo6Hrjf*#7__(^A7HD+5<)o)e zm7dm;nl54eT%pvn9!jY?TuNZca?!}u$<%KfNfawEt2~div4)g!rLBvpUu#IA)Q$GL!mO}|cAx-e8Pp}7T(0ybNpa301zAICBSTjZV(hU*tIHbF79>r&vKq5z){wfC 
zhNPBJRwOpjsI_L(IZ7>RpcZvbg-{w$WwO>_vX&riK~vy2)WHGZ7kkb>VO=Q(%fTpt4z+2kLWU@ivTCAv=jHm>#ygH8lJwgAD(%PlD zmi8Y;1&sbHPoYf+`UvTF|1iS1bpb7p|9j%~tEUo`VeEMA{;Mup(iV9Y3#Qun3O+i} z^ry=9Kiz)d=YRS8;@<`2z>E-(?>qC;bF)A5Lg4MnaBlHS-BbVg*QbA+^<40gpC9_+ zLzA}8zb7!G;CE~Fdaoneke|Mf&E0*`H9vm1>kH2wyQVli_OaUMw{*0h{dDfJPyeL7 z^Q$usZF=lW)5^p77yfwZ8Kc(^HXcdVJvrgEzsFxMdZ_NdAAhd@xj(i4<*C=6i~jez zkdanIn_9c)HN`RHrhXvaY!x@rJTK8s0vjssrnzLkx-q*?8`jd^lN>Ivqjk72nkDNs z9ID21mTZWwTYXk^`RcP)uV0ro7vf-Ujjw({bN(Z$6{on&%#R_^q$>4*u=U z=M+5hr(2f1cGLL}k9q4Q1)uTO_FZ2pIQq#sZ~alhSKLuoT6FQbKVJFPUljcH@7?pI z8}IM@T$wHxBu|z ztM{DzcA0`-zJK?;=L^p~apjh`*C_Z~ zKm76Jnah9q=v8m8SMY7$oBr*mzOdz%J#SyE;4AOmc=@!}`+xDRx0@9Fx1UExZP;|* zT|ar7zZ*T{zy9O-Ti$r_&#%4Rt-=rY4}S6Bm%je>oOeE^;7^?WvDQ#@<2BRYxl+OF zO8&j9Vep~vEq><)1=r2^)=f8Dwf43P-?>%6-}(5gj%mGrf1&%G&no!F!!IBH!gpS{ z=f-#TEBMySyRUsZdhZ+ezH`5VpT6_pjkk0x|K=m_d`-by!np%)oXEQVg?AoO@Ha|J z<8{*y{qXPaJfh(HxAz@=?8U{O88`T(f-_D(ZT2I17r!)j@EHaF_~vBGXQoa2;_AWY z6g>L-#ucxB{hGHM2VYX~A105vxhd=Ahdw^|M+HB5ZEYg^$%k(E%-~-XJSRTs#SIf4 z{L#M+zNKJp?A4!rYA|J7^6q}-l~)F_D+{;r{zw0??MnguF6@=rx4yA$;o=>q7<}LD z-@=*S>wD_HBz_`+s-e{>uOSNt@aoBKv*n9TR5Etog#V z2A?;Z`Q{_fl`OlihuA+Pux%$fBw*Sj5P}WMf|?WpUQgtietul1>ce1x%BV( zmpt;iaj}9o{PyH~=Cqu1^C(}Ff`9+Xj>%20`hGss*Q(&NdtbZbGk3M^Tk7jp@Kswb z|61R+?AI^$eN4f<6Mt##eQEvIdVNRUv%}&z8e(0=#tx>cy!%@C+_#% zs^EXybNfFo{@UzMKkoaif`1*|^O^5_Vde9`^6gjf^cxobqU^hO-u;&Eeg$9rh23{` z-SODp#{0jf;HMt`-THL{&plY`e@MYEE;;q}34ea;+BN=16nyu#i+_CQQ|6PK{7)+Q zg>{8}f4*@1?j8PT6kJtOaQky#-}B;~{^t~Y+rRz#;&nfI_TV@CFDdwp{LU zJnH|Wg7cQ|Kd@%a)^Gp8{}%;M{%rL(Ui;L2pU4QjrQkVN*58x&TES5>z#Ch$N^knz z%lq^0JUzhYYd$yfvwPpXGw-)`fpOSgGiLqe^o`G&MfZ0ErYQKTrw?3q=|`6LeBT*1%wzVNN~f!qF&8(gE{_t#%EceNyY-mSYh&173kI~ql$<{4+0<)yP!`Dt8$ z=>xXDnp(EDG|{$~Wnzdv)PNVFmmmYLrW-}GWZB@K+ON{Rn423E7_a#?(Z5#${I2|- zt9?fG(p;new%vyDtG|Yf@85CyyUK(&42N=0%wJS;33ZJdXO_$y8cZ{n?qsSDESp<0 zH*(tA*jk$Nr|D>2{bgHh8x0R<$&`o2o3uzg>{jO7xl0o?>hDzpIsSQ0Pq%_(R!F0D zB8WFt(Rh&OiEU~v(xR>rs?9MMTLvh{sor) zu}vXs!?YKH740fBhw=EKF;%A7N1Fn)#X%bo=H*32 
zeo0x$f|Bx*ijvBbg{38>rKR&q=a-h1E+{Q8tthQ5T{y2~Ug^Af^XAVho3~(I`MipG zmGc(PFPUFDe;yroD4V}ve);^0`IYk*mX(y1mdz`hUshJOpsc*CqO7uP;ewI{r3>aQ zn7^QG!GZI_VdLczy2-LsmHpNqd|T{L&@+)Fmbn`ym7Y4e3v z7qIk!c~OSX7w`rBq0CHwR#vt@Cp5x8I+*7h;~yJ3Y0NlZ*gwHPY1EWZepZ2Rn(w2* zE&c}rU-3WWf7<^e|1%?hlKoTv&-_349S{A=|J&dz{#VTt!8iQ>2)yYVIrH>0*Vf$l z`Okl$?-RG&w(qOoy80^_nc3xw&OHCmM}HI?H?h3p{QB)*{PF|eS#W&pHP?OW^TAQ0 z$Bdm-T2{4UFnRPsz;688L1``NFD$_a1vTyJFW(2Qza{KXX&djW>@; zLjEnac%JqMor;hxM% zlTV#~=HlP}@wLH69}AjOr_U&wUs<*4>@~IP>dxPA;l-CWG{!e=Np8D*`}O<3_`snf zN5A|)d*b0+Edc(} zzpc#451h5SA}~5DCu3n|&W`d)nWqP)obSt>H|mNzH}#C@f8_c#jiatC$s2#;7k8eu z_nSKxX3h>?oG~M3WlmA(q@9N@ihm@yFmp^bRO0qGvabB;?CgEN-7!Dcm!B~@n6=}E z>w;TCqXOBPc{ewlmECRz++JcOX{-2x{ zI(sS7){o35UdppZ z6KQs4{|~0;T$y3`0-;bwhCeeSD>Hjc&Xf_8Mou1;H+p1lFfTB6>`B?#CUV8b&s4**6H^uv}{OUsw5B%h3C;sb-t3I*+o`(k>ef;U4|6c+2pCPUX+=Yn>Vxg)z{h+6^j-xU9sz?b_*>V5q{`*1&**i`SOv>^Fi-Tps%z!U5BXdknZSL62y39auN=|klE07uR zlNlWu3KL<97wz(Xc{+#8jMR$ z+GC8JU>1xt8wy^Vv*)zgCFY#OJ;&$x4>m+jd9z`z@s3&k`N4+rw|&1V_vI8;OdIv9 ziu*>#78Xz3yRc+RY}KDn*}JB?ELMBsqkGS-Nfe&ngm3$WMh6{hjL@LMPyhK=jVKA{`QoH4e!nj`)pyFt z7mcXO&h||V`m(8G3!N4?BWw0VpIJc>f?1>$nK}M^UlpPUvxp?eKiTK^FQgtY=qD}o zo#GGpMgR^Gfp46DJoS%6O=)NOG6OmOQ+%gW+>sQsh?1vN10mASO#cX#Dp;aS{eY+V z7ZQJxOTKTFFG#y_K3|saT%SL4WY$KXKYK*xYX8ZUp3he?+DAEtM);;>`!)rA8N`%- zl0O*83yz|n89us;5ikPz{!{3`YQHZt%jX}F?W4Bg>+zrJ+ZqV^vwazXUyu-p+f1bB z&&tU0`$`H*gCzuqd_~zK{U#OC7pSB_iWsQM^80rOe4~7s$S~l4tlIGXN1+k;q_4p= zGFtpb(3fNSYyD(JiT6qVkneW?VdevibcpealGle*Z`+SFvwCrRw*G zsNA#tS-w{xHXj`h&CAQ9qr<)zeV?XB_o}ff<7~+C_~j$hCeWu1Wu0=67_lG zi4{uavJA?AWKP_d0Q&cqLxb`dgX%0j1$29&P;sfWy#Z?m@`Z&3Z zkLs1o$8U)KWKBF>B9ZTVB&b)f3}b==kMyKG;v+aq7yb-N?`%zf4o%kiL^i#_N$H)V z@u$rSE+`p=LYr0jbMU(U%G*^dV8vH(@|SkSmohkcgz8T8d(7k-Ze z|5c*rTN50~86Wo`3Updd{6a;tq8wP{AWoXQ`4mL9O0*>!GDhuk+M3|`Tj8Y zzY~6Znsl=0K1;fEvWLMhBz(Gb?xgh7$#Ly4_&DjQ(9fXyxm5FZ0m+Sy2Po&nPau48 z8vH)OiF}@ZBB*caGvV_D;|{C1-w`Zz1zgtm+;HtM?d(`Bx7+iGUg`+#SNM61@K_%c z*Yf?K-Tr}lmSAa1pCx#*Cj1-W^)+>4JK;`AgTIP!Vk>fgkfOhY@WM+G;f3Zp!V53U 
z2rqdd=}R1RfDaSwv?r51e0#%>5WVOM;{VW--YXtB@MMZ#&`%*)_~{^6_;lhs2`_cz z#J3|p{eyGjKSA&r3hTyyl5i{1;BO~fx_m!F_;mT=o<+KRuP3}o|KQ50Tu%G{H%B>V zQ8=l(a!&kg2mT4tf0Id+o(O&34}-Q&%yG~=@ev1pxPIdB7*2dGr6+mU1qc z*Cv4!@YAPK_1;Q&NyCW;z4UobJf8WJcJ9R25#H39I`NkhUUV$)x76shBed;~zQTBC zyx&l$sVTB3p6Ips2&(-h#~Uu(e2?({=ifd4=JyCc@gCuOe)I0>AJP0xr1FWrIm_4a zD#vdi`4ovPv=49BpE%qpH(ZzFo22yPXlgvQl0dOdPW(lLx5r!#`bG!7_z>@ipf2d5Wj6F&Ph2JhAg{6TTFL+MOU+bmm2(GbP~6Kw3G; zIL{a++&dmL-{A6;5ItP2_K#FBEcjI#UsN5i+k&%PdsZ?18>`tjhz_obF*xG!)+j6Y>7*LI(>u-UVOys(BWQw+n;fl-y+z_R}npak&d5|F6QT$ z_TSjV@K=7C(r4uRX@aGXC?wUTA&+tCi6cJ?8io4z8uCRq zoOq0>1z&qCXpcpD2rv0N@mq(%(}A9}^p_2T|98Tt%l99Imp16kmu7+XINXW1hDm?h zFnGM4kgi-KhQW^<2H#0|u_I1?wi3Rc@`iKbuOPh4Bb<1QU(=QMBZNPdlX0apn(*oP zALYRJKOZ#a>bRGaudn_ZPW-)umvo%?FAjshZy5ZShrvHE3?A|q8}7^({EI#~@zulN zzcftx_YZ@AZW#Ieav1#c!{Fyr`%1@uIpNds-#QHb*MvvggL9S_UDnx*cHy6N;BS2) zHGg=BXr&&V`0osZe|Q-Dc1mCLaQ1Qc`pCHiQ?Tt?36}m8$d3~&<8i=wWOtlyH-(cp zZRdrvlIiVR?XMYstz%q^L=JlLHz>Z?Z}CAl@haxy?=-KHegODKDZX=EM2A8+`ZT2_lOZUNFrN-MW#Gmk9PU*@R`WzCl z)SnaoIw^{`!> z$k<-m-9H8g69~!E35Z;e1vtL&B<+H+v=$iRUnoD^bL`n=)8PB|P?oV&qT|2d7I{p^ z9If%Ijxqi)2+3jYGKMwI(E zjkpXLrAsn&Z^%Z{1rfRdo5E2>jU5(eM8DhUK)nq#wsl0Q#D<~O!R}v9uzJ{V9-C?# zF7m*eb5$~S_-5U3#O@!`4M*T7bi)z&4G(P6+9I5Ucj!hg@U2si3i?X;tp4j84*hIp9n>Fz_lKDvj^^9Mb6))eWywy-2D&n>#$=+uZO( zOc2RQ`Bz=$hD}V6$Vqsm2VUiY8+N+sk6z)1U-7`TSGmLUu6DyE9{3FpTz!q3{;&t$ zbFDl4$aQWw>v}hQ%mYun!5zNmlWus`EpB+12kyVs9sb0p-EhP0Zg|&0H*Dw*SmdAe zb9eZ}U%2504_x%TJN(42+;GXS-EhZCZn*k4Znzc`QF6jh2_FNu;f~YX;fLnBVYAo` zzu|#vOWfhdJn(KlHg4y4+yif0mLad4D(n+L94>kdCs zT0ga+cAh)_ArE|_%pIOr<%TzS;G-VcINeQubcGwPTc#F!UMnJfwQi1=eNNFAN0Tz zuXfWPy}=D{_>>#o?Sai3-QktH+;G-SZunXc+;Foy{GbQ!xWyg5YPTCc`Z+gje%=l5 z^1wx3aECwPfsH-x@QEJy@LqTL|5Mui$J%e#bsRr1gi17)DcCI-4<>2>`O^L2tR`f~ zF=JH>^#^$NlYZSsI^9_F-Iwi*>8w8>M`BEkfm9P&MNQ6smO!A2i;a<_i4jhSiA~tD zDlR!jW!(l_Cv13n@8`q4ujjuux!>N$eZ4=Qw&$LE@45H;`Fy(nxO(*$>KZ=$wdUjB zs-4fOgGbd7+`?X~^#QzvD|r2n+CGDw&uPB;Cv^sw@CYyGKct`g`i$V^ziJ-B30%N6 z+`$v<_jfv#9V8;TST-9-GBJ4k*^(%M{@8A~pzO4NP@Cx2Msr3b1!9BdN 
zV;Ri#`S2Rf;ogk#_}1%7Gj_qchetRxVzax?Z1K#uy4lV*!~jO`vvVUfHQap53u`-+TRM! z;0oTu6TJK-?LUTdxP}MV{blVhfMYm=3%G_m_y9Za(&PJZ1SfC~S8xmW@C1AB*7FJB zHN1gKxPg0kf}OPsxsHe$6}BxnJ`L&fx|=z$Z6=JNNhm^Nr%pIf8Sz zgyUS>r|=f8;1=%T@`Kuc{t|*a@bWWSAHf^Agll+&oksf);TX>0688T_`&+@;XEoo# zJGg}hc!IsZ)BXdvd{pxuuK!u{Jv_qB=e0iif;xdqxQ2Upgu^dt|0x{(i{^Q+F5&H$ zG_T>|G0lU=)y@;@6wcuq?%)Y_2kn0cul`MQ_uthe+`-kCwSIuz|Ij>z8`ynP>&Jt7 z@tw$I@jb|$SazM&poR97#l8#w)@=5{8IeLdHj z5w_N=m%Tz>?=*1na?Lw9y+ZRG_Ftj-0O#MPdHPCq2UoAsJa*LXx2tz>@M_Hqc=?^0 zhj0YD-=+0ycnf=Gz^r}z{nu#w7~aADm0G`o`|r_wg8kQOp21sq2X9`d?MJx(UddxzHV;qaZBM>nYxcz}bOwZ4E4@bVU| z-@u1kHSgb}UR+d%a0-|32*-S=vJ45#oR)%wL<>hx}P2_Ki1 zlCOvQk~+DpF5vz9HDBDXjvi2N;C7?=2>Tz5Y2L!oZ)zSstls~Yy35tu-&VJ9 z{5zUAaQ?fRdmmEQa0}<=2I%(tZ?x6+8+iO5%@=0P8{6Klfn)6>58w!n;WeDVDV)I@ zIES}z3D_v;*Kh|9 z@B}ZO)ARS>01n|8Uc(u@feW~VYq){;a1W301iR1U`r!Z$;TTTg94_G<+`>IPz!U6D zdOdcH6#M)Nk+0x2oWLoZ!5cV-w{QWMa1A$b3wLl2AK(dgUeL$SgMB!JBX|v`@CGj6 z60YF}-orgS!V~QNFRmZ<;Si4C1Ww@$-oRV9f*ZJndw7JMuj}RHQd1iJi*7iAIRskFyn(>ef;P=6VN&R@1m2nFXyl973`Yz4(9qf_sx0-)&*R{ zp;_O+)>mdd18e6E$4-x%_29#1Jp-FZZ_>QHpw7*D2DUyh>l0Y#aBbEnuz70M8?bKS z!mKx7^XPg#ehn|)u6cKZdS&MS+x}`ejN7202jAuzJvWl^A)_m zsQCeoZ`VA9n>#e0-m6}v>a|%vz+Rtk)(5a|E@^#upLz}N;2J($*7o_lK+AEn$1CCH zA>_ZIPRx1(wtf4s=8NA``)0iWTOYyGpKJZ%<7&^$zqjqvCo~TRb^nxlfFm`eA&qf*t~+>hcx$K=PxyP;r$mi@8PYPk8O{inEBV% zTQk4fdI#rbeznb;r}cQ#GwSL&_5KBQ4=>DoY}@}5Zr#)QH#Q$&?=_moa0VCj4{Q4p z-e0GA@=kRMXCKtOg@X@iUj32!@Mr4qW9l8edPMWi%%`-ESM*n!XP;1S8}-u6ue9wG zGk?-LGV>v=8+dExL)v`#IqknT^BZlRnE8y>v6*jZy@7Y|0J~;>qHVu|*Ji$-&6_9n z{6^R_^Yd)|3eMmX?*2>rAK=K$-?RNEaAfAE*}Q_?XSIL-d36P6FKE7j-LGq2!+SG7 z&Yn+r^%e5{Z4IZe_x)O5!0R8-y#66|Z|1kzRDgEzmT zd3CdTdz;!x)E-=(d%Zfp{;c2~T*CuA!iQ9kzqm`C{F-_Lr*~^!zzZ{8-hMtJIJsBb zSNEy+_p7}J)CpX}d9L*hJi@gZUvAI0{)o1ZKB{g?_0o(7x9vCZ9`?+5a9f|iqZ$8g zbN>-N-l9^6@aB`6SD#YvKCSLy=QEmzZ~_-_2QM4#FNJG(ZN_8U>#xlCY3m+#&G>1X zNAMo5AJy~e%y?m6J+59pp)TP8 z_6M!sneoB)_^}xuY@K{X>r*qn)#ektH{)4to|^Hh*75V&Uo@%LSI*z{&VK(`U!@+d zQFqs=12bOC9&cmDi&-yk(E6 
z@lp2p1MHgdP&W6>cqZ%Ij9;>@;n0j{vUzt&&u4d8?cJ{~9#A_Es#tw$Lhwcw>Gz*^U$n6X1zA+VOdAtefD~1uCL7cNH)*R z`bXB$>$E=d)TvqD$kyjqYrZh+71_LlclT)h&a9_l>+^Fz-{c;Q2a?>pXu_PD=z`Qn93ciw&B(!Gn5{V_OAAH3&efAOBXZeLuuFFjTt z??!v9xp!_d_w@hT=HEi+xA`%ex8>=trQh1S|8RQVr|H?>hpkt~cjhKnnh)4}$JqVf zzsUq<=PFO`&os9`eRPkXH!}{+?!(t=gZXoFetXLi^SRshzWKYS-PfkrUY~7m&*KM= z`TY4ieRdqzi8bx*e!gGLyv^LU1ViT2 From c85b52979fe393fe02777c3fedee198a82c69962 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 14:10:43 +0545 Subject: [PATCH 32/58] ix: add committor tests to run_tests --- test-integration/test-runner/bin/run_tests.rs | 42 ++++++++++++++++++- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs index bfc45009..f5030695 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -18,13 +18,15 @@ use test_runner::cleanup::{cleanup_devnet_only, cleanup_validators}; pub fn main() { let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap(); + let Ok(committor_output) = run_committor_tests(&manifest_dir) else { + // If any test run panics (i.e. 
not just a failing test) then we bail + return; + }; let Ok((security_output, scenarios_output)) = run_schedule_commit_tests(&manifest_dir) else { - // If any test fails or cannot run we bail immediately return; }; - let Ok(issues_frequent_commits_output) = run_issues_frequent_commmits_tests(&manifest_dir) else { @@ -45,6 +47,7 @@ pub fn main() { }; // Assert that all tests passed + assert_cargo_tests_passed(committor_output); assert_cargo_tests_passed(security_output); assert_cargo_tests_passed(scenarios_output); assert_cargo_tests_passed(cloning_output); @@ -92,6 +95,41 @@ fn run_restore_ledger_tests( Ok(output) } +fn run_committor_tests(manifest_dir: &str) -> Result> { + eprintln!("======== Starting DEVNET Validator for Committor ========"); + + let loaded_chain_accounts = + LoadedAccounts::with_delegation_program_test_authority(); + + let mut devnet_validator = match start_validator( + "committor-conf.devnet.toml", + ValidatorCluster::Chain(None), + &loaded_chain_accounts, + ) { + Some(validator) => validator, + None => { + panic!("Failed to start devnet validator properly"); + } + }; + + // NOTE: the committor tests run directly against a chain validator + // therefore no ephemeral validator needs to be started + + let test_committor_dir = + format!("{}/../{}", manifest_dir, "schedulecommit/committor-service"); + eprintln!("Running committor tests in {}", test_committor_dir); + let test_output = match run_test(test_committor_dir, Default::default()) { + Ok(output) => output, + Err(err) => { + eprintln!("Failed to run committor: {:?}", err); + cleanup_devnet_only(&mut devnet_validator); + return Err(err.into()); + } + }; + cleanup_devnet_only(&mut devnet_validator); + Ok(test_output) +} + fn run_schedule_commit_tests( manifest_dir: &str, ) -> Result<(Output, Output), Box> { From cf609d48cbcec2f0285efb07aa1f3673d0629336 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 14:54:01 +0545 Subject: [PATCH 33/58] ix: move table mania tests to 
integration --- Cargo.lock | 2 -- magicblock-table-mania/Cargo.toml | 6 ----- test-integration/Cargo.lock | 15 ++++++++++++ test-integration/Cargo.toml | 5 +++- test-integration/test-table-mania/Cargo.toml | 21 +++++++++++++++++ test-integration/test-table-mania/src/lib.rs | 2 ++ .../tests/ix_lookup_table.rs | 3 ++- .../tests/ix_release_pubkeys.rs | 5 ++-- .../tests/ix_reserve_pubkeys.rs | 5 ++-- .../test-table-mania}/tests/utils/mod.rs | 23 ++++--------------- 10 files changed, 54 insertions(+), 33 deletions(-) create mode 100644 test-integration/test-table-mania/Cargo.toml create mode 100644 test-integration/test-table-mania/src/lib.rs rename {magicblock-table-mania => test-integration/test-table-mania}/tests/ix_lookup_table.rs (98%) rename {magicblock-table-mania => test-integration/test-table-mania}/tests/ix_release_pubkeys.rs (97%) rename {magicblock-table-mania => test-integration/test-table-mania}/tests/ix_reserve_pubkeys.rs (98%) rename {magicblock-table-mania => test-integration/test-table-mania}/tests/utils/mod.rs (88%) diff --git a/Cargo.lock b/Cargo.lock index c7baa0bd..634cfec2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4056,10 +4056,8 @@ name = "magicblock-table-mania" version = "0.1.1" dependencies = [ "ed25519-dalek", - "env_logger 0.11.6", "log", "magicblock-rpc-client", - "paste", "rand 0.8.5", "sha3", "solana-pubkey", diff --git a/magicblock-table-mania/Cargo.toml b/magicblock-table-mania/Cargo.toml index c9866425..5767af61 100644 --- a/magicblock-table-mania/Cargo.toml +++ b/magicblock-table-mania/Cargo.toml @@ -20,14 +20,8 @@ solana-sdk = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } -[dev-dependencies] -env_logger = { workspace = true } -paste = { workspace = true } -tokio = { workspace = true, features = ["rt", "macros"] } - [features] default = [] -test_table_close = [] # Needed to allow multiple tests to run in parallel without trying to # use the same lookup table address randomize_lookup_table_slot = 
[] diff --git a/test-integration/Cargo.lock b/test-integration/Cargo.lock index b030e9d6..2c6e1869 100644 --- a/test-integration/Cargo.lock +++ b/test-integration/Cargo.lock @@ -10118,6 +10118,21 @@ dependencies = [ "teepee", ] +[[package]] +name = "test-table-mania" +version = "0.0.0" +dependencies = [ + "log", + "magicblock-rpc-client", + "magicblock-table-mania", + "paste", + "solana-pubkey", + "solana-rpc-client", + "solana-sdk", + "test-tools-core", + "tokio", +] + [[package]] name = "test-tools-core" version = "0.1.1" diff --git a/test-integration/Cargo.toml b/test-integration/Cargo.toml index f20c8b98..4c8d6dc4 100644 --- a/test-integration/Cargo.toml +++ b/test-integration/Cargo.toml @@ -11,9 +11,10 @@ members = [ "test-cloning", "test-issues", "test-ledger-restore", + "test-magicblock-api", "test-runner", + "test-table-mania", "test-tools", - "test-magicblock-api", ] resolver = "2" @@ -41,6 +42,8 @@ magicblock-committor-program = { path = "../magicblock-committor-program", featu magicblock-delegation-program = { path = "../../delegation-program" } magicblock-committor-service = { path = "../magicblock-committor-service" } magicblock-rpc-client = { path = "../magicblock-rpc-client" } +magicblock-table-mania = { path = "../magicblock-table-mania" } +paste = "1.0" program-flexi-counter = { path = "./programs/flexi-counter" } program-schedulecommit = { path = "programs/schedulecommit" } program-schedulecommit-security = { path = "programs/schedulecommit-security" } diff --git a/test-integration/test-table-mania/Cargo.toml b/test-integration/test-table-mania/Cargo.toml new file mode 100644 index 00000000..3d16007e --- /dev/null +++ b/test-integration/test-table-mania/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "test-table-mania" +version.workspace = true +edition.workspace = true + +[dev-dependencies] +log = { workspace = true } +magicblock-rpc-client = { workspace = true } +magicblock-table-mania = { workspace = true, features = [ + 
"randomize_lookup_table_slot", +] } +paste = { workspace = true } +solana-pubkey = { workspace = true } +solana-rpc-client = { workspace = true } +solana-sdk = { workspace = true } +test-tools-core = { workspace = true } +tokio = { workspace = true } + +[features] +default = [] +test_table_close = [] diff --git a/test-integration/test-table-mania/src/lib.rs b/test-integration/test-table-mania/src/lib.rs new file mode 100644 index 00000000..10f55cb1 --- /dev/null +++ b/test-integration/test-table-mania/src/lib.rs @@ -0,0 +1,2 @@ +#[allow(unused)] +pub const HELLO: &str = "world"; diff --git a/magicblock-table-mania/tests/ix_lookup_table.rs b/test-integration/test-table-mania/tests/ix_lookup_table.rs similarity index 98% rename from magicblock-table-mania/tests/ix_lookup_table.rs rename to test-integration/test-table-mania/tests/ix_lookup_table.rs index 8511491d..5bec6e2c 100644 --- a/magicblock-table-mania/tests/ix_lookup_table.rs +++ b/test-integration/test-table-mania/tests/ix_lookup_table.rs @@ -9,6 +9,7 @@ use solana_sdk::{ commitment_config::CommitmentConfig, native_token::LAMPORTS_PER_SOL, signature::Keypair, signer::Signer, }; +use test_tools_core::init_logger; mod utils; @@ -80,7 +81,7 @@ async fn get_open_tables( #[tokio::test] async fn test_create_fetch_and_close_lookup_table() { - utils::init_logger(); + init_logger!(); let validator_auth = Keypair::new(); let pubkeys = vec![0; 10] diff --git a/magicblock-table-mania/tests/ix_release_pubkeys.rs b/test-integration/test-table-mania/tests/ix_release_pubkeys.rs similarity index 97% rename from magicblock-table-mania/tests/ix_release_pubkeys.rs rename to test-integration/test-table-mania/tests/ix_release_pubkeys.rs index 33fc27f0..dedcbd81 100644 --- a/magicblock-table-mania/tests/ix_release_pubkeys.rs +++ b/test-integration/test-table-mania/tests/ix_release_pubkeys.rs @@ -2,11 +2,12 @@ use std::collections::HashSet; use solana_pubkey::Pubkey; use solana_sdk::signature::Keypair; +use 
test_tools_core::init_logger; mod utils; #[tokio::test] async fn test_single_table_two_requests_with_overlapping_pubkeys() { - utils::init_logger(); + init_logger!(); let authority = Keypair::new(); let table_mania = utils::setup_table_mania(&authority).await; @@ -50,7 +51,7 @@ async fn test_single_table_two_requests_with_overlapping_pubkeys() { #[tokio::test] async fn test_two_table_three_requests_with_one_overlapping_pubkey() { - utils::init_logger(); + init_logger!(); let authority = Keypair::new(); let table_mania = utils::setup_table_mania(&authority).await; diff --git a/magicblock-table-mania/tests/ix_reserve_pubkeys.rs b/test-integration/test-table-mania/tests/ix_reserve_pubkeys.rs similarity index 98% rename from magicblock-table-mania/tests/ix_reserve_pubkeys.rs rename to test-integration/test-table-mania/tests/ix_reserve_pubkeys.rs index 94d35f94..47ededfb 100644 --- a/magicblock-table-mania/tests/ix_reserve_pubkeys.rs +++ b/test-integration/test-table-mania/tests/ix_reserve_pubkeys.rs @@ -5,6 +5,7 @@ use solana_pubkey::Pubkey; use solana_sdk::{ address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES, signature::Keypair, }; +use test_tools_core::init_logger; use tokio::task::JoinSet; mod utils; @@ -29,7 +30,7 @@ reserve_pubkeys_in_one_table!(100); reserve_pubkeys_in_one_table!(256); async fn reserve_pubkeys_in_one_table_in_chunks(chunk_size: usize) { - utils::init_logger(); + init_logger!(); let authority = Keypair::new(); let mut pubkeys = (0..LOOKUP_TABLE_MAX_ADDRESSES) @@ -90,7 +91,7 @@ async fn reserve_pubkeys_in_multiple_tables_in_chunks( amount: usize, chunk_size: usize, ) { - utils::init_logger(); + init_logger!(); let authority = Keypair::new(); let pubkeys = (0..amount) diff --git a/magicblock-table-mania/tests/utils/mod.rs b/test-integration/test-table-mania/tests/utils/mod.rs similarity index 88% rename from magicblock-table-mania/tests/utils/mod.rs rename to test-integration/test-table-mania/tests/utils/mod.rs index 385b2068..8ddd08c9 100644 
--- a/magicblock-table-mania/tests/utils/mod.rs +++ b/test-integration/test-table-mania/tests/utils/mod.rs @@ -1,5 +1,3 @@ -#![allow(dead_code)] - use log::*; use magicblock_rpc_client::MagicblockRpcClient; use magicblock_table_mania::{GarbageCollectorConfig, TableMania}; @@ -10,29 +8,14 @@ use solana_sdk::signature::Keypair; use solana_sdk::signer::Signer; use std::time::{Duration, Instant}; +#[allow(unused)] // used in tests pub const TEST_TABLE_CLOSE: bool = cfg!(feature = "test_table_close"); pub async fn sleep_millis(millis: u64) { tokio::time::sleep(tokio::time::Duration::from_millis(millis)).await; } -pub fn init_logger_file_path() { - let _ = env_logger::builder() - .format_timestamp(None) - .format_module_path(false) - .format_target(false) - .format_source_path(true) - .is_test(true) - .try_init(); -} - -pub fn init_logger() { - let _ = env_logger::builder() - .format_timestamp(None) - .is_test(true) - .try_init(); -} - +#[allow(unused)] // used in tests pub async fn setup_table_mania(validator_auth: &Keypair) -> TableMania { let rpc_client = { let client = RpcClient::new_with_commitment( @@ -57,6 +40,7 @@ pub async fn setup_table_mania(validator_auth: &Keypair) -> TableMania { } } +#[allow(unused)] // used in tests pub async fn close_released_tables(table_mania: &TableMania) { if TEST_TABLE_CLOSE { // Tables deactivate after ~2.5 mins (150secs), but most times @@ -102,6 +86,7 @@ pub async fn close_released_tables(table_mania: &TableMania) { } } +#[allow(unused)] // used in tests pub async fn log_active_table_addresses(table_mania: &TableMania) { debug!( "Active Tables: {}", From b968ea5797808004b00949762afb2da430aaf51c Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 15:00:29 +0545 Subject: [PATCH 34/58] ix: run table mania as part of test suite --- test-integration/test-runner/bin/run_tests.rs | 47 +++++++++++++------ 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/test-integration/test-runner/bin/run_tests.rs 
b/test-integration/test-runner/bin/run_tests.rs index f5030695..c8caed44 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -18,7 +18,9 @@ use test_runner::cleanup::{cleanup_devnet_only, cleanup_validators}; pub fn main() { let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap(); - let Ok(committor_output) = run_committor_tests(&manifest_dir) else { + let Ok((table_mania_output, committor_output)) = + run_table_mania_and_committor_tests(&manifest_dir) + else { // If any test run panics (i.e. not just a failing test) then we bail return; }; @@ -47,6 +49,7 @@ pub fn main() { }; // Assert that all tests passed + assert_cargo_tests_passed(table_mania_output); assert_cargo_tests_passed(committor_output); assert_cargo_tests_passed(security_output); assert_cargo_tests_passed(scenarios_output); @@ -95,8 +98,10 @@ fn run_restore_ledger_tests( Ok(output) } -fn run_committor_tests(manifest_dir: &str) -> Result> { - eprintln!("======== Starting DEVNET Validator for Committor ========"); +fn run_table_mania_and_committor_tests( + manifest_dir: &str, +) -> Result<(Output, Output), Box> { + eprintln!("======== Starting DEVNET Validator for TableMania and Committor ========"); let loaded_chain_accounts = LoadedAccounts::with_delegation_program_test_authority(); @@ -112,22 +117,36 @@ fn run_committor_tests(manifest_dir: &str) -> Result> { } }; - // NOTE: the committor tests run directly against a chain validator - // therefore no ephemeral validator needs to be started + // NOTE: the table mania and committor tests run directly against + // a chain validator therefore no ephemeral validator needs to be started + + let test_table_mania_dir = + format!("{}/../{}", manifest_dir, "test-table-mania"); + let table_mania_test_output = + match run_test(test_table_mania_dir, Default::default()) { + Ok(output) => output, + Err(err) => { + eprintln!("Failed to run table-mania: {:?}", err); + cleanup_devnet_only(&mut 
devnet_validator); + return Err(err.into()); + } + }; let test_committor_dir = format!("{}/../{}", manifest_dir, "schedulecommit/committor-service"); eprintln!("Running committor tests in {}", test_committor_dir); - let test_output = match run_test(test_committor_dir, Default::default()) { - Ok(output) => output, - Err(err) => { - eprintln!("Failed to run committor: {:?}", err); - cleanup_devnet_only(&mut devnet_validator); - return Err(err.into()); - } - }; + let committor_test_output = + match run_test(test_committor_dir, Default::default()) { + Ok(output) => output, + Err(err) => { + eprintln!("Failed to run committor: {:?}", err); + cleanup_devnet_only(&mut devnet_validator); + return Err(err.into()); + } + }; cleanup_devnet_only(&mut devnet_validator); - Ok(test_output) + + Ok((table_mania_test_output, committor_test_output)) } fn run_schedule_commit_tests( From 7bde5967bf35a31f02b41d8f27d7fd9eca3f2712 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 15:18:33 +0545 Subject: [PATCH 35/58] chore: fmt --- .../src/remote_scheduled_commits_processor.rs | 17 +++---- magicblock-accounts/tests/ensure_accounts.rs | 2 +- magicblock-committor-program/src/error.rs | 3 +- .../src/instruction.rs | 9 ++-- .../src/instruction_chunks.rs | 7 +-- magicblock-committor-program/src/lib.rs | 1 - magicblock-committor-program/src/processor.rs | 27 ++++++----- .../src/state/changeset_chunks.rs | 3 +- .../src/state/chunks.rs | 3 +- .../src/utils/account.rs | 7 +-- .../src/utils/asserts.rs | 3 +- magicblock-committor-service/src/bundles.rs | 10 ++-- .../src/commit/commit_using_args.rs | 24 ++++------ .../src/commit/commit_using_buffer.rs | 37 +++++++-------- .../src/commit/committor_processor.rs | 47 +++++++++---------- .../src/commit/common.rs | 19 ++++---- .../src/commit_stage.rs | 5 +- .../src/commit_strategy.rs | 3 +- magicblock-committor-service/src/error.rs | 3 +- magicblock-committor-service/src/lib.rs | 5 +- .../src/persist/commit_persister.rs | 26 +++++----- 
.../src/persist/types/commit_status.rs | 3 +- .../src/pubkeys_provider.rs | 2 +- magicblock-committor-service/src/service.rs | 3 +- .../src/stubs/changeset_committor_stub.rs | 2 +- .../src/transactions.rs | 28 +++++------ magicblock-rpc-client/src/lib.rs | 4 +- magicblock-table-mania/src/lookup_table.rs | 26 +++++----- magicblock-table-mania/src/lookup_table_rc.rs | 22 ++++----- magicblock-table-mania/src/manager.rs | 2 +- 30 files changed, 180 insertions(+), 173 deletions(-) diff --git a/magicblock-accounts/src/remote_scheduled_commits_processor.rs b/magicblock-accounts/src/remote_scheduled_commits_processor.rs index 002a44d4..6727009e 100644 --- a/magicblock-accounts/src/remote_scheduled_commits_processor.rs +++ b/magicblock-accounts/src/remote_scheduled_commits_processor.rs @@ -1,28 +1,29 @@ -use async_trait::async_trait; -use conjunto_transwise::AccountChainSnapshot; -use log::*; -use magicblock_bank::bank::Bank; -use magicblock_processor::execute_transaction::execute_legacy_transaction; -use magicblock_transaction_status::TransactionStatusSender; -use solana_sdk::hash::Hash; -use solana_sdk::{account::ReadableAccount, transaction::Transaction}; use std::{ collections::{HashMap, HashSet}, sync::Arc, }; +use async_trait::async_trait; +use conjunto_transwise::AccountChainSnapshot; +use log::*; use magicblock_account_cloner::{ AccountClonerOutput, AccountClonerOutput::Cloned, CloneOutputMap, }; use magicblock_accounts_api::InternalAccountProvider; +use magicblock_bank::bank::Bank; use magicblock_committor_service::{ persist::BundleSignatureRow, ChangedAccount, Changeset, ChangesetCommittor, ChangesetMeta, }; +use magicblock_processor::execute_transaction::execute_legacy_transaction; use magicblock_program::{ register_scheduled_commit_sent, FeePayerAccount, Pubkey, SentCommit, TransactionScheduler, }; +use magicblock_transaction_status::TransactionStatusSender; +use solana_sdk::{ + account::ReadableAccount, hash::Hash, transaction::Transaction, +}; use crate::{ 
errors::AccountsResult, AccountCommittee, ScheduledCommitsProcessor, diff --git a/magicblock-accounts/tests/ensure_accounts.rs b/magicblock-accounts/tests/ensure_accounts.rs index 036e1072..38ee4c54 100644 --- a/magicblock-accounts/tests/ensure_accounts.rs +++ b/magicblock-accounts/tests/ensure_accounts.rs @@ -1,4 +1,3 @@ -use log::*; use std::{collections::HashSet, sync::Arc}; use conjunto_transwise::{ @@ -6,6 +5,7 @@ use conjunto_transwise::{ transaction_accounts_holder::TransactionAccountsHolder, transaction_accounts_validator::TransactionAccountsValidatorImpl, }; +use log::*; use magicblock_account_cloner::{ AccountCloner, RemoteAccountClonerClient, RemoteAccountClonerWorker, ValidatorCollectionMode, diff --git a/magicblock-committor-program/src/error.rs b/magicblock-committor-program/src/error.rs index 35e7156d..d201ae9d 100644 --- a/magicblock-committor-program/src/error.rs +++ b/magicblock-committor-program/src/error.rs @@ -1,5 +1,4 @@ -use solana_program::msg; -use solana_program::program_error::ProgramError; +use solana_program::{msg, program_error::ProgramError}; use thiserror::Error; pub type CommittorResult = std::result::Result; diff --git a/magicblock-committor-program/src/instruction.rs b/magicblock-committor-program/src/instruction.rs index 8ce2e7c7..b6bf1e28 100644 --- a/magicblock-committor-program/src/instruction.rs +++ b/magicblock-committor-program/src/instruction.rs @@ -1,8 +1,9 @@ use borsh::{BorshDeserialize, BorshSerialize}; -use solana_program::hash::Hash; -use solana_program::hash::HASH_BYTES; -use solana_program::instruction::{AccountMeta, Instruction}; -use solana_program::system_program; +use solana_program::{ + hash::{Hash, HASH_BYTES}, + instruction::{AccountMeta, Instruction}, + system_program, +}; use solana_pubkey::Pubkey; use crate::{consts, pdas}; diff --git a/magicblock-committor-program/src/instruction_chunks.rs b/magicblock-committor-program/src/instruction_chunks.rs index a726f5e3..fe4622f4 100644 --- 
a/magicblock-committor-program/src/instruction_chunks.rs +++ b/magicblock-committor-program/src/instruction_chunks.rs @@ -1,6 +1,7 @@ -use crate::instruction::{IX_INIT_SIZE, IX_REALLOC_SIZE}; - -use crate::consts::MAX_INSTRUCTION_DATA_SIZE; +use crate::{ + consts::MAX_INSTRUCTION_DATA_SIZE, + instruction::{IX_INIT_SIZE, IX_REALLOC_SIZE}, +}; /// Creates chunks of realloc instructions such that each chunk fits into a single transaction. /// - reallocs: The realloc instructions to split up diff --git a/magicblock-committor-program/src/lib.rs b/magicblock-committor-program/src/lib.rs index 831bc793..eb0d5125 100644 --- a/magicblock-committor-program/src/lib.rs +++ b/magicblock-committor-program/src/lib.rs @@ -13,7 +13,6 @@ mod utils; mod processor; // #[cfg(not(feature = "no-entrypoint"))] pub use processor::process; - pub use state::{ changeset::{ ChangedAccount, ChangedAccountMeta, ChangedBundle, Changeset, diff --git a/magicblock-committor-program/src/processor.rs b/magicblock-committor-program/src/processor.rs index db1455ee..e1068b60 100644 --- a/magicblock-committor-program/src/processor.rs +++ b/magicblock-committor-program/src/processor.rs @@ -1,20 +1,21 @@ use borsh::{to_vec, BorshDeserialize}; -use solana_program::hash::Hash; -use solana_program::log::sol_log_64; -use solana_program::program::invoke_signed; -use solana_program::program_error::ProgramError; -use solana_program::sysvar::Sysvar; -use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult}; -use solana_program::{msg, system_instruction}; +use solana_program::{ + account_info::AccountInfo, entrypoint::ProgramResult, hash::Hash, + log::sol_log_64, msg, program::invoke_signed, program_error::ProgramError, + system_instruction, sysvar::Sysvar, +}; use solana_pubkey::Pubkey; -use crate::error::CommittorError; -use crate::instruction::CommittorInstruction; -use crate::utils::{ - assert_account_unallocated, assert_is_signer, assert_program_id, - close_and_refund_authority, +use crate::{ 
+ consts, + error::CommittorError, + instruction::CommittorInstruction, + utils::{ + assert_account_unallocated, assert_is_signer, assert_program_id, + close_and_refund_authority, + }, + verified_seeds_and_pda, Chunks, }; -use crate::{consts, verified_seeds_and_pda, Chunks}; pub fn process( program_id: &Pubkey, diff --git a/magicblock-committor-program/src/state/changeset_chunks.rs b/magicblock-committor-program/src/state/changeset_chunks.rs index 990f366c..d1e3333b 100644 --- a/magicblock-committor-program/src/state/changeset_chunks.rs +++ b/magicblock-committor-program/src/state/changeset_chunks.rs @@ -1,8 +1,9 @@ use std::collections::HashSet; -use super::chunks::Chunks; use borsh::{BorshDeserialize, BorshSerialize}; +use super::chunks::Chunks; + /// A chunk of change set data that we want to apply to the on chain /// [ChangeSet] buffer #[derive(Debug, Default, BorshSerialize, BorshDeserialize)] diff --git a/magicblock-committor-program/src/state/chunks.rs b/magicblock-committor-program/src/state/chunks.rs index b68c2a26..06b3e3a6 100644 --- a/magicblock-committor-program/src/state/chunks.rs +++ b/magicblock-committor-program/src/state/chunks.rs @@ -1,6 +1,7 @@ -use borsh::{BorshDeserialize, BorshSerialize}; use std::{collections::HashSet, fmt}; +use borsh::{BorshDeserialize, BorshSerialize}; + use crate::{ consts, error::{CommittorError, CommittorResult}, diff --git a/magicblock-committor-program/src/utils/account.rs b/magicblock-committor-program/src/utils/account.rs index e794106f..cdae0e96 100644 --- a/magicblock-committor-program/src/utils/account.rs +++ b/magicblock-committor-program/src/utils/account.rs @@ -1,6 +1,7 @@ -use solana_program::msg; -use solana_program::program_error::ProgramError; -use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult}; +use solana_program::{ + account_info::AccountInfo, entrypoint::ProgramResult, msg, + program_error::ProgramError, +}; pub fn close_and_refund_authority( authority: &AccountInfo, diff --git 
a/magicblock-committor-program/src/utils/asserts.rs b/magicblock-committor-program/src/utils/asserts.rs index 838d139b..b83c21f0 100644 --- a/magicblock-committor-program/src/utils/asserts.rs +++ b/magicblock-committor-program/src/utils/asserts.rs @@ -1,7 +1,6 @@ -use solana_program::pubkey::Pubkey; use solana_program::{ account_info::AccountInfo, entrypoint::ProgramResult, msg, - program_error::ProgramError, + program_error::ProgramError, pubkey::Pubkey, }; pub fn assert_keys_equal String>( diff --git a/magicblock-committor-service/src/bundles.rs b/magicblock-committor-service/src/bundles.rs index a030842f..086163df 100644 --- a/magicblock-committor-service/src/bundles.rs +++ b/magicblock-committor-service/src/bundles.rs @@ -1,6 +1,7 @@ -use crate::{bundle_strategy::efficient_bundle_chunks, CommitInfo}; use std::collections::HashMap; +use crate::{bundle_strategy::efficient_bundle_chunks, CommitInfo}; + #[derive(Debug, Default)] pub struct BundleChunksResult { /// The valid chunks @@ -94,11 +95,12 @@ pub(crate) fn bundle_chunks_ignoring_bundle_id( #[cfg(test)] mod test { - use super::*; - use solana_sdk::hash::Hash; - use solana_sdk::pubkey::Pubkey; use std::collections::HashSet; + use solana_sdk::{hash::Hash, pubkey::Pubkey}; + + use super::*; + fn commit_info(bundle_id: u64) -> crate::CommitInfo { CommitInfo::BufferedDataAccount { pubkey: Pubkey::new_unique(), diff --git a/magicblock-committor-service/src/commit/commit_using_args.rs b/magicblock-committor-service/src/commit/commit_using_args.rs index 525eb531..eb6dba19 100644 --- a/magicblock-committor-service/src/commit/commit_using_args.rs +++ b/magicblock-committor-service/src/commit/commit_using_args.rs @@ -1,26 +1,22 @@ +use std::{collections::HashSet, sync::Arc}; + +use dlp::args::CommitStateArgs; +use log::*; +use magicblock_committor_program::Changeset; +use magicblock_rpc_client::MagicBlockSendTransactionConfig; +use solana_sdk::{hash::Hash, signer::Signer}; + +use super::CommittorProcessor; use crate::{ 
commit::common::{ get_accounts_to_undelegate, lookup_table_keys, send_and_confirm, }, - commit_stage::CommitSignatures, + commit_stage::{CommitSignatures, CommitStage}, persist::CommitStrategy, undelegate::undelegate_commitables_ixs, CommitInfo, }; -use dlp::args::CommitStateArgs; -use log::*; -use solana_sdk::hash::Hash; -use std::{collections::HashSet, sync::Arc}; - -use magicblock_committor_program::Changeset; -use solana_sdk::signer::Signer; - -use crate::commit_stage::CommitStage; -use magicblock_rpc_client::MagicBlockSendTransactionConfig; - -use super::CommittorProcessor; - impl CommittorProcessor { /// Commits a changeset directly using args to include the commit state /// - **changeset**: the changeset to commit diff --git a/magicblock-committor-service/src/commit/commit_using_buffer.rs b/magicblock-committor-service/src/commit/commit_using_buffer.rs index 4d99ff99..e33a3f7f 100644 --- a/magicblock-committor-service/src/commit/commit_using_buffer.rs +++ b/magicblock-committor-service/src/commit/commit_using_buffer.rs @@ -1,18 +1,12 @@ -use borsh::{to_vec, BorshDeserialize}; -use dlp::pda::commit_state_pda_from_delegated_account; -use log::*; -use magicblock_rpc_client::{ - MagicBlockRpcClientError, MagicBlockRpcClientResult, - MagicBlockSendTransactionConfig, -}; -use solana_pubkey::Pubkey; use std::{ collections::{HashMap, HashSet}, sync::Arc, time::Duration, }; -use tokio::task::JoinSet; +use borsh::{to_vec, BorshDeserialize}; +use dlp::pda::commit_state_pda_from_delegated_account; +use log::*; use magicblock_committor_program::{ instruction::{ create_init_ix, create_realloc_buffer_ixs, @@ -22,7 +16,22 @@ use magicblock_committor_program::{ instruction_chunks::chunk_realloc_ixs, Changeset, ChangesetChunk, Chunks, CommitableAccount, }; +use magicblock_rpc_client::{ + MagicBlockRpcClientError, MagicBlockRpcClientResult, + MagicBlockSendTransactionConfig, +}; +use solana_pubkey::Pubkey; +use solana_sdk::{hash::Hash, instruction::Instruction, 
signer::Signer}; +use tokio::task::JoinSet; +use super::{ + common::send_and_confirm, + process_buffers::{ + chunked_ixs_to_process_commitables_and_close_pdas, + ChunkedIxsToProcessCommitablesAndClosePdasResult, + }, + CommittorProcessor, +}; use crate::{ commit::common::get_accounts_to_undelegate, commit_stage::CommitSignatures, @@ -39,16 +48,6 @@ use crate::{ CommitInfo, CommitStage, }; -use super::{ - common::send_and_confirm, - process_buffers::{ - chunked_ixs_to_process_commitables_and_close_pdas, - ChunkedIxsToProcessCommitablesAndClosePdasResult, - }, - CommittorProcessor, -}; -use solana_sdk::{hash::Hash, instruction::Instruction, signer::Signer}; - struct NextReallocs { missing_size: u64, start_idx: usize, diff --git a/magicblock-committor-service/src/commit/committor_processor.rs b/magicblock-committor-service/src/commit/committor_processor.rs index 3e6ea0ab..0db9171b 100644 --- a/magicblock-committor-service/src/commit/committor_processor.rs +++ b/magicblock-committor-service/src/commit/committor_processor.rs @@ -1,41 +1,40 @@ -use crate::{ - commit_strategy::{split_changesets_by_commit_strategy, SplitChangesets}, - compute_budget::{ComputeBudget, ComputeBudgetConfig}, - persist::{ - BundleSignatureRow, CommitPersister, CommitStatusRow, CommitStrategy, - }, - pubkeys_provider::provide_committee_pubkeys, - types::InstructionsKind, - CommitInfo, -}; - -use log::*; -use magicblock_table_mania::{GarbageCollectorConfig, TableMania}; -use solana_sdk::{ - commitment_config::CommitmentConfig, hash::Hash, signature::Signature, -}; use std::{ collections::{HashMap, HashSet}, path::Path, sync::{Arc, Mutex}, }; +use log::*; use magicblock_committor_program::{Changeset, ChangesetMeta}; +use magicblock_rpc_client::{ + MagicBlockSendTransactionConfig, MagicblockRpcClient, +}; +use magicblock_table_mania::{GarbageCollectorConfig, TableMania}; use solana_pubkey::Pubkey; use solana_rpc_client::nonblocking::rpc_client::RpcClient; -use solana_sdk::{signature::Keypair, 
signer::Signer}; +use solana_sdk::{ + commitment_config::CommitmentConfig, + hash::Hash, + signature::{Keypair, Signature}, + signer::Signer, +}; use tokio::task::JoinSet; +use super::common::{lookup_table_keys, send_and_confirm}; use crate::{ - commit_stage::CommitStage, config::ChainConfig, - error::CommittorServiceResult, types::InstructionsForCommitable, -}; -use magicblock_rpc_client::{ - MagicBlockSendTransactionConfig, MagicblockRpcClient, + commit_stage::CommitStage, + commit_strategy::{split_changesets_by_commit_strategy, SplitChangesets}, + compute_budget::{ComputeBudget, ComputeBudgetConfig}, + config::ChainConfig, + error::CommittorServiceResult, + persist::{ + BundleSignatureRow, CommitPersister, CommitStatusRow, CommitStrategy, + }, + pubkeys_provider::provide_committee_pubkeys, + types::{InstructionsForCommitable, InstructionsKind}, + CommitInfo, }; -use super::common::{lookup_table_keys, send_and_confirm}; - pub(crate) struct CommittorProcessor { pub(crate) magicblock_rpc_client: MagicblockRpcClient, pub(crate) table_mania: TableMania, diff --git a/magicblock-committor-service/src/commit/common.rs b/magicblock-committor-service/src/commit/common.rs index c1e4317c..ebfa0a1a 100644 --- a/magicblock-committor-service/src/commit/common.rs +++ b/magicblock-committor-service/src/commit/common.rs @@ -1,19 +1,22 @@ -use log::*; -use magicblock_rpc_client::{ - MagicBlockSendTransactionConfig, MagicblockRpcClient, -}; -use magicblock_table_mania::TableMania; -use solana_sdk::{hash::Hash, message::v0::Message, signature::Signature}; use std::{ collections::{HashMap, HashSet}, time::{Duration, Instant}, }; +use log::*; use magicblock_committor_program::Changeset; +use magicblock_rpc_client::{ + MagicBlockSendTransactionConfig, MagicblockRpcClient, +}; +use magicblock_table_mania::TableMania; use solana_pubkey::Pubkey; use solana_sdk::{ - instruction::Instruction, message::VersionedMessage, signature::Keypair, - signer::Signer, transaction::VersionedTransaction, 
+ hash::Hash, + instruction::Instruction, + message::{v0::Message, VersionedMessage}, + signature::{Keypair, Signature}, + signer::Signer, + transaction::VersionedTransaction, }; use crate::{ diff --git a/magicblock-committor-service/src/commit_stage.rs b/magicblock-committor-service/src/commit_stage.rs index fe8299c7..66a5e858 100644 --- a/magicblock-committor-service/src/commit_stage.rs +++ b/magicblock-committor-service/src/commit_stage.rs @@ -1,3 +1,6 @@ +use std::sync::Arc; + +use log::*; use magicblock_committor_program::ChangedAccountMeta; use solana_pubkey::Pubkey; use solana_sdk::{clock::Slot, signature::Signature}; @@ -7,8 +10,6 @@ use crate::{ persist::{CommitStatus, CommitStatusSignatures, CommitStrategy}, CommitInfo, }; -use log::*; -use std::sync::Arc; #[derive(Debug, Clone)] pub struct CommitSignatures { diff --git a/magicblock-committor-service/src/commit_strategy.rs b/magicblock-committor-service/src/commit_strategy.rs index 32c9b795..22ac7992 100644 --- a/magicblock-committor-service/src/commit_strategy.rs +++ b/magicblock-committor-service/src/commit_strategy.rs @@ -235,11 +235,12 @@ pub fn split_changesets_by_commit_strategy( #[cfg(test)] mod test { - use super::*; use log::*; use magicblock_committor_program::ChangedAccount; use solana_sdk::pubkey::Pubkey; + use super::*; + fn init_logger() { let _ = env_logger::builder() .format_timestamp(None) diff --git a/magicblock-committor-service/src/error.rs b/magicblock-committor-service/src/error.rs index d130cf1c..54344a97 100644 --- a/magicblock-committor-service/src/error.rs +++ b/magicblock-committor-service/src/error.rs @@ -1,12 +1,11 @@ use std::sync::Arc; -use crate::persist::CommitStrategy; use magicblock_rpc_client::MagicBlockRpcClientError; use solana_pubkey::Pubkey; use solana_sdk::signature::Signature; use thiserror::Error; -use crate::CommitInfo; +use crate::{persist::CommitStrategy, CommitInfo}; pub type CommittorServiceResult = std::result::Result; diff --git 
a/magicblock-committor-service/src/lib.rs b/magicblock-committor-service/src/lib.rs index 274db705..9c0c1ad8 100644 --- a/magicblock-committor-service/src/lib.rs +++ b/magicblock-committor-service/src/lib.rs @@ -20,13 +20,12 @@ mod undelegate; pub mod stubs; pub use commit_info::CommitInfo; -pub use compute_budget::ComputeBudgetConfig; -pub use service::{ChangesetCommittor, CommittorService}; - pub use commit_stage::CommitStage; +pub use compute_budget::ComputeBudgetConfig; pub use magicblock_committor_program::{ ChangedAccount, Changeset, ChangesetMeta, }; +pub use service::{ChangesetCommittor, CommittorService}; pub fn changeset_for_slot(slot: u64) -> Changeset { Changeset { slot, diff --git a/magicblock-committor-service/src/persist/commit_persister.rs b/magicblock-committor-service/src/persist/commit_persister.rs index 33ade0f2..952ea4f1 100644 --- a/magicblock-committor-service/src/persist/commit_persister.rs +++ b/magicblock-committor-service/src/persist/commit_persister.rs @@ -1,14 +1,17 @@ -use std::path::Path; -use std::sync::atomic::{AtomicU64, Ordering}; +use std::{ + path::Path, + sync::atomic::{AtomicU64, Ordering}, +}; -use solana_sdk::hash::Hash; -use solana_sdk::pubkey::Pubkey; - -use super::db::BundleSignatureRow; -use super::error::{CommitPersistError, CommitPersistResult}; -use super::utils::now; -use super::{db::CommitStatusRow, CommitStatus, CommitType, CommittorDb}; use magicblock_committor_program::Changeset; +use solana_sdk::{hash::Hash, pubkey::Pubkey}; + +use super::{ + db::{BundleSignatureRow, CommitStatusRow}, + error::{CommitPersistError, CommitPersistResult}, + utils::now, + CommitStatus, CommitType, CommittorDb, +}; pub struct CommitPersister { db: CommittorDb, @@ -152,12 +155,13 @@ impl CommitPersister { #[cfg(test)] mod tests { - use super::*; - use crate::persist::{CommitStatusSignatures, CommitStrategy}; use magicblock_committor_program::ChangedAccount; use solana_pubkey::Pubkey; use solana_sdk::signature::Signature; + use 
super::*; + use crate::persist::{CommitStatusSignatures, CommitStrategy}; + #[test] fn test_start_changeset_and_update_status() { let mut persister = CommitPersister::try_new(":memory:").unwrap(); diff --git a/magicblock-committor-service/src/persist/types/commit_status.rs b/magicblock-committor-service/src/persist/types/commit_status.rs index 0e6c74a3..38bcad99 100644 --- a/magicblock-committor-service/src/persist/types/commit_status.rs +++ b/magicblock-committor-service/src/persist/types/commit_status.rs @@ -2,9 +2,8 @@ use std::fmt; use solana_sdk::signature::Signature; -use crate::persist::error::CommitPersistError; - use super::commit_strategy::CommitStrategy; +use crate::persist::error::CommitPersistError; /// The status of a committed account. #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/magicblock-committor-service/src/pubkeys_provider.rs b/magicblock-committor-service/src/pubkeys_provider.rs index 595b5af2..d7ad1472 100644 --- a/magicblock-committor-service/src/pubkeys_provider.rs +++ b/magicblock-committor-service/src/pubkeys_provider.rs @@ -1,7 +1,7 @@ -use log::*; use std::collections::HashSet; use dlp::pda; +use log::*; use solana_pubkey::Pubkey; use solana_sdk::system_program; diff --git a/magicblock-committor-service/src/service.rs b/magicblock-committor-service/src/service.rs index 1b74ba21..4e022eea 100644 --- a/magicblock-committor-service/src/service.rs +++ b/magicblock-committor-service/src/service.rs @@ -3,8 +3,7 @@ use std::{fmt::Display, path::Path}; use log::*; use magicblock_committor_program::Changeset; use solana_pubkey::Pubkey; -use solana_sdk::hash::Hash; -use solana_sdk::signature::Keypair; +use solana_sdk::{hash::Hash, signature::Keypair}; use tokio::{ select, sync::{ diff --git a/magicblock-committor-service/src/stubs/changeset_committor_stub.rs b/magicblock-committor-service/src/stubs/changeset_committor_stub.rs index a618ee90..6e39bd65 100644 --- a/magicblock-committor-service/src/stubs/changeset_committor_stub.rs +++ 
b/magicblock-committor-service/src/stubs/changeset_committor_stub.rs @@ -9,6 +9,7 @@ use std::{ use magicblock_committor_program::Changeset; use solana_pubkey::Pubkey; +use solana_sdk::{hash::Hash, signature::Signature}; use tokio::sync::oneshot; use crate::{ @@ -19,7 +20,6 @@ use crate::{ }, ChangesetCommittor, }; -use solana_sdk::{hash::Hash, signature::Signature}; #[derive(Default)] pub struct ChangesetCommittorStub { diff --git a/magicblock-committor-service/src/transactions.rs b/magicblock-committor-service/src/transactions.rs index fa53f03f..63dfcddc 100644 --- a/magicblock-committor-service/src/transactions.rs +++ b/magicblock-committor-service/src/transactions.rs @@ -8,13 +8,14 @@ use magicblock_committor_program::{ }; use solana_pubkey::Pubkey; use solana_rpc_client::rpc_client::SerializableTransaction; -use solana_sdk::hash::Hash; -use solana_sdk::instruction::Instruction; -use solana_sdk::message::v0::Message; -use solana_sdk::message::{AddressLookupTableAccount, VersionedMessage}; -use solana_sdk::signature::Keypair; -use solana_sdk::signer::Signer; -use solana_sdk::transaction::VersionedTransaction; +use solana_sdk::{ + hash::Hash, + instruction::Instruction, + message::{v0::Message, AddressLookupTableAccount, VersionedMessage}, + signature::Keypair, + signer::Signer, + transaction::VersionedTransaction, +}; use static_assertions::const_assert; use crate::error::{CommittorServiceError, CommittorServiceResult}; @@ -318,13 +319,6 @@ fn get_lookup_tables(ixs: &[Instruction]) -> Vec { #[cfg(test)] mod test { - use crate::{ - compute_budget::{Budget, ComputeBudget}, - pubkeys_provider::{provide_committee_pubkeys, provide_common_pubkeys}, - }; - - use super::*; - use dlp::args::{CommitStateArgs, CommitStateFromBufferArgs}; use lazy_static::lazy_static; use solana_pubkey::Pubkey; @@ -338,6 +332,12 @@ mod test { transaction::VersionedTransaction, }; + use super::*; + use crate::{ + compute_budget::{Budget, ComputeBudget}, + 
pubkeys_provider::{provide_committee_pubkeys, provide_common_pubkeys}, + }; + // These tests statically determine the optimal ix count to fit into a single // transaction and assert that the const we export in prod match those numbers. // Thus when an instruction changes and one of those numbers with it a failing diff --git a/magicblock-rpc-client/src/lib.rs b/magicblock-rpc-client/src/lib.rs index f710fb3a..87f7753a 100644 --- a/magicblock-rpc-client/src/lib.rs +++ b/magicblock-rpc-client/src/lib.rs @@ -1,14 +1,14 @@ -use log::*; use std::{ sync::Arc, time::{Duration, Instant}, }; +use log::*; use solana_rpc_client::{ nonblocking::rpc_client::RpcClient, rpc_client::SerializableTransaction, }; -use solana_rpc_client_api::client_error::ErrorKind as RpcClientErrorKind; use solana_rpc_client_api::{ + client_error::ErrorKind as RpcClientErrorKind, config::RpcSendTransactionConfig, request::RpcError, }; use solana_sdk::{ diff --git a/magicblock-table-mania/src/lookup_table.rs b/magicblock-table-mania/src/lookup_table.rs index 3c1e5406..ad7cc7b6 100644 --- a/magicblock-table-mania/src/lookup_table.rs +++ b/magicblock-table-mania/src/lookup_table.rs @@ -1,27 +1,29 @@ -use log::*; -use std::fmt; -use std::sync::Mutex; +use std::{fmt, sync::Mutex}; -use crate::derive_keypair; -use crate::error::{TableManiaError, TableManiaResult}; -use magicblock_rpc_client::MagicBlockRpcClientError; +use log::*; use magicblock_rpc_client::{ - MagicBlockSendTransactionConfig, MagicblockRpcClient, + MagicBlockRpcClientError, MagicBlockSendTransactionConfig, + MagicblockRpcClient, }; use solana_pubkey::Pubkey; -use solana_sdk::address_lookup_table::state::{ - LookupTableMeta, LOOKUP_TABLE_MAX_ADDRESSES, -}; -use solana_sdk::commitment_config::CommitmentLevel; -use solana_sdk::slot_hashes::MAX_ENTRIES; use solana_sdk::{ address_lookup_table as alt, + address_lookup_table::state::{ + LookupTableMeta, LOOKUP_TABLE_MAX_ADDRESSES, + }, clock::Slot, + commitment_config::CommitmentLevel, 
signature::{Keypair, Signature}, signer::Signer, + slot_hashes::MAX_ENTRIES, transaction::Transaction, }; +use crate::{ + derive_keypair, + error::{TableManiaError, TableManiaResult}, +}; + /// Determined via trial and error. The keys themselves take up /// 27 * 32 bytes = 864 bytes. pub const MAX_ENTRIES_AS_PART_OF_EXTEND: u64 = 27; diff --git a/magicblock-table-mania/src/lookup_table_rc.rs b/magicblock-table-mania/src/lookup_table_rc.rs index 94e298cd..386a28ed 100644 --- a/magicblock-table-mania/src/lookup_table_rc.rs +++ b/magicblock-table-mania/src/lookup_table_rc.rs @@ -1,8 +1,19 @@ +use std::{ + collections::{HashMap, HashSet}, + fmt, + ops::Deref, + sync::{ + atomic::{AtomicUsize, Ordering}, + RwLock, RwLockReadGuard, RwLockWriteGuard, + }, +}; + use log::*; use magicblock_rpc_client::{ MagicBlockRpcClientError, MagicBlockSendTransactionConfig, MagicblockRpcClient, }; +use solana_pubkey::Pubkey; use solana_sdk::{ address_lookup_table::{ self as alt, @@ -15,17 +26,6 @@ use solana_sdk::{ slot_hashes::MAX_ENTRIES, transaction::Transaction, }; -use std::{ - collections::{HashMap, HashSet}, - fmt, - ops::Deref, - sync::{ - atomic::{AtomicUsize, Ordering}, - RwLock, RwLockReadGuard, RwLockWriteGuard, - }, -}; - -use solana_pubkey::Pubkey; use crate::{ derive_keypair, diff --git a/magicblock-table-mania/src/manager.rs b/magicblock-table-mania/src/manager.rs index e9eb2f9d..d2202a3e 100644 --- a/magicblock-table-mania/src/manager.rs +++ b/magicblock-table-mania/src/manager.rs @@ -1,4 +1,3 @@ -use log::*; use std::{ collections::{HashMap, HashSet}, sync::{ @@ -8,6 +7,7 @@ use std::{ time::{Duration, Instant}, }; +use log::*; use magicblock_rpc_client::MagicblockRpcClient; use solana_pubkey::Pubkey; use solana_sdk::{ From 52fd8310e2e4c839980c0f0db616e63147cd2f2e Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 16:48:54 +0545 Subject: [PATCH 36/58] chore: update delegation program reference --- Cargo.lock | 22 +++++++++++++++++++--- Cargo.toml | 2 
+- test-integration/Cargo.lock | 9 +++++---- test-integration/Cargo.toml | 2 +- 4 files changed, 26 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dce0e98f..d5c825ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1185,7 +1185,7 @@ dependencies = [ "conjunto-addresses", "conjunto-core", "conjunto-providers", - "magicblock-delegation-program", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", "serde", "solana-rpc-client", "solana-rpc-client-api", @@ -3641,7 +3641,7 @@ dependencies = [ "magicblock-bank", "magicblock-committor-service", "magicblock-core", - "magicblock-delegation-program", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", "magicblock-metrics", "magicblock-mutator", "magicblock-processor", @@ -3797,7 +3797,7 @@ dependencies = [ "lazy_static", "log", "magicblock-committor-program", - "magicblock-delegation-program", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", "magicblock-rpc-client", "magicblock-table-mania", "rusqlite", @@ -3836,6 +3836,22 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "magicblock-delegation-program" +version = "1.0.0" +source = "git+https://github.com/magicblock-labs/delegation-program.git?branch=main#9570abf24ab6d03ba75ecdbf8a6a787bb0d69d3a" +dependencies = [ + "bincode", + "borsh 1.5.5", + "bytemuck", + "num_enum", + "paste", + "solana-curve25519", + "solana-program", + "solana-security-txt", + "thiserror 1.0.69", +] + [[package]] name = "magicblock-delegation-program" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 14857532..261f614b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -108,7 +108,7 @@ magicblock-committor-program = { path = "./magicblock-committor-program", featur magicblock-committor-service = { path = "./magicblock-committor-service" } magicblock-config = { path = 
"./magicblock-config" } magicblock-core = { path = "./magicblock-core" } -magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", rev = "4af7f1c" } +magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", branch = "main" } magicblock-geyser-plugin = { path = "./magicblock-geyser-plugin" } magicblock-ledger = { path = "./magicblock-ledger" } magicblock-metrics = { path = "./magicblock-metrics" } diff --git a/test-integration/Cargo.lock b/test-integration/Cargo.lock index 8dda50d8..1e744609 100644 --- a/test-integration/Cargo.lock +++ b/test-integration/Cargo.lock @@ -2937,7 +2937,7 @@ dependencies = [ "log", "magicblock-config", "magicblock-core", - "magicblock-delegation-program 1.0.0", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", "rayon", "serde", "solana-pubkey", @@ -3550,7 +3550,7 @@ dependencies = [ "magicblock-bank", "magicblock-committor-service", "magicblock-core", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", "magicblock-metrics", "magicblock-mutator", "magicblock-processor", @@ -3692,7 +3692,7 @@ dependencies = [ "borsh 1.5.7", "log", "magicblock-committor-program", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", "magicblock-rpc-client", "magicblock-table-mania", "rusqlite", @@ -3733,6 +3733,7 @@ dependencies = [ [[package]] name = "magicblock-delegation-program" version = "1.0.0" +source = "git+https://github.com/magicblock-labs/delegation-program.git?branch=main#9570abf24ab6d03ba75ecdbf8a6a787bb0d69d3a" dependencies = [ 
"bincode", "borsh 1.5.7", @@ -5711,7 +5712,7 @@ dependencies = [ "log", "magicblock-committor-program", "magicblock-committor-service", - "magicblock-delegation-program 1.0.0", + "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", "magicblock-rpc-client", "program-flexi-counter", "solana-account", diff --git a/test-integration/Cargo.toml b/test-integration/Cargo.toml index 4c8d6dc4..beaaa3af 100644 --- a/test-integration/Cargo.toml +++ b/test-integration/Cargo.toml @@ -39,7 +39,7 @@ magicblock-core = { path = "../magicblock-core" } magicblock-committor-program = { path = "../magicblock-committor-program", features = [ "no-entrypoint", ] } -magicblock-delegation-program = { path = "../../delegation-program" } +magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", branch = "main" } magicblock-committor-service = { path = "../magicblock-committor-service" } magicblock-rpc-client = { path = "../magicblock-rpc-client" } magicblock-table-mania = { path = "../magicblock-table-mania" } From 714dd68edf7f0c081cdd75e74d99d0b76beba897 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 17:00:44 +0545 Subject: [PATCH 37/58] chore: opt out of doctests for added crates --- magicblock-committor-program/Cargo.toml | 1 + magicblock-committor-service/Cargo.toml | 3 +++ magicblock-rpc-client/Cargo.toml | 3 +++ magicblock-table-mania/Cargo.toml | 3 +++ 4 files changed, 10 insertions(+) diff --git a/magicblock-committor-program/Cargo.toml b/magicblock-committor-program/Cargo.toml index 2b17f5b3..15164f42 100644 --- a/magicblock-committor-program/Cargo.toml +++ b/magicblock-committor-program/Cargo.toml @@ -24,6 +24,7 @@ tokio = { workspace = true } [lib] crate-type = ["cdylib", "lib"] +doctest = false [features] no-entrypoint = [] diff --git a/magicblock-committor-service/Cargo.toml b/magicblock-committor-service/Cargo.toml index 25e82451..34174a8f 100644 --- 
a/magicblock-committor-service/Cargo.toml +++ b/magicblock-committor-service/Cargo.toml @@ -7,6 +7,9 @@ homepage.workspace = true license.workspace = true edition.workspace = true +[lib] +doctest = false + [dependencies] base64 = { workspace = true } bincode = { workspace = true } diff --git a/magicblock-rpc-client/Cargo.toml b/magicblock-rpc-client/Cargo.toml index 2bc7430a..1004e7c7 100644 --- a/magicblock-rpc-client/Cargo.toml +++ b/magicblock-rpc-client/Cargo.toml @@ -7,6 +7,9 @@ homepage.workspace = true license.workspace = true edition.workspace = true +[lib] +doctest = false + [dependencies] log = { workspace = true } solana-rpc-client = { workspace = true } diff --git a/magicblock-table-mania/Cargo.toml b/magicblock-table-mania/Cargo.toml index 5767af61..5cca6e5f 100644 --- a/magicblock-table-mania/Cargo.toml +++ b/magicblock-table-mania/Cargo.toml @@ -7,6 +7,9 @@ homepage.workspace = true license.workspace = true edition.workspace = true +[lib] +doctest = false + [dependencies] ed25519-dalek = { workspace = true } log = { workspace = true } From 0f94802d304e1f077059dd94d49e12ce5017e47f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 17:03:12 +0545 Subject: [PATCH 38/58] ix: add rule to make committor program --- test-integration/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test-integration/Makefile b/test-integration/Makefile index 153858bd..c4fa08a4 100644 --- a/test-integration/Makefile +++ b/test-integration/Makefile @@ -42,6 +42,8 @@ $(SCHEDULECOMMIT_SO): $(SCHEDULECOMMIT_SRC) cargo build-sbf --manifest-path $(SCHEDULECOMMIT_DIR)/Cargo.toml $(SCHEDULECOMMIT_SECURITY_SO): $(SCHEDULECOMMIT_SECURITY_SRC) cargo build-sbf --manifest-path $(SCHEDULECOMMIT_SECURITY_DIR)/Cargo.toml +$(COMMITTOR_PROGRAM_SO): $(COMMITTOR_PROGRAM_SRC) + cargo build-sbf --manifest-path $(COMMITTOR_PROGRAM_DIR)/Cargo.toml deploy-flexi-counter: $(FLEXI_COUNTER_SO) solana program deploy \ From c8ddb165bfc55bafa4d4c9de9ee203332002c09b Mon Sep 17 
00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 17:04:32 +0545 Subject: [PATCH 39/58] fix: error misspelling --- magicblock-api/src/errors.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/magicblock-api/src/errors.rs b/magicblock-api/src/errors.rs index 6cebbf8d..0404bb37 100644 --- a/magicblock-api/src/errors.rs +++ b/magicblock-api/src/errors.rs @@ -33,8 +33,8 @@ pub enum ApiError { #[error("Validator '{0}' is insufficiently funded on chain. Minimum is ({1} SOL)")] ValidatorInsufficientlyFunded(Pubkey, u64), - #[error("CommittorSerivceError")] - CommittorSerivceError( + #[error("CommittorServiceError")] + CommittorServiceError( #[from] magicblock_committor_service::error::CommittorServiceError, ), From b246170cd8545b5f7102299a59e81abcb16ca978 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 17:23:10 +0545 Subject: [PATCH 40/58] chore: address some greptiles --- magicblock-accounts/src/errors.rs | 2 +- magicblock-accounts/src/lib.rs | 1 - .../old_remote_scheduled_commits_processor.rs | 300 ------------------ .../src/remote_scheduled_commits_processor.rs | 4 +- magicblock-committor-program/src/consts.rs | 4 +- .../src/instruction.rs | 31 +- magicblock-committor-program/src/pdas.rs | 2 +- magicblock-committor-program/src/processor.rs | 4 +- .../src/state/chunks.rs | 4 +- .../programs/flexi-counter/src/instruction.rs | 4 +- .../programs/flexi-counter/src/processor.rs | 4 +- .../tests/utils/instructions.rs | 4 +- 12 files changed, 32 insertions(+), 332 deletions(-) delete mode 100644 magicblock-accounts/src/old_remote_scheduled_commits_processor.rs diff --git a/magicblock-accounts/src/errors.rs b/magicblock-accounts/src/errors.rs index c7c18b2b..61a3b5a2 100644 --- a/magicblock-accounts/src/errors.rs +++ b/magicblock-accounts/src/errors.rs @@ -58,6 +58,6 @@ pub enum AccountsError { #[error("Too many committees: {0}")] TooManyCommittees(usize), - #[error("FailedToObtainReqidForCommittedChangeset {0:?}'")] + 
#[error("FailedToObtainReqidForCommittedChangeset {0:?}")] FailedToObtainReqidForCommittedChangeset(Box), } diff --git a/magicblock-accounts/src/lib.rs b/magicblock-accounts/src/lib.rs index 6b2eda27..ec28920c 100644 --- a/magicblock-accounts/src/lib.rs +++ b/magicblock-accounts/src/lib.rs @@ -2,7 +2,6 @@ mod accounts_manager; mod config; pub mod errors; mod external_accounts_manager; -// mod old_remote_scheduled_commits_processor; mod remote_account_committer; mod remote_scheduled_commits_processor; mod traits; diff --git a/magicblock-accounts/src/old_remote_scheduled_commits_processor.rs b/magicblock-accounts/src/old_remote_scheduled_commits_processor.rs deleted file mode 100644 index d42eb903..00000000 --- a/magicblock-accounts/src/old_remote_scheduled_commits_processor.rs +++ /dev/null @@ -1,300 +0,0 @@ -use std::{collections::HashSet, sync::Arc}; - -use async_trait::async_trait; -use conjunto_transwise::AccountChainSnapshot; -use log::*; -use magicblock_account_cloner::{ - AccountClonerOutput, AccountClonerOutput::Cloned, CloneOutputMap, -}; -use magicblock_accounts_api::InternalAccountProvider; -use magicblock_bank::bank::Bank; -use magicblock_core::debug_panic; -use magicblock_metrics::metrics; -use magicblock_mutator::Cluster; -use magicblock_processor::execute_transaction::execute_legacy_transaction; -use magicblock_program::{ - register_scheduled_commit_sent, FeePayerAccount, SentCommit, - TransactionScheduler, -}; -use magicblock_transaction_status::TransactionStatusSender; -use solana_sdk::{pubkey::Pubkey, signature::Signature}; - -use crate::{ - errors::{AccountsError, AccountsResult}, - remote_account_committer::update_account_commit_metrics, - AccountCommittee, AccountCommitter, ScheduledCommitsProcessor, - SendableCommitAccountsPayload, -}; - -pub struct OldRemoteScheduledCommitsProcessor { - #[allow(unused)] - cluster: Cluster, - bank: Arc, - transaction_status_sender: Option, - transaction_scheduler: TransactionScheduler, - cloned_accounts: 
CloneOutputMap, -} - -#[async_trait] -impl ScheduledCommitsProcessor for OldRemoteScheduledCommitsProcessor { - async fn process( - &self, - committer: &Arc, - account_provider: &IAP, - ) -> AccountsResult<()> - where - AC: AccountCommitter, - IAP: InternalAccountProvider, - { - let scheduled_commits = - self.transaction_scheduler.take_scheduled_commits(); - - if scheduled_commits.is_empty() { - return Ok(()); - } - - let mut sendable_payloads_queue = vec![]; - for commit in scheduled_commits { - info!("Processing commit: {:?}", commit); - - // Determine which accounts are available and can be committed - let mut committees = vec![]; - let all_pubkeys: HashSet = HashSet::from_iter( - commit - .accounts - .iter() - .map(|ca| ca.pubkey) - .collect::>(), - ); - let mut feepayers = HashSet::new(); - - for committed_account in commit.accounts { - let mut commitment_pubkey = committed_account.pubkey; - let mut commitment_pubkey_owner = committed_account.owner; - if let Some(Cloned { - account_chain_snapshot, - .. - }) = Self::fetch_cloned_account( - &committed_account.pubkey, - &self.cloned_accounts, - ) { - // If the account is a FeePayer, we committed the mapped delegated account - if account_chain_snapshot.chain_state.is_feepayer() { - commitment_pubkey = - AccountChainSnapshot::ephemeral_balance_pda( - &committed_account.pubkey, - ); - commitment_pubkey_owner = - AccountChainSnapshot::ephemeral_balance_pda_owner(); - feepayers.insert(FeePayerAccount { - pubkey: committed_account.pubkey, - delegated_pda: commitment_pubkey, - }); - } else if account_chain_snapshot - .chain_state - .is_undelegated() - { - error!("Scheduled commit account '{}' is undelegated. 
This is not supported.", committed_account.pubkey); - } - } - - match account_provider.get_account(&committed_account.pubkey) { - Some(account_data) => { - committees.push(AccountCommittee { - pubkey: commitment_pubkey, - owner: commitment_pubkey_owner, - account_data, - slot: commit.slot, - undelegation_requested: commit.request_undelegation, - }); - } - None => { - error!( - "Scheduled commmit account '{}' not found. It must have gotten undelegated and removed since it was scheduled.", - committed_account.pubkey - ); - } - } - } - - let payloads = vec![ - committer - .create_commit_accounts_transaction(committees) - .await?, - ]; - - // Determine which payloads are a noop since all accounts are up to date - // and which require a commit to chain - let mut included_pubkeys = HashSet::new(); - let sendable_payloads = payloads - .into_iter() - .filter_map(|payload| { - if let Some(transaction) = payload.transaction { - included_pubkeys.extend( - payload - .committees - .iter() - .map(|(pubkey, _)| *pubkey), - ); - Some(SendableCommitAccountsPayload { - transaction, - committees: payload.committees, - }) - } else { - None - } - }) - .collect::>(); - - // Tally up the pubkeys that will not be committed since the account - // was not available as determined when creating sendable payloads - let excluded_pubkeys = all_pubkeys - .into_iter() - .filter(|pubkey| { - !included_pubkeys.contains(pubkey) - && !included_pubkeys.contains( - &AccountChainSnapshot::ephemeral_balance_pda( - pubkey, - ), - ) - }) - .collect::>(); - - // Extract signatures of all transactions that we will execute on - // chain in order to realize the commits needed - let signatures = sendable_payloads - .iter() - .map(|payload| payload.get_signature()) - .collect::>(); - - // Record that we are about to send the commit to chain including all - // information (mainly signatures) needed to track its outcome on chain - let sent_commit = SentCommit { - commit_id: commit.id, - slot: commit.slot, - 
blockhash: commit.blockhash, - payer: commit.payer, - chain_signatures: signatures, - included_pubkeys: included_pubkeys.into_iter().collect(), - excluded_pubkeys, - feepayers, - requested_undelegation: commit.request_undelegation, - }; - register_scheduled_commit_sent(sent_commit); - let signature = execute_legacy_transaction( - commit.commit_sent_transaction, - &self.bank, - self.transaction_status_sender.as_ref(), - ) - .map_err(Box::new)?; - - // In the case that no account needs to be committed we record that in - // our ledger and are done - if sendable_payloads.is_empty() { - debug!( - "Signaled no commit needed with internal signature: {:?}", - signature - ); - continue; - } else { - debug!( - "Signaled commit with internal signature: {:?}", - signature - ); - } - - // Queue up the actual commit - sendable_payloads_queue.extend(sendable_payloads); - } - - self.process_accounts_commits_in_background( - committer, - sendable_payloads_queue, - ); - - Ok(()) - } - - fn scheduled_commits_len(&self) -> usize { - self.transaction_scheduler.scheduled_commits_len() - } - - fn clear_scheduled_commits(&self) { - self.transaction_scheduler.clear_scheduled_commits(); - } -} - -impl OldRemoteScheduledCommitsProcessor { - pub(crate) fn new( - cluster: Cluster, - bank: Arc, - cloned_accounts: CloneOutputMap, - transaction_status_sender: Option, - ) -> Self { - Self { - cluster, - bank, - transaction_status_sender, - cloned_accounts, - transaction_scheduler: TransactionScheduler::default(), - } - } - - fn process_accounts_commits_in_background( - &self, - committer: &Arc, - sendable_payloads_queue: Vec, - ) { - // We process the queue on a separate task in order to not block - // the validator (slot advance) itself - // NOTE: @@ we have to be careful here and ensure that the validator does not - // shutdown before this task is done - // We will need some tracking machinery which is overkill until we get to the - // point where we do allow validator shutdown - let committer 
= committer.clone(); - tokio::task::spawn(async move { - let pending_commits = match committer - .send_commit_transactions(sendable_payloads_queue) - .await - { - Ok(pending) => pending, - Err(AccountsError::FailedToSendCommitTransaction( - err, - commit_and_undelegate_accounts, - commit_only_accounts, - )) => { - update_account_commit_metrics( - &commit_and_undelegate_accounts, - &commit_only_accounts, - metrics::Outcome::Error, - None, - ); - debug_panic!( - "Failed to send commit transactions: {:?}", - err - ); - return; - } - Err(err) => { - debug_panic!( - "Failed to send commit transactions, received invalid err: {:?}", - err - ); - return; - } - }; - - committer.confirm_pending_commits(pending_commits).await; - }); - } - - fn fetch_cloned_account( - pubkey: &Pubkey, - cloned_accounts: &CloneOutputMap, - ) -> Option { - cloned_accounts - .read() - .expect("RwLock of RemoteAccountClonerWorker.last_clone_output is poisoned") - .get(pubkey).cloned() - } -} diff --git a/magicblock-accounts/src/remote_scheduled_commits_processor.rs b/magicblock-accounts/src/remote_scheduled_commits_processor.rs index 6727009e..92a268fc 100644 --- a/magicblock-accounts/src/remote_scheduled_commits_processor.rs +++ b/magicblock-accounts/src/remote_scheduled_commits_processor.rs @@ -62,7 +62,7 @@ impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { .max() .unwrap(); // Safety we just obtained the max slot from the scheduled commits - let ephemereal_blockhash = scheduled_commits + let ephemeral_blockhash = scheduled_commits .iter() .find(|commit| commit.slot == max_slot) .map(|commit| commit.blockhash) @@ -175,7 +175,7 @@ impl ScheduledCommitsProcessor for RemoteScheduledCommitsProcessor { changeset_committor, changeset, sent_commits, - ephemereal_blockhash, + ephemeral_blockhash, ); Ok(()) diff --git a/magicblock-committor-program/src/consts.rs b/magicblock-committor-program/src/consts.rs index 4af1f467..dfea9ce8 100644 --- a/magicblock-committor-program/src/consts.rs 
+++ b/magicblock-committor-program/src/consts.rs @@ -1,7 +1,7 @@ -/// Max bytest that can be allocated as part of the one instruction. +/// Max bytes that can be allocated as part of the one instruction. /// For buffers that are larger than that ReallocBuffer needs to be /// invoked 1 or more times after Init completed. -pub const MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE: u16 = 10_240; +pub const MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE: u16 = 10_240; /// The maximum number of instructions that can be added to a single transaction. /// See: https://github.com/solana-labs/solana/issues/33863 diff --git a/magicblock-committor-program/src/instruction.rs b/magicblock-committor-program/src/instruction.rs index b6bf1e28..df6cb7a1 100644 --- a/magicblock-committor-program/src/instruction.rs +++ b/magicblock-committor-program/src/instruction.rs @@ -85,7 +85,7 @@ pub enum CommittorInstruction { /// It is called by the validator after the instruction that processes the /// change set stored in the buffer account and applies the commits to the /// relevant accounts. - /// Ideally it runs in the same transaction as the 'processs' instruction. + /// Ideally it runs in the same transaction as the 'process' instruction. /// /// The lamports gained due to closing both accounts are transferred to the /// validator authority. 
@@ -119,9 +119,9 @@ pub const IX_INIT_SIZE: u16 = // blockhash: Hash, HASH_BYTES as u16 + // chunks_bump: u8, - 8 + + 1 + // buffer_bump: u8, - 8 + + 1 + // chunk_count: usize, 8 + // chunk_size: u16, @@ -137,7 +137,7 @@ pub const IX_REALLOC_SIZE: u16 = // blockhash: Hash, HASH_BYTES as u16 + // buffer_bump: u8, - 8 + + 1 + // invocation_count: u16, 2 + // byte align @@ -149,9 +149,9 @@ pub const IX_WRITE_SIZE_WITHOUT_CHUNKS: u16 = // blockhash: Hash, HASH_BYTES as u16 + // chunks_bump: u8, - 8 + + 1 + // buffer_bump: u8, - 8 + + 1 + // offset: u32 32; @@ -161,9 +161,9 @@ pub const IX_CLOSE_SIZE: u16 = // blockhash: Hash, HASH_BYTES as u16 + // chunks_bump: u8, - 8 + + 1 + // buffer_bump: u8, - 8; + 1; // ----------------- // create_init_ix @@ -238,7 +238,7 @@ pub struct CreateReallocBufferIxArgs { /// Creates the realloc ixs we need to invoke in order to realloc /// the account to the desired size since we only can realloc up to -/// [consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE] in a single instruction. +/// [consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE] in a single instruction. 
/// Returns a tuple with the instructions and a bool indicating if we need to split /// them into multiple instructions in order to avoid /// [solana_program::program_error::MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED]J @@ -246,23 +246,24 @@ pub fn create_realloc_buffer_ixs( args: CreateReallocBufferIxArgs, ) -> Vec { // We already allocated once during Init and only need to realloc - // if the buffer is larger than [consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE] + // if the buffer is larger than [consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE] if args.buffer_account_size - <= consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64 + <= consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64 { return vec![]; } let remaining_size = args.buffer_account_size as i128 - - consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as i128; + - consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as i128; // A) We just need to realloc once - if remaining_size <= consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as i128 { + if remaining_size <= consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as i128 + { return vec![create_realloc_buffer_ix(args, 1)]; } // B) We need to realloc multiple times - // SAFETY; remaining size > consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE + // SAFETY; remaining size > consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE create_realloc_buffer_ixs_to_add_remaining(&args, remaining_size as u64) } @@ -271,7 +272,7 @@ pub fn create_realloc_buffer_ixs_to_add_remaining( remaining_size: u64, ) -> Vec { let invocation_count = (remaining_size as f64 - / consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as f64) + / consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as f64) .ceil() as u16; let mut ixs = vec![]; diff --git a/magicblock-committor-program/src/pdas.rs b/magicblock-committor-program/src/pdas.rs index e28a89a9..7e2a4dd6 100644 --- a/magicblock-committor-program/src/pdas.rs +++ b/magicblock-committor-program/src/pdas.rs @@ -93,7 +93,7 @@ macro_rules! 
verified_seeds_and_pda { &$blockhash, $bump, ) - .inspect_err(|err| msg!("ERR: {}", err))?; + .inspect_err(|err| ::solana_program::msg!("ERR: {}", err))?; $crate::utils::assert_keys_equal($account_info.key, &pda, || { format!( "Provided {} PDA does not match derived key '{}'", diff --git a/magicblock-committor-program/src/processor.rs b/magicblock-committor-program/src/processor.rs index e1068b60..3becb7a2 100644 --- a/magicblock-committor-program/src/processor.rs +++ b/magicblock-committor-program/src/processor.rs @@ -161,7 +161,7 @@ fn process_init( let initial_alloc_size = std::cmp::min( buffer_account_size, - consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64, + consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64, ); // Create Buffer Account @@ -241,7 +241,7 @@ fn process_realloc_buffer( let next_alloc_size = std::cmp::min( buffer_account_size, current_buffer_size - + consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64, + + consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64, ); msg!( diff --git a/magicblock-committor-program/src/state/chunks.rs b/magicblock-committor-program/src/state/chunks.rs index 06b3e3a6..824a3345 100644 --- a/magicblock-committor-program/src/state/chunks.rs +++ b/magicblock-committor-program/src/state/chunks.rs @@ -49,11 +49,11 @@ impl Chunks { // SAFETY: this is a bug and we need to crash and burn assert!( Self::bytes_for_count_len(chunk_count) - < consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as usize, + < consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as usize, "Size ({}) needed to track {} chunks is too large track and would require to realloc. 
Max allowed is {} bytes", Self::bytes_for_count_len(chunk_count), chunk_count, - consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE + consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE ); Self { bits: vec![0; Self::bits_for_count_len(chunk_count)], diff --git a/test-integration/programs/flexi-counter/src/instruction.rs b/test-integration/programs/flexi-counter/src/instruction.rs index 7e061ea3..391f1952 100644 --- a/test-integration/programs/flexi-counter/src/instruction.rs +++ b/test-integration/programs/flexi-counter/src/instruction.rs @@ -17,7 +17,7 @@ pub struct DelegateArgs { pub commit_frequency_ms: u32, } -pub const MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE: u16 = 10_240; +pub const MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE: u16 = 10_240; /// The counter has both mul and add instructions in order to facilitate tests where /// order matters. For example in the case of the following operations: @@ -35,7 +35,7 @@ pub enum FlexiCounterInstruction { Init { label: String, bump: u8 }, /// Increases the size of the FlexiCounter to reach the given bytes. - /// Max increase is [MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE] per instruction + /// Max increase is [MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE] per instruction /// which means this instruction needs to be called multiple times to reach /// the desired size. 
/// diff --git a/test-integration/programs/flexi-counter/src/processor.rs b/test-integration/programs/flexi-counter/src/processor.rs index 9d43b371..922e2587 100644 --- a/test-integration/programs/flexi-counter/src/processor.rs +++ b/test-integration/programs/flexi-counter/src/processor.rs @@ -17,7 +17,7 @@ use solana_program::{ sysvar::Sysvar, }; -use crate::instruction::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE; +use crate::instruction::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE; use crate::{ instruction::{DelegateArgs, FlexiCounterInstruction}, state::FlexiCounter, @@ -133,7 +133,7 @@ fn process_realloc( let next_alloc_size = std::cmp::min( bytes, - current_size + MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64, + current_size + MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64, ); msg!( diff --git a/test-integration/schedulecommit/committor-service/tests/utils/instructions.rs b/test-integration/schedulecommit/committor-service/tests/utils/instructions.rs index 148ae6ce..1b9510c0 100644 --- a/test-integration/schedulecommit/committor-service/tests/utils/instructions.rs +++ b/test-integration/schedulecommit/committor-service/tests/utils/instructions.rs @@ -27,12 +27,12 @@ pub fn init_account_and_delegate_ixs( let rent_exempt = Rent::default().minimum_balance(bytes as usize); let mut realloc_ixs = vec![]; if bytes - > magicblock_committor_program::consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE + > magicblock_committor_program::consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64 { // TODO: we may have to chunk those let reallocs = bytes - / magicblock_committor_program::consts::MAX_ACOUNT_ALLOC_PER_INSTRUCTION_SIZE + / magicblock_committor_program::consts::MAX_ACCOUNT_ALLOC_PER_INSTRUCTION_SIZE as u64; for i in 0..reallocs { realloc_ixs.push(create_realloc_ix(payer, bytes, i as u16)); From d3892d151bf6b4bab046c897f27144b104392ed4 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 17:23:23 +0545 Subject: [PATCH 41/58] ix: check in missing config --- 
.../configs/committor-conf.devnet.toml | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 test-integration/configs/committor-conf.devnet.toml diff --git a/test-integration/configs/committor-conf.devnet.toml b/test-integration/configs/committor-conf.devnet.toml new file mode 100644 index 00000000..9e68ade3 --- /dev/null +++ b/test-integration/configs/committor-conf.devnet.toml @@ -0,0 +1,52 @@ +[accounts] +remote = "devnet" +lifecycle = "offline" +commit = { frequency_millis = 9_000_000_000_000, compute_unit_price = 1_000_000 } + + +[accounts.db] +# size of the main storage, we have to preallocate in advance +# it's advised to set this value based on formula 1KB * N * 3, +# where N is the number of accounts expected to be stored in +# database, e.g. for million accounts this would be 3GB +db-size = 1048576000 # 1GB +# minimal indivisible unit of addressing in main storage +# offsets are calculated in terms of blocks +block-size = "block256" # possible values block128 | block256 | block512 +# size of index file, we have to preallocate, +# can be as low as 1% of main storage size, but setting it to higher values won't hurt +index-map-size = 2048576 +# max number of snapshots to keep around +max-snapshots = 7 +# how frequently (slot-wise) we should take snapshots +snapshot-frequency = 1024 + +[validator] +millis_per_slot = 50 +sigverify = true + +[[program]] +id = "DELeGGvXpWV2fqJUhqcF5ZSYMS4JTLjteaAMARRSaeSh" +path = "../schedulecommit/elfs/dlp.so" + +# NOTE: `cargo build-sbf` needs to run from the root to build the program +[[program]] +id = "corabpNrkBEqbTZP7xfJgSWTdBmVdLf1PARWXZbcMcS" +path = "../../target/deploy/magicblock_committor_program.so" + +[[program]] +id = "9hgprgZiRWmy8KkfvUuaVkDGrqo9GzeXMohwq6BazgUY" +path = "../target/deploy/program_schedulecommit.so" + +[[program]] +id = "f1exzKGtdeVX3d6UXZ89cY7twiNJe9S5uq84RTA4Rq4" +path = "../target/deploy/program_flexi_counter.so" + +[rpc] +port = 7799 + +[geyser_grpc] +port = 10001 + 
+[metrics] +enabled = false From ca5e9f4cf3a41eb0dfbaab8a6d0ee6e07bfe6719 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 17:57:24 +0545 Subject: [PATCH 42/58] chore: more greptiles --- magicblock-committor-program/src/processor.rs | 10 +++++++++- .../src/state/changeset.rs | 2 +- magicblock-committor-program/src/utils/asserts.rs | 4 ++-- magicblock-committor-service/src/bundles.rs | 2 +- .../src/commit/commit_using_buffer.rs | 2 +- .../src/commit/process_buffers.rs | 4 ++-- magicblock-committor-service/src/commit_info.rs | 3 --- magicblock-committor-service/src/commit_stage.rs | 4 ++-- magicblock-committor-service/src/compute_budget.rs | 14 ++++++++++++-- 9 files changed, 30 insertions(+), 15 deletions(-) diff --git a/magicblock-committor-program/src/processor.rs b/magicblock-committor-program/src/processor.rs index 3becb7a2..320ae5ca 100644 --- a/magicblock-committor-program/src/processor.rs +++ b/magicblock-committor-program/src/processor.rs @@ -307,7 +307,15 @@ fn process_write( data_chunk.len() as u64, ); - if offset as usize + data_chunk.len() > buffer_data.len() { + let end_offset = offset + .checked_add(data_chunk.len() as u32) + .map(|sum| sum as usize) + .ok_or(CommittorError::OffsetChunkOutOfRange( + data_chunk.len(), + offset, + buffer_data.len(), + ))?; + if end_offset > buffer_data.len() { let err = CommittorError::OffsetChunkOutOfRange( data_chunk.len(), offset, diff --git a/magicblock-committor-program/src/state/changeset.rs b/magicblock-committor-program/src/state/changeset.rs index 4e52869c..3caf5532 100644 --- a/magicblock-committor-program/src/state/changeset.rs +++ b/magicblock-committor-program/src/state/changeset.rs @@ -409,7 +409,7 @@ impl CommitableAccount { self.chunks = chunks; } - /// The total size of the data that we we will commit. + /// The total size of the data that we will commit. /// Use this to initialize the empty account on chain. 
pub fn size(&self) -> usize { self.data.len() diff --git a/magicblock-committor-program/src/utils/asserts.rs b/magicblock-committor-program/src/utils/asserts.rs index b83c21f0..c6d5482d 100644 --- a/magicblock-committor-program/src/utils/asserts.rs +++ b/magicblock-committor-program/src/utils/asserts.rs @@ -21,7 +21,7 @@ pub fn assert_account_unallocated( account: &AccountInfo, account_label: &str, ) -> ProgramResult { - if account.data.borrow().len() != 0 { + if account.try_borrow_data()?.len() != 0 { msg!( "Err: account '{}' ({}) was already initialized", account_label, @@ -51,7 +51,7 @@ pub fn assert_is_signer( pub fn assert_program_id(program_id: &Pubkey) -> ProgramResult { if program_id != &crate::id() { - msg!("ERR: invalid program id"); + msg!("Err: invalid program id"); Err(ProgramError::IncorrectProgramId) } else { Ok(()) diff --git a/magicblock-committor-service/src/bundles.rs b/magicblock-committor-service/src/bundles.rs index 086163df..ee778658 100644 --- a/magicblock-committor-service/src/bundles.rs +++ b/magicblock-committor-service/src/bundles.rs @@ -62,7 +62,7 @@ pub(crate) fn bundle_chunks( // If we still have unbundled commits then add chunks for those while !not_bundled.is_empty() { - let range_end = (max_per_chunk).min(not_bundled.len()); + let range_end = max_per_chunk.min(not_bundled.len()); chunks.push(not_bundled.drain(..range_end).collect()); } diff --git a/magicblock-committor-service/src/commit/commit_using_buffer.rs b/magicblock-committor-service/src/commit/commit_using_buffer.rs index e33a3f7f..1c15bd7f 100644 --- a/magicblock-committor-service/src/commit/commit_using_buffer.rs +++ b/magicblock-committor-service/src/commit/commit_using_buffer.rs @@ -308,7 +308,7 @@ impl CommittorProcessor { debug_assert_eq!( kind, &InstructionsKind::Finalize, - "Expecting separate finalize instructions onky" + "Expecting separate finalize instructions only" ); let bundle_id = commit_info.bundle_id(); debug_assert!( diff --git 
a/magicblock-committor-service/src/commit/process_buffers.rs b/magicblock-committor-service/src/commit/process_buffers.rs index 40cb2583..542c4c3f 100644 --- a/magicblock-committor-service/src/commit/process_buffers.rs +++ b/magicblock-committor-service/src/commit/process_buffers.rs @@ -116,7 +116,7 @@ fn process_commitable_separate_ix( ); InstructionsForCommitable { instructions: vec![process_ix], - commit_info: commit_info.clone(), + commit_info, kind: InstructionsKind::Process, } } @@ -127,7 +127,7 @@ pub(crate) struct ChunkedIxsToProcessCommitablesAndClosePdasResult { /// chunk can run in parallel pub chunked_ixs: Vec>, /// Separate buffer close transactions. - /// Since the process transactions nee to complete first we need to run them + /// Since the process transactions need to complete first we need to run them /// after the [Self::chunked_ixs] transactions pub chunked_close_ixs: Option>>, /// Commitables that could not be chunked and thus cannot be committed while diff --git a/magicblock-committor-service/src/commit_info.rs b/magicblock-committor-service/src/commit_info.rs index a669153b..40b060ca 100644 --- a/magicblock-committor-service/src/commit_info.rs +++ b/magicblock-committor-service/src/commit_info.rs @@ -104,9 +104,6 @@ impl CommitInfo { ), } } - pub fn has_data(&self) -> bool { - matches!(self, Self::BufferedDataAccount { .. }) - } pub fn pubkey(&self) -> Pubkey { match self { diff --git a/magicblock-committor-service/src/commit_stage.rs b/magicblock-committor-service/src/commit_stage.rs index 66a5e858..6e555ece 100644 --- a/magicblock-committor-service/src/commit_stage.rs +++ b/magicblock-committor-service/src/commit_stage.rs @@ -16,7 +16,7 @@ pub struct CommitSignatures { /// The signature of the transaction processing the commit pub process_signature: Signature, /// The signature of the transaction finalizing the commit. - /// If the account was not finalized or it failed the this is `None`. 
+ /// If the account was not finalized or it failed then this is `None`. /// If the finalize instruction was part of the process transaction then /// this signature is the same as [Self::process_signature]. pub finalize_signature: Option, @@ -293,7 +293,7 @@ impl CommitStage { } /// Returns `true` if we need to init the chunks and buffer accounts when we - /// retry commiting this account + /// retry committing this account pub fn needs_accounts_init(&self) -> bool { use CommitStage::*; matches!(self, Failed(_) | BufferAndChunkPartiallyInitialized(_)) diff --git a/magicblock-committor-service/src/compute_budget.rs b/magicblock-committor-service/src/compute_budget.rs index 0b2aa312..1dacc425 100644 --- a/magicblock-committor-service/src/compute_budget.rs +++ b/magicblock-committor-service/src/compute_budget.rs @@ -54,7 +54,14 @@ pub struct BufferWriteChunkBudget { impl BufferWriteChunkBudget { fn total_budget(&self, bytes_count: usize) -> u32 { - self.base_budget + (self.per_byte * bytes_count) as u32 + u32::try_from( + self.per_byte + .checked_mul(bytes_count) + .unwrap_or(u32::MAX as usize), + ) + .unwrap_or(u32::MAX) + .checked_add(self.base_budget) + .unwrap_or(u32::MAX) } pub fn instructions(&self, bytes_count: usize) -> Vec { @@ -192,7 +199,10 @@ impl ComputeBudget { } fn total_budget(&self, committee_count: u32) -> u32 { - self.base_budget() + (self.per_committee() * committee_count) + self.per_committee() + .checked_mul(committee_count) + .and_then(|product| product.checked_add(self.base_budget())) + .unwrap_or(u32::MAX) } pub fn instructions(&self, committee_count: usize) -> Vec { From bc48d5873f5dda003ab8ef830d59e2517c25b61b Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 18:09:04 +0545 Subject: [PATCH 43/58] ix: move table mania/committor tests last since they are the slowest --- test-integration/test-runner/bin/run_tests.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git 
a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs index d101cc0a..f735d7c0 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -18,15 +18,10 @@ use test_runner::cleanup::{cleanup_devnet_only, cleanup_validators}; pub fn main() { let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap(); - let Ok((table_mania_output, committor_output)) = - run_table_mania_and_committor_tests(&manifest_dir) - else { - // If any test run panics (i.e. not just a failing test) then we bail - return; - }; let Ok((security_output, scenarios_output)) = run_schedule_commit_tests(&manifest_dir) else { + // If any test run panics (i.e. not just a failing test) then we bail return; }; let Ok(issues_frequent_commits_output) = @@ -48,15 +43,21 @@ pub fn main() { return; }; + let Ok((table_mania_output, committor_output)) = + run_table_mania_and_committor_tests(&manifest_dir) + else { + return; + }; + // Assert that all tests passed - assert_cargo_tests_passed(table_mania_output); - assert_cargo_tests_passed(committor_output); assert_cargo_tests_passed(security_output); assert_cargo_tests_passed(scenarios_output); assert_cargo_tests_passed(cloning_output); assert_cargo_tests_passed(issues_frequent_commits_output); assert_cargo_tests_passed(restore_ledger_output); assert_cargo_tests_passed(magicblock_api_output); + assert_cargo_tests_passed(table_mania_output); + assert_cargo_tests_passed(committor_output); } // ----------------- From 9fb082a5b6ceb618ab17af4a7178415ed48beb2d Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 18:18:09 +0545 Subject: [PATCH 44/58] chore: rollback delegation program version --- Cargo.lock | 22 +++-------------- Cargo.toml | 2 +- test-integration/Cargo.lock | 26 ++++---------------- test-integration/Cargo.toml | 2 +- test-integration/schedulecommit/elfs/dlp.so | Bin 321056 -> 319832 bytes 5 files changed, 10 insertions(+), 42 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d5c825ea..dce0e98f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1185,7 +1185,7 @@ dependencies = [ "conjunto-addresses", "conjunto-core", "conjunto-providers", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", + "magicblock-delegation-program", "serde", "solana-rpc-client", "solana-rpc-client-api", @@ -3641,7 +3641,7 @@ dependencies = [ "magicblock-bank", "magicblock-committor-service", "magicblock-core", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", + "magicblock-delegation-program", "magicblock-metrics", "magicblock-mutator", "magicblock-processor", @@ -3797,7 +3797,7 @@ dependencies = [ "lazy_static", "log", "magicblock-committor-program", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", + "magicblock-delegation-program", "magicblock-rpc-client", "magicblock-table-mania", "rusqlite", @@ -3836,22 +3836,6 @@ dependencies = [ "solana-sdk", ] -[[package]] -name = "magicblock-delegation-program" -version = "1.0.0" -source = "git+https://github.com/magicblock-labs/delegation-program.git?branch=main#9570abf24ab6d03ba75ecdbf8a6a787bb0d69d3a" -dependencies = [ - "bincode", - "borsh 1.5.5", - "bytemuck", - "num_enum", - "paste", - "solana-curve25519", - "solana-program", - "solana-security-txt", - "thiserror 1.0.69", -] - [[package]] name = "magicblock-delegation-program" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 261f614b..14857532 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -108,7 +108,7 @@ magicblock-committor-program = { path = "./magicblock-committor-program", featur magicblock-committor-service = { path = "./magicblock-committor-service" } magicblock-config = { path = "./magicblock-config" } magicblock-core = { path = "./magicblock-core" } -magicblock-delegation-program = { git = 
"https://github.com/magicblock-labs/delegation-program.git", branch = "main" } +magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", rev = "4af7f1c" } magicblock-geyser-plugin = { path = "./magicblock-geyser-plugin" } magicblock-ledger = { path = "./magicblock-ledger" } magicblock-metrics = { path = "./magicblock-metrics" } diff --git a/test-integration/Cargo.lock b/test-integration/Cargo.lock index 1e744609..93033d32 100644 --- a/test-integration/Cargo.lock +++ b/test-integration/Cargo.lock @@ -1150,7 +1150,7 @@ dependencies = [ "conjunto-addresses", "conjunto-core", "conjunto-providers", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?rev=4af7f1c)", + "magicblock-delegation-program", "serde", "solana-rpc-client", "solana-rpc-client-api", @@ -2937,7 +2937,7 @@ dependencies = [ "log", "magicblock-config", "magicblock-core", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", + "magicblock-delegation-program", "rayon", "serde", "solana-pubkey", @@ -3550,7 +3550,7 @@ dependencies = [ "magicblock-bank", "magicblock-committor-service", "magicblock-core", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", + "magicblock-delegation-program", "magicblock-metrics", "magicblock-mutator", "magicblock-processor", @@ -3692,7 +3692,7 @@ dependencies = [ "borsh 1.5.7", "log", "magicblock-committor-program", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", + "magicblock-delegation-program", "magicblock-rpc-client", "magicblock-table-mania", "rusqlite", @@ -3730,22 +3730,6 @@ dependencies = [ "solana-sdk", ] -[[package]] -name = "magicblock-delegation-program" -version = "1.0.0" -source = 
"git+https://github.com/magicblock-labs/delegation-program.git?branch=main#9570abf24ab6d03ba75ecdbf8a6a787bb0d69d3a" -dependencies = [ - "bincode", - "borsh 1.5.7", - "bytemuck", - "num_enum", - "paste", - "solana-curve25519", - "solana-program", - "solana-security-txt", - "thiserror 1.0.69", -] - [[package]] name = "magicblock-delegation-program" version = "1.0.0" @@ -5712,7 +5696,7 @@ dependencies = [ "log", "magicblock-committor-program", "magicblock-committor-service", - "magicblock-delegation-program 1.0.0 (git+https://github.com/magicblock-labs/delegation-program.git?branch=main)", + "magicblock-delegation-program", "magicblock-rpc-client", "program-flexi-counter", "solana-account", diff --git a/test-integration/Cargo.toml b/test-integration/Cargo.toml index beaaa3af..b64288d2 100644 --- a/test-integration/Cargo.toml +++ b/test-integration/Cargo.toml @@ -39,7 +39,7 @@ magicblock-core = { path = "../magicblock-core" } magicblock-committor-program = { path = "../magicblock-committor-program", features = [ "no-entrypoint", ] } -magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", branch = "main" } +magicblock-delegation-program = { git = "https://github.com/magicblock-labs/delegation-program.git", rev = "4af7f1c" } magicblock-committor-service = { path = "../magicblock-committor-service" } magicblock-rpc-client = { path = "../magicblock-rpc-client" } magicblock-table-mania = { path = "../magicblock-table-mania" } diff --git a/test-integration/schedulecommit/elfs/dlp.so b/test-integration/schedulecommit/elfs/dlp.so index 71a5dd1efcde0c3e06963ed7a27e5760694b5c27..748c0d84b4a07c530a2cc84390bb5ab5647d6303 100755 GIT binary patch delta 39171 zcmbuo3tW`N_xS(J?1FeBKwZ}raJ?a#BKa{yQ$#abO_4MaO^HeoD~Ym@6@~RiXhzf& zhx!zumeHpo3S+5n!geDpH7YYKMJzQcBf4mG5r1c%3%fJb=ktHP{?F@`!~2{wGiPSb zT%Kp2-O3O4YksR=u{9(&b`4#LTeIFsp2&N|2@|nE2y=@iR5OoaQ(* zyPSu14V;s4zh?B8bfIRHp{GXS+!0=Q?G7!UH)WJ8Aol7-WK=MXznkTSG|)d%Swg?$ 
z;NrVATVCQebaHw1i~V{K8oG()jTy{7>VH42O=YPA;{IY7#$FsSa7;z2qSO!NnX%? z(X6Xk>TpYl{T@;K?={s0!(;sElX=y3qa!H&`bKp4k?Qqhr=$;u>#NZfw()ylvPP>%Cq% zZ&UH@)h+*uBjgGecIC4qwEEpEhfvzQft{U{O%H6SPL175$Z+O2+Y(snTuX3{q|Ig9 z<_@7xB&a=7EGh4&bPpxA`sj2kWb*w$L@}K&%AA9$v`Lt?XwI%H+ zA(N}Wzj-_%^I6!fljwyTs*`U`CcKR+7Mn;K+qhefWiq-=I}O*T#XjHPC#)2wWW zYaj`%&z;eeRsGgVg86lZ$x~cKVkpBiX$Ce6ori%3npYs;_j7BJ{qsEOk=@8NxRnig%3opo~#}imdK{8$DVC@BU^tA+5d0!??M_pka{dWQ@D?Cf0tG8_*LdZ?* znH}}mAuI2%rzht_*~i3I9lvubB@VW+=rbDVfR=ZuWmuKZf^PLwlqMhM{qu4fD|*n9 zf5&`9S>%W7t3C#N7t=47pYoXc=KvE?xZF=!t8%W*rHldQmEMvk5n@ZFs)Hj=Wr!o{iFVAHBowCp2AC)F6GW?QZu-b?WoF~)2MRp6ptpy z3VLw)lvD!;Li+I&XXlW1J*rVAsWkXn>vwbY}w`fxC9@eaGu{Vrl1!W_~E>ku2cWDN^ zpFf<2#RO`?uErtK)Yr57pNhLeuZSNC(tI{J?$ClbbZD1qt@OZLRoLP6x3J%zvMmlmm2;QY zcRZ_2v*Q1{!iJ7f$+=q_1Lqzs2y%yO9yoVvW05;tb3C`7mf24k$v${^6x;EbjW*># z>U*S;ewPS!wYVM~W5HAOQif{Qk7|i>*Y0w4q?~)S<-z&1ZZ}b{ZO^VtZ0qwQ68Nl+ z&c%VyFW$nR@mQqJc1gZ*xh7X6`TFIo1R`jYgVjM4J&RKiQiIE-Zfju6OY5-(Ec~$o z-jKjrlE;?aX=PI%w-RUdqQ`$Fv@@F}K9NXU-(h7>q|=j~(6tPsDyx}7Au zelnZ(p3T~yEFmVgt!(_pkGA@bN=nhJHP#P)D`f3lwqD1T~( z-1PVKCR(K?UXOvvyF_k!B0FFH6ivDZ$_CQ}+H7SlyOJktoQDzO6T*wzFJ1CeBVF%T zX{imF4TpOZ3vHUl@^;6OZ`l6b*V0F?g}~d0g``!7?YWQ8A+y=ur;pO;*(~pwY`)B0 zz~q@ZEOT!jy)cZm?@h)?7)9+q&LllvU|w{a9v9RSlaV< z@L@dsd?lI4Hoj0#QrNT?fflc0M_)|9Erx|xCYY-)T)G6VAQH;9Ryt_bI(E1+kCd=^ zFTF%ou!}EUOCDv{RJ}wdvGY~aX%bg2V1+MFq>c;KbuSkX`guG{sE(lVoh%cRt1x*t zg2r^Ry6PyhnSEb9ifm*9_KhMrY}&r@pbI`T1F?jBFAxPQ9~=*IIb@uhG!u{PcudJcUpK0<8l;aVFVaE?{hrcdcA;@vu*!(5=~k98+*;KW0RTd%>nG< z+hJ_x{t)s!TeN>Xy><;te8)oWXHV`QMZ?yxA_U4#>>p2dvA%Vq$b;<4x@*ZEwy|y$ zJ$8Y&lN@1(>c$g0yNK$+Y-Ig|6nEpih7s)ZdK*puj@31c?VWUyPgp)%gV{LGD7r_> zTE?@|Z*ON^brJN;?d+&0j~rwh8%DDAZ`$aD@2X4QEFsto7WHEr>nzhM%Mjbl+B-$LGJYu+72-_^1OYuWm+ zC*RAbKTl-gjd5g2_3}nMKIO3o4%`>qAx+FH*~s^wrDeDCPNrY3;ZDX2P0_Yc@~AqaINAyZ{uBthwv8t=V)%9YPy}iGL^M_ z5JB?Qy4I_8-K*90=4y4I-&oBmu_^A=-0c#`l|H>sP&n&`sQo(%~9(k*RW4N9Z$#m zG_H%pF#NDj#vFYTV z>Uqb$Am)0DqOh@F#72xCjl1u2on3rlI4e5TR9&;VK)YOFR<`4drA9$LXJPgA`~(5Z 
zm(YRa2^Mx7_)=SQ{2elk<(?SLSHbWfL)lv=9;Uxt!&kwHV`|jRCbw6#IuCM}+O>2} zyp~RJsA>J(+^u8QVI5s~AR7~@3i(0HcTe(NflFB5lOvN__$Gsg2(1ujh~~F*D4w#+PEui(!z_)!aszB^4zpVmzL6m6&C z_wKw9=Ps=s1ISp_&aFCx@^VdHuN9-L!j#7lKj+{HNOc2~9^>qR{4A^OmTCzK`z|X6 zpELA2m16!Kv)}_6TYmjRnvS2Kcw6d+>x{*6H|q8HbPnj$TBtQx$kifkBsdU|qX% zv|-upy2#G`7+Uf=3CQ&u2%u9!o>;?m!D7uE29TDzqTtP2hJ|r!BN(4(@^$HurQm zv9jAwUppvBt%@Juaro73C=3s~c=l!YmkpUZ6LaDdH2l|&X|7GX`*7}xRdX3##Pz*Y zy>{~D_2I!L{0u+X@EeLaHuq~QJ9j3Ey>dDP@7u8ex%B6wLu7yIREGSHX+K zo4}&81$ZKY%5V~cMJDv?{F4{r>wPpYyyrPC#=<+;7hezM=h?2WEdkpH@rHGD!NCGD zBHZ`nuZGWVZQS|H#)TuX%|bU0Vu2?|kayVqZR&{&PT361|`hf@SP<(Rb#4@?v zC~S8trUR50II06wzl!U%{Pld)bh+s^EVq|(vzoCUGxCeC2K?d$mte-+Uafk?-U@S3 z&c~2nxi;Og-J0E+p_(GMm*KE-5&m3A2IXgHnfM&V)l02(r5Z$D?+m%oO74V7ZJpL? zXRuW19<7{lrt_-8cZ~SmS$BE6+y3m&E%*(08m7G90aI{A>iF5#M=P6)uKSTi{SewG zQgx0C5`JW5-{W5TeSh*W+xEk#Q4geZZzI*Q$+b1BU6X6m=B;y=U6(hp!R_-Yj`AJt z)`@pV^DHiPYRdP9hJ#^xulXr8+)ahGe$#*IXJ>Ga+NE;${iczjevdB41{L$g3|q)0 z-097ec>&+q61@%U|D#w`<3-*%`pE9~w)Zm0PTj)v5oTHE`cJ9edfS4?XW( zY^&-+vf58_|ET>kMjJEd9?fIl$vDlYt9VP<&a!a-O=?Sdj|A<;9`RFhRRKo)CAPC8 zDrCPpT6gJp(at@dOYBHTRL@scF|M6#@K57OCA;pY`T5mqPToiT{$WlmUaigvHSga| ziEpu8e>WvQMEU=m61{!q1bgEhtMzt1k@S6QrMkCZs65^FCtLAzV!qZ*_0On2tDX8O zKDzMje3u&HP3pk&QwP6z<96eY<7=0tjbJLRjGBpr==| zf*&GApIj+AnDh_P@j2W7%gWHd#sLP9+JHC5vzBu;T-6)WOB)b_VM+4z^9OzdFoKZo}AcG+w; z?uW46-U%~{J^B4$J~L?XN_P1B2)x~U^p{Z+3Z#yRRi~)ohc@r{z#9$DQX6dM4Tf`% z;Z}R@n}C3>(>&Tg>|%eOkMn6-{!b`(mg|%E8LiFEr?tZ61;%)AX7p6p2%{^trPMeZ zc+Z)w+PvvLXKH5;=TPm8f@@t5-GF_3A-~rQbwK!0gWdS+l1Z;TFWc`xSgT`3QTUb6URcpk6UwT(}G4`e~2HoZ6X8-LF=ly#8v< zlU!ZSkB6NYegjC5l7cF}ukn;`*8v-_>-CF!v3)uBTHkng>l<8UZM1Sjl62O( zCO3qI{GM*konpVpHvAr8jaPHyKBJ|NYx_*@4)*%*Q)p)dKW#AEA6Yid&U@@i($5*O z7g?XT?E}+=LrAQy#Iw#HyQujGeg>iA$3vVyvE?r!O!r7TD)H_)AI;Ab@9T1{F5bN^ zl#ItS62FX7J-2^|2mdBr?sJ^@6jwX-o(olOEu6aZ>BkQEa5C|WT7?DV_KH;-?OX6% zlpANsHGClOst`U9@X6`DUi zu~K}$(3J;PVmP0o+PY3PC0jLjpdD8!Y*D6^J(69?Q-+Z z;*(Ba@3kF5D}i|q_79oxx6KHPQ6Sm!Fh!il+N8+jE4pO9w5+)$n$~#eo12X`c;&|EdJBBplSJmP`h!dH$-fp?`r6?e 
zS6|%9V_m!3;uSff@n%3jBKRpc^X%%Kw(lnme#${rZrsWL<0Ku81-{1zM4te@cLlwL zSqiUDCbp<2(f02qlpZ*hi@Tu6LM&$f6#d~YTcUT^7Vv1RP|H>LWa0-Qqb*LKbIZK{7L`Hu{mEmn zJb;Yq{|I)rZ}jqW3#)p!KY0)q`H_{On$J$3K;&Bh_c?nU9F8M4D4s-m`QsT7FFw;o zU;L5>;=SrL^(~pt3#WeP$ny__Y5pX1A+D0X1G#+YEoxDJ&FKvY|7ac{+Gdq&b7+Co zy@}yn>)4Sb{Ho<@InEF*{G3cn!-nhS>w(83yHkqnPvE>i8QY_xR9#|tqXJ%48#U(= zyc0ktLQW4dOdm~S6pbF@E!+AJqxtRk@@Sqx$^VzpY~^;l)CP4w#4(m;#z%9kHkzaU zVKgTUlN34XU>Jj|1)}e%tdBalq5j&G=UqVl4Fh_TiSXJ?e87umH0wiZm-wmUoLhj+ z#w)iZK!MO;8qsTrwoLDFT!gu!NGN!E66***4(J)EE)aZYG(JQsf+u^EA%FS6eK;KI zO$O3a(Qx<$YJop{6FYew!upU=`0xV~`j8m<0}1~C@bG1%lKcc4gUAo064nHh_vpfN5EDX5 z$RBVhgp4OU;bI6GLJQA9cs~+Fj=;Qr_K`GB zJ=e2wxwdcAF5~jrEB(LY9k=t$pwe91sc!i}|6!Q+v1`A>_d zc%ZRAIYRT=p=6O!$|wWnog`3Hb4sW^^P!$}rC?{f3!UF4re&fUjO z0tofL&TSb=GJDaW9CyLxB$Lv#S?&wtND;w5pkO>XOQUlkGn!cNMogOlylZ%rJ~T&v z%A4yhizYE99y3S(MUwxn#<+?2vGsM=LHk7Xw=LV9c_oP>{Ex~QlB|Y9qM25@_r($` zCHKRjDP$BWfb&yu_&0wIfirMzjlPQfLSv7+Bc_tQgkJtN9G-?F5b-s{tsY)fCu8qP2@fJJ|2Z{L*E(X2fFMF=$b(i z>4GmHaVB>6EnmP48#7E7 zx(6kYEfhc5hB60M(fSUwJ4m{DC!SE4hgl(OHkpbaY(wR2?3muO-BELRf1se?I#Oxw zgSW5z6FT?YxulKI@H0@h5*ypty;%-A1F4DF4gJqRK_WK#Ls&DPtTKO#_lA%ZN375> zpP1>{(@-{@M8Sw8(n#};L3P=>FJ@saRPBdPy5 z0z1=)i?1<(H48-;Zh4|gB9nNJoJcHKhi=-@A*?pCq@T$4$E_lOlFhtMm&P^-xgAtPbpB$< zTS216w=ecS6mnK*&UN3>+@luFKkd>se3k~+O(COU-x}=h;in)llSGUNKlRs*vVOCc zlTQ>}2`@pKnb;vGll;O%=T4k2ZO}Ip+xQjq&B6wqgmGD;Br-Rx3*jzzMB+b``oE(`3iz7Rr@_8LS3mW+1)GEaCj}7yv>)Z zymj~yJ3g6@a}t|7D2I$CJaUk-dC=e@VS$5AQu~kN{=#}3cjKe_Mb|;yNirM`1efV_urp6t7ybkH6jyZ7$IUSjtQ4{}d@5TlFK z!?=fVG+u{AJbBf<@gZ_8p({ReH$05HDW%8V1&`pQh}&35+WxPmvGcP$}-> z4PQX=LpT6$LfB)t(B(Ek;Nv*0vzj3Nagt|~-+rl$)8f^d3tf-n?ycmyE&nD*`Bva= ze}W{K=#pa)Q;w~79CO!|lW;;u9CNqqB3*&x0Gy~o7vBNDmoaWUF!E)L6xSj5*_TNR z88hn;pAjd!w_dsC$FJ(HxxHGe*?%87tnLi_l_&Ss8d6B8;&kJCLl#1#o%{u&O81f1 zh@GMsqU!Mw@$?%cgQlU)es0sDOOMpyr0_W1XwpJyG3I=W9Hmui8VdAn@-YUgT7Y}& zJEWFS`+7INGKr0gU+>1s&r+JT9x8vuliMxtlQ}#hD&NP|>5fEq;s>Oi(kiq+jE5OI z<%qleLy}48h6Z=)M;K_dc)h#f6TVmXZGfXkFpT^f!15`%fGfN^@F>Y9)UzH6KGR~d 
z{WH?Qm;Rb&jDNlt9OIkh+YWc;=iIW@YngZ)E#KBHL+87TPT=u^PlWbX%)010H%^2o z{+@>>P+wOW<}N%%9=VKf>)}6=8e5LEov*^fB|rV7HuZukjXRE#f}9>%^I_A?oVZ$Lr9&)Axqv5$Tx34Cd|@N@TmP+-hzcj$Q% zjUzM&5-*^2aW2$cz$ShK<9;RieEq=Q4)^0P4;=mz!>-wChUveX!eHV>;s_b_Hg^Q? z4Dr$(DY}SrNq3aacpDC1#JV%)gQW}STKOz@b{8H${&5wVOK1}B%`!r>DfXs?(iqCU zz*Rb%a*etfKaDZ0ZpJ%%&}KqvVP#J`id4e`n9#~Ye9wr2(>-Y|UHul6^`fa{8(i!~ z$J6{n?x@}rm%-q-peTbxz=l3FmL_7344k`j4r!aSxi9sQuiWqUrIQF<`Zjb0(!=!n z2KV91XbYiZ-*i_7({w^N9|TK38bvo8gupD!o^#Nh-j8Cqk9!mH`qLH~{3fIhz{*d% z_Ya`+DfQ$+pp~kN9v(f0xF5CB9wyqFtG=>kDvhA*^u`ZWf``ND6r9zE!s%jqSCb~2IfQ;r?>!9Rc3MUQKXcdFX)>W<2f#9nHc;~c zXu*_x2hR+r50YbW7AqsHjbqCk*C~#5fs0$G2(p)97PjxzjvpOq6zB8 zzjriMgX{1Ym}+msu%OY?9ITI-IAq6Pd9s2Pt=VtQdb&EtyfXo`~_1qD}NS74erf!2{O==(41 zigWPfzpxKlpzlQb5^03}6X|>8CD?W)>YjuFG4wOKxxw8YLopzQR|_u(GMQe5rqd?V zf&6fkJQ+LqEeM-J^NsK)PrF~8fNf!E$Jou=CC(i_Y{W3>4bo9&K^r!i(4 z`7UJ7!k*%%u35CsD3Q*69kQ>XF}VG_cnw|L%jWWVwUa-wweyLH&kl0#afc^i577&I)E9#+W0Y-DhT1Z`p!|Yk>u3Sj#Omx=Okhz!}Vn;2eB{bk32v5QB zPkqguoyLQ5sKJzyhTsKg*C+z53qAVHE1|yA=GiXVUZZ@fPbg$lMyj7~lmCtCoBhQ^#&*w?rLdf&bWEz>LbwRn_1p#?l7wpu#;L(MU z`6k9uMjphyg&ybRLBU(JomMa8V`g#hd>a>XT6Tjr0Ihl6_HuufeDH6;eyP|9F%9%6 z&lUI{<}S{IuJ^FLo{bREi2CM@klcveu@Ukbu@QJGEL9jYsEs!wL(nMpPSfacZR_| zr*MpZUIY!Na0v9tW~%66X8BJHD*1}em1R)%6&=A_eDo`9;pk*{;Az^`gLb&wf#+yh z09EEe4thQ?eLh zcse@;(s`OT4@xfJF!f1+I?h4!pq=ySWU%~-T$Bt6JiU7{Wb^bzEcaJz|5Yi_z|+db zaF(Y(q92`@PECdgp1zLxc)B(j(s|l{F%pb@qsK6be_Ylmuw$H9zEaQF{gWe)r3 z`6}bDm$L|0p+9LRy&mW4pExdPfAmkBT?bMi{30^y)#VW7nHSNox3FHGR&0XGix@{9 zt;ykWlE*LErq?D9h>>enn+VN;{n@&o2P9aq#|m$CXIo4SgdV>U;;ekgZiGCZ{&FMK@ib@|bXj>-%OGYD zGG@#kWQw3)EyG8zyh)p(hV!Lm(89T6Gh7&i1&1!z^Z{GkaW>Q50Q$soXo*LI(k;+6 z4AYg%@p&4it}PHX++?xEF88|14^88EEaCjHZ*iv%H+A8gBRkxwu_nA{%-8}AQ?M7% zm58gb>ewxicop_m(iX_P3QJs<30*0eqQ2}Zblq^ZY74Y;d&AX6ta&Q3;cDv^cji=6 zrrB`SR=Cyc5`Q3cXD@UKr?TOC)>ep^VJf2gZiJc{SWsmqwD2Zj#TRB^AE3t(Gfi>* zXv$W1`XUo<F?{?gHbdPqOpW0x-UjWQ zjW*|OgYf15Ww?}W?!@ILJnNKih00X)w0XJvY^o_cm^!vWY8DPt+BV3`!V;smK}nV= zp?i2Lw!wugQ!K^{xEt(6o7*JNqsSNM2FRT5K{_)UwvpLU>Hq 
z-D|@0ddD{R1*ZugPXrV|%LdG9FK~BlF!ANC0Q~c??u-I=VjeFfsQ`*xrbMG~R{>mb zVVR%Y3gMg3*U|!r*@XPlt&qyOssOS#;mG%20sfm!C3L_FDBEm0%sF$5sf^yd0w0c< zD(S5&@Szx%pSc3Mc-mS3aoez7Jb*+MnBw4_Z6-VI(1k9S8-)#YOu5e%$7iiCU$#N4RUaJUb#1zx5HgB5L2R`kGsFQg9j zypW?0nD!F5=Ppx_d*_3uSVEs$0S(32oFmxWhp_jQ`yllpoS9jJpza}4j48urW;Z=- zgW!ivSJ8?UkoYhfwXSfNJ!}fc=QY?l%!Idt_d(nvI7~^HHjtT+@u(?OfAlc{Y9BRC zwRwD&v0PhF_nw+{9|V?Q{g@_~;L`9bmRMrSrXDQmFg_cd3nmA(+Mon55S3TpFqk52 z=T~^Af}j3oEuYmtV|4no`W1VT1Nk^i?TTzb$OCV#_YOa~#T9+@0ZY z2biqx(8o>1X1ZpfyRyuL50}>99{iN4!8UrOx4r@Rh>l2o1ZjDwN|<&H~H!n9}GKXuQYNLMN_-vZv9Xf31YPPfUa1$EQteZ85$z z^zF{K9BRqisAkOrpOU5po^g^VDAgq?{@6E7coW}3n1b}oF;>Cs9(fwXut}0 z-HRrFN{3+CubQrQ_pH*3D|~I$x<10c&p}XM=QyJls^b@8^KU%7e{Y*&tsyjX-06Q~< z*8ZJl!)eqs|D9&p8645hLZ~}~d%O0GZ-U!#)-;RKKhoVrZ6^QT^p90g@-y~HlL^v~sXH9aFa@)EsW2r!ygDh`ENR?L*KY<4(x4 zo9j5o4K>G6#}3FFYL0+kvT(uOH`Kh~a`!d!%-f0k^F(tZ#W7Iak=L7_9!VF?gu~h9 z2vbIH6MU9!4jWV4n``l&Lr=5xA4F)?o=)R8F;uk@-y7oAnJx6*nUK8B95Jl%gtmCR zioahoSTj*Z>M2i}Wh0dqq=GuEOyS)NRh8BeH%r>chPHE?HlM`gpHCDn}~+ zE!1l#cSXVSQ)PV|ZjhtB`HN6LCIkKJtIM9pG)Dis_G)vtFI?oA1&6u4XO>yf_f@4D zyFhz(j&G9TZ^ca0^Xm42PV~>${uUp5EH6&hzl8n|*ULbCrmXM4p)%@2eVMF3f$cNe z!u^%?gYh8XqwkXSb5U>c)*rQA^8aQm->3dmuKyJKX|O;5FF+ak*wo{)_mg zE}q4^ukVltb`w6i!=N+T#KrTGef?O9j1PCauV;{r81=)Wfbo*qTPWAwlGIE;MUKVi zWZfD2&M~V$f@BDWA@f6(aZN$S{_Vb|Bl|Wm6*;(Di+32vh9g|O6WO<+2apG;YcDFF z#Q%&5ttogGMcIS@8MAl7bx^hed+g@x%*rTLtNkMvEH|VO?fDXKT>FrXUg0u1|4Gyv zb=IKXaQb&GpSQ>Q<1M*1m|D>mWbE4X!2hFtt4(W810Rz&RGtE=~8(q$lANX+*(;H3e?|Ss z|Dqp*A35dYdNpRgH##%X-e@H1>ty|2)EiR}_3fz7_s#!r)Z6hte$B#i@jj~iy3VH_ zE?kMM?X;T7UC6#2x&zsF*1e4EyFAq3M`<{#V2jHfJf;c3n9h9a8DrOu@-+sa{Kf@P z=fXy>#E%_}X*L_>M#U=^Ks)NgeD&$5Hx{zA7pW2er?SocdgZ*VO~uC-K-?y?#VO1e+O-w)z3>+!&tF9ye-RCH=tbSN(7SavVXKYEziS~R zZpK#o>JOscSmsfmhkC#GH;mTxZT>&a7q6(d=k;z848fh`3f*MQ4Q54u$rpP{QJzCD zjJ{ogS?sEm%OQM=UQ3YLhr?Z1A-{aHOu(QRt!Ht(LE)ur^MGDS-G)$vWlTAMbsA&B zZI2@xb@I)MpPc!17UjkU&iOcge8cnqiE`gf&o@6m5DeqSc=cQ4jVsQWKWZx;dagkE zP%h9f-~4gr~Eiq9`su$Ef= 
zwX|!gc>cKTM(+;6*AwIH5|0I`L7by!OuZ4p@5AZHFoO|9b1{Q4%W%FW%P*7lTg3Vg%lhk3KMZ}~7v}Gh^?iNnZ@UGewxj=Tx0n_D5|4U4hyNP>p*~gC zKQHQwWc^E`zE0Nj`i=g@^4n$oi(>xp9g=^?MgJ3JJ^$$rAOG`YJwBrIt-n&%e}(!) z^nqXKUyH0ii26C2TB*7f0`Euvs_;NcbcC$^;flDRxsn=9iSE^U&<^UGQ~WhJ!i z#IC&teKL0EbackJJ-7xR20nm?suk&C7iJ?5>#hcGDkUFfeJR@e?t7KU#_;fB{ttP8 zPkBwqmwP#1Nka#H2TH+5SjLkIZnqpad1I;O+$wc?Alm!pzYN)TwY4Ef@QnI32lIPx zMee}I8fbub-`&^$Dlh@7lB#hn|mSGofzm2*Rf$+( zhs3cU>T@J61y~B^@cf|1MZ0K;1Nc1(UWVf?i8J_<0Uy1iOyo3)b41QpBA*g7WJ%m6 zacQ||AG=%RpgkfNN~}EXtIx;fT@WZr(lfpW_#>WsMJ|;%phDC;B+igHjqj~^sXe*~ z|0wqrAbTXv;F~R8Y6eWJBu;xlRd?&lUw}D}nWOUUuZr?Ci7RSEz1=NxWUa`>5_i5P z>OCHT^OcJC1c6fgk;sLgirgfzLU-$~}rAUcOCC)l7+Pfsq zIVtKr61Vv>)~^Jd5;NE(4*F8m7fS5q%&SUuV0Bg>lN_70?vzE`m4wR zT_V>~ZJ{(`FOq+O883s=ByRJvTEAis5Hn;+?2@=cVx^~;zf|HZev?Hdi&>sQ(gfmXj#%0B?c%jiPuAdwZD$b}Mn28;S8 zi913yeZEma+9)Z*Xpz${7uhjZUayxb<Hh3Yu>$K1ky9itm$;QPAAiFEWu{m_&Mc8D zByNOr4a2_+;AXE zGRV16%+Pux^eV^mNYXM<-X^hYxu|bW6}fGt$VurU7fW0va=wz4DQ2jUIQC9apCfUp z#En^^eE`3;_X&+if$jj0|cmnDsdx!7KoSOVEhS@JI{$+^}Wt|{Ys-D(CSxOCC+IV3n-VkRbu-OqJ5LZ zDIJ==`+(M}3yc;-{v;NVCULREg+Gh-jS{y>T+yl8-`L${9h#t} z>Rls6_DHPYPp$hpR4XtBSgRnwpG6)eW{8(KL*inIYb9=$xYL(;fLkNQ3Zo@Xk~l-+ zLWu*$h~*VuE^?C(!-)Tw`_r^>u=GC|3>gze%PNVZuN3t;{24u7MsRpyMJ~TeWM!(z zR+00SoOsb7c818U*NNOIu@zrB_H`&);#mBBSzmpjz&IEV{-aWO87)eaxKm>LQqew3 z;#!FVZWQe+R{AgxhMZMC0_0MOt0ZoeSjiCcM@rmryQq)7!^^yW{H->xfOCaQ0r|yl(K_fgkz@6jCXDWAl{i{|iNer3Bu>#^d@%G?BKK1oMS;@#2E1Qs z4j<;&FFIna6FIF8en-3f+BZdclfybF}q^Qr5xJu$Sk@3#+Yq5Yr{jJ+> zha_&5*!rWGKS^Sj#8m?0TG%QG@L=|nSYW)wSrV5@+$eE}#P*+k%I9lwlCJ>SC2_gL z9*J8eR(|oR0L!yW9Ph*G2e)ZH0=`1KBrb>9)#f2Hnk8|k#Mbj-z0ndUffI`fE|$bq z@E8|&Na7%P4aNCM7sNs{Wd2pu7fW0rafieKonrnRk*QJz-|RCFxjycDzxicM zy%dRyCH6?%A+enZ<=vtri2}tXah1fa5(lZ@Kk+&kFL4%(c4N$VByp?6of2D3f}cvP z#3>TzNL(uN0;N_ID9sXgN^CVt4oI8=+p%3&luF`SiJK(ufY&fvaJ2qE!Ze0836Akz zDwf1;65I8c?u=|MiEAYe;QzB9pXhP)6}eXAe5FYgD6JCPL&O4-B)0Y!^(hjUO57|k zuDn45!~&8e&XKrS;#!HDBvve9`Bq=%E59dPG-#H%L*jrTqP<<>SeYk^_T@2N=JhMe 
zB(H#Dq{I%1(4g3an8mXjrMg~Uw~w@DoRh*(~+#El~3E%~ET28lfq7d|f9 z*XpnS8Vi)`2~l4vvExZ!J+7;*f&gEuEAutL8=|K~E|oZ^T+~-d9KTD{r}#2o*X_GS z17(lMRT7s!E$V}w5!v&s$gL796<)o%uef{!d|zpkSb5Hyf$JkBE|s|adC@+nQsh!E zYx`QQSHR0xVqX$7bV{66CF)aN7P(d8q()I+dca_=KAyDgAf)0tSd?<2?#BCA> zHHr4hry{2rtkI>AOEmEY$_ZH0qEzdPfoepL0Bci%pyX@YJpmAL6A_`Sg#esk-2v0&>3ksT7}NF4N=Xdf$a z8cco<*TwkXMY9x%vm~yPxKZM)i(+|&5;yCw`Ro0UaY4*#EE)silDN}f)Z2TAoFs8) zfT+*tDRNdXU&i`d1p&T|(%aWSQPTQ|oYhz4R+$4uebQwT2l+6c7am^$a%+f>0di75 zkqZZi+$pigBIzYA9O%`n^RmN7z{etJu-AZdqr@qpqTU)Na*D(*iR1O3z`!9_>sL~| z0$!hzCb99G8Agm0O8R1nDxKrXFSp6}U9Su%QX2iRsvL#NFI7?y| z9Kd3RmP_I)i9PTe`Z}~j5(jJ&YY&3apYU;&#E~%h6U-eii5(KB!D`HngR-3`ER6otJe`z+^HXeGtE4r42Gl4t2#%rI6)9WALbD_`ez4TYh94Im#dbEqcAhBk z8c%SZ@FWceeP$jH8K2=QT8jUWW;lR?V0(dB2Rhnpo*rB(iSe_-X1)?PW6lmZhJRzs z|JIC=nFNE5VXtQ0CzewPlexHD5?8?zE^d;8p434+It@i|Z^mgRugj^PqsA&G0D z=jT{fvm|bV(Oev`T`a^3v$;4{5<6fu7iUP~9N5mqrINS;p5x+1NsN8@x%nyD&>9rzb0~{{vN+!-=x2$Z?H>$4d39j z`tJPsYLMi3x&95fn8I*TKeTV)QyrFN@-*PwD(Inc(>o27n`toK&kNwYEWY=GL zHVl$}F$^?)hvJ`=_)iz44Tl|HnQMC5 zXJ`sV{RtshcG}#3P(1!-A-|0DlJT=Cn#~hv6KveX8fgx^dD=X_SM3fhYwZsB{j@oz vSH}Gs;}`I!oI!5dsWE;Le?4;d_xFwO@!R3eGv-M>n_4t82W&cH9`gSHI0j{X delta 40563 zcmbV#3tW^%_y04q3zD}M)OB4E*Goudh`x!urifjTOc70yR1hr@Z-`n5qAcE!E+RJS zp&OzW(VG%Rx^xlI(y-L5>^sj#Q&M+!tPASTr#~;*I|&Mrv}I)<p!AJ?>5Q<7=PBbt&y4ddJqZahLS=cbI3$>f#tsbqjJElFcpe#fYD7K`i>>)Vp1 z)xj-pKqoJk@9ol#(4sXgYhX`yy6ZD^!}^W( zaO^TEN;jn3>>VpuW4Az>{U|4-oQ<+}r&D8DxYbIdmz1Yi!>MoTO3{;Bg!kOsh*7B8uMwt9!GQ{H9X)HX%$}SAH(0^vJ z#=+rqNEQnY38e$GSW-wHwPms9kOQ<|R(a_VZ#wYzhup<%&(Fv=|6T*rKd5w9E*hn9Ih~W~+G>me3!ztztnIXGYe zi@d*;&U&;w@&Rw+D~IJIi+^x9{p=wY{gBl+Oe$N;4x%tUlQln><(n)COW20UdGyiD z^2W$Wa%adm(f@zw{ufB4*0cDi0RtbCv&F`K&Z5iH2q}>qDWsmwvLBv*)w4 zhaw4%kB5MeMmF`~(d0w6?csTJZzOM)U$tbl!+aJvK9oK^jwOt@`qoR*TWrJl-t_%( zYD8KjVRQM(@j(7AUX^T9^k^EtlAVeUrz=*n&J(xO`&P2ziQ#nSN_KVPIm081G`5!~ zv#T*FblJl!e5TbeLXsvXv#d#tw8Ki4GP#(XVci~?MgGH{eB`6Qi-;+iB~6JWJInV@ z8B53u?8em1v?ZP8#K!viNDX?8U5owb&MQ)XFdaR#ptH(T+ 
zFr)78T49=3o-{LwkR9w~{2@9wo#iDY(9Cq!m{9t6@dS2s)Fk(eeqYpA+&UyJOiUvcBAF`9~(-Y!91!g1^fyrX3>J%U@nGn)En687sh~FDtD; zrC!W0V<=tzWO-yp5}}b#veK1VzD3gfzrjXjzDeF;*D`0(%CY6qs~YK_YTUynIGyA> zcG5YI{9HaPdjO#wo?r=Uj*;E$jeq-GVLWfUcDH}@Zf$=OLCygeK zW!rON>8kOpF{hG_8p}#IrqCRe9%=vuUEDO!dM9tM>q3 zyYwsDOWB})>}Dbg*YHwsmBB019@EO{(lPw6w!mNwO6CZ?CsDa+yu13$5k+bbFYOnk z>UT3es?v-=#i@Qcv|A-gJ1m&jdh1r^h;+3AzH&KGlsO``4YyNqqKf&#@U2i$oXTHr z&(rj=$MHXOI9tV;3zFN)otTqlQKhj(X3?nX*-yVej4s(dRYCyLy8qB-Ap(m@Zz;`O}_T=D|29o)&j(d9z!N`pw=?3 z$IE99)FO~MP>bB5yLPGNm3F95!<#!$4;|ttt0v=rGY8h*VtYQYjaA#|QHaNg56sta zL}u*N8@H=QZ6xQT_!i&Vc-X=b+K(+{1Du-AE_95CDATU@4&u1g9^=riw*mQR2=#Q| zcXG$HSa7d0?YirzC%A5})rQeP%`2Wz9NZl}#%NG;+p{Z+cr5Hyt?<1!adb2qxegoo z+@2mRW2>)In@pK~R3DUW8c{Ozrf2Nd<^3=QcxCK5t~t@~HWe>tJ}S-Zq7s~>zo;FU z(NE(~HI?gFD?in=PsMy7@GHaF7v}+9Q)#Ix;Io}yUW$efEmfg9@O~S`w~~`o#_|_7 ztB!a!)-!sZ{5UnLTJ$~nn~^_=M~7Q<4=t@*a@Mq=-}41?(DS_GD5sr$Ox7Na0zXOY&W_FAp0 zb{4ea6p3(_@HFtMW-()z-jH<~cHy|@UC0|U_+Ym!H;z0o`4J5?&cQ%e2<`X#>!3 zB$J&^?BZusITow*Ql3RiEO=WWZ5q#Fw*}J&C$OAt3BFYxn{w{oO?G43AsV-um9pu~ z0)zO+a402Z<=5aBLQ^x@)#sy4%5lm@FX_dmybwj6V=uq3gnmArb$&68E|>tFBQX=d z{^D{ng7tc7GS?DO1^>Q%zneBaf$CwJIJAH6ZTI02SM3KbL z#f5~YgItNffo&?>K`YWBua=Ib{$_SNI=FR5*pRm;;DGR$S?wEnJhVCaR~waw*v%c? zX}fS{EeaHddVVUd+|Dc@hTkc#ly+_ z>{{^>a*)Np_9hKl%{rHqlI`qZ$!pjba4MCQ(uL!paS0j7BL20NY-VTw1=MF1OL-#_ zH*M^|8<8Z5-F#ynZOCM&N++|5J%i{&&8&2P05k8+r!Nj>!Nqn~ws#QjVlM7|lccas zrK4#zSI!~7v%UL!mp}1lGNHa>SY25l{plCxy)Te9{=$Nhe2e6Ro^bqrQwO$cpAYWR zHtZXKeY_X{zRAw+8%&;OUT+O2udq>XIY|*a_|`Of#|UOEk0le!7nk2o$V|5S?G^1d zT)TB^suIZV-E$|+=T!%@Df{C{2v$Ffe)=8jd?1uob7E(S2gcLq$FtG{Y4mFxU1v#O z7Ug=JdO29BD~s+P&w?wG=r0pkYrd83sYoTCvtI99;w4(&DW;2?S=M1I^&ieE-wh^T zv1{)RCRwc4d)?`gC9L&uAeqAA5&Uy83qKM}YZtSWBmN!IZ}Q2-$KWw`^1ZS2%h9av zNFaT-Sq;HPBp(FRX^U9G2Z3ZVJNW))GMr5~h+AK_>ELi0HJasp=-<(8BvvaoNh{Y! 
zEl2$qv6M<26rrrKGMs+%pYq^C+X-*Yv8n?&rB)yAPR6i<@AhJE9rmN`!q~CH0rb^H zEZ{vW`Ig;0Jb-4R9R!JN)RDpD7F&E|0J+At9T`iB=56^6?ky>0cYnZ}9RI-plEF57 zu$d+dW6=luvk@QKX~ch6-oZf~;@aA~gl+n809~yWO;?LXsYUN+!;U)XK&|MPNA09z z`HiD^r?Q{j@zF-#^lWkJE@B5idX1i4sJh!jb@z>h++CW#P!06pMXa$phX8x>_yAh} zGb=nENYDSwjv+bsvl`yw1w6dwoeLBNs$z(ZC4SryC*$Ieon#%m_VH+v!|wiMBALNf zf3lJ6QSIbucAnDgtk>*J(d|34`9%^ML4%_Ge z)pNRVF&`rICYnIkzgoauCu>;zsg3m8kt_|J9jaA%w^n74R%Oe4)%Bm|d%E7pE`I7H zXIXsBXjR%6vq zYOvn>Ne$M4pVVOGXf>`8YFxz@e>NCX!QCf^w_Do4TUW~#o_vVRV|SccLZ{7#1V%6q zoVoBB%$ec;_uq5oY8HR4yZzqbO53KYo0pg3i_?f_SUr7CWv`waP2MQ~=G=K= z&a_9}WFOXs1t#5z3t35vw!ppKq*7kUb~%41Pov@IOKC_Xt1KDl#06e0l>${N)M%J`t5K7;6N8W7x*T=ftaJo)l)Q&wNA_MZF7XZR@@1b@5>kN=zk}!__#Y> zU{US!ZJ3Jn-7^nE*edkgN(kF_sW+{@$x_by_ped4Gm15@G8a^9-tE*%^ZSglTkOG4HNYd0ZW^0?$OuLV!Hyai3!kZ`^YoX4x>wPqv`CaUr zY#GY;r>gr(#7M$~+`II(+}bUE^FN z?e6Y3%!^$}eGps!pNnzkK6vMg2eWpkap%Qb%AGwF$V=c|1@;TC@6bIeR5H4?Xid;s zq-3I;UzxEQpO@#2=jFIcJ9L8A-_48yyxKPSPbR83W0FdoN;jf4oHvB4dMUV##Vccy z7OSb`53S-!TFq0*Zz?t#o-swg5isj_5XD-+GN(J#z;z>Ygy~tMy$J z)iHh-!JE$K-&*$amAmgrQ%z)!X!!%#e_=`=6AQF?%l*O~Wht|=m%dKryUNa2R{1w+ zWj$;^f_D&0ck>`ZLI&yId8}SbgQ}@5rcGDOcpm_*Ez~-BPY<3Ww7l!SgK4jpl6yB} zSA^OYp5U@3V5R!S+s2bUZ`-aOtnfmA+N!{_H_79y`f z7zd~7432V@-raU`a4-A)hgtn}E4wu%OZ)i`Z4?KN6`yDVylbBpICJ-*^)+H3!SRU3(|>l;Z}-%h-=G^v;*rqZy z_L|X*U0Rg*u}mU5%-g0&(c-Dg=!=Y9?pVDg#o%vZ)kE1UDQdy0*Mdb?Co${wzyawP zf{dL(YLkqJBUK}wvD2aRtJ&}Wnb=eJUGIO-WD?H~e%lkua)=+jsPNX$3IEI1YoYZ# z>&#a}u0?}KVevo3G4pp($ZTSz4gP)ILnG}kTic?wmP9hIZwGWAqc&C>R_;csjT>n6 zy52D!*(`|n`p#@MxgNp;Kz{y8OVnoCZo_7JDcYFBRG=*v#kj3}|GQv)d0M(l-~8;> zl4u4O|M6Y0|2#Fw#v+pGvEhnZ#j-BV>>8me;DMQHmM353IBUEVO0(6i)$PCF9c;$V zT&>bw`S8X&q_=OL)^++)g_W^?d_S0kv&X)lI4A=xV0dS%L>+Wrs1ecH?3t2!&4=xE zBp{{?<$srjcz10@xsi>GmKZZ=aUbq6*5|F&XJjK9G7oRPda|Y09@3MBaq7h_z7Qr3 z04-?@{Q3uRHt}&`j91UuQTKy1F20ZoYO}x?7q7E!*9Y*U0{pv!9c`T3=g;GSAJcHp zat5ojA0Epi1E`EET(PI9JL#1AdgT45}Xo|}U&IF%FQHAe?*oEuiI?tKH zixufry{d&@K6x{4D`fM%0=2Mu=?@@ej4?K7^_m-F&{w=LFY4IXpXsxDjKbdgF{`~k 
z3cQq;*!@2}icbf;{8O(UnkyMQwV}sbosWxQW_t(w>?i1;s0KOrEPL|jQ46(umyAAI z1w5JYPj`3W&mUl{`l&=c>3tkO9tzcPrq1UJX$vOBZltRcha!F=%qd^l^QGJQr&+HX zJ-hO$!)_l4)iCxshb%jbKxn18reiDLu%UPiw)=e1SdJJc9hfPBw4rp{cm{XWGeW z#!ef{{H2dAN7##Ki%Q1MhCkSaDjPfTORm|8hlG4TGCG(Kyw~;QV%(;-?MJk)5ODjX z#(;Ou&mnyOaYA=LjQ##@ADTaxpM-QigS!RezSVUbYk$NZoHUnruoeX4u7+EA82LtU z7WdG`+@agpxFdb(w{xNMCE}l;Re5NOT9Z0Q&SBLVI>s+t)EM3EGgvL{xfS4xp^=9& zc4>OXEgG-S)R~CC!=3U019a$ht@3ua;kS_d{&x#ou+QFg9`>xf z6=2!#cO5iG?ebK8UDLbTuIJF5$vR)#_Co3`(v{8pV;C&mOS)K$o%7gWY-34&EA#&_ zfDQfKKJw15vEE|c4@`Dy<96#92c(VrHCw|*V|gRv{@lo`r+g1MDzLcM0){y<*z21Q|&En4u}(NKrftwe4mB0<(9EuH@?%# zSl(nf!R|Ufq(2{Z#*{M_RO2*=H*_OrF37%@^nl~9(+;%e+O}xo?=inVxyD+5_3!DP zUdM2I;W@0E*r*SBlkMA1k^z2OFi@V|9dio;CXw#Yi;%e_h<&%O2VH(QYy7xBeOz5A zNg7-vq*up?wz&5WVJR*4uJRXI@v{>#zy9o)Z^C{tc2t>mtsL$kv;nY7zgL{2CKeok zGqg|`Uqb2MlJ@GCtnv@(T6gidt`p-rSB`6<9M>6t8P~hixCZ_yuB>vOwR<0lovzL{dZqu-3~ANkZA)wzgy%-ps^G2kAAtl6g6@2PP}D>fZIqG{dPPQ-bO0P zOjmq+(w*?HyV5$~*InO@-d0BkLQF@3$GOn06Zr_g8-lt{WV`uiJn3U0`)rWknRF!t zUhPbJ)0^MHvCbrzoP-;l$>!c~;i2Q7_jjpk(Bk`Sg*UpA9&}qhRNhXWa~S*RO#8R0 zLZdL7@#JXqgBBJ;+u3<`F8VQ!Dt}2|p=v<=IIn z`J3KyeczM#5V~}ttJ$BVw5J8DTuB2+3Z>amu5)*iJc1t}=`OTd;e_Zx#A=tnk=-&* z%i6h4SJogBYU0D>z+e*l7ZpN42zKa!iEt_e-Hpj~MGqn2gn!5zN@97J<5$zAuC4bF z8zt>vZz#bhyWngn=}lVU+DOt9EW^pq^zHL5?=VtK=xbM@^q&~`T~{ICG0dnTYKr26 z>gB7@`cHC|#KAWs$O^I);_pQxi(&P>^f>gJdLrfev{OVoVlHaN#%T7z$)0jm2Q)Ug2M1 zx&}p&20~xB0$H=Moz-oreA^WWABPdhzXD0)u)Pn%gonvIa}wUySHjx)WC7h#4~_FlAbA0PnfiT{LV+jLb8HBuB*&aRj|7{ zEh2|d=j0+Xk93A%i^(C{dMMrD$TCR(!! 
z^0II=o;Z&WK-eL~N#3VTtDw<|2@Z7=vPn37@jMh}lREnTc*t6Vy8n#9ll7kLA=dy` z)LQZ^p_Ys44E%l_DIjDJY}kNFX@G0*1~Qn?iqlY+LlWqX3lP2$1JVTXo5*0;x{)-| zb*Ei9xfmO2S?%(EhImtQFLc_3g+pN&CqrDQJC@M;8dq^1u6s1=yelamXBbbtInR+C zLj0i9Rr2?=4{Ke8FOdLz38L0j`7&wkOdf=Td(g!Q_+}3d z?~&lO7l+3mCtSz&k~%W5>I5I>m(&%Jk8`=^7x+_B$j6#by{XlF^-Xe$=Xad=rLM={ zBDsX-u68w+VnlhyZ9tt0u9Kdnao>Iy^GQ^e3sveU zzS01V2gx}arc!_Ss*?O02dAp)dc2BM5L%Mu3OK^sR-NSv{{WXU+L8r%KjGf$zGK>m z$vcKw=FDVQ@JFPP(lDnh;A2vX4_SZaI`s)jA++VFEBqwpAL#!P?Ftp2l0WEEN8vyX zj-53}p}B@MW3qNNo+fF8MmZtrGt}iN{xfonXYuW4aMbg(ewHtFUypZ1pW~XzZq49Y z)cji4T>pqGEqgVE%N7#?T&iam~C)if+fggx2rL0lOj{+RVf6 zXL%PiU>97&E~s(B)oYj-wOyZ|<3rwq0d1KIb8K-V`qu)zJ|?|@{y_TD7Jmr*0o!u% z1IYVA@pV`YTpl^9VHmiqotr$`$(? zE{cDbI0oIsMHnNQbrS=Jk!-$+*$aKxA57CI7w{$3>^R96$H}CdYq5#q!|nILgI;t1 zPTR#u=)jYF83=|C(P#S2VaW2P3AnYq=uHRH-%q$Yx1*T%HXVkP$4MZ}^r2z&;}dY| zah$iuPH3y_6(3qfX1QLyjSeUD+!3g2PfyX4M_r{IXdMp7Dpy`7iqF5dege&%X)ta1 z1R58T0Q1kEpj-JST(c8zr+?7bt02Kb>*%H`2=_%75?wF*(utHtWkX{ZstvIRNN?AZ zU1>WLP0nV)J0eXL11W#-&2^~sWq3ijIRG&-V2 zd*9>1=H($8r)9{}7%=pRUNp)T z1zI`XF$DsGsFjYK0--@PzNFlWbhx&K z;;7?J91Fvtz$-7lmnPVArMFfr1NDTv(iJe0hMMUsA3|&d28!>UB50*CpQ_)1*fBH| zm!3^y=(P6szkeso)%q~ThraYM1Vm#%<5ogwG!3PDD*=hv~V7^+U1w!Ug)niIU*fS%|p-1R)BXhwzzDCD>#`RGtrVsd`7rN zE}|tJXhpW`>I!+olqSPLk8U>~D=x=!tENG7IgPfDmCEshKVGhE zjVs}8dQROJgdV_5!#4=qU3^;L2kSdL-{CS7{w^)n8;y56?_%CFT3)l(75yHcBUh(E z!uvFqmgynziPJ)mzg7#u&w2=MVoML=C~8^@0hQ>nvJR3eX(R0!hh1iMEjmPFDD{3+ zi+}PucXPSBx$EHSVGK*iTCg6W=dcho9>K!4bx`*KHaBW5cz=lU#I+FmA>#D4knkZ^ zV%Q6j?8U5uQy=0;s9pz^M=|`h>mcVSc1Of|4acm9&d1PBlP*`*Lk`C_T^_U^S~)J# znsSBnjrHyymcz|<5z9U{NLDdw{Q>NpPj3Ie{w3QyvI{1WxN zrb8j8Y184*&t^&4e6I zBWA(@PLEHAtDMfC0oEUp?wbjboK{bQv>&lgo|p-R9H&o$Qyh<<0nHpYPlv#tkba4F zIqen?IX_|Zch7`kPOUTH7^jb-ADq5|q5m0a-x(0d>3Qq|PFKx{HxM^WhgeQ`&44sc&&I%ZPJfyK z2ROZRCY)|^* zO{yMlznZX{f58+J>LKSThJTC9{Xd8)sD-A`ccwvB3-$}@A85h3H6)%tOcY8{uI57I zNB@d`4a9mm4atGLU->XXe4825$FI~u_v^|v8zA&I>~xR9dB0(Hz`_Z?V{j}vuC(8& 
zH{LxoLe?LsSTe~~`3GODQ_<{CxK(?im0C=WUS^ocH_owsF(z$x6t7D1pGP+-FGEy3p9n&L-E@ZklZ$F+jbL9ql9h8kmSX0Yew>G zJOmP?^$8HmX<`E8aQbQj9N=_uJX}@f@nEHhYbQY@rvnopjnlwcP)tn$^k@PcLu%ja z;hL8c+veJ)1Zd@Y6XGGjg!FhkgmbzBgTiV11jyp_rvxbE)H(|)IX#_#-_#+UhUGbZ zJ^=#ENE5I;r{}Odr*FhV7N`DLp3{@K12dbfG$H}cal9}AnmK(N+u?%EaN1`UZ1*z7(tpf?MlTFnQ4R!pn-b{kS&-w6xF$!-qV+krb2f#0bA{+mrWDH1 z;Z7#2UrUa=Z+LpQZFF7jWC}M^+h#mO#fUY{bH)0ZjuHAmA_R0bVQ(cu0;e+*p^($` zM5ybERh1=zwHsnA7~9PhNFSb!ho;!1#Er0><9V~8lH>G^aIPB~Ua!mZH@X6>reYsz zo&%L5QNfW5bpc3E&c+ilq&~UOxwi=qK4-gK1Uw0LrXm7TF zf0)VE=4^7VD>}@SVm6$$Tb^<|#ScE0v`43KDjUwXGtB1un#yeA>E9c^X<#<8Qkx_bFHKqbA(y1U&_L)6v(6Ij&>VO=-Tg<|znY zWD0;YKcdY=i_m7pQ;@dE6lru4tnY=La%GXpVLu@`++QCMQBOm_V(jiW=J6japover zVi)7UCJY(>~YEPt_EVSflS6#XZ(_Yon za5V$#Z+hAlypo%#eHv0$nW7EzKF`3pRcQ6zWN2N5QE@y2*42ooCqwvZ#9_}s>}nhV ztCHdBYSVVQIvKLEOs6=GcA5(5@nk#@GnLX$lJOu6?Vn4AI!=?Hfq*qwZ#`~+)|$fM zl{F>@P1l)4>s`n^MhE&#c6S<|zn=K^xoSRB-Xg2nF>zZCD-)rablE4Hu%E8$R%qshH4)JXgXqrbv9nX+9J`iwzt(9~z&< zc;-F>;hS*Q=J$faO*kfU1K`{y%x#wW5S)kdsQIp}JW~Lk^2~?A&893`fx*bfUaLiV zgxm*FTTK4?LC8=j*kT%KkJ{*t4nAepw$Qdb1C3j-YNVmhVaAw+7N0Yv(WqwuPoJzX zh?$~jSOBCk^uuugN|`Cp9zWl0%y1@4K;aEPP1-jAtgB>yK@wd^0^e&6W3p2@jgi;==r*>6m?sM}5X4@BHq0D?0lU z4w%jJA^K$;P}}Fj)t60`wB%VRF2t&kkNZ0-jND;Lq|c-B4pSX{X+C5Xp+7I;nSWF2 z;a5zL(4Crn*N|6Dj8bPZ9D5CCVm6L_P7!;Tpo{C1A*Tf6o{M9@1P4dVGvNI?&Y3mI z5d1o>POFn$g|C~uDcy*+_nGc@efx%PE~`{CSI8-vt1Q*bg};drz~ZfMVmIOxODn^H zp_R@-|6N!2nNqBD;{sP~H6MHUq42mV+$6=t?g}|+I)jImYLo2X_|!B~Z&DupXaO{S zicTI^?>zB4p&C=Infh!3@3S~+mCdfCvphXCJ?kp|+*F9~C~S5`eqk#0qK-}4_zb($ zHa-IZgBeF7{z@{l9wmu?B^h}cdo+6!6kg`jLOt;t>I(YG^Z=!gEp(+^F?n~Ok1c|< zA223mo1pmz^b2QEY!l8NR4;Bq9FYg!*EN39bsP|u09PSW+|A{=&i#nPiZ*S6oEw@3 z9w`Pw$7W22czYJWH()*&AevJwmeq`dkLKoq^;e8aQ67ZrH0f7NYmOA~{>>C_H!pV2 z8orku#FI3#W-ivwiY70GG_Lp65>YQm*BhGZI`x~Wn9^UDX{!&en0Yr%(I#9Mx7C@| z<CKQTXL94=A2T;6f4S$%4f z^Z5H-hAzspWI6x+nLD^5UMMe>e0!$I(`$Is-3%YK4fwICm%8^qy9$!E;Qa-m!W>#Kcv32V0Sk9>%cEs^N4y8My5(82+#ls0vOobM?P-z+b(Z>7iV-EQj~FsJ;&>8#75h=S}9^rKi@%& 
zC&u{pgGv3XSTly0g;46u7^~U>nkH;2u1BMj4BnI-*F`{!NJttzVvo+l{h;byg zU272=&U5Df(!YrM`hx?i4N2dre1i(cJmMO^Bi0@PRrUX81)cC{>;K;&Cq8+L2Jk+$ z?UD`tj}|C4yy5V4;2#o}TC8|Rl)&+N^X-uOfZ6Pf#&Sl&=FTiYYz!ri|C=2yUx)fe zLgQE-ny*Rrst|YOirTeMxrL0r21I(#BWA^z;hZhc+F2;S!$V(|KZkN-2rflEV{AW% zPhMnVHuIFHqTHBAD36xqJ5g=~2jy8PckV&^nB?28a*z5r^CV(zz138{K?Mi*$m}bDEG?VM-?9+%gtT-7VOt1BE|1XjM#jGMffofztZ|By zmbq(3zLu&A+}4&gGKX95lv?Jgw2vzZ9A4};G^rVa=qz*h+w_{gmM){pnrRl5ZRy z9GI;=eUWbjXejo$M?G+BnYmm0BCL>KSl|>E@Ji+wmaZ{(8JgNAa@!b!HjH|PJSwg@ z4?GINPP27HH1a*u)&qzQANY#VN3Bp%9!I|C(ud`0rE{`B}C z@)jv?%H{iFdA_I{!&Q+L?nVXA`5W{-Rcz|e?__M0-K_iMJ#^d#+`!=#ho$(Hu2W-A zZSYfdep4evuQ6NOXE$p4EscQs9p%P=;rn-EpXEOr>bTs0wz+NJn1*s=JB0Gi z>m>O~Q64VKe?z&E0I+ z=X+=m{}N<*XVHEhm&eRAE5W+W)kux)WXwFMl;!_Ix#1tm8)bQ^C=b{m+2`#ywwqW! zT9!wk+~_})XUXzCqWw}?e$k_SSiVk{{{zR5NBy01B>xtn+@t<*S+HKW;)smD9S@%yV{Ev1A>nJo$RJ#+YK#70=SSy{h9DxV|i ztL5EI>b4o>7|FH^{n1m=kQcNTMC{OL%YKdS!V<=wb3T?cyxW@$DY>{c+KWLmp1qg6$gJ?? z&bW@^mO|?UjT0p~l!p4AZ&=y+W^_@27onmtB(WdKb%ilSJ@+&fME+d5)LD zFGDn<#7WqPX#ztYCE;WVSAeMBAmOM2LGDy+&kGDCPQpbJu9vX#f>=K4B@w4fxIn-- z0a9NU3p7Z$W``&*D-viMtKSgyefaW+mr*{*jkSDOvsYBGl>*&n4t9p@ z6ZxqU&UK0MY6)B35#_lO&VN^ww}{xOB=Ut3uQty=5V7TB5x0CM;-=3<9HviG!(N$$ zn*@xVQ-5AGPSWm0>R0007@DU|60W}8D4@5*BH|zkC+fcoU<|?n2|Mh1 zdA)um(O_uym23%@^b{*>kg$)xD6f!kST9lT2+*+8@UTQ@7`VQ-SfEM57X3#|4E^l8 zB>lTZoUQ%VL0gw4hp2LA8zWm|wBZ5?*Ze~?7<7+_!zA1w;pAbWen}|D7=NP$N|>k+ zA>l*`$K5aL=SsLq!pZ}pelo|r{|yJi9uyVQC0r$8B~sLPNVp(Mlvhi*0kJy%3oE6M0wDY zB94-_v?PNhj?C}A0bf>I{o?3JQCU&29|qP$4LxvM{ z<*9%_m?L4E)UzdPMg3X{`>Yq`F`k%@(sWM-;(`rg0Y{F6H;cGvyNK&w5OIryEiby| z>MUt^$<5$>S5q$H1_>)~i}ITNBDNe5afpO{D%#}QERAwAc>PMUgdOj+6;S1g63&)z z(|e+R$onFWX~SCmiqAn&Axgr@5_VLI`iT(qT{wzI_#LJkS*aN3CDdT>K913Lc$Rri~6}5cCXSUl0wucVu7$z zBF>g@)n}r-Ny0woM0xh-9Ao^A3|p--+_*u)WwoM#pf5z6{H2KN&x<(df{3Fy=KXIt zV7VwNRS;V;#E|GARx2W&KpWO5qV=)qTrb>*0o??M|3C9f(uQiu$!8#_@NTs1PFIL<#3gxJ<&e5^j;OZIEYuSbvznz!Mz_r%E_i!X*+83ihl3 z_Y;Ff+~kS*{7VfH6|#qjIBKYfbHhYjBjK2Tit>UHBCa3l#_Ig98trEA!QzMzahQY) 
z9uVbm4~kfc5wT6eVUyeBYW+%#o5AZ>;vQ*JP;rTblc$ODYzf=qM0o^0C(AEm+U2)l zt$wApjiKr(L9@gH)$>JMuu#Mm60VVOgM=N6^zwTBN|eFS>Q^kOVg+#$R#u7f>h&VF zZ4hy`glnGDCbo=_Kjb_HgF6+@S%=BgORL10b<~E{izKD*GRZQ!pdRM zffxxVOE~NZjTTy*eMB^n`hkdRkBYcS!V2`+gZH#PABk!$5_VLJ@)!xHOSnqHwGwU+ zu~UgTE>=(^;aUj?eJtw7NVrJC)e^RS;#vNFMTrm?_~Wh;u9tA!NlycKZVR*bntP9l z(jQ_m20*oheJ+T_A|zY_zNOe7j*FsNqJ&!{99Adl=SsLj!VMCRxFnQ!D%m1KDUxuD zgv06u14^8Pb0u6MVasLD@;Jw01cstiNx0xE&jR?(frO*J7UlU8u92|M6%T#>_@U4D z9t^}KFsaPk+gG_Evcq5>vPltK=0pMCeb{}8&7xAigexT6AYscdV)+OOr%Jd;#7?D3 zWGM9#R$9ag91@O@aH52>C0r!nF4$cH1G@+I@xa)A73&R?aEydgC7dteG6~m6xJktP zxv4&HVL!#dkhjc(ow>h@?vzQmTEdn;MExKM7f859!nT`2c}#mELrIo!u7pb@th5RS zl`sj%NH|}>kKm{l74+c??EBz8TJ{n&dEFa(LSNdHG-4Wl&@R=h={ArelMaK41ABwR0IOj7(`hvlh4 z{fl1~N;piyaS~3KaDjv?BwQP;@76H)~`fZ z+ZeolB~`*<{Ka>?j4_ff;R*@2NI0a2SU$B4YxOGyZ49*@rA)%L5^j>P!!A}3CgJ+N zqP)exdi{!}pQsQd;Rp%G$v8|bUo}F+j(bHMrQ^06v1CahTfzkru90wqge@b*3WFpZ zqha@rShA#08zCBKlCaMhQSOj%nS^U4+@#`r-Gj<;pJ*^~s)$o#B@Aoc!8}?8``$5+ zc1AraN)siVE8!vuS4+5F!l~0m`?(@^Dn&Czg$fDRNVq}5O1xO!CgBhXM+q3maY=$` zpkBgpsiHhx!ub-em2i`U9ZNmz@p%;H$v_+@;ZzBiE%z{h`n3|Scv6%%Wr(qwj-Yeelh38zX}Su5%nNH}?&DEHBykTtSqg@|F+ z`(|Hf*aopswuI{?9F!yKH|2_0c}m1K35SSyjuIs@ltdYC5)GzGI9I|&60U$T2hF`_ zr*9U`ROO4Y7hklP^Q{GUW@`B}uN60VZ4;|ACF*iL1u z4EOe`Lc&eWVg*Ie=L1ZjVZVv&WC<5YIOL|NA1C2#2|HRv{X`Kvm2{Dz4*kPNAjqzw`_es$i6mJPO?Hur|A(`L(%sBE#OI0+|9I30Zd zjqaC7>N$hF~*GRYl)||lsr|3U7+!i7U8$U;E$fMx!8S`l0 zTuH46$XRrvR$@0vSXnQ&%LbFM*q9iJoh;#O2^UDX4ECMH;8scOY6;iEX;kxVk=Q

3N36iB#4!W9y(k#Mbq8zkH!VB92Fo)ueUlW>rP!z3If z;W!B=OE}#V^G!;DCj)VbgsUW6E#Vpo*Gjla!akea4OTZv4i5(3rG!X0Lc%c;PLyz} zgtH}_@5XBVN|Bqv>sQJoTqWTe3D--wNy18=yT#nTr44KKD~>h>7h?n@93kNt2`5T8 zRl>OvE;6uQzfxf^wEC4A2{%Ys*(|omCgBhXH{GV)E44@a{DIS$8rFaLJfI~he5Tx{>73ZFVkNn=GY7WuVGq+22R%A+jXix#=;eB`g^yALYV#* zse$wLw?++|&VRH4F9RpODdHOaWl=*OR3XZf)mI-e{)R%!B~hVCe{;}~=j-p$8Mwjj zju3A^s=tVX^taFq{pv7Lo*Rjn_rIZ#tiR%9;GpSZ0ZY7y!xoCzp}%NjlrPg?tTAw^ z{;0Bnqt@w9ZySsn{WTH;7aR~PsL@}lFysyTOA`jJ&|iu$aLqxne1rZ@gCTD^q(8lF zFpBhd5)7QJzk^`l0{xu>1N-Q&2pHH=EmmmJ-vBV=4f+%R22MUBg(OjbLf=rR(x13D zaD@IixPepk$H5J3@p8`+J|-G$B98JGag|elD$_7f?=h{o0iUJr0{piD3Z8W1c8KHB z+?a1pa@V;r{+@)A{gfN?I_mGx@&^CR{~v18ADH8S|JXo9U$;U>#J2u!%zr^5djK4% z!{6|Xco0@zG6(x5Mrv|{jtzl#k?(8V7aOzye!XNK+&)6jOA(u4XuUbKz41E-O`BmE tV&AwenmL1BfThovyTFNhvwz!9Cm27WkO;rln}^?4@U>PXs!g@y{{xwoi4On( From ee396dac533d1ff91dcb12c0a8a4d66da266b0b7 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 18:19:49 +0545 Subject: [PATCH 45/58] chore: cleanup stray log --- magicblock-api/src/magic_validator.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/magicblock-api/src/magic_validator.rs b/magicblock-api/src/magic_validator.rs index 2c0fe611..32224fb3 100644 --- a/magicblock-api/src/magic_validator.rs +++ b/magicblock-api/src/magic_validator.rs @@ -807,7 +807,6 @@ impl MagicValidator { self.committor_service.reserve_common_pubkeys(), ) .await?; - info!("RESERVED"); if !self.config.ledger.reset { remote_account_cloner_worker.hydrate().await?; From 8635738b48f6f75275eb57dace26fa8597b83223 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 18:21:04 +0545 Subject: [PATCH 46/58] ix: give more compile time before expecting validator to listen --- test-integration/test-tools/src/validator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-integration/test-tools/src/validator.rs b/test-integration/test-tools/src/validator.rs index c5c64669..e206f7e8 100644 --- a/test-integration/test-tools/src/validator.rs 
+++ b/test-integration/test-tools/src/validator.rs @@ -145,7 +145,7 @@ pub fn wait_for_validator(mut validator: Child, port: u16) -> Option { let max_retries = if std::env::var("CI").is_ok() { 1500 } else { - 75 + 800 }; for _ in 0..max_retries { From 7f3e0eb8636b039cad9f74098322af722423fb2a Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 15 May 2025 18:52:47 +0545 Subject: [PATCH 47/58] chore: greptiles --- .../src/remote_account_cloner_worker.rs | 2 +- .../src/state/changeset_chunks.rs | 2 +- magicblock-committor-service/src/commit/common.rs | 6 +++--- magicblock-committor-service/src/commit_stage.rs | 2 +- magicblock-committor-service/src/error.rs | 2 +- magicblock-committor-service/src/persist/db.rs | 2 +- magicblock-committor-service/src/persist/error.rs | 4 ++-- .../src/persist/types/commit_status.rs | 12 +++++++----- .../src/pubkeys_provider.rs | 2 +- magicblock-committor-service/src/service.rs | 15 ++++++++++++--- magicblock-committor-service/src/transactions.rs | 6 +++--- magicblock-table-mania/src/lookup_table.rs | 14 +++++++------- magicblock-table-mania/src/lookup_table_rc.rs | 12 ++++++------ programs/magicblock/src/magicblock_instruction.rs | 2 +- 14 files changed, 47 insertions(+), 36 deletions(-) diff --git a/magicblock-account-cloner/src/remote_account_cloner_worker.rs b/magicblock-account-cloner/src/remote_account_cloner_worker.rs index 72b24223..304231bf 100644 --- a/magicblock-account-cloner/src/remote_account_cloner_worker.rs +++ b/magicblock-account-cloner/src/remote_account_cloner_worker.rs @@ -294,7 +294,7 @@ where let stream = stream::iter(account_keys); // NOTE: depending on the RPC provider we may get rate limited if we request // account states at a too high rate. 
- // I confirmed the the following concurrency working fine: + // I confirmed the following concurrency working fine: // Solana Mainnet: 10 // Helius: 20 // If we go higher than this we hit 429s which causes the fetcher to have to diff --git a/magicblock-committor-program/src/state/changeset_chunks.rs b/magicblock-committor-program/src/state/changeset_chunks.rs index d1e3333b..b03c6427 100644 --- a/magicblock-committor-program/src/state/changeset_chunks.rs +++ b/magicblock-committor-program/src/state/changeset_chunks.rs @@ -35,7 +35,7 @@ impl From<(&[u8], u32, u16)> for ChangesetChunk { } impl ChangesetChunk { - /// The index that the chunk will has in the [Chunks] tracker. + /// The index that the chunk will have in the [Chunks] tracker. pub fn chunk_idx(&self) -> u32 { self.offset / self.chunk_size as u32 } diff --git a/magicblock-committor-service/src/commit/common.rs b/magicblock-committor-service/src/commit/common.rs index ebfa0a1a..f666ccba 100644 --- a/magicblock-committor-service/src/commit/common.rs +++ b/magicblock-committor-service/src/commit/common.rs @@ -130,9 +130,9 @@ pub(crate) async fn send_and_confirm( start.elapsed().as_millis(), tables ); - let all_accounts = ixs.iter().flat_map(|ix| { - ix.accounts.iter().map(|x| x.pubkey).clone() - }); + let all_accounts = ixs + .iter() + .flat_map(|ix| ix.accounts.iter().map(|x| x.pubkey)); let keys_not_from_table = all_accounts .filter(|x| !keys_from_tables.contains(x)) .collect::>(); diff --git a/magicblock-committor-service/src/commit_stage.rs b/magicblock-committor-service/src/commit_stage.rs index 6e555ece..1d742037 100644 --- a/magicblock-committor-service/src/commit_stage.rs +++ b/magicblock-committor-service/src/commit_stage.rs @@ -88,7 +88,7 @@ pub enum CommitStage { /// initialized, but then this issue was detected. 
PartOfTooLargeBundleToProcess(CommitInfo), - /// The commmit was properly initialized and added to a chunk of instructions to process + /// The commit was properly initialized and added to a chunk of instructions to process /// commits via a transaction. For large commits the buffer and chunk accounts were properly /// prepared and haven't been closed. /// However that transaction failed. diff --git a/magicblock-committor-service/src/error.rs b/magicblock-committor-service/src/error.rs index 54344a97..d56b6e84 100644 --- a/magicblock-committor-service/src/error.rs +++ b/magicblock-committor-service/src/error.rs @@ -58,7 +58,7 @@ pub enum CommittorServiceError { solana_sdk::message::CompileError, ), - #[error("Task {0} failed to creqate transaction: {1} ({1:?})")] + #[error("Task {0} failed to create transaction: {1} ({1:?})")] FailedToCreateTransaction(String, solana_sdk::signer::SignerError), #[error("Could not find commit strategy for bundle {0}")] diff --git a/magicblock-committor-service/src/persist/db.rs b/magicblock-committor-service/src/persist/db.rs index 8f00375f..47a36ca0 100644 --- a/magicblock-committor-service/src/persist/db.rs +++ b/magicblock-committor-service/src/persist/db.rs @@ -40,7 +40,7 @@ pub struct CommitStatusRow { /// The current status of the commit /// Includes the bundle_id which will be the same for accounts whose commits /// need to be applied atomically in a single transaction - /// For single accounts a bundle_id will be gnerated as well for consistency + /// For single accounts a bundle_id will be generated as well for consistency /// For Pending commits the bundle_id is not set pub commit_status: CommitStatus, /// Time since epoch at which the commit was last retried diff --git a/magicblock-committor-service/src/persist/error.rs b/magicblock-committor-service/src/persist/error.rs index 4980225f..1d5e7590 100644 --- a/magicblock-committor-service/src/persist/error.rs +++ b/magicblock-committor-service/src/persist/error.rs @@ -14,9 
+14,9 @@ pub enum CommitPersistError { ParseSignatureError(#[from] solana_sdk::signature::ParseSignatureError), #[error("ParseHashError: '{0}' ({0:?})")] - ParseHahsError(#[from] solana_sdk::hash::ParseHashError), + ParseHashError(#[from] solana_sdk::hash::ParseHashError), - #[error("Invalid Commity Type: '{0}' ({0:?})")] + #[error("Invalid Commit Type: '{0}' ({0:?})")] InvalidCommitType(String), #[error("Invalid Commit Status: '{0}' ({0:?})")] diff --git a/magicblock-committor-service/src/persist/types/commit_status.rs b/magicblock-committor-service/src/persist/types/commit_status.rs index 38bcad99..e6964ecc 100644 --- a/magicblock-committor-service/src/persist/types/commit_status.rs +++ b/magicblock-committor-service/src/persist/types/commit_status.rs @@ -29,7 +29,7 @@ pub enum CommitStatus { /// The commit is part of a bundle that contains too many commits to be included /// in a single transaction. Thus we cannot commit any of them. PartOfTooLargeBundleToProcess(u64), - /// The commmit was properly initialized and added to a chunk of instructions to process + /// The commit was properly initialized and added to a chunk of instructions to process /// commits via a transaction. For large commits the buffer and chunk accounts were properly /// prepared and haven't been closed. FailedProcess((u64, CommitStrategy, Option)), @@ -134,9 +134,11 @@ impl if let Some(sigs) = sigs { sigs } else { - return Err(CommitPersistError::CommitStatusNeedsBundleId( - status.to_string(), - )); + return Err( + CommitPersistError::CommitStatusNeedsSignatures( + status.to_string(), + ), + ); } }; } @@ -181,7 +183,7 @@ pub struct CommitStatusSignatures { /// The signature of the transaction processing the commit pub process_signature: Signature, /// The signature of the transaction finalizing the commit. - /// If the account was not finalized or it failed the this is `None`. + /// If the account was not finalized or it failed then this is `None`. 
/// If the finalize instruction was part of the process transaction then /// this signature is the same as [Self::process_signature]. pub finalize_signature: Option, diff --git a/magicblock-committor-service/src/pubkeys_provider.rs b/magicblock-committor-service/src/pubkeys_provider.rs index d7ad1472..b4c93a66 100644 --- a/magicblock-committor-service/src/pubkeys_provider.rs +++ b/magicblock-committor-service/src/pubkeys_provider.rs @@ -54,7 +54,7 @@ pub fn provide_common_pubkeys(validator: &Pubkey) -> HashSet { "Common pubkeys: validator: {} delegation program: {} - protoco fees vault: {} + protocol fees vault: {} validator fees vault: {} committor program: {}", validator, diff --git a/magicblock-committor-service/src/service.rs b/magicblock-committor-service/src/service.rs index 4e022eea..0cc1f3bd 100644 --- a/magicblock-committor-service/src/service.rs +++ b/magicblock-committor-service/src/service.rs @@ -32,7 +32,7 @@ pub enum CommittorMessage { ReservePubkeysForCommittee { /// Called once the pubkeys have been reserved respond_to: oneshot::Sender>, - /// The comittee whose pubkeys to reserve in a lookup table + /// The committee whose pubkeys to reserve in a lookup table /// These pubkeys are used to process/finalize the commit committee: Pubkey, /// The owner program of the committee @@ -273,8 +273,17 @@ impl CommittorService { } fn try_send(&self, msg: CommittorMessage) { - if let Err(TrySendError::Full(msg)) = self.sender.try_send(msg) { - error!("Failed to send commit message {:?}", msg); + if let Err(e) = self.sender.try_send(msg) { + match e { + TrySendError::Full(msg) => error!( + "Channel full, failed to send commit message {:?}", + msg + ), + TrySendError::Closed(msg) => error!( + "Channel closed, failed to send commit message {:?}", + msg + ), + } } } } diff --git a/magicblock-committor-service/src/transactions.rs b/magicblock-committor-service/src/transactions.rs index 63dfcddc..33ba3d66 100644 --- a/magicblock-committor-service/src/transactions.rs 
+++ b/magicblock-committor-service/src/transactions.rs @@ -75,11 +75,11 @@ pub(crate) const MAX_UNDELEGATE_PER_TX: u8 = 3; pub(crate) const MAX_UNDELEGATE_PER_TX_USING_LOOKUP: u8 = 16; // Allows us to run undelegate instructions without rechunking them since we know -// that we didn't process more than we also can undelegatge +// that we didn't process more than we also can undelegate const_assert!(MAX_PROCESS_PER_TX <= MAX_UNDELEGATE_PER_TX,); // Allows us to run undelegate instructions using lookup tables without rechunking -// them since we know that we didn't process more than we also can undelegatge +// them since we know that we didn't process more than we also can undelegate const_assert!( MAX_PROCESS_PER_TX_USING_LOOKUP <= MAX_UNDELEGATE_PER_TX_USING_LOOKUP ); @@ -112,7 +112,7 @@ pub(crate) struct CommitTxReport { /// [MAX_ENCODED_TRANSACTION_SIZE]. pub size_args_with_lookup: Option, - /// The size of the transaction including the finalize instructionk + /// The size of the transaction including the finalize instruction /// when using lookup tables /// This is only determined if the [SizeOfCommitWithArgs::size_including_finalize] /// is larger than [MAX_ENCODED_TRANSACTION_SIZE]. diff --git a/magicblock-table-mania/src/lookup_table.rs b/magicblock-table-mania/src/lookup_table.rs index ad7cc7b6..dd0f6b0f 100644 --- a/magicblock-table-mania/src/lookup_table.rs +++ b/magicblock-table-mania/src/lookup_table.rs @@ -158,7 +158,7 @@ impl LookupTable { } /// Returns `true` if the we requested to deactivate this table. - /// NOTE: this doesn't mean that the deactivation perios passed, thus + /// NOTE: this doesn't mean that the deactivation period passed, thus /// the table could still be considered _deactivating_ on chain. 
pub fn deactivate_triggered(&self) -> bool { use LookupTable::*; @@ -187,7 +187,7 @@ impl LookupTable { /// - **latest_slot**: the on chain slot at which we are creating the table /// - **sub_slot**: a bump to allow creating multiple lookup tables with the same authority /// at the same slot - /// - **pubkeys**: to extend the lookup table respecting respecting + /// - **pubkeys**: to extend the lookup table respecting /// solana_sdk::address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES] /// after it is initialized /// - **reqid**: id of the request adding the pubkeys @@ -270,7 +270,7 @@ impl LookupTable { /// The transaction is signed with the [Self::derived_auth]. /// /// - **rpc_client**: RPC client to use for sending the extend transaction - /// - **authority**: payer for the the extend transaction + /// - **authority**: payer for the extend transaction /// - **pubkeys**: to extend the lookup table with /// - **reqid**: id of the request adding the pubkeys pub async fn extend( @@ -335,7 +335,7 @@ impl LookupTable { /// The transaction is signed with the [Self::derived_auth]. /// /// - **rpc_client**: RPC client to use for sending the extend transaction - /// - **authority**: payer for the the extend transaction + /// - **authority**: payer for the extend transaction /// - **pubkeys**: to extend the lookup table with /// - **reqid**: id of the request adding the pubkeys /// @@ -371,7 +371,7 @@ impl LookupTable { /// Deactivates this lookup table. 
/// /// - **rpc_client**: RPC client to use for sending the deactivate transaction - /// - **authority**: pays for the the deactivate transaction + /// - **authority**: pays for the deactivate transaction pub async fn deactivate( &mut self, rpc_client: &MagicblockRpcClient, @@ -437,7 +437,7 @@ impl LookupTable { slot } }; - // NOTE: the solana exporer will show an account as _deactivated_ once we deactivate it + // NOTE: the solana explorer will show an account as _deactivated_ once we deactivate it // even though it is actually _deactivating_ // I tried to shorten the wait here but found that this is the minimum time needed // for the table to be considered fully _deactivated_ @@ -461,7 +461,7 @@ impl LookupTable { /// Checks if the table was deactivated and if so closes the table account. /// /// - **rpc_client**: RPC client to use for sending the close transaction - /// - **authority**: pays for the the close transaction and is refunded the + /// - **authority**: pays for the close transaction and is refunded the /// table account rent /// - **current_slot**: the current slot to use for checking deactivation pub async fn close( diff --git a/magicblock-table-mania/src/lookup_table_rc.rs b/magicblock-table-mania/src/lookup_table_rc.rs index 386a28ed..1450dbb1 100644 --- a/magicblock-table-mania/src/lookup_table_rc.rs +++ b/magicblock-table-mania/src/lookup_table_rc.rs @@ -280,7 +280,7 @@ impl LookupTableRc { } /// Returns `true` if the we requested to deactivate this table. - /// NOTE: this doesn't mean that the deactivation perios passed, thus + /// NOTE: this doesn't mean that the deactivation period passed, thus /// the table could still be considered _deactivating_ on chain. pub fn deactivate_triggered(&self) -> bool { use LookupTableRc::*; @@ -424,7 +424,7 @@ impl LookupTableRc { /// They are automatically reserved for one requestor. 
/// /// - **rpc_client**: RPC client to use for sending the extend transaction - /// - **authority**: payer for the the extend transaction + /// - **authority**: payer for the extend transaction /// - **pubkeys**: to extend the lookup table with pub async fn extend( &self, @@ -490,7 +490,7 @@ impl LookupTableRc { /// The transaction is signed with the [Self::derived_auth]. /// /// - **rpc_client**: RPC client to use for sending the extend transaction - /// - **authority**: payer for the the extend transaction + /// - **authority**: payer for the extend transaction /// - **pubkeys**: to extend the lookup table with /// /// Returns: the pubkeys that were added to the table @@ -524,7 +524,7 @@ impl LookupTableRc { /// Deactivates this lookup table. /// /// - **rpc_client**: RPC client to use for sending the deactivate transaction - /// - **authority**: pays for the the deactivate transaction + /// - **authority**: pays for the deactivate transaction pub async fn deactivate( &mut self, rpc_client: &MagicblockRpcClient, @@ -593,7 +593,7 @@ impl LookupTableRc { slot } }; - // NOTE: the solana exporer will show an account as _deactivated_ once we deactivate it + // NOTE: the solana explorer will show an account as _deactivated_ once we deactivate it // even though it is actually _deactivating_ // I tried to shorten the wait here but found that this is the minimum time needed // for the table to be considered fully _deactivated_ @@ -617,7 +617,7 @@ impl LookupTableRc { /// Checks if the table was deactivated and if so closes the table account. 
/// /// - **rpc_client**: RPC client to use for sending the close transaction - /// - **authority**: pays for the the close transaction and is refunded the + /// - **authority**: pays for the close transaction and is refunded the /// table account rent /// - **current_slot**: the current slot to use for checking deactivation pub async fn close( diff --git a/programs/magicblock/src/magicblock_instruction.rs b/programs/magicblock/src/magicblock_instruction.rs index 036b572c..2ecd3906 100644 --- a/programs/magicblock/src/magicblock_instruction.rs +++ b/programs/magicblock/src/magicblock_instruction.rs @@ -151,7 +151,7 @@ pub(crate) enum MagicBlockInstruction { /// - **1.** `[WRITE]` Magic Context Account containing the initially scheduled commits AcceptScheduleCommits, - /// Records the the attempt to realize a scheduled commit on chain. + /// Records the attempt to realize a scheduled commit on chain. /// /// The signature of this transaction can be pre-calculated since we pass the /// ID of the scheduled commit and retrieve the signature from a globally From 28504c12cf3fd1f04f9f67ef634f3f513edc3e47 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 09:28:55 +0545 Subject: [PATCH 48/58] chore: improved error handling in table mania manager --- magicblock-table-mania/src/manager.rs | 69 ++++++++++++++++----------- 1 file changed, 41 insertions(+), 28 deletions(-) diff --git a/magicblock-table-mania/src/manager.rs b/magicblock-table-mania/src/manager.rs index d2202a3e..7b896006 100644 --- a/magicblock-table-mania/src/manager.rs +++ b/magicblock-table-mania/src/manager.rs @@ -174,6 +174,9 @@ impl TableMania { let mut remaining = pubkeys.iter().cloned().collect::>(); let mut tables_used = HashSet::new(); + const MAX_ALLOWED_EXTEND_ERRORS: u8 = 5; + let mut extend_errors = 0; + // Keep trying to store pubkeys until we're done while !remaining.is_empty() { // First try to use existing tables @@ -186,14 +189,28 @@ impl TableMania { // Try to use the last 
table if it's not full if let Some(table) = active_tables_write_lock.last() { if !table.is_full() { - self.extend_table( - table, - authority, - &mut remaining, - &mut tables_used, - ) - .await; - stored_in_existing = true; + if let Err(err) = self + .extend_table( + table, + authority, + &mut remaining, + &mut tables_used, + ) + .await + { + error!( + "Error extending table {}: {:?}", + table.table_address(), + err + ); + if extend_errors < MAX_ALLOWED_EXTEND_ERRORS { + extend_errors += 1; + } else { + return Err(err); + } + } else { + stored_in_existing = true; + } } } } @@ -243,7 +260,7 @@ impl TableMania { authority: &Keypair, remaining: &mut Vec, tables_used: &mut HashSet, - ) { + ) -> TableManiaResult<()> { let remaining_len = remaining.len(); let storing_len = remaining_len.min(MAX_ENTRIES_AS_PART_OF_EXTEND as usize); @@ -253,27 +270,21 @@ impl TableMania { remaining_len, table.table_address() ); - let table_addresses_count = table.pubkeys().unwrap().len(); + let Some(table_addresses_count) = table.pubkeys().map(|x| x.len()) + else { + return Err(TableManiaError::CannotExtendDeactivatedTable( + *table.table_address(), + )); + }; + let storing = remaining[..storing_len].to_vec(); - match table + let stored = table .extend_respecting_capacity(&self.rpc_client, authority, &storing) - .await - { - Ok(stored) => { - trace!("Stored {}", stored.len()); - tables_used.insert(*table.table_address()); - remaining.retain(|pk| !stored.contains(pk)); - } - // TODO: this could cause us to loop forever as remaining - // is never updated, possibly we need to return an error - // here instead - Err(err) => error!( - "Error extending table {}: {:?}", - table.table_address(), - err - ), - } + .await?; + trace!("Stored {}", stored.len()); + tables_used.insert(*table.table_address()); + remaining.retain(|pk| !stored.contains(pk)); + let stored_count = remaining_len - remaining.len(); trace!("Stored {}, remaining: {}", stored_count, remaining.len()); @@ -281,6 +292,8 @@ impl 
TableMania { table_addresses_count + stored_count, table.pubkeys().unwrap().len() ); + + Ok(()) } async fn create_new_table_and_extend( @@ -660,7 +673,7 @@ impl TableMania { closed_tables.push(*deactivated_table.table_address()) } Ok(_) => { - // Table not ready to be closed" + // Table not ready to be closed } Err(err) => error!( "Error closing table {}: {:?}", From bc1324df8ec32c57127cffbe8621b686d0accc98 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 09:42:48 +0545 Subject: [PATCH 49/58] fix: greptiles --- test-integration/Makefile | 2 +- .../schedulecommit/committor-service/tests/ix_commit_local.rs | 3 ++- .../schedulecommit/test-scenarios/tests/utils/mod.rs | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/test-integration/Makefile b/test-integration/Makefile index c4fa08a4..e1148af8 100644 --- a/test-integration/Makefile +++ b/test-integration/Makefile @@ -7,7 +7,7 @@ RUST_LOG ?= 'warn,geyser_plugin=warn,magicblock=trace,rpc=trace,bank=trace,banki FLEXI_COUNTER_DIR := $(DIR)programs/flexi-counter SCHEDULECOMMIT_DIR := $(DIR)programs/schedulecommit SCHEDULECOMMIT_SECURITY_DIR := $(DIR)programs/schedulecommit-security -COMMITTOR_PROGRAM_DIR := $(DIR)../magicblock-committor-program/ +COMMITTOR_PROGRAM_DIR := $(DIR)../magicblock-committor-program FLEXI_COUNTER_SRC := $(shell find $(FLEXI_COUNTER_DIR) -name '*.rs' -o -name '*.toml') SCHEDULECOMMIT_SRC := $(shell find $(SCHEDULECOMMIT_DIR) -name '*.rs' -o -name '*.toml') diff --git a/test-integration/schedulecommit/committor-service/tests/ix_commit_local.rs b/test-integration/schedulecommit/committor-service/tests/ix_commit_local.rs index 5ba085e1..7e0dfe38 100644 --- a/test-integration/schedulecommit/committor-service/tests/ix_commit_local.rs +++ b/test-integration/schedulecommit/committor-service/tests/ix_commit_local.rs @@ -872,7 +872,8 @@ async fn ix_commit_local( ); if Instant::now() - start > MAX_TIME_TO_CLOSE { panic!( - "Timed out waiting for tables close. 
Still open: {}", + "Timed out waiting for tables close after {} seconds. Still open: {}", + MAX_TIME_TO_CLOSE.as_secs(), closing_tables .iter() .map(|x| x.to_string()) diff --git a/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs b/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs index 13ac5171..81f61477 100644 --- a/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs +++ b/test-integration/schedulecommit/test-scenarios/tests/utils/mod.rs @@ -222,7 +222,7 @@ pub fn assert_account_was_undelegated_on_chain( let owner = ctx.fetch_chain_account_owner(pda).unwrap(); assert_ne!( owner, DELEGATION_PROGRAM_ID, - "{} not owned by delegation program", + "{} should not be owned by delegation program", pda ); assert_eq!(owner, new_owner, "{} has new owner", pda); From 644ad1c9f9186ddf8cba9e63437af3e0e29fd0b6 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 09:46:22 +0545 Subject: [PATCH 50/58] chore: demote some no longer urgent TODOs --- magicblock-api/src/magic_validator.rs | 4 ++-- magicblock-geyser-plugin/src/grpc.rs | 2 +- magicblock-rpc/src/transaction.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/magicblock-api/src/magic_validator.rs b/magicblock-api/src/magic_validator.rs index 32224fb3..d368002e 100644 --- a/magicblock-api/src/magic_validator.rs +++ b/magicblock-api/src/magic_validator.rs @@ -174,7 +174,7 @@ impl MagicValidator { config: MagicValidatorConfig, identity_keypair: Keypair, ) -> ApiResult { - // TODO(thlorenz): @@ this will need to be recreated on each start + // TODO(thlorenz): this will need to be recreated on each start let token = CancellationToken::new(); let (geyser_manager, geyser_rpc_service) = @@ -650,7 +650,7 @@ impl MagicValidator { } async fn ensure_validator_funded_on_chain(&self) -> ApiResult<()> { - // TODO: @@@ configurable? 
+ // TODO(thlorenz) make this configurable in the future const MIN_BALANCE_SOL: u64 = 5; // TODO: @@ duplicate code getting remote_rpc_config let accounts_config = try_convert_accounts_config( diff --git a/magicblock-geyser-plugin/src/grpc.rs b/magicblock-geyser-plugin/src/grpc.rs index aa060e6f..1a74dc3f 100644 --- a/magicblock-geyser-plugin/src/grpc.rs +++ b/magicblock-geyser-plugin/src/grpc.rs @@ -144,7 +144,7 @@ impl GrpcService { block_fail_action: ConfigBlockFailAction, ) { const PROCESSED_MESSAGES_MAX: usize = 31; - // TODO(thlorenz): @@@ This could become a bottleneck affecting latency + // TODO(thlorenz): This could become a bottleneck affecting latency const PROCESSED_MESSAGES_SLEEP: Duration = Duration::from_millis(10); let mut messages: BTreeMap = Default::default(); diff --git a/magicblock-rpc/src/transaction.rs b/magicblock-rpc/src/transaction.rs index 7113ace1..9249c449 100644 --- a/magicblock-rpc/src/transaction.rs +++ b/magicblock-rpc/src/transaction.rs @@ -213,7 +213,7 @@ pub(crate) async fn send_transaction( /// Verifies only the transaction signature and is used when sending a /// transaction to avoid the extra overhead of [sig_verify_transaction_and_check_precompiles] -/// TODO(thlorenz): @@ sigverify takes upwards of 90µs which is 30%+ of +/// TODO(thlorenz): sigverify takes upwards of 90µs which is 30%+ of /// the entire time it takes to execute a transaction. 
/// Therefore this an intermediate solution and we need to investigate verifying the /// wire_transaction instead (solana sigverify implementation is packet based) From 4f8ecc8f64903286e6a5a5d4dbecc0de7d800030 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 09:53:13 +0545 Subject: [PATCH 51/58] feat: committor service persists into ledger path --- magicblock-api/src/magic_validator.rs | 16 +++++++++++----- magicblock-committor-service/src/service.rs | 9 +++------ 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/magicblock-api/src/magic_validator.rs b/magicblock-api/src/magic_validator.rs index d368002e..cf07b225 100644 --- a/magicblock-api/src/magic_validator.rs +++ b/magicblock-api/src/magic_validator.rs @@ -198,22 +198,23 @@ impl MagicValidator { config.validator_config.ledger.reset, )?; - let exit = Arc::::default(); // SAFETY: // this code will never panic as the ledger_path always appends the // rocksdb directory to whatever path is preconfigured for the ledger, // see `Ledger::do_open`, thus this path will always have a parent - let adb_path = ledger + let ledger_parent_path = ledger .ledger_path() .parent() .expect("ledger_path didn't have a parent, should never happen"); + + let exit = Arc::::default(); let bank = Self::init_bank( Some(geyser_manager.clone()), &genesis_config, &config.validator_config.accounts.db, config.validator_config.validator.millis_per_slot, validator_pubkey, - adb_path, + ledger_parent_path, ledger.get_max_blockhash().map(|(slot, _)| slot)?, )?; @@ -310,10 +311,15 @@ impl MagicValidator { &faucet_keypair.pubkey(), ); + let committor_persist_path = + ledger_parent_path.join("committor_service.sqlite"); + debug!( + "Committor service persists to: {}", + committor_persist_path.display() + ); let committor_service = Arc::new(CommittorService::try_start( identity_keypair.insecure_clone(), - // TODO: @@@ config or inside ledger dir - "/tmp/committor_service.sqlite", + committor_persist_path, ChainConfig 
{ rpc_uri: remote_rpc_config.url().to_string(), commitment: remote_rpc_config diff --git a/magicblock-committor-service/src/service.rs b/magicblock-committor-service/src/service.rs index 0cc1f3bd..41c5fa69 100644 --- a/magicblock-committor-service/src/service.rs +++ b/magicblock-committor-service/src/service.rs @@ -1,4 +1,4 @@ -use std::{fmt::Display, path::Path}; +use std::path::Path; use log::*; use magicblock_committor_program::Changeset; @@ -205,12 +205,9 @@ impl CommittorService { chain_config: ChainConfig, ) -> CommittorServiceResult where - P: Display + AsRef, + P: AsRef, { - debug!( - "Starting committor service with config: {:?}, persisting to: {}", - chain_config, persist_file - ); + debug!("Starting committor service with config: {:?}", chain_config); let (sender, receiver) = mpsc::channel(1_000); let cancel_token = CancellationToken::new(); { From 3cf473da8ee703727551cdc2b52e49c0ffa46b42 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 10:09:51 +0545 Subject: [PATCH 52/58] chore: remove duplicate code in magic validator --- magicblock-api/src/magic_validator.rs | 40 +++++++++++++-------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/magicblock-api/src/magic_validator.rs b/magicblock-api/src/magic_validator.rs index cf07b225..cad2f8b5 100644 --- a/magicblock-api/src/magic_validator.rs +++ b/magicblock-api/src/magic_validator.rs @@ -270,14 +270,10 @@ impl MagicValidator { None }; - let accounts_config = - try_convert_accounts_config(&config.validator_config.accounts) - .map_err(ApiError::ConfigError)?; - - let remote_rpc_config = RpcProviderConfig::new( - try_rpc_cluster_from_cluster(&accounts_config.remote_cluster)?, - Some(CommitmentLevel::Confirmed), - ); + let (accounts_config, remote_rpc_config) = + try_get_remote_accounts_and_rpc_config( + &config.validator_config.accounts, + )?; let remote_account_fetcher_worker = RemoteAccountFetcherWorker::new(remote_rpc_config.clone()); @@ -656,20 +652,10 @@ impl 
MagicValidator { } async fn ensure_validator_funded_on_chain(&self) -> ApiResult<()> { - // TODO(thlorenz) make this configurable in the future + // NOTE: 5 SOL seems reasonable, but we may require a different amount in the future const MIN_BALANCE_SOL: u64 = 5; - // TODO: @@ duplicate code getting remote_rpc_config - let accounts_config = try_convert_accounts_config( - &self.config.accounts, - ) - .expect( - "Failed to derive accounts config from provided magicblock config", - ); - let remote_rpc_config = RpcProviderConfig::new( - try_rpc_cluster_from_cluster(&accounts_config.remote_cluster)?, - Some(CommitmentLevel::Confirmed), - ); - + let (_, remote_rpc_config) = + try_get_remote_accounts_and_rpc_config(&self.config.accounts)?; let validator_pubkey = self.bank().get_identity(); let lamports = RpcClient::new_with_commitment( @@ -895,3 +881,15 @@ fn create_worker_runtime(thread_name: &str) -> tokio::runtime::Runtime { .build() .unwrap() } + +fn try_get_remote_accounts_and_rpc_config( + accounts: &magicblock_config::AccountsConfig, +) -> ApiResult<(magicblock_accounts::AccountsConfig, RpcProviderConfig)> { + let accounts_config = + try_convert_accounts_config(accounts).map_err(ApiError::ConfigError)?; + let remote_rpc_config = RpcProviderConfig::new( + try_rpc_cluster_from_cluster(&accounts_config.remote_cluster)?, + Some(CommitmentLevel::Confirmed), + ); + Ok((accounts_config, remote_rpc_config)) +} From a7b4aa5e256f778981fb2b78f48b59ca40ad00d5 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 10:30:04 +0545 Subject: [PATCH 53/58] feat: limiting stale reallocs until we bail --- .../src/commit/commit_using_buffer.rs | 34 ++++++++++++++++--- .../src/commit_stage.rs | 7 ++++ magicblock-committor-service/src/error.rs | 18 ++-------- 3 files changed, 39 insertions(+), 20 deletions(-) diff --git a/magicblock-committor-service/src/commit/commit_using_buffer.rs b/magicblock-committor-service/src/commit/commit_using_buffer.rs index 1c15bd7f..3e161fb8 
100644 --- a/magicblock-committor-service/src/commit/commit_using_buffer.rs +++ b/magicblock-committor-service/src/commit/commit_using_buffer.rs @@ -51,6 +51,7 @@ use crate::{ struct NextReallocs { missing_size: u64, start_idx: usize, + err: Option, } impl CommittorProcessor { @@ -826,11 +827,30 @@ impl CommittorProcessor { blockhash: ephemeral_blockhash, }; + const MAX_STALE_REALLOCS: u8 = 10; + let mut prev_missing_size = 0; + let mut remaining_tries = MAX_STALE_REALLOCS; while let Some(NextReallocs { missing_size, start_idx, + err, }) = next_reallocs { + if missing_size == prev_missing_size { + remaining_tries -= 1; + if remaining_tries == 0 { + return Err( + CommitAccountError::ReallocBufferRanOutOfRetries( + err.unwrap_or("No Error".to_string()), + Arc::new(commit_info.clone()), + commit_strategy, + ), + ); + } + } else { + remaining_tries = MAX_STALE_REALLOCS; + prev_missing_size = missing_size; + } let realloc_ixs = { let realloc_ixs = create_realloc_buffer_ixs_to_add_remaining( @@ -849,7 +869,6 @@ impl CommittorProcessor { start_idx, ) .await; - // TODO(thlorenz): give up at some point } } } @@ -882,6 +901,7 @@ impl CommittorProcessor { return Some(NextReallocs { missing_size, start_idx, + err: Some(format!("{:?}", err)), }); } }; @@ -923,23 +943,27 @@ impl CommittorProcessor { if current_size as u64 >= desired_size { None } else { - Some(desired_size - current_size as u64) + Some((desired_size - current_size as u64, None)) } } // NOTE: if we cannot get the account we must assume that // the entire size we just tried to alloc is still missing Ok(None) => { warn!("buffer account not found"); - Some(missing_size) + Some(( + missing_size, + Some("buffer account not found".to_string()), + )) } Err(err) => { warn!("Failed to get buffer account: {:?}", err); - Some(missing_size) + Some((missing_size, Some(format!("{:?}", err)))) } } - .map(|missing_size| NextReallocs { + .map(|(missing_size, err)| NextReallocs { missing_size, start_idx: count, + err, }) } diff 
--git a/magicblock-committor-service/src/commit_stage.rs b/magicblock-committor-service/src/commit_stage.rs index 1d742037..41d50359 100644 --- a/magicblock-committor-service/src/commit_stage.rs +++ b/magicblock-committor-service/src/commit_stage.rs @@ -142,6 +142,13 @@ impl From for CommitStage { commit_strategy, )) } + ReallocBufferRanOutOfRetries(err, commit_info, commit_strategy) => { + warn!("Realloc buffer ran out of retries: {:?}", err); + Self::BufferAndChunkPartiallyInitialized(( + ci!(commit_info), + commit_strategy, + )) + } WriteChunksRanOutOfRetries(err, commit_info, commit_strategy) => { warn!("Write chunks ran out of retries: {:?}", err); Self::BufferAndChunkPartiallyInitialized(( diff --git a/magicblock-committor-service/src/error.rs b/magicblock-committor-service/src/error.rs index d56b6e84..bf055519 100644 --- a/magicblock-committor-service/src/error.rs +++ b/magicblock-committor-service/src/error.rs @@ -102,6 +102,9 @@ pub enum CommitAccountError { #[error("Failed to deserialize chunks account: {0} ({0:?})")] DeserializeChunksAccount(std::io::Error, Arc, CommitStrategy), + #[error("Failed to affect remaining size via realloc buffer after max retries. Last error {0}")] + ReallocBufferRanOutOfRetries(String, Arc, CommitStrategy), + #[error("Failed to write complete chunks of commit data after max retries. 
Last write error {0:?}")] WriteChunksRanOutOfRetries( Option, @@ -109,18 +112,3 @@ pub enum CommitAccountError { CommitStrategy, ), } - -impl CommitAccountError { - pub fn into_commit_info(self) -> CommitInfo { - use CommitAccountError::*; - let ci = match self { - InitBufferAndChunkAccounts(_, commit_info, _) => { - return *commit_info; - } - GetChunksAccount(_, commit_info, _) => commit_info, - DeserializeChunksAccount(_, commit_info, _) => commit_info, - WriteChunksRanOutOfRetries(_, commit_info, _) => commit_info, - }; - Arc::::unwrap_or_clone(ci) - } -} From 776bbf147b5e24b91b03c686c9c61c0ef7887e61 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 12:10:14 +0545 Subject: [PATCH 54/58] tmp: disabling ledger restore tests to isolate issues --- test-integration/test-runner/bin/run_tests.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs index f735d7c0..1d95f294 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -33,10 +33,10 @@ pub fn main() { return; }; - let Ok(restore_ledger_output) = run_restore_ledger_tests(&manifest_dir) - else { - return; - }; + // let Ok(restore_ledger_output) = run_restore_ledger_tests(&manifest_dir) + // else { + // return; + // }; let Ok(magicblock_api_output) = run_magicblock_api_tests(&manifest_dir) else { @@ -54,7 +54,7 @@ pub fn main() { assert_cargo_tests_passed(scenarios_output); assert_cargo_tests_passed(cloning_output); assert_cargo_tests_passed(issues_frequent_commits_output); - assert_cargo_tests_passed(restore_ledger_output); + // assert_cargo_tests_passed(restore_ledger_output); assert_cargo_tests_passed(magicblock_api_output); assert_cargo_tests_passed(table_mania_output); assert_cargo_tests_passed(committor_output); From aeeb5ae5f855cd691032691c6c29061744aa7ec8 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 
16 May 2025 12:11:43 +0545 Subject: [PATCH 55/58] tmp: disable all workflows but integration tests while isolating issues --- .github/workflows/ci-fmt.yml | 3 --- .github/workflows/ci-lint.yml | 3 --- .github/workflows/ci-test-unit.yml | 3 --- 3 files changed, 9 deletions(-) diff --git a/.github/workflows/ci-fmt.yml b/.github/workflows/ci-fmt.yml index 6feb3ed3..ae5f7d33 100644 --- a/.github/workflows/ci-fmt.yml +++ b/.github/workflows/ci-fmt.yml @@ -1,9 +1,6 @@ on: push: branches: [master] - pull_request: - branches: ["*"] - types: [opened, reopened, synchronize, ready_for_review] name: Run CI - Format diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index a59dc15e..e30adc91 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -1,9 +1,6 @@ on: push: branches: [master] - pull_request: - branches: ["*"] - types: [opened, reopened, synchronize, ready_for_review] name: Run CI - Lint diff --git a/.github/workflows/ci-test-unit.yml b/.github/workflows/ci-test-unit.yml index fa8d84ea..28c96e58 100644 --- a/.github/workflows/ci-test-unit.yml +++ b/.github/workflows/ci-test-unit.yml @@ -1,9 +1,6 @@ on: push: branches: [master] - pull_request: - branches: ["*"] - types: [opened, reopened, synchronize, ready_for_review] name: Run CI - Unit Tests From aedf9a2adcdc97bc785d77fda3073037be5c882b Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 17:10:39 +0545 Subject: [PATCH 56/58] fix: ordering of worker startups --- magicblock-api/src/magic_validator.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/magicblock-api/src/magic_validator.rs b/magicblock-api/src/magic_validator.rs index cad2f8b5..68dad772 100644 --- a/magicblock-api/src/magic_validator.rs +++ b/magicblock-api/src/magic_validator.rs @@ -712,9 +712,12 @@ impl MagicValidator { self.token.clone(), )); - self.start_remote_account_cloner_worker().await?; + // NOTE: these need to startup in the right order, otherwise some 
worker + // that may be needed, i.e. during hydration after ledger replay + // are not started in time self.start_remote_account_fetcher_worker(); self.start_remote_account_updates_worker(); + self.start_remote_account_cloner_worker().await?; self.ledger_truncator.start(); From 1e9921d216a668f880bcd194d63e72dbcf70cc7d Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 17:11:39 +0545 Subject: [PATCH 57/58] Revert "tmp: disabling ledger restore tests to isolate issues" This reverts commit 776bbf147b5e24b91b03c686c9c61c0ef7887e61. --- test-integration/test-runner/bin/run_tests.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs index 1d95f294..f735d7c0 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -33,10 +33,10 @@ pub fn main() { return; }; - // let Ok(restore_ledger_output) = run_restore_ledger_tests(&manifest_dir) - // else { - // return; - // }; + let Ok(restore_ledger_output) = run_restore_ledger_tests(&manifest_dir) + else { + return; + }; let Ok(magicblock_api_output) = run_magicblock_api_tests(&manifest_dir) else { @@ -54,7 +54,7 @@ pub fn main() { assert_cargo_tests_passed(scenarios_output); assert_cargo_tests_passed(cloning_output); assert_cargo_tests_passed(issues_frequent_commits_output); - // assert_cargo_tests_passed(restore_ledger_output); + assert_cargo_tests_passed(restore_ledger_output); assert_cargo_tests_passed(magicblock_api_output); assert_cargo_tests_passed(table_mania_output); assert_cargo_tests_passed(committor_output); From 0e0ef0f485866db83f31e9eb490ec2660072ea5a Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Fri, 16 May 2025 17:11:54 +0545 Subject: [PATCH 58/58] Revert "tmp: disable all workflows but integration tests while isolating issues" This reverts commit aeeb5ae5f855cd691032691c6c29061744aa7ec8. 
--- .github/workflows/ci-fmt.yml | 3 +++ .github/workflows/ci-lint.yml | 3 +++ .github/workflows/ci-test-unit.yml | 3 +++ 3 files changed, 9 insertions(+) diff --git a/.github/workflows/ci-fmt.yml b/.github/workflows/ci-fmt.yml index ae5f7d33..6feb3ed3 100644 --- a/.github/workflows/ci-fmt.yml +++ b/.github/workflows/ci-fmt.yml @@ -1,6 +1,9 @@ on: push: branches: [master] + pull_request: + branches: ["*"] + types: [opened, reopened, synchronize, ready_for_review] name: Run CI - Format diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index e30adc91..a59dc15e 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -1,6 +1,9 @@ on: push: branches: [master] + pull_request: + branches: ["*"] + types: [opened, reopened, synchronize, ready_for_review] name: Run CI - Lint diff --git a/.github/workflows/ci-test-unit.yml b/.github/workflows/ci-test-unit.yml index 28c96e58..fa8d84ea 100644 --- a/.github/workflows/ci-test-unit.yml +++ b/.github/workflows/ci-test-unit.yml @@ -1,6 +1,9 @@ on: push: branches: [master] + pull_request: + branches: ["*"] + types: [opened, reopened, synchronize, ready_for_review] name: Run CI - Unit Tests