diff --git a/.github/workflows/code_coverage.yml b/.github/workflows/code_coverage.yml index 6dec3c08..f07a605f 100644 --- a/.github/workflows/code_coverage.yml +++ b/.github/workflows/code_coverage.yml @@ -34,7 +34,7 @@ jobs: - name: Make coverage directory run: mkdir coverage - name: Run grcov - run: grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --keep-only '**/crates/**' --ignore '**/tests/**' --ignore '**/examples/**' -o ./coverage/lcov.info + run: grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --keep-only '**/wallet/**' --ignore '**/tests/**' --ignore '**/examples/**' -o ./coverage/lcov.info - name: Generate HTML coverage report run: genhtml -o coverage-report.html --ignore-errors unmapped ./coverage/lcov.info - name: Coveralls upload diff --git a/.github/workflows/cont_integration.yml b/.github/workflows/cont_integration.yml index f43a3d59..7cf5b305 100644 --- a/.github/workflows/cont_integration.yml +++ b/.github/workflows/cont_integration.yml @@ -27,7 +27,6 @@ jobs: - version: ${{ needs.prepare.outputs.rust_version }} clippy: true - version: 1.63.0 # Overall MSRV - - version: 1.75.0 # Specific MSRV for `bdk_electrum` features: - --no-default-features --features miniscript/no-std,bdk_chain/hashbrown - --all-features @@ -44,14 +43,6 @@ jobs: profile: minimal - name: Rust Cache uses: Swatinem/rust-cache@v2.7.7 - - name: Pin dependencies for 1.75 - if: matrix.rust.version == '1.75.0' - run: | - cargo update -p home --precise "0.5.9" - cargo update -p native-tls --precise "0.2.13" - cargo update -p idna_adapter --precise "1.1.0" - cargo update -p base64ct --precise "1.6.0" - cargo update -p minreq --precise "2.13.2" - name: Pin dependencies for MSRV if: matrix.rust.version == '1.63.0' run: ./ci/pin-msrv.sh @@ -59,13 +50,8 @@ jobs: env: MATRIX_RUST_VERSION: ${{ matrix.rust.version }} run: | - if [ $MATRIX_RUST_VERSION = '1.63.0' ]; then - cargo build --workspace --exclude 'example_*' --exclude 'bdk_electrum' ${{ matrix.features }} - cargo test --workspace --exclude 'example_*' --exclude 'bdk_electrum' ${{ matrix.features }} - else - cargo build --workspace --exclude 'example_*' ${{ matrix.features }} - cargo test --workspace --exclude 'example_*' ${{ matrix.features }} - fi + cargo build --workspace --exclude 'example_*' ${{ matrix.features }} + cargo test --workspace --exclude 'example_*' ${{ matrix.features }} check-no-std: needs: prepare @@ -85,18 +71,10 @@ jobs: # target: "thumbv6m-none-eabi" - name: Rust Cache uses: Swatinem/rust-cache@v2.7.7 - - name: Check bdk_chain - working-directory: ./crates/chain - # TODO "--target thumbv6m-none-eabi" should work but currently does not - run: cargo check --no-default-features --features miniscript/no-std,hashbrown - name: Check bdk wallet - working-directory: ./crates/wallet + working-directory: ./wallet # TODO "--target thumbv6m-none-eabi" should work but currently does not run: cargo check --no-default-features --features miniscript/no-std,bdk_chain/hashbrown - - name: Check esplora - working-directory: ./crates/esplora - # TODO "--target thumbv6m-none-eabi" should work but currently does not - run: cargo check --no-default-features --features bdk_chain/hashbrown check-wasm: needs: prepare @@ -124,11 +102,8 @@ jobs: - name: Rust Cache uses: Swatinem/rust-cache@v2.7.7 - name: Check bdk wallet - working-directory: ./crates/wallet + working-directory: ./wallet run: cargo check --target wasm32-unknown-unknown --no-default-features --features 
miniscript/no-std,bdk_chain/hashbrown - - name: Check esplora - working-directory: ./crates/esplora - run: cargo check --target wasm32-unknown-unknown --no-default-features --features bdk_core/hashbrown,async fmt: needs: prepare @@ -179,10 +154,6 @@ jobs: strategy: matrix: example-dir: - - example_cli - - example_bitcoind_rpc_polling - - example_electrum - - example_esplora - example_wallet_electrum - example_wallet_esplora_async - example_wallet_esplora_blocking @@ -201,5 +172,5 @@ jobs: - name: Rust Cache uses: Swatinem/rust-cache@v2.7.7 - name: Build - working-directory: example-crates/${{ matrix.example-dir }} + working-directory: examples/${{ matrix.example-dir }} run: cargo build diff --git a/Cargo.toml b/Cargo.toml index 2abc16bd..fbde1ace 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,22 +1,11 @@ [workspace] resolver = "2" members = [ - "crates/wallet", - "crates/chain", - "crates/core", - "crates/file_store", - "crates/electrum", - "crates/esplora", - "crates/bitcoind_rpc", - "crates/testenv", - "example-crates/example_cli", - "example-crates/example_electrum", - "example-crates/example_esplora", - "example-crates/example_bitcoind_rpc_polling", - "example-crates/example_wallet_electrum", - "example-crates/example_wallet_esplora_blocking", - "example-crates/example_wallet_esplora_async", - "example-crates/example_wallet_rpc", + "wallet", + "examples/example_wallet_electrum", + "examples/example_wallet_esplora_blocking", + "examples/example_wallet_esplora_async", + "examples/example_wallet_rpc", ] [workspace.package] diff --git a/README.md b/README.md index dc9ae77a..ce902c82 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ -# The Bitcoin Dev Kit +# The Bitcoin Dev Kit Wallet
-[BDK logo]
+[BDK Wallet logo]
@@ -11,9 +11,9 @@

[badge: Crate Info]
-[badge: MIT or Apache-2.0 Licensed]
-[badge: CI Status]
+[badge: MIT or Apache-2.0 Licensed]
+[badge: CI Status]
+[badge: Wallet API Docs]
[badge: Rustc Version 1.63.0+]
[badge: Chat on Discord]

@@ -28,44 +28,30 @@

## About

-The `bdk` libraries aims to provide well engineered and reviewed components for Bitcoin based applications.
+The `bdk_wallet` project provides a high-level descriptor-based wallet API for building Bitcoin applications.

It is built upon the excellent [`rust-bitcoin`] and [`rust-miniscript`] crates.

## Architecture

-The project is split up into several crates in the `/crates` directory:
+There is currently only one published crate in this repository:

-- [`wallet`](./crates/wallet): Contains the central high level `Wallet` type that is built from the low-level mechanisms provided by the other components
-- [`chain`](./crates/chain): Tools for storing and indexing chain data
-- [`file_store`](./crates/file_store): Persistence backend for storing chain data in a single file. Intended for testing and development purposes, not for production.
-- [`esplora`](./crates/esplora): Extends the [`esplora-client`] crate with methods to fetch chain data from an esplora HTTP server in the form that [`bdk_chain`] and `Wallet` can consume.
-- [`electrum`](./crates/electrum): Extends the [`electrum-client`] crate with methods to fetch chain data from an electrum server in the form that [`bdk_chain`] and `Wallet` can consume.
+- [`wallet`](./wallet): Contains the central high-level `Wallet` type that is built from the low-level mechanisms provided by the other components.
+
+Crates that `bdk_wallet` depends on are found in the [`bdk`] repository.

-Fully working examples of how to use these components are in `/example-crates`:
-- [`example_cli`](./example-crates/example_cli): Library used by the `example_*` crates. Provides utilities for syncing, showing the balance, generating addresses and creating transactions without using the bdk_wallet `Wallet`.
-- [`example_electrum`](./example-crates/example_electrum): A command line Bitcoin wallet application built on top of `example_cli` and the `electrum` crate. It shows the power of the bdk tools (`chain` + `file_store` + `electrum`), without depending on the main `bdk_wallet` library.
-- [`example_esplora`](./example-crates/example_esplora): A command line Bitcoin wallet application built on top of `example_cli` and the `esplora` crate. It shows the power of the bdk tools (`chain` + `file_store` + `esplora`), without depending on the main `bdk_wallet` library.
-- [`example_bitcoind_rpc_polling`](./example-crates/example_bitcoind_rpc_polling): A command line Bitcoin wallet application built on top of `example_cli` and the `bitcoind_rpc` crate. It shows the power of the bdk tools (`chain` + `file_store` + `bitcoind_rpc`), without depending on the main `bdk_wallet` library.
-- [`example_wallet_esplora_blocking`](./example-crates/example_wallet_esplora_blocking): Uses the `Wallet` to sync and spend using the Esplora blocking interface.
-- [`example_wallet_esplora_async`](./example-crates/example_wallet_esplora_async): Uses the `Wallet` to sync and spend using the Esplora asynchronous interface.
-- [`example_wallet_electrum`](./example-crates/example_wallet_electrum): Uses the `Wallet` to sync and spend using Electrum.
+Fully working examples of how to use these components are in `/examples`:
+- [`example_wallet_esplora_blocking`](examples/example_wallet_esplora_blocking): Uses the `Wallet` to sync and spend using the Esplora blocking interface.
+- [`example_wallet_esplora_async`](examples/example_wallet_esplora_async): Uses the `Wallet` to sync and spend using the Esplora asynchronous interface.
+- [`example_wallet_electrum`](examples/example_wallet_electrum): Uses the `Wallet` to sync and spend using Electrum.
+
+[`bdk`]: https://github.com/bitcoindevkit/bdk

[`rust-miniscript`]: https://github.com/rust-bitcoin/rust-miniscript
[`rust-bitcoin`]: https://github.com/rust-bitcoin/rust-bitcoin
-[`esplora-client`]: https://docs.rs/esplora-client/
-[`electrum-client`]: https://docs.rs/electrum-client/
-[`bdk_chain`]: https://docs.rs/bdk-chain/
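To make the high-level API concrete, here is a minimal editorial sketch (not part of the patch itself) of creating a non-persisted `Wallet` and revealing an address. It assumes the `bdk_wallet` 1.x API; the signet descriptors are the test vectors from the `filter_iter` example removed further down in this diff:

```rust
use bdk_wallet::bitcoin::Network;
use bdk_wallet::{KeychainKind, Wallet};

// Signet test descriptors, reused from the removed `filter_iter` example.
const EXTERNAL: &str = "tr([7d94197e]tprv8ZgxMBicQKsPe1chHGzaa84k1inY2nAXUL8iPSyWESPrEst4E5oCFXhPATqj5fvw34LDknJz7rtXyEC4fKoXryUdc9q87pTTzfQyv61cKdE/86'/1'/0'/0/*)#uswl2jj7";
const INTERNAL: &str = "tr([7d94197e]tprv8ZgxMBicQKsPe1chHGzaa84k1inY2nAXUL8iPSyWESPrEst4E5oCFXhPATqj5fvw34LDknJz7rtXyEC4fKoXryUdc9q87pTTzfQyv61cKdE/86'/1'/0'/1/*)#dyt7h8zx";

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build an in-memory (non-persisted) wallet from the two descriptors.
    let mut wallet = Wallet::create(EXTERNAL, INTERNAL)
        .network(Network::Signet)
        .create_wallet_no_persist()?;

    // Reveal the next unused address on the external keychain.
    let info = wallet.reveal_next_address(KeychainKind::External);
    println!("address #{}: {}", info.index, info.address);
    Ok(())
}
```

Syncing such a wallet goes through one of the chain-source crates that now live in the [`bdk`] repository, as the `/examples` directory above demonstrates.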
## Minimum Supported Rust Version (MSRV)

-The BDK library maintains a MSRV of 1.63.0. This includes the following crates:
-
-- `bdk_core`
-- `bdk_chain`
-- `bdk_bitcoind_rpc`.
-- `bdk_esplora`.
-- `bdk_wallet`.
-The MSRV of `bdk_electrum` is 1.75.0.
+The libraries in this repository maintain an MSRV of 1.63.0. To build with the MSRV of 1.63.0 you will need to pin dependencies by running the [`pin-msrv.sh`](./ci/pin-msrv.sh) script.

diff --git a/ci/pin-msrv.sh b/ci/pin-msrv.sh
index 2e1a09e5..61fc4878 100755
--- a/ci/pin-msrv.sh
+++ b/ci/pin-msrv.sh
@@ -10,21 +10,13 @@ set -euo pipefail
# cargo clean
# rustup override set 1.63.0

-cargo update -p zstd-sys --precise "2.0.8+zstd.1.5.5"
-cargo update -p time --precise "0.3.20"
cargo update -p home --precise "0.5.5"
-cargo update -p proptest --precise "1.2.0"
cargo update -p url --precise "2.5.0"
cargo update -p tokio --precise "1.38.1"
cargo update -p tokio-util --precise "0.7.11"
cargo update -p indexmap --precise "2.5.0"
cargo update -p security-framework-sys --precise "2.11.1"
-cargo update -p csv --precise "1.3.0"
-cargo update -p unicode-width --precise "0.1.13"
-cargo update -p native-tls --precise "0.2.13"
-cargo update -p flate2 --precise "1.0.35"
-cargo update -p bzip2-sys --precise "0.1.12"
cargo update -p ring --precise "0.17.12"
cargo update -p once_cell --precise "1.20.3"
-cargo update -p base64ct --precise "1.6.0"
cargo update -p minreq --precise "2.13.2"
+cargo update -p native-tls --precise "0.2.13"
\ No newline at end of file

diff --git a/crates/bitcoind_rpc/CHANGELOG.md b/crates/bitcoind_rpc/CHANGELOG.md
deleted file mode 100644
index 6c0c8c68..00000000
--- a/crates/bitcoind_rpc/CHANGELOG.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Changelog
-
-All notable changes to this project can be found here and in each release's git tag and can be viewed with `git tag -ln100 "v*"`. See also [DEVELOPMENT_CYCLE.md](../../DEVELOPMENT_CYCLE.md) for more details.
-
-Contributors do not need to change this file but do need to add changelog details in their PR descriptions. The person making the next release will collect changelog details from included PRs and edit this file prior to each release.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
- -## [bitcoind_rpc-0.18.0] - -### Added - -- Added `bip158` module as a means of updating `bdk_chain` structures #1614 - -## [bitcoind_rpc-0.17.1] - -### Changed - -- Minor updates to fix new rustc 1.83.0 clippy warnings #1776 - -[bitcoind_rpc-0.17.1]: https://github.com/bitcoindevkit/bdk/releases/tag/bitcoind_rpc-0.17.1 -[bitcoind_rpc-0.18.0]: https://github.com/bitcoindevkit/bdk/releases/tag/bitcoind_rpc-0.18.0 diff --git a/crates/bitcoind_rpc/Cargo.toml b/crates/bitcoind_rpc/Cargo.toml deleted file mode 100644 index 7e5eda93..00000000 --- a/crates/bitcoind_rpc/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "bdk_bitcoind_rpc" -version = "0.18.0" -edition = "2021" -rust-version = "1.63" -homepage = "https://bitcoindevkit.org" -repository = "https://github.com/bitcoindevkit/bdk" -documentation = "https://docs.rs/bdk_bitcoind_rpc" -description = "This crate is used for emitting blockchain data from the `bitcoind` RPC interface." -license = "MIT OR Apache-2.0" -readme = "README.md" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[lints] -workspace = true - -[dependencies] -bitcoin = { version = "0.32.0", default-features = false } -bitcoincore-rpc = { version = "0.19.0" } -bdk_core = { path = "../core", version = "0.4.1", default-features = false } - -[dev-dependencies] -bdk_bitcoind_rpc = { path = "." } -bdk_testenv = { path = "../testenv" } -bdk_chain = { path = "../chain" } - -[features] -default = ["std"] -std = ["bitcoin/std", "bdk_core/std"] -serde = ["bitcoin/serde", "bdk_core/serde"] - -[[example]] -name = "filter_iter" -required-features = ["std"] diff --git a/crates/bitcoind_rpc/README.md b/crates/bitcoind_rpc/README.md deleted file mode 100644 index 12de8702..00000000 --- a/crates/bitcoind_rpc/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# BDK Bitcoind RPC - -This crate is used for emitting blockchain data from the `bitcoind` RPC interface. diff --git a/crates/bitcoind_rpc/examples/README.md b/crates/bitcoind_rpc/examples/README.md deleted file mode 100644 index 34cee07b..00000000 --- a/crates/bitcoind_rpc/examples/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Example bitcoind RPC sync - -### Simple Signet Test with FilterIter - -1. Start local signet bitcoind. (~8 GB space required) - ``` - mkdir -p /tmp/signet/bitcoind - bitcoind -signet -server -fallbackfee=0.0002 -blockfilterindex -datadir=/tmp/signet/bitcoind -daemon - tail -f /tmp/signet/bitcoind/signet/debug.log - ``` - Watch debug.log and wait for bitcoind to finish syncing. - -2. Set bitcoind env variables. - ``` - export RPC_URL=127.0.0.1:38332 - export RPC_COOKIE=/tmp/signet/bitcoind/signet/.cookie - ``` -3. Run `filter_iter` example. 
- ``` - cargo run -p bdk_bitcoind_rpc --example filter_iter - ``` \ No newline at end of file diff --git a/crates/bitcoind_rpc/examples/filter_iter.rs b/crates/bitcoind_rpc/examples/filter_iter.rs deleted file mode 100644 index 55c3325d..00000000 --- a/crates/bitcoind_rpc/examples/filter_iter.rs +++ /dev/null @@ -1,111 +0,0 @@ -#![allow(clippy::print_stdout)] -use std::time::Instant; - -use anyhow::Context; -use bdk_bitcoind_rpc::bip158::{Event, EventInner, FilterIter}; -use bdk_chain::bitcoin::{constants::genesis_block, secp256k1::Secp256k1, Network}; -use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex; -use bdk_chain::local_chain::LocalChain; -use bdk_chain::miniscript::Descriptor; -use bdk_chain::{BlockId, ConfirmationBlockTime, IndexedTxGraph, SpkIterator}; -use bdk_testenv::anyhow; -use bitcoin::Address; - -// This example shows how BDK chain and tx-graph structures are updated using compact -// filters syncing. Assumes a connection can be made to a bitcoin node via environment -// variables `RPC_URL` and `RPC_COOKIE`. - -// Usage: `cargo run -p bdk_bitcoind_rpc --example filter_iter` - -const EXTERNAL: &str = "tr([7d94197e]tprv8ZgxMBicQKsPe1chHGzaa84k1inY2nAXUL8iPSyWESPrEst4E5oCFXhPATqj5fvw34LDknJz7rtXyEC4fKoXryUdc9q87pTTzfQyv61cKdE/86'/1'/0'/0/*)#uswl2jj7"; -const INTERNAL: &str = "tr([7d94197e]tprv8ZgxMBicQKsPe1chHGzaa84k1inY2nAXUL8iPSyWESPrEst4E5oCFXhPATqj5fvw34LDknJz7rtXyEC4fKoXryUdc9q87pTTzfQyv61cKdE/86'/1'/0'/1/*)#dyt7h8zx"; -const SPK_COUNT: u32 = 25; -const NETWORK: Network = Network::Signet; - -const START_HEIGHT: u32 = 170_000; -const START_HASH: &str = "00000041c812a89f084f633e4cf47e819a2f6b1c0a15162355a930410522c99d"; - -fn main() -> anyhow::Result<()> { - // Setup receiving chain and graph structures. - let secp = Secp256k1::new(); - let (descriptor, _) = Descriptor::parse_descriptor(&secp, EXTERNAL)?; - let (change_descriptor, _) = Descriptor::parse_descriptor(&secp, INTERNAL)?; - let (mut chain, _) = LocalChain::from_genesis_hash(genesis_block(NETWORK).block_hash()); - let mut graph = IndexedTxGraph::>::new({ - let mut index = KeychainTxOutIndex::default(); - index.insert_descriptor("external", descriptor.clone())?; - index.insert_descriptor("internal", change_descriptor.clone())?; - index - }); - - // Assume a minimum birthday height - let block = BlockId { - height: START_HEIGHT, - hash: START_HASH.parse()?, - }; - let _ = chain.insert_block(block)?; - - // Configure RPC client - let url = std::env::var("RPC_URL").context("must set RPC_URL")?; - let cookie = std::env::var("RPC_COOKIE").context("must set RPC_COOKIE")?; - let rpc_client = - bitcoincore_rpc::Client::new(&url, bitcoincore_rpc::Auth::CookieFile(cookie.into()))?; - - // Initialize block emitter - let cp = chain.tip(); - let start_height = cp.height(); - let mut emitter = FilterIter::new_with_checkpoint(&rpc_client, cp); - for (_, desc) in graph.index.keychains() { - let spks = SpkIterator::new_with_range(desc, 0..SPK_COUNT).map(|(_, spk)| spk); - emitter.add_spks(spks); - } - - let start = Instant::now(); - - // Sync - if let Some(tip) = emitter.get_tip()? 
{ - let blocks_to_scan = tip.height - start_height; - - for event in emitter.by_ref() { - let event = event?; - let curr = event.height(); - // apply relevant blocks - if let Event::Block(EventInner { height, ref block }) = event { - let _ = graph.apply_block_relevant(block, height); - println!("Matched block {}", curr); - } - if curr % 1000 == 0 { - let progress = (curr - start_height) as f32 / blocks_to_scan as f32; - println!("[{:.2}%]", progress * 100.0); - } - } - // update chain - if let Some(cp) = emitter.chain_update() { - let _ = chain.apply_update(cp)?; - } - } - - println!("\ntook: {}s", start.elapsed().as_secs()); - println!("Local tip: {}", chain.tip().height()); - let unspent: Vec<_> = graph - .graph() - .filter_chain_unspents( - &chain, - chain.tip().block_id(), - graph.index.outpoints().clone(), - ) - .collect(); - if !unspent.is_empty() { - println!("\nUnspent"); - for (index, utxo) in unspent { - // (k, index) | value | outpoint | - println!("{:?} | {} | {}", index, utxo.txout.value, utxo.outpoint); - } - } - - let unused_spk = graph.index.reveal_next_spk("external").unwrap().0 .1; - let unused_address = Address::from_script(&unused_spk, NETWORK)?; - println!("Next external address: {}", unused_address); - - Ok(()) -} diff --git a/crates/bitcoind_rpc/src/bip158.rs b/crates/bitcoind_rpc/src/bip158.rs deleted file mode 100644 index 5419716b..00000000 --- a/crates/bitcoind_rpc/src/bip158.rs +++ /dev/null @@ -1,267 +0,0 @@ -//! Compact block filters sync over RPC. For more details refer to [BIP157][0]. -//! -//! This module is home to [`FilterIter`], a structure that returns bitcoin blocks by matching -//! a list of script pubkeys against a [BIP158][1] [`BlockFilter`]. -//! -//! [0]: https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki -//! [1]: https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki - -use bdk_core::collections::BTreeMap; -use core::fmt; - -use bdk_core::bitcoin; -use bdk_core::{BlockId, CheckPoint}; -use bitcoin::{ - bip158::{self, BlockFilter}, - Block, BlockHash, ScriptBuf, -}; -use bitcoincore_rpc; -use bitcoincore_rpc::RpcApi; - -/// Block height -type Height = u32; - -/// Type that generates block [`Event`]s by matching a list of script pubkeys against a -/// [`BlockFilter`]. -#[derive(Debug)] -pub struct FilterIter<'c, C> { - // RPC client - client: &'c C, - // SPK inventory - spks: Vec, - // local cp - cp: Option, - // blocks map - blocks: BTreeMap, - // best height counter - height: Height, - // stop height - stop: Height, -} - -impl<'c, C: RpcApi> FilterIter<'c, C> { - /// Construct [`FilterIter`] from a given `client` and start `height`. - pub fn new_with_height(client: &'c C, height: u32) -> Self { - Self { - client, - spks: vec![], - cp: None, - blocks: BTreeMap::new(), - height, - stop: 0, - } - } - - /// Construct [`FilterIter`] from a given `client` and [`CheckPoint`]. - pub fn new_with_checkpoint(client: &'c C, cp: CheckPoint) -> Self { - let mut filter_iter = Self::new_with_height(client, cp.height()); - filter_iter.cp = Some(cp); - filter_iter - } - - /// Extends `self` with an iterator of spks. - pub fn add_spks(&mut self, spks: impl IntoIterator) { - self.spks.extend(spks) - } - - /// Add spk to the list of spks to scan with. - pub fn add_spk(&mut self, spk: ScriptBuf) { - self.spks.push(spk); - } - - /// Get the next filter and increment the current best height. - /// - /// Returns `Ok(None)` when the stop height is exceeded. 
-    fn next_filter(&mut self) -> Result<Option<NextFilter>, Error> {
-        if self.height > self.stop {
-            return Ok(None);
-        }
-        let height = self.height;
-        let hash = match self.blocks.get(&height) {
-            Some(h) => *h,
-            None => self.client.get_block_hash(height as u64)?,
-        };
-        let filter_bytes = self.client.get_block_filter(&hash)?.filter;
-        let filter = BlockFilter::new(&filter_bytes);
-        self.height += 1;
-        Ok(Some((BlockId { height, hash }, filter)))
-    }
-
-    /// Get the remote tip.
-    ///
-    /// Returns `None` if the remote height is not strictly greater than the height of this
-    /// [`FilterIter`].
-    pub fn get_tip(&mut self) -> Result<Option<BlockId>, Error> {
-        let tip_hash = self.client.get_best_block_hash()?;
-        let mut header = self.client.get_block_header_info(&tip_hash)?;
-        let tip_height = header.height as u32;
-        if self.height >= tip_height {
-            // nothing to do
-            return Ok(None);
-        }
-        self.blocks.insert(tip_height, tip_hash);
-
-        // if we have a checkpoint we use a lookback of ten blocks
-        // to ensure consistency of the local chain
-        if let Some(cp) = self.cp.as_ref() {
-            // adjust start height to point of agreement + 1
-            let base = self.find_base_with(cp.clone())?;
-            self.height = base.height + 1;
-
-            for _ in 0..9 {
-                let hash = match header.previous_block_hash {
-                    Some(hash) => hash,
-                    None => break,
-                };
-                header = self.client.get_block_header_info(&hash)?;
-                let height = header.height as u32;
-                if height < self.height {
-                    break;
-                }
-                self.blocks.insert(height, hash);
-            }
-        }
-
-        self.stop = tip_height;
-
-        Ok(Some(BlockId {
-            height: tip_height,
-            hash: tip_hash,
-        }))
-    }
-}
-
-/// Alias for a compact filter and associated block id.
-type NextFilter = (BlockId, BlockFilter);
-
-/// Event inner type
-#[derive(Debug, Clone)]
-pub struct EventInner {
-    /// Height
-    pub height: Height,
-    /// Block
-    pub block: Block,
-}
-
-/// Kind of event produced by [`FilterIter`].
-#[derive(Debug, Clone)]
-pub enum Event {
-    /// Block
-    Block(EventInner),
-    /// No match
-    NoMatch(Height),
-}
-
-impl Event {
-    /// Whether this event contains a matching block.
-    pub fn is_match(&self) -> bool {
-        matches!(self, Event::Block(_))
-    }
-
-    /// Get the height of this event.
-    pub fn height(&self) -> Height {
-        match self {
-            Self::Block(EventInner { height, .. }) => *height,
-            Self::NoMatch(h) => *h,
-        }
-    }
-}
-
-impl<C: RpcApi> Iterator for FilterIter<'_, C> {
-    type Item = Result<Event, Error>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        (|| -> Result<_, Error> {
-            // if the next filter matches any of our watched spks, get the block
-            // and return it, inserting relevant block ids along the way
-            self.next_filter()?.map_or(Ok(None), |(block, filter)| {
-                let height = block.height;
-                let hash = block.hash;
-
-                if self.spks.is_empty() {
-                    Err(Error::NoScripts)
-                } else if filter
-                    .match_any(&hash, self.spks.iter().map(|script| script.as_bytes()))
-                    .map_err(Error::Bip158)?
-                {
-                    let block = self.client.get_block(&hash)?;
-                    self.blocks.insert(height, hash);
-                    let inner = EventInner { height, block };
-                    Ok(Some(Event::Block(inner)))
-                } else {
-                    Ok(Some(Event::NoMatch(height)))
-                }
-            })
-        })()
-        .transpose()
-    }
-}
-
-impl<C: RpcApi> FilterIter<'_, C> {
-    /// Returns the point of agreement between `self` and the given `cp`.
- fn find_base_with(&mut self, mut cp: CheckPoint) -> Result { - loop { - let height = cp.height(); - let fetched_hash = match self.blocks.get(&height) { - Some(hash) => *hash, - None if height == 0 => cp.hash(), - _ => self.client.get_block_hash(height as _)?, - }; - if cp.hash() == fetched_hash { - // ensure this block also exists in self - self.blocks.insert(height, cp.hash()); - return Ok(cp.block_id()); - } - // remember conflicts - self.blocks.insert(height, fetched_hash); - cp = cp.prev().expect("must break before genesis"); - } - } - - /// Returns a chain update from the newly scanned blocks. - /// - /// Returns `None` if this [`FilterIter`] was not constructed using a [`CheckPoint`], or - /// if no blocks have been fetched for example by using [`get_tip`](Self::get_tip). - pub fn chain_update(&mut self) -> Option { - if self.cp.is_none() || self.blocks.is_empty() { - return None; - } - - // note: to connect with the local chain we must guarantee that `self.blocks.first()` - // is also the point of agreement with `self.cp`. - Some( - CheckPoint::from_block_ids(self.blocks.iter().map(BlockId::from)) - .expect("blocks must be in order"), - ) - } -} - -/// Errors that may occur during a compact filters sync. -#[derive(Debug)] -pub enum Error { - /// bitcoin bip158 error - Bip158(bip158::Error), - /// attempted to scan blocks without any script pubkeys - NoScripts, - /// `bitcoincore_rpc` error - Rpc(bitcoincore_rpc::Error), -} - -impl From for Error { - fn from(e: bitcoincore_rpc::Error) -> Self { - Self::Rpc(e) - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Bip158(e) => e.fmt(f), - Self::NoScripts => write!(f, "no script pubkeys were provided to match with"), - Self::Rpc(e) => e.fmt(f), - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for Error {} diff --git a/crates/bitcoind_rpc/src/lib.rs b/crates/bitcoind_rpc/src/lib.rs deleted file mode 100644 index 3fa17ef1..00000000 --- a/crates/bitcoind_rpc/src/lib.rs +++ /dev/null @@ -1,331 +0,0 @@ -//! This crate is used for emitting blockchain data from the `bitcoind` RPC interface. It does not -//! use the wallet RPC API, so this crate can be used with wallet-disabled Bitcoin Core nodes. -//! -//! [`Emitter`] is the main structure which sources blockchain data from [`bitcoincore_rpc::Client`]. -//! -//! To only get block updates (exclude mempool transactions), the caller can use -//! [`Emitter::next_block`] or/and [`Emitter::next_header`] until it returns `Ok(None)` (which means -//! the chain tip is reached). A separate method, [`Emitter::mempool`] can be used to emit the whole -//! mempool. -#![warn(missing_docs)] - -use bdk_core::{BlockId, CheckPoint}; -use bitcoin::{block::Header, Block, BlockHash, Transaction}; -use bitcoincore_rpc::bitcoincore_rpc_json; - -pub mod bip158; - -pub use bitcoincore_rpc; - -/// The [`Emitter`] is used to emit data sourced from [`bitcoincore_rpc::Client`]. -/// -/// Refer to [module-level documentation] for more. -/// -/// [module-level documentation]: crate -pub struct Emitter<'c, C> { - client: &'c C, - start_height: u32, - - /// The checkpoint of the last-emitted block that is in the best chain. If it is later found - /// that the block is no longer in the best chain, it will be popped off from here. - last_cp: CheckPoint, - - /// The block result returned from rpc of the last-emitted block. 
As this result contains the - /// next block's block hash (which we use to fetch the next block), we set this to `None` - /// whenever there are no more blocks, or the next block is no longer in the best chain. This - /// gives us an opportunity to re-fetch this result. - last_block: Option, - - /// The latest first-seen epoch of emitted mempool transactions. This is used to determine - /// whether a mempool transaction is already emitted. - last_mempool_time: usize, - - /// The last emitted block during our last mempool emission. This is used to determine whether - /// there has been a reorg since our last mempool emission. - last_mempool_tip: Option, -} - -impl<'c, C: bitcoincore_rpc::RpcApi> Emitter<'c, C> { - /// Construct a new [`Emitter`]. - /// - /// `last_cp` informs the emitter of the chain we are starting off with. This way, the emitter - /// can start emission from a block that connects to the original chain. - /// - /// `start_height` starts emission from a given height (if there are no conflicts with the - /// original chain). - pub fn new(client: &'c C, last_cp: CheckPoint, start_height: u32) -> Self { - Self { - client, - start_height, - last_cp, - last_block: None, - last_mempool_time: 0, - last_mempool_tip: None, - } - } - - /// Emit mempool transactions, alongside their first-seen unix timestamps. - /// - /// This method emits each transaction only once, unless we cannot guarantee the transaction's - /// ancestors are already emitted. - /// - /// To understand why, consider a receiver which filters transactions based on whether it - /// alters the UTXO set of tracked script pubkeys. If an emitted mempool transaction spends a - /// tracked UTXO which is confirmed at height `h`, but the receiver has only seen up to block - /// of height `h-1`, we want to re-emit this transaction until the receiver has seen the block - /// at height `h`. - pub fn mempool(&mut self) -> Result, bitcoincore_rpc::Error> { - let client = self.client; - - // This is the emitted tip height during the last mempool emission. - let prev_mempool_tip = self - .last_mempool_tip - // We use `start_height - 1` as we cannot guarantee that the block at - // `start_height` has been emitted. - .unwrap_or(self.start_height.saturating_sub(1)); - - // Mempool txs come with a timestamp of when the tx is introduced to the mempool. We keep - // track of the latest mempool tx's timestamp to determine whether we have seen a tx - // before. `prev_mempool_time` is the previous timestamp and `last_time` records what will - // be the new latest timestamp. - let prev_mempool_time = self.last_mempool_time; - let mut latest_time = prev_mempool_time; - - let txs_to_emit = client - .get_raw_mempool_verbose()? - .into_iter() - .filter_map({ - let latest_time = &mut latest_time; - move |(txid, tx_entry)| -> Option> { - let tx_time = tx_entry.time as usize; - if tx_time > *latest_time { - *latest_time = tx_time; - } - - // Avoid emitting transactions that are already emitted if we can guarantee - // blocks containing ancestors are already emitted. The bitcoind rpc interface - // provides us with the block height that the tx is introduced to the mempool. - // If we have already emitted the block of height, we can assume that all - // ancestor txs have been processed by the receiver. 
- let is_already_emitted = tx_time <= prev_mempool_time; - let is_within_height = tx_entry.height <= prev_mempool_tip as _; - if is_already_emitted && is_within_height { - return None; - } - - let tx = match client.get_raw_transaction(&txid, None) { - Ok(tx) => tx, - // the tx is confirmed or evicted since `get_raw_mempool_verbose` - Err(err) if err.is_not_found_error() => return None, - Err(err) => return Some(Err(err)), - }; - - Some(Ok((tx, tx_time as u64))) - } - }) - .collect::, _>>()?; - - self.last_mempool_time = latest_time; - self.last_mempool_tip = Some(self.last_cp.height()); - - Ok(txs_to_emit) - } - - /// Emit the next block height and header (if any). - pub fn next_header(&mut self) -> Result>, bitcoincore_rpc::Error> { - Ok(poll(self, |hash| self.client.get_block_header(hash))? - .map(|(checkpoint, block)| BlockEvent { block, checkpoint })) - } - - /// Emit the next block height and block (if any). - pub fn next_block(&mut self) -> Result>, bitcoincore_rpc::Error> { - Ok(poll(self, |hash| self.client.get_block(hash))? - .map(|(checkpoint, block)| BlockEvent { block, checkpoint })) - } -} - -/// A newly emitted block from [`Emitter`]. -#[derive(Debug)] -pub struct BlockEvent { - /// Either a full [`Block`] or [`Header`] of the new block. - pub block: B, - - /// The checkpoint of the new block. - /// - /// A [`CheckPoint`] is a node of a linked list of [`BlockId`]s. This checkpoint is linked to - /// all [`BlockId`]s originally passed in [`Emitter::new`] as well as emitted blocks since then. - /// These blocks are guaranteed to be of the same chain. - /// - /// This is important as BDK structures require block-to-apply to be connected with another - /// block in the original chain. - pub checkpoint: CheckPoint, -} - -impl BlockEvent { - /// The block height of this new block. - pub fn block_height(&self) -> u32 { - self.checkpoint.height() - } - - /// The block hash of this new block. - pub fn block_hash(&self) -> BlockHash { - self.checkpoint.hash() - } - - /// The [`BlockId`] of a previous block that this block connects to. - /// - /// This either returns a [`BlockId`] of a previously emitted block or from the chain we started - /// with (passed in as `last_cp` in [`Emitter::new`]). - /// - /// This value is derived from [`BlockEvent::checkpoint`]. - pub fn connected_to(&self) -> BlockId { - match self.checkpoint.prev() { - Some(prev_cp) => prev_cp.block_id(), - // there is no previous checkpoint, so just connect with itself - None => self.checkpoint.block_id(), - } - } -} - -enum PollResponse { - Block(bitcoincore_rpc_json::GetBlockResult), - NoMoreBlocks, - /// Fetched block is not in the best chain. - BlockNotInBestChain, - AgreementFound(bitcoincore_rpc_json::GetBlockResult, CheckPoint), - /// Force the genesis checkpoint down the receiver's throat. - AgreementPointNotFound(BlockHash), -} - -fn poll_once(emitter: &Emitter) -> Result -where - C: bitcoincore_rpc::RpcApi, -{ - let client = emitter.client; - - if let Some(last_res) = &emitter.last_block { - let next_hash = if last_res.height < emitter.start_height as _ { - // enforce start height - let next_hash = client.get_block_hash(emitter.start_height as _)?; - // make sure last emission is still in best chain - if client.get_block_hash(last_res.height as _)? 
!= last_res.hash { - return Ok(PollResponse::BlockNotInBestChain); - } - next_hash - } else { - match last_res.nextblockhash { - None => return Ok(PollResponse::NoMoreBlocks), - Some(next_hash) => next_hash, - } - }; - - let res = client.get_block_info(&next_hash)?; - if res.confirmations < 0 { - return Ok(PollResponse::BlockNotInBestChain); - } - - return Ok(PollResponse::Block(res)); - } - - for cp in emitter.last_cp.iter() { - let res = match client.get_block_info(&cp.hash()) { - // block not in best chain - Ok(res) if res.confirmations < 0 => continue, - Ok(res) => res, - Err(e) if e.is_not_found_error() => { - if cp.height() > 0 { - continue; - } - // if we can't find genesis block, we can't create an update that connects - break; - } - Err(e) => return Err(e), - }; - - // agreement point found - return Ok(PollResponse::AgreementFound(res, cp)); - } - - let genesis_hash = client.get_block_hash(0)?; - Ok(PollResponse::AgreementPointNotFound(genesis_hash)) -} - -fn poll( - emitter: &mut Emitter, - get_item: F, -) -> Result, bitcoincore_rpc::Error> -where - C: bitcoincore_rpc::RpcApi, - F: Fn(&BlockHash) -> Result, -{ - loop { - match poll_once(emitter)? { - PollResponse::Block(res) => { - let height = res.height as u32; - let hash = res.hash; - let item = get_item(&hash)?; - - let new_cp = emitter - .last_cp - .clone() - .push(BlockId { height, hash }) - .expect("must push"); - emitter.last_cp = new_cp.clone(); - emitter.last_block = Some(res); - return Ok(Some((new_cp, item))); - } - PollResponse::NoMoreBlocks => { - emitter.last_block = None; - return Ok(None); - } - PollResponse::BlockNotInBestChain => { - emitter.last_block = None; - continue; - } - PollResponse::AgreementFound(res, cp) => { - let agreement_h = res.height as u32; - - // The tip during the last mempool emission needs to in the best chain, we reduce - // it if it is not. - if let Some(h) = emitter.last_mempool_tip.as_mut() { - if *h > agreement_h { - *h = agreement_h; - } - } - - // get rid of evicted blocks - emitter.last_cp = cp; - emitter.last_block = Some(res); - continue; - } - PollResponse::AgreementPointNotFound(genesis_hash) => { - emitter.last_cp = CheckPoint::new(BlockId { - height: 0, - hash: genesis_hash, - }); - emitter.last_block = None; - continue; - } - } - } -} - -/// Extends [`bitcoincore_rpc::Error`]. -pub trait BitcoindRpcErrorExt { - /// Returns whether the error is a "not found" error. - /// - /// This is useful since [`Emitter`] emits [`Result<_, bitcoincore_rpc::Error>`]s as - /// [`Iterator::Item`]. - fn is_not_found_error(&self) -> bool; -} - -impl BitcoindRpcErrorExt for bitcoincore_rpc::Error { - fn is_not_found_error(&self) -> bool { - if let bitcoincore_rpc::Error::JsonRpc(bitcoincore_rpc::jsonrpc::Error::Rpc(rpc_err)) = self - { - rpc_err.code == -5 - } else { - false - } - } -} diff --git a/crates/bitcoind_rpc/tests/test_emitter.rs b/crates/bitcoind_rpc/tests/test_emitter.rs deleted file mode 100644 index 14b0c921..00000000 --- a/crates/bitcoind_rpc/tests/test_emitter.rs +++ /dev/null @@ -1,733 +0,0 @@ -use std::collections::{BTreeMap, BTreeSet}; - -use bdk_bitcoind_rpc::Emitter; -use bdk_chain::{ - bitcoin::{Address, Amount, Txid}, - local_chain::{CheckPoint, LocalChain}, - spk_txout::SpkTxOutIndex, - Balance, BlockId, IndexedTxGraph, Merge, -}; -use bdk_testenv::{anyhow, TestEnv}; -use bitcoin::{hashes::Hash, Block, OutPoint, ScriptBuf, WScriptHash}; -use bitcoincore_rpc::RpcApi; - -/// Ensure that blocks are emitted in order even after reorg. -/// -/// 1. Mine 101 blocks. -/// 2. 
Emit blocks from [`Emitter`] and update the [`LocalChain`]. -/// 3. Reorg highest 6 blocks. -/// 4. Emit blocks from [`Emitter`] and re-update the [`LocalChain`]. -#[test] -pub fn test_sync_local_chain() -> anyhow::Result<()> { - let env = TestEnv::new()?; - let network_tip = env.rpc_client().get_block_count()?; - let (mut local_chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?); - let mut emitter = Emitter::new(env.rpc_client(), local_chain.tip(), 0); - - // Mine some blocks and return the actual block hashes. - // Because initializing `ElectrsD` already mines some blocks, we must include those too when - // returning block hashes. - let exp_hashes = { - let mut hashes = (0..=network_tip) - .map(|height| env.rpc_client().get_block_hash(height)) - .collect::, _>>()?; - hashes.extend(env.mine_blocks(101 - network_tip as usize, None)?); - hashes - }; - - // See if the emitter outputs the right blocks. - - while let Some(emission) = emitter.next_block()? { - let height = emission.block_height(); - let hash = emission.block_hash(); - assert_eq!( - emission.block_hash(), - exp_hashes[height as usize], - "emitted block hash is unexpected" - ); - - assert_eq!( - local_chain.apply_update(emission.checkpoint,)?, - [(height, Some(hash))].into(), - "chain update changeset is unexpected", - ); - } - - assert_eq!( - local_chain - .iter_checkpoints() - .map(|cp| (cp.height(), cp.hash())) - .collect::>(), - exp_hashes - .iter() - .enumerate() - .map(|(i, hash)| (i as u32, *hash)) - .collect::>(), - "final local_chain state is unexpected", - ); - - // Perform reorg. - let reorged_blocks = env.reorg(6)?; - let exp_hashes = exp_hashes - .iter() - .take(exp_hashes.len() - reorged_blocks.len()) - .chain(&reorged_blocks) - .cloned() - .collect::>(); - - // See if the emitter outputs the right blocks. - - let mut exp_height = exp_hashes.len() - reorged_blocks.len(); - while let Some(emission) = emitter.next_block()? { - let height = emission.block_height(); - let hash = emission.block_hash(); - assert_eq!( - height, exp_height as u32, - "emitted block has unexpected height" - ); - - assert_eq!( - hash, exp_hashes[height as usize], - "emitted block is unexpected" - ); - - assert_eq!( - local_chain.apply_update(emission.checkpoint,)?, - if exp_height == exp_hashes.len() - reorged_blocks.len() { - bdk_chain::local_chain::ChangeSet { - blocks: core::iter::once((height, Some(hash))) - .chain((height + 1..exp_hashes.len() as u32).map(|h| (h, None))) - .collect(), - } - } else { - [(height, Some(hash))].into() - }, - "chain update changeset is unexpected", - ); - - exp_height += 1; - } - - assert_eq!( - local_chain - .iter_checkpoints() - .map(|cp| (cp.height(), cp.hash())) - .collect::>(), - exp_hashes - .iter() - .enumerate() - .map(|(i, hash)| (i as u32, *hash)) - .collect::>(), - "final local_chain state is unexpected after reorg", - ); - - Ok(()) -} - -/// Ensure that [`EmittedUpdate::into_tx_graph_update`] behaves appropriately for both mempool and -/// block updates. -/// -/// [`EmittedUpdate::into_tx_graph_update`]: bdk_bitcoind_rpc::EmittedUpdate::into_tx_graph_update -#[test] -fn test_into_tx_graph() -> anyhow::Result<()> { - let env = TestEnv::new()?; - - let addr_0 = env - .rpc_client() - .get_new_address(None, None)? - .assume_checked(); - let addr_1 = env - .rpc_client() - .get_new_address(None, None)? - .assume_checked(); - let addr_2 = env - .rpc_client() - .get_new_address(None, None)? 
- .assume_checked(); - - env.mine_blocks(101, None)?; - - let (mut chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?); - let mut indexed_tx_graph = IndexedTxGraph::::new({ - let mut index = SpkTxOutIndex::::default(); - index.insert_spk(0, addr_0.script_pubkey()); - index.insert_spk(1, addr_1.script_pubkey()); - index.insert_spk(2, addr_2.script_pubkey()); - index - }); - - let emitter = &mut Emitter::new(env.rpc_client(), chain.tip(), 0); - - while let Some(emission) = emitter.next_block()? { - let height = emission.block_height(); - let _ = chain.apply_update(emission.checkpoint)?; - let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height); - assert!(indexed_additions.is_empty()); - } - - // send 3 txs to a tracked address, these txs will be in the mempool - let exp_txids = { - let mut txids = BTreeSet::new(); - for _ in 0..3 { - txids.insert(env.rpc_client().send_to_address( - &addr_0, - Amount::from_sat(10_000), - None, - None, - None, - None, - None, - None, - )?); - } - txids - }; - - // expect that the next block should be none and we should get 3 txs from mempool - { - // next block should be `None` - assert!(emitter.next_block()?.is_none()); - - let mempool_txs = emitter.mempool()?; - let indexed_additions = indexed_tx_graph.batch_insert_unconfirmed(mempool_txs); - assert_eq!( - indexed_additions - .tx_graph - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect::>(), - exp_txids, - "changeset should have the 3 mempool transactions", - ); - assert!(indexed_additions.tx_graph.anchors.is_empty()); - } - - // mine a block that confirms the 3 txs - let exp_block_hash = env.mine_blocks(1, None)?[0]; - let exp_block_height = env.rpc_client().get_block_info(&exp_block_hash)?.height as u32; - let exp_anchors = exp_txids - .iter() - .map({ - let anchor = BlockId { - height: exp_block_height, - hash: exp_block_hash, - }; - move |&txid| (anchor, txid) - }) - .collect::>(); - - // must receive mined block which will confirm the transactions. - { - let emission = emitter.next_block()?.expect("must get mined block"); - let height = emission.block_height(); - let _ = chain.apply_update(emission.checkpoint)?; - let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height); - assert!(indexed_additions.tx_graph.txs.is_empty()); - assert!(indexed_additions.tx_graph.txouts.is_empty()); - assert_eq!(indexed_additions.tx_graph.anchors, exp_anchors); - } - - Ok(()) -} - -/// Ensure next block emitted after reorg is at reorg height. -/// -/// After a reorg, if the last-emitted block height is equal or greater than the reorg height, and -/// the fallback height is equal to or lower than the reorg height, the next block/header emission -/// should be at the reorg height. -/// -/// TODO: If the reorg height is lower than the fallback height, how do we find a block height to -/// emit that can connect with our receiver chain? 
-#[test] -fn ensure_block_emitted_after_reorg_is_at_reorg_height() -> anyhow::Result<()> { - const EMITTER_START_HEIGHT: usize = 100; - const CHAIN_TIP_HEIGHT: usize = 110; - - let env = TestEnv::new()?; - let mut emitter = Emitter::new( - env.rpc_client(), - CheckPoint::new(BlockId { - height: 0, - hash: env.rpc_client().get_block_hash(0)?, - }), - EMITTER_START_HEIGHT as _, - ); - - env.mine_blocks(CHAIN_TIP_HEIGHT, None)?; - while emitter.next_header()?.is_some() {} - - for reorg_count in 1..=10 { - let replaced_blocks = env.reorg_empty_blocks(reorg_count)?; - let next_emission = emitter.next_header()?.expect("must emit block after reorg"); - assert_eq!( - ( - next_emission.block_height() as usize, - next_emission.block_hash() - ), - replaced_blocks[0], - "block emitted after reorg should be at the reorg height" - ); - while emitter.next_header()?.is_some() {} - } - - Ok(()) -} - -fn process_block( - recv_chain: &mut LocalChain, - recv_graph: &mut IndexedTxGraph>, - block: Block, - block_height: u32, -) -> anyhow::Result<()> { - recv_chain.apply_update(CheckPoint::from_header(&block.header, block_height))?; - let _ = recv_graph.apply_block(block, block_height); - Ok(()) -} - -fn sync_from_emitter( - recv_chain: &mut LocalChain, - recv_graph: &mut IndexedTxGraph>, - emitter: &mut Emitter, -) -> anyhow::Result<()> -where - C: bitcoincore_rpc::RpcApi, -{ - while let Some(emission) = emitter.next_block()? { - let height = emission.block_height(); - process_block(recv_chain, recv_graph, emission.block, height)?; - } - Ok(()) -} - -fn get_balance( - recv_chain: &LocalChain, - recv_graph: &IndexedTxGraph>, -) -> anyhow::Result { - let chain_tip = recv_chain.tip().block_id(); - let outpoints = recv_graph.index.outpoints().clone(); - let balance = recv_graph - .graph() - .balance(recv_chain, chain_tip, outpoints, |_, _| true); - Ok(balance) -} - -/// If a block is reorged out, ensure that containing transactions that do not exist in the -/// replacement block(s) become unconfirmed. -#[test] -fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> { - const PREMINE_COUNT: usize = 101; - const ADDITIONAL_COUNT: usize = 11; - const SEND_AMOUNT: Amount = Amount::from_sat(10_000); - - let env = TestEnv::new()?; - let mut emitter = Emitter::new( - env.rpc_client(), - CheckPoint::new(BlockId { - height: 0, - hash: env.rpc_client().get_block_hash(0)?, - }), - 0, - ); - - // setup addresses - let addr_to_mine = env - .rpc_client() - .get_new_address(None, None)? - .assume_checked(); - let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros()); - let addr_to_track = Address::from_script(&spk_to_track, bitcoin::Network::Regtest)?; - - // setup receiver - let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?); - let mut recv_graph = IndexedTxGraph::::new({ - let mut recv_index = SpkTxOutIndex::default(); - recv_index.insert_spk((), spk_to_track.clone()); - recv_index - }); - - // mine and sync receiver up to tip - env.mine_blocks(PREMINE_COUNT, Some(addr_to_mine))?; - - // create transactions that are tracked by our receiver - for _ in 0..ADDITIONAL_COUNT { - let txid = env.send(&addr_to_track, SEND_AMOUNT)?; - - // lock outputs that send to `addr_to_track` - let outpoints_to_lock = env - .rpc_client() - .get_transaction(&txid, None)? - .transaction()? 
- .output - .into_iter() - .enumerate() - .filter(|(_, txo)| txo.script_pubkey == spk_to_track) - .map(|(vout, _)| OutPoint::new(txid, vout as _)) - .collect::>(); - env.rpc_client().lock_unspent(&outpoints_to_lock)?; - - let _ = env.mine_blocks(1, None)?; - } - - // get emitter up to tip - sync_from_emitter(&mut recv_chain, &mut recv_graph, &mut emitter)?; - - assert_eq!( - get_balance(&recv_chain, &recv_graph)?, - Balance { - confirmed: SEND_AMOUNT * ADDITIONAL_COUNT as u64, - ..Balance::default() - }, - "initial balance must be correct", - ); - - // perform reorgs with different depths - for reorg_count in 1..=ADDITIONAL_COUNT { - env.reorg_empty_blocks(reorg_count)?; - sync_from_emitter(&mut recv_chain, &mut recv_graph, &mut emitter)?; - - assert_eq!( - get_balance(&recv_chain, &recv_graph)?, - Balance { - trusted_pending: SEND_AMOUNT * reorg_count as u64, - confirmed: SEND_AMOUNT * (ADDITIONAL_COUNT - reorg_count) as u64, - ..Balance::default() - }, - "reorg_count: {}", - reorg_count, - ); - } - - Ok(()) -} - -/// Ensure avoid-re-emission-logic is sound when [`Emitter`] is synced to tip. -/// -/// The receiver (bdk_chain structures) is synced to the chain tip, and there is txs in the mempool. -/// When we call Emitter::mempool multiple times, mempool txs should not be re-emitted, even if the -/// chain tip is extended. -#[test] -fn mempool_avoids_re_emission() -> anyhow::Result<()> { - const BLOCKS_TO_MINE: usize = 101; - const MEMPOOL_TX_COUNT: usize = 2; - - let env = TestEnv::new()?; - let mut emitter = Emitter::new( - env.rpc_client(), - CheckPoint::new(BlockId { - height: 0, - hash: env.rpc_client().get_block_hash(0)?, - }), - 0, - ); - - // mine blocks and sync up emitter - let addr = env - .rpc_client() - .get_new_address(None, None)? - .assume_checked(); - env.mine_blocks(BLOCKS_TO_MINE, Some(addr.clone()))?; - while emitter.next_header()?.is_some() {} - - // have some random txs in mempool - let exp_txids = (0..MEMPOOL_TX_COUNT) - .map(|_| env.send(&addr, Amount::from_sat(2100))) - .collect::, _>>()?; - - // the first emission should include all transactions - let emitted_txids = emitter - .mempool()? - .into_iter() - .map(|(tx, _)| tx.compute_txid()) - .collect::>(); - assert_eq!( - emitted_txids, exp_txids, - "all mempool txs should be emitted" - ); - - // second emission should be empty - assert!( - emitter.mempool()?.is_empty(), - "second emission should be empty" - ); - - // mine empty blocks + sync up our emitter -> we should still not re-emit - for _ in 0..BLOCKS_TO_MINE { - env.mine_empty_block()?; - } - while emitter.next_header()?.is_some() {} - assert!( - emitter.mempool()?.is_empty(), - "third emission, after chain tip is extended, should also be empty" - ); - - Ok(()) -} - -/// Ensure mempool tx is still re-emitted if [`Emitter`] has not reached the tx's introduction -/// height. -/// -/// We introduce a mempool tx after each block, where blocks are empty (does not confirm previous -/// mempool txs). Then we emit blocks from [`Emitter`] (intertwining `mempool` calls). We check -/// that `mempool` should always re-emit txs that have introduced at a height greater than the last -/// emitted block height. 
-#[test] -fn mempool_re_emits_if_tx_introduction_height_not_reached() -> anyhow::Result<()> { - const PREMINE_COUNT: usize = 101; - const MEMPOOL_TX_COUNT: usize = 21; - - let env = TestEnv::new()?; - let mut emitter = Emitter::new( - env.rpc_client(), - CheckPoint::new(BlockId { - height: 0, - hash: env.rpc_client().get_block_hash(0)?, - }), - 0, - ); - - // mine blocks to get initial balance, sync emitter up to tip - let addr = env - .rpc_client() - .get_new_address(None, None)? - .assume_checked(); - env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?; - while emitter.next_header()?.is_some() {} - - // mine blocks to introduce txs to mempool at different heights - let tx_introductions = (0..MEMPOOL_TX_COUNT) - .map(|_| -> anyhow::Result<_> { - let (height, _) = env.mine_empty_block()?; - let txid = env.send(&addr, Amount::from_sat(2100))?; - Ok((height, txid)) - }) - .collect::>>()?; - - assert_eq!( - emitter - .mempool()? - .into_iter() - .map(|(tx, _)| tx.compute_txid()) - .collect::>(), - tx_introductions.iter().map(|&(_, txid)| txid).collect(), - "first mempool emission should include all txs", - ); - assert_eq!( - emitter - .mempool()? - .into_iter() - .map(|(tx, _)| tx.compute_txid()) - .collect::>(), - tx_introductions.iter().map(|&(_, txid)| txid).collect(), - "second mempool emission should still include all txs", - ); - - // At this point, the emitter has seen all mempool transactions. It should only re-emit those - // that have introduction heights less than the emitter's last-emitted block tip. - while let Some(emission) = emitter.next_header()? { - let height = emission.block_height(); - // We call `mempool()` twice. - // The second call (at height `h`) should skip the tx introduced at height `h`. - for try_index in 0..2 { - let exp_txids = tx_introductions - .range((height as usize + try_index, Txid::all_zeros())..) - .map(|&(_, txid)| txid) - .collect::>(); - let emitted_txids = emitter - .mempool()? - .into_iter() - .map(|(tx, _)| tx.compute_txid()) - .collect::>(); - assert_eq!( - emitted_txids, exp_txids, - "\n emission {} (try {}) must only contain txs introduced at that height or lower: \n\t missing: {:?} \n\t extra: {:?}", - height, - try_index, - exp_txids - .difference(&emitted_txids) - .map(|txid| (txid, tx_introductions.iter().find_map(|(h, id)| if id == txid { Some(h) } else { None }).unwrap())) - .collect::>(), - emitted_txids - .difference(&exp_txids) - .map(|txid| (txid, tx_introductions.iter().find_map(|(h, id)| if id == txid { Some(h) } else { None }).unwrap())) - .collect::>(), - ); - } - } - - Ok(()) -} - -/// Ensure we force re-emit all mempool txs after reorg. -#[test] -fn mempool_during_reorg() -> anyhow::Result<()> { - const TIP_DIFF: usize = 10; - const PREMINE_COUNT: usize = 101; - - let env = TestEnv::new()?; - let mut emitter = Emitter::new( - env.rpc_client(), - CheckPoint::new(BlockId { - height: 0, - hash: env.rpc_client().get_block_hash(0)?, - }), - 0, - ); - - // mine blocks to get initial balance - let addr = env - .rpc_client() - .get_new_address(None, None)? - .assume_checked(); - env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?; - - // introduce mempool tx at each block extension - for _ in 0..TIP_DIFF { - env.mine_empty_block()?; - env.send(&addr, Amount::from_sat(2100))?; - } - - // sync emitter to tip, first mempool emission should include all txs (as we haven't emitted - // from the mempool yet) - while emitter.next_header()?.is_some() {} - assert_eq!( - emitter - .mempool()? 
- .into_iter() - .map(|(tx, _)| tx.compute_txid()) - .collect::>(), - env.rpc_client() - .get_raw_mempool()? - .into_iter() - .collect::>(), - "first mempool emission should include all txs", - ); - - // perform reorgs at different heights, these reorgs will not confirm transactions in the - // mempool - for reorg_count in 1..TIP_DIFF { - env.reorg_empty_blocks(reorg_count)?; - - // This is a map of mempool txids to tip height where the tx was introduced to the mempool - // we recalculate this at every loop as reorgs may evict transactions from mempool. We use - // the introduction height to determine whether we expect a tx to appear in a mempool - // emission. - // TODO: How can have have reorg logic in `TestEnv` NOT blacklast old blocks first? - let tx_introductions = dbg!(env - .rpc_client() - .get_raw_mempool_verbose()? - .into_iter() - .map(|(txid, entry)| (txid, entry.height as usize)) - .collect::>()); - - // `next_header` emits the replacement block of the reorg - if let Some(emission) = emitter.next_header()? { - let height = emission.block_height(); - - // the mempool emission (that follows the first block emission after reorg) should only - // include mempool txs introduced at reorg height or greater - let mempool = emitter - .mempool()? - .into_iter() - .map(|(tx, _)| tx.compute_txid()) - .collect::>(); - let exp_mempool = tx_introductions - .iter() - .filter(|(_, &intro_h)| intro_h >= (height as usize)) - .map(|(&txid, _)| txid) - .collect::>(); - assert_eq!( - mempool, exp_mempool, - "the first mempool emission after reorg should only include mempool txs introduced at reorg height or greater" - ); - - let mempool = emitter - .mempool()? - .into_iter() - .map(|(tx, _)| tx.compute_txid()) - .collect::>(); - let exp_mempool = tx_introductions - .iter() - .filter(|&(_, &intro_height)| intro_height > (height as usize)) - .map(|(&txid, _)| txid) - .collect::>(); - assert_eq!( - mempool, exp_mempool, - "following mempool emissions after reorg should exclude mempool introduction heights <= last emitted block height: \n\t missing: {:?} \n\t extra: {:?}", - exp_mempool - .difference(&mempool) - .map(|txid| (txid, tx_introductions.get(txid).unwrap())) - .collect::>(), - mempool - .difference(&exp_mempool) - .map(|txid| (txid, tx_introductions.get(txid).unwrap())) - .collect::>(), - ); - } - - // sync emitter to tip - while emitter.next_header()?.is_some() {} - } - - Ok(()) -} - -/// If blockchain re-org includes the start height, emit new start height block -/// -/// 1. mine 101 blocks -/// 2. emit blocks 99a, 100a -/// 3. invalidate blocks 99a, 100a, 101a -/// 4. mine new blocks 99b, 100b, 101b -/// 5. emit block 99b -/// -/// The block hash of 99b should be different than 99a, but their previous block hashes should -/// be the same. 
-#[test] -fn no_agreement_point() -> anyhow::Result<()> { - const PREMINE_COUNT: usize = 101; - - let env = TestEnv::new()?; - - // start height is 99 - let mut emitter = Emitter::new( - env.rpc_client(), - CheckPoint::new(BlockId { - height: 0, - hash: env.rpc_client().get_block_hash(0)?, - }), - (PREMINE_COUNT - 2) as u32, - ); - - // mine 101 blocks - env.mine_blocks(PREMINE_COUNT, None)?; - - // emit block 99a - let block_header_99a = emitter.next_header()?.expect("block 99a header").block; - let block_hash_99a = block_header_99a.block_hash(); - let block_hash_98a = block_header_99a.prev_blockhash; - - // emit block 100a - let block_header_100a = emitter.next_header()?.expect("block 100a header").block; - let block_hash_100a = block_header_100a.block_hash(); - - // get hash for block 101a - let block_hash_101a = env.rpc_client().get_block_hash(101)?; - - // invalidate blocks 99a, 100a, 101a - env.rpc_client().invalidate_block(&block_hash_99a)?; - env.rpc_client().invalidate_block(&block_hash_100a)?; - env.rpc_client().invalidate_block(&block_hash_101a)?; - - // mine new blocks 99b, 100b, 101b - env.mine_blocks(3, None)?; - - // emit block header 99b - let block_header_99b = emitter.next_header()?.expect("block 99b header").block; - let block_hash_99b = block_header_99b.block_hash(); - let block_hash_98b = block_header_99b.prev_blockhash; - - assert_ne!(block_hash_99a, block_hash_99b); - assert_eq!(block_hash_98a, block_hash_98b); - - Ok(()) -} diff --git a/crates/bitcoind_rpc/tests/test_filter_iter.rs b/crates/bitcoind_rpc/tests/test_filter_iter.rs deleted file mode 100644 index c8d3335a..00000000 --- a/crates/bitcoind_rpc/tests/test_filter_iter.rs +++ /dev/null @@ -1,165 +0,0 @@ -use bitcoin::{constants, Address, Amount, Network, ScriptBuf}; - -use bdk_bitcoind_rpc::bip158::FilterIter; -use bdk_core::{BlockId, CheckPoint}; -use bdk_testenv::{anyhow, bitcoind, block_id, TestEnv}; -use bitcoincore_rpc::RpcApi; - -fn testenv() -> anyhow::Result { - let mut conf = bitcoind::Conf::default(); - conf.args.push("-blockfilterindex=1"); - conf.args.push("-peerblockfilters=1"); - TestEnv::new_with_config(bdk_testenv::Config { - bitcoind: conf, - ..Default::default() - }) -} - -// Test the result of `chain_update` given a local checkpoint. -// -// new blocks -// 2--3--4--5--6--7--8--9--10--11 -// -// case 1: base below new blocks -// 0- -// case 2: base overlaps with new blocks -// 0--1--2--3--4 -// case 3: stale tip (with overlap) -// 0--1--2--3--4--x -// case 4: stale tip (no overlap) -// 0--x -#[test] -fn get_tip_and_chain_update() -> anyhow::Result<()> { - let env = testenv()?; - - let genesis_hash = constants::genesis_block(Network::Regtest).block_hash(); - let genesis = BlockId { - height: 0, - hash: genesis_hash, - }; - - let hash = env.rpc_client().get_best_block_hash()?; - let header = env.rpc_client().get_block_header_info(&hash)?; - assert_eq!(header.height, 1); - let block_1 = BlockId { - height: header.height as u32, - hash, - }; - - // `FilterIter` will try to return up to ten recent blocks - // so we keep them for reference - let new_blocks: Vec = (2..12) - .zip(env.mine_blocks(10, None)?) - .map(BlockId::from) - .collect(); - - let new_tip = *new_blocks.last().unwrap(); - - struct TestCase { - // name - name: &'static str, - // local blocks - chain: Vec, - // expected blocks - exp: Vec, - } - - // For each test we create a new `FilterIter` with the checkpoint given - // by the blocks in the test chain. 
Then we sync to the remote tip and - // check the blocks that are returned in the chain update. - [ - TestCase { - name: "point of agreement below new blocks, expect base + new", - chain: vec![genesis, block_1], - exp: [block_1].into_iter().chain(new_blocks.clone()).collect(), - }, - TestCase { - name: "point of agreement genesis, expect base + new", - chain: vec![genesis], - exp: [genesis].into_iter().chain(new_blocks.clone()).collect(), - }, - TestCase { - name: "point of agreement within new blocks, expect base + remaining", - chain: new_blocks[..=2].to_vec(), - exp: new_blocks[2..].to_vec(), - }, - TestCase { - name: "stale tip within new blocks, expect base + corrected + remaining", - // base height: 4, stale height: 5 - chain: vec![new_blocks[2], block_id!(5, "E")], - exp: new_blocks[2..].to_vec(), - }, - TestCase { - name: "stale tip below new blocks, expect base + corrected + new", - chain: vec![genesis, block_id!(1, "A")], - exp: [genesis, block_1].into_iter().chain(new_blocks).collect(), - }, - ] - .into_iter() - .for_each(|test| { - let cp = CheckPoint::from_block_ids(test.chain).unwrap(); - let mut iter = FilterIter::new_with_checkpoint(env.rpc_client(), cp); - assert_eq!(iter.get_tip().unwrap(), Some(new_tip)); - let update_cp = iter.chain_update().unwrap(); - let mut update_blocks: Vec<_> = update_cp.iter().map(|cp| cp.block_id()).collect(); - update_blocks.reverse(); - assert_eq!(update_blocks, test.exp, "{}", test.name); - }); - - Ok(()) -} - -#[test] -fn filter_iter_returns_matched_blocks() -> anyhow::Result<()> { - use bdk_bitcoind_rpc::bip158::{Event, EventInner}; - let env = testenv()?; - let rpc = env.rpc_client(); - while rpc.get_block_count()? < 101 { - let _ = env.mine_blocks(1, None)?; - } - - // send tx - let spk = ScriptBuf::from_hex("0014446906a6560d8ad760db3156706e72e171f3a2aa")?; - let txid = env.send( - &Address::from_script(&spk, Network::Regtest)?, - Amount::from_btc(0.42)?, - )?; - let _ = env.mine_blocks(1, None); - - // match blocks - let mut iter = FilterIter::new_with_height(rpc, 1); - iter.add_spk(spk); - assert_eq!(iter.get_tip()?.unwrap().height, 102); - - for res in iter { - let event = res?; - match event { - event if event.height() <= 101 => assert!(!event.is_match()), - Event::Block(EventInner { height, block }) => { - assert_eq!(height, 102); - assert!(block.txdata.iter().any(|tx| tx.compute_txid() == txid)); - } - Event::NoMatch(_) => panic!("expected to match block 102"), - } - } - - Ok(()) -} - -#[test] -fn filter_iter_error_no_scripts() -> anyhow::Result<()> { - use bdk_bitcoind_rpc::bip158::Error; - let env = testenv()?; - let _ = env.mine_blocks(2, None)?; - - let mut iter = FilterIter::new_with_height(env.rpc_client(), 1); - assert_eq!(iter.get_tip()?.unwrap().height, 3); - - // iterator should return three errors - for _ in 0..3 { - assert!(matches!(iter.next().unwrap(), Err(Error::NoScripts))); - } - assert!(iter.next().is_none()); - - Ok(()) -} diff --git a/crates/chain/CHANGELOG.md b/crates/chain/CHANGELOG.md deleted file mode 100644 index 2c5e1452..00000000 --- a/crates/chain/CHANGELOG.md +++ /dev/null @@ -1,16 +0,0 @@ -# Changelog - -All notable changes to this project can be found here and in each release's git tag and can be viewed with `git tag -ln100 "v*"`. See also [DEVELOPMENT_CYCLE.md](../../DEVELOPMENT_CYCLE.md) for more details. - -Contributors do not need to change this file but do need to add changelog details in their PR descriptions. 
The person making the next release will collect changelog details from included PRs and edit this file prior to each release. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [chain-0.21.1] - -### Changed - -- Minor updates to fix new rustc 1.83.0 clippy warnings #1776 - -[chain-0.21.1]: https://github.com/bitcoindevkit/bdk/releases/tag/chain-0.21.1 \ No newline at end of file diff --git a/crates/chain/Cargo.toml b/crates/chain/Cargo.toml deleted file mode 100644 index 8ff44438..00000000 --- a/crates/chain/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "bdk_chain" -version = "0.21.1" -edition = "2021" -rust-version = "1.63" -homepage = "https://bitcoindevkit.org" -repository = "https://github.com/bitcoindevkit/bdk" -documentation = "https://docs.rs/bdk_chain" -description = "Collection of core structures for Bitcoin Dev Kit." -license = "MIT OR Apache-2.0" -readme = "README.md" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[lints] -workspace = true - -[dependencies] -bitcoin = { version = "0.32.0", default-features = false } -bdk_core = { path = "../core", version = "0.4.1", default-features = false } -serde = { version = "1", optional = true, features = ["derive", "rc"] } -miniscript = { version = "12.3.1", optional = true, default-features = false } - -# Feature dependencies -rusqlite = { version = "0.31.0", features = ["bundled"], optional = true } - -[dev-dependencies] -rand = "0.8" -proptest = "1.2.0" -bdk_testenv = { path = "../testenv", default-features = false } -criterion = { version = "0.2" } - -[features] -default = ["std", "miniscript"] -std = ["bitcoin/std", "miniscript?/std", "bdk_core/std"] -serde = ["dep:serde", "bitcoin/serde", "miniscript?/serde", "bdk_core/serde"] -hashbrown = ["bdk_core/hashbrown"] -rusqlite = ["std", "dep:rusqlite", "serde"] - -[[bench]] -name = "canonicalization" -harness = false diff --git a/crates/chain/README.md b/crates/chain/README.md deleted file mode 100644 index 02b33350..00000000 --- a/crates/chain/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# BDK Chain - -BDK keychain tracker, tools for storing and indexing chain data. 
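
The README's one-liner is terse; as a concrete illustration of the "storing and indexing chain data" idea, here is a minimal hedged sketch assuming `bdk_chain`'s `LocalChain` API (the height-1 block hash is a placeholder chosen only for illustration):

```rust
use bdk_chain::bitcoin::{constants, hashes::Hash, BlockHash, Network};
use bdk_chain::local_chain::LocalChain;
use bdk_chain::{BlockId, Merge};

fn main() {
    // Start a best-chain tracker from the regtest genesis hash.
    let genesis_hash = constants::genesis_block(Network::Regtest).block_hash();
    let (mut chain, _init_changeset) = LocalChain::from_genesis_hash(genesis_hash);

    // Record a block at height 1 (placeholder hash). The returned changeset
    // is what a caller would persist to restore the chain later.
    let changeset = chain
        .insert_block(BlockId {
            height: 1,
            hash: BlockHash::all_zeros(),
        })
        .expect("inserting above genesis must succeed");
    assert!(!changeset.is_empty());
    assert_eq!(chain.tip().height(), 1);
}
```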
diff --git a/crates/chain/benches/canonicalization.rs b/crates/chain/benches/canonicalization.rs deleted file mode 100644 index 6893e6df..00000000 --- a/crates/chain/benches/canonicalization.rs +++ /dev/null @@ -1,248 +0,0 @@ -use bdk_chain::{keychain_txout::KeychainTxOutIndex, local_chain::LocalChain, IndexedTxGraph}; -use bdk_core::{BlockId, CheckPoint}; -use bdk_core::{ConfirmationBlockTime, TxUpdate}; -use bdk_testenv::hash; -use bitcoin::{ - absolute, constants, hashes::Hash, key::Secp256k1, transaction, Amount, BlockHash, Network, - OutPoint, ScriptBuf, Transaction, TxIn, TxOut, -}; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use miniscript::{Descriptor, DescriptorPublicKey}; -use std::sync::Arc; - -type Keychain = (); -type KeychainTxGraph = IndexedTxGraph>; - -/// New tx guaranteed to have at least one output -fn new_tx(lt: u32) -> Transaction { - Transaction { - version: transaction::Version::TWO, - lock_time: absolute::LockTime::from_consensus(lt), - input: vec![], - output: vec![TxOut::NULL], - } -} - -fn spk_at_index(txout_index: &KeychainTxOutIndex, index: u32) -> ScriptBuf { - txout_index - .get_descriptor(()) - .unwrap() - .at_derivation_index(index) - .unwrap() - .script_pubkey() -} - -fn genesis_block_id() -> BlockId { - BlockId { - height: 0, - hash: constants::genesis_block(Network::Regtest).block_hash(), - } -} - -fn tip_block_id() -> BlockId { - BlockId { - height: 100, - hash: BlockHash::all_zeros(), - } -} - -/// Add ancestor tx confirmed at `block_id` with `locktime` (used for uniqueness). -/// The transaction always pays 1 BTC to SPK 0. -fn add_ancestor_tx(graph: &mut KeychainTxGraph, block_id: BlockId, locktime: u32) -> OutPoint { - let spk_0 = spk_at_index(&graph.index, 0); - let tx = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(hash!("bogus"), locktime), - ..Default::default() - }], - output: vec![TxOut { - value: Amount::ONE_BTC, - script_pubkey: spk_0, - }], - ..new_tx(locktime) - }; - let txid = tx.compute_txid(); - let _ = graph.insert_tx(tx); - let _ = graph.insert_anchor( - txid, - ConfirmationBlockTime { - block_id, - confirmation_time: 100, - }, - ); - OutPoint { txid, vout: 0 } -} - -fn setup(f: F) -> (KeychainTxGraph, LocalChain) { - const DESC: &str = "tr([ab28dc00/86h/1h/0h]tpubDCdDtzAMZZrkwKBxwNcGCqe4FRydeD9rfMisoi7qLdraG79YohRfPW4YgdKQhpgASdvh612xXNY5xYzoqnyCgPbkpK4LSVcH5Xv4cK7johH/0/*)"; - let cp = CheckPoint::from_block_ids([genesis_block_id(), tip_block_id()]) - .expect("blocks must be chronological"); - let chain = LocalChain::from_tip(cp).unwrap(); - - let (desc, _) = - >::parse_descriptor(&Secp256k1::new(), DESC).unwrap(); - let mut index = KeychainTxOutIndex::new(10); - index.insert_descriptor((), desc).unwrap(); - let mut tx_graph = KeychainTxGraph::new(index); - - f(&mut tx_graph, &chain); - (tx_graph, chain) -} - -fn run_list_canonical_txs(tx_graph: &KeychainTxGraph, chain: &LocalChain, exp_txs: usize) { - let txs = tx_graph - .graph() - .list_canonical_txs(chain, chain.tip().block_id()); - assert_eq!(txs.count(), exp_txs); -} - -fn run_filter_chain_txouts(tx_graph: &KeychainTxGraph, chain: &LocalChain, exp_txos: usize) { - let utxos = tx_graph.graph().filter_chain_txouts( - chain, - chain.tip().block_id(), - tx_graph.index.outpoints().clone(), - ); - assert_eq!(utxos.count(), exp_txos); -} - -fn run_filter_chain_unspents(tx_graph: &KeychainTxGraph, chain: &LocalChain, exp_utxos: usize) { - let utxos = tx_graph.graph().filter_chain_unspents( - chain, - chain.tip().block_id(), - 
tx_graph.index.outpoints().clone(), - ); - assert_eq!(utxos.count(), exp_utxos); -} - -pub fn many_conflicting_unconfirmed(c: &mut Criterion) { - const CONFLICTING_TX_COUNT: u32 = 2100; - let (tx_graph, chain) = black_box(setup(|tx_graph, _chain| { - let previous_output = add_ancestor_tx(tx_graph, tip_block_id(), 0); - // Create conflicting txs that spend from `previous_output`. - let spk_1 = spk_at_index(&tx_graph.index, 1); - for i in 1..=CONFLICTING_TX_COUNT { - let tx = Transaction { - input: vec![TxIn { - previous_output, - ..Default::default() - }], - output: vec![TxOut { - value: Amount::ONE_BTC - Amount::from_sat(i as u64 * 10), - script_pubkey: spk_1.clone(), - }], - ..new_tx(i) - }; - let mut update = TxUpdate::default(); - update.seen_ats = [(tx.compute_txid(), i as u64)].into(); - update.txs = vec![Arc::new(tx)]; - let _ = tx_graph.apply_update(update); - } - })); - c.bench_function("many_conflicting_unconfirmed::list_canonical_txs", { - let (tx_graph, chain) = (tx_graph.clone(), chain.clone()); - move |b| b.iter(|| run_list_canonical_txs(&tx_graph, &chain, 2)) - }); - c.bench_function("many_conflicting_unconfirmed::filter_chain_txouts", { - let (tx_graph, chain) = (tx_graph.clone(), chain.clone()); - move |b| b.iter(|| run_filter_chain_txouts(&tx_graph, &chain, 2)) - }); - c.bench_function("many_conflicting_unconfirmed::filter_chain_unspents", { - let (tx_graph, chain) = (tx_graph.clone(), chain.clone()); - move |b| b.iter(|| run_filter_chain_unspents(&tx_graph, &chain, 1)) - }); -} - -pub fn many_chained_unconfirmed(c: &mut Criterion) { - const TX_CHAIN_COUNT: u32 = 2100; - let (tx_graph, chain) = black_box(setup(|tx_graph, _chain| { - let mut previous_output = add_ancestor_tx(tx_graph, tip_block_id(), 0); - // Create a chain of unconfirmed txs where each subsequent tx spends the output of the - // previous one. - for i in 0..TX_CHAIN_COUNT { - // Create tx. - let tx = Transaction { - input: vec![TxIn { - previous_output, - ..Default::default() - }], - ..new_tx(i) - }; - let txid = tx.compute_txid(); - let mut update = TxUpdate::default(); - update.seen_ats = [(txid, i as u64)].into(); - update.txs = vec![Arc::new(tx)]; - let _ = tx_graph.apply_update(update); - // Store the next prevout. 
- previous_output = OutPoint::new(txid, 0); - } - })); - c.bench_function("many_chained_unconfirmed::list_canonical_txs", { - let (tx_graph, chain) = (tx_graph.clone(), chain.clone()); - move |b| b.iter(|| run_list_canonical_txs(&tx_graph, &chain, 2101)) - }); - c.bench_function("many_chained_unconfirmed::filter_chain_txouts", { - let (tx_graph, chain) = (tx_graph.clone(), chain.clone()); - move |b| b.iter(|| run_filter_chain_txouts(&tx_graph, &chain, 1)) - }); - c.bench_function("many_chained_unconfirmed::filter_chain_unspents", { - let (tx_graph, chain) = (tx_graph.clone(), chain.clone()); - move |b| b.iter(|| run_filter_chain_unspents(&tx_graph, &chain, 0)) - }); -} - -pub fn nested_conflicts(c: &mut Criterion) { - const CONFLICTS_PER_OUTPUT: usize = 3; - const GRAPH_DEPTH: usize = 7; - let (tx_graph, chain) = black_box(setup(|tx_graph, _chain| { - let mut prev_ops = core::iter::once(add_ancestor_tx(tx_graph, tip_block_id(), 0)) - .collect::>(); - for depth in 1..GRAPH_DEPTH { - for previous_output in core::mem::take(&mut prev_ops) { - for conflict_i in 1..=CONFLICTS_PER_OUTPUT { - let mut last_seen = depth * conflict_i; - if last_seen % 2 == 0 { - last_seen /= 2; - } - let ((_, script_pubkey), _) = tx_graph.index.next_unused_spk(()).unwrap(); - let value = - Amount::ONE_BTC - Amount::from_sat(depth as u64 * 200 - conflict_i as u64); - let tx = Transaction { - input: vec![TxIn { - previous_output, - ..Default::default() - }], - output: vec![TxOut { - value, - script_pubkey, - }], - ..new_tx(conflict_i as _) - }; - let txid = tx.compute_txid(); - prev_ops.push(OutPoint::new(txid, 0)); - let _ = tx_graph.insert_seen_at(txid, last_seen as _); - let _ = tx_graph.insert_tx(tx); - } - } - } - })); - c.bench_function("nested_conflicts_unconfirmed::list_canonical_txs", { - let (tx_graph, chain) = (tx_graph.clone(), chain.clone()); - move |b| b.iter(|| run_list_canonical_txs(&tx_graph, &chain, GRAPH_DEPTH)) - }); - c.bench_function("nested_conflicts_unconfirmed::filter_chain_txouts", { - let (tx_graph, chain) = (tx_graph.clone(), chain.clone()); - move |b| b.iter(|| run_filter_chain_txouts(&tx_graph, &chain, GRAPH_DEPTH)) - }); - c.bench_function("nested_conflicts_unconfirmed::filter_chain_unspents", { - let (tx_graph, chain) = (tx_graph.clone(), chain.clone()); - move |b| b.iter(|| run_filter_chain_unspents(&tx_graph, &chain, 1)) - }); -} - -criterion_group!( - benches, - many_conflicting_unconfirmed, - many_chained_unconfirmed, - nested_conflicts, -); -criterion_main!(benches); diff --git a/crates/chain/src/balance.rs b/crates/chain/src/balance.rs deleted file mode 100644 index 2d4dc9db..00000000 --- a/crates/chain/src/balance.rs +++ /dev/null @@ -1,53 +0,0 @@ -use bitcoin::Amount; - -/// Balance, differentiated into various categories. -#[derive(Debug, PartialEq, Eq, Clone, Default)] -#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -pub struct Balance { - /// All coinbase outputs not yet matured - pub immature: Amount, - /// Unconfirmed UTXOs generated by a wallet tx - pub trusted_pending: Amount, - /// Unconfirmed UTXOs received from an external wallet - pub untrusted_pending: Amount, - /// Confirmed and immediately spendable balance - pub confirmed: Amount, -} - -impl Balance { - /// Get sum of trusted_pending and confirmed coins. - /// - /// This is the balance you can spend right now that shouldn't get cancelled via another party - /// double spending it. 
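
The two totals documented here combine the four balance categories in fixed ways. A brief sketch with made-up amounts, using only the public `Balance` fields and methods shown in this file:

```rust
use bdk_chain::bitcoin::Amount;
use bdk_chain::Balance;

fn main() {
    // Hypothetical amounts, chosen only to show how the categories combine.
    let balance = Balance {
        immature: Amount::from_sat(50_000),
        trusted_pending: Amount::from_sat(10_000),
        untrusted_pending: Amount::from_sat(5_000),
        confirmed: Amount::from_sat(100_000),
    };
    // trusted_spendable = confirmed + trusted_pending
    assert_eq!(balance.trusted_spendable(), Amount::from_sat(110_000));
    // total = all four categories summed
    assert_eq!(balance.total(), Amount::from_sat(165_000));
}
```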
- pub fn trusted_spendable(&self) -> Amount { - self.confirmed + self.trusted_pending - } - - /// Get the whole balance visible to the wallet. - pub fn total(&self) -> Amount { - self.confirmed + self.trusted_pending + self.untrusted_pending + self.immature - } -} - -impl core::fmt::Display for Balance { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "{{ immature: {}, trusted_pending: {}, untrusted_pending: {}, confirmed: {} }}", - self.immature, self.trusted_pending, self.untrusted_pending, self.confirmed - ) - } -} - -impl core::ops::Add for Balance { - type Output = Self; - - fn add(self, other: Self) -> Self { - Self { - immature: self.immature + other.immature, - trusted_pending: self.trusted_pending + other.trusted_pending, - untrusted_pending: self.untrusted_pending + other.untrusted_pending, - confirmed: self.confirmed + other.confirmed, - } - } -} diff --git a/crates/chain/src/canonical_iter.rs b/crates/chain/src/canonical_iter.rs deleted file mode 100644 index 99550ab7..00000000 --- a/crates/chain/src/canonical_iter.rs +++ /dev/null @@ -1,251 +0,0 @@ -use crate::collections::{hash_map, HashMap, HashSet, VecDeque}; -use crate::tx_graph::{TxAncestors, TxDescendants}; -use crate::{Anchor, ChainOracle, TxGraph}; -use alloc::boxed::Box; -use alloc::collections::BTreeSet; -use alloc::sync::Arc; -use bdk_core::BlockId; -use bitcoin::{Transaction, Txid}; - -/// Iterates over canonical txs. -pub struct CanonicalIter<'g, A, C> { - tx_graph: &'g TxGraph, - chain: &'g C, - chain_tip: BlockId, - - unprocessed_txs_with_anchors: - Box, &'g BTreeSet)> + 'g>, - unprocessed_txs_with_last_seens: Box, u64)> + 'g>, - unprocessed_txs_left_over: VecDeque<(Txid, Arc, u32)>, - - canonical: HashMap, CanonicalReason)>, - not_canonical: HashSet, - - queue: VecDeque, -} - -impl<'g, A: Anchor, C: ChainOracle> CanonicalIter<'g, A, C> { - /// Constructs [`CanonicalIter`]. - pub fn new(tx_graph: &'g TxGraph, chain: &'g C, chain_tip: BlockId) -> Self { - let anchors = tx_graph.all_anchors(); - let pending_anchored = Box::new( - tx_graph - .txids_by_descending_anchor_height() - .filter_map(|(_, txid)| Some((txid, tx_graph.get_tx(txid)?, anchors.get(&txid)?))), - ); - let pending_last_seen = Box::new( - tx_graph - .txids_by_descending_last_seen() - .filter_map(|(last_seen, txid)| Some((txid, tx_graph.get_tx(txid)?, last_seen))), - ); - Self { - tx_graph, - chain, - chain_tip, - unprocessed_txs_with_anchors: pending_anchored, - unprocessed_txs_with_last_seens: pending_last_seen, - unprocessed_txs_left_over: VecDeque::new(), - canonical: HashMap::new(), - not_canonical: HashSet::new(), - queue: VecDeque::new(), - } - } - - /// Whether this transaction is already canonicalized. - fn is_canonicalized(&self, txid: Txid) -> bool { - self.canonical.contains_key(&txid) || self.not_canonical.contains(&txid) - } - - /// Mark transaction as canonical if it is anchored in the best chain. 
-    fn scan_anchors(
-        &mut self,
-        txid: Txid,
-        tx: Arc<Transaction>,
-        anchors: &BTreeSet<A>,
-    ) -> Result<(), C::Error> {
-        for anchor in anchors {
-            let in_chain_opt = self
-                .chain
-                .is_block_in_chain(anchor.anchor_block(), self.chain_tip)?;
-            if in_chain_opt == Some(true) {
-                self.mark_canonical(txid, tx, CanonicalReason::from_anchor(anchor.clone()));
-                return Ok(());
-            }
-        }
-        // cannot determine
-        self.unprocessed_txs_left_over.push_back((
-            txid,
-            tx,
-            anchors
-                .iter()
-                .last()
-                .expect(
-                    "tx taken from `unprocessed_txs_with_anchors` so it must at least have an anchor",
-                )
-                .confirmation_height_upper_bound(),
-        ));
-        Ok(())
-    }
-
-    /// Marks a transaction and its ancestors as canonical. Mark all conflicts of these as
-    /// `not_canonical`.
-    fn mark_canonical(&mut self, txid: Txid, tx: Arc<Transaction>, reason: CanonicalReason<A>) {
-        let starting_txid = txid;
-        let mut is_root = true;
-        TxAncestors::new_include_root(
-            self.tx_graph,
-            tx,
-            |_: usize, tx: Arc<Transaction>| -> Option<()> {
-                let this_txid = tx.compute_txid();
-                let this_reason = if is_root {
-                    is_root = false;
-                    reason.clone()
-                } else {
-                    reason.to_transitive(starting_txid)
-                };
-                let canonical_entry = match self.canonical.entry(this_txid) {
-                    // Already visited tx before, exit early.
-                    hash_map::Entry::Occupied(_) => return None,
-                    hash_map::Entry::Vacant(entry) => entry,
-                };
-                // Any conflicts with a canonical tx can be added to `not_canonical`. Descendants
-                // of `not_canonical` txs can also be added to `not_canonical`.
-                for (_, conflict_txid) in self.tx_graph.direct_conflicts(&tx) {
-                    TxDescendants::new_include_root(
-                        self.tx_graph,
-                        conflict_txid,
-                        |_: usize, txid: Txid| -> Option<()> {
-                            if self.not_canonical.insert(txid) {
-                                Some(())
-                            } else {
-                                None
-                            }
-                        },
-                    )
-                    .run_until_finished()
-                }
-                canonical_entry.insert((tx, this_reason));
-                self.queue.push_back(this_txid);
-                Some(())
-            },
-        )
-        .run_until_finished()
-    }
-}
-
-impl<A: Anchor, C: ChainOracle> Iterator for CanonicalIter<'_, A, C> {
-    type Item = Result<(Txid, Arc<Transaction>, CanonicalReason<A>), C::Error>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        loop {
-            if let Some(txid) = self.queue.pop_front() {
-                let (tx, reason) = self
-                    .canonical
-                    .get(&txid)
-                    .cloned()
-                    .expect("reason must exist");
-                return Some(Ok((txid, tx, reason)));
-            }
-
-            if let Some((txid, tx, anchors)) = self.unprocessed_txs_with_anchors.next() {
-                if !self.is_canonicalized(txid) {
-                    if let Err(err) = self.scan_anchors(txid, tx, anchors) {
-                        return Some(Err(err));
-                    }
-                }
-                continue;
-            }
-
-            if let Some((txid, tx, last_seen)) = self.unprocessed_txs_with_last_seens.next() {
-                if !self.is_canonicalized(txid) {
-                    let observed_in = ObservedIn::Mempool(last_seen);
-                    self.mark_canonical(txid, tx, CanonicalReason::from_observed_in(observed_in));
-                }
-                continue;
-            }
-
-            if let Some((txid, tx, height)) = self.unprocessed_txs_left_over.pop_front() {
-                if !self.is_canonicalized(txid) {
-                    let observed_in = ObservedIn::Block(height);
-                    self.mark_canonical(txid, tx, CanonicalReason::from_observed_in(observed_in));
-                }
-                continue;
-            }
-
-            return None;
-        }
-    }
-}
-
-/// Represents when and where a transaction was last observed.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
-pub enum ObservedIn {
-    /// The transaction was last observed in a block of the given height.
-    Block(u32),
-    /// The transaction was last observed in the mempool at the given unix timestamp.
-    Mempool(u64),
-}
-
-/// The reason why a transaction is canonical.
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum CanonicalReason<A> {
-    /// This transaction is anchored in the best chain by `A`, and therefore canonical.
-    Anchor {
-        /// The anchor that anchored the transaction in the chain.
-        anchor: A,
-        /// Whether the anchor is of the transaction's descendant.
-        descendant: Option<Txid>,
-    },
-    /// This transaction does not conflict with any other transaction with a more recent
-    /// [`ObservedIn`] value or one that is anchored in the best chain.
-    ObservedIn {
-        /// The [`ObservedIn`] value of the transaction.
-        observed_in: ObservedIn,
-        /// Whether the [`ObservedIn`] value is of the transaction's descendant.
-        descendant: Option<Txid>,
-    },
-}
-
-impl<A: Clone> CanonicalReason<A> {
-    /// Constructs a [`CanonicalReason`] from an `anchor`.
-    pub fn from_anchor(anchor: A) -> Self {
-        Self::Anchor {
-            anchor,
-            descendant: None,
-        }
-    }
-
-    /// Constructs a [`CanonicalReason`] from an `observed_in` value.
-    pub fn from_observed_in(observed_in: ObservedIn) -> Self {
-        Self::ObservedIn {
-            observed_in,
-            descendant: None,
-        }
-    }
-
-    /// Construct a new [`CanonicalReason`] from the original which is transitive to `descendant`.
-    ///
-    /// This signals that either the [`ObservedIn`] or [`Anchor`] value belongs to the
-    /// transaction's descendant, but is transitively relevant.
-    pub fn to_transitive(&self, descendant: Txid) -> Self {
-        match self {
-            CanonicalReason::Anchor { anchor, .. } => Self::Anchor {
-                anchor: anchor.clone(),
-                descendant: Some(descendant),
-            },
-            CanonicalReason::ObservedIn { observed_in, .. } => Self::ObservedIn {
-                observed_in: *observed_in,
-                descendant: Some(descendant),
-            },
-        }
-    }
-
-    /// This signals that either the [`ObservedIn`] or [`Anchor`] value belongs to the
-    /// transaction's descendant.
-    pub fn descendant(&self) -> &Option<Txid> {
-        match self {
-            CanonicalReason::Anchor { descendant, .. } => descendant,
-            CanonicalReason::ObservedIn { descendant, .. } => descendant,
-        }
-    }
-}
diff --git a/crates/chain/src/chain_data.rs b/crates/chain/src/chain_data.rs
deleted file mode 100644
index a4d764c0..00000000
--- a/crates/chain/src/chain_data.rs
+++ /dev/null
@@ -1,195 +0,0 @@
-use bitcoin::{constants::COINBASE_MATURITY, OutPoint, TxOut, Txid};
-
-use crate::Anchor;
-
-/// Represents the observed position of some chain data.
-///
-/// The generic `A` should be an [`Anchor`] implementation.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, core::hash::Hash)]
-#[cfg_attr(
-    feature = "serde",
-    derive(serde::Deserialize, serde::Serialize),
-    serde(bound(
-        deserialize = "A: Ord + serde::Deserialize<'de>",
-        serialize = "A: Ord + serde::Serialize",
-    ))
-)]
-pub enum ChainPosition<A> {
-    /// The chain data is confirmed as it is anchored in the best chain by `A`.
-    Confirmed {
-        /// The [`Anchor`].
-        anchor: A,
-        /// Whether the chain data is anchored transitively by a child transaction.
-        ///
-        /// If the value is `Some`, it means we have incomplete data. We can only deduce that the
-        /// chain data is confirmed at a block equal to or lower than the block referenced by `A`.
-        transitively: Option<Txid>,
-    },
-    /// The chain data is not confirmed.
-    Unconfirmed {
-        /// When the chain data was last seen in the mempool.
-        ///
-        /// This value will be `None` if the chain data was never seen in the mempool and only
-        /// seen in a conflicting chain.
-        last_seen: Option<u64>,
-    },
-}
-
-impl<A> ChainPosition<A> {
-    /// Returns whether [`ChainPosition`] is confirmed or not.
-    pub fn is_confirmed(&self) -> bool {
-        matches!(self, Self::Confirmed { ..
}) - } -} - -impl ChainPosition<&A> { - /// Maps a [`ChainPosition<&A>`] into a [`ChainPosition`] by cloning the contents. - pub fn cloned(self) -> ChainPosition { - match self { - ChainPosition::Confirmed { - anchor, - transitively, - } => ChainPosition::Confirmed { - anchor: anchor.clone(), - transitively, - }, - ChainPosition::Unconfirmed { last_seen } => ChainPosition::Unconfirmed { last_seen }, - } - } -} - -impl ChainPosition { - /// Determines the upper bound of the confirmation height. - pub fn confirmation_height_upper_bound(&self) -> Option { - match self { - ChainPosition::Confirmed { anchor, .. } => { - Some(anchor.confirmation_height_upper_bound()) - } - ChainPosition::Unconfirmed { .. } => None, - } - } -} - -/// A `TxOut` with as much data as we can retrieve about it -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct FullTxOut { - /// The position of the transaction in `outpoint` in the overall chain. - pub chain_position: ChainPosition, - /// The location of the `TxOut`. - pub outpoint: OutPoint, - /// The `TxOut`. - pub txout: TxOut, - /// The txid and chain position of the transaction (if any) that has spent this output. - pub spent_by: Option<(ChainPosition, Txid)>, - /// Whether this output is on a coinbase transaction. - pub is_on_coinbase: bool, -} - -impl FullTxOut { - /// Whether the `txout` is considered mature. - /// - /// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this - /// method may return false-negatives. In other words, interpreted confirmation count may be - /// less than the actual value. - /// - /// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound - pub fn is_mature(&self, tip: u32) -> bool { - if self.is_on_coinbase { - let conf_height = match self.chain_position.confirmation_height_upper_bound() { - Some(height) => height, - None => { - debug_assert!(false, "coinbase tx can never be unconfirmed"); - return false; - } - }; - let age = tip.saturating_sub(conf_height); - if age + 1 < COINBASE_MATURITY { - return false; - } - } - - true - } - - /// Whether the utxo is/was/will be spendable with chain `tip`. - /// - /// This method does not take into account the lock time. - /// - /// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this - /// method may return false-negatives. In other words, interpreted confirmation count may be - /// less than the actual value. 
-    ///
-    /// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound
-    pub fn is_confirmed_and_spendable(&self, tip: u32) -> bool {
-        if !self.is_mature(tip) {
-            return false;
-        }
-
-        let conf_height = match self.chain_position.confirmation_height_upper_bound() {
-            Some(height) => height,
-            None => return false,
-        };
-        if conf_height > tip {
-            return false;
-        }
-
-        // if the spending tx is confirmed within tip height, the txout is no longer spendable
-        if let Some(spend_height) = self
-            .spent_by
-            .as_ref()
-            .and_then(|(pos, _)| pos.confirmation_height_upper_bound())
-        {
-            if spend_height <= tip {
-                return false;
-            }
-        }
-
-        true
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use bdk_core::ConfirmationBlockTime;
-
-    use crate::BlockId;
-
-    use super::*;
-
-    #[test]
-    fn chain_position_ord() {
-        let unconf1 = ChainPosition::<ConfirmationBlockTime>::Unconfirmed {
-            last_seen: Some(10),
-        };
-        let unconf2 = ChainPosition::<ConfirmationBlockTime>::Unconfirmed {
-            last_seen: Some(20),
-        };
-        let conf1 = ChainPosition::Confirmed {
-            anchor: ConfirmationBlockTime {
-                confirmation_time: 20,
-                block_id: BlockId {
-                    height: 9,
-                    ..Default::default()
-                },
-            },
-            transitively: None,
-        };
-        let conf2 = ChainPosition::Confirmed {
-            anchor: ConfirmationBlockTime {
-                confirmation_time: 15,
-                block_id: BlockId {
-                    height: 12,
-                    ..Default::default()
-                },
-            },
-            transitively: None,
-        };
-
-        assert!(unconf2 > unconf1, "higher last_seen means higher ord");
-        assert!(unconf1 > conf1, "unconfirmed is higher ord than confirmed");
-        assert!(
-            conf2 > conf1,
-            "a higher confirmation height means higher ord"
-        );
-    }
-}
diff --git a/crates/chain/src/chain_oracle.rs b/crates/chain/src/chain_oracle.rs
deleted file mode 100644
index 08e697ed..00000000
--- a/crates/chain/src/chain_oracle.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-use crate::BlockId;
-
-/// Represents a service that tracks the blockchain.
-///
-/// The main method is [`is_block_in_chain`] which determines whether a given block of [`BlockId`]
-/// is an ancestor of the `chain_tip`.
-///
-/// [`is_block_in_chain`]: Self::is_block_in_chain
-pub trait ChainOracle {
-    /// Error type.
-    type Error: core::fmt::Debug;
-
-    /// Determines whether `block` of [`BlockId`] exists as an ancestor of `chain_tip`.
-    ///
-    /// If `None` is returned, it means the implementation cannot determine whether `block`
-    /// exists under `chain_tip`.
-    fn is_block_in_chain(
-        &self,
-        block: BlockId,
-        chain_tip: BlockId,
-    ) -> Result<Option<bool>, Self::Error>;
-
-    /// Get the best chain's chain tip.
-    fn get_chain_tip(&self) -> Result<BlockId, Self::Error>;
-}
diff --git a/crates/chain/src/descriptor_ext.rs b/crates/chain/src/descriptor_ext.rs
deleted file mode 100644
index 9cf14758..00000000
--- a/crates/chain/src/descriptor_ext.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-use crate::miniscript::{Descriptor, DescriptorPublicKey};
-use bitcoin::hashes::{hash_newtype, sha256, Hash};
-use bitcoin::Amount;
-
-hash_newtype! {
-    /// Represents the unique ID of a descriptor.
-    ///
-    /// This is useful for having a fixed-length unique representation of a descriptor,
-    /// in particular, we use it to persist application state changes related to the
-    /// descriptor without having to re-write the whole descriptor each time.
-    ///
-    pub struct DescriptorId(pub sha256::Hash);
-}
-
-/// A trait to extend the functionality of a miniscript descriptor.
-pub trait DescriptorExt {
-    /// Returns the minimum [`Amount`] at which an output is broadcastable.
-    /// Panics if the descriptor wildcard is hardened.
- fn dust_value(&self) -> Amount; - - /// Returns the descriptor ID, calculated as the sha256 hash of the spk derived from the - /// descriptor at index 0. - fn descriptor_id(&self) -> DescriptorId; -} - -impl DescriptorExt for Descriptor { - fn dust_value(&self) -> Amount { - self.at_derivation_index(0) - .expect("descriptor can't have hardened derivation") - .script_pubkey() - .minimal_non_dust() - } - - fn descriptor_id(&self) -> DescriptorId { - let spk = self.at_derivation_index(0).unwrap().script_pubkey(); - DescriptorId(sha256::Hash::hash(spk.as_bytes())) - } -} diff --git a/crates/chain/src/example_utils.rs b/crates/chain/src/example_utils.rs deleted file mode 100644 index c71b6cfe..00000000 --- a/crates/chain/src/example_utils.rs +++ /dev/null @@ -1,29 +0,0 @@ -#![allow(unused)] -use crate::BlockId; -use alloc::vec::Vec; -use bitcoin::{ - consensus, - hashes::{hex::FromHex, Hash}, - Transaction, -}; - -pub const RAW_TX_1: &str = "0200000000010116d6174da7183d70d0a7d4dc314d517a7d135db79ad63515028b293a76f4f9d10000000000feffffff023a21fc8350060000160014531c405e1881ef192294b8813631e258bf98ea7a1027000000000000225120a60869f0dbcf1dc659c9cecbaf8050135ea9e8cdc487053f1dc6880949dc684c024730440220591b1a172a122da49ba79a3e79f98aaa03fd7a372f9760da18890b6a327e6010022013e82319231da6c99abf8123d7c07e13cf9bd8d76e113e18dc452e5024db156d012102318a2d558b2936c52e320decd6d92a88d7f530be91b6fe0af5caf41661e77da3ef2e0100"; -pub const RAW_TX_2: &str = "02000000000101a688607020cfae91a61e7c516b5ef1264d5d77f17200c3866826c6c808ebf1620000000000feffffff021027000000000000225120a60869f0dbcf1dc659c9cecbaf8050135ea9e8cdc487053f1dc6880949dc684c20fd48ff530600001600146886c525e41d4522042bd0b159dfbade2504a6bb024730440220740ff7e665cd20565d4296b549df8d26b941be3f1e3af89a0b60e50c0dbeb69a02206213ab7030cf6edc6c90d4ccf33010644261e029950a688dc0b1a9ebe6ddcc5a012102f2ac6b396a97853cb6cd62242c8ae4842024742074475023532a51e9c53194253e760100"; -pub const RAW_TX_3: &str = "0200000000010135d67ee47b557e68b8c6223958f597381965ed719f1207ee2b9e20432a24a5dc0100000000feffffff021027000000000000225120a82f29944d65b86ae6b5e5cc75e294ead6c59391a1edc5e016e3498c67fc7bbb62215a5055060000160014070df7671dea67a50c4799a744b5c9be8f4bac690247304402207ebf8d29f71fd03e7e6977b3ea78ca5fcc5c49a42ae822348fc401862fdd766c02201d7e4ff0684ecb008b6142f36ead1b0b4d615524c4f58c261113d361f4427e25012103e6a75e2fab85e5ecad641afc4ffba7222f998649d9f18cac92f0fcc8618883b3ee760100"; -pub const RAW_TX_4: &str = "02000000000101d00e8f76ed313e19b339ee293c0f52b0325c95e24c8f3966fa353fb2bedbcf580100000000feffffff021027000000000000225120882d74e5d0572d5a816cef0041a96b6c1de832f6f9676d9605c44d5e9a97d3dc9cda55fe53060000160014852b5864b8edd42fab4060c87f818e50780865ff0247304402201dccbb9bed7fba924b6d249c5837cc9b37470c0e3d8fbea77cb59baba3efe6fa0220700cc170916913b9bfc2bc0fefb6af776e8b542c561702f136cddc1c7aa43141012103acec3fc79dbbca745815c2a807dc4e81010c80e308e84913f59cb42a275dad97f3760100"; - -pub fn tx_from_hex(s: &str) -> Transaction { - let raw = Vec::from_hex(s).expect("data must be in hex"); - consensus::deserialize(raw.as_slice()).expect("must deserialize") -} - -pub fn new_hash(s: &str) -> H { - ::hash(s.as_bytes()) -} - -pub fn new_block_id(height: u32, hash: &str) -> BlockId { - BlockId { - height, - hash: new_hash(hash), - } -} diff --git a/crates/chain/src/indexed_tx_graph.rs b/crates/chain/src/indexed_tx_graph.rs deleted file mode 100644 index bcd6ac3f..00000000 --- a/crates/chain/src/indexed_tx_graph.rs +++ /dev/null @@ -1,435 +0,0 @@ -//! Contains the [`IndexedTxGraph`] and associated types. 
Refer to the -//! [`IndexedTxGraph`] documentation for more. -use core::{ - convert::Infallible, - fmt::{self, Debug}, - ops::RangeBounds, -}; - -use alloc::{sync::Arc, vec::Vec}; -use bitcoin::{Block, OutPoint, ScriptBuf, Transaction, TxOut, Txid}; - -use crate::{ - spk_txout::SpkTxOutIndex, - tx_graph::{self, TxGraph}, - Anchor, BlockId, ChainOracle, Indexer, Merge, TxPosInBlock, -}; - -/// The [`IndexedTxGraph`] combines a [`TxGraph`] and an [`Indexer`] implementation. -/// -/// It ensures that [`TxGraph`] and [`Indexer`] are updated atomically. -#[derive(Debug, Clone)] -pub struct IndexedTxGraph { - /// Transaction index. - pub index: I, - graph: TxGraph, -} - -impl Default for IndexedTxGraph { - fn default() -> Self { - Self { - graph: Default::default(), - index: Default::default(), - } - } -} - -impl IndexedTxGraph { - /// Construct a new [`IndexedTxGraph`] with a given `index`. - pub fn new(index: I) -> Self { - Self { - index, - graph: TxGraph::default(), - } - } - - /// Get a reference of the internal transaction graph. - pub fn graph(&self) -> &TxGraph { - &self.graph - } -} - -impl IndexedTxGraph { - /// Applies the [`ChangeSet`] to the [`IndexedTxGraph`]. - pub fn apply_changeset(&mut self, changeset: ChangeSet) { - self.index.apply_changeset(changeset.indexer); - - for tx in &changeset.tx_graph.txs { - self.index.index_tx(tx); - } - for (&outpoint, txout) in &changeset.tx_graph.txouts { - self.index.index_txout(outpoint, txout); - } - - self.graph.apply_changeset(changeset.tx_graph); - } - - /// Determines the [`ChangeSet`] between `self` and an empty [`IndexedTxGraph`]. - pub fn initial_changeset(&self) -> ChangeSet { - let graph = self.graph.initial_changeset(); - let indexer = self.index.initial_changeset(); - ChangeSet { - tx_graph: graph, - indexer, - } - } -} - -impl IndexedTxGraph -where - I::ChangeSet: Default + Merge, -{ - fn index_tx_graph_changeset( - &mut self, - tx_graph_changeset: &tx_graph::ChangeSet, - ) -> I::ChangeSet { - let mut changeset = I::ChangeSet::default(); - for added_tx in &tx_graph_changeset.txs { - changeset.merge(self.index.index_tx(added_tx)); - } - for (&added_outpoint, added_txout) in &tx_graph_changeset.txouts { - changeset.merge(self.index.index_txout(added_outpoint, added_txout)); - } - changeset - } - - /// Apply an `update` directly. - /// - /// `update` is a [`tx_graph::TxUpdate`] and the resultant changes is returned as [`ChangeSet`]. - pub fn apply_update(&mut self, update: tx_graph::TxUpdate) -> ChangeSet { - let tx_graph = self.graph.apply_update(update); - let indexer = self.index_tx_graph_changeset(&tx_graph); - ChangeSet { tx_graph, indexer } - } - - /// Insert a floating `txout` of given `outpoint`. - pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> ChangeSet { - let graph = self.graph.insert_txout(outpoint, txout); - let indexer = self.index_tx_graph_changeset(&graph); - ChangeSet { - tx_graph: graph, - indexer, - } - } - - /// Insert and index a transaction into the graph. - pub fn insert_tx>>(&mut self, tx: T) -> ChangeSet { - let tx_graph = self.graph.insert_tx(tx); - let indexer = self.index_tx_graph_changeset(&tx_graph); - ChangeSet { tx_graph, indexer } - } - - /// Insert an `anchor` for a given transaction. - pub fn insert_anchor(&mut self, txid: Txid, anchor: A) -> ChangeSet { - self.graph.insert_anchor(txid, anchor).into() - } - - /// Insert a unix timestamp of when a transaction is seen in the mempool. 
- /// - /// This is used for transaction conflict resolution in [`TxGraph`] where the transaction with - /// the later last-seen is prioritized. - pub fn insert_seen_at(&mut self, txid: Txid, seen_at: u64) -> ChangeSet { - self.graph.insert_seen_at(txid, seen_at).into() - } - - /// Inserts the given `evicted_at` for `txid`. - /// - /// The `evicted_at` timestamp represents the last known time when the transaction was observed - /// to be missing from the mempool. If `txid` was previously recorded with an earlier - /// `evicted_at` value, it is updated only if the new value is greater. - pub fn insert_evicted_at(&mut self, txid: Txid, evicted_at: u64) -> ChangeSet { - let tx_graph = self.graph.insert_evicted_at(txid, evicted_at); - ChangeSet { - tx_graph, - ..Default::default() - } - } - - /// Batch insert transactions, filtering out those that are irrelevant. - /// - /// Relevancy is determined by the [`Indexer::is_tx_relevant`] implementation of `I`. Irrelevant - /// transactions in `txs` will be ignored. `txs` do not need to be in topological order. - pub fn batch_insert_relevant>>( - &mut self, - txs: impl IntoIterator)>, - ) -> ChangeSet { - // The algorithm below allows for non-topologically ordered transactions by using two loops. - // This is achieved by: - // 1. insert all txs into the index. If they are irrelevant then that's fine it will just - // not store anything about them. - // 2. decide whether to insert them into the graph depending on whether `is_tx_relevant` - // returns true or not. (in a second loop). - let txs = txs - .into_iter() - .map(|(tx, anchors)| (>>::into(tx), anchors)) - .collect::>(); - - let mut indexer = I::ChangeSet::default(); - for (tx, _) in &txs { - indexer.merge(self.index.index_tx(tx)); - } - - let mut tx_graph = tx_graph::ChangeSet::default(); - for (tx, anchors) in txs { - if self.index.is_tx_relevant(&tx) { - let txid = tx.compute_txid(); - tx_graph.merge(self.graph.insert_tx(tx.clone())); - for anchor in anchors { - tx_graph.merge(self.graph.insert_anchor(txid, anchor)); - } - } - } - - ChangeSet { tx_graph, indexer } - } - - /// Batch insert unconfirmed transactions, filtering out those that are irrelevant. - /// - /// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`. - /// Irrelevant transactions in `txs` will be ignored. - /// - /// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The - /// *last seen* communicates when the transaction is last seen in the mempool which is used for - /// conflict-resolution in [`TxGraph`] (refer to [`TxGraph::insert_seen_at`] for details). - pub fn batch_insert_relevant_unconfirmed>>( - &mut self, - unconfirmed_txs: impl IntoIterator, - ) -> ChangeSet { - // The algorithm below allows for non-topologically ordered transactions by using two loops. - // This is achieved by: - // 1. insert all txs into the index. If they are irrelevant then that's fine it will just - // not store anything about them. - // 2. decide whether to insert them into the graph depending on whether `is_tx_relevant` - // returns true or not. (in a second loop). 
- let txs = unconfirmed_txs - .into_iter() - .map(|(tx, last_seen)| (>>::into(tx), last_seen)) - .collect::>(); - - let mut indexer = I::ChangeSet::default(); - for (tx, _) in &txs { - indexer.merge(self.index.index_tx(tx)); - } - - let graph = self.graph.batch_insert_unconfirmed( - txs.into_iter() - .filter(|(tx, _)| self.index.is_tx_relevant(tx)) - .map(|(tx, seen_at)| (tx.clone(), seen_at)), - ); - - ChangeSet { - tx_graph: graph, - indexer, - } - } - - /// Batch insert unconfirmed transactions. - /// - /// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The - /// *last seen* communicates when the transaction is last seen in the mempool which is used for - /// conflict-resolution in [`TxGraph`] (refer to [`TxGraph::insert_seen_at`] for details). - /// - /// To filter out irrelevant transactions, use [`batch_insert_relevant_unconfirmed`] instead. - /// - /// [`batch_insert_relevant_unconfirmed`]: IndexedTxGraph::batch_insert_relevant_unconfirmed - pub fn batch_insert_unconfirmed>>( - &mut self, - txs: impl IntoIterator, - ) -> ChangeSet { - let graph = self.graph.batch_insert_unconfirmed(txs); - let indexer = self.index_tx_graph_changeset(&graph); - ChangeSet { - tx_graph: graph, - indexer, - } - } -} - -/// Methods are available if the anchor (`A`) can be created from [`TxPosInBlock`]. -impl IndexedTxGraph -where - I::ChangeSet: Default + Merge, - for<'b> A: Anchor + From>, - I: Indexer, -{ - /// Batch insert all transactions of the given `block` of `height`, filtering out those that are - /// irrelevant. - /// - /// Each inserted transaction's anchor will be constructed using [`TxPosInBlock`]. - /// - /// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`. - /// Irrelevant transactions in `txs` will be ignored. - pub fn apply_block_relevant( - &mut self, - block: &Block, - height: u32, - ) -> ChangeSet { - let block_id = BlockId { - hash: block.block_hash(), - height, - }; - let mut changeset = ChangeSet::::default(); - for (tx_pos, tx) in block.txdata.iter().enumerate() { - changeset.indexer.merge(self.index.index_tx(tx)); - if self.index.is_tx_relevant(tx) { - let txid = tx.compute_txid(); - let anchor = TxPosInBlock { - block, - block_id, - tx_pos, - } - .into(); - changeset.tx_graph.merge(self.graph.insert_tx(tx.clone())); - changeset - .tx_graph - .merge(self.graph.insert_anchor(txid, anchor)); - } - } - changeset - } - - /// Batch insert all transactions of the given `block` of `height`. - /// - /// Each inserted transaction's anchor will be constructed using [`TxPosInBlock`]. - /// - /// To only insert relevant transactions, use [`apply_block_relevant`] instead. - /// - /// [`apply_block_relevant`]: IndexedTxGraph::apply_block_relevant - pub fn apply_block(&mut self, block: Block, height: u32) -> ChangeSet { - let block_id = BlockId { - hash: block.block_hash(), - height, - }; - let mut graph = tx_graph::ChangeSet::default(); - for (tx_pos, tx) in block.txdata.iter().enumerate() { - let anchor = TxPosInBlock { - block: &block, - block_id, - tx_pos, - } - .into(); - graph.merge(self.graph.insert_anchor(tx.compute_txid(), anchor)); - graph.merge(self.graph.insert_tx(tx.clone())); - } - let indexer = self.index_tx_graph_changeset(&graph); - ChangeSet { - tx_graph: graph, - indexer, - } - } -} - -impl IndexedTxGraph -where - A: Anchor, -{ - /// List txids that are expected to exist under the given spks. 
-    ///
-    /// This is used to fill [`SyncRequestBuilder::expected_spk_txids`](bdk_core::spk_client::SyncRequestBuilder::expected_spk_txids).
-    ///
-    /// The spk index range can be constrained with `range`.
-    ///
-    /// # Error
-    ///
-    /// If the [`ChainOracle`] implementation (`chain`) fails, an error will be returned with the
-    /// returned item.
-    ///
-    /// If the [`ChainOracle`] is infallible,
-    /// [`list_expected_spk_txids`](Self::list_expected_spk_txids) can be used instead.
-    pub fn try_list_expected_spk_txids<'a, C, I>(
-        &'a self,
-        chain: &'a C,
-        chain_tip: BlockId,
-        spk_index_range: impl RangeBounds<I> + 'a,
-    ) -> impl Iterator<Item = Result<(ScriptBuf, Txid), C::Error>> + 'a
-    where
-        C: ChainOracle,
-        X: AsRef<SpkTxOutIndex<I>> + 'a,
-        I: fmt::Debug + Clone + Ord + 'a,
-    {
-        self.graph
-            .try_list_expected_spk_txids(chain, chain_tip, &self.index, spk_index_range)
-    }
-
-    /// List txids that are expected to exist under the given spks.
-    ///
-    /// This is the infallible version of
-    /// [`try_list_expected_spk_txids`](Self::try_list_expected_spk_txids).
-    pub fn list_expected_spk_txids<'a, C, I>(
-        &'a self,
-        chain: &'a C,
-        chain_tip: BlockId,
-        spk_index_range: impl RangeBounds<I> + 'a,
-    ) -> impl Iterator<Item = (ScriptBuf, Txid)> + 'a
-    where
-        C: ChainOracle<Error = Infallible>,
-        X: AsRef<SpkTxOutIndex<I>> + 'a,
-        I: fmt::Debug + Clone + Ord + 'a,
-    {
-        self.try_list_expected_spk_txids(chain, chain_tip, spk_index_range)
-            .map(|r| r.expect("infallible"))
-    }
-}
-
-impl<A, I> AsRef<TxGraph<A>> for IndexedTxGraph<A, I> {
-    fn as_ref(&self) -> &TxGraph<A> {
-        &self.graph
-    }
-}
-
-/// Represents changes to an [`IndexedTxGraph`].
-#[derive(Clone, Debug, PartialEq)]
-#[cfg_attr(
-    feature = "serde",
-    derive(serde::Deserialize, serde::Serialize),
-    serde(bound(
-        deserialize = "A: Ord + serde::Deserialize<'de>, IA: serde::Deserialize<'de>",
-        serialize = "A: Ord + serde::Serialize, IA: serde::Serialize"
-    ))
-)]
-#[must_use]
-pub struct ChangeSet<A, IA> {
-    /// [`TxGraph`] changeset.
-    pub tx_graph: tx_graph::ChangeSet<A>,
-    /// [`Indexer`] changeset.
-    pub indexer: IA,
-}
-
-impl<A, IA: Default> Default for ChangeSet<A, IA> {
-    fn default() -> Self {
-        Self {
-            tx_graph: Default::default(),
-            indexer: Default::default(),
-        }
-    }
-}
-
-impl<A: Ord, IA: Merge> Merge for ChangeSet<A, IA> {
-    fn merge(&mut self, other: Self) {
-        self.tx_graph.merge(other.tx_graph);
-        self.indexer.merge(other.indexer);
-    }
-
-    fn is_empty(&self) -> bool {
-        self.tx_graph.is_empty() && self.indexer.is_empty()
-    }
-}
-
-impl<A, IA: Default> From<tx_graph::ChangeSet<A>> for ChangeSet<A, IA> {
-    fn from(graph: tx_graph::ChangeSet<A>) -> Self {
-        Self {
-            tx_graph: graph,
-            ..Default::default()
-        }
-    }
-}
-
-#[cfg(feature = "miniscript")]
-impl<A> From<crate::keychain_txout::ChangeSet> for ChangeSet<A, crate::keychain_txout::ChangeSet> {
-    fn from(indexer: crate::keychain_txout::ChangeSet) -> Self {
-        Self {
-            tx_graph: Default::default(),
-            indexer,
-        }
-    }
-}
diff --git a/crates/chain/src/indexer.rs b/crates/chain/src/indexer.rs
deleted file mode 100644
index 22e83981..00000000
--- a/crates/chain/src/indexer.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-//! [`Indexer`] provides utilities for indexing transaction data.
-
-use bitcoin::{OutPoint, Transaction, TxOut};
-
-#[cfg(feature = "miniscript")]
-pub mod keychain_txout;
-pub mod spk_txout;
-
-/// Utilities for indexing transaction data.
-///
-/// Types which implement this trait can be used to construct an [`IndexedTxGraph`].
-/// This trait's methods should rarely be called directly.
-///
-/// [`IndexedTxGraph`]: crate::IndexedTxGraph
-pub trait Indexer {
-    /// The resultant "changeset" when new transaction data is indexed.
-    type ChangeSet;
-
-    /// Scan and index the given `outpoint` and `txout`.
-    fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet;
-
-    /// Scans a transaction for relevant outpoints, which are stored and indexed internally.
-    fn index_tx(&mut self, tx: &Transaction) -> Self::ChangeSet;
-
-    /// Apply changeset to itself.
-    fn apply_changeset(&mut self, changeset: Self::ChangeSet);
-
-    /// Determines the [`ChangeSet`](Indexer::ChangeSet) between `self` and an empty [`Indexer`].
-    fn initial_changeset(&self) -> Self::ChangeSet;
-
-    /// Determines whether the transaction should be included in the index.
-    fn is_tx_relevant(&self, tx: &Transaction) -> bool;
-}
diff --git a/crates/chain/src/indexer/keychain_txout.rs b/crates/chain/src/indexer/keychain_txout.rs
deleted file mode 100644
index bdea4b82..00000000
--- a/crates/chain/src/indexer/keychain_txout.rs
+++ /dev/null
@@ -1,921 +0,0 @@
-//! [`KeychainTxOutIndex`] controls how script pubkeys are revealed for multiple keychains and
-//! indexes [`TxOut`]s with them.
-
-use crate::{
-    collections::*,
-    miniscript::{Descriptor, DescriptorPublicKey},
-    spk_client::{FullScanRequestBuilder, SyncRequestBuilder},
-    spk_iter::BIP32_MAX_INDEX,
-    spk_txout::SpkTxOutIndex,
-    DescriptorExt, DescriptorId, Indexed, Indexer, KeychainIndexed, SpkIterator,
-};
-use alloc::{borrow::ToOwned, vec::Vec};
-use bitcoin::{Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxOut, Txid};
-use core::{
-    fmt::Debug,
-    ops::{Bound, RangeBounds},
-};
-
-use crate::Merge;
-
-/// The default lookahead for a [`KeychainTxOutIndex`].
-pub const DEFAULT_LOOKAHEAD: u32 = 25;
-
-/// [`KeychainTxOutIndex`] controls how script pubkeys are revealed for multiple keychains, and
-/// indexes [`TxOut`]s with them.
-///
-/// A single keychain is a chain of script pubkeys derived from a single [`Descriptor`]. Keychains
-/// are identified using the `K` generic. Script pubkeys are identified by the keychain that they
-/// are derived from `K`, as well as the derivation index `u32`.
-///
-/// There is a strict 1-to-1 relationship between descriptors and keychains. Each keychain has one
-/// and only one descriptor and each descriptor has one and only one keychain. The
-/// [`insert_descriptor`] method will return an error if you try to violate this invariant. This
-/// rule is a proxy for a stronger rule: no two descriptors should produce the same script pubkey.
-/// Having two descriptors produce the same script pubkey should cause whichever keychain derives
-/// the script pubkey first to be the effective owner of it, but you should not rely on this
-/// behaviour. ⚠ It is up to you, the developer, not to violate this invariant.
-///
-/// # Revealed script pubkeys
-///
-/// Tracking how script pubkeys are revealed is useful for collecting chain data. For example, if
-/// the user has requested 5 script pubkeys (to receive money with), we only need to use those
-/// script pubkeys to scan for chain data.
-///
-/// Call [`reveal_to_target`] or [`reveal_next_spk`] to reveal more script pubkeys.
-/// Call [`revealed_keychain_spks`] or [`revealed_spks`] to iterate through revealed script
-/// pubkeys.
-///
-/// # Lookahead script pubkeys
-///
-/// When a user first recovers a wallet (i.e. from a recovery phrase and/or descriptor), we will
-/// NOT have knowledge of which script pubkeys are revealed. So when we index a transaction or
-/// txout (using [`index_tx`]/[`index_txout`]) we scan the txouts against script pubkeys derived
-/// above the last revealed index. These additionally-derived script pubkeys are called the
-/// lookahead.
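
A short hedged sketch of the lookahead behaviour just described. This is illustrative only (not code from the deleted file) and assumes the `bdk_chain` APIs `new`, `insert_descriptor`, `index_txout`, and `last_revealed_index` behave as shown in this file; the descriptor is the placeholder from the synopsis that follows, and the null outpoint is a stand-in:

```rust
use bdk_chain::bitcoin::{secp256k1::Secp256k1, Amount, OutPoint, TxOut};
use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex;
use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey};
use bdk_chain::Indexer;

fn main() {
    let secp = Secp256k1::signing_only();
    // Placeholder descriptor (the same one the synopsis below uses).
    let (desc, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(
        &secp,
        "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)",
    )
    .expect("valid descriptor");

    // Lookahead of 3: indices 0..=2 are derived and matched against even
    // though no script pubkey has been revealed yet.
    let mut index = KeychainTxOutIndex::<&str>::new(3);
    let _ = index.insert_descriptor("external", desc.clone()).unwrap();
    assert_eq!(index.last_revealed_index("external"), None);

    // A txout paying to the (unrevealed) spk at index 2 is still recognized,
    // and indexing it marks indices up to 2 as revealed.
    let spk_2 = desc.at_derivation_index(2).unwrap().script_pubkey();
    let _changeset = index.index_txout(
        OutPoint::null(), // placeholder outpoint
        &TxOut {
            value: Amount::from_sat(1_000),
            script_pubkey: spk_2,
        },
    );
    assert_eq!(index.last_revealed_index("external"), Some(2));
}
```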
-///
-/// The [`KeychainTxOutIndex`] is constructed with the `lookahead` and cannot be altered. See
-/// [`DEFAULT_LOOKAHEAD`] for the value used in the `Default` implementation. Use [`new`] to set a
-/// custom `lookahead`.
-///
-/// # Unbounded script pubkey iterator
-///
-/// For script-pubkey-based chain sources (such as Electrum/Esplora), an initial scan is best done
-/// by iterating through derived script pubkeys one by one and requesting transaction histories
-/// for each script pubkey. We stop after a pre-determined number of consecutive script pubkeys
-/// (the "stop gap") return empty histories. An unbounded script pubkey iterator is useful to pass
-/// to such a chain source because it doesn't require holding a reference to the index.
-///
-/// Call [`unbounded_spk_iter`] to get an unbounded script pubkey iterator for a given keychain.
-/// Call [`all_unbounded_spk_iters`] to get unbounded script pubkey iterators for all keychains.
-///
-/// # Change sets
-///
-/// Methods that can update the last revealed index or add keychains will return [`ChangeSet`] to
-/// report these changes. This should be persisted for future recovery.
-///
-/// ## Synopsis
-///
-/// ```
-/// use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex;
-/// # use bdk_chain::{ miniscript::{Descriptor, DescriptorPublicKey} };
-/// # use core::str::FromStr;
-///
-/// // imagine our service has internal and external addresses but also addresses for users
-/// #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
-/// enum MyKeychain {
-///     External,
-///     Internal,
-///     MyAppUser {
-///         user_id: u32
-///     }
-/// }
-///
-/// let mut txout_index = KeychainTxOutIndex::<MyKeychain>::default();
-///
-/// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
-/// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
-/// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
-/// # let (descriptor_42, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/2/*)").unwrap();
-/// let _ = txout_index.insert_descriptor(MyKeychain::External, external_descriptor)?;
-/// let _ = txout_index.insert_descriptor(MyKeychain::Internal, internal_descriptor)?;
-/// let _ = txout_index.insert_descriptor(MyKeychain::MyAppUser { user_id: 42 }, descriptor_42)?;
-///
-/// let new_spk_for_user = txout_index.reveal_next_spk(MyKeychain::MyAppUser{ user_id: 42 });
-/// # Ok::<_, bdk_chain::indexer::keychain_txout::InsertDescriptorError<_>>(())
-/// ```
-///
-/// [`Ord`]: core::cmp::Ord
-/// [`SpkTxOutIndex`]: crate::spk_txout::SpkTxOutIndex
-/// [`Descriptor`]: crate::miniscript::Descriptor
-/// [`reveal_to_target`]: Self::reveal_to_target
-/// [`reveal_next_spk`]: Self::reveal_next_spk
-/// [`revealed_keychain_spks`]: Self::revealed_keychain_spks
-/// [`revealed_spks`]: Self::revealed_spks
-/// [`index_tx`]: Self::index_tx
-/// [`index_txout`]: Self::index_txout
-/// [`new`]: Self::new
-/// [`unbounded_spk_iter`]: Self::unbounded_spk_iter
-/// [`all_unbounded_spk_iters`]: Self::all_unbounded_spk_iters
-/// [`outpoints`]: Self::outpoints
-/// [`txouts`]: Self::txouts
-/// [`unused_spks`]: Self::unused_spks
-///
[`insert_descriptor`]: Self::insert_descriptor -#[derive(Clone, Debug)] -pub struct KeychainTxOutIndex<K> { - inner: SpkTxOutIndex<(K, u32)>, - keychain_to_descriptor_id: BTreeMap<K, DescriptorId>, - descriptor_id_to_keychain: HashMap<DescriptorId, K>, - descriptors: HashMap<DescriptorId, Descriptor<DescriptorPublicKey>>, - last_revealed: HashMap<DescriptorId, u32>, - lookahead: u32, -} - -impl<K> Default for KeychainTxOutIndex<K> { - fn default() -> Self { - Self::new(DEFAULT_LOOKAHEAD) - } -} - -impl<K> AsRef<SpkTxOutIndex<(K, u32)>> for KeychainTxOutIndex<K> { - fn as_ref(&self) -> &SpkTxOutIndex<(K, u32)> { - &self.inner - } -} - -impl<K: Clone + Ord + Debug> Indexer for KeychainTxOutIndex<K> { - type ChangeSet = ChangeSet; - - fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet { - let mut changeset = ChangeSet::default(); - if let Some((keychain, index)) = self.inner.scan_txout(outpoint, txout).cloned() { - let did = self - .keychain_to_descriptor_id - .get(&keychain) - .expect("invariant"); - if self.last_revealed.get(did) < Some(&index) { - self.last_revealed.insert(*did, index); - changeset.last_revealed.insert(*did, index); - self.replenish_inner_index(*did, &keychain, self.lookahead); - } - } - changeset - } - - fn index_tx(&mut self, tx: &bitcoin::Transaction) -> Self::ChangeSet { - let mut changeset = ChangeSet::default(); - let txid = tx.compute_txid(); - for (op, txout) in tx.output.iter().enumerate() { - changeset.merge(self.index_txout(OutPoint::new(txid, op as u32), txout)); - } - changeset - } - - fn initial_changeset(&self) -> Self::ChangeSet { - ChangeSet { - last_revealed: self.last_revealed.clone().into_iter().collect(), - } - } - - fn apply_changeset(&mut self, changeset: Self::ChangeSet) { - self.apply_changeset(changeset) - } - - fn is_tx_relevant(&self, tx: &bitcoin::Transaction) -> bool { - self.inner.is_relevant(tx) - } -} - -impl<K> KeychainTxOutIndex<K> { - /// Construct a [`KeychainTxOutIndex`] with the given `lookahead`. - /// - /// The `lookahead` is the number of script pubkeys to derive and cache from the internal - /// descriptors over and above the last revealed script index. Without a lookahead the index - /// will miss outputs you own when processing transactions whose output script pubkeys lie - /// beyond the last revealed index. In certain situations, such as when performing an initial - /// scan of the blockchain during wallet import, it may be uncertain or unknown what the index - /// of the last revealed script pubkey actually is. - /// - /// Refer to [struct-level docs](KeychainTxOutIndex) for more about `lookahead`. - pub fn new(lookahead: u32) -> Self { - Self { - inner: SpkTxOutIndex::default(), - keychain_to_descriptor_id: Default::default(), - descriptors: Default::default(), - descriptor_id_to_keychain: Default::default(), - last_revealed: Default::default(), - lookahead, - } - } - - /// Get a reference to the internal [`SpkTxOutIndex`]. - pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> { - &self.inner - } -} - -/// Methods that are *re-exposed* from the internal [`SpkTxOutIndex`]. -impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> { - /// Get the set of indexed outpoints, corresponding to tracked keychains. - pub fn outpoints(&self) -> &BTreeSet<KeychainIndexed<OutPoint>> { - self.inner.outpoints() - } - - /// Iterate over known txouts that spend to tracked script pubkeys. - pub fn txouts( - &self, - ) -> impl DoubleEndedIterator<Item = KeychainIndexed<(OutPoint, &TxOut)>> + ExactSizeIterator - { - self.inner - .txouts() - .map(|(index, op, txout)| (index.clone(), (op, txout))) - } - - /// Finds all txouts on a transaction that has previously been scanned and indexed.
- pub fn txouts_in_tx( - &self, - txid: Txid, - ) -> impl DoubleEndedIterator> { - self.inner - .txouts_in_tx(txid) - .map(|(index, op, txout)| (index.clone(), (op, txout))) - } - - /// Return the [`TxOut`] of `outpoint` if it has been indexed, and if it corresponds to a - /// tracked keychain. - /// - /// The associated keychain and keychain index of the txout's spk is also returned. - /// - /// This calls [`SpkTxOutIndex::txout`] internally. - pub fn txout(&self, outpoint: OutPoint) -> Option> { - self.inner - .txout(outpoint) - .map(|(index, txout)| (index.clone(), txout)) - } - - /// Return the script that exists under the given `keychain`'s `index`. - /// - /// This calls [`SpkTxOutIndex::spk_at_index`] internally. - pub fn spk_at_index(&self, keychain: K, index: u32) -> Option { - self.inner.spk_at_index(&(keychain.clone(), index)) - } - - /// Returns the keychain and keychain index associated with the spk. - /// - /// This calls [`SpkTxOutIndex::index_of_spk`] internally. - pub fn index_of_spk(&self, script: ScriptBuf) -> Option<&(K, u32)> { - self.inner.index_of_spk(script) - } - - /// Returns whether the spk under the `keychain`'s `index` has been used. - /// - /// Here, "unused" means that after the script pubkey was stored in the index, the index has - /// never scanned a transaction output with it. - /// - /// This calls [`SpkTxOutIndex::is_used`] internally. - pub fn is_used(&self, keychain: K, index: u32) -> bool { - self.inner.is_used(&(keychain, index)) - } - - /// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output - /// with it. - /// - /// This only has an effect when the `index` had been added to `self` already and was unused. - /// - /// Returns whether the spk under the given `keychain` and `index` is successfully - /// marked as used. Returns false either when there is no descriptor under the given - /// keychain, or when the spk is already marked as used. - /// - /// This is useful when you want to reserve a script pubkey for something but don't want to add - /// the transaction output using it to the index yet. Other callers will consider `index` on - /// `keychain` used until you call [`unmark_used`]. - /// - /// This calls [`SpkTxOutIndex::mark_used`] internally. - /// - /// [`unmark_used`]: Self::unmark_used - pub fn mark_used(&mut self, keychain: K, index: u32) -> bool { - self.inner.mark_used(&(keychain, index)) - } - - /// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into - /// `unused`. - /// - /// Note that if `self` has scanned an output with this script pubkey, then this will have no - /// effect. - /// - /// This calls [`SpkTxOutIndex::unmark_used`] internally. - /// - /// [`mark_used`]: Self::mark_used - pub fn unmark_used(&mut self, keychain: K, index: u32) -> bool { - self.inner.unmark_used(&(keychain, index)) - } - - /// Computes the total value transfer effect `tx` has on the script pubkeys belonging to the - /// keychains in `range`. Value is *sent* when a script pubkey in the `range` is on an input and - /// *received* when it is on an output. For `sent` to be computed correctly, the output being - /// spent must have already been scanned by the index. Calculating received just uses the - /// [`Transaction`] outputs directly, so it will be correct even if it has not been scanned. 
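A standalone sketch of that accounting rule, with string script pubkeys and output ids in place of the real `bitcoin` types (all names here are hypothetical): `sent` is only correct when the spent txout was indexed beforehand, while `received` reads the transaction outputs directly.

```rust
use std::collections::{HashMap, HashSet};

fn sent_and_received(
    inputs: &[&str],                      // ids of the outputs this tx spends
    outputs: &[(&str, u64)],              // (spk, value) of this tx's outputs
    indexed: &HashMap<&str, (&str, u64)>, // previously scanned txouts: id -> (spk, value)
    ours: &HashSet<&str>,                 // tracked script pubkeys
) -> (u64, u64) {
    let mut sent = 0;
    let mut received = 0;
    for op in inputs {
        // A spend only counts if the spent txout was already indexed.
        if let Some((spk, value)) = indexed.get(op) {
            if ours.contains(spk) {
                sent += value;
            }
        }
    }
    for (spk, value) in outputs {
        // Receives need no prior scan: the outputs carry spk and value.
        if ours.contains(spk) {
            received += value;
        }
    }
    (sent, received)
}

fn main() {
    let indexed = HashMap::from([("tx0:0", ("spk_a", 50_000))]);
    let ours = HashSet::from(["spk_a", "spk_b"]);
    let tx_outputs = [("spk_b", 30_000), ("spk_x", 15_000)];
    let (sent, received) = sent_and_received(&["tx0:0"], &tx_outputs, &indexed, &ours);
    assert_eq!((sent, received), (50_000, 30_000));
    // The net value is received - sent: a net spend of 20_000 here.
}
```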
- pub fn sent_and_received( - &self, - tx: &Transaction, - range: impl RangeBounds, - ) -> (Amount, Amount) { - self.inner - .sent_and_received(tx, self.map_to_inner_bounds(range)) - } - - /// Computes the net value that this transaction gives to the script pubkeys in the index and - /// *takes* from the transaction outputs in the index. Shorthand for calling - /// [`sent_and_received`] and subtracting sent from received. - /// - /// This calls [`SpkTxOutIndex::net_value`] internally. - /// - /// [`sent_and_received`]: Self::sent_and_received - pub fn net_value(&self, tx: &Transaction, range: impl RangeBounds) -> SignedAmount { - self.inner.net_value(tx, self.map_to_inner_bounds(range)) - } -} - -impl KeychainTxOutIndex { - /// Return all keychains and their corresponding descriptors. - pub fn keychains( - &self, - ) -> impl DoubleEndedIterator)> + ExactSizeIterator + '_ - { - self.keychain_to_descriptor_id - .iter() - .map(|(k, did)| (k.clone(), self.descriptors.get(did).expect("invariant"))) - } - - /// Insert a descriptor with a keychain associated to it. - /// - /// Adding a descriptor means you will be able to derive new script pubkeys under it and the - /// txout index will discover transaction outputs with those script pubkeys (once they've been - /// derived and added to the index). - /// - /// keychain <-> descriptor is a one-to-one mapping that cannot be changed. Attempting to do so - /// will return a [`InsertDescriptorError`]. - /// - /// [`KeychainTxOutIndex`] will prevent you from inserting two descriptors which derive the same - /// script pubkey at index 0, but it's up to you to ensure that descriptors don't collide at - /// other indices. If they do nothing catastrophic happens at the `KeychainTxOutIndex` level - /// (one keychain just becomes the defacto owner of that spk arbitrarily) but this may have - /// subtle implications up the application stack like one UTXO being missing from one keychain - /// because it has been assigned to another which produces the same script pubkey. - pub fn insert_descriptor( - &mut self, - keychain: K, - descriptor: Descriptor, - ) -> Result> { - let did = descriptor.descriptor_id(); - if !self.keychain_to_descriptor_id.contains_key(&keychain) - && !self.descriptor_id_to_keychain.contains_key(&did) - { - self.descriptors.insert(did, descriptor.clone()); - self.keychain_to_descriptor_id.insert(keychain.clone(), did); - self.descriptor_id_to_keychain.insert(did, keychain.clone()); - self.replenish_inner_index(did, &keychain, self.lookahead); - return Ok(true); - } - - if let Some(existing_desc_id) = self.keychain_to_descriptor_id.get(&keychain) { - let descriptor = self.descriptors.get(existing_desc_id).expect("invariant"); - if *existing_desc_id != did { - return Err(InsertDescriptorError::KeychainAlreadyAssigned { - existing_assignment: descriptor.clone(), - keychain, - }); - } - } - - if let Some(existing_keychain) = self.descriptor_id_to_keychain.get(&did) { - let descriptor = self.descriptors.get(&did).expect("invariant").clone(); - - if *existing_keychain != keychain { - return Err(InsertDescriptorError::DescriptorAlreadyAssigned { - existing_assignment: existing_keychain.clone(), - descriptor, - }); - } - } - - Ok(false) - } - - /// Gets the descriptor associated with the keychain. Returns `None` if the keychain doesn't - /// have a descriptor associated with it. 
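The two mirrored maps behind that one-to-one invariant can be modelled without descriptors at all. `Registry` below is a hypothetical stand-in keyed by strings; like the deleted `insert_descriptor`, it returns `Ok(true)` for a brand-new pair, `Ok(false)` when the exact same pair is re-inserted, and an error when either side would be reassigned.

```rust
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum InsertError {
    KeychainAlreadyAssigned,
    DescriptorAlreadyAssigned,
}

#[derive(Default)]
struct Registry {
    keychain_to_desc: HashMap<String, String>,
    desc_to_keychain: HashMap<String, String>,
}

impl Registry {
    fn insert(&mut self, keychain: &str, desc: &str) -> Result<bool, InsertError> {
        match (
            self.keychain_to_desc.get(keychain),
            self.desc_to_keychain.get(desc),
        ) {
            // Neither side is taken: record the pair in both directions.
            (None, None) => {
                self.keychain_to_desc.insert(keychain.into(), desc.into());
                self.desc_to_keychain.insert(desc.into(), keychain.into());
                Ok(true)
            }
            (Some(d), _) if d != desc => Err(InsertError::KeychainAlreadyAssigned),
            (_, Some(k)) if k != keychain => Err(InsertError::DescriptorAlreadyAssigned),
            // Same (keychain, descriptor) pair as before: a harmless no-op.
            _ => Ok(false),
        }
    }
}

fn main() {
    let mut reg = Registry::default();
    assert_eq!(reg.insert("external", "wpkh(A/0/*)"), Ok(true));
    assert_eq!(reg.insert("external", "wpkh(A/0/*)"), Ok(false));
    // Reassigning either side of the mapping is rejected.
    assert_eq!(
        reg.insert("external", "wpkh(A/1/*)"),
        Err(InsertError::KeychainAlreadyAssigned)
    );
    assert_eq!(
        reg.insert("change", "wpkh(A/0/*)"),
        Err(InsertError::DescriptorAlreadyAssigned)
    );
}
```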
- pub fn get_descriptor(&self, keychain: K) -> Option<&Descriptor> { - let did = self.keychain_to_descriptor_id.get(&keychain)?; - self.descriptors.get(did) - } - - /// Get the lookahead setting. - /// - /// Refer to [`new`] for more information on the `lookahead`. - /// - /// [`new`]: Self::new - pub fn lookahead(&self) -> u32 { - self.lookahead - } - - /// Store lookahead scripts until `target_index` (inclusive). - /// - /// This does not change the global `lookahead` setting. - pub fn lookahead_to_target(&mut self, keychain: K, target_index: u32) { - if let Some((next_index, _)) = self.next_index(keychain.clone()) { - let temp_lookahead = (target_index + 1) - .checked_sub(next_index) - .filter(|&index| index > 0); - - if let Some(temp_lookahead) = temp_lookahead { - self.replenish_inner_index_keychain(keychain, temp_lookahead); - } - } - } - - fn replenish_inner_index_did(&mut self, did: DescriptorId, lookahead: u32) { - if let Some(keychain) = self.descriptor_id_to_keychain.get(&did).cloned() { - self.replenish_inner_index(did, &keychain, lookahead); - } - } - - fn replenish_inner_index_keychain(&mut self, keychain: K, lookahead: u32) { - if let Some(did) = self.keychain_to_descriptor_id.get(&keychain) { - self.replenish_inner_index(*did, &keychain, lookahead); - } - } - - /// Syncs the state of the inner spk index after changes to a keychain - fn replenish_inner_index(&mut self, did: DescriptorId, keychain: &K, lookahead: u32) { - let descriptor = self.descriptors.get(&did).expect("invariant"); - let next_store_index = self - .inner - .all_spks() - .range(&(keychain.clone(), u32::MIN)..=&(keychain.clone(), u32::MAX)) - .last() - .map_or(0, |((_, index), _)| *index + 1); - let next_reveal_index = self.last_revealed.get(&did).map_or(0, |v| *v + 1); - for (new_index, new_spk) in - SpkIterator::new_with_range(descriptor, next_store_index..next_reveal_index + lookahead) - { - let _inserted = self - .inner - .insert_spk((keychain.clone(), new_index), new_spk); - debug_assert!(_inserted, "replenish lookahead: must not have existing spk: keychain={:?}, lookahead={}, next_store_index={}, next_reveal_index={}", keychain, lookahead, next_store_index, next_reveal_index); - } - } - - /// Get an unbounded spk iterator over a given `keychain`. Returns `None` if the provided - /// keychain doesn't exist - pub fn unbounded_spk_iter( - &self, - keychain: K, - ) -> Option>> { - let descriptor = self.get_descriptor(keychain)?.clone(); - Some(SpkIterator::new(descriptor)) - } - - /// Get unbounded spk iterators for all keychains. - pub fn all_unbounded_spk_iters( - &self, - ) -> BTreeMap>> { - self.keychain_to_descriptor_id - .iter() - .map(|(k, did)| { - ( - k.clone(), - SpkIterator::new(self.descriptors.get(did).expect("invariant").clone()), - ) - }) - .collect() - } - - /// Iterate over revealed spks of keychains in `range` - pub fn revealed_spks( - &self, - range: impl RangeBounds, - ) -> impl Iterator> + '_ { - let start = range.start_bound(); - let end = range.end_bound(); - let mut iter_last_revealed = self - .keychain_to_descriptor_id - .range((start, end)) - .map(|(k, did)| (k, self.last_revealed.get(did).cloned())); - let mut iter_spks = self - .inner - .all_spks() - .range(self.map_to_inner_bounds((start, end))); - let mut current_keychain = iter_last_revealed.next(); - // The reason we need a tricky algorithm is because of the "lookahead" feature which means - // that some of the spks in the SpkTxoutIndex will not have been revealed yet. 
So we need to - // filter out those spks that are above the last_revealed for that keychain. To do this we - // iterate through the last_revealed for each keychain and the spks for each keychain in - // tandem. This minimizes BTreeMap queries. - core::iter::from_fn(move || loop { - let ((keychain, index), spk) = iter_spks.next()?; - // We need to find the last revealed that matches the current spk we are considering so - // we skip ahead. - while current_keychain?.0 < keychain { - current_keychain = iter_last_revealed.next(); - } - let (current_keychain, last_revealed) = current_keychain?; - - if current_keychain == keychain && Some(*index) <= last_revealed { - break Some(((keychain.clone(), *index), spk.clone())); - } - }) - } - - /// Iterate over revealed spks of the given `keychain` with ascending indices. - /// - /// This is a double ended iterator so you can easily reverse it to get an iterator where - /// the script pubkeys that were most recently revealed are first. - pub fn revealed_keychain_spks( - &self, - keychain: K, - ) -> impl DoubleEndedIterator> + '_ { - let end = self - .last_revealed_index(keychain.clone()) - .map(|v| v + 1) - .unwrap_or(0); - self.inner - .all_spks() - .range((keychain.clone(), 0)..(keychain.clone(), end)) - .map(|((_, index), spk)| (*index, spk.clone())) - } - - /// Iterate over revealed, but unused, spks of all keychains. - pub fn unused_spks( - &self, - ) -> impl DoubleEndedIterator> + Clone + '_ { - self.keychain_to_descriptor_id.keys().flat_map(|keychain| { - self.unused_keychain_spks(keychain.clone()) - .map(|(i, spk)| ((keychain.clone(), i), spk.clone())) - }) - } - - /// Iterate over revealed, but unused, spks of the given `keychain`. - /// Returns an empty iterator if the provided keychain doesn't exist. - pub fn unused_keychain_spks( - &self, - keychain: K, - ) -> impl DoubleEndedIterator> + Clone + '_ { - let end = match self.keychain_to_descriptor_id.get(&keychain) { - Some(did) => self.last_revealed.get(did).map(|v| *v + 1).unwrap_or(0), - None => 0, - }; - - self.inner - .unused_spks((keychain.clone(), 0)..(keychain.clone(), end)) - .map(|((_, i), spk)| (*i, spk)) - } - - /// Get the next derivation index for `keychain`. The next index is the index after the last revealed - /// derivation index. - /// - /// The second field in the returned tuple represents whether the next derivation index is new. - /// There are two scenarios where the next derivation index is reused (not new): - /// - /// 1. The keychain's descriptor has no wildcard, and a script has already been revealed. - /// 2. The number of revealed scripts has already reached 2^31 (refer to BIP-32). - /// - /// Not checking the second field of the tuple may result in address reuse. - /// - /// Returns None if the provided `keychain` doesn't exist. - pub fn next_index(&self, keychain: K) -> Option<(u32, bool)> { - let did = self.keychain_to_descriptor_id.get(&keychain)?; - let last_index = self.last_revealed.get(did).cloned(); - let descriptor = self.descriptors.get(did).expect("invariant"); - - // we can only get the next index if the wildcard exists. - let has_wildcard = descriptor.has_wildcard(); - - Some(match last_index { - // if there is no index, next_index is always 0. - None => (0, true), - // descriptors without wildcards can only have one index. - Some(_) if !has_wildcard => (0, false), - // derivation index must be < 2^31 (BIP-32). 
- Some(index) if index > BIP32_MAX_INDEX => { - unreachable!("index is out of bounds") - } - Some(index) if index == BIP32_MAX_INDEX => (index, false), - // get the next derivation index. - Some(index) => (index + 1, true), - }) - } - - /// Get the last derivation index that is revealed for each keychain. - /// - /// Keychains with no revealed indices will not be included in the returned [`BTreeMap`]. - pub fn last_revealed_indices(&self) -> BTreeMap { - self.last_revealed - .iter() - .filter_map(|(desc_id, index)| { - let keychain = self.descriptor_id_to_keychain.get(desc_id)?; - Some((keychain.clone(), *index)) - }) - .collect() - } - - /// Get the last derivation index revealed for `keychain`. Returns None if the keychain doesn't - /// exist, or if the keychain doesn't have any revealed scripts. - pub fn last_revealed_index(&self, keychain: K) -> Option { - let descriptor_id = self.keychain_to_descriptor_id.get(&keychain)?; - self.last_revealed.get(descriptor_id).cloned() - } - - /// Convenience method to call [`Self::reveal_to_target`] on multiple keychains. - pub fn reveal_to_target_multi(&mut self, keychains: &BTreeMap) -> ChangeSet { - let mut changeset = ChangeSet::default(); - - for (keychain, &index) in keychains { - if let Some((_, new_changeset)) = self.reveal_to_target(keychain.clone(), index) { - changeset.merge(new_changeset); - } - } - - changeset - } - - /// Reveals script pubkeys of the `keychain`'s descriptor **up to and including** the - /// `target_index`. - /// - /// If the `target_index` cannot be reached (due to the descriptor having no wildcard and/or - /// the `target_index` is in the hardened index range), this method will make a best-effort and - /// reveal up to the last possible index. - /// - /// This returns list of newly revealed indices (alongside their scripts) and a - /// [`ChangeSet`], which reports updates to the latest revealed index. If no new script - /// pubkeys are revealed, then both of these will be empty. - /// - /// Returns None if the provided `keychain` doesn't exist. - #[must_use] - pub fn reveal_to_target( - &mut self, - keychain: K, - target_index: u32, - ) -> Option<(Vec>, ChangeSet)> { - let mut changeset = ChangeSet::default(); - let mut spks: Vec> = vec![]; - while let Some((i, new)) = self.next_index(keychain.clone()) { - if !new || i > target_index { - break; - } - match self.reveal_next_spk(keychain.clone()) { - Some(((i, spk), change)) => { - spks.push((i, spk)); - changeset.merge(change); - } - None => break, - } - } - - Some((spks, changeset)) - } - - /// Attempts to reveal the next script pubkey for `keychain`. - /// - /// Returns the derivation index of the revealed script pubkey, the revealed script pubkey and a - /// [`ChangeSet`] which represents changes in the last revealed index (if any). - /// Returns None if the provided keychain doesn't exist. - /// - /// When a new script cannot be revealed, we return the last revealed script and an empty - /// [`ChangeSet`]. There are two scenarios when a new script pubkey cannot be derived: - /// - /// 1. The descriptor has no wildcard and already has one script revealed. - /// 2. The descriptor has already revealed scripts up to the numeric bound. - /// 3. There is no descriptor associated with the given keychain. 
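The decision table in the doc comment above compresses into a small pure function. This sketch mirrors the `match` in the deleted `next_index` (folding the unreachable `> BIP32_MAX_INDEX` arm into `>=`), assuming `BIP32_MAX_INDEX` is 2^31 - 1, the last non-hardened index under BIP-32.

```rust
/// Hardened derivation starts at 2^31, so 2^31 - 1 is the last usable
/// non-hardened index (BIP-32).
const BIP32_MAX_INDEX: u32 = (1 << 31) - 1;

/// Returns (next index, whether that index is freshly derived).
fn next_index(last_revealed: Option<u32>, has_wildcard: bool) -> (u32, bool) {
    match last_revealed {
        // nothing revealed yet: the next index is always 0
        None => (0, true),
        // a descriptor without a wildcard only ever has index 0
        Some(_) if !has_wildcard => (0, false),
        // numeric bound reached: the last index is reused
        Some(i) if i >= BIP32_MAX_INDEX => (i, false),
        // normal case: move one index forward
        Some(i) => (i + 1, true),
    }
}

fn main() {
    assert_eq!(next_index(None, true), (0, true));
    assert_eq!(next_index(Some(7), true), (8, true));
    // Ignoring the bool silently reuses an address:
    assert_eq!(next_index(Some(0), false), (0, false));
    assert_eq!(next_index(Some(BIP32_MAX_INDEX), true), (BIP32_MAX_INDEX, false));
}
```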
- pub fn reveal_next_spk(&mut self, keychain: K) -> Option<(Indexed, ChangeSet)> { - let (next_index, new) = self.next_index(keychain.clone())?; - let mut changeset = ChangeSet::default(); - - if new { - let did = self.keychain_to_descriptor_id.get(&keychain)?; - self.last_revealed.insert(*did, next_index); - changeset.last_revealed.insert(*did, next_index); - self.replenish_inner_index(*did, &keychain, self.lookahead); - } - let script = self - .inner - .spk_at_index(&(keychain.clone(), next_index)) - .expect("we just inserted it"); - Some(((next_index, script), changeset)) - } - - /// Gets the next unused script pubkey in the keychain. I.e., the script pubkey with the lowest - /// index that has not been used yet. - /// - /// This will derive and reveal a new script pubkey if no more unused script pubkeys exist. - /// - /// If the descriptor has no wildcard and already has a used script pubkey or if a descriptor - /// has used all scripts up to the derivation bounds, then the last derived script pubkey will be - /// returned. - /// - /// Returns `None` if there are no script pubkeys that have been used and no new script pubkey - /// could be revealed (see [`reveal_next_spk`] for when this happens). - /// - /// [`reveal_next_spk`]: Self::reveal_next_spk - pub fn next_unused_spk(&mut self, keychain: K) -> Option<(Indexed, ChangeSet)> { - let next_unused = self - .unused_keychain_spks(keychain.clone()) - .next() - .map(|(i, spk)| ((i, spk.to_owned()), ChangeSet::default())); - - next_unused.or_else(|| self.reveal_next_spk(keychain)) - } - - /// Iterate over all [`OutPoint`]s that have `TxOut`s with script pubkeys derived from - /// `keychain`. - pub fn keychain_outpoints( - &self, - keychain: K, - ) -> impl DoubleEndedIterator> + '_ { - self.keychain_outpoints_in_range(keychain.clone()..=keychain) - .map(|((_, i), op)| (i, op)) - } - - /// Iterate over [`OutPoint`]s that have script pubkeys derived from keychains in `range`. - pub fn keychain_outpoints_in_range<'a>( - &'a self, - range: impl RangeBounds + 'a, - ) -> impl DoubleEndedIterator> + 'a { - self.inner - .outputs_in_range(self.map_to_inner_bounds(range)) - .map(|((k, i), op)| ((k.clone(), *i), op)) - } - - fn map_to_inner_bounds(&self, bound: impl RangeBounds) -> impl RangeBounds<(K, u32)> { - let start = match bound.start_bound() { - Bound::Included(keychain) => Bound::Included((keychain.clone(), u32::MIN)), - Bound::Excluded(keychain) => Bound::Excluded((keychain.clone(), u32::MAX)), - Bound::Unbounded => Bound::Unbounded, - }; - let end = match bound.end_bound() { - Bound::Included(keychain) => Bound::Included((keychain.clone(), u32::MAX)), - Bound::Excluded(keychain) => Bound::Excluded((keychain.clone(), u32::MIN)), - Bound::Unbounded => Bound::Unbounded, - }; - - (start, end) - } - - /// Returns the highest derivation index of the `keychain` where [`KeychainTxOutIndex`] has - /// found a [`TxOut`] with it's script pubkey. - pub fn last_used_index(&self, keychain: K) -> Option { - self.keychain_outpoints(keychain).last().map(|(i, _)| i) - } - - /// Returns the highest derivation index of each keychain that [`KeychainTxOutIndex`] has found - /// a [`TxOut`] with it's script pubkey. 
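The `map_to_inner_bounds` helper above deserves a standalone look: a range over keychains becomes a range over `(keychain, index)` keys by padding the bounds with `u32::MIN` and `u32::MAX`. A minimal sketch with `String` keychains (`to_inner_bounds` is a hypothetical name):

```rust
use std::collections::BTreeMap;
use std::ops::Bound;

/// Pad a keychain bound into a (keychain, index) bound so that every index
/// of the bounded keychains falls inside the resulting range.
fn to_inner_bounds(
    start: Bound<&str>,
    end: Bound<&str>,
) -> (Bound<(String, u32)>, Bound<(String, u32)>) {
    let start = match start {
        Bound::Included(k) => Bound::Included((k.to_owned(), u32::MIN)),
        Bound::Excluded(k) => Bound::Excluded((k.to_owned(), u32::MAX)),
        Bound::Unbounded => Bound::Unbounded,
    };
    let end = match end {
        Bound::Included(k) => Bound::Included((k.to_owned(), u32::MAX)),
        Bound::Excluded(k) => Bound::Excluded((k.to_owned(), u32::MIN)),
        Bound::Unbounded => Bound::Unbounded,
    };
    (start, end)
}

fn main() {
    let mut spks = BTreeMap::new();
    for kc in ["change", "external"] {
        for i in 0..3u32 {
            spks.insert((kc.to_owned(), i), format!("{kc}_spk_{i}"));
        }
    }
    // Every index of "external", nothing from "change".
    let bounds = to_inner_bounds(Bound::Included("external"), Bound::Included("external"));
    let hits: Vec<u32> = spks.range(bounds).map(|((_, i), _)| *i).collect();
    assert_eq!(hits, vec![0, 1, 2]);
}
```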
- pub fn last_used_indices(&self) -> BTreeMap { - self.keychain_to_descriptor_id - .iter() - .filter_map(|(keychain, _)| { - self.last_used_index(keychain.clone()) - .map(|index| (keychain.clone(), index)) - }) - .collect() - } - - /// Applies the `ChangeSet` to the [`KeychainTxOutIndex`] - pub fn apply_changeset(&mut self, changeset: ChangeSet) { - for (&desc_id, &index) in &changeset.last_revealed { - let v = self.last_revealed.entry(desc_id).or_default(); - *v = index.max(*v); - self.replenish_inner_index_did(desc_id, self.lookahead); - } - } -} - -#[derive(Clone, Debug, PartialEq)] -/// Error returned from [`KeychainTxOutIndex::insert_descriptor`] -pub enum InsertDescriptorError { - /// The descriptor has already been assigned to a keychain so you can't assign it to another - DescriptorAlreadyAssigned { - /// The descriptor you have attempted to reassign - descriptor: Descriptor, - /// The keychain that the descriptor is already assigned to - existing_assignment: K, - }, - /// The keychain is already assigned to a descriptor so you can't reassign it - KeychainAlreadyAssigned { - /// The keychain that you have attempted to reassign - keychain: K, - /// The descriptor that the keychain is already assigned to - existing_assignment: Descriptor, - }, -} - -impl core::fmt::Display for InsertDescriptorError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - InsertDescriptorError::DescriptorAlreadyAssigned { - existing_assignment: existing, - descriptor, - } => { - write!( - f, - "attempt to re-assign descriptor {descriptor:?} already assigned to {existing:?}" - ) - } - InsertDescriptorError::KeychainAlreadyAssigned { - existing_assignment: existing, - keychain, - } => { - write!( - f, - "attempt to re-assign keychain {keychain:?} already assigned to {existing:?}" - ) - } - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for InsertDescriptorError {} - -/// Represents updates to the derivation index of a [`KeychainTxOutIndex`]. -/// It maps each keychain `K` to a descriptor and its last revealed index. -/// -/// It can be applied to [`KeychainTxOutIndex`] with [`apply_changeset`]. -/// -/// The `last_revealed` field is monotone in that [`merge`] will never decrease it. -/// `keychains_added` is *not* monotone, once it is set any attempt to change it is subject to the -/// same *one-to-one* keychain <-> descriptor mapping invariant as [`KeychainTxOutIndex`] itself. -/// -/// [`KeychainTxOutIndex`]: crate::keychain_txout::KeychainTxOutIndex -/// [`apply_changeset`]: crate::keychain_txout::KeychainTxOutIndex::apply_changeset -/// [`merge`]: Self::merge -#[derive(Clone, Debug, Default, PartialEq)] -#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -#[must_use] -pub struct ChangeSet { - /// Contains for each descriptor_id the last revealed index of derivation - pub last_revealed: BTreeMap, -} - -impl Merge for ChangeSet { - /// Merge another [`ChangeSet`] into self. - fn merge(&mut self, other: Self) { - // for `last_revealed`, entries of `other` will take precedence ONLY if it is greater than - // what was originally in `self`. - for (desc_id, index) in other.last_revealed { - use crate::collections::btree_map::Entry; - match self.last_revealed.entry(desc_id) { - Entry::Vacant(entry) => { - entry.insert(index); - } - Entry::Occupied(mut entry) => { - if *entry.get() < index { - entry.insert(index); - } - } - } - } - } - - /// Returns whether the changeset are empty. 
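The per-descriptor maximum in `merge` above is exactly what makes `last_revealed` monotone: replaying changesets in any order can only move indices forward. A minimal standalone sketch (`merge_last_revealed` is a hypothetical name, string keys in place of descriptor ids):

```rust
use std::collections::BTreeMap;

/// Merge `other` into `acc`, keeping the highest revealed index per key.
fn merge_last_revealed(acc: &mut BTreeMap<&str, u32>, other: BTreeMap<&str, u32>) {
    for (desc_id, index) in other {
        let entry = acc.entry(desc_id).or_insert(index);
        if *entry < index {
            *entry = index;
        }
    }
}

fn main() {
    let mut acc = BTreeMap::from([("desc_a", 5)]);
    merge_last_revealed(&mut acc, BTreeMap::from([("desc_a", 3), ("desc_b", 7)]));
    // The stale lower index for desc_a never wins; new keys are added as-is.
    assert_eq!(acc, BTreeMap::from([("desc_a", 5), ("desc_b", 7)]));
}
```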
- fn is_empty(&self) -> bool { - self.last_revealed.is_empty() - } -} - -/// Trait to extend [`SyncRequestBuilder`]. -pub trait SyncRequestBuilderExt { - /// Add [`Script`](bitcoin::Script)s that are revealed by the `indexer` of the given `spk_range` - /// that will be synced against. - fn revealed_spks_from_indexer(self, indexer: &KeychainTxOutIndex, spk_range: R) -> Self - where - R: core::ops::RangeBounds; - - /// Add [`Script`](bitcoin::Script)s that are revealed by the `indexer` but currently unused. - fn unused_spks_from_indexer(self, indexer: &KeychainTxOutIndex) -> Self; -} - -impl SyncRequestBuilderExt for SyncRequestBuilder<(K, u32)> { - fn revealed_spks_from_indexer(self, indexer: &KeychainTxOutIndex, spk_range: R) -> Self - where - R: core::ops::RangeBounds, - { - self.spks_with_indexes(indexer.revealed_spks(spk_range)) - } - - fn unused_spks_from_indexer(self, indexer: &KeychainTxOutIndex) -> Self { - self.spks_with_indexes(indexer.unused_spks()) - } -} - -/// Trait to extend [`FullScanRequestBuilder`]. -pub trait FullScanRequestBuilderExt { - /// Add spk iterators for each keychain tracked in `indexer`. - fn spks_from_indexer(self, indexer: &KeychainTxOutIndex) -> Self; -} - -impl FullScanRequestBuilderExt for FullScanRequestBuilder { - fn spks_from_indexer(mut self, indexer: &KeychainTxOutIndex) -> Self { - for (keychain, spks) in indexer.all_unbounded_spk_iters() { - self = self.spks_for_keychain(keychain, spks); - } - self - } -} diff --git a/crates/chain/src/indexer/spk_txout.rs b/crates/chain/src/indexer/spk_txout.rs deleted file mode 100644 index 6378dbb7..00000000 --- a/crates/chain/src/indexer/spk_txout.rs +++ /dev/null @@ -1,363 +0,0 @@ -//! [`SpkTxOutIndex`] is an index storing [`TxOut`]s that have a script pubkey that matches those in a list. - -use core::ops::RangeBounds; - -use crate::{ - collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap}, - Indexer, -}; -use bitcoin::{Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxOut, Txid}; - -/// An index storing [`TxOut`]s that have a script pubkey that matches those in a list. -/// -/// The basic idea is that you insert script pubkeys you care about into the index with -/// [`insert_spk`] and then when you call [`Indexer::index_tx`] or [`Indexer::index_txout`], the -/// index will look at any txouts you pass in and store and index any txouts matching one of its -/// script pubkeys. -/// -/// Each script pubkey is associated with an application-defined index script index `I`, which must be -/// [`Ord`]. Usually, this is used to associate the derivation index of the script pubkey or even a -/// combination of `(keychain, derivation_index)`. -/// -/// Note there is no harm in scanning transactions that disappear from the blockchain or were never -/// in there in the first place. `SpkTxOutIndex` is intentionally *monotone* -- you cannot delete or -/// modify txouts that have been indexed. To find out which txouts from the index are actually in the -/// chain or unspent, you must use other sources of information like a [`TxGraph`]. -/// -/// [`TxOut`]: bitcoin::TxOut -/// [`insert_spk`]: Self::insert_spk -/// [`Ord`]: core::cmp::Ord -/// [`TxGraph`]: crate::tx_graph::TxGraph -#[derive(Clone, Debug)] -pub struct SpkTxOutIndex { - /// script pubkeys ordered by index - spks: BTreeMap, - /// A reverse lookup from spk to spk index - spk_indices: HashMap, - /// The set of unused indexes. - unused: BTreeSet, - /// Lookup index and txout by outpoint. 
- txouts: BTreeMap, - /// Lookup from spk index to outpoints that had that spk - spk_txouts: BTreeSet<(I, OutPoint)>, -} - -impl Default for SpkTxOutIndex { - fn default() -> Self { - Self { - txouts: Default::default(), - spks: Default::default(), - spk_indices: Default::default(), - spk_txouts: Default::default(), - unused: Default::default(), - } - } -} - -impl AsRef> for SpkTxOutIndex { - fn as_ref(&self) -> &SpkTxOutIndex { - self - } -} - -impl Indexer for SpkTxOutIndex { - type ChangeSet = (); - - fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet { - self.scan_txout(outpoint, txout); - Default::default() - } - - fn index_tx(&mut self, tx: &Transaction) -> Self::ChangeSet { - self.scan(tx); - Default::default() - } - - fn initial_changeset(&self) -> Self::ChangeSet {} - - fn apply_changeset(&mut self, _changeset: Self::ChangeSet) { - // This applies nothing. - } - - fn is_tx_relevant(&self, tx: &Transaction) -> bool { - self.is_relevant(tx) - } -} - -impl SpkTxOutIndex { - /// Scans a transaction's outputs for matching script pubkeys. - /// - /// Typically, this is used in two situations: - /// - /// 1. After loading transaction data from the disk, you may scan over all the txouts to restore all - /// your txouts. - /// 2. When getting new data from the chain, you usually scan it before incorporating it into your chain state. - pub fn scan(&mut self, tx: &Transaction) -> BTreeSet { - let mut scanned_indices = BTreeSet::new(); - let txid = tx.compute_txid(); - for (i, txout) in tx.output.iter().enumerate() { - let op = OutPoint::new(txid, i as u32); - if let Some(spk_i) = self.scan_txout(op, txout) { - scanned_indices.insert(spk_i.clone()); - } - } - - scanned_indices - } - - /// Scan a single `TxOut` for a matching script pubkey and returns the index that matches the - /// script pubkey (if any). - pub fn scan_txout(&mut self, op: OutPoint, txout: &TxOut) -> Option<&I> { - let spk_i = self.spk_indices.get(&txout.script_pubkey); - if let Some(spk_i) = spk_i { - self.txouts.insert(op, (spk_i.clone(), txout.clone())); - self.spk_txouts.insert((spk_i.clone(), op)); - self.unused.remove(spk_i); - } - spk_i - } - - /// Get a reference to the set of indexed outpoints. - pub fn outpoints(&self) -> &BTreeSet<(I, OutPoint)> { - &self.spk_txouts - } - - /// Iterate over all known txouts that spend to tracked script pubkeys. - pub fn txouts( - &self, - ) -> impl DoubleEndedIterator + ExactSizeIterator { - self.txouts - .iter() - .map(|(op, (index, txout))| (index, *op, txout)) - } - - /// Finds all txouts on a transaction that has previously been scanned and indexed. - pub fn txouts_in_tx( - &self, - txid: Txid, - ) -> impl DoubleEndedIterator { - self.txouts - .range(OutPoint::new(txid, u32::MIN)..=OutPoint::new(txid, u32::MAX)) - .map(|(op, (index, txout))| (index, *op, txout)) - } - - /// Iterates over all the outputs with script pubkeys in an index range. 
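The `outputs_in_range` implementation that follows turns a range over indices into a range over `(index, outpoint)` pairs by padding with sentinel outpoints: an all-zero txid with the minimum vout at the low end, an all-`0xff` txid with `u32::MAX` at the high end. A toy model of the trick, with 4-byte stand-in txids:

```rust
use std::collections::BTreeSet;

/// Toy outpoint: (txid, vout) with a 4-byte "txid".
type Op = ([u8; 4], u32);

/// Select every (index, outpoint) entry whose index lies in lo..=hi.
fn outputs_in_range(set: &BTreeSet<(u32, Op)>, lo: u32, hi: u32) -> Vec<(u32, Op)> {
    let min_op: Op = ([0x00; 4], u32::MIN); // sorts before any real outpoint
    let max_op: Op = ([0xff; 4], u32::MAX); // sorts after any real outpoint
    set.range((lo, min_op)..=(hi, max_op)).cloned().collect()
}

fn main() {
    let mut set = BTreeSet::new();
    set.insert((0, ([1; 4], 0)));
    set.insert((1, ([2; 4], 1)));
    set.insert((5, ([3; 4], 0)));
    // The sentinels make the bounds cover every outpoint stored under the
    // indices 1..=4, and nothing outside them.
    assert_eq!(outputs_in_range(&set, 1, 4), vec![(1, ([2; 4], 1))]);
}
```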
- pub fn outputs_in_range( - &self, - range: impl RangeBounds, - ) -> impl DoubleEndedIterator { - use bitcoin::hashes::Hash; - use core::ops::Bound::*; - let min_op = OutPoint { - txid: Txid::all_zeros(), - vout: u32::MIN, - }; - let max_op = OutPoint { - txid: Txid::from_byte_array([0xff; Txid::LEN]), - vout: u32::MAX, - }; - - let start = match range.start_bound() { - Included(index) => Included((index.clone(), min_op)), - Excluded(index) => Excluded((index.clone(), max_op)), - Unbounded => Unbounded, - }; - - let end = match range.end_bound() { - Included(index) => Included((index.clone(), max_op)), - Excluded(index) => Excluded((index.clone(), min_op)), - Unbounded => Unbounded, - }; - - self.spk_txouts.range((start, end)).map(|(i, op)| (i, *op)) - } - - /// Returns the txout and script pubkey index of the `TxOut` at `OutPoint`. - /// - /// Returns `None` if the `TxOut` hasn't been scanned or if nothing matching was found there. - pub fn txout(&self, outpoint: OutPoint) -> Option<(&I, &TxOut)> { - self.txouts.get(&outpoint).map(|v| (&v.0, &v.1)) - } - - /// Returns the script that has been inserted at the `index`. - /// - /// If that index hasn't been inserted yet, it will return `None`. - pub fn spk_at_index(&self, index: &I) -> Option { - self.spks.get(index).cloned() - } - - /// The script pubkeys that are being tracked by the index. - pub fn all_spks(&self) -> &BTreeMap { - &self.spks - } - - /// Adds a script pubkey to scan for. Returns `false` and does nothing if spk already exists in the map - /// - /// the index will look for outputs spending to this spk whenever it scans new data. - pub fn insert_spk(&mut self, index: I, spk: ScriptBuf) -> bool { - match self.spk_indices.entry(spk.clone()) { - Entry::Vacant(value) => { - value.insert(index.clone()); - self.spks.insert(index.clone(), spk); - self.unused.insert(index); - true - } - Entry::Occupied(_) => false, - } - } - - /// Iterates over all unused script pubkeys in an index range. - /// - /// Here, "unused" means that after the script pubkey was stored in the index, the index has - /// never scanned a transaction output with it. - /// - /// # Example - /// - /// ```rust - /// # use bdk_chain::spk_txout::SpkTxOutIndex; - /// - /// // imagine our spks are indexed like (keychain, derivation_index). - /// let txout_index = SpkTxOutIndex::<(u32, u32)>::default(); - /// let all_unused_spks = txout_index.unused_spks(..); - /// let change_index = 1; - /// let unused_change_spks = - /// txout_index.unused_spks((change_index, u32::MIN)..(change_index, u32::MAX)); - /// ``` - pub fn unused_spks( - &self, - range: R, - ) -> impl DoubleEndedIterator + Clone + '_ - where - R: RangeBounds, - { - self.unused - .range(range) - .map(move |index| (index, self.spk_at_index(index).expect("must exist"))) - } - - /// Returns whether the script pubkey at `index` has been used or not. - /// - /// Here, "unused" means that after the script pubkey was stored in the index, the index has - /// never scanned a transaction output with it. - pub fn is_used(&self, index: &I) -> bool { - !self.unused.contains(index) - } - - /// Marks the script pubkey at `index` as used even though it hasn't seen an output spending to it. - /// This only affects when the `index` had already been added to `self` and was unused. - /// - /// Returns whether the `index` was initially present as `unused`. - /// - /// This is useful when you want to reserve a script pubkey for something but don't want to add - /// the transaction output using it to the index yet. 
Other callers will consider the `index` used - /// until you call [`unmark_used`]. - /// - /// [`unmark_used`]: Self::unmark_used - pub fn mark_used(&mut self, index: &I) -> bool { - self.unused.remove(index) - } - - /// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into - /// `unused`. - /// - /// Note that if `self` has scanned an output with this script pubkey then this will have no - /// effect. - /// - /// [`mark_used`]: Self::mark_used - pub fn unmark_used(&mut self, index: &I) -> bool { - // we cannot set the index as unused when it does not exist - if !self.spks.contains_key(index) { - return false; - } - // we cannot set the index as unused when txouts are indexed under it - if self.outputs_in_range(index..=index).next().is_some() { - return false; - } - self.unused.insert(index.clone()) - } - - /// Returns the index associated with the script pubkey. - pub fn index_of_spk(&self, script: ScriptBuf) -> Option<&I> { - self.spk_indices.get(script.as_script()) - } - - /// Computes the total value transfer effect `tx` has on the script pubkeys in `range`. Value is - /// *sent* when a script pubkey in the `range` is on an input and *received* when it is on an - /// output. For `sent` to be computed correctly, the output being spent must have already been - /// scanned by the index. Calculating received just uses the [`Transaction`] outputs directly, - /// so it will be correct even if it has not been scanned. - pub fn sent_and_received( - &self, - tx: &Transaction, - range: impl RangeBounds, - ) -> (Amount, Amount) { - let mut sent = Amount::ZERO; - let mut received = Amount::ZERO; - - for txin in &tx.input { - if let Some((index, txout)) = self.txout(txin.previous_output) { - if range.contains(index) { - sent += txout.value; - } - } - } - for txout in &tx.output { - if let Some(index) = self.index_of_spk(txout.script_pubkey.clone()) { - if range.contains(index) { - received += txout.value; - } - } - } - - (sent, received) - } - - /// Computes the net value transfer effect of `tx` on the script pubkeys in `range`. Shorthand - /// for calling [`sent_and_received`] and subtracting sent from received. - /// - /// [`sent_and_received`]: Self::sent_and_received - pub fn net_value(&self, tx: &Transaction, range: impl RangeBounds) -> SignedAmount { - let (sent, received) = self.sent_and_received(tx, range); - received.to_signed().expect("valid `SignedAmount`") - - sent.to_signed().expect("valid `SignedAmount`") - } - - /// Whether any of the inputs of this transaction spend a txout tracked or whether any output - /// matches one of our script pubkeys. - /// - /// It is easily possible to misuse this method and get false negatives by calling it before you - /// have scanned the `TxOut`s the transaction is spending. For example, if you want to filter out - /// all the transactions in a block that are irrelevant, you **must first scan all the - /// transactions in the block** and only then use this method. - pub fn is_relevant(&self, tx: &Transaction) -> bool { - let input_matches = tx - .input - .iter() - .any(|input| self.txouts.contains_key(&input.previous_output)); - let output_matches = tx - .output - .iter() - .any(|output| self.spk_indices.contains_key(&output.script_pubkey)); - input_matches || output_matches - } - - /// Find relevant script pubkeys associated with a transaction for tracking and validation. 
- /// - /// Returns a set of script pubkeys from [`SpkTxOutIndex`] that are relevant to the outputs and - /// previous outputs of a given transaction. Inputs are only considered relevant if the parent - /// transactions have been scanned. - pub fn relevant_spks_of_tx(&self, tx: &Transaction) -> BTreeSet<(I, ScriptBuf)> { - let spks_from_inputs = tx.input.iter().filter_map(|txin| { - self.txouts - .get(&txin.previous_output) - .cloned() - .map(|(i, prev_txo)| (i, prev_txo.script_pubkey)) - }); - let spks_from_outputs = tx - .output - .iter() - .filter_map(|txout| self.spk_indices.get_key_value(&txout.script_pubkey)) - .map(|(spk, i)| (i.clone(), spk.clone())); - spks_from_inputs.chain(spks_from_outputs).collect() - } -} diff --git a/crates/chain/src/lib.rs b/crates/chain/src/lib.rs deleted file mode 100644 index 92a6d5c4..00000000 --- a/crates/chain/src/lib.rs +++ /dev/null @@ -1,104 +0,0 @@ -//! This crate is a collection of core structures for [Bitcoin Dev Kit]. -//! -//! The goal of this crate is to give wallets the mechanisms needed to: -//! -//! 1. Figure out what data they need to fetch. -//! 2. Process the data in a way that never leads to inconsistent states. -//! 3. Fully index that data and expose it to be consumed without friction. -//! -//! Our design goals for these mechanisms are: -//! -//! 1. Data source agnostic -- nothing in `bdk_chain` cares about where you get data from or whether -//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just -//! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done -//! consistently. -//! 2. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you -//! cache or how you retrieve it from persistent storage. -//! -//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/ - -// only enables the `doc_cfg` feature when the `docsrs` configuration attribute is defined -#![cfg_attr(docsrs, feature(doc_cfg))] -#![cfg_attr( - docsrs, - doc(html_logo_url = "https://github.com/bitcoindevkit/bdk/raw/master/static/bdk.png") -)] -#![no_std] -#![warn(missing_docs)] - -pub use bitcoin; -mod balance; -pub use balance::*; -mod chain_data; -pub use chain_data::*; -pub mod indexed_tx_graph; -pub use indexed_tx_graph::IndexedTxGraph; -pub mod indexer; -pub use indexer::spk_txout; -pub use indexer::Indexer; -pub mod local_chain; -mod tx_data_traits; -pub use tx_data_traits::*; -pub mod tx_graph; -pub use tx_graph::TxGraph; -mod chain_oracle; -pub use chain_oracle::*; -mod canonical_iter; -pub use canonical_iter::*; - -#[doc(hidden)] -pub mod example_utils; - -#[cfg(feature = "miniscript")] -pub use miniscript; -#[cfg(feature = "miniscript")] -mod descriptor_ext; -#[cfg(feature = "miniscript")] -pub use descriptor_ext::{DescriptorExt, DescriptorId}; -#[cfg(feature = "miniscript")] -mod spk_iter; -#[cfg(feature = "miniscript")] -pub use indexer::keychain_txout; -#[cfg(feature = "miniscript")] -pub use spk_iter::*; -#[cfg(feature = "rusqlite")] -pub mod rusqlite_impl; - -pub extern crate bdk_core; -pub use bdk_core::*; - -#[allow(unused_imports)] -#[macro_use] -extern crate alloc; -#[cfg(feature = "rusqlite")] -pub extern crate rusqlite; -#[cfg(feature = "serde")] -pub extern crate serde; - -#[cfg(feature = "std")] -#[macro_use] -extern crate std; - -/// A wrapper that we use to impl remote traits for types in our crate or dependency crates. -pub struct Impl(pub T); - -impl Impl { - /// Returns the inner `T`. 
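`Impl` is the classic newtype workaround for Rust's orphan rule: you may implement a foreign trait for a type you wrap, even when you own neither the trait nor the wrapped type. A self-contained sketch (all names hypothetical; `Meters` stands in for a type from another crate):

```rust
use core::fmt;
use core::ops::Deref;

struct Meters(pub f64); // pretend this lives in a dependency

/// Local wrapper, in the spirit of `Impl<T>` above.
struct Pretty<T>(pub T);

// We own `Pretty`, so implementing the remote trait `fmt::Display` for
// `Pretty<Meters>` is allowed by the orphan rule.
impl fmt::Display for Pretty<Meters> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} m", (self.0).0)
    }
}

// Deref keeps the wrapper ergonomic, just as `Impl<T>` does.
impl<T> Deref for Pretty<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0
    }
}

fn main() {
    let d = Pretty(Meters(3.5));
    assert_eq!(d.to_string(), "3.5 m");
    let inner: &Meters = &d; // deref coercion reaches the wrapped value
    assert_eq!(inner.0, 3.5);
}
```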
- pub fn into_inner(self) -> T { - self.0 - } -} - -impl From for Impl { - fn from(value: T) -> Self { - Self(value) - } -} - -impl core::ops::Deref for Impl { - type Target = T; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs deleted file mode 100644 index b9a1b645..00000000 --- a/crates/chain/src/local_chain.rs +++ /dev/null @@ -1,671 +0,0 @@ -//! The [`LocalChain`] is a local implementation of [`ChainOracle`]. - -use core::convert::Infallible; -use core::ops::RangeBounds; - -use crate::collections::BTreeMap; -use crate::{BlockId, ChainOracle, Merge}; -pub use bdk_core::{CheckPoint, CheckPointIter}; -use bitcoin::block::Header; -use bitcoin::BlockHash; - -/// Apply `changeset` to the checkpoint. -fn apply_changeset_to_checkpoint( - mut init_cp: CheckPoint, - changeset: &ChangeSet, -) -> Result { - if let Some(start_height) = changeset.blocks.keys().next().cloned() { - // changes after point of agreement - let mut extension = BTreeMap::default(); - // point of agreement - let mut base: Option = None; - - for cp in init_cp.iter() { - if cp.height() >= start_height { - extension.insert(cp.height(), cp.hash()); - } else { - base = Some(cp); - break; - } - } - - for (&height, &hash) in &changeset.blocks { - match hash { - Some(hash) => { - extension.insert(height, hash); - } - None => { - extension.remove(&height); - } - }; - } - - let new_tip = match base { - Some(base) => base - .extend(extension.into_iter().map(BlockId::from)) - .expect("extension is strictly greater than base"), - None => LocalChain::from_blocks(extension)?.tip(), - }; - init_cp = new_tip; - } - - Ok(init_cp) -} - -/// This is a local implementation of [`ChainOracle`]. -#[derive(Debug, Clone, PartialEq)] -pub struct LocalChain { - tip: CheckPoint, -} - -impl ChainOracle for LocalChain { - type Error = Infallible; - - fn is_block_in_chain( - &self, - block: BlockId, - chain_tip: BlockId, - ) -> Result, Self::Error> { - let chain_tip_cp = match self.tip.get(chain_tip.height) { - // we can only determine whether `block` is in chain of `chain_tip` if `chain_tip` can - // be identified in chain - Some(cp) if cp.hash() == chain_tip.hash => cp, - _ => return Ok(None), - }; - match chain_tip_cp.get(block.height) { - Some(cp) => Ok(Some(cp.hash() == block.hash)), - None => Ok(None), - } - } - - fn get_chain_tip(&self) -> Result { - Ok(self.tip.block_id()) - } -} - -impl LocalChain { - /// Get the genesis hash. - pub fn genesis_hash(&self) -> BlockHash { - self.tip.get(0).expect("genesis must exist").hash() - } - - /// Construct [`LocalChain`] from genesis `hash`. - #[must_use] - pub fn from_genesis_hash(hash: BlockHash) -> (Self, ChangeSet) { - let height = 0; - let chain = Self { - tip: CheckPoint::new(BlockId { height, hash }), - }; - let changeset = chain.initial_changeset(); - (chain, changeset) - } - - /// Construct a [`LocalChain`] from an initial `changeset`. - pub fn from_changeset(changeset: ChangeSet) -> Result { - let genesis_entry = changeset.blocks.get(&0).copied().flatten(); - let genesis_hash = match genesis_entry { - Some(hash) => hash, - None => return Err(MissingGenesisError), - }; - - let (mut chain, _) = Self::from_genesis_hash(genesis_hash); - chain.apply_changeset(&changeset)?; - - debug_assert!(chain._check_changeset_is_applied(&changeset)); - - Ok(chain) - } - - /// Construct a [`LocalChain`] from a given `checkpoint` tip. 
- pub fn from_tip(tip: CheckPoint) -> Result { - let genesis_cp = tip.iter().last().expect("must have at least one element"); - if genesis_cp.height() != 0 { - return Err(MissingGenesisError); - } - Ok(Self { tip }) - } - - /// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`]. - /// - /// The [`BTreeMap`] enforces the height order. However, the caller must ensure the blocks are - /// all of the same chain. - pub fn from_blocks(blocks: BTreeMap) -> Result { - if !blocks.contains_key(&0) { - return Err(MissingGenesisError); - } - - let mut tip: Option = None; - for block in &blocks { - match tip { - Some(curr) => { - tip = Some( - curr.push(BlockId::from(block)) - .expect("BTreeMap is ordered"), - ) - } - None => tip = Some(CheckPoint::new(BlockId::from(block))), - } - } - - Ok(Self { - tip: tip.expect("already checked to have genesis"), - }) - } - - /// Get the highest checkpoint. - pub fn tip(&self) -> CheckPoint { - self.tip.clone() - } - - /// Applies the given `update` to the chain. - /// - /// The method returns [`ChangeSet`] on success. This represents the changes applied to `self`. - /// - /// There must be no ambiguity about which of the existing chain's blocks are still valid and - /// which are now invalid. That is, the new chain must implicitly connect to a definite block in - /// the existing chain and invalidate the block after it (if it exists) by including a block at - /// the same height but with a different hash to explicitly exclude it as a connection point. - /// - /// # Errors - /// - /// An error will occur if the update does not correctly connect with `self`. - /// - /// [module-level documentation]: crate::local_chain - pub fn apply_update(&mut self, update: CheckPoint) -> Result { - let (new_tip, changeset) = merge_chains(self.tip.clone(), update)?; - self.tip = new_tip; - debug_assert!(self._check_changeset_is_applied(&changeset)); - Ok(changeset) - } - - /// Update the chain with a given [`Header`] at `height` which you claim is connected to a existing block in the chain. - /// - /// This is useful when you have a block header that you want to record as part of the chain but - /// don't necessarily know that the `prev_blockhash` is in the chain. - /// - /// This will usually insert two new [`BlockId`]s into the chain: the header's block and the - /// header's `prev_blockhash` block. `connected_to` must already be in the chain but is allowed - /// to be `prev_blockhash` (in which case only one new block id will be inserted). - /// To be successful, `connected_to` must be chosen carefully so that `LocalChain`'s [update - /// rules][`apply_update`] are satisfied. - /// - /// # Errors - /// - /// [`ApplyHeaderError::InconsistentBlocks`] occurs if the `connected_to` block and the - /// [`Header`] is inconsistent. For example, if the `connected_to` block is the same height as - /// `header` or `prev_blockhash`, but has a different block hash. Or if the `connected_to` - /// height is greater than the header's `height`. - /// - /// [`ApplyHeaderError::CannotConnect`] occurs if the internal call to [`apply_update`] fails. 
- /// - /// [`apply_update`]: Self::apply_update - pub fn apply_header_connected_to( - &mut self, - header: &Header, - height: u32, - connected_to: BlockId, - ) -> Result { - let this = BlockId { - height, - hash: header.block_hash(), - }; - let prev = height.checked_sub(1).map(|prev_height| BlockId { - height: prev_height, - hash: header.prev_blockhash, - }); - let conn = match connected_to { - // `connected_to` can be ignored if same as `this` or `prev` (duplicate) - conn if conn == this || Some(conn) == prev => None, - // this occurs if: - // - `connected_to` height is the same as `prev`, but different hash - // - `connected_to` height is the same as `this`, but different hash - // - `connected_to` height is greater than `this` (this is not allowed) - conn if conn.height >= height.saturating_sub(1) => { - return Err(ApplyHeaderError::InconsistentBlocks) - } - conn => Some(conn), - }; - - let update = CheckPoint::from_block_ids([conn, prev, Some(this)].into_iter().flatten()) - .expect("block ids must be in order"); - - self.apply_update(update) - .map_err(ApplyHeaderError::CannotConnect) - } - - /// Update the chain with a given [`Header`] connecting it with the previous block. - /// - /// This is a convenience method to call [`apply_header_connected_to`] with the `connected_to` - /// parameter being `height-1:prev_blockhash`. If there is no previous block (i.e. genesis), we - /// use the current block as `connected_to`. - /// - /// [`apply_header_connected_to`]: LocalChain::apply_header_connected_to - pub fn apply_header( - &mut self, - header: &Header, - height: u32, - ) -> Result { - let connected_to = match height.checked_sub(1) { - Some(prev_height) => BlockId { - height: prev_height, - hash: header.prev_blockhash, - }, - None => BlockId { - height, - hash: header.block_hash(), - }, - }; - self.apply_header_connected_to(header, height, connected_to) - .map_err(|err| match err { - ApplyHeaderError::InconsistentBlocks => { - unreachable!("connected_to is derived from the block so is always consistent") - } - ApplyHeaderError::CannotConnect(err) => err, - }) - } - - /// Apply the given `changeset`. - pub fn apply_changeset(&mut self, changeset: &ChangeSet) -> Result<(), MissingGenesisError> { - let old_tip = self.tip.clone(); - let new_tip = apply_changeset_to_checkpoint(old_tip, changeset)?; - self.tip = new_tip; - debug_assert!(self._check_changeset_is_applied(changeset)); - Ok(()) - } - - /// Insert a [`BlockId`]. - /// - /// # Errors - /// - /// Replacing the block hash of an existing checkpoint will result in an error. - pub fn insert_block(&mut self, block_id: BlockId) -> Result { - if let Some(original_cp) = self.tip.get(block_id.height) { - let original_hash = original_cp.hash(); - if original_hash != block_id.hash { - return Err(AlterCheckPointError { - height: block_id.height, - original_hash, - update_hash: Some(block_id.hash), - }); - } - return Ok(ChangeSet::default()); - } - - let mut changeset = ChangeSet::default(); - changeset - .blocks - .insert(block_id.height, Some(block_id.hash)); - self.apply_changeset(&changeset) - .map_err(|_| AlterCheckPointError { - height: 0, - original_hash: self.genesis_hash(), - update_hash: changeset.blocks.get(&0).cloned().flatten(), - })?; - Ok(changeset) - } - - /// Removes blocks from (and inclusive of) the given `block_id`. - /// - /// This will remove blocks with a height equal or greater than `block_id`, but only if - /// `block_id` exists in the chain. 
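The changesets being built and applied here have simple semantics: `Some(hash)` upserts a block at a height and `None` removes whatever is there. A minimal model over string hashes (`apply` is a hypothetical name; the real code works on checkpoints, not a bare map):

```rust
use std::collections::BTreeMap;

/// Apply a local_chain-style changeset: Some(hash) upserts, None removes.
fn apply(chain: &mut BTreeMap<u32, String>, changeset: BTreeMap<u32, Option<String>>) {
    for (height, hash) in changeset {
        match hash {
            Some(hash) => {
                chain.insert(height, hash);
            }
            None => {
                chain.remove(&height);
            }
        }
    }
}

fn main() {
    let mut chain = BTreeMap::from([(0, "genesis".to_string()), (2, "B2".to_string())]);
    // A reorg replaces block 2 in place and extends the tip to height 3.
    apply(&mut chain, BTreeMap::from([(2, Some("B2'".into())), (3, Some("B3".into()))]));
    assert_eq!(chain[&2], "B2'");
    // Disconnecting from height 2 upwards is recorded as None entries.
    apply(&mut chain, BTreeMap::from([(2, None), (3, None)]));
    assert_eq!(chain.len(), 1); // only the genesis block remains
}
```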
- /// - /// # Errors - /// - /// This will fail with [`MissingGenesisError`] if the caller attempts to disconnect from the - /// genesis block. - pub fn disconnect_from(&mut self, block_id: BlockId) -> Result { - let mut remove_from = Option::::None; - let mut changeset = ChangeSet::default(); - for cp in self.tip().iter() { - let cp_id = cp.block_id(); - if cp_id.height < block_id.height { - break; - } - changeset.blocks.insert(cp_id.height, None); - if cp_id == block_id { - remove_from = Some(cp); - } - } - self.tip = match remove_from.map(|cp| cp.prev()) { - // The checkpoint below the earliest checkpoint to remove will be the new tip. - Some(Some(new_tip)) => new_tip, - // If there is no checkpoint below the earliest checkpoint to remove, it means the - // "earliest checkpoint to remove" is the genesis block. We disallow removing the - // genesis block. - Some(None) => return Err(MissingGenesisError), - // If there is nothing to remove, we return an empty changeset. - None => return Ok(ChangeSet::default()), - }; - Ok(changeset) - } - - /// Derives an initial [`ChangeSet`], meaning that it can be applied to an empty chain to - /// recover the current chain. - pub fn initial_changeset(&self) -> ChangeSet { - ChangeSet { - blocks: self - .tip - .iter() - .map(|cp| { - let block_id = cp.block_id(); - (block_id.height, Some(block_id.hash)) - }) - .collect(), - } - } - - /// Iterate over checkpoints in descending height order. - pub fn iter_checkpoints(&self) -> CheckPointIter { - self.tip.iter() - } - - fn _check_changeset_is_applied(&self, changeset: &ChangeSet) -> bool { - let mut curr_cp = self.tip.clone(); - for (height, exp_hash) in changeset.blocks.iter().rev() { - match curr_cp.get(*height) { - Some(query_cp) => { - if query_cp.height() != *height || Some(query_cp.hash()) != *exp_hash { - return false; - } - curr_cp = query_cp; - } - None => { - if exp_hash.is_some() { - return false; - } - } - } - } - true - } - - /// Get checkpoint at given `height` (if it exists). - /// - /// This is a shorthand for calling [`CheckPoint::get`] on the [`tip`]. - /// - /// [`tip`]: LocalChain::tip - pub fn get(&self, height: u32) -> Option { - self.tip.get(height) - } - - /// Iterate checkpoints over a height range. - /// - /// Note that we always iterate checkpoints in reverse height order (iteration starts at tip - /// height). - /// - /// This is a shorthand for calling [`CheckPoint::range`] on the [`tip`]. - /// - /// [`tip`]: LocalChain::tip - pub fn range(&self, range: R) -> impl Iterator - where - R: RangeBounds, - { - self.tip.range(range) - } -} - -/// The [`ChangeSet`] represents changes to [`LocalChain`]. -#[derive(Debug, Default, Clone, PartialEq)] -#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -pub struct ChangeSet { - /// Changes to the [`LocalChain`] blocks. - /// - /// The key represents the block height, and the value either represents added a new [`CheckPoint`] - /// (if [`Some`]), or removing a [`CheckPoint`] (if [`None`]). 
-/// The [`ChangeSet`] represents changes to [`LocalChain`].
-#[derive(Debug, Default, Clone, PartialEq)]
-#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
-pub struct ChangeSet {
-    /// Changes to the [`LocalChain`] blocks.
-    ///
-    /// The key represents the block height, and the value either represents adding a new
-    /// [`CheckPoint`] (if [`Some`]) or removing an existing one (if [`None`]).
-    pub blocks: BTreeMap<u32, Option<BlockHash>>,
-}
-
-impl Merge for ChangeSet {
-    fn merge(&mut self, other: Self) {
-        Merge::merge(&mut self.blocks, other.blocks)
-    }
-
-    fn is_empty(&self) -> bool {
-        self.blocks.is_empty()
-    }
-}
-
-impl<B: IntoIterator<Item = (u32, Option<BlockHash>)>> From<B> for ChangeSet {
-    fn from(blocks: B) -> Self {
-        Self {
-            blocks: blocks.into_iter().collect(),
-        }
-    }
-}
-
-impl FromIterator<(u32, Option<BlockHash>)> for ChangeSet {
-    fn from_iter<T: IntoIterator<Item = (u32, Option<BlockHash>)>>(iter: T) -> Self {
-        Self {
-            blocks: iter.into_iter().collect(),
-        }
-    }
-}
-
-impl FromIterator<(u32, BlockHash)> for ChangeSet {
-    fn from_iter<T: IntoIterator<Item = (u32, BlockHash)>>(iter: T) -> Self {
-        Self {
-            blocks: iter
-                .into_iter()
-                .map(|(height, hash)| (height, Some(hash)))
-                .collect(),
-        }
-    }
-}
-
-/// An error which occurs when a [`LocalChain`] is constructed without a genesis checkpoint.
-#[derive(Clone, Debug, PartialEq)]
-pub struct MissingGenesisError;
-
-impl core::fmt::Display for MissingGenesisError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(
-            f,
-            "cannot construct `LocalChain` without a genesis checkpoint"
-        )
-    }
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for MissingGenesisError {}
-
-/// Represents a failure when trying to insert/remove a checkpoint to/from [`LocalChain`].
-#[derive(Clone, Debug, PartialEq)]
-pub struct AlterCheckPointError {
-    /// The checkpoint's height.
-    pub height: u32,
-    /// The original checkpoint's block hash which cannot be replaced/removed.
-    pub original_hash: BlockHash,
-    /// The attempted update to the `original_hash` of the checkpoint.
-    pub update_hash: Option<BlockHash>,
-}
-
-impl core::fmt::Display for AlterCheckPointError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match self.update_hash {
-            Some(update_hash) => write!(
-                f,
-                "failed to insert block at height {}: original={} update={}",
-                self.height, self.original_hash, update_hash
-            ),
-            None => write!(
-                f,
-                "failed to remove block at height {}: original={}",
-                self.height, self.original_hash
-            ),
-        }
-    }
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for AlterCheckPointError {}
-
-/// Occurs when an update does not have a common checkpoint with the original chain.
-#[derive(Clone, Debug, PartialEq)]
-pub struct CannotConnectError {
-    /// The suggested checkpoint to include to connect the two chains.
-    pub try_include_height: u32,
-}
-
-impl core::fmt::Display for CannotConnectError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(
-            f,
-            "introduced chain cannot connect with the original chain, try including height {}",
-            self.try_include_height,
-        )
-    }
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for CannotConnectError {}
-
-/// The error type for [`LocalChain::apply_header_connected_to`].
-#[derive(Debug, Clone, PartialEq)]
-pub enum ApplyHeaderError {
-    /// Occurs when the `connected_to` block conflicts with either the current block or previous block.
-    InconsistentBlocks,
-    /// Occurs when the update cannot connect with the original chain.
-    CannotConnect(CannotConnectError),
-}
-
-impl core::fmt::Display for ApplyHeaderError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match self {
-            ApplyHeaderError::InconsistentBlocks => write!(
-                f,
-                "the `connected_to` block conflicts with either the current or previous block"
-            ),
-            ApplyHeaderError::CannotConnect(err) => core::fmt::Display::fmt(err, f),
-        }
-    }
-}
-
-#[cfg(feature = "std")]
-impl std::error::Error for ApplyHeaderError {}
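Since `ChangeSet` is just a map of height to optional hash, merging follows the `BTreeMap` semantics of the `Merge` impl above: entries merged in later overwrite existing heights. A small sketch using the `FromIterator` impls above (`h` again a hypothetical dummy-hash helper):

use bdk_chain::{local_chain::ChangeSet, Merge};
use bitcoin::{hashes::Hash, BlockHash};

fn demo_merge() {
    let h = |b: u8| BlockHash::from_byte_array([b; 32]);

    // `from_iter` over `(u32, BlockHash)` wraps each hash in `Some(_)`.
    let mut a = ChangeSet::from_iter([(0u32, h(0)), (1, h(1))]);
    // Entries merged in later win: height 1 becomes a removal (`None`).
    let b = ChangeSet::from_iter([(1u32, Option::<BlockHash>::None), (2, Some(h(2)))]);
    a.merge(b);
    assert_eq!(a.blocks.get(&1), Some(&None));
    assert_eq!(a.blocks.len(), 3); // heights 0, 1, 2
}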
-/// Applies `update_tip` onto `original_tip`.
-///
-/// On success, returns a tuple `(new_tip, changeset)`: the checkpoint that should become the new
-/// tip, and the [`ChangeSet`] recording the difference from `original_tip`.
-fn merge_chains(
-    original_tip: CheckPoint,
-    update_tip: CheckPoint,
-) -> Result<(CheckPoint, ChangeSet), CannotConnectError> {
-    let mut changeset = ChangeSet::default();
-    let mut orig = original_tip.iter();
-    let mut update = update_tip.iter();
-    let mut curr_orig = None;
-    let mut curr_update = None;
-    let mut prev_orig: Option<CheckPoint> = None;
-    let mut prev_update: Option<CheckPoint> = None;
-    let mut point_of_agreement_found = false;
-    let mut prev_orig_was_invalidated = false;
-    let mut potentially_invalidated_heights = vec![];
-
-    // If we can, we want to return the update tip as the new tip because this allows checkpoints
-    // in multiple locations to keep the same `Arc` pointers when they are being updated from each
-    // other using this function. We can do this as long as the update contains every
-    // block's height of the original chain.
-    let mut is_update_height_superset_of_original = true;
-
-    // To find the difference between the new chain and the original we iterate over both of them
-    // from the tip backwards in tandem. We always deal with the highest one from either chain
-    // first and move to the next highest. The crucial logic is applied when they have blocks at
-    // the same height.
-    loop {
-        if curr_orig.is_none() {
-            curr_orig = orig.next();
-        }
-        if curr_update.is_none() {
-            curr_update = update.next();
-        }
-
-        match (curr_orig.as_ref(), curr_update.as_ref()) {
-            // Update block that doesn't exist in the original chain
-            (o, Some(u)) if Some(u.height()) > o.map(|o| o.height()) => {
-                changeset.blocks.insert(u.height(), Some(u.hash()));
-                prev_update = curr_update.take();
-            }
-            // Original block that isn't in the update
-            (Some(o), u) if Some(o.height()) > u.map(|u| u.height()) => {
-                // this block might be gone if an earlier block gets invalidated
-                potentially_invalidated_heights.push(o.height());
-                prev_orig_was_invalidated = false;
-                prev_orig = curr_orig.take();
-
-                is_update_height_superset_of_original = false;
-
-                // OPTIMIZATION: we have run out of update blocks so we don't need to continue
-                // iterating because there's no possibility of adding anything to changeset.
-                if u.is_none() {
-                    break;
-                }
-            }
-            (Some(o), Some(u)) => {
-                if o.hash() == u.hash() {
-                    // We have found our point of agreement 🎉 -- we require that the previous
-                    // (i.e. higher, because we are iterating backwards) block in the original
-                    // chain was invalidated (if it exists). This ensures that there is an
-                    // unambiguous point of connection to the original chain from the update
-                    // chain (i.e. we know precisely which original blocks are invalid).
-                    if !prev_orig_was_invalidated && !point_of_agreement_found {
-                        if let (Some(prev_orig), Some(_prev_update)) = (&prev_orig, &prev_update) {
-                            return Err(CannotConnectError {
-                                try_include_height: prev_orig.height(),
-                            });
-                        }
-                    }
-                    point_of_agreement_found = true;
-                    prev_orig_was_invalidated = false;
-                    // OPTIMIZATION 2 -- if we have the same underlying pointer at this point, we
-                    // can guarantee that no older blocks are introduced.
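// Note: `eq_ptr` compares the two checkpoints' underlying `Arc` pointers, so
// pointer equality implies both chains already share the same in-memory tail
// from this block downwards -- the update cannot introduce anything older.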
- if o.eq_ptr(u) { - if is_update_height_superset_of_original { - return Ok((update_tip, changeset)); - } else { - let new_tip = apply_changeset_to_checkpoint(original_tip, &changeset) - .map_err(|_| CannotConnectError { - try_include_height: 0, - })?; - return Ok((new_tip, changeset)); - } - } - } else { - // We have an invalidation height so we set the height to the updated hash and - // also purge all the original chain block hashes above this block. - changeset.blocks.insert(u.height(), Some(u.hash())); - for invalidated_height in potentially_invalidated_heights.drain(..) { - changeset.blocks.insert(invalidated_height, None); - } - prev_orig_was_invalidated = true; - } - prev_update = curr_update.take(); - prev_orig = curr_orig.take(); - } - (None, None) => { - break; - } - _ => { - unreachable!("compiler cannot tell that everything has been covered") - } - } - } - - // When we don't have a point of agreement you can imagine it is implicitly the - // genesis block so we need to do the final connectivity check which in this case - // just means making sure the entire original chain was invalidated. - if !prev_orig_was_invalidated && !point_of_agreement_found { - if let Some(prev_orig) = prev_orig { - return Err(CannotConnectError { - try_include_height: prev_orig.height(), - }); - } - } - - let new_tip = apply_changeset_to_checkpoint(original_tip, &changeset).map_err(|_| { - CannotConnectError { - try_include_height: 0, - } - })?; - Ok((new_tip, changeset)) -} diff --git a/crates/chain/src/rusqlite_impl.rs b/crates/chain/src/rusqlite_impl.rs deleted file mode 100644 index 3bc105d0..00000000 --- a/crates/chain/src/rusqlite_impl.rs +++ /dev/null @@ -1,789 +0,0 @@ -//! Support for persisting `bdk_chain` structures to SQLite using [`rusqlite`]. - -use crate::*; -use core::str::FromStr; - -use alloc::{ - borrow::ToOwned, - boxed::Box, - string::{String, ToString}, - sync::Arc, - vec::Vec, -}; -use bitcoin::consensus::{Decodable, Encodable}; -use rusqlite; -use rusqlite::named_params; -use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; -use rusqlite::OptionalExtension; -use rusqlite::Transaction; - -/// Table name for schemas. -pub const SCHEMAS_TABLE_NAME: &str = "bdk_schemas"; - -/// Initialize the schema table. -fn init_schemas_table(db_tx: &Transaction) -> rusqlite::Result<()> { - let sql = format!("CREATE TABLE IF NOT EXISTS {}( name TEXT PRIMARY KEY NOT NULL, version INTEGER NOT NULL ) STRICT", SCHEMAS_TABLE_NAME); - db_tx.execute(&sql, ())?; - Ok(()) -} - -/// Get schema version of `schema_name`. -fn schema_version(db_tx: &Transaction, schema_name: &str) -> rusqlite::Result> { - let sql = format!( - "SELECT version FROM {} WHERE name=:name", - SCHEMAS_TABLE_NAME - ); - db_tx - .query_row(&sql, named_params! { ":name": schema_name }, |row| { - row.get::<_, u32>("version") - }) - .optional() -} - -/// Set the `schema_version` of `schema_name`. -fn set_schema_version( - db_tx: &Transaction, - schema_name: &str, - schema_version: u32, -) -> rusqlite::Result<()> { - let sql = format!( - "REPLACE INTO {}(name, version) VALUES(:name, :version)", - SCHEMAS_TABLE_NAME, - ); - db_tx.execute( - &sql, - named_params! { ":name": schema_name, ":version": schema_version }, - )?; - Ok(()) -} - -/// Runs logic that initializes/migrates the table schemas. 
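Concretely, the helper defined just below executes each versioned script at most once, resuming after the last recorded version. A worked example (hypothetical scripts and wrapper, assuming it lives next to the helper):

fn demo_migration(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
    // Suppose the schemas table already records version 0 for this name:
    // `exec_from` becomes 1, so only the v1 and v2 scripts run, and the
    // recorded version ends up as 2.
    let scripts: &[&str] = &[
        "CREATE TABLE ex (x INTEGER)",          // v0 -- already applied, skipped
        "ALTER TABLE ex ADD COLUMN y INTEGER",  // v1 -- runs
        "ALTER TABLE ex ADD COLUMN z INTEGER",  // v2 -- runs
    ];
    migrate_schema(db_tx, "example_schema", scripts)
}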
-pub fn migrate_schema( - db_tx: &Transaction, - schema_name: &str, - versioned_scripts: &[&str], -) -> rusqlite::Result<()> { - init_schemas_table(db_tx)?; - let current_version = schema_version(db_tx, schema_name)?; - let exec_from = current_version.map_or(0_usize, |v| v as usize + 1); - let scripts_to_exec = versioned_scripts.iter().enumerate().skip(exec_from); - for (version, script) in scripts_to_exec { - set_schema_version(db_tx, schema_name, version as u32)?; - db_tx.execute_batch(script)?; - } - Ok(()) -} - -impl FromSql for Impl { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - bitcoin::Txid::from_str(value.as_str()?) - .map(Self) - .map_err(from_sql_error) - } -} - -impl ToSql for Impl { - fn to_sql(&self) -> rusqlite::Result> { - Ok(self.to_string().into()) - } -} - -impl FromSql for Impl { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - bitcoin::BlockHash::from_str(value.as_str()?) - .map(Self) - .map_err(from_sql_error) - } -} - -impl ToSql for Impl { - fn to_sql(&self) -> rusqlite::Result> { - Ok(self.to_string().into()) - } -} - -#[cfg(feature = "miniscript")] -impl FromSql for Impl { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - DescriptorId::from_str(value.as_str()?) - .map(Self) - .map_err(from_sql_error) - } -} - -#[cfg(feature = "miniscript")] -impl ToSql for Impl { - fn to_sql(&self) -> rusqlite::Result> { - Ok(self.to_string().into()) - } -} - -impl FromSql for Impl { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - bitcoin::Transaction::consensus_decode_from_finite_reader(&mut value.as_bytes()?) - .map(Self) - .map_err(from_sql_error) - } -} - -impl ToSql for Impl { - fn to_sql(&self) -> rusqlite::Result> { - let mut bytes = Vec::::new(); - self.consensus_encode(&mut bytes).map_err(to_sql_error)?; - Ok(bytes.into()) - } -} - -impl FromSql for Impl { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - Ok(bitcoin::Script::from_bytes(value.as_bytes()?) - .to_owned() - .into()) - } -} - -impl ToSql for Impl { - fn to_sql(&self) -> rusqlite::Result> { - Ok(self.as_bytes().into()) - } -} - -impl FromSql for Impl { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - Ok(bitcoin::Amount::from_sat(value.as_i64()?.try_into().map_err(from_sql_error)?).into()) - } -} - -impl ToSql for Impl { - fn to_sql(&self) -> rusqlite::Result> { - let amount: i64 = self.to_sat().try_into().map_err(to_sql_error)?; - Ok(amount.into()) - } -} - -#[cfg(feature = "miniscript")] -impl FromSql for Impl> { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - miniscript::Descriptor::from_str(value.as_str()?) - .map(Self) - .map_err(from_sql_error) - } -} - -#[cfg(feature = "miniscript")] -impl ToSql for Impl> { - fn to_sql(&self) -> rusqlite::Result> { - Ok(self.to_string().into()) - } -} - -impl FromSql for Impl { - fn column_result(value: ValueRef<'_>) -> FromSqlResult { - bitcoin::Network::from_str(value.as_str()?) - .map(Self) - .map_err(from_sql_error) - } -} - -impl ToSql for Impl { - fn to_sql(&self) -> rusqlite::Result> { - Ok(self.to_string().into()) - } -} - -fn from_sql_error(err: E) -> FromSqlError { - FromSqlError::Other(Box::new(err)) -} - -fn to_sql_error(err: E) -> rusqlite::Error { - rusqlite::Error::ToSqlConversionFailure(Box::new(err)) -} - -impl tx_graph::ChangeSet { - /// Schema name for [`tx_graph::ChangeSet`]. - pub const SCHEMA_NAME: &'static str = "bdk_txgraph"; - /// Name of table that stores full transactions and `last_seen` timestamps. 
- pub const TXS_TABLE_NAME: &'static str = "bdk_txs"; - /// Name of table that stores floating txouts. - pub const TXOUTS_TABLE_NAME: &'static str = "bdk_txouts"; - /// Name of table that stores [`Anchor`]s. - pub const ANCHORS_TABLE_NAME: &'static str = "bdk_anchors"; - - /// Get v0 of sqlite [tx_graph::ChangeSet] schema - pub fn schema_v0() -> String { - // full transactions - let create_txs_table = format!( - "CREATE TABLE {} ( \ - txid TEXT PRIMARY KEY NOT NULL, \ - raw_tx BLOB, \ - last_seen INTEGER \ - ) STRICT", - Self::TXS_TABLE_NAME, - ); - // floating txouts - let create_txouts_table = format!( - "CREATE TABLE {} ( \ - txid TEXT NOT NULL, \ - vout INTEGER NOT NULL, \ - value INTEGER NOT NULL, \ - script BLOB NOT NULL, \ - PRIMARY KEY (txid, vout) \ - ) STRICT", - Self::TXOUTS_TABLE_NAME, - ); - // anchors - let create_anchors_table = format!( - "CREATE TABLE {} ( \ - txid TEXT NOT NULL REFERENCES {} (txid), \ - block_height INTEGER NOT NULL, \ - block_hash TEXT NOT NULL, \ - anchor BLOB NOT NULL, \ - PRIMARY KEY (txid, block_height, block_hash) \ - ) STRICT", - Self::ANCHORS_TABLE_NAME, - Self::TXS_TABLE_NAME, - ); - - format!("{create_txs_table}; {create_txouts_table}; {create_anchors_table}") - } - - /// Get v1 of sqlite [tx_graph::ChangeSet] schema - pub fn schema_v1() -> String { - let add_confirmation_time_column = format!( - "ALTER TABLE {} ADD COLUMN confirmation_time INTEGER DEFAULT -1 NOT NULL", - Self::ANCHORS_TABLE_NAME, - ); - let extract_confirmation_time_from_anchor_column = format!( - "UPDATE {} SET confirmation_time = json_extract(anchor, '$.confirmation_time')", - Self::ANCHORS_TABLE_NAME, - ); - let drop_anchor_column = format!( - "ALTER TABLE {} DROP COLUMN anchor", - Self::ANCHORS_TABLE_NAME, - ); - format!("{add_confirmation_time_column}; {extract_confirmation_time_from_anchor_column}; {drop_anchor_column}") - } - - /// Get v2 of sqlite [tx_graph::ChangeSet] schema - pub fn schema_v2() -> String { - format!( - "ALTER TABLE {} ADD COLUMN last_evicted INTEGER", - Self::TXS_TABLE_NAME, - ) - } - - /// Initialize sqlite tables. - pub fn init_sqlite_tables(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> { - migrate_schema( - db_tx, - Self::SCHEMA_NAME, - &[&Self::schema_v0(), &Self::schema_v1(), &Self::schema_v2()], - ) - } - - /// Construct a [`TxGraph`] from an sqlite database. - /// - /// Remember to call [`Self::init_sqlite_tables`] beforehand. 
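The intended call sequence for these loaders, sketched before the definitions (illustrative: every method used here appears in this file, but the wrapper function is hypothetical):

use bdk_chain::{tx_graph, ConfirmationBlockTime};

fn demo_round_trip() -> rusqlite::Result<()> {
    type ChangeSet = tx_graph::ChangeSet<ConfirmationBlockTime>;
    let mut conn = rusqlite::Connection::open_in_memory()?;

    let db_tx = conn.transaction()?;
    ChangeSet::init_sqlite_tables(&db_tx)?; // create/migrate tables first
    let loaded = ChangeSet::from_sqlite(&db_tx)?; // empty on a fresh database
    loaded.persist_to_sqlite(&db_tx)?; // writing it back is a no-op here
    db_tx.commit()?;
    Ok(())
}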
- pub fn from_sqlite(db_tx: &rusqlite::Transaction) -> rusqlite::Result { - let mut changeset = Self::default(); - - let mut statement = db_tx.prepare(&format!( - "SELECT txid, raw_tx, last_seen, last_evicted FROM {}", - Self::TXS_TABLE_NAME, - ))?; - let row_iter = statement.query_map([], |row| { - Ok(( - row.get::<_, Impl>("txid")?, - row.get::<_, Option>>("raw_tx")?, - row.get::<_, Option>("last_seen")?, - row.get::<_, Option>("last_evicted")?, - )) - })?; - for row in row_iter { - let (Impl(txid), tx, last_seen, last_evicted) = row?; - if let Some(Impl(tx)) = tx { - changeset.txs.insert(Arc::new(tx)); - } - if let Some(last_seen) = last_seen { - changeset.last_seen.insert(txid, last_seen); - } - if let Some(last_evicted) = last_evicted { - changeset.last_evicted.insert(txid, last_evicted); - } - } - - let mut statement = db_tx.prepare(&format!( - "SELECT txid, vout, value, script FROM {}", - Self::TXOUTS_TABLE_NAME, - ))?; - let row_iter = statement.query_map([], |row| { - Ok(( - row.get::<_, Impl>("txid")?, - row.get::<_, u32>("vout")?, - row.get::<_, Impl>("value")?, - row.get::<_, Impl>("script")?, - )) - })?; - for row in row_iter { - let (Impl(txid), vout, Impl(value), Impl(script_pubkey)) = row?; - changeset.txouts.insert( - bitcoin::OutPoint { txid, vout }, - bitcoin::TxOut { - value, - script_pubkey, - }, - ); - } - - let mut statement = db_tx.prepare(&format!( - "SELECT block_hash, block_height, confirmation_time, txid FROM {}", - Self::ANCHORS_TABLE_NAME, - ))?; - let row_iter = statement.query_map([], |row| { - Ok(( - row.get::<_, Impl>("block_hash")?, - row.get::<_, u32>("block_height")?, - row.get::<_, u64>("confirmation_time")?, - row.get::<_, Impl>("txid")?, - )) - })?; - for row in row_iter { - let (hash, height, confirmation_time, Impl(txid)) = row?; - changeset.anchors.insert(( - ConfirmationBlockTime { - block_id: BlockId::from((&height, &hash.0)), - confirmation_time, - }, - txid, - )); - } - - Ok(changeset) - } - - /// Persist `changeset` to the sqlite database. - /// - /// Remember to call [`Self::init_sqlite_tables`] beforehand. - pub fn persist_to_sqlite(&self, db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> { - let mut statement = db_tx.prepare_cached(&format!( - "INSERT INTO {}(txid, raw_tx) VALUES(:txid, :raw_tx) ON CONFLICT(txid) DO UPDATE SET raw_tx=:raw_tx", - Self::TXS_TABLE_NAME, - ))?; - for tx in &self.txs { - statement.execute(named_params! { - ":txid": Impl(tx.compute_txid()), - ":raw_tx": Impl(tx.as_ref().clone()), - })?; - } - - let mut statement = db_tx - .prepare_cached(&format!( - "INSERT INTO {}(txid, last_seen) VALUES(:txid, :last_seen) ON CONFLICT(txid) DO UPDATE SET last_seen=:last_seen", - Self::TXS_TABLE_NAME, - ))?; - for (&txid, &last_seen) in &self.last_seen { - let checked_time = last_seen.to_sql()?; - statement.execute(named_params! { - ":txid": Impl(txid), - ":last_seen": Some(checked_time), - })?; - } - - let mut statement = db_tx - .prepare_cached(&format!( - "INSERT INTO {}(txid, last_evicted) VALUES(:txid, :last_evicted) ON CONFLICT(txid) DO UPDATE SET last_evicted=:last_evicted", - Self::TXS_TABLE_NAME, - ))?; - for (&txid, &last_evicted) in &self.last_evicted { - let checked_time = last_evicted.to_sql()?; - statement.execute(named_params! 
{ - ":txid": Impl(txid), - ":last_evicted": Some(checked_time), - })?; - } - - let mut statement = db_tx.prepare_cached(&format!( - "REPLACE INTO {}(txid, vout, value, script) VALUES(:txid, :vout, :value, :script)", - Self::TXOUTS_TABLE_NAME, - ))?; - for (op, txo) in &self.txouts { - statement.execute(named_params! { - ":txid": Impl(op.txid), - ":vout": op.vout, - ":value": Impl(txo.value), - ":script": Impl(txo.script_pubkey.clone()), - })?; - } - - let mut statement = db_tx.prepare_cached(&format!( - "REPLACE INTO {}(txid, block_height, block_hash, confirmation_time) VALUES(:txid, :block_height, :block_hash, :confirmation_time)", - Self::ANCHORS_TABLE_NAME, - ))?; - let mut statement_txid = db_tx.prepare_cached(&format!( - "INSERT OR IGNORE INTO {}(txid) VALUES(:txid)", - Self::TXS_TABLE_NAME, - ))?; - for (anchor, txid) in &self.anchors { - let anchor_block = anchor.anchor_block(); - statement_txid.execute(named_params! { - ":txid": Impl(*txid) - })?; - statement.execute(named_params! { - ":txid": Impl(*txid), - ":block_height": anchor_block.height, - ":block_hash": Impl(anchor_block.hash), - ":confirmation_time": anchor.confirmation_time, - })?; - } - - Ok(()) - } -} - -impl local_chain::ChangeSet { - /// Schema name for the changeset. - pub const SCHEMA_NAME: &'static str = "bdk_localchain"; - /// Name of sqlite table that stores blocks of [`LocalChain`](local_chain::LocalChain). - pub const BLOCKS_TABLE_NAME: &'static str = "bdk_blocks"; - - /// Get v0 of sqlite [local_chain::ChangeSet] schema - pub fn schema_v0() -> String { - // blocks - format!( - "CREATE TABLE {} ( \ - block_height INTEGER PRIMARY KEY NOT NULL, \ - block_hash TEXT NOT NULL \ - ) STRICT", - Self::BLOCKS_TABLE_NAME, - ) - } - - /// Initialize sqlite tables for persisting [`local_chain::LocalChain`]. - pub fn init_sqlite_tables(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> { - migrate_schema(db_tx, Self::SCHEMA_NAME, &[&Self::schema_v0()]) - } - - /// Construct a [`LocalChain`](local_chain::LocalChain) from sqlite database. - /// - /// Remember to call [`Self::init_sqlite_tables`] beforehand. - pub fn from_sqlite(db_tx: &rusqlite::Transaction) -> rusqlite::Result { - let mut changeset = Self::default(); - - let mut statement = db_tx.prepare(&format!( - "SELECT block_height, block_hash FROM {}", - Self::BLOCKS_TABLE_NAME, - ))?; - let row_iter = statement.query_map([], |row| { - Ok(( - row.get::<_, u32>("block_height")?, - row.get::<_, Impl>("block_hash")?, - )) - })?; - for row in row_iter { - let (height, Impl(hash)) = row?; - changeset.blocks.insert(height, Some(hash)); - } - - Ok(changeset) - } - - /// Persist `changeset` to the sqlite database. - /// - /// Remember to call [`Self::init_sqlite_tables`] beforehand. - pub fn persist_to_sqlite(&self, db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> { - let mut replace_statement = db_tx.prepare_cached(&format!( - "REPLACE INTO {}(block_height, block_hash) VALUES(:block_height, :block_hash)", - Self::BLOCKS_TABLE_NAME, - ))?; - let mut delete_statement = db_tx.prepare_cached(&format!( - "DELETE FROM {} WHERE block_height=:block_height", - Self::BLOCKS_TABLE_NAME, - ))?; - for (&height, &hash) in &self.blocks { - match hash { - Some(hash) => replace_statement.execute(named_params! { - ":block_height": height, - ":block_hash": Impl(hash), - })?, - None => delete_statement.execute(named_params! 
{ - ":block_height": height, - })?, - }; - } - - Ok(()) - } -} - -#[cfg(feature = "miniscript")] -impl keychain_txout::ChangeSet { - /// Schema name for the changeset. - pub const SCHEMA_NAME: &'static str = "bdk_keychaintxout"; - /// Name for table that stores last revealed indices per descriptor id. - pub const LAST_REVEALED_TABLE_NAME: &'static str = "bdk_descriptor_last_revealed"; - - /// Get v0 of sqlite [keychain_txout::ChangeSet] schema - pub fn schema_v0() -> String { - format!( - "CREATE TABLE {} ( \ - descriptor_id TEXT PRIMARY KEY NOT NULL, \ - last_revealed INTEGER NOT NULL \ - ) STRICT", - Self::LAST_REVEALED_TABLE_NAME, - ) - } - - /// Initialize sqlite tables for persisting - /// [`KeychainTxOutIndex`](keychain_txout::KeychainTxOutIndex). - pub fn init_sqlite_tables(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> { - migrate_schema(db_tx, Self::SCHEMA_NAME, &[&Self::schema_v0()]) - } - - /// Construct [`KeychainTxOutIndex`](keychain_txout::KeychainTxOutIndex) from sqlite database - /// and given parameters. - /// - /// Remember to call [`Self::init_sqlite_tables`] beforehand. - pub fn from_sqlite(db_tx: &rusqlite::Transaction) -> rusqlite::Result { - let mut changeset = Self::default(); - - let mut statement = db_tx.prepare(&format!( - "SELECT descriptor_id, last_revealed FROM {}", - Self::LAST_REVEALED_TABLE_NAME, - ))?; - let row_iter = statement.query_map([], |row| { - Ok(( - row.get::<_, Impl>("descriptor_id")?, - row.get::<_, u32>("last_revealed")?, - )) - })?; - for row in row_iter { - let (Impl(descriptor_id), last_revealed) = row?; - changeset.last_revealed.insert(descriptor_id, last_revealed); - } - - Ok(changeset) - } - - /// Persist `changeset` to the sqlite database. - /// - /// Remember to call [`Self::init_sqlite_tables`] beforehand. - pub fn persist_to_sqlite(&self, db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> { - let mut statement = db_tx.prepare_cached(&format!( - "REPLACE INTO {}(descriptor_id, last_revealed) VALUES(:descriptor_id, :last_revealed)", - Self::LAST_REVEALED_TABLE_NAME, - ))?; - for (&descriptor_id, &last_revealed) in &self.last_revealed { - statement.execute(named_params! 
{ - ":descriptor_id": Impl(descriptor_id), - ":last_revealed": last_revealed, - })?; - } - - Ok(()) - } -} - -#[cfg(test)] -mod test { - use super::*; - - use bdk_testenv::{anyhow, hash}; - use bitcoin::{absolute, transaction, TxIn, TxOut}; - - #[test] - fn can_persist_anchors_and_txs_independently() -> anyhow::Result<()> { - type ChangeSet = tx_graph::ChangeSet; - let mut conn = rusqlite::Connection::open_in_memory()?; - - // init tables - { - let db_tx = conn.transaction()?; - ChangeSet::init_sqlite_tables(&db_tx)?; - db_tx.commit()?; - } - - let tx = bitcoin::Transaction { - version: transaction::Version::TWO, - lock_time: absolute::LockTime::ZERO, - input: vec![TxIn::default()], - output: vec![TxOut::NULL], - }; - let tx = Arc::new(tx); - let txid = tx.compute_txid(); - let anchor = ConfirmationBlockTime { - block_id: BlockId { - height: 21, - hash: hash!("anchor"), - }, - confirmation_time: 1342, - }; - - // First persist the anchor - { - let changeset = ChangeSet { - anchors: [(anchor, txid)].into(), - ..Default::default() - }; - let db_tx = conn.transaction()?; - changeset.persist_to_sqlite(&db_tx)?; - db_tx.commit()?; - } - - // Now persist the tx - { - let changeset = ChangeSet { - txs: [tx.clone()].into(), - ..Default::default() - }; - let db_tx = conn.transaction()?; - changeset.persist_to_sqlite(&db_tx)?; - db_tx.commit()?; - } - - // Loading changeset from sqlite should succeed - { - let db_tx = conn.transaction()?; - let changeset = ChangeSet::from_sqlite(&db_tx)?; - db_tx.commit()?; - assert!(changeset.txs.contains(&tx)); - assert!(changeset.anchors.contains(&(anchor, txid))); - } - - Ok(()) - } - - #[test] - fn v0_to_v2_schema_migration_is_backward_compatible() -> anyhow::Result<()> { - type ChangeSet = tx_graph::ChangeSet; - let mut conn = rusqlite::Connection::open_in_memory()?; - - // Create initial database with v0 sqlite schema - { - let db_tx = conn.transaction()?; - migrate_schema(&db_tx, ChangeSet::SCHEMA_NAME, &[&ChangeSet::schema_v0()])?; - db_tx.commit()?; - } - - let tx = bitcoin::Transaction { - version: transaction::Version::TWO, - lock_time: absolute::LockTime::ZERO, - input: vec![TxIn::default()], - output: vec![TxOut::NULL], - }; - let tx = Arc::new(tx); - let txid = tx.compute_txid(); - let anchor = ConfirmationBlockTime { - block_id: BlockId { - height: 21, - hash: hash!("anchor"), - }, - confirmation_time: 1342, - }; - - // Persist anchor with v0 sqlite schema - { - let changeset = ChangeSet { - anchors: [(anchor, txid)].into(), - ..Default::default() - }; - let mut statement = conn.prepare_cached(&format!( - "REPLACE INTO {} (txid, block_height, block_hash, anchor) - VALUES( - :txid, - :block_height, - :block_hash, - jsonb('{{ - \"block_id\": {{\"height\": {},\"hash\":\"{}\"}}, - \"confirmation_time\": {} - }}') - )", - ChangeSet::ANCHORS_TABLE_NAME, - anchor.block_id.height, - anchor.block_id.hash, - anchor.confirmation_time, - ))?; - let mut statement_txid = conn.prepare_cached(&format!( - "INSERT OR IGNORE INTO {}(txid) VALUES(:txid)", - ChangeSet::TXS_TABLE_NAME, - ))?; - for (anchor, txid) in &changeset.anchors { - let anchor_block = anchor.anchor_block(); - statement_txid.execute(named_params! { - ":txid": Impl(*txid) - })?; - match statement.execute(named_params! 
{ - ":txid": Impl(*txid), - ":block_height": anchor_block.height, - ":block_hash": Impl(anchor_block.hash), - }) { - Ok(updated) => assert_eq!(updated, 1), - Err(err) => panic!("update failed: {}", err), - } - } - } - - // Apply v1 & v2 sqlite schema to tables with data - { - let db_tx = conn.transaction()?; - migrate_schema( - &db_tx, - ChangeSet::SCHEMA_NAME, - &[ - &ChangeSet::schema_v0(), - &ChangeSet::schema_v1(), - &ChangeSet::schema_v2(), - ], - )?; - db_tx.commit()?; - } - - // Loading changeset from sqlite should succeed - { - let db_tx = conn.transaction()?; - let changeset = ChangeSet::from_sqlite(&db_tx)?; - db_tx.commit()?; - assert!(changeset.anchors.contains(&(anchor, txid))); - } - - Ok(()) - } - - #[test] - fn can_persist_last_evicted() -> anyhow::Result<()> { - use bitcoin::hashes::Hash; - - type ChangeSet = tx_graph::ChangeSet; - let mut conn = rusqlite::Connection::open_in_memory()?; - - // Init tables - { - let db_tx = conn.transaction()?; - ChangeSet::init_sqlite_tables(&db_tx)?; - db_tx.commit()?; - } - - let txid = bitcoin::Txid::all_zeros(); - let last_evicted = 100; - - // Persist `last_evicted` - { - let changeset = ChangeSet { - last_evicted: [(txid, last_evicted)].into(), - ..Default::default() - }; - let db_tx = conn.transaction()?; - changeset.persist_to_sqlite(&db_tx)?; - db_tx.commit()?; - } - - // Load from sqlite should succeed - { - let db_tx = conn.transaction()?; - let changeset = ChangeSet::from_sqlite(&db_tx)?; - db_tx.commit()?; - assert_eq!(changeset.last_evicted.get(&txid), Some(&last_evicted)); - } - - Ok(()) - } -} diff --git a/crates/chain/src/spk_iter.rs b/crates/chain/src/spk_iter.rs deleted file mode 100644 index 7228b719..00000000 --- a/crates/chain/src/spk_iter.rs +++ /dev/null @@ -1,272 +0,0 @@ -use crate::{ - bitcoin::{secp256k1::Secp256k1, ScriptBuf}, - miniscript::{Descriptor, DescriptorPublicKey}, - Indexed, -}; -use core::{borrow::Borrow, ops::Bound, ops::RangeBounds}; - -/// Maximum [BIP32](https://bips.xyz/32) derivation index. -pub const BIP32_MAX_INDEX: u32 = (1 << 31) - 1; - -/// An iterator for derived script pubkeys. -/// -/// [`SpkIterator`] is an implementation of the [`Iterator`] trait which possesses its own `next()` -/// and `nth()` functions, both of which circumvent the unnecessary intermediate derivations required -/// when using their default implementations. -/// -/// ## Examples -/// -/// ``` -/// use bdk_chain::SpkIterator; -/// # use miniscript::{Descriptor, DescriptorPublicKey}; -/// # use bitcoin::{secp256k1::Secp256k1}; -/// # use std::str::FromStr; -/// # let secp = bitcoin::secp256k1::Secp256k1::signing_only(); -/// # let (descriptor, _) = Descriptor::::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap(); -/// # let external_spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey(); -/// # let external_spk_3 = descriptor.at_derivation_index(3).unwrap().script_pubkey(); -/// # let external_spk_4 = descriptor.at_derivation_index(4).unwrap().script_pubkey(); -/// -/// // Creates a new script pubkey iterator starting at 0 from a descriptor. 
-/// let mut spk_iter = SpkIterator::new(&descriptor); -/// assert_eq!(spk_iter.next(), Some((0, external_spk_0))); -/// assert_eq!(spk_iter.next(), None); -/// ``` -#[derive(Clone)] -pub struct SpkIterator { - next_index: u32, - end: u32, - descriptor: D, - secp: Secp256k1, -} - -impl SpkIterator -where - D: Borrow>, -{ - /// Create a new script pubkey iterator from `descriptor`. - /// - /// This iterates from derivation index 0 and stops at index 0x7FFFFFFF (as specified in - /// BIP-32). Non-wildcard descriptors will only return one script pubkey at derivation index 0. - /// - /// Use [`new_with_range`](SpkIterator::new_with_range) to create an iterator with a specified - /// derivation index range. - pub fn new(descriptor: D) -> Self { - SpkIterator::new_with_range(descriptor, 0..=BIP32_MAX_INDEX) - } - - /// Create a new script pubkey iterator from `descriptor` and a given `range`. - /// - /// Non-wildcard descriptors will only emit a single script pubkey (at derivation index 0). - /// Wildcard descriptors have an end-bound of 0x7FFFFFFF (inclusive). - /// - /// Refer to [`new`](SpkIterator::new) for more. - pub fn new_with_range(descriptor: D, range: R) -> Self - where - R: RangeBounds, - { - let start = match range.start_bound() { - Bound::Included(start) => *start, - Bound::Excluded(start) => *start + 1, - Bound::Unbounded => u32::MIN, - }; - - let mut end = match range.end_bound() { - Bound::Included(end) => *end + 1, - Bound::Excluded(end) => *end, - Bound::Unbounded => u32::MAX, - }; - - // Because `end` is exclusive, we want the maximum value to be BIP32_MAX_INDEX + 1. - end = end.min(BIP32_MAX_INDEX + 1); - - Self { - next_index: start, - end, - descriptor, - secp: Secp256k1::verification_only(), - } - } - - /// Get a reference to the internal descriptor. - pub fn descriptor(&self) -> &D { - &self.descriptor - } -} - -impl Iterator for SpkIterator -where - D: Borrow>, -{ - type Item = Indexed; - - fn next(&mut self) -> Option { - // For non-wildcard descriptors, we expect the first element to be Some((0, spk)), then None after. - // For wildcard descriptors, we expect it to keep iterating until exhausted. - if self.next_index >= self.end { - return None; - } - - // If the descriptor is non-wildcard, only index 0 will return an spk. 
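// (Derivation below happens lazily, one index at a time; this is also what
// lets `nth` further down skip ahead by just bumping `next_index` rather
// than deriving and discarding the intermediate script pubkeys.)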
- if !self.descriptor.borrow().has_wildcard() && self.next_index != 0 { - return None; - } - - let script = self - .descriptor - .borrow() - .derived_descriptor(&self.secp, self.next_index) - .expect("the descriptor cannot need hardened derivation") - .script_pubkey(); - let output = (self.next_index, script); - - self.next_index += 1; - - Some(output) - } - - fn nth(&mut self, n: usize) -> Option { - self.next_index = self - .next_index - .saturating_add(u32::try_from(n).unwrap_or(u32::MAX)); - self.next() - } -} - -#[cfg(test)] -mod test { - use crate::{ - bitcoin::secp256k1::Secp256k1, - indexer::keychain_txout::KeychainTxOutIndex, - miniscript::{Descriptor, DescriptorPublicKey}, - spk_iter::{SpkIterator, BIP32_MAX_INDEX}, - }; - - #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)] - enum TestKeychain { - External, - Internal, - } - - fn init_txout_index() -> ( - KeychainTxOutIndex, - Descriptor, - Descriptor, - ) { - let mut txout_index = KeychainTxOutIndex::::new(0); - - let secp = Secp256k1::signing_only(); - let (external_descriptor,_) = Descriptor::::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap(); - let (internal_descriptor,_) = Descriptor::::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap(); - - let _ = txout_index - .insert_descriptor(TestKeychain::External, external_descriptor.clone()) - .unwrap(); - let _ = txout_index - .insert_descriptor(TestKeychain::Internal, internal_descriptor.clone()) - .unwrap(); - - (txout_index, external_descriptor, internal_descriptor) - } - - #[test] - #[allow(clippy::iter_nth_zero)] - #[rustfmt::skip] - fn test_spkiterator_wildcard() { - let (_, external_desc, _) = init_txout_index(); - let external_spk_0 = external_desc.at_derivation_index(0).unwrap().script_pubkey(); - let external_spk_16 = external_desc.at_derivation_index(16).unwrap().script_pubkey(); - let external_spk_20 = external_desc.at_derivation_index(20).unwrap().script_pubkey(); - let external_spk_21 = external_desc.at_derivation_index(21).unwrap().script_pubkey(); - let external_spk_max = external_desc.at_derivation_index(BIP32_MAX_INDEX).unwrap().script_pubkey(); - - let mut external_spk = SpkIterator::new(&external_desc); - let max_index = BIP32_MAX_INDEX - 22; - - assert_eq!(external_spk.next(), Some((0, external_spk_0))); - assert_eq!(external_spk.nth(15), Some((16, external_spk_16))); - assert_eq!(external_spk.nth(3), Some((20, external_spk_20.clone()))); - assert_eq!(external_spk.next(), Some((21, external_spk_21))); - assert_eq!( - external_spk.nth(max_index as usize), - Some((BIP32_MAX_INDEX, external_spk_max)) - ); - assert_eq!(external_spk.nth(0), None); - - let mut external_spk = SpkIterator::new_with_range(&external_desc, 0..21); - assert_eq!(external_spk.nth(20), Some((20, external_spk_20))); - assert_eq!(external_spk.next(), None); - - let mut external_spk = SpkIterator::new_with_range(&external_desc, 0..21); - assert_eq!(external_spk.nth(21), None); - } - - #[test] - #[allow(clippy::iter_nth_zero)] - fn test_spkiterator_non_wildcard() { - let secp = bitcoin::secp256k1::Secp256k1::signing_only(); - let (no_wildcard_descriptor, _) = Descriptor::::parse_descriptor(&secp, 
"wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap(); - let external_spk_0 = no_wildcard_descriptor - .at_derivation_index(0) - .unwrap() - .script_pubkey(); - - let mut external_spk = SpkIterator::new(&no_wildcard_descriptor); - - assert_eq!(external_spk.next(), Some((0, external_spk_0.clone()))); - assert_eq!(external_spk.next(), None); - - let mut external_spk = SpkIterator::new(&no_wildcard_descriptor); - - assert_eq!(external_spk.nth(0), Some((0, external_spk_0.clone()))); - assert_eq!(external_spk.nth(0), None); - - let mut external_spk = SpkIterator::new_with_range(&no_wildcard_descriptor, 0..0); - - assert_eq!(external_spk.next(), None); - - let mut external_spk = SpkIterator::new_with_range(&no_wildcard_descriptor, 0..1); - - assert_eq!(external_spk.nth(0), Some((0, external_spk_0.clone()))); - assert_eq!(external_spk.next(), None); - - // We test that using new_with_range with range_len > 1 gives back an iterator with - // range_len = 1 - let mut external_spk = SpkIterator::new_with_range(&no_wildcard_descriptor, 0..10); - - assert_eq!(external_spk.nth(0), Some((0, external_spk_0))); - assert_eq!(external_spk.nth(0), None); - - // non index-0 should NOT return an spk - assert_eq!( - SpkIterator::new_with_range(&no_wildcard_descriptor, 1..1).next(), - None - ); - assert_eq!( - SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=1).next(), - None - ); - assert_eq!( - SpkIterator::new_with_range(&no_wildcard_descriptor, 1..2).next(), - None - ); - assert_eq!( - SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=2).next(), - None - ); - assert_eq!( - SpkIterator::new_with_range(&no_wildcard_descriptor, 10..11).next(), - None - ); - assert_eq!( - SpkIterator::new_with_range(&no_wildcard_descriptor, 10..=10).next(), - None - ); - } -} - -#[test] -fn spk_iterator_is_send_and_static() { - fn is_send_and_static() {} - is_send_and_static::>>() -} diff --git a/crates/chain/src/tx_data_traits.rs b/crates/chain/src/tx_data_traits.rs deleted file mode 100644 index 74d1021f..00000000 --- a/crates/chain/src/tx_data_traits.rs +++ /dev/null @@ -1,128 +0,0 @@ -use crate::{BlockId, ConfirmationBlockTime}; - -/// Trait that "anchors" blockchain data to a specific block of height and hash. -/// -/// If transaction A is anchored in block B, and block B is in the best chain, we can -/// assume that transaction A is also confirmed in the best chain. This does not necessarily mean -/// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a -/// parent block of B. -/// -/// Every [`Anchor`] implementation must contain a [`BlockId`] parameter, and must implement -/// [`Ord`]. When implementing [`Ord`], the anchors' [`BlockId`]s should take precedence -/// over other elements inside the [`Anchor`]s for comparison purposes, i.e., you should first -/// compare the anchors' [`BlockId`]s and then care about the rest. -/// -/// The example shows different types of anchors: -/// ``` -/// # use bdk_chain::local_chain::LocalChain; -/// # use bdk_chain::tx_graph::TxGraph; -/// # use bdk_chain::BlockId; -/// # use bdk_chain::ConfirmationBlockTime; -/// # use bdk_chain::example_utils::*; -/// # use bitcoin::hashes::Hash; -/// // Initialize the local chain with two blocks. 
-/// let chain = LocalChain::from_blocks( -/// [ -/// (1, Hash::hash("first".as_bytes())), -/// (2, Hash::hash("second".as_bytes())), -/// ] -/// .into_iter() -/// .collect(), -/// ); -/// -/// // Transaction to be inserted into `TxGraph`s with different anchor types. -/// let tx = tx_from_hex(RAW_TX_1); -/// -/// // Insert `tx` into a `TxGraph` that uses `BlockId` as the anchor type. -/// // When a transaction is anchored with `BlockId`, the anchor block and the confirmation block of -/// // the transaction is the same block. -/// let mut graph_a = TxGraph::::default(); -/// let _ = graph_a.insert_tx(tx.clone()); -/// graph_a.insert_anchor( -/// tx.compute_txid(), -/// BlockId { -/// height: 1, -/// hash: Hash::hash("first".as_bytes()), -/// }, -/// ); -/// -/// // Insert `tx` into a `TxGraph` that uses `ConfirmationBlockTime` as the anchor type. -/// // This anchor records the anchor block and the confirmation time of the transaction. When a -/// // transaction is anchored with `ConfirmationBlockTime`, the anchor block and confirmation block -/// // of the transaction is the same block. -/// let mut graph_c = TxGraph::::default(); -/// let _ = graph_c.insert_tx(tx.clone()); -/// graph_c.insert_anchor( -/// tx.compute_txid(), -/// ConfirmationBlockTime { -/// block_id: BlockId { -/// height: 2, -/// hash: Hash::hash("third".as_bytes()), -/// }, -/// confirmation_time: 123, -/// }, -/// ); -/// ``` -pub trait Anchor: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash { - /// Returns the [`BlockId`] that the associated blockchain data is "anchored" in. - fn anchor_block(&self) -> BlockId; - - /// Get the upper bound of the chain data's confirmation height. - /// - /// The default definition gives a pessimistic answer. This can be overridden by the `Anchor` - /// implementation for a more accurate value. - fn confirmation_height_upper_bound(&self) -> u32 { - self.anchor_block().height - } -} - -impl Anchor for &A { - fn anchor_block(&self) -> BlockId { - ::anchor_block(self) - } -} - -impl Anchor for BlockId { - fn anchor_block(&self) -> Self { - *self - } -} - -impl Anchor for ConfirmationBlockTime { - fn anchor_block(&self) -> BlockId { - self.block_id - } - - fn confirmation_height_upper_bound(&self) -> u32 { - self.block_id.height - } -} - -/// Set of parameters sufficient to construct an [`Anchor`]. -/// -/// Typically used as an additional constraint on anchor: -/// `for<'b> A: Anchor + From>`. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct TxPosInBlock<'b> { - /// Block in which the transaction appeared. - pub block: &'b bitcoin::Block, - /// Block's [`BlockId`]. - pub block_id: BlockId, - /// Position in the block on which the transaction appeared. - pub tx_pos: usize, -} - -impl From> for BlockId { - fn from(pos: TxPosInBlock) -> Self { - pos.block_id - } -} - -impl From> for ConfirmationBlockTime { - fn from(pos: TxPosInBlock) -> Self { - Self { - block_id: pos.block_id, - confirmation_time: pos.block.header.time as _, - } - } -} diff --git a/crates/chain/src/tx_graph.rs b/crates/chain/src/tx_graph.rs deleted file mode 100644 index 4c299035..00000000 --- a/crates/chain/src/tx_graph.rs +++ /dev/null @@ -1,1619 +0,0 @@ -//! Module for structures that store and traverse transactions. -//! -//! [`TxGraph`] contains transactions and indexes them so you can easily traverse the graph of -//! those transactions. `TxGraph` is *monotone* in that you can always insert a transaction -- it -//! 
does not care whether that transaction is in the current best chain or whether it conflicts with -//! any of the existing transactions or what order you insert the transactions. This means that you -//! can always combine two [`TxGraph`]s together, without resulting in inconsistencies. Furthermore, -//! there is currently no way to delete a transaction. -//! -//! Transactions can be either whole or partial (i.e., transactions for which we only know some -//! outputs, which we usually call "floating outputs"; these are usually inserted using the -//! [`insert_txout`] method.). -//! -//! The graph contains transactions in the form of [`TxNode`]s. Each node contains the txid, the -//! transaction (whole or partial), the blocks that it is anchored to (see the [`Anchor`] -//! documentation for more details), and the timestamp of the last time we saw the transaction as -//! unconfirmed. -//! -//! # Canonicalization -//! -//! Conflicting transactions are allowed to coexist within a [`TxGraph`]. A process called -//! canonicalization is required to get a conflict-free view of transactions. -//! -//! * [`list_canonical_txs`](TxGraph::list_canonical_txs) lists canonical transactions. -//! * [`filter_chain_txouts`](TxGraph::filter_chain_txouts) filters out canonical outputs from a -//! list of outpoints. -//! * [`filter_chain_unspents`](TxGraph::filter_chain_unspents) filters out canonical unspent -//! outputs from a list of outpoints. -//! * [`balance`](TxGraph::balance) gets the total sum of unspent outputs filtered from a list of -//! outpoints. -//! * [`canonical_iter`](TxGraph::canonical_iter) returns the [`CanonicalIter`] which contains all -//! of the canonicalization logic. -//! -//! All these methods require a `chain` and `chain_tip` argument. The `chain` must be a -//! [`ChainOracle`] implementation (such as [`LocalChain`](crate::local_chain::LocalChain)) which -//! identifies which blocks exist under a given `chain_tip`. -//! -//! The canonicalization algorithm uses the following associated data to determine which -//! transactions have precedence over others: -//! -//! * [`Anchor`] - This bit of data represents that a transaction is anchored in a given block. If -//! the transaction is anchored in chain of `chain_tip`, or is an ancestor of a transaction -//! anchored in chain of `chain_tip`, then the transaction must be canonical. -//! * `last_seen` - This is the timestamp of when a transaction is last-seen in the mempool. This -//! value is updated by [`insert_seen_at`](TxGraph::insert_seen_at) and -//! [`apply_update`](TxGraph::apply_update). Transactions that are seen later have higher -//! priority than those that are seen earlier. `last_seen` values are transitive. This means -//! that the actual `last_seen` value of a transaction is the max of all the `last_seen` values -//! from it's descendants. -//! * `last_evicted` - This is the timestamp of when a transaction last went missing from the -//! mempool. If this value is equal to or higher than the transaction's `last_seen` value, then -//! it will not be considered canonical. -//! -//! # Graph traversal -//! -//! You can use [`TxAncestors`]/[`TxDescendants`] to traverse ancestors and descendants of a given -//! transaction, respectively. -//! -//! # Applying changes -//! -//! The [`ChangeSet`] reports changes made to a [`TxGraph`]; it can be used to either save to -//! persistent storage, or to be applied to another [`TxGraph`]. -//! -//! Methods that change the state of [`TxGraph`] will return [`ChangeSet`]s. -//! -//! 
# Generics -//! -//! Anchors are represented as generics within `TxGraph`. To make use of all functionality of the -//! `TxGraph`, anchors (`A`) should implement [`Anchor`]. -//! -//! Anchors are made generic so that different types of data can be stored with how a transaction is -//! *anchored* to a given block. An example of this is storing a merkle proof of the transaction to -//! the confirmation block - this can be done with a custom [`Anchor`] type. The minimal [`Anchor`] -//! type would just be a [`BlockId`] which just represents the height and hash of the block which -//! the transaction is contained in. Note that a transaction can be contained in multiple -//! conflicting blocks (by nature of the Bitcoin network). -//! -//! ``` -//! # use bdk_chain::BlockId; -//! # use bdk_chain::tx_graph::TxGraph; -//! # use bdk_chain::example_utils::*; -//! # use bitcoin::Transaction; -//! # let tx_a = tx_from_hex(RAW_TX_1); -//! let mut tx_graph: TxGraph = TxGraph::default(); -//! -//! // insert a transaction -//! let changeset = tx_graph.insert_tx(tx_a); -//! -//! // We can restore the state of the `tx_graph` by applying all -//! // the changesets obtained by mutating the original (the order doesn't matter). -//! let mut restored_tx_graph: TxGraph = TxGraph::default(); -//! restored_tx_graph.apply_changeset(changeset); -//! -//! assert_eq!(tx_graph, restored_tx_graph); -//! ``` -//! -//! A [`TxGraph`] can also be updated with another [`TxGraph`] which merges them together. -//! -//! ``` -//! # use bdk_chain::{Merge, BlockId}; -//! # use bdk_chain::tx_graph::{self, TxGraph}; -//! # use bdk_chain::example_utils::*; -//! # use bitcoin::Transaction; -//! # use std::sync::Arc; -//! # let tx_a = tx_from_hex(RAW_TX_1); -//! # let tx_b = tx_from_hex(RAW_TX_2); -//! let mut graph: TxGraph = TxGraph::default(); -//! -//! let mut update = tx_graph::TxUpdate::default(); -//! update.txs.push(Arc::new(tx_a)); -//! update.txs.push(Arc::new(tx_b)); -//! -//! // apply the update graph -//! let changeset = graph.apply_update(update.clone()); -//! -//! // if we apply it again, the resulting changeset will be empty -//! let changeset = graph.apply_update(update); -//! assert!(changeset.is_empty()); -//! ``` -//! 
[`insert_txout`]: TxGraph::insert_txout - -use crate::collections::*; -use crate::spk_txout::SpkTxOutIndex; -use crate::BlockId; -use crate::CanonicalIter; -use crate::CanonicalReason; -use crate::ObservedIn; -use crate::{Anchor, Balance, ChainOracle, ChainPosition, FullTxOut, Merge}; -use alloc::collections::vec_deque::VecDeque; -use alloc::sync::Arc; -use alloc::vec::Vec; -use bdk_core::ConfirmationBlockTime; -pub use bdk_core::TxUpdate; -use bitcoin::{Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxOut, Txid}; -use core::fmt::{self, Formatter}; -use core::ops::RangeBounds; -use core::{ - convert::Infallible, - ops::{Deref, RangeInclusive}, -}; - -impl From> for TxUpdate { - fn from(graph: TxGraph) -> Self { - let mut tx_update = TxUpdate::default(); - tx_update.txs = graph.full_txs().map(|tx_node| tx_node.tx).collect(); - tx_update.txouts = graph - .floating_txouts() - .map(|(op, txo)| (op, txo.clone())) - .collect(); - tx_update.anchors = graph - .anchors - .into_iter() - .flat_map(|(txid, anchors)| anchors.into_iter().map(move |a| (a, txid))) - .collect(); - tx_update.seen_ats = graph.last_seen.into_iter().collect(); - tx_update.evicted_ats = graph.last_evicted.into_iter().collect(); - tx_update - } -} - -impl From> for TxGraph { - fn from(update: TxUpdate) -> Self { - let mut graph = TxGraph::::default(); - let _ = graph.apply_update(update); - graph - } -} - -/// A graph of transactions and spends. -/// -/// See the [module-level documentation] for more. -/// -/// [module-level documentation]: crate::tx_graph -#[derive(Clone, Debug, PartialEq)] -pub struct TxGraph { - txs: HashMap, - spends: BTreeMap>, - anchors: HashMap>, - last_seen: HashMap, - last_evicted: HashMap, - - txs_by_highest_conf_heights: BTreeSet<(u32, Txid)>, - txs_by_last_seen: BTreeSet<(u64, Txid)>, - - // The following fields exist so that methods can return references to empty sets. - // FIXME: This can be removed once `HashSet::new` and `BTreeSet::new` are const fns. - empty_outspends: HashSet, - empty_anchors: BTreeSet, -} - -impl Default for TxGraph { - fn default() -> Self { - Self { - txs: Default::default(), - spends: Default::default(), - anchors: Default::default(), - last_seen: Default::default(), - last_evicted: Default::default(), - txs_by_highest_conf_heights: Default::default(), - txs_by_last_seen: Default::default(), - empty_outspends: Default::default(), - empty_anchors: Default::default(), - } - } -} - -/// A transaction node in the [`TxGraph`]. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct TxNode<'a, T, A> { - /// Txid of the transaction. - pub txid: Txid, - /// A partial or full representation of the transaction. - pub tx: T, - /// The blocks that the transaction is "anchored" in. - pub anchors: &'a BTreeSet, - /// The last-seen unix timestamp of the transaction as unconfirmed. - pub last_seen_unconfirmed: Option, -} - -impl Deref for TxNode<'_, T, A> { - type Target = T; - - fn deref(&self) -> &Self::Target { - &self.tx - } -} - -/// Internal representation of a transaction node of a [`TxGraph`]. -/// -/// This can either be a whole transaction, or a partial transaction (where we only have select -/// outputs). -#[derive(Clone, Debug, PartialEq)] -enum TxNodeInternal { - Whole(Arc), - Partial(BTreeMap), -} - -impl Default for TxNodeInternal { - fn default() -> Self { - Self::Partial(BTreeMap::new()) - } -} - -/// A transaction that is deemed to be part of the canonical history. 
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct CanonicalTx<'a, T, A> { - /// How the transaction is observed in the canonical chain (confirmed or unconfirmed). - pub chain_position: ChainPosition, - /// The transaction node (as part of the graph). - pub tx_node: TxNode<'a, T, A>, -} - -/// Errors returned by `TxGraph::calculate_fee`. -#[derive(Debug, PartialEq, Eq)] -pub enum CalculateFeeError { - /// Missing `TxOut` for one or more of the inputs of the tx - MissingTxOut(Vec), - /// When the transaction is invalid according to the graph it has a negative fee - NegativeFee(SignedAmount), -} - -impl fmt::Display for CalculateFeeError { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - CalculateFeeError::MissingTxOut(outpoints) => write!( - f, - "missing `TxOut` for one or more of the inputs of the tx: {:?}", - outpoints - ), - CalculateFeeError::NegativeFee(fee) => write!( - f, - "transaction is invalid according to the graph and has negative fee: {}", - fee.display_dynamic() - ), - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for CalculateFeeError {} - -impl TxGraph { - /// Iterate over all tx outputs known by [`TxGraph`]. - /// - /// This includes txouts of both full transactions as well as floating transactions. - pub fn all_txouts(&self) -> impl Iterator { - self.txs.iter().flat_map(|(txid, tx)| match tx { - TxNodeInternal::Whole(tx) => tx - .as_ref() - .output - .iter() - .enumerate() - .map(|(vout, txout)| (OutPoint::new(*txid, vout as _), txout)) - .collect::>(), - TxNodeInternal::Partial(txouts) => txouts - .iter() - .map(|(vout, txout)| (OutPoint::new(*txid, *vout as _), txout)) - .collect::>(), - }) - } - - /// Iterate over floating txouts known by [`TxGraph`]. - /// - /// Floating txouts are txouts that do not have the residing full transaction contained in the - /// graph. - pub fn floating_txouts(&self) -> impl Iterator { - self.txs - .iter() - .filter_map(|(txid, tx_node)| match tx_node { - TxNodeInternal::Whole(_) => None, - TxNodeInternal::Partial(txouts) => Some( - txouts - .iter() - .map(|(&vout, txout)| (OutPoint::new(*txid, vout), txout)), - ), - }) - .flatten() - } - - /// Iterate over all full transactions in the graph. - pub fn full_txs(&self) -> impl Iterator, A>> { - self.txs.iter().filter_map(|(&txid, tx)| match tx { - TxNodeInternal::Whole(tx) => Some(TxNode { - txid, - tx: tx.clone(), - anchors: self.anchors.get(&txid).unwrap_or(&self.empty_anchors), - last_seen_unconfirmed: self.last_seen.get(&txid).copied(), - }), - TxNodeInternal::Partial(_) => None, - }) - } - - /// Iterate over graph transactions with no anchors or last-seen. - pub fn txs_with_no_anchor_or_last_seen( - &self, - ) -> impl Iterator, A>> { - self.full_txs().filter_map(|tx| { - if tx.anchors.is_empty() && tx.last_seen_unconfirmed.is_none() { - Some(tx) - } else { - None - } - }) - } - - /// Get a transaction by txid. This only returns `Some` for full transactions. - /// - /// Refer to [`get_txout`] for getting a specific [`TxOut`]. - /// - /// [`get_txout`]: Self::get_txout - pub fn get_tx(&self, txid: Txid) -> Option> { - self.get_tx_node(txid).map(|n| n.tx) - } - - /// Get a transaction node by txid. This only returns `Some` for full transactions. - pub fn get_tx_node(&self, txid: Txid) -> Option, A>> { - match &self.txs.get(&txid)? 
{ - TxNodeInternal::Whole(tx) => Some(TxNode { - txid, - tx: tx.clone(), - anchors: self.anchors.get(&txid).unwrap_or(&self.empty_anchors), - last_seen_unconfirmed: self.last_seen.get(&txid).copied(), - }), - _ => None, - } - } - - /// Obtains a single tx output (if any) at the specified outpoint. - pub fn get_txout(&self, outpoint: OutPoint) -> Option<&TxOut> { - match &self.txs.get(&outpoint.txid)? { - TxNodeInternal::Whole(tx) => tx.as_ref().output.get(outpoint.vout as usize), - TxNodeInternal::Partial(txouts) => txouts.get(&outpoint.vout), - } - } - - /// Returns known outputs of a given `txid`. - /// - /// Returns a [`BTreeMap`] of vout to output of the provided `txid`. - pub fn tx_outputs(&self, txid: Txid) -> Option> { - Some(match &self.txs.get(&txid)? { - TxNodeInternal::Whole(tx) => tx - .as_ref() - .output - .iter() - .enumerate() - .map(|(vout, txout)| (vout as u32, txout)) - .collect::>(), - TxNodeInternal::Partial(txouts) => txouts - .iter() - .map(|(vout, txout)| (*vout, txout)) - .collect::>(), - }) - } - - /// Calculates the fee of a given transaction. Returns [`Amount::ZERO`] if `tx` is a coinbase transaction. - /// Returns `OK(_)` if we have all the [`TxOut`]s being spent by `tx` in the graph (either as - /// the full transactions or individual txouts). - /// - /// To calculate the fee for a [`Transaction`] that depends on foreign [`TxOut`] values you must - /// first manually insert the foreign TxOuts into the tx graph using the [`insert_txout`] function. - /// Only insert TxOuts you trust the values for! - /// - /// Note `tx` does not have to be in the graph for this to work. - /// - /// [`insert_txout`]: Self::insert_txout - pub fn calculate_fee(&self, tx: &Transaction) -> Result { - if tx.is_coinbase() { - return Ok(Amount::ZERO); - } - - let (inputs_sum, missing_outputs) = tx.input.iter().fold( - (SignedAmount::ZERO, Vec::new()), - |(mut sum, mut missing_outpoints), txin| match self.get_txout(txin.previous_output) { - None => { - missing_outpoints.push(txin.previous_output); - (sum, missing_outpoints) - } - Some(txout) => { - sum += txout.value.to_signed().expect("valid `SignedAmount`"); - (sum, missing_outpoints) - } - }, - ); - if !missing_outputs.is_empty() { - return Err(CalculateFeeError::MissingTxOut(missing_outputs)); - } - - let outputs_sum = tx - .output - .iter() - .map(|txout| txout.value.to_signed().expect("valid `SignedAmount`")) - .sum::(); - - let fee = inputs_sum - outputs_sum; - fee.to_unsigned() - .map_err(|_| CalculateFeeError::NegativeFee(fee)) - } - - /// The transactions spending from this output. - /// - /// [`TxGraph`] allows conflicting transactions within the graph. Obviously the transactions in - /// the returned set will never be in the same active-chain. - pub fn outspends(&self, outpoint: OutPoint) -> &HashSet { - self.spends.get(&outpoint).unwrap_or(&self.empty_outspends) - } - - /// Iterates over the transactions spending from `txid`. - /// - /// The iterator item is a union of `(vout, txid-set)` where: - /// - /// - `vout` is the provided `txid`'s outpoint that is being spent - /// - `txid-set` is the set of txids spending the `vout`. - pub fn tx_spends( - &self, - txid: Txid, - ) -> impl DoubleEndedIterator)> + '_ { - let start = OutPoint::new(txid, 0); - let end = OutPoint::new(txid, u32::MAX); - self.spends - .range(start..=end) - .map(|(outpoint, spends)| (outpoint.vout, spends)) - } -} - -impl TxGraph { - /// Creates an iterator that filters and maps ancestor transactions. 
- /// - /// The iterator starts with the ancestors of the supplied `tx` (ancestor transactions of `tx` - /// are transactions spent by `tx`). The supplied transaction is excluded from the iterator. - /// - /// The supplied closure takes in two inputs `(depth, ancestor_tx)`: - /// - /// * `depth` is the distance between the starting `Transaction` and the `ancestor_tx`. I.e., if - /// the `Transaction` is spending an output of the `ancestor_tx` then `depth` will be 1. - /// * `ancestor_tx` is the `Transaction`'s ancestor which we are considering to walk. - /// - /// The supplied closure returns an `Option`, allowing the caller to map each `Transaction` - /// it visits and decide whether to visit ancestors. - pub fn walk_ancestors<'g, T, F, O>(&'g self, tx: T, walk_map: F) -> TxAncestors<'g, A, F, O> - where - T: Into>, - F: FnMut(usize, Arc) -> Option + 'g, - { - TxAncestors::new_exclude_root(self, tx, walk_map) - } - - /// Creates an iterator that filters and maps descendants from the starting `txid`. - /// - /// The supplied closure takes in two inputs `(depth, descendant_txid)`: - /// - /// * `depth` is the distance between the starting `txid` and the `descendant_txid`. I.e., if the - /// descendant is spending an output of the starting `txid` then `depth` will be 1. - /// * `descendant_txid` is the descendant's txid which we are considering to walk. - /// - /// The supplied closure returns an `Option`, allowing the caller to map each node it visits - /// and decide whether to visit descendants. - pub fn walk_descendants<'g, F, O>( - &'g self, - txid: Txid, - walk_map: F, - ) -> TxDescendants<'g, A, F, O> - where - F: FnMut(usize, Txid) -> Option + 'g, - { - TxDescendants::new_exclude_root(self, txid, walk_map) - } -} - -impl TxGraph { - /// Creates an iterator that both filters and maps conflicting transactions (this includes - /// descendants of directly-conflicting transactions, which are also considered conflicts). - /// - /// Refer to [`Self::walk_descendants`] for `walk_map` usage. - pub fn walk_conflicts<'g, F, O>( - &'g self, - tx: &'g Transaction, - walk_map: F, - ) -> TxDescendants<'g, A, F, O> - where - F: FnMut(usize, Txid) -> Option + 'g, - { - let txids = self.direct_conflicts(tx).map(|(_, txid)| txid); - TxDescendants::from_multiple_include_root(self, txids, walk_map) - } - - /// Given a transaction, return an iterator of txids that directly conflict with the given - /// transaction's inputs (spends). The conflicting txids are returned with the given - /// transaction's vin (in which it conflicts). - /// - /// Note that this only returns directly conflicting txids and won't include: - /// - descendants of conflicting transactions (which are technically also conflicting) - /// - transactions conflicting with the given transaction's ancestors - pub fn direct_conflicts<'g>( - &'g self, - tx: &'g Transaction, - ) -> impl Iterator + 'g { - let txid = tx.compute_txid(); - tx.input - .iter() - .enumerate() - .filter_map(move |(vin, txin)| self.spends.get(&txin.previous_output).zip(Some(vin))) - .flat_map(|(spends, vin)| core::iter::repeat(vin).zip(spends.iter().cloned())) - .filter(move |(_, conflicting_txid)| *conflicting_txid != txid) - } - - /// Get all transaction anchors known by [`TxGraph`]. - pub fn all_anchors(&self) -> &HashMap> { - &self.anchors - } - - /// Whether the graph has any transactions or outputs in it. - pub fn is_empty(&self) -> bool { - self.txs.is_empty() - } -} - -impl TxGraph { - /// Transform the [`TxGraph`] to have [`Anchor`]s of another type. 
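// [Illustrative sketch, editor-added; not part of the original patch.]
// Collecting descendants of a txid by depth with `walk_descendants`; the
// closure's `Option` return lets the caller prune a branch by answering
// `None`. `graph` and `txid` are hypothetical.
fn descendants_by_depth(
    graph: &bdk_chain::tx_graph::TxGraph<bdk_chain::ConfirmationBlockTime>,
    txid: bitcoin::Txid,
) -> Vec<(usize, bitcoin::Txid)> {
    graph
        .walk_descendants(txid, |depth, d_txid| Some((depth, d_txid)))
        .collect()
}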
- /// - /// This takes in a closure of signature `FnMut(A) -> A2` which is called for each [`Anchor`] to - /// transform it. - pub fn map_anchors(self, f: F) -> TxGraph - where - F: FnMut(A) -> A2, - { - let mut new_graph = TxGraph::::default(); - new_graph.apply_changeset(self.initial_changeset().map_anchors(f)); - new_graph - } - - /// Construct a new [`TxGraph`] from a list of transactions. - pub fn new(txs: impl IntoIterator) -> Self { - let mut new = Self::default(); - for tx in txs.into_iter() { - let _ = new.insert_tx(tx); - } - new - } - - /// Inserts the given [`TxOut`] at [`OutPoint`]. - /// - /// Inserting floating txouts are useful for determining fee/feerate of transactions we care - /// about. - /// - /// The [`ChangeSet`] result will be empty if the `outpoint` (or a full transaction containing - /// the `outpoint`) already existed in `self`. - /// - /// [`apply_changeset`]: Self::apply_changeset - pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> ChangeSet { - let mut changeset = ChangeSet::::default(); - let tx_node = self.txs.entry(outpoint.txid).or_default(); - match tx_node { - TxNodeInternal::Whole(_) => { - // ignore this txout we have the full one already. - // NOTE: You might think putting a debug_assert! here to check the output being - // replaced was actually correct is a good idea but the tests have already been - // written assuming this never panics. - } - TxNodeInternal::Partial(partial_tx) => { - match partial_tx.insert(outpoint.vout, txout.clone()) { - Some(old_txout) => { - debug_assert_eq!( - txout, old_txout, - "txout of the same outpoint should never change" - ); - } - None => { - changeset.txouts.insert(outpoint, txout); - } - } - } - } - changeset - } - - /// Inserts the given transaction into [`TxGraph`]. - /// - /// The [`ChangeSet`] returned will be empty if `tx` already exists. - pub fn insert_tx>>(&mut self, tx: T) -> ChangeSet { - let tx: Arc = tx.into(); - let txid = tx.compute_txid(); - let mut changeset = ChangeSet::::default(); - - let tx_node = self.txs.entry(txid).or_default(); - match tx_node { - TxNodeInternal::Whole(existing_tx) => { - debug_assert_eq!( - existing_tx.as_ref(), - tx.as_ref(), - "tx of same txid should never change" - ); - } - partial_tx => { - for txin in &tx.input { - // this means the tx is coinbase so there is no previous output - if txin.previous_output.is_null() { - continue; - } - self.spends - .entry(txin.previous_output) - .or_default() - .insert(txid); - } - *partial_tx = TxNodeInternal::Whole(tx.clone()); - changeset.txs.insert(tx); - } - } - - changeset - } - - /// Batch insert unconfirmed transactions. - /// - /// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The - /// *last seen* communicates when the transaction is last seen in mempool which is used for - /// conflict-resolution (refer to [`TxGraph::insert_seen_at`] for details). - pub fn batch_insert_unconfirmed>>( - &mut self, - txs: impl IntoIterator, - ) -> ChangeSet { - let mut changeset = ChangeSet::::default(); - for (tx, seen_at) in txs { - let tx: Arc = tx.into(); - changeset.merge(self.insert_seen_at(tx.compute_txid(), seen_at)); - changeset.merge(self.insert_tx(tx)); - } - changeset - } - - /// Inserts the given `anchor` into [`TxGraph`]. - /// - /// The [`ChangeSet`] returned will be empty if graph already knows that `txid` exists in - /// `anchor`. 
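// [Illustrative sketch, editor-added; not part of the original patch.]
// Building a graph from mempool data with `batch_insert_unconfirmed`; each
// item pairs a transaction with its last-seen timestamp. `mempool_txs` is
// hypothetical, and the returned `ChangeSet` would normally be persisted.
fn graph_from_mempool(
    mempool_txs: Vec<(bitcoin::Transaction, u64)>,
) -> bdk_chain::tx_graph::TxGraph<bdk_chain::ConfirmationBlockTime> {
    let mut graph = bdk_chain::tx_graph::TxGraph::default();
    let _changeset = graph.batch_insert_unconfirmed(mempool_txs);
    graph
}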
- pub fn insert_anchor(&mut self, txid: Txid, anchor: A) -> ChangeSet { - // These two variables are used to determine how to modify the `txid`'s entry in - // `txs_by_highest_conf_heights`. - // We want to remove `(old_top_h?, txid)` and insert `(new_top_h?, txid)`. - let mut old_top_h = None; - let mut new_top_h = anchor.confirmation_height_upper_bound(); - - let is_changed = match self.anchors.entry(txid) { - hash_map::Entry::Occupied(mut e) => { - old_top_h = e - .get() - .iter() - .last() - .map(Anchor::confirmation_height_upper_bound); - if let Some(old_top_h) = old_top_h { - if old_top_h > new_top_h { - new_top_h = old_top_h; - } - } - let is_changed = e.get_mut().insert(anchor.clone()); - is_changed - } - hash_map::Entry::Vacant(e) => { - e.insert(core::iter::once(anchor.clone()).collect()); - true - } - }; - - let mut changeset = ChangeSet::::default(); - if is_changed { - let new_top_is_changed = match old_top_h { - None => true, - Some(old_top_h) if old_top_h != new_top_h => true, - _ => false, - }; - if new_top_is_changed { - if let Some(prev_top_h) = old_top_h { - self.txs_by_highest_conf_heights.remove(&(prev_top_h, txid)); - } - self.txs_by_highest_conf_heights.insert((new_top_h, txid)); - } - changeset.anchors.insert((anchor, txid)); - } - changeset - } - - /// Inserts the given `seen_at` for `txid` into [`TxGraph`]. - /// - /// Note that [`TxGraph`] only keeps track of the latest `seen_at`. - pub fn insert_seen_at(&mut self, txid: Txid, seen_at: u64) -> ChangeSet { - let mut old_last_seen = None; - let is_changed = match self.last_seen.entry(txid) { - hash_map::Entry::Occupied(mut e) => { - let last_seen = e.get_mut(); - old_last_seen = Some(*last_seen); - let change = *last_seen < seen_at; - if change { - *last_seen = seen_at; - } - change - } - hash_map::Entry::Vacant(e) => { - e.insert(seen_at); - true - } - }; - - let mut changeset = ChangeSet::::default(); - if is_changed { - if let Some(old_last_seen) = old_last_seen { - self.txs_by_last_seen.remove(&(old_last_seen, txid)); - } - self.txs_by_last_seen.insert((seen_at, txid)); - changeset.last_seen.insert(txid, seen_at); - } - changeset - } - - /// Inserts the given `evicted_at` for `txid` into [`TxGraph`]. - /// - /// The `evicted_at` timestamp represents the last known time when the transaction was observed - /// to be missing from the mempool. If `txid` was previously recorded with an earlier - /// `evicted_at` value, it is updated only if the new value is greater. - pub fn insert_evicted_at(&mut self, txid: Txid, evicted_at: u64) -> ChangeSet { - let is_changed = match self.last_evicted.entry(txid) { - hash_map::Entry::Occupied(mut e) => { - let last_evicted = e.get_mut(); - let change = *last_evicted < evicted_at; - if change { - *last_evicted = evicted_at; - } - change - } - hash_map::Entry::Vacant(e) => { - e.insert(evicted_at); - true - } - }; - - let mut changeset = ChangeSet::::default(); - if is_changed { - changeset.last_evicted.insert(txid, evicted_at); - } - changeset - } - - /// Extends this graph with the given `update`. - /// - /// The returned [`ChangeSet`] is the set difference between `update` and `self` (transactions that - /// exist in `update` but not in `self`). 
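// [Illustrative sketch, editor-added; not part of the original patch.]
// Recording the three kinds of observations the insert_* methods cover: a
// confirmation anchor, a mempool sighting, and a mempool eviction. `graph`,
// `txid`, `anchor`, and the timestamps are hypothetical.
fn record_observations(
    graph: &mut bdk_chain::tx_graph::TxGraph<bdk_chain::ConfirmationBlockTime>,
    txid: bitcoin::Txid,
    anchor: bdk_chain::ConfirmationBlockTime,
) {
    let _ = graph.insert_anchor(txid, anchor); // tx is anchored in a block
    let _ = graph.insert_seen_at(txid, 1_000_000_100); // last seen in mempool
    let _ = graph.insert_evicted_at(txid, 1_000_000_000); // last seen missing from mempool
}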
- pub fn apply_update(&mut self, update: TxUpdate) -> ChangeSet { - let mut changeset = ChangeSet::::default(); - for tx in update.txs { - changeset.merge(self.insert_tx(tx)); - } - for (outpoint, txout) in update.txouts { - changeset.merge(self.insert_txout(outpoint, txout)); - } - for (anchor, txid) in update.anchors { - changeset.merge(self.insert_anchor(txid, anchor)); - } - for (txid, seen_at) in update.seen_ats { - changeset.merge(self.insert_seen_at(txid, seen_at)); - } - for (txid, evicted_at) in update.evicted_ats { - changeset.merge(self.insert_evicted_at(txid, evicted_at)); - } - changeset - } - - /// Determines the [`ChangeSet`] between `self` and an empty [`TxGraph`]. - pub fn initial_changeset(&self) -> ChangeSet { - ChangeSet { - txs: self.full_txs().map(|tx_node| tx_node.tx).collect(), - txouts: self - .floating_txouts() - .map(|(op, txout)| (op, txout.clone())) - .collect(), - anchors: self - .anchors - .iter() - .flat_map(|(txid, anchors)| anchors.iter().map(|a| (a.clone(), *txid))) - .collect(), - last_seen: self.last_seen.iter().map(|(&k, &v)| (k, v)).collect(), - last_evicted: self.last_evicted.iter().map(|(&k, &v)| (k, v)).collect(), - } - } - - /// Applies [`ChangeSet`] to [`TxGraph`]. - pub fn apply_changeset(&mut self, changeset: ChangeSet) { - for tx in changeset.txs { - let _ = self.insert_tx(tx); - } - for (outpoint, txout) in changeset.txouts { - let _ = self.insert_txout(outpoint, txout); - } - for (anchor, txid) in changeset.anchors { - let _ = self.insert_anchor(txid, anchor); - } - for (txid, seen_at) in changeset.last_seen { - let _ = self.insert_seen_at(txid, seen_at); - } - for (txid, evicted_at) in changeset.last_evicted { - let _ = self.insert_evicted_at(txid, evicted_at); - } - } -} - -impl TxGraph { - /// List graph transactions that are in `chain` with `chain_tip`. - /// - /// Each transaction is represented as a [`CanonicalTx`] that contains where the transaction is - /// observed in-chain, and the [`TxNode`]. - /// - /// # Error - /// - /// If the [`ChainOracle`] implementation (`chain`) fails, an error will be returned with the - /// returned item. - /// - /// If the [`ChainOracle`] is infallible, [`list_canonical_txs`] can be used instead. - /// - /// [`list_canonical_txs`]: Self::list_canonical_txs - pub fn try_list_canonical_txs<'a, C: ChainOracle + 'a>( - &'a self, - chain: &'a C, - chain_tip: BlockId, - ) -> impl Iterator, A>, C::Error>> { - self.canonical_iter(chain, chain_tip).flat_map(move |res| { - res.map(|(txid, _, canonical_reason)| { - let tx_node = self.get_tx_node(txid).expect("must contain tx"); - let chain_position = match canonical_reason { - CanonicalReason::Anchor { anchor, descendant } => match descendant { - Some(_) => { - let direct_anchor = tx_node - .anchors - .iter() - .find_map(|a| -> Option> { - match chain.is_block_in_chain(a.anchor_block(), chain_tip) { - Ok(Some(true)) => Some(Ok(a.clone())), - Ok(Some(false)) | Ok(None) => None, - Err(err) => Some(Err(err)), - } - }) - .transpose()?; - match direct_anchor { - Some(anchor) => ChainPosition::Confirmed { - anchor, - transitively: None, - }, - None => ChainPosition::Confirmed { - anchor, - transitively: descendant, - }, - } - } - None => ChainPosition::Confirmed { - anchor, - transitively: None, - }, - }, - CanonicalReason::ObservedIn { observed_in, .. 
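// [Illustrative sketch, editor-added; not part of the original patch.]
// The persistence round-trip that `initial_changeset`/`apply_changeset`
// enable: a graph reduced to its changeset and replayed into a fresh graph
// should be equivalent. `graph` is hypothetical.
fn round_trip(
    graph: &bdk_chain::tx_graph::TxGraph<bdk_chain::ConfirmationBlockTime>,
) -> bdk_chain::tx_graph::TxGraph<bdk_chain::ConfirmationBlockTime> {
    let changeset = graph.initial_changeset();
    let mut restored = bdk_chain::tx_graph::TxGraph::default();
    restored.apply_changeset(changeset);
    restored
}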
} => match observed_in { - ObservedIn::Mempool(last_seen) => ChainPosition::Unconfirmed { - last_seen: Some(last_seen), - }, - ObservedIn::Block(_) => ChainPosition::Unconfirmed { last_seen: None }, - }, - }; - Ok(CanonicalTx { - chain_position, - tx_node, - }) - }) - }) - } - - /// List graph transactions that are in `chain` with `chain_tip`. - /// - /// This is the infallible version of [`try_list_canonical_txs`]. - /// - /// [`try_list_canonical_txs`]: Self::try_list_canonical_txs - pub fn list_canonical_txs<'a, C: ChainOracle + 'a>( - &'a self, - chain: &'a C, - chain_tip: BlockId, - ) -> impl Iterator, A>> { - self.try_list_canonical_txs(chain, chain_tip) - .map(|res| res.expect("infallible")) - } - - /// Get a filtered list of outputs from the given `outpoints` that are in `chain` with - /// `chain_tip`. - /// - /// `outpoints` is a list of outpoints we are interested in, coupled with an outpoint identifier - /// (`OI`) for convenience. If `OI` is not necessary, the caller can use `()`, or - /// [`Iterator::enumerate`] over a list of [`OutPoint`]s. - /// - /// Floating outputs (i.e., outputs for which we don't have the full transaction in the graph) - /// are ignored. - /// - /// # Error - /// - /// An [`Iterator::Item`] can be an [`Err`] if the [`ChainOracle`] implementation (`chain`) - /// fails. - /// - /// If the [`ChainOracle`] implementation is infallible, [`filter_chain_txouts`] can be used - /// instead. - /// - /// [`filter_chain_txouts`]: Self::filter_chain_txouts - pub fn try_filter_chain_txouts<'a, C: ChainOracle + 'a, OI: Clone + 'a>( - &'a self, - chain: &'a C, - chain_tip: BlockId, - outpoints: impl IntoIterator + 'a, - ) -> Result)> + 'a, C::Error> { - let mut canon_txs = HashMap::, A>>::new(); - let mut canon_spends = HashMap::::new(); - for r in self.try_list_canonical_txs(chain, chain_tip) { - let canonical_tx = r?; - let txid = canonical_tx.tx_node.txid; - - if !canonical_tx.tx_node.tx.is_coinbase() { - for txin in &canonical_tx.tx_node.tx.input { - let _res = canon_spends.insert(txin.previous_output, txid); - assert!( - _res.is_none(), - "tried to replace {:?} with {:?}", - _res, - txid - ); - } - } - canon_txs.insert(txid, canonical_tx); - } - Ok(outpoints.into_iter().filter_map(move |(spk_i, outpoint)| { - let canon_tx = canon_txs.get(&outpoint.txid)?; - let txout = canon_tx - .tx_node - .tx - .output - .get(outpoint.vout as usize) - .cloned()?; - let chain_position = canon_tx.chain_position.clone(); - let spent_by = canon_spends.get(&outpoint).map(|spend_txid| { - let spend_tx = canon_txs - .get(spend_txid) - .cloned() - .expect("must be canonical"); - (spend_tx.chain_position, *spend_txid) - }); - let is_on_coinbase = canon_tx.tx_node.is_coinbase(); - Some(( - spk_i, - FullTxOut { - outpoint, - txout, - chain_position, - spent_by, - is_on_coinbase, - }, - )) - })) - } - - /// List txids by descending anchor height order. - /// - /// If multiple anchors exist for a txid, the highest anchor height will be used. Transactions - /// without anchors are excluded. - pub fn txids_by_descending_anchor_height( - &self, - ) -> impl ExactSizeIterator + '_ { - self.txs_by_highest_conf_heights.iter().copied().rev() - } - - /// List txids by descending last-seen order. - /// - /// Transactions without last-seens are excluded. Transactions with a last-evicted timestamp - /// equal or higher than it's last-seen timestamp are excluded. 
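// [Illustrative sketch, editor-added; not part of the original patch.]
// Splitting canonical transactions by confirmation status against a
// `LocalChain` oracle (whose error type is infallible, so the non-`try`
// method applies). `graph` and `chain` are hypothetical.
fn canonical_split(
    graph: &bdk_chain::tx_graph::TxGraph<bdk_chain::ConfirmationBlockTime>,
    chain: &bdk_chain::local_chain::LocalChain,
) -> (Vec<bitcoin::Txid>, Vec<bitcoin::Txid>) {
    let tip = chain.tip().block_id();
    let (mut confirmed, mut unconfirmed) = (Vec::new(), Vec::new());
    for c_tx in graph.list_canonical_txs(chain, tip) {
        match c_tx.chain_position {
            bdk_chain::ChainPosition::Confirmed { .. } => confirmed.push(c_tx.tx_node.txid),
            bdk_chain::ChainPosition::Unconfirmed { .. } => unconfirmed.push(c_tx.tx_node.txid),
        }
    }
    (confirmed, unconfirmed)
}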
- pub fn txids_by_descending_last_seen(&self) -> impl Iterator + '_ { - self.txs_by_last_seen - .iter() - .copied() - .rev() - .filter(|(last_seen, txid)| match self.last_evicted.get(txid) { - Some(last_evicted) => last_evicted < last_seen, - None => true, - }) - } - - /// Returns a [`CanonicalIter`]. - pub fn canonical_iter<'a, C: ChainOracle>( - &'a self, - chain: &'a C, - chain_tip: BlockId, - ) -> CanonicalIter<'a, A, C> { - CanonicalIter::new(self, chain, chain_tip) - } - - /// Get a filtered list of outputs from the given `outpoints` that are in `chain` with - /// `chain_tip`. - /// - /// This is the infallible version of [`try_filter_chain_txouts`]. - /// - /// [`try_filter_chain_txouts`]: Self::try_filter_chain_txouts - pub fn filter_chain_txouts<'a, C: ChainOracle + 'a, OI: Clone + 'a>( - &'a self, - chain: &'a C, - chain_tip: BlockId, - outpoints: impl IntoIterator + 'a, - ) -> impl Iterator)> + 'a { - self.try_filter_chain_txouts(chain, chain_tip, outpoints) - .expect("oracle is infallible") - } - - /// Get a filtered list of unspent outputs (UTXOs) from the given `outpoints` that are in - /// `chain` with `chain_tip`. - /// - /// `outpoints` is a list of outpoints we are interested in, coupled with an outpoint identifier - /// (`OI`) for convenience. If `OI` is not necessary, the caller can use `()`, or - /// [`Iterator::enumerate`] over a list of [`OutPoint`]s. - /// - /// Floating outputs are ignored. - /// - /// # Error - /// - /// An [`Iterator::Item`] can be an [`Err`] if the [`ChainOracle`] implementation (`chain`) - /// fails. - /// - /// If the [`ChainOracle`] implementation is infallible, [`filter_chain_unspents`] can be used - /// instead. - /// - /// [`filter_chain_unspents`]: Self::filter_chain_unspents - pub fn try_filter_chain_unspents<'a, C: ChainOracle + 'a, OI: Clone + 'a>( - &'a self, - chain: &'a C, - chain_tip: BlockId, - outpoints: impl IntoIterator + 'a, - ) -> Result)> + 'a, C::Error> { - Ok(self - .try_filter_chain_txouts(chain, chain_tip, outpoints)? - .filter(|(_, full_txo)| full_txo.spent_by.is_none())) - } - - /// Get a filtered list of unspent outputs (UTXOs) from the given `outpoints` that are in - /// `chain` with `chain_tip`. - /// - /// This is the infallible version of [`try_filter_chain_unspents`]. - /// - /// [`try_filter_chain_unspents`]: Self::try_filter_chain_unspents - pub fn filter_chain_unspents<'a, C: ChainOracle + 'a, OI: Clone + 'a>( - &'a self, - chain: &'a C, - chain_tip: BlockId, - txouts: impl IntoIterator + 'a, - ) -> impl Iterator)> + 'a { - self.try_filter_chain_unspents(chain, chain_tip, txouts) - .expect("oracle is infallible") - } - - /// Get the total balance of `outpoints` that are in `chain` of `chain_tip`. - /// - /// The output of `trust_predicate` should return `true` for scripts that we trust. - /// - /// `outpoints` is a list of outpoints we are interested in, coupled with an outpoint identifier - /// (`OI`) for convenience. If `OI` is not necessary, the caller can use `()`, or - /// [`Iterator::enumerate`] over a list of [`OutPoint`]s. - /// - /// If the provided [`ChainOracle`] implementation (`chain`) is infallible, [`balance`] can be - /// used instead. 
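// [Illustrative sketch, editor-added; not part of the original patch.]
// Listing UTXOs with `filter_chain_unspents`; the `()` in each tuple stands
// in for the optional outpoint identifier `OI`. `graph`, `chain`, and
// `owned_outpoints` are hypothetical.
fn unspent_outpoints(
    graph: &bdk_chain::tx_graph::TxGraph<bdk_chain::ConfirmationBlockTime>,
    chain: &bdk_chain::local_chain::LocalChain,
    owned_outpoints: Vec<((), bitcoin::OutPoint)>,
) -> Vec<bitcoin::OutPoint> {
    graph
        .filter_chain_unspents(chain, chain.tip().block_id(), owned_outpoints)
        .map(|(_, full_txo)| full_txo.outpoint)
        .collect()
}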
- /// - /// [`balance`]: Self::balance - pub fn try_balance( - &self, - chain: &C, - chain_tip: BlockId, - outpoints: impl IntoIterator, - mut trust_predicate: impl FnMut(&OI, ScriptBuf) -> bool, - ) -> Result { - let mut immature = Amount::ZERO; - let mut trusted_pending = Amount::ZERO; - let mut untrusted_pending = Amount::ZERO; - let mut confirmed = Amount::ZERO; - - for (spk_i, txout) in self.try_filter_chain_unspents(chain, chain_tip, outpoints)? { - match &txout.chain_position { - ChainPosition::Confirmed { .. } => { - if txout.is_confirmed_and_spendable(chain_tip.height) { - confirmed += txout.txout.value; - } else if !txout.is_mature(chain_tip.height) { - immature += txout.txout.value; - } - } - ChainPosition::Unconfirmed { .. } => { - if trust_predicate(&spk_i, txout.txout.script_pubkey) { - trusted_pending += txout.txout.value; - } else { - untrusted_pending += txout.txout.value; - } - } - } - } - - Ok(Balance { - immature, - trusted_pending, - untrusted_pending, - confirmed, - }) - } - - /// Get the total balance of `outpoints` that are in `chain` of `chain_tip`. - /// - /// This is the infallible version of [`try_balance`]. - /// - /// [`try_balance`]: Self::try_balance - pub fn balance, OI: Clone>( - &self, - chain: &C, - chain_tip: BlockId, - outpoints: impl IntoIterator, - trust_predicate: impl FnMut(&OI, ScriptBuf) -> bool, - ) -> Balance { - self.try_balance(chain, chain_tip, outpoints, trust_predicate) - .expect("oracle is infallible") - } - - /// List txids that are expected to exist under the given spks. - /// - /// This is used to fill [`SyncRequestBuilder::expected_spk_txids`](bdk_core::spk_client::SyncRequestBuilder::expected_spk_txids). - /// - /// The spk index range can be constrained with `range`. - /// - /// # Error - /// - /// If the [`ChainOracle`] implementation (`chain`) fails, an error will be returned with the - /// returned item. - /// - /// If the [`ChainOracle`] is infallible, - /// [`list_expected_spk_txids`](Self::list_expected_spk_txids) can be used instead. - pub fn try_list_expected_spk_txids<'a, C, I>( - &'a self, - chain: &'a C, - chain_tip: BlockId, - indexer: &'a impl AsRef>, - spk_index_range: impl RangeBounds + 'a, - ) -> impl Iterator> + 'a - where - C: ChainOracle, - I: fmt::Debug + Clone + Ord + 'a, - { - let indexer = indexer.as_ref(); - self.try_list_canonical_txs(chain, chain_tip).flat_map( - move |res| -> Vec> { - let range = &spk_index_range; - let c_tx = match res { - Ok(c_tx) => c_tx, - Err(err) => return vec![Err(err)], - }; - let relevant_spks = indexer.relevant_spks_of_tx(&c_tx.tx_node); - relevant_spks - .into_iter() - .filter(|(i, _)| range.contains(i)) - .map(|(_, spk)| Ok((spk, c_tx.tx_node.txid))) - .collect() - }, - ) - } - - /// List txids that are expected to exist under the given spks. - /// - /// This is the infallible version of - /// [`try_list_expected_spk_txids`](Self::try_list_expected_spk_txids). - pub fn list_expected_spk_txids<'a, C, I>( - &'a self, - chain: &'a C, - chain_tip: BlockId, - indexer: &'a impl AsRef>, - spk_index_range: impl RangeBounds + 'a, - ) -> impl Iterator + 'a - where - C: ChainOracle, - I: fmt::Debug + Clone + Ord + 'a, - { - self.try_list_expected_spk_txids(chain, chain_tip, indexer, spk_index_range) - .map(|r| r.expect("infallible")) - } -} - -/// The [`ChangeSet`] represents changes to a [`TxGraph`]. -/// -/// Since [`TxGraph`] is monotone, the "changeset" can only contain transactions to be added and -/// not removed. -/// -/// Refer to [module-level documentation] for more. 
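// [Illustrative sketch, editor-added; not part of the original patch.]
// Computing a `Balance` while trusting every provided spk, so unconfirmed
// funds land in `trusted_pending` rather than `untrusted_pending`. `graph`,
// `chain`, and `owned_outpoints` are hypothetical.
fn trusted_balance(
    graph: &bdk_chain::tx_graph::TxGraph<bdk_chain::ConfirmationBlockTime>,
    chain: &bdk_chain::local_chain::LocalChain,
    owned_outpoints: Vec<((), bitcoin::OutPoint)>,
) -> bdk_chain::Balance {
    let tip = chain.tip().block_id();
    graph.balance(chain, tip, owned_outpoints, |_, _spk| true)
}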
-///
-/// [module-level documentation]: crate::tx_graph
-#[derive(Debug, Clone, PartialEq)]
-#[cfg_attr(
-    feature = "serde",
-    derive(serde::Deserialize, serde::Serialize),
-    serde(bound(
-        deserialize = "A: Ord + serde::Deserialize<'de>",
-        serialize = "A: Ord + serde::Serialize",
-    ))
-)]
-#[must_use]
-pub struct ChangeSet<A = ()> {
-    /// Added transactions.
-    pub txs: BTreeSet<Arc<Transaction>>,
-    /// Added txouts.
-    pub txouts: BTreeMap<OutPoint, TxOut>,
-    /// Added anchors.
-    pub anchors: BTreeSet<(A, Txid)>,
-    /// Added last-seen unix timestamps of transactions.
-    pub last_seen: BTreeMap<Txid, u64>,
-    /// Added timestamps of when a transaction is last evicted from the mempool.
-    #[cfg_attr(feature = "serde", serde(default))]
-    pub last_evicted: BTreeMap<Txid, u64>,
-}
-
-impl<A> Default for ChangeSet<A> {
-    fn default() -> Self {
-        Self {
-            txs: Default::default(),
-            txouts: Default::default(),
-            anchors: Default::default(),
-            last_seen: Default::default(),
-            last_evicted: Default::default(),
-        }
-    }
-}
-
-impl<A> ChangeSet<A> {
-    /// Iterates over all outpoints contained within [`ChangeSet`].
-    pub fn txouts(&self) -> impl Iterator<Item = (OutPoint, &TxOut)> {
-        self.txs
-            .iter()
-            .flat_map(|tx| {
-                tx.output
-                    .iter()
-                    .enumerate()
-                    .map(move |(vout, txout)| (OutPoint::new(tx.compute_txid(), vout as _), txout))
-            })
-            .chain(self.txouts.iter().map(|(op, txout)| (*op, txout)))
-    }
-
-    /// Iterates over the heights of the new transaction anchors in this changeset.
-    ///
-    /// This is useful if you want to find which heights you need to fetch data about in order to
-    /// confirm or exclude these anchors.
-    pub fn anchor_heights(&self) -> impl Iterator<Item = u32> + '_
-    where
-        A: Anchor,
-    {
-        let mut dedup = None;
-        self.anchors
-            .iter()
-            .map(|(a, _)| a.anchor_block().height)
-            .filter(move |height| {
-                let duplicate = dedup == Some(*height);
-                dedup = Some(*height);
-                !duplicate
-            })
-    }
-}
-
-impl<A: Ord> Merge for ChangeSet<A> {
-    fn merge(&mut self, other: Self) {
-        // We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
-        // Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
-        self.txs.extend(other.txs);
-        self.txouts.extend(other.txouts);
-        self.anchors.extend(other.anchors);
-
-        // last_seen timestamps should only increase
-        self.last_seen.extend(
-            other
-                .last_seen
-                .into_iter()
-                .filter(|(txid, update_ls)| self.last_seen.get(txid) < Some(update_ls))
-                .collect::<Vec<_>>(),
-        );
-        // last_evicted timestamps should only increase
-        self.last_evicted.extend(
-            other
-                .last_evicted
-                .into_iter()
-                .filter(|(txid, update_lm)| self.last_evicted.get(txid) < Some(update_lm))
-                .collect::<Vec<_>>(),
-        );
-    }
-
-    fn is_empty(&self) -> bool {
-        self.txs.is_empty()
-            && self.txouts.is_empty()
-            && self.anchors.is_empty()
-            && self.last_seen.is_empty()
-            && self.last_evicted.is_empty()
-    }
-}
-
-impl<A: Ord> ChangeSet<A> {
-    /// Transform the [`ChangeSet`] to have [`Anchor`]s of another type.
-    ///
-    /// This takes in a closure of signature `FnMut(A) -> A2` which is called for each [`Anchor`] to
-    /// transform it.
-    pub fn map_anchors<A2: Ord, F>(self, mut f: F) -> ChangeSet<A2>
-    where
-        F: FnMut(A) -> A2,
-    {
-        ChangeSet {
-            txs: self.txs,
-            txouts: self.txouts,
-            anchors: BTreeSet::<(A2, Txid)>::from_iter(
-                self.anchors.into_iter().map(|(a, txid)| (f(a), txid)),
-            ),
-            last_seen: self.last_seen,
-            last_evicted: self.last_evicted,
-        }
-    }
-}
-
-impl<A> AsRef<TxGraph<A>> for TxGraph<A> {
-    fn as_ref(&self) -> &TxGraph<A> {
-        self
-    }
-}
-
-/// An iterator that traverses ancestors of a given root transaction.
-///
-/// The iterator excludes partial transactions.
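// [Illustrative sketch, editor-added; not part of the original patch.]
// `Merge` keeps the maximum last-seen timestamp per txid, so changesets can
// be replayed in any order and still converge. `txid` is hypothetical.
fn merge_keeps_latest(txid: bitcoin::Txid) {
    use bdk_chain::Merge;
    type Cs = bdk_chain::tx_graph::ChangeSet<bdk_chain::ConfirmationBlockTime>;
    let mut a = Cs::default();
    a.last_seen.insert(txid, 100);
    let mut b = Cs::default();
    b.last_seen.insert(txid, 50);
    a.merge(b);
    assert_eq!(a.last_seen.get(&txid), Some(&100)); // older value discarded
}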
-/// -/// Returned by the [`walk_ancestors`] method of [`TxGraph`]. -/// -/// [`walk_ancestors`]: TxGraph::walk_ancestors -pub struct TxAncestors<'g, A, F, O> -where - F: FnMut(usize, Arc) -> Option, -{ - graph: &'g TxGraph, - visited: HashSet, - queue: VecDeque<(usize, Arc)>, - filter_map: F, -} - -impl<'g, A, F, O> TxAncestors<'g, A, F, O> -where - F: FnMut(usize, Arc) -> Option, -{ - /// Creates a `TxAncestors` that includes the starting `Transaction` when iterating. - pub(crate) fn new_include_root( - graph: &'g TxGraph, - tx: impl Into>, - filter_map: F, - ) -> Self { - Self { - graph, - visited: Default::default(), - queue: [(0, tx.into())].into(), - filter_map, - } - } - - /// Creates a `TxAncestors` that excludes the starting `Transaction` when iterating. - pub(crate) fn new_exclude_root( - graph: &'g TxGraph, - tx: impl Into>, - filter_map: F, - ) -> Self { - let mut ancestors = Self { - graph, - visited: Default::default(), - queue: Default::default(), - filter_map, - }; - ancestors.populate_queue(1, tx.into()); - ancestors - } - - /// Creates a `TxAncestors` from multiple starting `Transaction`s that includes the starting - /// `Transaction`s when iterating. - #[allow(unused)] - pub(crate) fn from_multiple_include_root( - graph: &'g TxGraph, - txs: I, - filter_map: F, - ) -> Self - where - I: IntoIterator, - I::Item: Into>, - { - Self { - graph, - visited: Default::default(), - queue: txs.into_iter().map(|tx| (0, tx.into())).collect(), - filter_map, - } - } - - /// Creates a `TxAncestors` from multiple starting `Transaction`s that excludes the starting - /// `Transaction`s when iterating. - #[allow(unused)] - pub(crate) fn from_multiple_exclude_root( - graph: &'g TxGraph, - txs: I, - filter_map: F, - ) -> Self - where - I: IntoIterator, - I::Item: Into>, - { - let mut ancestors = Self { - graph, - visited: Default::default(), - queue: Default::default(), - filter_map, - }; - for tx in txs { - ancestors.populate_queue(1, tx.into()); - } - ancestors - } - - /// Traverse all ancestors that are not filtered out by the provided closure. - pub fn run_until_finished(self) { - self.for_each(|_| {}) - } - - fn populate_queue(&mut self, depth: usize, tx: Arc) { - let ancestors = tx - .input - .iter() - .map(|txin| txin.previous_output.txid) - .filter(|&prev_txid| self.visited.insert(prev_txid)) - .filter_map(|prev_txid| self.graph.get_tx(prev_txid)) - .map(|tx| (depth, tx)); - self.queue.extend(ancestors); - } -} - -impl Iterator for TxAncestors<'_, A, F, O> -where - F: FnMut(usize, Arc) -> Option, -{ - type Item = O; - - fn next(&mut self) -> Option { - loop { - // we have exhausted all paths when queue is empty - let (ancestor_depth, tx) = self.queue.pop_front()?; - // ignore paths when user filters them out - let item = match (self.filter_map)(ancestor_depth, tx.clone()) { - Some(item) => item, - None => continue, - }; - self.populate_queue(ancestor_depth + 1, tx); - return Some(item); - } - } -} - -/// An iterator that traverses transaction descendants. -/// -/// Returned by the [`walk_descendants`] method of [`TxGraph`]. -/// -/// [`walk_descendants`]: TxGraph::walk_descendants -pub struct TxDescendants<'g, A, F, O> -where - F: FnMut(usize, Txid) -> Option, -{ - graph: &'g TxGraph, - visited: HashSet, - queue: VecDeque<(usize, Txid)>, - filter_map: F, -} - -impl<'g, A, F, O> TxDescendants<'g, A, F, O> -where - F: FnMut(usize, Txid) -> Option, -{ - /// Creates a `TxDescendants` that includes the starting `txid` when iterating. 
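// [Illustrative sketch, editor-added; not part of the original patch.]
// Driving `TxAncestors` through `walk_ancestors` with a depth cutoff; the
// closure returning `None` past `max_depth` prunes that branch. `graph`,
// `tx`, and `max_depth` are hypothetical.
fn ancestors_to_depth(
    graph: &bdk_chain::tx_graph::TxGraph<bdk_chain::ConfirmationBlockTime>,
    tx: std::sync::Arc<bitcoin::Transaction>,
    max_depth: usize,
) -> Vec<bitcoin::Txid> {
    graph
        .walk_ancestors(tx, |depth, ancestor_tx| {
            (depth <= max_depth).then(|| ancestor_tx.compute_txid())
        })
        .collect()
}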
- #[allow(unused)] - pub(crate) fn new_include_root(graph: &'g TxGraph, txid: Txid, filter_map: F) -> Self { - Self { - graph, - visited: Default::default(), - queue: [(0, txid)].into(), - filter_map, - } - } - - /// Creates a `TxDescendants` that excludes the starting `txid` when iterating. - pub(crate) fn new_exclude_root(graph: &'g TxGraph, txid: Txid, filter_map: F) -> Self { - let mut descendants = Self { - graph, - visited: Default::default(), - queue: Default::default(), - filter_map, - }; - descendants.populate_queue(1, txid); - descendants - } - - /// Creates a `TxDescendants` from multiple starting transactions that includes the starting - /// `txid`s when iterating. - pub(crate) fn from_multiple_include_root( - graph: &'g TxGraph, - txids: I, - filter_map: F, - ) -> Self - where - I: IntoIterator, - { - Self { - graph, - visited: Default::default(), - queue: txids.into_iter().map(|txid| (0, txid)).collect(), - filter_map, - } - } - - /// Creates a `TxDescendants` from multiple starting transactions that excludes the starting - /// `txid`s when iterating. - #[allow(unused)] - pub(crate) fn from_multiple_exclude_root( - graph: &'g TxGraph, - txids: I, - filter_map: F, - ) -> Self - where - I: IntoIterator, - { - let mut descendants = Self { - graph, - visited: Default::default(), - queue: Default::default(), - filter_map, - }; - for txid in txids { - descendants.populate_queue(1, txid); - } - descendants - } - - /// Traverse all descendants that are not filtered out by the provided closure. - pub fn run_until_finished(self) { - self.for_each(|_| {}) - } - - fn populate_queue(&mut self, depth: usize, txid: Txid) { - let spend_paths = self - .graph - .spends - .range(tx_outpoint_range(txid)) - .flat_map(|(_, spends)| spends) - .map(|&txid| (depth, txid)); - self.queue.extend(spend_paths); - } -} - -impl Iterator for TxDescendants<'_, A, F, O> -where - F: FnMut(usize, Txid) -> Option, -{ - type Item = O; - - fn next(&mut self) -> Option { - let (op_spends, txid, item) = loop { - // we have exhausted all paths when queue is empty - let (op_spends, txid) = self.queue.pop_front()?; - // we do not want to visit the same transaction twice - if self.visited.insert(txid) { - // ignore paths when user filters them out - if let Some(item) = (self.filter_map)(op_spends, txid) { - break (op_spends, txid, item); - } - } - }; - - self.populate_queue(op_spends + 1, txid); - Some(item) - } -} - -fn tx_outpoint_range(txid: Txid) -> RangeInclusive { - OutPoint::new(txid, u32::MIN)..=OutPoint::new(txid, u32::MAX) -} diff --git a/crates/chain/tests/common/mod.rs b/crates/chain/tests/common/mod.rs deleted file mode 100644 index cb3ee66f..00000000 --- a/crates/chain/tests/common/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -#![cfg(feature = "miniscript")] - -mod tx_template; -#[allow(unused_imports)] -pub use tx_template::*; diff --git a/crates/chain/tests/common/tx_template.rs b/crates/chain/tests/common/tx_template.rs deleted file mode 100644 index 0b0e2fd9..00000000 --- a/crates/chain/tests/common/tx_template.rs +++ /dev/null @@ -1,140 +0,0 @@ -#![cfg(feature = "miniscript")] - -use bdk_testenv::utils::DESCRIPTORS; -use rand::distributions::{Alphanumeric, DistString}; -use std::collections::HashMap; - -use bdk_chain::{spk_txout::SpkTxOutIndex, tx_graph::TxGraph, Anchor}; -use bitcoin::{ - locktime::absolute::LockTime, secp256k1::Secp256k1, transaction, Amount, OutPoint, ScriptBuf, - Sequence, Transaction, TxIn, TxOut, Txid, Witness, -}; -use miniscript::Descriptor; - -/// Template for creating a transaction 
in `TxGraph`. -/// -/// The incentive for transaction templates is to create a transaction history in a simple manner to -/// avoid having to explicitly hash previous transactions to form previous outpoints of later -/// transactions. -#[derive(Clone, Copy, Default)] -pub struct TxTemplate<'a, A> { - /// Uniquely identifies the transaction, before it can have a txid. - pub tx_name: &'a str, - pub inputs: &'a [TxInTemplate<'a>], - pub outputs: &'a [TxOutTemplate], - pub anchors: &'a [A], - pub last_seen: Option, -} - -#[allow(dead_code)] -pub enum TxInTemplate<'a> { - /// This will give a random txid and vout. - Bogus, - - /// This is used for coinbase transactions because they do not have previous outputs. - Coinbase, - - /// Contains the `tx_name` and `vout` that we are spending. The rule is that we must only spend - /// from tx of a previous `TxTemplate`. - PrevTx(&'a str, usize), -} - -pub struct TxOutTemplate { - pub value: u64, - pub spk_index: Option, // some = get spk from SpkTxOutIndex, none = random spk -} - -#[allow(unused)] -impl TxOutTemplate { - pub fn new(value: u64, spk_index: Option) -> Self { - TxOutTemplate { value, spk_index } - } -} - -#[allow(dead_code)] -pub fn init_graph<'a, A: Anchor + Clone + 'a>( - tx_templates: impl IntoIterator>, -) -> (TxGraph, SpkTxOutIndex, HashMap<&'a str, Txid>) { - let (descriptor, _) = - Descriptor::parse_descriptor(&Secp256k1::signing_only(), DESCRIPTORS[2]).unwrap(); - let mut graph = TxGraph::::default(); - let mut spk_index = SpkTxOutIndex::default(); - (0..10).for_each(|index| { - spk_index.insert_spk( - index, - descriptor - .at_derivation_index(index) - .unwrap() - .script_pubkey(), - ); - }); - let mut tx_ids = HashMap::<&'a str, Txid>::new(); - - for (bogus_txin_vout, tx_tmp) in tx_templates.into_iter().enumerate() { - let tx = Transaction { - version: transaction::Version::non_standard(0), - lock_time: LockTime::ZERO, - input: tx_tmp - .inputs - .iter() - .map(|input| match input { - TxInTemplate::Bogus => TxIn { - previous_output: OutPoint::new( - bitcoin::hashes::Hash::hash( - Alphanumeric - .sample_string(&mut rand::thread_rng(), 20) - .as_bytes(), - ), - bogus_txin_vout as u32, - ), - script_sig: ScriptBuf::new(), - sequence: Sequence::default(), - witness: Witness::new(), - }, - TxInTemplate::Coinbase => TxIn { - previous_output: OutPoint::null(), - script_sig: ScriptBuf::new(), - sequence: Sequence::MAX, - witness: Witness::new(), - }, - TxInTemplate::PrevTx(prev_name, prev_vout) => { - let prev_txid = tx_ids.get(prev_name).expect( - "txin template must spend from tx of template that comes before", - ); - TxIn { - previous_output: OutPoint::new(*prev_txid, *prev_vout as _), - script_sig: ScriptBuf::new(), - sequence: Sequence::default(), - witness: Witness::new(), - } - } - }) - .collect(), - output: tx_tmp - .outputs - .iter() - .map(|output| match &output.spk_index { - None => TxOut { - value: Amount::from_sat(output.value), - script_pubkey: ScriptBuf::new(), - }, - Some(index) => TxOut { - value: Amount::from_sat(output.value), - script_pubkey: spk_index.spk_at_index(index).unwrap(), - }, - }) - .collect(), - }; - - tx_ids.insert(tx_tmp.tx_name, tx.compute_txid()); - spk_index.scan(&tx); - let _ = graph.insert_tx(tx.clone()); - for anchor in tx_tmp.anchors.iter() { - let _ = graph.insert_anchor(tx.compute_txid(), anchor.clone()); - } - if let Some(last_seen) = tx_tmp.last_seen { - let _ = graph.insert_seen_at(tx.compute_txid(), last_seen); - } - } - (graph, spk_index, tx_ids) -} diff --git 
a/crates/chain/tests/test_indexed_tx_graph.rs b/crates/chain/tests/test_indexed_tx_graph.rs deleted file mode 100644 index 1e28eb6a..00000000 --- a/crates/chain/tests/test_indexed_tx_graph.rs +++ /dev/null @@ -1,675 +0,0 @@ -#![cfg(feature = "miniscript")] - -#[macro_use] -mod common; - -use std::{collections::BTreeSet, sync::Arc}; - -use bdk_chain::{ - indexed_tx_graph::{self, IndexedTxGraph}, - indexer::keychain_txout::KeychainTxOutIndex, - local_chain::LocalChain, - tx_graph, Balance, ChainPosition, ConfirmationBlockTime, DescriptorExt, -}; -use bdk_testenv::{ - block_id, hash, - utils::{new_tx, DESCRIPTORS}, -}; -use bitcoin::{secp256k1::Secp256k1, Amount, OutPoint, ScriptBuf, Transaction, TxIn, TxOut}; -use miniscript::Descriptor; - -/// Ensure [`IndexedTxGraph::insert_relevant_txs`] can successfully index transactions NOT presented -/// in topological order. -/// -/// Given 3 transactions (A, B, C), where A has 2 owned outputs. B and C spends an output each of A. -/// Typically, we would only know whether B and C are relevant if we have indexed A (A's outpoints -/// are associated with owned spks in the index). Ensure insertion and indexing is topological- -/// agnostic. -#[test] -fn insert_relevant_txs() { - use bdk_chain::indexer::keychain_txout; - let (descriptor, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), DESCRIPTORS[0]) - .expect("must be valid"); - let spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey(); - let spk_1 = descriptor.at_derivation_index(9).unwrap().script_pubkey(); - - let mut graph = IndexedTxGraph::>::new( - KeychainTxOutIndex::new(10), - ); - let _ = graph - .index - .insert_descriptor((), descriptor.clone()) - .unwrap(); - - let tx_a = Transaction { - output: vec![ - TxOut { - value: Amount::from_sat(10_000), - script_pubkey: spk_0, - }, - TxOut { - value: Amount::from_sat(20_000), - script_pubkey: spk_1, - }, - ], - ..new_tx(0) - }; - - let tx_b = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(tx_a.compute_txid(), 0), - ..Default::default() - }], - ..new_tx(1) - }; - - let tx_c = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(tx_a.compute_txid(), 1), - ..Default::default() - }], - ..new_tx(2) - }; - - let txs = [tx_c, tx_b, tx_a]; - - let changeset = indexed_tx_graph::ChangeSet { - tx_graph: tx_graph::ChangeSet { - txs: txs.iter().cloned().map(Arc::new).collect(), - ..Default::default() - }, - indexer: keychain_txout::ChangeSet { - last_revealed: [(descriptor.descriptor_id(), 9_u32)].into(), - }, - }; - - assert_eq!( - graph.batch_insert_relevant(txs.iter().cloned().map(|tx| (tx, None))), - changeset, - ); - - // The initial changeset will also contain info about the keychain we added - let initial_changeset = indexed_tx_graph::ChangeSet { - tx_graph: changeset.tx_graph, - indexer: keychain_txout::ChangeSet { - last_revealed: changeset.indexer.last_revealed, - }, - }; - - assert_eq!(graph.initial_changeset(), initial_changeset); -} - -/// Ensure consistency IndexedTxGraph list_* and balance methods. These methods lists -/// relevant txouts and utxos from the information fetched from a ChainOracle (here a LocalChain). -/// -/// Test Setup: -/// -/// Local Chain => <0> ----- <1> ----- <2> ----- <3> ---- ... ---- <150> -/// -/// Keychains: -/// -/// keychain_1: Trusted -/// keychain_2: Untrusted -/// -/// Transactions: -/// -/// tx1: A Coinbase, sending 70000 sats to "trusted" address. [Block 0] -/// tx2: A external Receive, sending 30000 sats to "untrusted" address. 
[Block 1] -/// tx3: Internal Spend. Spends tx2 and returns change of 10000 to "trusted" address. [Block 2] -/// tx4: Mempool tx, sending 20000 sats to "untrusted" address. -/// tx5: Mempool tx, sending 15000 sats to "trusted" address. -/// tx6: Complete unrelated tx. [Block 3] -/// -/// Different transactions are added via `insert_relevant_txs`. -/// `list_owned_txout`, `list_owned_utxos` and `balance` method is asserted -/// with expected values at Block height 0, 1, and 2. -/// -/// Finally Add more blocks to local chain until tx1 coinbase maturity hits. -/// Assert maturity at coinbase maturity inflection height. Block height 98 and 99. -#[test] -fn test_list_owned_txouts() { - // Create Local chains - let local_chain = - LocalChain::from_blocks((0..150).map(|i| (i as u32, hash!("random"))).collect()) - .expect("must have genesis hash"); - - // Initiate IndexedTxGraph - - let (desc_1, _) = - Descriptor::parse_descriptor(&Secp256k1::signing_only(), DESCRIPTORS[2]).unwrap(); - let (desc_2, _) = - Descriptor::parse_descriptor(&Secp256k1::signing_only(), DESCRIPTORS[3]).unwrap(); - - let mut graph = IndexedTxGraph::>::new( - KeychainTxOutIndex::new(10), - ); - - assert!(graph - .index - .insert_descriptor("keychain_1".into(), desc_1) - .unwrap()); - assert!(graph - .index - .insert_descriptor("keychain_2".into(), desc_2) - .unwrap()); - - // Get trusted and untrusted addresses - - let mut trusted_spks: Vec = Vec::new(); - let mut untrusted_spks: Vec = Vec::new(); - - { - // we need to scope here to take immutable reference of the graph - for _ in 0..10 { - let ((_, script), _) = graph - .index - .reveal_next_spk("keychain_1".to_string()) - .unwrap(); - // TODO Assert indexes - trusted_spks.push(script.to_owned()); - } - } - { - for _ in 0..10 { - let ((_, script), _) = graph - .index - .reveal_next_spk("keychain_2".to_string()) - .unwrap(); - untrusted_spks.push(script.to_owned()); - } - } - - // Create test transactions - - // tx1 is the genesis coinbase - let tx1 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::null(), - ..Default::default() - }], - output: vec![TxOut { - value: Amount::from_sat(70000), - script_pubkey: trusted_spks[0].to_owned(), - }], - ..new_tx(1) - }; - - // tx2 is an incoming transaction received at untrusted keychain at block 1. - let tx2 = Transaction { - output: vec![TxOut { - value: Amount::from_sat(30000), - script_pubkey: untrusted_spks[0].to_owned(), - }], - ..new_tx(2) - }; - - // tx3 spends tx2 and gives a change back in trusted keychain. Confirmed at Block 2. - let tx3 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(tx2.compute_txid(), 0), - ..Default::default() - }], - output: vec![TxOut { - value: Amount::from_sat(10000), - script_pubkey: trusted_spks[1].to_owned(), - }], - ..new_tx(3) - }; - - // tx4 is an external transaction receiving at untrusted keychain, unconfirmed. - let tx4 = Transaction { - output: vec![TxOut { - value: Amount::from_sat(20000), - script_pubkey: untrusted_spks[1].to_owned(), - }], - ..new_tx(4) - }; - - // tx5 is an external transaction receiving at trusted keychain, unconfirmed. - let tx5 = Transaction { - output: vec![TxOut { - value: Amount::from_sat(15000), - script_pubkey: trusted_spks[2].to_owned(), - }], - ..new_tx(5) - }; - - // tx6 is an unrelated transaction confirmed at 3. - // This won't be inserted because it is not relevant. 
- let tx6 = new_tx(6); - - // Insert transactions into graph with respective anchors - // Insert unconfirmed txs with a last_seen timestamp - - let _ = - graph.batch_insert_relevant([&tx1, &tx2, &tx3, &tx6].iter().enumerate().map(|(i, &tx)| { - let height = i as u32; - ( - tx.clone(), - local_chain - .get(height) - .map(|cp| cp.block_id()) - .map(|block_id| ConfirmationBlockTime { - block_id, - confirmation_time: 100, - }), - ) - })); - - let _ = - graph.batch_insert_relevant_unconfirmed([&tx4, &tx5].iter().map(|&tx| (tx.clone(), 100))); - - // A helper lambda to extract and filter data from the graph. - let fetch = - |height: u32, graph: &IndexedTxGraph>| { - let chain_tip = local_chain - .get(height) - .map(|cp| cp.block_id()) - .unwrap_or_else(|| panic!("block must exist at {}", height)); - let txouts = graph - .graph() - .filter_chain_txouts( - &local_chain, - chain_tip, - graph.index.outpoints().iter().cloned(), - ) - .collect::>(); - - let utxos = graph - .graph() - .filter_chain_unspents( - &local_chain, - chain_tip, - graph.index.outpoints().iter().cloned(), - ) - .collect::>(); - - let balance = graph.graph().balance( - &local_chain, - chain_tip, - graph.index.outpoints().iter().cloned(), - |_, spk: ScriptBuf| trusted_spks.contains(&spk), - ); - - let confirmed_txouts_txid = txouts - .iter() - .filter_map(|(_, full_txout)| { - if full_txout.chain_position.is_confirmed() { - Some(full_txout.outpoint.txid) - } else { - None - } - }) - .collect::>(); - - let unconfirmed_txouts_txid = txouts - .iter() - .filter_map(|(_, full_txout)| { - if !full_txout.chain_position.is_confirmed() { - Some(full_txout.outpoint.txid) - } else { - None - } - }) - .collect::>(); - - let confirmed_utxos_txid = utxos - .iter() - .filter_map(|(_, full_txout)| { - if full_txout.chain_position.is_confirmed() { - Some(full_txout.outpoint.txid) - } else { - None - } - }) - .collect::>(); - - let unconfirmed_utxos_txid = utxos - .iter() - .filter_map(|(_, full_txout)| { - if !full_txout.chain_position.is_confirmed() { - Some(full_txout.outpoint.txid) - } else { - None - } - }) - .collect::>(); - - ( - confirmed_txouts_txid, - unconfirmed_txouts_txid, - confirmed_utxos_txid, - unconfirmed_utxos_txid, - balance, - ) - }; - - // ----- TEST BLOCK ----- - - // AT Block 0 - { - let ( - confirmed_txouts_txid, - unconfirmed_txouts_txid, - confirmed_utxos_txid, - unconfirmed_utxos_txid, - balance, - ) = fetch(0, &graph); - - // tx1 is a confirmed txout and is unspent - // tx4, tx5 are unconfirmed - assert_eq!(confirmed_txouts_txid, [tx1.compute_txid()].into()); - assert_eq!( - unconfirmed_txouts_txid, - [ - tx2.compute_txid(), - tx3.compute_txid(), - tx4.compute_txid(), - tx5.compute_txid() - ] - .into() - ); - - assert_eq!(confirmed_utxos_txid, [tx1.compute_txid()].into()); - assert_eq!( - unconfirmed_utxos_txid, - [tx3.compute_txid(), tx4.compute_txid(), tx5.compute_txid()].into() - ); - - assert_eq!( - balance, - Balance { - immature: Amount::from_sat(70000), // immature coinbase - trusted_pending: Amount::from_sat(25000), // tx3, tx5 - untrusted_pending: Amount::from_sat(20000), // tx4 - confirmed: Amount::ZERO // Nothing is confirmed yet - } - ); - } - - // AT Block 1 - { - let ( - confirmed_txouts_txid, - unconfirmed_txouts_txid, - confirmed_utxos_txid, - unconfirmed_utxos_txid, - balance, - ) = fetch(1, &graph); - - // tx2 gets into confirmed txout set - assert_eq!( - confirmed_txouts_txid, - [tx1.compute_txid(), tx2.compute_txid()].into() - ); - assert_eq!( - unconfirmed_txouts_txid, - [tx3.compute_txid(), 
tx4.compute_txid(), tx5.compute_txid()].into() - ); - - // tx2 gets into confirmed utxos set - assert_eq!(confirmed_utxos_txid, [tx1.compute_txid()].into()); - assert_eq!( - unconfirmed_utxos_txid, - [tx3.compute_txid(), tx4.compute_txid(), tx5.compute_txid()].into() - ); - - assert_eq!( - balance, - Balance { - immature: Amount::from_sat(70000), // immature coinbase - trusted_pending: Amount::from_sat(25000), // tx3, tx5 - untrusted_pending: Amount::from_sat(20000), // tx4 - confirmed: Amount::from_sat(0) // tx2 got confirmed (but spent by 3) - } - ); - } - - // AT Block 2 - { - let ( - confirmed_txouts_txid, - unconfirmed_txouts_txid, - confirmed_utxos_txid, - unconfirmed_utxos_txid, - balance, - ) = fetch(2, &graph); - - // tx3 now gets into the confirmed txout set - assert_eq!( - confirmed_txouts_txid, - [tx1.compute_txid(), tx2.compute_txid(), tx3.compute_txid()].into() - ); - assert_eq!( - unconfirmed_txouts_txid, - [tx4.compute_txid(), tx5.compute_txid()].into() - ); - - // tx3 also gets into confirmed utxo set - assert_eq!( - confirmed_utxos_txid, - [tx1.compute_txid(), tx3.compute_txid()].into() - ); - assert_eq!( - unconfirmed_utxos_txid, - [tx4.compute_txid(), tx5.compute_txid()].into() - ); - - assert_eq!( - balance, - Balance { - immature: Amount::from_sat(70000), // immature coinbase - trusted_pending: Amount::from_sat(15000), // tx5 - untrusted_pending: Amount::from_sat(20000), // tx4 - confirmed: Amount::from_sat(10000) // tx3 got confirmed - } - ); - } - - // AT Block 98 - { - let ( - confirmed_txouts_txid, - unconfirmed_txouts_txid, - confirmed_utxos_txid, - unconfirmed_utxos_txid, - balance, - ) = fetch(98, &graph); - - // no change compared to block 2 - assert_eq!( - confirmed_txouts_txid, - [tx1.compute_txid(), tx2.compute_txid(), tx3.compute_txid()].into() - ); - assert_eq!( - unconfirmed_txouts_txid, - [tx4.compute_txid(), tx5.compute_txid()].into() - ); - - assert_eq!( - confirmed_utxos_txid, - [tx1.compute_txid(), tx3.compute_txid()].into() - ); - assert_eq!( - unconfirmed_utxos_txid, - [tx4.compute_txid(), tx5.compute_txid()].into() - ); - - // Coinbase is still immature - assert_eq!( - balance, - Balance { - immature: Amount::from_sat(70000), // immature coinbase - trusted_pending: Amount::from_sat(15000), // tx5 - untrusted_pending: Amount::from_sat(20000), // tx4 - confirmed: Amount::from_sat(10000) // tx3 is confirmed - } - ); - } - - // AT Block 99 - { - let (_, _, _, _, balance) = fetch(99, &graph); - - // Coinbase maturity hits - assert_eq!( - balance, - Balance { - immature: Amount::ZERO, // coinbase matured - trusted_pending: Amount::from_sat(15000), // tx5 - untrusted_pending: Amount::from_sat(20000), // tx4 - confirmed: Amount::from_sat(80000) // tx1 + tx3 - } - ); - } -} - -/// Given a `LocalChain`, `IndexedTxGraph`, and a `Transaction`, when we insert some anchor -/// (possibly non-canonical) and/or a last-seen timestamp into the graph, we check the canonical -/// position of the tx: -/// -/// - tx with no anchors or last_seen has no `ChainPosition` -/// - tx with any last_seen will be `Unconfirmed` -/// - tx with an anchor in best chain will be `Confirmed` -/// - tx with an anchor not in best chain (no last_seen) has no `ChainPosition` -#[test] -fn test_get_chain_position() { - use bdk_chain::local_chain::CheckPoint; - use bdk_chain::spk_txout::SpkTxOutIndex; - use bdk_chain::BlockId; - - #[derive(Debug)] - struct TestCase { - name: &'static str, - tx: Transaction, - anchor: Option, - last_seen: Option, - exp_pos: Option>, - } - - // addr: 
bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm - let spk = ScriptBuf::from_hex("0014c692ecf13534982a9a2834565cbd37add8027140").unwrap(); - let mut graph = IndexedTxGraph::new({ - let mut index = SpkTxOutIndex::default(); - let _ = index.insert_spk(0u32, spk.clone()); - index - }); - - // Anchors to test - let blocks = vec![block_id!(0, "g"), block_id!(1, "A"), block_id!(2, "B")]; - - let cp = CheckPoint::from_block_ids(blocks.clone()).unwrap(); - let chain = LocalChain::from_tip(cp).unwrap(); - - // The test will insert a transaction into the indexed tx graph along with any anchors and - // timestamps, then check the tx's canonical position is expected. - fn run( - chain: &LocalChain, - graph: &mut IndexedTxGraph>, - test: TestCase, - ) { - let TestCase { - name, - tx, - anchor, - last_seen, - exp_pos, - } = test; - - // add data to graph - let txid = tx.compute_txid(); - let _ = graph.insert_tx(tx); - if let Some(anchor) = anchor { - let _ = graph.insert_anchor(txid, anchor); - } - if let Some(seen_at) = last_seen { - let _ = graph.insert_seen_at(txid, seen_at); - } - - // check chain position - let chain_pos = graph - .graph() - .list_canonical_txs(chain, chain.tip().block_id()) - .find_map(|canon_tx| { - if canon_tx.tx_node.txid == txid { - Some(canon_tx.chain_position) - } else { - None - } - }); - assert_eq!(chain_pos, exp_pos, "failed test case: {name}"); - } - - [ - TestCase { - name: "tx no anchors or last_seen - no chain pos", - tx: Transaction { - output: vec![TxOut { - value: Amount::ONE_BTC, - script_pubkey: spk.clone(), - }], - ..new_tx(0) - }, - anchor: None, - last_seen: None, - exp_pos: None, - }, - TestCase { - name: "tx last_seen - unconfirmed", - tx: Transaction { - output: vec![TxOut { - value: Amount::ONE_BTC, - script_pubkey: spk.clone(), - }], - ..new_tx(1) - }, - anchor: None, - last_seen: Some(2), - exp_pos: Some(ChainPosition::Unconfirmed { last_seen: Some(2) }), - }, - TestCase { - name: "tx anchor in best chain - confirmed", - tx: Transaction { - output: vec![TxOut { - value: Amount::ONE_BTC, - script_pubkey: spk.clone(), - }], - ..new_tx(2) - }, - anchor: Some(blocks[1]), - last_seen: None, - exp_pos: Some(ChainPosition::Confirmed { - anchor: blocks[1], - transitively: None, - }), - }, - TestCase { - name: "tx unknown anchor with last_seen - unconfirmed", - tx: Transaction { - output: vec![TxOut { - value: Amount::ONE_BTC, - script_pubkey: spk.clone(), - }], - ..new_tx(3) - }, - anchor: Some(block_id!(2, "B'")), - last_seen: Some(2), - exp_pos: Some(ChainPosition::Unconfirmed { last_seen: Some(2) }), - }, - TestCase { - name: "tx unknown anchor - unconfirmed", - tx: Transaction { - output: vec![TxOut { - value: Amount::ONE_BTC, - script_pubkey: spk.clone(), - }], - ..new_tx(4) - }, - anchor: Some(block_id!(2, "B'")), - last_seen: None, - exp_pos: Some(ChainPosition::Unconfirmed { last_seen: None }), - }, - ] - .into_iter() - .for_each(|t| run(&chain, &mut graph, t)); -} diff --git a/crates/chain/tests/test_keychain_txout_index.rs b/crates/chain/tests/test_keychain_txout_index.rs deleted file mode 100644 index 8b299b89..00000000 --- a/crates/chain/tests/test_keychain_txout_index.rs +++ /dev/null @@ -1,711 +0,0 @@ -#![cfg(feature = "miniscript")] - -use bdk_chain::{ - collections::BTreeMap, - indexer::keychain_txout::{ChangeSet, KeychainTxOutIndex}, - DescriptorExt, DescriptorId, Indexer, Merge, -}; -use bdk_testenv::{ - hash, - utils::{new_tx, DESCRIPTORS}, -}; -use bitcoin::{secp256k1::Secp256k1, Amount, OutPoint, ScriptBuf, Transaction, TxOut}; -use 
miniscript::{Descriptor, DescriptorPublicKey}; - -#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)] -enum TestKeychain { - External, - Internal, -} - -fn parse_descriptor(descriptor: &str) -> Descriptor { - let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only(); - Descriptor::::parse_descriptor(&secp, descriptor) - .unwrap() - .0 -} - -fn init_txout_index( - external_descriptor: Descriptor, - internal_descriptor: Descriptor, - lookahead: u32, -) -> KeychainTxOutIndex { - let mut txout_index = KeychainTxOutIndex::::new(lookahead); - - let _ = txout_index - .insert_descriptor(TestKeychain::External, external_descriptor) - .unwrap(); - let _ = txout_index - .insert_descriptor(TestKeychain::Internal, internal_descriptor) - .unwrap(); - - txout_index -} - -fn spk_at_index(descriptor: &Descriptor, index: u32) -> ScriptBuf { - descriptor - .derived_descriptor(&Secp256k1::verification_only(), index) - .expect("must derive") - .script_pubkey() -} - -// We create two empty changesets lhs and rhs, we then insert various descriptors with various -// last_revealed, merge rhs to lhs, and check that the result is consistent with these rules: -// - Existing index doesn't update if the new index in `other` is lower than `self`. -// - Existing index updates if the new index in `other` is higher than `self`. -// - Existing index is unchanged if keychain doesn't exist in `other`. -// - New keychain gets added if the keychain is in `other` but not in `self`. -#[test] -fn merge_changesets_check_last_revealed() { - let secp = bitcoin::secp256k1::Secp256k1::signing_only(); - let descriptor_ids: Vec<_> = DESCRIPTORS - .iter() - .take(4) - .map(|d| { - Descriptor::::parse_descriptor(&secp, d) - .unwrap() - .0 - .descriptor_id() - }) - .collect(); - - let mut lhs_di = BTreeMap::::default(); - let mut rhs_di = BTreeMap::::default(); - lhs_di.insert(descriptor_ids[0], 7); - lhs_di.insert(descriptor_ids[1], 0); - lhs_di.insert(descriptor_ids[2], 3); - - rhs_di.insert(descriptor_ids[0], 3); // value less than lhs desc 0 - rhs_di.insert(descriptor_ids[1], 5); // value more than lhs desc 1 - lhs_di.insert(descriptor_ids[3], 4); // key doesn't exist in lhs - - let mut lhs = ChangeSet { - last_revealed: lhs_di, - }; - let rhs = ChangeSet { - last_revealed: rhs_di, - }; - lhs.merge(rhs); - - // Existing index doesn't update if the new index in `other` is lower than `self`. - assert_eq!(lhs.last_revealed.get(&descriptor_ids[0]), Some(&7)); - // Existing index updates if the new index in `other` is higher than `self`. - assert_eq!(lhs.last_revealed.get(&descriptor_ids[1]), Some(&5)); - // Existing index is unchanged if keychain doesn't exist in `other`. - assert_eq!(lhs.last_revealed.get(&descriptor_ids[2]), Some(&3)); - // New keychain gets added if the keychain is in `other` but not in `self`. 
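// [Illustrative sketch, editor-added; not part of the original patch.]
// The same last-revealed merge rule stated directly: merging keeps the
// higher derivation index per descriptor. `did` is a hypothetical
// `DescriptorId`.
fn last_revealed_merge_rule(did: bdk_chain::DescriptorId) {
    use bdk_chain::{indexer::keychain_txout::ChangeSet, Merge};
    let mut lhs = ChangeSet {
        last_revealed: [(did, 7)].into(),
    };
    lhs.merge(ChangeSet {
        last_revealed: [(did, 3)].into(),
    });
    assert_eq!(lhs.last_revealed.get(&did), Some(&7)); // lower index ignored
}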
- assert_eq!(lhs.last_revealed.get(&descriptor_ids[3]), Some(&4)); -} - -#[test] -fn test_set_all_derivation_indices() { - let external_descriptor = parse_descriptor(DESCRIPTORS[0]); - let internal_descriptor = parse_descriptor(DESCRIPTORS[1]); - let mut txout_index = - init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 0); - let derive_to: BTreeMap<_, _> = - [(TestKeychain::External, 12), (TestKeychain::Internal, 24)].into(); - let last_revealed: BTreeMap<_, _> = [ - (external_descriptor.descriptor_id(), 12), - (internal_descriptor.descriptor_id(), 24), - ] - .into(); - assert_eq!( - txout_index.reveal_to_target_multi(&derive_to), - ChangeSet { - last_revealed: last_revealed.clone() - } - ); - assert_eq!(txout_index.last_revealed_indices(), derive_to); - assert_eq!( - txout_index.reveal_to_target_multi(&derive_to), - ChangeSet::default(), - "no changes if we set to the same thing" - ); - assert_eq!(txout_index.initial_changeset().last_revealed, last_revealed); -} - -#[test] -fn test_lookahead() { - let external_descriptor = parse_descriptor(DESCRIPTORS[0]); - let internal_descriptor = parse_descriptor(DESCRIPTORS[1]); - let lookahead = 10; - let mut txout_index = init_txout_index( - external_descriptor.clone(), - internal_descriptor.clone(), - lookahead, - ); - - // given: - // - external lookahead set to 10 - // when: - // - set external derivation index to value higher than last, but within the lookahead value - // expect: - // - scripts cached in spk_txout_index should increase correctly - // - stored scripts of external keychain should be of expected counts - for index in 0..20 { - let (revealed_spks, revealed_changeset) = txout_index - .reveal_to_target(TestKeychain::External, index) - .unwrap(); - assert_eq!( - revealed_spks, - vec![(index, spk_at_index(&external_descriptor, index))], - ); - assert_eq!( - &revealed_changeset.last_revealed, - &[(external_descriptor.descriptor_id(), index)].into() - ); - - // test stored spks are expected - let exp_last_store_index = index + lookahead; - for i in index + 1..=exp_last_store_index { - assert_eq!( - txout_index.spk_at_index(TestKeychain::External, i), - Some(spk_at_index(&external_descriptor, i)) - ); - } - assert!(txout_index - .spk_at_index(TestKeychain::External, exp_last_store_index + 1) - .is_none()); - - // internal should only have lookahead - for i in 0..lookahead { - assert_eq!( - txout_index.spk_at_index(TestKeychain::Internal, i), - Some(spk_at_index(&internal_descriptor, i)) - ); - } - assert!(txout_index - .spk_at_index(TestKeychain::Internal, lookahead) - .is_none()); - - assert_eq!( - txout_index - .revealed_keychain_spks(TestKeychain::External) - .count(), - index as usize + 1, - ); - assert_eq!( - txout_index - .revealed_keychain_spks(TestKeychain::Internal) - .count(), - 0, - ); - assert_eq!( - txout_index - .unused_keychain_spks(TestKeychain::External) - .count(), - index as usize + 1, - ); - assert_eq!( - txout_index - .unused_keychain_spks(TestKeychain::Internal) - .count(), - 0, - ); - } - - // given: - // - internal lookahead is 10 - // - internal derivation index is `None` - // when: - // - derivation index is set ahead of current derivation index + lookahead - // expect: - // - scripts cached in spk_txout_index should increase correctly, a.k.a. 
no scripts are skipped - let reveal_to = 24; - let (revealed_spks, revealed_changeset) = txout_index - .reveal_to_target(TestKeychain::Internal, reveal_to) - .unwrap(); - assert_eq!( - revealed_spks, - (0..=reveal_to) - .map(|index| (index, spk_at_index(&internal_descriptor, index))) - .collect::>(), - ); - assert_eq!( - &revealed_changeset.last_revealed, - &[(internal_descriptor.descriptor_id(), reveal_to)].into() - ); - - // test stored spks are expected - let exp_last_store_index = reveal_to + lookahead; - for index in reveal_to + 1..=exp_last_store_index { - assert_eq!( - txout_index.spk_at_index(TestKeychain::Internal, index), - Some(spk_at_index(&internal_descriptor, index)) - ); - } - assert!(txout_index - .spk_at_index(TestKeychain::Internal, exp_last_store_index + 1) - .is_none()); - - assert_eq!( - txout_index - .revealed_keychain_spks(TestKeychain::Internal) - .count(), - 25, - ); - - // ensure derivation indices are expected for each keychain - let last_external_index = txout_index - .last_revealed_index(TestKeychain::External) - .expect("already derived"); - let last_internal_index = txout_index - .last_revealed_index(TestKeychain::Internal) - .expect("already derived"); - assert_eq!(last_external_index, 19); - assert_eq!(last_internal_index, 24); - - // when: - // - scanning txouts with spks within stored indexes - // expect: - // - no changes to stored index counts - let external_iter = 0..=last_external_index; - let internal_iter = last_internal_index - last_external_index..=last_internal_index; - for (external_index, internal_index) in external_iter.zip(internal_iter) { - let tx = Transaction { - output: vec![ - TxOut { - script_pubkey: external_descriptor - .at_derivation_index(external_index) - .unwrap() - .script_pubkey(), - value: Amount::from_sat(10_000), - }, - TxOut { - script_pubkey: internal_descriptor - .at_derivation_index(internal_index) - .unwrap() - .script_pubkey(), - value: Amount::from_sat(10_000), - }, - ], - ..new_tx(external_index) - }; - assert_eq!(txout_index.index_tx(&tx), ChangeSet::default()); - assert_eq!( - txout_index.last_revealed_index(TestKeychain::External), - Some(last_external_index) - ); - assert_eq!( - txout_index.last_revealed_index(TestKeychain::Internal), - Some(last_internal_index) - ); - assert_eq!( - txout_index - .revealed_keychain_spks(TestKeychain::External) - .count(), - last_external_index as usize + 1, - ); - assert_eq!( - txout_index - .revealed_keychain_spks(TestKeychain::Internal) - .count(), - last_internal_index as usize + 1, - ); - } -} - -// when: -// - scanning txouts with spks above last stored index -// expect: -// - last revealed index should increase as expected -// - last used index should change as expected -#[test] -fn test_scan_with_lookahead() { - let external_descriptor = parse_descriptor(DESCRIPTORS[0]); - let internal_descriptor = parse_descriptor(DESCRIPTORS[1]); - let mut txout_index = - init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 10); - - let spks: BTreeMap = [0, 10, 20, 30] - .into_iter() - .map(|i| { - ( - i, - external_descriptor - .at_derivation_index(i) - .unwrap() - .script_pubkey(), - ) - }) - .collect(); - - for (&spk_i, spk) in &spks { - let op = OutPoint::new(hash!("fake tx"), spk_i); - let txout = TxOut { - script_pubkey: spk.clone(), - value: Amount::ZERO, - }; - - let changeset = txout_index.index_txout(op, &txout); - assert_eq!( - &changeset.last_revealed, - &[(external_descriptor.descriptor_id(), spk_i)].into() - ); - assert_eq!( - 
txout_index.last_revealed_index(TestKeychain::External), - Some(spk_i) - ); - assert_eq!( - txout_index.last_used_index(TestKeychain::External), - Some(spk_i) - ); - } - - // now try with index 41 (lookahead surpassed), we expect that the txout to not be indexed - let spk_41 = external_descriptor - .at_derivation_index(41) - .unwrap() - .script_pubkey(); - let op = OutPoint::new(hash!("fake tx"), 41); - let txout = TxOut { - script_pubkey: spk_41, - value: Amount::ZERO, - }; - let changeset = txout_index.index_txout(op, &txout); - assert!(changeset.is_empty()); -} - -#[test] -#[rustfmt::skip] -fn test_wildcard_derivations() { - let external_descriptor = parse_descriptor(DESCRIPTORS[0]); - let internal_descriptor = parse_descriptor(DESCRIPTORS[1]); - let mut txout_index = init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 0); - let external_spk_0 = external_descriptor.at_derivation_index(0).unwrap().script_pubkey(); - let external_spk_16 = external_descriptor.at_derivation_index(16).unwrap().script_pubkey(); - let external_spk_26 = external_descriptor.at_derivation_index(26).unwrap().script_pubkey(); - let external_spk_27 = external_descriptor.at_derivation_index(27).unwrap().script_pubkey(); - - // - nothing is derived - // - unused list is also empty - // - // - next_derivation_index() == (0, true) - // - derive_new() == ((0, ), keychain::ChangeSet) - // - next_unused() == ((0, ), keychain::ChangeSet:is_empty()) - assert_eq!(txout_index.next_index(TestKeychain::External).unwrap(), (0, true)); - let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap(); - assert_eq!(spk, (0_u32, external_spk_0.clone())); - assert_eq!(&changeset.last_revealed, &[(external_descriptor.descriptor_id(), 0)].into()); - let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap(); - assert_eq!(spk, (0_u32, external_spk_0.clone())); - assert_eq!(&changeset.last_revealed, &[].into()); - - // - derived till 25 - // - used all spks till 15. - // - used list : [0..=15, 17, 20, 23] - // - unused list: [16, 18, 19, 21, 22, 24, 25] - - // - next_derivation_index() = (26, true) - // - derive_new() = ((26, ), keychain::ChangeSet) - // - next_unused() == ((16, ), keychain::ChangeSet::is_empty()) - let _ = txout_index.reveal_to_target(TestKeychain::External, 25); - - (0..=15) - .chain([17, 20, 23]) - .for_each(|index| assert!(txout_index.mark_used(TestKeychain::External, index))); - - assert_eq!(txout_index.next_index(TestKeychain::External).unwrap(), (26, true)); - - let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap(); - assert_eq!(spk, (26, external_spk_26)); - - assert_eq!(&changeset.last_revealed, &[(external_descriptor.descriptor_id(), 26)].into()); - - let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap(); - assert_eq!(spk, (16, external_spk_16)); - assert_eq!(&changeset.last_revealed, &[].into()); - - // - Use all the derived till 26. 
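Aside: the selection rule these steps exercise is that `next_unused` hands out the lowest revealed-but-unused index, and only reveals a fresh script once every revealed index is used. A self-contained sketch of that rule, with plain integers standing in for scripts (not the real `KeychainTxOutIndex` API):

```rust
use std::collections::BTreeSet;

/// Pick the next unused index the way this test expects: lowest
/// revealed-but-unused index, else reveal the next fresh one.
fn next_unused(last_revealed: &mut u32, used: &BTreeSet<u32>) -> u32 {
    (0..=*last_revealed)
        .find(|i| !used.contains(i))
        .unwrap_or_else(|| {
            *last_revealed += 1;
            *last_revealed
        })
}

fn main() {
    // Revealed through 25 with index 16 unused -> hand out 16, reveal nothing.
    let mut last = 25;
    let used: BTreeSet<u32> = (0..=15).chain([17, 20, 23]).collect();
    assert_eq!(next_unused(&mut last, &used), 16);

    // Revealed through 26 and all of 0..=26 used -> reveal 27.
    let mut last = 26;
    let used: BTreeSet<u32> = (0..=26).collect();
    assert_eq!(next_unused(&mut last, &used), 27);
}
```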
- // - next_unused() = ((27, ), keychain::ChangeSet) - (0..=26).for_each(|index| { - txout_index.mark_used(TestKeychain::External, index); - }); - - let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap(); - assert_eq!(spk, (27, external_spk_27)); - assert_eq!(&changeset.last_revealed, &[(external_descriptor.descriptor_id(), 27)].into()); -} - -#[test] -fn test_non_wildcard_derivations() { - let mut txout_index = KeychainTxOutIndex::::new(0); - - let secp = bitcoin::secp256k1::Secp256k1::signing_only(); - let (no_wildcard_descriptor, _) = - Descriptor::::parse_descriptor(&secp, DESCRIPTORS[6]).unwrap(); - let external_spk = no_wildcard_descriptor - .at_derivation_index(0) - .unwrap() - .script_pubkey(); - - let _ = txout_index - .insert_descriptor(TestKeychain::External, no_wildcard_descriptor.clone()) - .unwrap(); - - // given: - // - `txout_index` with no stored scripts - // expect: - // - next derivation index should be new - // - when we derive a new script, script @ index 0 - // - when we get the next unused script, script @ index 0 - assert_eq!( - txout_index.next_index(TestKeychain::External).unwrap(), - (0, true) - ); - let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap(); - assert_eq!(spk, (0, external_spk.clone())); - assert_eq!( - &changeset.last_revealed, - &[(no_wildcard_descriptor.descriptor_id(), 0)].into() - ); - - let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap(); - assert_eq!(spk, (0, external_spk.clone())); - assert_eq!(&changeset.last_revealed, &[].into()); - - // given: - // - the non-wildcard descriptor already has a stored and used script - // expect: - // - next derivation index should not be new - // - derive new and next unused should return the old script - // - store_up_to should not panic and return empty changeset - assert_eq!( - txout_index.next_index(TestKeychain::External).unwrap(), - (0, false) - ); - txout_index.mark_used(TestKeychain::External, 0); - - let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap(); - assert_eq!(spk, (0, external_spk.clone())); - assert_eq!(&changeset.last_revealed, &[].into()); - - let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap(); - assert_eq!(spk, (0, external_spk.clone())); - assert_eq!(&changeset.last_revealed, &[].into()); - let (revealed_spks, revealed_changeset) = txout_index - .reveal_to_target(TestKeychain::External, 200) - .unwrap(); - assert_eq!(revealed_spks.len(), 0); - assert!(revealed_changeset.is_empty()); - - // we check that spks_of_keychain returns a SpkIterator with just one element - assert_eq!( - txout_index - .revealed_keychain_spks(TestKeychain::External) - .count(), - 1, - ); -} - -/// Check that calling `lookahead_to_target` stores the expected spks. -#[test] -fn lookahead_to_target() { - #[derive(Default)] - struct TestCase { - /// Global lookahead value. - lookahead: u32, - /// Last revealed index for external keychain. - external_last_revealed: Option, - /// Last revealed index for internal keychain. - internal_last_revealed: Option, - /// Call `lookahead_to_target(External, u32)`. - external_target: Option, - /// Call `lookahead_to_target(Internal, u32)`. 
- internal_target: Option, - } - - let test_cases = &[ - TestCase { - lookahead: 0, - external_target: Some(100), - ..Default::default() - }, - TestCase { - lookahead: 10, - internal_target: Some(99), - ..Default::default() - }, - TestCase { - lookahead: 100, - internal_target: Some(9), - external_target: Some(10), - ..Default::default() - }, - TestCase { - lookahead: 12, - external_last_revealed: Some(2), - internal_last_revealed: Some(2), - internal_target: Some(15), - external_target: Some(13), - }, - TestCase { - lookahead: 13, - external_last_revealed: Some(100), - internal_last_revealed: Some(21), - internal_target: Some(120), - external_target: Some(130), - }, - ]; - - for t in test_cases { - let external_descriptor = parse_descriptor(DESCRIPTORS[0]); - let internal_descriptor = parse_descriptor(DESCRIPTORS[1]); - let mut index = init_txout_index( - external_descriptor.clone(), - internal_descriptor.clone(), - t.lookahead, - ); - - if let Some(last_revealed) = t.external_last_revealed { - let _ = index.reveal_to_target(TestKeychain::External, last_revealed); - } - if let Some(last_revealed) = t.internal_last_revealed { - let _ = index.reveal_to_target(TestKeychain::Internal, last_revealed); - } - - let keychain_test_cases = [ - ( - TestKeychain::External, - t.external_last_revealed, - t.external_target, - ), - ( - TestKeychain::Internal, - t.internal_last_revealed, - t.internal_target, - ), - ]; - for (keychain, last_revealed, target) in keychain_test_cases { - if let Some(target) = target { - let original_last_stored_index = match last_revealed { - Some(last_revealed) => Some(last_revealed + t.lookahead), - None => t.lookahead.checked_sub(1), - }; - let exp_last_stored_index = match original_last_stored_index { - Some(original_last_stored_index) => { - Ord::max(target, original_last_stored_index) - } - None => target, - }; - index.lookahead_to_target(keychain.clone(), target); - let keys: Vec<_> = (0..) 
- .take_while(|&i| index.spk_at_index(keychain.clone(), i).is_some()) - .collect(); - let exp_keys: Vec<_> = (0..=exp_last_stored_index).collect(); - assert_eq!(keys, exp_keys); - } - } - } -} - -#[test] -fn applying_changesets_one_by_one_vs_aggregate_must_have_same_result() { - let desc = parse_descriptor(DESCRIPTORS[0]); - let changesets: &[ChangeSet] = &[ - ChangeSet { - last_revealed: [(desc.descriptor_id(), 10)].into(), - }, - ChangeSet { - last_revealed: [(desc.descriptor_id(), 12)].into(), - }, - ]; - - let mut indexer_a = KeychainTxOutIndex::::new(0); - indexer_a - .insert_descriptor(TestKeychain::External, desc.clone()) - .expect("must insert keychain"); - for changeset in changesets { - indexer_a.apply_changeset(changeset.clone()); - } - - let mut indexer_b = KeychainTxOutIndex::::new(0); - indexer_b - .insert_descriptor(TestKeychain::External, desc.clone()) - .expect("must insert keychain"); - let aggregate_changesets = changesets - .iter() - .cloned() - .reduce(|mut agg, cs| { - agg.merge(cs); - agg - }) - .expect("must aggregate changesets"); - indexer_b.apply_changeset(aggregate_changesets); - - assert_eq!( - indexer_a.keychains().collect::>(), - indexer_b.keychains().collect::>() - ); - assert_eq!( - indexer_a.spk_at_index(TestKeychain::External, 0), - indexer_b.spk_at_index(TestKeychain::External, 0) - ); - assert_eq!( - indexer_a.spk_at_index(TestKeychain::Internal, 0), - indexer_b.spk_at_index(TestKeychain::Internal, 0) - ); - assert_eq!( - indexer_a.last_revealed_indices(), - indexer_b.last_revealed_indices() - ); -} - -#[test] -fn assigning_same_descriptor_to_multiple_keychains_should_error() { - let desc = parse_descriptor(DESCRIPTORS[0]); - let mut indexer = KeychainTxOutIndex::::new(0); - let _ = indexer - .insert_descriptor(TestKeychain::Internal, desc.clone()) - .unwrap(); - assert!(indexer - .insert_descriptor(TestKeychain::External, desc) - .is_err()) -} - -#[test] -fn reassigning_keychain_to_a_new_descriptor_should_error() { - let desc1 = parse_descriptor(DESCRIPTORS[0]); - let desc2 = parse_descriptor(DESCRIPTORS[1]); - let mut indexer = KeychainTxOutIndex::::new(0); - let _ = indexer.insert_descriptor(TestKeychain::Internal, desc1); - assert!(indexer - .insert_descriptor(TestKeychain::Internal, desc2) - .is_err()); -} - -#[test] -fn when_querying_over_a_range_of_keychains_the_utxos_should_show_up() { - let mut indexer = KeychainTxOutIndex::::new(0); - let mut tx = new_tx(0); - - for (i, descriptor) in DESCRIPTORS.iter().enumerate() { - let descriptor = parse_descriptor(descriptor); - let _ = indexer.insert_descriptor(i, descriptor.clone()).unwrap(); - if i != 4 { - // skip one in the middle to see if uncovers any bugs - indexer.reveal_next_spk(i); - } - tx.output.push(TxOut { - script_pubkey: descriptor.at_derivation_index(0).unwrap().script_pubkey(), - value: Amount::from_sat(10_000), - }); - } - - let n_spks = DESCRIPTORS.len() - /*we skipped one*/ 1; - - let _ = indexer.index_tx(&tx); - assert_eq!(indexer.outpoints().len(), n_spks); - - assert_eq!(indexer.revealed_spks(0..DESCRIPTORS.len()).count(), n_spks); - assert_eq!(indexer.revealed_spks(1..4).count(), 4 - 1); - assert_eq!( - indexer.net_value(&tx, 0..DESCRIPTORS.len()).to_sat(), - (10_000 * n_spks) as i64 - ); - assert_eq!( - indexer.net_value(&tx, 3..6).to_sat(), - (10_000 * (6 - 3 - /*the skipped one*/ 1)) as i64 - ); -} diff --git a/crates/chain/tests/test_local_chain.rs b/crates/chain/tests/test_local_chain.rs deleted file mode 100644 index a0b8220e..00000000 --- 
a/crates/chain/tests/test_local_chain.rs +++ /dev/null @@ -1,855 +0,0 @@ -#![cfg(feature = "miniscript")] - -use std::ops::{Bound, RangeBounds}; - -use bdk_chain::{ - local_chain::{ - AlterCheckPointError, ApplyHeaderError, CannotConnectError, ChangeSet, CheckPoint, - LocalChain, MissingGenesisError, - }, - BlockId, -}; -use bdk_testenv::{chain_update, hash, local_chain}; -use bitcoin::{block::Header, hashes::Hash, BlockHash}; -use proptest::prelude::*; - -#[derive(Debug)] -struct TestLocalChain<'a> { - name: &'static str, - chain: LocalChain, - update: CheckPoint, - exp: ExpectedResult<'a>, -} - -#[derive(Debug, PartialEq)] -enum ExpectedResult<'a> { - Ok { - changeset: &'a [(u32, Option)], - init_changeset: &'a [(u32, Option)], - }, - Err(CannotConnectError), -} - -impl TestLocalChain<'_> { - fn run(mut self) { - let got_changeset = match self.chain.apply_update(self.update) { - Ok(changeset) => changeset, - Err(got_err) => { - assert_eq!( - ExpectedResult::Err(got_err), - self.exp, - "{}: unexpected error", - self.name - ); - return; - } - }; - - match self.exp { - ExpectedResult::Ok { - changeset, - init_changeset, - } => { - assert_eq!( - got_changeset, - changeset.iter().cloned().collect(), - "{}: unexpected changeset", - self.name - ); - assert_eq!( - self.chain.initial_changeset(), - init_changeset.iter().cloned().collect(), - "{}: unexpected initial changeset", - self.name - ); - } - ExpectedResult::Err(err) => panic!( - "{}: expected error ({}), got non-error result: {:?}", - self.name, err, got_changeset - ), - } - } -} - -#[test] -fn update_local_chain() { - [ - TestLocalChain { - name: "add first tip", - chain: local_chain![(0, hash!("A"))], - update: chain_update![(0, hash!("A"))], - exp: ExpectedResult::Ok { - changeset: &[], - init_changeset: &[(0, Some(hash!("A")))], - }, - }, - TestLocalChain { - name: "add second tip", - chain: local_chain![(0, hash!("A"))], - update: chain_update![(0, hash!("A")), (1, hash!("B"))], - exp: ExpectedResult::Ok { - changeset: &[(1, Some(hash!("B")))], - init_changeset: &[(0, Some(hash!("A"))), (1, Some(hash!("B")))], - }, - }, - TestLocalChain { - name: "two disjoint chains cannot merge", - chain: local_chain![(0, hash!("_")), (1, hash!("A"))], - update: chain_update![(0, hash!("_")), (2, hash!("B"))], - exp: ExpectedResult::Err(CannotConnectError { - try_include_height: 1, - }), - }, - TestLocalChain { - name: "two disjoint chains cannot merge (existing chain longer)", - chain: local_chain![(0, hash!("_")), (2, hash!("A"))], - update: chain_update![(0, hash!("_")), (1, hash!("B"))], - exp: ExpectedResult::Err(CannotConnectError { - try_include_height: 2, - }), - }, - TestLocalChain { - name: "duplicate chains should merge", - chain: local_chain![(0, hash!("A"))], - update: chain_update![(0, hash!("A"))], - exp: ExpectedResult::Ok { - changeset: &[], - init_changeset: &[(0, Some(hash!("A")))], - }, - }, - // Introduce an older checkpoint (B) - // | 0 | 1 | 2 | 3 - // chain | _ C D - // update | _ B C - TestLocalChain { - name: "can introduce older checkpoint", - chain: local_chain![(0, hash!("_")), (2, hash!("C")), (3, hash!("D"))], - update: chain_update![(0, hash!("_")), (1, hash!("B")), (2, hash!("C"))], - exp: ExpectedResult::Ok { - changeset: &[(1, Some(hash!("B")))], - init_changeset: &[(0, Some(hash!("_"))), (1, Some(hash!("B"))), (2, Some(hash!("C"))), (3, Some(hash!("D")))], - }, - }, - // Introduce an older checkpoint (A) that is not directly behind PoA - // | 0 | 2 | 3 | 4 - // chain | _ B C - // update | _ A C - TestLocalChain { 
- name: "can introduce older checkpoint 2", - chain: local_chain![(0, hash!("_")), (3, hash!("B")), (4, hash!("C"))], - update: chain_update![(0, hash!("_")), (2, hash!("A")), (4, hash!("C"))], - exp: ExpectedResult::Ok { - changeset: &[(2, Some(hash!("A")))], - init_changeset: &[(0, Some(hash!("_"))), (2, Some(hash!("A"))), (3, Some(hash!("B"))), (4, Some(hash!("C")))], - } - }, - // Introduce an older checkpoint (B) that is not the oldest checkpoint - // | 0 | 1 | 2 | 3 - // chain | _ A C - // update | _ B C - TestLocalChain { - name: "can introduce older checkpoint 3", - chain: local_chain![(0, hash!("_")), (1, hash!("A")), (3, hash!("C"))], - update: chain_update![(0, hash!("_")), (2, hash!("B")), (3, hash!("C"))], - exp: ExpectedResult::Ok { - changeset: &[(2, Some(hash!("B")))], - init_changeset: &[(0, Some(hash!("_"))), (1, Some(hash!("A"))), (2, Some(hash!("B"))), (3, Some(hash!("C")))], - } - }, - // Introduce two older checkpoints below the PoA - // | 0 | 1 | 2 | 3 - // chain | _ C - // update | _ A B C - TestLocalChain { - name: "introduce two older checkpoints below PoA", - chain: local_chain![(0, hash!("_")), (3, hash!("C"))], - update: chain_update![(0, hash!("_")), (1, hash!("A")), (2, hash!("B")), (3, hash!("C"))], - exp: ExpectedResult::Ok { - changeset: &[(1, Some(hash!("A"))), (2, Some(hash!("B")))], - init_changeset: &[(0, Some(hash!("_"))), (1, Some(hash!("A"))), (2, Some(hash!("B"))), (3, Some(hash!("C")))], - }, - }, - TestLocalChain { - name: "fix blockhash before agreement point", - chain: local_chain![(0, hash!("im-wrong")), (1, hash!("we-agree"))], - update: chain_update![(0, hash!("fix")), (1, hash!("we-agree"))], - exp: ExpectedResult::Ok { - changeset: &[(0, Some(hash!("fix")))], - init_changeset: &[(0, Some(hash!("fix"))), (1, Some(hash!("we-agree")))], - }, - }, - // B and C are in both chain and update - // | 0 | 1 | 2 | 3 | 4 - // chain | _ B C - // update | _ A B C D - // This should succeed with the point of agreement being C and A should be added in addition. - TestLocalChain { - name: "two points of agreement", - chain: local_chain![(0, hash!("_")), (2, hash!("B")), (3, hash!("C"))], - update: chain_update![(0, hash!("_")), (1, hash!("A")), (2, hash!("B")), (3, hash!("C")), (4, hash!("D"))], - exp: ExpectedResult::Ok { - changeset: &[(1, Some(hash!("A"))), (4, Some(hash!("D")))], - init_changeset: &[ - (0, Some(hash!("_"))), - (1, Some(hash!("A"))), - (2, Some(hash!("B"))), - (3, Some(hash!("C"))), - (4, Some(hash!("D"))), - ], - }, - }, - // Update and chain does not connect: - // | 0 | 1 | 2 | 3 | 4 - // chain | _ B C - // update | _ A B D - // This should fail as we cannot figure out whether C & D are on the same chain - TestLocalChain { - name: "update and chain does not connect", - chain: local_chain![(0, hash!("_")), (2, hash!("B")), (3, hash!("C"))], - update: chain_update![(0, hash!("_")), (1, hash!("A")), (2, hash!("B")), (4, hash!("D"))], - exp: ExpectedResult::Err(CannotConnectError { - try_include_height: 3, - }), - }, - // Transient invalidation: - // | 0 | 1 | 2 | 3 | 4 | 5 - // chain | _ B C E - // update | _ B' C' D - // This should succeed and invalidate B,C and E with point of agreement being A. 
- TestLocalChain { - name: "transitive invalidation applies to checkpoints higher than invalidation", - chain: local_chain![(0, hash!("_")), (2, hash!("B")), (3, hash!("C")), (5, hash!("E"))], - update: chain_update![(0, hash!("_")), (2, hash!("B'")), (3, hash!("C'")), (4, hash!("D"))], - exp: ExpectedResult::Ok { - changeset: &[ - (2, Some(hash!("B'"))), - (3, Some(hash!("C'"))), - (4, Some(hash!("D"))), - (5, None), - ], - init_changeset: &[ - (0, Some(hash!("_"))), - (2, Some(hash!("B'"))), - (3, Some(hash!("C'"))), - (4, Some(hash!("D"))), - ], - }, - }, - // Transient invalidation: - // | 0 | 1 | 2 | 3 | 4 - // chain | _ B C E - // update | _ B' C' D - // This should succeed and invalidate B, C and E with no point of agreement - TestLocalChain { - name: "transitive invalidation applies to checkpoints higher than invalidation no point of agreement", - chain: local_chain![(0, hash!("_")), (1, hash!("B")), (2, hash!("C")), (4, hash!("E"))], - update: chain_update![(0, hash!("_")), (1, hash!("B'")), (2, hash!("C'")), (3, hash!("D"))], - exp: ExpectedResult::Ok { - changeset: &[ - (1, Some(hash!("B'"))), - (2, Some(hash!("C'"))), - (3, Some(hash!("D"))), - (4, None) - ], - init_changeset: &[ - (0, Some(hash!("_"))), - (1, Some(hash!("B'"))), - (2, Some(hash!("C'"))), - (3, Some(hash!("D"))), - ], - }, - }, - // Transient invalidation: - // | 0 | 1 | 2 | 3 | 4 | 5 - // chain | _ A B C E - // update | _ B' C' D - // This should fail since although it tells us that B and C are invalid it doesn't tell us whether - // A was invalid. - TestLocalChain { - name: "invalidation but no connection", - chain: local_chain![(0, hash!("_")), (1, hash!("A")), (2, hash!("B")), (3, hash!("C")), (5, hash!("E"))], - update: chain_update![(0, hash!("_")), (2, hash!("B'")), (3, hash!("C'")), (4, hash!("D"))], - exp: ExpectedResult::Err(CannotConnectError { try_include_height: 1 }), - }, - // Introduce blocks between two points of agreement - // | 0 | 1 | 2 | 3 | 4 | 5 - // chain | A B D E - // update | A C E F - TestLocalChain { - name: "introduce blocks between two points of agreement", - chain: local_chain![(0, hash!("A")), (1, hash!("B")), (3, hash!("D")), (4, hash!("E"))], - update: chain_update![(0, hash!("A")), (2, hash!("C")), (4, hash!("E")), (5, hash!("F"))], - exp: ExpectedResult::Ok { - changeset: &[ - (2, Some(hash!("C"))), - (5, Some(hash!("F"))), - ], - init_changeset: &[ - (0, Some(hash!("A"))), - (1, Some(hash!("B"))), - (2, Some(hash!("C"))), - (3, Some(hash!("D"))), - (4, Some(hash!("E"))), - (5, Some(hash!("F"))), - ], - }, - }, - // Allow update that is shorter than original chain - // | 0 | 1 | 2 | 3 | 4 | 5 - // chain | A C D E F - // update | A C D' - TestLocalChain { - name: "allow update that is shorter than original chain", - chain: local_chain![(0, hash!("_")), (2, hash!("C")), (3, hash!("D")), (4, hash!("E")), (5, hash!("F"))], - update: chain_update![(0, hash!("_")), (2, hash!("C")), (3, hash!("D'"))], - exp: ExpectedResult::Ok { - changeset: &[ - (3, Some(hash!("D'"))), - (4, None), - (5, None), - ], - init_changeset: &[ - (0, Some(hash!("_"))), - (2, Some(hash!("C"))), - (3, Some(hash!("D'"))), - ], - }, - }, - ] - .into_iter() - .for_each(TestLocalChain::run); -} - -#[test] -fn local_chain_insert_block() { - struct TestCase { - original: LocalChain, - insert: (u32, BlockHash), - expected_result: Result, - expected_final: LocalChain, - } - - let test_cases = [ - TestCase { - original: local_chain![(0, hash!("_"))], - insert: (5, hash!("block5")), - expected_result: Ok([(5, 
Some(hash!("block5")))].into()), - expected_final: local_chain![(0, hash!("_")), (5, hash!("block5"))], - }, - TestCase { - original: local_chain![(0, hash!("_")), (3, hash!("A"))], - insert: (4, hash!("B")), - expected_result: Ok([(4, Some(hash!("B")))].into()), - expected_final: local_chain![(0, hash!("_")), (3, hash!("A")), (4, hash!("B"))], - }, - TestCase { - original: local_chain![(0, hash!("_")), (4, hash!("B"))], - insert: (3, hash!("A")), - expected_result: Ok([(3, Some(hash!("A")))].into()), - expected_final: local_chain![(0, hash!("_")), (3, hash!("A")), (4, hash!("B"))], - }, - TestCase { - original: local_chain![(0, hash!("_")), (2, hash!("K"))], - insert: (2, hash!("K")), - expected_result: Ok([].into()), - expected_final: local_chain![(0, hash!("_")), (2, hash!("K"))], - }, - TestCase { - original: local_chain![(0, hash!("_")), (2, hash!("K"))], - insert: (2, hash!("J")), - expected_result: Err(AlterCheckPointError { - height: 2, - original_hash: hash!("K"), - update_hash: Some(hash!("J")), - }), - expected_final: local_chain![(0, hash!("_")), (2, hash!("K"))], - }, - ]; - - for (i, t) in test_cases.into_iter().enumerate() { - let mut chain = t.original; - assert_eq!( - chain.insert_block(t.insert.into()), - t.expected_result, - "[{}] unexpected result when inserting block", - i, - ); - assert_eq!(chain, t.expected_final, "[{}] unexpected final chain", i,); - } -} - -#[test] -fn local_chain_disconnect_from() { - struct TestCase { - name: &'static str, - original: LocalChain, - disconnect_from: (u32, BlockHash), - exp_result: Result, - exp_final: LocalChain, - } - - let test_cases = [ - TestCase { - name: "try_replace_genesis_should_fail", - original: local_chain![(0, hash!("_"))], - disconnect_from: (0, hash!("_")), - exp_result: Err(MissingGenesisError), - exp_final: local_chain![(0, hash!("_"))], - }, - TestCase { - name: "try_replace_genesis_should_fail_2", - original: local_chain![(0, hash!("_")), (2, hash!("B")), (3, hash!("C"))], - disconnect_from: (0, hash!("_")), - exp_result: Err(MissingGenesisError), - exp_final: local_chain![(0, hash!("_")), (2, hash!("B")), (3, hash!("C"))], - }, - TestCase { - name: "from_does_not_exist", - original: local_chain![(0, hash!("_")), (3, hash!("C"))], - disconnect_from: (2, hash!("B")), - exp_result: Ok(ChangeSet::default()), - exp_final: local_chain![(0, hash!("_")), (3, hash!("C"))], - }, - TestCase { - name: "from_has_different_blockhash", - original: local_chain![(0, hash!("_")), (2, hash!("B"))], - disconnect_from: (2, hash!("not_B")), - exp_result: Ok(ChangeSet::default()), - exp_final: local_chain![(0, hash!("_")), (2, hash!("B"))], - }, - TestCase { - name: "disconnect_one", - original: local_chain![(0, hash!("_")), (2, hash!("B"))], - disconnect_from: (2, hash!("B")), - exp_result: Ok(ChangeSet::from_iter([(2, None)])), - exp_final: local_chain![(0, hash!("_"))], - }, - TestCase { - name: "disconnect_three", - original: local_chain![ - (0, hash!("_")), - (2, hash!("B")), - (3, hash!("C")), - (4, hash!("D")) - ], - disconnect_from: (2, hash!("B")), - exp_result: Ok(ChangeSet::from_iter([(2, None), (3, None), (4, None)])), - exp_final: local_chain![(0, hash!("_"))], - }, - ]; - - for (i, t) in test_cases.into_iter().enumerate() { - let mut chain = t.original; - let result = chain.disconnect_from(t.disconnect_from.into()); - assert_eq!( - result, t.exp_result, - "[{}:{}] unexpected changeset result", - i, t.name - ); - assert_eq!( - chain, t.exp_final, - "[{}:{}] unexpected final chain", - i, t.name - ); - } -} - -#[test] -fn 
checkpoint_from_block_ids() {
-    struct TestCase<'a> {
-        name: &'a str,
-        blocks: &'a [(u32, BlockHash)],
-        exp_result: Result<(), Option<(u32, BlockHash)>>,
-    }
-
-    let test_cases = [
-        TestCase {
-            name: "in_order",
-            blocks: &[(0, hash!("A")), (1, hash!("B")), (3, hash!("D"))],
-            exp_result: Ok(()),
-        },
-        TestCase {
-            name: "with_duplicates",
-            blocks: &[(1, hash!("B")), (2, hash!("C")), (2, hash!("C'"))],
-            exp_result: Err(Some((2, hash!("C")))),
-        },
-        TestCase {
-            name: "not_in_order",
-            blocks: &[(1, hash!("B")), (3, hash!("D")), (2, hash!("C"))],
-            exp_result: Err(Some((3, hash!("D")))),
-        },
-        TestCase {
-            name: "empty",
-            blocks: &[],
-            exp_result: Err(None),
-        },
-        TestCase {
-            name: "single",
-            blocks: &[(21, hash!("million"))],
-            exp_result: Ok(()),
-        },
-    ];
-
-    for (i, t) in test_cases.into_iter().enumerate() {
-        let result = CheckPoint::from_block_ids(
-            t.blocks
-                .iter()
-                .map(|&(height, hash)| BlockId { height, hash }),
-        );
-        match t.exp_result {
-            Ok(_) => {
-                assert!(result.is_ok(), "[{}:{}] should be Ok", i, t.name);
-                let result_vec = {
-                    let mut v = result
-                        .unwrap()
-                        .into_iter()
-                        .map(|cp| (cp.height(), cp.hash()))
-                        .collect::<Vec<_>>();
-                    v.reverse();
-                    v
-                };
-                assert_eq!(
-                    &result_vec, t.blocks,
-                    "[{}:{}] not equal to original block ids",
-                    i, t.name
-                );
-            }
-            Err(exp_last) => {
-                assert!(result.is_err(), "[{}:{}] should be Err", i, t.name);
-                let err = result.unwrap_err();
-                assert_eq!(
-                    err.as_ref()
-                        .map(|last_cp| (last_cp.height(), last_cp.hash())),
-                    exp_last,
-                    "[{}:{}] error's last cp height should be {:?}, got {:?}",
-                    i,
-                    t.name,
-                    exp_last,
-                    err
-                );
-            }
-        }
-    }
-}
-
-#[test]
-fn checkpoint_query() {
-    struct TestCase {
-        chain: LocalChain,
-        /// The heights we want to call [`CheckPoint::query`] with, represented as an inclusive
-        /// range.
-        ///
-        /// If a [`CheckPoint`] exists at that height, we expect [`CheckPoint::query`] to return
-        /// it. If not, [`CheckPoint::query`] should return `None`.
-        query_range: (u32, u32),
-    }
-
-    let test_cases = [
-        TestCase {
-            chain: local_chain![(0, hash!("_")), (1, hash!("A"))],
-            query_range: (0, 2),
-        },
-        TestCase {
-            chain: local_chain![(0, hash!("_")), (2, hash!("B")), (3, hash!("C"))],
-            query_range: (0, 3),
-        },
-    ];
-
-    for t in test_cases.into_iter() {
-        let tip = t.chain.tip();
-        for h in t.query_range.0..=t.query_range.1 {
-            let query_result = tip.get(h);
-
-            // Perform an exhaustive search for the checkpoint at height `h`.
-            let exp_hash = t
-                .chain
-                .iter_checkpoints()
-                .find(|cp| cp.height() == h)
-                .map(|cp| cp.hash());
-
-            match query_result {
-                Some(cp) => {
-                    assert_eq!(Some(cp.hash()), exp_hash);
-                    assert_eq!(cp.height(), h);
-                }
-                None => assert!(exp_hash.is_none()),
-            }
-        }
-    }
-}
-
-#[test]
-fn checkpoint_insert() {
-    struct TestCase<'a> {
-        /// The name of the test.
-        #[allow(dead_code)]
-        name: &'a str,
-        /// The original checkpoint chain to call [`CheckPoint::insert`] on.
-        chain: &'a [(u32, BlockHash)],
-        /// The `block_id` to insert.
-        to_insert: (u32, BlockHash),
-        /// The expected final checkpoint chain after calling [`CheckPoint::insert`].
- exp_final_chain: &'a [(u32, BlockHash)], - } - - let test_cases = [ - TestCase { - name: "insert_above_tip", - chain: &[(1, hash!("a")), (2, hash!("b"))], - to_insert: (4, hash!("d")), - exp_final_chain: &[(1, hash!("a")), (2, hash!("b")), (4, hash!("d"))], - }, - TestCase { - name: "insert_already_exists_expect_no_change", - chain: &[(1, hash!("a")), (2, hash!("b")), (3, hash!("c"))], - to_insert: (2, hash!("b")), - exp_final_chain: &[(1, hash!("a")), (2, hash!("b")), (3, hash!("c"))], - }, - TestCase { - name: "insert_in_middle", - chain: &[(2, hash!("b")), (4, hash!("d")), (5, hash!("e"))], - to_insert: (3, hash!("c")), - exp_final_chain: &[ - (2, hash!("b")), - (3, hash!("c")), - (4, hash!("d")), - (5, hash!("e")), - ], - }, - TestCase { - name: "replace_one", - chain: &[(3, hash!("c")), (4, hash!("d")), (5, hash!("e"))], - to_insert: (5, hash!("E")), - exp_final_chain: &[(3, hash!("c")), (4, hash!("d")), (5, hash!("E"))], - }, - TestCase { - name: "insert_conflict_should_evict", - chain: &[ - (3, hash!("c")), - (4, hash!("d")), - (5, hash!("e")), - (6, hash!("f")), - ], - to_insert: (4, hash!("D")), - exp_final_chain: &[(3, hash!("c")), (4, hash!("D"))], - }, - ]; - - fn genesis_block() -> impl Iterator { - core::iter::once((0, hash!("_"))).map(BlockId::from) - } - - for t in test_cases.into_iter() { - let chain = CheckPoint::from_block_ids( - genesis_block().chain(t.chain.iter().copied().map(BlockId::from)), - ) - .expect("test formed incorrectly, must construct checkpoint chain"); - - let exp_final_chain = CheckPoint::from_block_ids( - genesis_block().chain(t.exp_final_chain.iter().copied().map(BlockId::from)), - ) - .expect("test formed incorrectly, must construct checkpoint chain"); - - assert_eq!( - chain.insert(t.to_insert.into()), - exp_final_chain, - "unexpected final chain" - ); - } -} - -#[test] -fn local_chain_apply_header_connected_to() { - fn header_from_prev_blockhash(prev_blockhash: BlockHash) -> Header { - Header { - version: bitcoin::block::Version::default(), - prev_blockhash, - merkle_root: bitcoin::hash_types::TxMerkleNode::all_zeros(), - time: 0, - bits: bitcoin::CompactTarget::default(), - nonce: 0, - } - } - - struct TestCase { - name: &'static str, - chain: LocalChain, - header: Header, - height: u32, - connected_to: BlockId, - exp_result: Result)>, ApplyHeaderError>, - } - - let test_cases = [ - { - let header = header_from_prev_blockhash(hash!("_")); - let hash = header.block_hash(); - let height = 1; - let connected_to = BlockId { height, hash }; - TestCase { - name: "connected_to_self_header_applied_to_self", - chain: local_chain![(0, hash!("_")), (height, hash)], - header, - height, - connected_to, - exp_result: Ok(vec![]), - } - }, - { - let prev_hash = hash!("A"); - let prev_height = 1; - let header = header_from_prev_blockhash(prev_hash); - let hash = header.block_hash(); - let height = prev_height + 1; - let connected_to = BlockId { - height: prev_height, - hash: prev_hash, - }; - TestCase { - name: "connected_to_prev_header_applied_to_self", - chain: local_chain![(0, hash!("_")), (prev_height, prev_hash)], - header, - height, - connected_to, - exp_result: Ok(vec![(height, Some(hash))]), - } - }, - { - let header = header_from_prev_blockhash(BlockHash::all_zeros()); - let hash = header.block_hash(); - let height = 0; - let connected_to = BlockId { height, hash }; - TestCase { - name: "genesis_applied_to_self", - chain: local_chain![(0, hash)], - header, - height, - connected_to, - exp_result: Ok(vec![]), - } - }, - { - let header = 
header_from_prev_blockhash(hash!("Z")); - let height = 10; - let hash = header.block_hash(); - let prev_height = height - 1; - let prev_hash = header.prev_blockhash; - TestCase { - name: "connect_at_connected_to", - chain: local_chain![(0, hash!("_")), (2, hash!("B")), (3, hash!("C"))], - header, - height: 10, - connected_to: BlockId { - height: 3, - hash: hash!("C"), - }, - exp_result: Ok(vec![(prev_height, Some(prev_hash)), (height, Some(hash))]), - } - }, - { - let prev_hash = hash!("A"); - let prev_height = 1; - let header = header_from_prev_blockhash(prev_hash); - let connected_to = BlockId { - height: prev_height, - hash: hash!("not_prev_hash"), - }; - TestCase { - name: "inconsistent_prev_hash", - chain: local_chain![(0, hash!("_")), (prev_height, hash!("not_prev_hash"))], - header, - height: prev_height + 1, - connected_to, - exp_result: Err(ApplyHeaderError::InconsistentBlocks), - } - }, - { - let prev_hash = hash!("A"); - let prev_height = 1; - let header = header_from_prev_blockhash(prev_hash); - let height = prev_height + 1; - let connected_to = BlockId { - height, - hash: hash!("not_current_hash"), - }; - TestCase { - name: "inconsistent_current_block", - chain: local_chain![(0, hash!("_")), (height, hash!("not_current_hash"))], - header, - height, - connected_to, - exp_result: Err(ApplyHeaderError::InconsistentBlocks), - } - }, - { - let header = header_from_prev_blockhash(hash!("B")); - let height = 3; - let connected_to = BlockId { - height: 4, - hash: hash!("D"), - }; - TestCase { - name: "connected_to_is_greater", - chain: local_chain![(0, hash!("_")), (2, hash!("B"))], - header, - height, - connected_to, - exp_result: Err(ApplyHeaderError::InconsistentBlocks), - } - }, - ]; - - for (i, t) in test_cases.into_iter().enumerate() { - let mut chain = t.chain; - let result = chain.apply_header_connected_to(&t.header, t.height, t.connected_to); - let exp_result = t - .exp_result - .map(|cs| cs.iter().cloned().collect::()); - assert_eq!(result, exp_result, "[{}:{}] unexpected result", i, t.name); - } -} - -fn generate_height_range_bounds( - height_upper_bound: u32, -) -> impl Strategy, Bound)> { - fn generate_height_bound(height_upper_bound: u32) -> impl Strategy> { - prop_oneof![ - (0..height_upper_bound).prop_map(Bound::Included), - (0..height_upper_bound).prop_map(Bound::Excluded), - Just(Bound::Unbounded), - ] - } - ( - generate_height_bound(height_upper_bound), - generate_height_bound(height_upper_bound), - ) -} - -fn generate_checkpoints(max_height: u32, max_count: usize) -> impl Strategy { - proptest::collection::btree_set(1..max_height, 0..max_count).prop_map(|mut heights| { - heights.insert(0); // must have genesis - CheckPoint::from_block_ids(heights.into_iter().map(|height| { - let hash = bitcoin::hashes::Hash::hash(height.to_le_bytes().as_slice()); - BlockId { height, hash } - })) - .expect("blocks must be in order as it comes from btreeset") - }) -} - -proptest! { - #![proptest_config(ProptestConfig { - ..Default::default() - })] - - /// Ensure that [`CheckPoint::range`] returns the expected checkpoint heights by comparing it - /// against a more primitive approach. 
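Aside: the "more primitive approach" mentioned here is nothing more than filtering every checkpoint height through `RangeBounds::contains`. Extracted into a standalone sketch (ascending height order is assumed for readability; the real checkpoint iterator walks tip-down):

```rust
use std::ops::{Bound, RangeBounds};

/// The primitive model the proptest compares `CheckPoint::range` against:
/// keep exactly the heights the range bounds contain.
fn heights_in_range(heights: &[u32], range: (Bound<u32>, Bound<u32>)) -> Vec<u32> {
    heights.iter().copied().filter(|h| range.contains(h)).collect()
}

fn main() {
    let heights = [0, 2, 3, 5, 8];
    assert_eq!(
        heights_in_range(&heights, (Bound::Included(2), Bound::Excluded(5))),
        vec![2, 3]
    );
    assert_eq!(
        heights_in_range(&heights, (Bound::Unbounded, Bound::Included(3))),
        vec![0, 2, 3]
    );
}
```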
- #[test] - fn checkpoint_range( - range in generate_height_range_bounds(21_000), - cp in generate_checkpoints(21_000, 2100) - ) { - let exp_heights = cp.iter().map(|cp| cp.height()).filter(|h| range.contains(h)).collect::>(); - let heights = cp.range(range).map(|cp| cp.height()).collect::>(); - prop_assert_eq!(heights, exp_heights); - } -} diff --git a/crates/chain/tests/test_spk_txout_index.rs b/crates/chain/tests/test_spk_txout_index.rs deleted file mode 100644 index 3d3b82e8..00000000 --- a/crates/chain/tests/test_spk_txout_index.rs +++ /dev/null @@ -1,124 +0,0 @@ -use bdk_chain::{spk_txout::SpkTxOutIndex, Indexer}; -use bitcoin::{ - absolute, transaction, Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxIn, TxOut, -}; - -#[test] -fn spk_txout_sent_and_received() { - let spk1 = ScriptBuf::from_hex("001404f1e52ce2bab3423c6a8c63b7cd730d8f12542c").unwrap(); - let spk2 = ScriptBuf::from_hex("00142b57404ae14f08c3a0c903feb2af7830605eb00f").unwrap(); - - let mut index = SpkTxOutIndex::default(); - index.insert_spk(0, spk1.clone()); - index.insert_spk(1, spk2.clone()); - - let tx1 = Transaction { - version: transaction::Version::TWO, - lock_time: absolute::LockTime::ZERO, - input: vec![], - output: vec![TxOut { - value: Amount::from_sat(42_000), - script_pubkey: spk1.clone(), - }], - }; - - assert_eq!( - index.sent_and_received(&tx1, ..), - (Amount::from_sat(0), Amount::from_sat(42_000)) - ); - assert_eq!( - index.sent_and_received(&tx1, ..1), - (Amount::from_sat(0), Amount::from_sat(42_000)) - ); - assert_eq!( - index.sent_and_received(&tx1, 1..), - (Amount::from_sat(0), Amount::from_sat(0)) - ); - assert_eq!(index.net_value(&tx1, ..), SignedAmount::from_sat(42_000)); - index.index_tx(&tx1); - assert_eq!( - index.sent_and_received(&tx1, ..), - (Amount::from_sat(0), Amount::from_sat(42_000)), - "shouldn't change after scanning" - ); - - let tx2 = Transaction { - version: transaction::Version::ONE, - lock_time: absolute::LockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint { - txid: tx1.compute_txid(), - vout: 0, - }, - ..Default::default() - }], - output: vec![ - TxOut { - value: Amount::from_sat(20_000), - script_pubkey: spk2, - }, - TxOut { - script_pubkey: spk1, - value: Amount::from_sat(30_000), - }, - ], - }; - - assert_eq!( - index.sent_and_received(&tx2, ..), - (Amount::from_sat(42_000), Amount::from_sat(50_000)) - ); - assert_eq!( - index.sent_and_received(&tx2, ..1), - (Amount::from_sat(42_000), Amount::from_sat(30_000)) - ); - assert_eq!( - index.sent_and_received(&tx2, 1..), - (Amount::from_sat(0), Amount::from_sat(20_000)) - ); - assert_eq!(index.net_value(&tx2, ..), SignedAmount::from_sat(8_000)); -} - -#[test] -fn mark_used() { - let spk1 = ScriptBuf::from_hex("001404f1e52ce2bab3423c6a8c63b7cd730d8f12542c").unwrap(); - let spk2 = ScriptBuf::from_hex("00142b57404ae14f08c3a0c903feb2af7830605eb00f").unwrap(); - - let mut spk_index = SpkTxOutIndex::default(); - spk_index.insert_spk(1, spk1.clone()); - spk_index.insert_spk(2, spk2); - - assert!(!spk_index.is_used(&1)); - spk_index.mark_used(&1); - assert!(spk_index.is_used(&1)); - spk_index.unmark_used(&1); - assert!(!spk_index.is_used(&1)); - spk_index.mark_used(&1); - assert!(spk_index.is_used(&1)); - - let tx1 = Transaction { - version: transaction::Version::TWO, - lock_time: absolute::LockTime::ZERO, - input: vec![], - output: vec![TxOut { - value: Amount::from_sat(42_000), - script_pubkey: spk1, - }], - }; - - spk_index.index_tx(&tx1); - spk_index.unmark_used(&1); - assert!( - spk_index.is_used(&1), - "even 
though we unmark_used it, it doesn't matter because there was a tx scanned that used it"
-    );
-}
-
-#[test]
-fn unmark_used_does_not_result_in_invalid_representation() {
-    let mut spk_index = SpkTxOutIndex::default();
-    assert!(!spk_index.unmark_used(&0));
-    assert!(!spk_index.unmark_used(&1));
-    assert!(!spk_index.unmark_used(&2));
-    assert!(spk_index.unused_spks(..).collect::<Vec<_>>().is_empty());
-}
diff --git a/crates/chain/tests/test_tx_graph.rs b/crates/chain/tests/test_tx_graph.rs
deleted file mode 100644
index 44614782..00000000
--- a/crates/chain/tests/test_tx_graph.rs
+++ /dev/null
@@ -1,1327 +0,0 @@
-#![cfg(feature = "miniscript")]
-
-#[macro_use]
-mod common;
-use bdk_chain::{collections::*, BlockId, ConfirmationBlockTime};
-use bdk_chain::{
-    local_chain::LocalChain,
-    tx_graph::{self, CalculateFeeError},
-    tx_graph::{ChangeSet, TxGraph},
-    Anchor, ChainOracle, ChainPosition, Merge,
-};
-use bdk_testenv::{block_id, hash, utils::new_tx};
-use bitcoin::{
-    absolute, hashes::Hash, transaction, Amount, BlockHash, OutPoint, ScriptBuf, SignedAmount,
-    Transaction, TxIn, TxOut, Txid,
-};
-use common::*;
-use core::iter;
-use rand::RngCore;
-use std::sync::Arc;
-use std::vec;
-
-#[test]
-fn insert_txouts() {
-    // Two (OutPoint, TxOut) tuples that denote the original data in the graph, as partial
-    // transactions.
-    let original_ops = [
-        (
-            OutPoint::new(hash!("tx1"), 1),
-            TxOut {
-                value: Amount::from_sat(10_000),
-                script_pubkey: ScriptBuf::new(),
-            },
-        ),
-        (
-            OutPoint::new(hash!("tx1"), 2),
-            TxOut {
-                value: Amount::from_sat(20_000),
-                script_pubkey: ScriptBuf::new(),
-            },
-        ),
-    ];
-
-    // Another (OutPoint, TxOut) tuple to be used as the update, as a partial transaction.
-    let update_ops = [(
-        OutPoint::new(hash!("tx2"), 0),
-        TxOut {
-            value: Amount::from_sat(20_000),
-            script_pubkey: ScriptBuf::new(),
-        },
-    )];
-
-    // One full transaction to be included in the update.
-    let update_tx = Transaction {
-        version: transaction::Version::ONE,
-        lock_time: absolute::LockTime::ZERO,
-        input: vec![TxIn {
-            previous_output: OutPoint::null(),
-            ..Default::default()
-        }],
-        output: vec![TxOut {
-            value: Amount::from_sat(30_000),
-            script_pubkey: ScriptBuf::new(),
-        }],
-    };
-
-    // Confirmation anchor used to mark the full transaction as confirmed.
-    let conf_anchor = BlockId {
-        height: 100,
-        hash: hash!("random blockhash"),
-    };
-
-    // Unconfirmed seen_at timestamp to mark the partial transactions as unconfirmed.
-    let unconf_seen_at = 1000000_u64;
-
-    // Make the original graph.
-    let mut graph = {
-        let mut graph = TxGraph::<BlockId>::default();
-        for (outpoint, txout) in &original_ops {
-            assert_eq!(
-                graph.insert_txout(*outpoint, txout.clone()),
-                ChangeSet {
-                    txouts: [(*outpoint, txout.clone())].into(),
-                    ..Default::default()
-                }
-            );
-        }
-        graph
-    };
-
-    // Make the update graph.
-    let update = {
-        let mut update = tx_graph::TxUpdate::default();
-        for (outpoint, txout) in &update_ops {
-            // Insert partial transactions.
-            update.txouts.insert(*outpoint, txout.clone());
-            // Mark them unconfirmed.
-            update.seen_ats.insert((outpoint.txid, unconf_seen_at));
-        }
-
-        // Insert the full transaction.
-        update.txs.push(update_tx.clone().into());
-        // Mark it as confirmed.
-        update
-            .anchors
-            .insert((conf_anchor, update_tx.compute_txid()));
-        update
-    };
-
-    // Check the resulting addition.
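Aside: the assertion just below pins down the key property of `apply_update`: only data the graph did not already contain shows up in the returned changeset. A toy std-only model of that contract (hypothetical `Graph` struct, not the real `TxGraph`; the monotone `last_seen` rule and empty changeset on re-application are assumptions extrapolated from these tests):

```rust
use std::collections::{BTreeMap, BTreeSet};

/// A toy model of applying a tx-graph update: only genuinely new data
/// ends up in the returned changeset.
#[derive(Debug, Default, PartialEq)]
struct Graph {
    txs: BTreeSet<&'static str>,
    last_seen: BTreeMap<&'static str, u64>,
}

fn apply_update(
    graph: &mut Graph,
    txs: &[&'static str],
    seen_ats: &[(&'static str, u64)],
) -> Graph {
    let mut change = Graph::default();
    for &tx in txs {
        if graph.txs.insert(tx) {
            change.txs.insert(tx);
        }
    }
    for &(txid, at) in seen_ats {
        // Assumed monotone rule: only a strictly newer timestamp counts.
        if graph.last_seen.get(txid).is_none_or(|&prev| at > prev) {
            graph.last_seen.insert(txid, at);
            change.last_seen.insert(txid, at);
        }
    }
    change
}

fn main() {
    let mut g = Graph::default();
    let cs = apply_update(&mut g, &["tx2"], &[("tx2", 1_000_000)]);
    assert_eq!(cs.txs, BTreeSet::from(["tx2"]));
    assert_eq!(cs.last_seen, BTreeMap::from([("tx2", 1_000_000)]));
    // Re-applying the same update yields an empty changeset.
    assert_eq!(
        apply_update(&mut g, &["tx2"], &[("tx2", 1_000_000)]),
        Graph::default()
    );
}
```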
- let changeset = graph.apply_update(update); - - assert_eq!( - changeset, - ChangeSet { - txs: [Arc::new(update_tx.clone())].into(), - txouts: update_ops.clone().into(), - anchors: [(conf_anchor, update_tx.compute_txid()),].into(), - last_seen: [(hash!("tx2"), 1000000)].into(), - last_evicted: [].into(), - } - ); - - // Apply changeset and check the new graph counts. - graph.apply_changeset(changeset); - assert_eq!(graph.all_txouts().count(), 4); - assert_eq!(graph.full_txs().count(), 1); - assert_eq!(graph.floating_txouts().count(), 3); - - // Check TxOuts are fetched correctly from the graph. - assert_eq!( - graph.tx_outputs(hash!("tx1")).expect("should exists"), - [ - ( - 1u32, - &TxOut { - value: Amount::from_sat(10_000), - script_pubkey: ScriptBuf::new(), - } - ), - ( - 2u32, - &TxOut { - value: Amount::from_sat(20_000), - script_pubkey: ScriptBuf::new(), - } - ) - ] - .into() - ); - - assert_eq!( - graph - .tx_outputs(update_tx.compute_txid()) - .expect("should exists"), - [( - 0u32, - &TxOut { - value: Amount::from_sat(30_000), - script_pubkey: ScriptBuf::new() - } - )] - .into() - ); - - // Check that the initial_changeset is correct - assert_eq!( - graph.initial_changeset(), - ChangeSet { - txs: [Arc::new(update_tx.clone())].into(), - txouts: update_ops.into_iter().chain(original_ops).collect(), - anchors: [(conf_anchor, update_tx.compute_txid()),].into(), - last_seen: [(hash!("tx2"), 1000000)].into(), - last_evicted: [].into(), - } - ); -} - -#[test] -fn insert_tx_graph_doesnt_count_coinbase_as_spent() { - let tx = Transaction { - version: transaction::Version::ONE, - lock_time: absolute::LockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint::null(), - ..Default::default() - }], - output: vec![], - }; - - let mut graph = TxGraph::::default(); - let changeset = graph.insert_tx(tx); - assert!(!changeset.is_empty()); - assert!(graph.outspends(OutPoint::null()).is_empty()); - assert!(graph.tx_spends(Txid::all_zeros()).next().is_none()); -} - -#[test] -fn insert_tx_graph_keeps_track_of_spend() { - let tx1 = Transaction { - version: transaction::Version::ONE, - lock_time: absolute::LockTime::ZERO, - input: vec![], - output: vec![TxOut::NULL], - }; - - let op = OutPoint { - txid: tx1.compute_txid(), - vout: 0, - }; - - let tx2 = Transaction { - version: transaction::Version::ONE, - lock_time: absolute::LockTime::ZERO, - input: vec![TxIn { - previous_output: op, - ..Default::default() - }], - output: vec![], - }; - - let mut graph1 = TxGraph::::default(); - let mut graph2 = TxGraph::::default(); - - // insert in different order - let _ = graph1.insert_tx(tx1.clone()); - let _ = graph1.insert_tx(tx2.clone()); - - let _ = graph2.insert_tx(tx2.clone()); - let _ = graph2.insert_tx(tx1); - - assert_eq!( - graph1.outspends(op), - &iter::once(tx2.compute_txid()).collect::>() - ); - assert_eq!(graph2.outspends(op), graph1.outspends(op)); -} - -#[test] -fn insert_tx_can_retrieve_full_tx_from_graph() { - let tx = Transaction { - version: transaction::Version::ONE, - lock_time: absolute::LockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint::null(), - ..Default::default() - }], - output: vec![TxOut::NULL], - }; - - let mut graph = TxGraph::::default(); - let _ = graph.insert_tx(tx.clone()); - assert_eq!( - graph - .get_tx(tx.compute_txid()) - .map(|tx| tx.as_ref().clone()), - Some(tx) - ); -} - -#[test] -fn insert_tx_displaces_txouts() { - let mut tx_graph = TxGraph::::default(); - - let tx = Transaction { - version: transaction::Version::ONE, - lock_time: 
absolute::LockTime::ZERO, - input: vec![], - output: vec![TxOut { - value: Amount::from_sat(42_000), - script_pubkey: ScriptBuf::default(), - }], - }; - let txid = tx.compute_txid(); - let outpoint = OutPoint::new(txid, 0); - let txout = tx.output.first().unwrap(); - - let changeset = tx_graph.insert_txout(outpoint, txout.clone()); - assert!(!changeset.is_empty()); - - let changeset = tx_graph.insert_tx(tx.clone()); - assert_eq!(changeset.txs.len(), 1); - assert!(changeset.txouts.is_empty()); - assert!(tx_graph.get_tx(txid).is_some()); - assert_eq!(tx_graph.get_txout(outpoint), Some(txout)); -} - -#[test] -fn insert_txout_does_not_displace_tx() { - let mut tx_graph = TxGraph::::default(); - let tx = Transaction { - version: transaction::Version::ONE, - lock_time: absolute::LockTime::ZERO, - input: vec![], - output: vec![TxOut { - value: Amount::from_sat(42_000), - script_pubkey: ScriptBuf::new(), - }], - }; - - let _changeset = tx_graph.insert_tx(tx.clone()); - - let _ = tx_graph.insert_txout( - OutPoint { - txid: tx.compute_txid(), - vout: 0, - }, - TxOut { - value: Amount::from_sat(1_337_000), - script_pubkey: ScriptBuf::new(), - }, - ); - - let _ = tx_graph.insert_txout( - OutPoint { - txid: tx.compute_txid(), - vout: 1, - }, - TxOut { - value: Amount::from_sat(1_000_000_000), - script_pubkey: ScriptBuf::new(), - }, - ); - - assert_eq!( - tx_graph - .get_txout(OutPoint { - txid: tx.compute_txid(), - vout: 0 - }) - .unwrap() - .value, - Amount::from_sat(42_000) - ); - assert_eq!( - tx_graph.get_txout(OutPoint { - txid: tx.compute_txid(), - vout: 1 - }), - None - ); -} - -#[test] -fn test_calculate_fee() { - let mut graph = TxGraph::::default(); - let intx1 = Transaction { - version: transaction::Version::ONE, - lock_time: absolute::LockTime::ZERO, - input: vec![], - output: vec![TxOut { - value: Amount::from_sat(100), - script_pubkey: ScriptBuf::new(), - }], - }; - let intx2 = Transaction { - version: transaction::Version::TWO, - lock_time: absolute::LockTime::ZERO, - input: vec![], - output: vec![TxOut { - value: Amount::from_sat(200), - script_pubkey: ScriptBuf::new(), - }], - }; - - let intxout1 = ( - OutPoint { - txid: hash!("dangling output"), - vout: 0, - }, - TxOut { - value: Amount::from_sat(300), - script_pubkey: ScriptBuf::new(), - }, - ); - - let _ = graph.insert_tx(intx1.clone()); - let _ = graph.insert_tx(intx2.clone()); - let _ = graph.insert_txout(intxout1.0, intxout1.1); - - let mut tx = Transaction { - version: transaction::Version::ONE, - lock_time: absolute::LockTime::ZERO, - input: vec![ - TxIn { - previous_output: OutPoint { - txid: intx1.compute_txid(), - vout: 0, - }, - ..Default::default() - }, - TxIn { - previous_output: OutPoint { - txid: intx2.compute_txid(), - vout: 0, - }, - ..Default::default() - }, - TxIn { - previous_output: intxout1.0, - ..Default::default() - }, - ], - output: vec![TxOut { - value: Amount::from_sat(500), - script_pubkey: ScriptBuf::new(), - }], - }; - - assert_eq!(graph.calculate_fee(&tx), Ok(Amount::from_sat(100))); - - tx.input.remove(2); - - // fee would be negative, should return CalculateFeeError::NegativeFee - assert_eq!( - graph.calculate_fee(&tx), - Err(CalculateFeeError::NegativeFee(SignedAmount::from_sat(-200))) - ); - - // If we have an unknown outpoint, fee should return CalculateFeeError::MissingTxOut. 
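Aside: the arithmetic behind these assertions is `fee = sum(inputs) - sum(outputs)`, with two failure modes: an input whose prevout the graph does not know, and a negative difference. A standalone sketch using the same numbers as the test (hypothetical `FeeError`, mirroring `CalculateFeeError`; `None` stands in for an unknown prevout):

```rust
/// Fee = sum(inputs) - sum(outputs); unknown prevouts and negative fees
/// are reported as errors, mirroring CalculateFeeError above.
#[derive(Debug, PartialEq)]
enum FeeError {
    NegativeFee(i64),
    MissingTxOut,
}

fn calculate_fee(input_values: &[Option<u64>], output_values: &[u64]) -> Result<u64, FeeError> {
    let mut in_sum: i64 = 0;
    for v in input_values {
        match v {
            Some(v) => in_sum += *v as i64,
            None => return Err(FeeError::MissingTxOut), // prevout not in graph
        }
    }
    let out_sum: i64 = output_values.iter().map(|&v| v as i64).sum();
    let fee = in_sum - out_sum;
    if fee < 0 {
        return Err(FeeError::NegativeFee(fee));
    }
    Ok(fee as u64)
}

fn main() {
    // 100 + 200 + 300 sats in, 500 sats out -> fee 100, as in the test above.
    assert_eq!(calculate_fee(&[Some(100), Some(200), Some(300)], &[500]), Ok(100));
    // Dropping the 300-sat input makes the fee negative.
    assert_eq!(
        calculate_fee(&[Some(100), Some(200)], &[500]),
        Err(FeeError::NegativeFee(-200))
    );
    // An input whose prevout is unknown cannot be priced.
    assert_eq!(calculate_fee(&[None], &[500]), Err(FeeError::MissingTxOut));
}
```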
- let outpoint = OutPoint { - txid: hash!("unknown_txid"), - vout: 0, - }; - tx.input.push(TxIn { - previous_output: outpoint, - ..Default::default() - }); - assert_eq!( - graph.calculate_fee(&tx), - Err(CalculateFeeError::MissingTxOut(vec!(outpoint))) - ); -} - -#[test] -fn test_calculate_fee_on_coinbase() { - let tx = Transaction { - version: transaction::Version::ONE, - lock_time: absolute::LockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint::null(), - ..Default::default() - }], - output: vec![TxOut::NULL], - }; - - let graph = TxGraph::<()>::default(); - - assert_eq!(graph.calculate_fee(&tx), Ok(Amount::ZERO)); -} - -// `test_walk_ancestors` uses the following transaction structure: -// -// a0 -// / \ -// b0 b1 b2 -// / \ \ / -// c0 c1 c2 c3 -// / \ / -// d0 d1 -// \ -// e0 -// -// where b0 and b1 spend a0, c0 and c1 spend b0, d0 spends c1, etc. -#[test] -fn test_walk_ancestors() { - let local_chain = LocalChain::from_blocks( - (0..=20) - .map(|ht| (ht, BlockHash::hash(format!("Block Hash {}", ht).as_bytes()))) - .collect(), - ) - .expect("must contain genesis hash"); - let tip = local_chain.tip(); - - let tx_a0 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(hash!("op0"), 0), - ..TxIn::default() - }], - output: vec![TxOut::NULL, TxOut::NULL], - ..new_tx(0) - }; - - // tx_b0 spends tx_a0 - let tx_b0 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(tx_a0.compute_txid(), 0), - ..TxIn::default() - }], - output: vec![TxOut::NULL, TxOut::NULL], - ..new_tx(0) - }; - - // tx_b1 spends tx_a0 - let tx_b1 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(tx_a0.compute_txid(), 1), - ..TxIn::default() - }], - output: vec![TxOut::NULL], - ..new_tx(0) - }; - - let tx_b2 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(hash!("op1"), 0), - ..TxIn::default() - }], - output: vec![TxOut::NULL], - ..new_tx(0) - }; - - // tx_c0 spends tx_b0 - let tx_c0 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(tx_b0.compute_txid(), 0), - ..TxIn::default() - }], - output: vec![TxOut::NULL], - ..new_tx(0) - }; - - // tx_c1 spends tx_b0 - let tx_c1 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(tx_b0.compute_txid(), 1), - ..TxIn::default() - }], - output: vec![TxOut::NULL], - ..new_tx(0) - }; - - // tx_c2 spends tx_b1 and tx_b2 - let tx_c2 = Transaction { - input: vec![ - TxIn { - previous_output: OutPoint::new(tx_b1.compute_txid(), 0), - ..TxIn::default() - }, - TxIn { - previous_output: OutPoint::new(tx_b2.compute_txid(), 0), - ..TxIn::default() - }, - ], - output: vec![TxOut::NULL], - ..new_tx(0) - }; - - let tx_c3 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(hash!("op2"), 0), - ..TxIn::default() - }], - output: vec![TxOut::NULL], - ..new_tx(0) - }; - - // tx_d0 spends tx_c1 - let tx_d0 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(tx_c1.compute_txid(), 0), - ..TxIn::default() - }], - output: vec![TxOut::NULL], - ..new_tx(0) - }; - - // tx_d1 spends tx_c2 and tx_c3 - let tx_d1 = Transaction { - input: vec![ - TxIn { - previous_output: OutPoint::new(tx_c2.compute_txid(), 0), - ..TxIn::default() - }, - TxIn { - previous_output: OutPoint::new(tx_c3.compute_txid(), 0), - ..TxIn::default() - }, - ], - output: vec![TxOut::NULL], - ..new_tx(0) - }; - - // tx_e0 spends tx_d1 - let tx_e0 = Transaction { - input: vec![TxIn { - previous_output: OutPoint::new(tx_d1.compute_txid(), 0), - ..TxIn::default() - }], - output: vec![TxOut::NULL], - 
..new_tx(0)
-    };
-
-    let mut graph = TxGraph::<BlockId>::new([
-        tx_a0.clone(),
-        tx_b0.clone(),
-        tx_b1.clone(),
-        tx_b2.clone(),
-        tx_c0.clone(),
-        tx_c1.clone(),
-        tx_c2.clone(),
-        tx_c3.clone(),
-        tx_d0.clone(),
-        tx_d1.clone(),
-        tx_e0.clone(),
-    ]);
-
-    [&tx_a0, &tx_b1].iter().for_each(|&tx| {
-        let changeset = graph.insert_anchor(tx.compute_txid(), tip.block_id());
-        assert!(!changeset.is_empty());
-    });
-
-    let ancestors = [
-        graph
-            .walk_ancestors(tx_c0.clone(), |depth, tx| Some((depth, tx)))
-            .collect::<Vec<_>>(),
-        graph
-            .walk_ancestors(tx_d0.clone(), |depth, tx| Some((depth, tx)))
-            .collect::<Vec<_>>(),
-        graph
-            .walk_ancestors(tx_e0.clone(), |depth, tx| Some((depth, tx)))
-            .collect::<Vec<_>>(),
-        // Only traverse unconfirmed ancestors of tx_e0 this time
-        graph
-            .walk_ancestors(tx_e0.clone(), |depth, tx| {
-                let tx_node = graph.get_tx_node(tx.compute_txid())?;
-                for block in tx_node.anchors {
-                    match local_chain.is_block_in_chain(block.anchor_block(), tip.block_id()) {
-                        Ok(Some(true)) => return None,
-                        _ => continue,
-                    }
-                }
-                Some((depth, tx_node.tx))
-            })
-            .collect::<Vec<_>>(),
-    ];
-
-    let expected_ancestors = [
-        vec![(1, &tx_b0), (2, &tx_a0)],
-        vec![(1, &tx_c1), (2, &tx_b0), (3, &tx_a0)],
-        vec![
-            (1, &tx_d1),
-            (2, &tx_c2),
-            (2, &tx_c3),
-            (3, &tx_b1),
-            (3, &tx_b2),
-            (4, &tx_a0),
-        ],
-        vec![(1, &tx_d1), (2, &tx_c2), (2, &tx_c3), (3, &tx_b2)],
-    ];
-
-    for (txids, expected_txids) in ancestors.into_iter().zip(expected_ancestors) {
-        assert_eq!(
-            txids,
-            expected_txids
-                .into_iter()
-                .map(|(i, tx)| (i, Arc::new(tx.clone())))
-                .collect::<Vec<_>>()
-        );
-    }
-}
-
-#[test]
-fn test_conflicting_descendants() {
-    let previous_output = OutPoint::new(hash!("op"), 2);
-
-    // tx_a spends previous_output
-    let tx_a = Transaction {
-        input: vec![TxIn {
-            previous_output,
-            ..TxIn::default()
-        }],
-        output: vec![TxOut::NULL],
-        ..new_tx(0)
-    };
-
-    // tx_a2 spends previous_output and conflicts with tx_a
-    let tx_a2 = Transaction {
-        input: vec![TxIn {
-            previous_output,
-            ..TxIn::default()
-        }],
-        output: vec![TxOut::NULL, TxOut::NULL],
-        ..new_tx(1)
-    };
-
-    // tx_b spends tx_a
-    let tx_b = Transaction {
-        input: vec![TxIn {
-            previous_output: OutPoint::new(tx_a.compute_txid(), 0),
-            ..TxIn::default()
-        }],
-        output: vec![TxOut::NULL],
-        ..new_tx(2)
-    };
-
-    let txid_a = tx_a.compute_txid();
-    let txid_b = tx_b.compute_txid();
-
-    let mut graph = TxGraph::<ConfirmationBlockTime>::default();
-    let _ = graph.insert_tx(tx_a);
-    let _ = graph.insert_tx(tx_b);
-
-    assert_eq!(
-        graph
-            .walk_conflicts(&tx_a2, |depth, txid| Some((depth, txid)))
-            .collect::<Vec<_>>(),
-        vec![(0_usize, txid_a), (1_usize, txid_b),],
-    );
-}
-
-#[test]
-fn test_descendants_no_repeat() {
-    let tx_a = Transaction {
-        output: vec![TxOut::NULL, TxOut::NULL, TxOut::NULL],
-        ..new_tx(0)
-    };
-
-    let txs_b = (0..3)
-        .map(|vout| Transaction {
-            input: vec![TxIn {
-                previous_output: OutPoint::new(tx_a.compute_txid(), vout),
-                ..TxIn::default()
-            }],
-            output: vec![TxOut::NULL],
-            ..new_tx(1)
-        })
-        .collect::<Vec<_>>();
-
-    let txs_c = (0..2)
-        .map(|vout| Transaction {
-            input: vec![TxIn {
-                previous_output: OutPoint::new(txs_b[vout as usize].compute_txid(), vout),
-                ..TxIn::default()
-            }],
-            output: vec![TxOut::NULL],
-            ..new_tx(2)
-        })
-        .collect::<Vec<_>>();
-
-    let tx_d = Transaction {
-        input: vec![
-            TxIn {
-                previous_output: OutPoint::new(txs_c[0].compute_txid(), 0),
-                ..TxIn::default()
-            },
-            TxIn {
-                previous_output: OutPoint::new(txs_c[1].compute_txid(), 0),
-                ..TxIn::default()
-            },
-        ],
-        output: vec![TxOut::NULL],
-        ..new_tx(3)
-    };
-
-    let tx_e = Transaction {
-        input: vec![TxIn {
-            previous_output: OutPoint::new(tx_d.compute_txid(), 0),
-            ..TxIn::default()
-        }],
-        output: vec![TxOut::NULL],
-        ..new_tx(4)
-    };
-
-    let txs_not_connected = (10..20)
-        .map(|v| Transaction {
-            input: vec![TxIn {
-                previous_output: OutPoint::new(hash!("tx_does_not_exist"), v),
-                ..TxIn::default()
-            }],
-            output: vec![TxOut::NULL],
-            ..new_tx(v)
-        })
-        .collect::<Vec<_>>();
-
-    let mut graph = TxGraph::<ConfirmationBlockTime>::default();
-    let mut expected_txids = Vec::new();
-
-    // these are NOT descendants of `tx_a`
-    for tx in txs_not_connected {
-        let _ = graph.insert_tx(tx.clone());
-    }
-
-    // these are the expected descendants of `tx_a`
-    for tx in txs_b
-        .iter()
-        .chain(&txs_c)
-        .chain(core::iter::once(&tx_d))
-        .chain(core::iter::once(&tx_e))
-    {
-        let _ = graph.insert_tx(tx.clone());
-        expected_txids.push(tx.compute_txid());
-    }
-
-    let descendants = graph
-        .walk_descendants(tx_a.compute_txid(), |_, txid| Some(txid))
-        .collect::<Vec<_>>();
-
-    assert_eq!(descendants, expected_txids);
-}
-
-#[test]
-fn test_chain_spends() {
-    let local_chain = LocalChain::from_blocks(
-        (0..=100)
-            .map(|ht| (ht, BlockHash::hash(format!("Block Hash {}", ht).as_bytes())))
-            .collect(),
-    )
-    .expect("must have genesis hash");
-    let tip = local_chain.tip();
-
-    // The parent tx contains 2 outputs, which are spent by one confirmed and one unconfirmed tx.
-    // The parent tx is confirmed at block 95.
-    let tx_0 = Transaction {
-        input: vec![],
-        output: vec![
-            TxOut {
-                value: Amount::from_sat(10_000),
-                script_pubkey: ScriptBuf::new(),
-            },
-            TxOut {
-                value: Amount::from_sat(20_000),
-                script_pubkey: ScriptBuf::new(),
-            },
-        ],
-        ..new_tx(0)
-    };
-
-    // The first transaction spends vout 0 and is confirmed at block 98.
-    let tx_1 = Transaction {
-        input: vec![TxIn {
-            previous_output: OutPoint::new(tx_0.compute_txid(), 0),
-            ..TxIn::default()
-        }],
-        output: vec![
-            TxOut {
-                value: Amount::from_sat(5_000),
-                script_pubkey: ScriptBuf::new(),
-            },
-            TxOut {
-                value: Amount::from_sat(5_000),
-                script_pubkey: ScriptBuf::new(),
-            },
-        ],
-        ..new_tx(0)
-    };
-
-    // The second transaction spends vout 1 and is unconfirmed.
-    let tx_2 = Transaction {
-        input: vec![TxIn {
-            previous_output: OutPoint::new(tx_0.compute_txid(), 1),
-            ..TxIn::default()
-        }],
-        output: vec![
-            TxOut {
-                value: Amount::from_sat(10_000),
-                script_pubkey: ScriptBuf::new(),
-            },
-            TxOut {
-                value: Amount::from_sat(10_000),
-                script_pubkey: ScriptBuf::new(),
-            },
-        ],
-        ..new_tx(0)
-    };
-
-    let mut graph = TxGraph::<ConfirmationBlockTime>::default();
-
-    let _ = graph.insert_tx(tx_0.clone());
-    let _ = graph.insert_tx(tx_1.clone());
-    let _ = graph.insert_tx(tx_2.clone());
-
-    for (ht, tx) in [(95, &tx_0), (98, &tx_1)] {
-        let _ = graph.insert_anchor(
-            tx.compute_txid(),
-            ConfirmationBlockTime {
-                block_id: tip.get(ht).unwrap().block_id(),
-                confirmation_time: 100,
-            },
-        );
-    }
-
-    let build_canonical_spends = |chain: &LocalChain,
-                                  tx_graph: &TxGraph<ConfirmationBlockTime>|
-     -> HashMap<OutPoint, (ChainPosition<ConfirmationBlockTime>, Txid)> {
-        tx_graph
-            .filter_chain_txouts(
-                chain,
-                tip.block_id(),
-                tx_graph.all_txouts().map(|(op, _)| ((), op)),
-            )
-            .filter_map(|(_, full_txo)| Some((full_txo.outpoint, full_txo.spent_by?)))
-            .collect()
-    };
-    let build_canonical_positions = |chain: &LocalChain,
-                                     tx_graph: &TxGraph<ConfirmationBlockTime>|
-     -> HashMap<Txid, ChainPosition<ConfirmationBlockTime>> {
-        tx_graph
-            .list_canonical_txs(chain, tip.block_id())
-            .map(|canon_tx| (canon_tx.tx_node.txid, canon_tx.chain_position))
-            .collect()
-    };
-
-    {
-        let canonical_spends = build_canonical_spends(&local_chain, &graph);
-        let canonical_positions = build_canonical_positions(&local_chain, &graph);
-
-        // Assert that confirmed spends are returned correctly.
-        assert_eq!(
-            canonical_spends
-                .get(&OutPoint::new(tx_0.compute_txid(), 0))
-                .cloned(),
-            Some((
-                ChainPosition::Confirmed {
-                    anchor: ConfirmationBlockTime {
-                        block_id: tip.get(98).unwrap().block_id(),
-                        confirmation_time: 100
-                    },
-                    transitively: None,
-                },
-                tx_1.compute_txid(),
-            )),
-        );
-        // Check that the chain position is returned correctly.
-        assert_eq!(
-            canonical_positions.get(&tx_0.compute_txid()).cloned(),
-            Some(ChainPosition::Confirmed {
-                anchor: ConfirmationBlockTime {
-                    block_id: tip.get(95).unwrap().block_id(),
-                    confirmation_time: 100
-                },
-                transitively: None
-            })
-        );
-    }
-
-    // Mark the unconfirmed tx as seen and check that the correct chain position is returned.
-    let _ = graph.insert_seen_at(tx_2.compute_txid(), 1234567);
-    {
-        let canonical_spends = build_canonical_spends(&local_chain, &graph);
-
-        // Check that the chain spend is returned correctly.
-        assert_eq!(
-            canonical_spends
-                .get(&OutPoint::new(tx_0.compute_txid(), 1))
-                .cloned(),
-            Some((
-                ChainPosition::Unconfirmed {
-                    last_seen: Some(1234567)
-                },
-                tx_2.compute_txid()
-            ))
-        );
-    }
-
-    // A conflicting transaction that conflicts with tx_1.
-    let tx_1_conflict = Transaction {
-        input: vec![TxIn {
-            previous_output: OutPoint::new(tx_0.compute_txid(), 0),
-            ..Default::default()
-        }],
-        ..new_tx(0)
-    };
-    let _ = graph.insert_tx(tx_1_conflict.clone());
-    {
-        let canonical_positions = build_canonical_positions(&local_chain, &graph);
-
-        // Because this tx conflicts with an already confirmed transaction, its chain position
-        // should be none.
-        assert!(canonical_positions
-            .get(&tx_1_conflict.compute_txid())
-            .is_none());
-    }
-
-    // Another conflicting tx that conflicts with tx_2.
-    let tx_2_conflict = Transaction {
-        input: vec![TxIn {
-            previous_output: OutPoint::new(tx_0.compute_txid(), 1),
-            ..Default::default()
-        }],
-        ..new_tx(0)
-    };
-    // Insert into the graph and mark it as seen.
-    let _ = graph.insert_tx(tx_2_conflict.clone());
-    let _ = graph.insert_seen_at(tx_2_conflict.compute_txid(), 1234568);
-    {
-        let canonical_spends = build_canonical_spends(&local_chain, &graph);
-        let canonical_positions = build_canonical_positions(&local_chain, &graph);
-
-        // This should return a valid observation with the correct last seen.
-        assert_eq!(
-            canonical_positions
-                .get(&tx_2_conflict.compute_txid())
-                .cloned(),
-            Some(ChainPosition::Unconfirmed {
-                last_seen: Some(1234568)
-            })
-        );
-
-        // The canonical spend now picks up the new transaction as the spend.
-        assert_eq!(
-            canonical_spends
-                .get(&OutPoint::new(tx_0.compute_txid(), 1))
-                .cloned(),
-            Some((
-                ChainPosition::Unconfirmed {
-                    last_seen: Some(1234568)
-                },
-                tx_2_conflict.compute_txid()
-            ))
-        );
-
-        // The chain position of `tx_2` is now none, as it is older than `tx_2_conflict`.
-        assert!(canonical_positions.get(&tx_2.compute_txid()).is_none());
-    }
-}
-
-/// Ensure that `last_seen` values only increase during [`Merge::merge`].
-#[test]
-fn test_changeset_last_seen_merge() {
-    let txid: Txid = hash!("test txid");
-
-    let test_cases: &[(Option<u64>, Option<u64>)] = &[
-        (Some(5), Some(6)),
-        (Some(5), Some(5)),
-        (Some(6), Some(5)),
-        (None, Some(5)),
-        (Some(5), None),
-    ];
-
-    for (original_ls, update_ls) in test_cases {
-        let mut original = ChangeSet::<()> {
-            last_seen: original_ls.map(|ls| (txid, ls)).into_iter().collect(),
-            ..Default::default()
-        };
-        assert!(!original.is_empty() || original_ls.is_none());
-        let update = ChangeSet::<()> {
-            last_seen: update_ls.map(|ls| (txid, ls)).into_iter().collect(),
-            ..Default::default()
-        };
-        assert!(!update.is_empty() || update_ls.is_none());
-
-        original.merge(update);
-        assert_eq!(
-            &original.last_seen.get(&txid).cloned(),
-            Ord::max(original_ls, update_ls),
-        );
-    }
-}
-
-#[test]
-fn transactions_inserted_into_tx_graph_are_not_canonical_until_they_have_an_anchor_in_best_chain() {
-    let txs = vec![new_tx(0), new_tx(1)];
-    let txids: Vec<Txid> = txs.iter().map(Transaction::compute_txid).collect();
-
-    // graph
-    let mut graph = TxGraph::<BlockId>::new(txs);
-    let full_txs: Vec<_> = graph.full_txs().collect();
-    assert_eq!(full_txs.len(), 2);
-    let unseen_txs: Vec<_> = graph.txs_with_no_anchor_or_last_seen().collect();
-    assert_eq!(unseen_txs.len(), 2);
-
-    // chain
-    let blocks: BTreeMap<u32, BlockHash> = [(0, hash!("g")), (1, hash!("A")), (2, hash!("B"))]
-        .into_iter()
-        .collect();
-    let chain = LocalChain::from_blocks(blocks).unwrap();
-    let canonical_txs: Vec<_> = graph
-        .list_canonical_txs(&chain, chain.tip().block_id())
-        .collect();
-    assert!(canonical_txs.is_empty());
-
-    // tx0 with seen_at should be returned by canonical txs
-    let _ = graph.insert_seen_at(txids[0], 2);
-    let mut canonical_txs = graph.list_canonical_txs(&chain, chain.tip().block_id());
-    assert_eq!(
-        canonical_txs.next().map(|tx| tx.tx_node.txid).unwrap(),
-        txids[0]
-    );
-    drop(canonical_txs);
-
-    // tx1 with anchor is also canonical
-    let _ = graph.insert_anchor(txids[1], block_id!(2, "B"));
-    let canonical_txids: Vec<_> = graph
-        .list_canonical_txs(&chain, chain.tip().block_id())
-        .map(|tx| tx.tx_node.txid)
-        .collect();
-    assert!(canonical_txids.contains(&txids[1]));
-    assert!(graph.txs_with_no_anchor_or_last_seen().next().is_none());
-}
-
-#[test]
-fn insert_anchor_without_tx() {
-    let mut graph = TxGraph::<BlockId>::default();
-
-    let tx = new_tx(21);
-    let txid = tx.compute_txid();
-
-    let anchor = BlockId {
-        height: 100,
-        hash: hash!("A"),
-    };
-
-    // insert anchor with no corresponding tx
-    let mut changeset =
graph.insert_anchor(txid, anchor); - assert!(changeset.anchors.contains(&(anchor, txid))); - // recover from changeset - let mut recovered = TxGraph::default(); - recovered.apply_changeset(changeset.clone()); - assert_eq!(recovered, graph); - - // now insert tx - let tx = Arc::new(tx); - let graph_changeset = graph.insert_tx(tx.clone()); - assert!(graph_changeset.txs.contains(&tx)); - changeset.merge(graph_changeset); - // recover from changeset again - let mut recovered = TxGraph::default(); - recovered.apply_changeset(changeset); - assert_eq!(recovered, graph); -} - -#[test] -/// The `map_anchors` allow a caller to pass a function to reconstruct the [`TxGraph`] with any [`Anchor`], -/// even though the function is non-deterministic. -fn call_map_anchors_with_non_deterministic_anchor() { - #[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)] - /// A non-deterministic anchor - pub struct NonDeterministicAnchor { - pub anchor_block: BlockId, - pub non_deterministic_field: u32, - } - - impl Anchor for NonDeterministicAnchor { - fn anchor_block(&self) -> BlockId { - self.anchor_block - } - } - - let template = [ - TxTemplate { - tx_name: "tx1", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(1))], - anchors: &[block_id!(1, "A")], - last_seen: None, - }, - TxTemplate { - tx_name: "tx2", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(20000, Some(2))], - anchors: &[block_id!(2, "B")], - ..Default::default() - }, - TxTemplate { - tx_name: "tx3", - inputs: &[TxInTemplate::PrevTx("tx2", 0)], - outputs: &[TxOutTemplate::new(30000, Some(3))], - anchors: &[block_id!(3, "C"), block_id!(4, "D")], - ..Default::default() - }, - ]; - let (graph, _, _) = init_graph(&template); - let new_graph = graph.clone().map_anchors(|a| NonDeterministicAnchor { - anchor_block: a, - // A non-deterministic value - non_deterministic_field: rand::thread_rng().next_u32(), - }); - - // Check all the details in new_graph reconstruct as well - - let mut full_txs_vec: Vec<_> = graph.full_txs().collect(); - full_txs_vec.sort(); - let mut new_txs_vec: Vec<_> = new_graph.full_txs().collect(); - new_txs_vec.sort(); - let mut new_txs = new_txs_vec.iter(); - - for tx_node in full_txs_vec.iter() { - let new_txnode = new_txs.next().unwrap(); - assert_eq!(new_txnode.txid, tx_node.txid); - assert_eq!(new_txnode.tx, tx_node.tx); - assert_eq!( - new_txnode.last_seen_unconfirmed, - tx_node.last_seen_unconfirmed - ); - assert_eq!(new_txnode.anchors.len(), tx_node.anchors.len()); - - let mut new_anchors: Vec<_> = new_txnode.anchors.iter().map(|a| a.anchor_block).collect(); - new_anchors.sort(); - let mut old_anchors: Vec<_> = tx_node.anchors.iter().copied().collect(); - old_anchors.sort(); - assert_eq!(new_anchors, old_anchors); - } - assert!(new_txs.next().is_none()); - - let mut new_graph_anchors: Vec<_> = new_graph - .all_anchors() - .iter() - .flat_map(|(_, anchors)| anchors) - .map(|a| a.anchor_block) - .collect(); - new_graph_anchors.sort(); - assert_eq!( - new_graph_anchors, - vec![ - block_id!(1, "A"), - block_id!(2, "B"), - block_id!(3, "C"), - block_id!(4, "D"), - ] - ); -} - -/// Tests `From` impls for conversion between [`TxGraph`] and [`tx_graph::TxUpdate`]. 
-#[test]
-fn tx_graph_update_conversion() {
-    use tx_graph::TxUpdate;
-
-    type TestCase = (&'static str, TxUpdate<ConfirmationBlockTime>);
-
-    fn make_tx(v: i32) -> Transaction {
-        Transaction {
-            version: transaction::Version(v),
-            lock_time: absolute::LockTime::ZERO,
-            input: vec![],
-            output: vec![],
-        }
-    }
-
-    fn make_txout(a: u64) -> TxOut {
-        TxOut {
-            value: Amount::from_sat(a),
-            script_pubkey: ScriptBuf::default(),
-        }
-    }
-
-    let test_cases: &[TestCase] = &[
-        ("empty_update", TxUpdate::default()),
-        ("single_tx", {
-            let mut tx_update = TxUpdate::default();
-            tx_update.txs = vec![make_tx(0).into()];
-            tx_update
-        }),
-        ("two_txs", {
-            let mut tx_update = TxUpdate::default();
-            tx_update.txs = vec![make_tx(0).into(), make_tx(1).into()];
-            tx_update
-        }),
-        ("with_floating_txouts", {
-            let mut tx_update = TxUpdate::default();
-            tx_update.txs = vec![make_tx(0).into(), make_tx(1).into()];
-            tx_update.txouts = [
-                (OutPoint::new(hash!("a"), 0), make_txout(0)),
-                (OutPoint::new(hash!("a"), 1), make_txout(1)),
-                (OutPoint::new(hash!("b"), 0), make_txout(2)),
-            ]
-            .into();
-            tx_update
-        }),
-        ("with_anchors", {
-            let mut tx_update = TxUpdate::default();
-            tx_update.txs = vec![make_tx(0).into(), make_tx(1).into()];
-            tx_update.txouts = [
-                (OutPoint::new(hash!("a"), 0), make_txout(0)),
-                (OutPoint::new(hash!("a"), 1), make_txout(1)),
-                (OutPoint::new(hash!("b"), 0), make_txout(2)),
-            ]
-            .into();
-            tx_update.anchors = [
-                (ConfirmationBlockTime::default(), hash!("a")),
-                (ConfirmationBlockTime::default(), hash!("b")),
-            ]
-            .into();
-            tx_update
-        }),
-        ("with_seen_ats", {
-            let mut tx_update = TxUpdate::default();
-            tx_update.txs = vec![make_tx(0).into(), make_tx(1).into()];
-            tx_update.txouts = [
-                (OutPoint::new(hash!("a"), 0), make_txout(0)),
-                (OutPoint::new(hash!("a"), 1), make_txout(1)),
-                (OutPoint::new(hash!("d"), 0), make_txout(2)),
-            ]
-            .into();
-            tx_update.anchors = [
-                (ConfirmationBlockTime::default(), hash!("a")),
-                (ConfirmationBlockTime::default(), hash!("b")),
-            ]
-            .into();
-            tx_update.seen_ats = [(hash!("c"), 12346)].into_iter().collect();
-            tx_update
-        }),
-    ];
-
-    for (test_name, update) in test_cases {
-        let mut tx_graph = TxGraph::<ConfirmationBlockTime>::default();
-        let _ = tx_graph.apply_update(update.clone());
-        let update_from_tx_graph: TxUpdate<ConfirmationBlockTime> = tx_graph.into();
-
-        assert_eq!(
-            update
-                .txs
-                .iter()
-                .map(|tx| tx.compute_txid())
-                .collect::<HashSet<_>>(),
-            update_from_tx_graph
-                .txs
-                .iter()
-                .map(|tx| tx.compute_txid())
-                .collect::<HashSet<_>>(),
-            "{}: txs do not match",
-            test_name
-        );
-        assert_eq!(
-            update.txouts, update_from_tx_graph.txouts,
-            "{}: txouts do not match",
-            test_name
-        );
-        assert_eq!(
-            update.anchors, update_from_tx_graph.anchors,
-            "{}: anchors do not match",
-            test_name
-        );
-        assert_eq!(
-            update.seen_ats, update_from_tx_graph.seen_ats,
-            "{}: seen_ats do not match",
-            test_name
-        );
-    }
-}
diff --git a/crates/chain/tests/test_tx_graph_conflicts.rs b/crates/chain/tests/test_tx_graph_conflicts.rs
deleted file mode 100644
index ff4c8b1f..00000000
--- a/crates/chain/tests/test_tx_graph_conflicts.rs
+++ /dev/null
@@ -1,766 +0,0 @@
-#![cfg(feature = "miniscript")]
-
-#[macro_use]
-mod common;
-
-use bdk_chain::{Balance, BlockId};
-use bdk_testenv::{block_id, hash, local_chain};
-use bitcoin::{Amount, OutPoint, ScriptBuf};
-use common::*;
-use std::collections::{BTreeSet, HashSet};
-
-#[allow(dead_code)]
-struct Scenario<'a> {
-    /// Name of the test scenario
-    name: &'a str,
-    /// Transaction templates
-    tx_templates: &'a [TxTemplate<'a, BlockId>],
-    /// Names of txs that must exist in the
output of `list_canonical_txs` - exp_chain_txs: HashSet<&'a str>, - /// Outpoints that must exist in the output of `filter_chain_txouts` - exp_chain_txouts: HashSet<(&'a str, u32)>, - /// Outpoints of UTXOs that must exist in the output of `filter_chain_unspents` - exp_unspents: HashSet<(&'a str, u32)>, - /// Expected balances - exp_balance: Balance, -} - -/// This test ensures that [`TxGraph`] will reliably filter out irrelevant transactions when -/// presented with multiple conflicting transaction scenarios using the [`TxTemplate`] structure. -/// This test also checks that [`TxGraph::list_canonical_txs`], [`TxGraph::filter_chain_txouts`], -/// [`TxGraph::filter_chain_unspents`], and [`TxGraph::balance`] return correct data. -#[test] -fn test_tx_conflict_handling() { - // Create Local chains - let local_chain = local_chain!( - (0, hash!("A")), - (1, hash!("B")), - (2, hash!("C")), - (3, hash!("D")), - (4, hash!("E")), - (5, hash!("F")), - (6, hash!("G")) - ); - let chain_tip = local_chain.tip().block_id(); - - let scenarios = [ - Scenario { - name: "coinbase tx cannot be in mempool and be unconfirmed", - tx_templates: &[ - TxTemplate { - tx_name: "unconfirmed_coinbase", - inputs: &[TxInTemplate::Coinbase], - outputs: &[TxOutTemplate::new(5000, Some(0))], - ..Default::default() - }, - TxTemplate { - tx_name: "confirmed_genesis", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(1))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - TxTemplate { - tx_name: "unconfirmed_conflict", - inputs: &[ - TxInTemplate::PrevTx("confirmed_genesis", 0), - TxInTemplate::PrevTx("unconfirmed_coinbase", 0) - ], - outputs: &[TxOutTemplate::new(20000, Some(2))], - ..Default::default() - }, - TxTemplate { - tx_name: "confirmed_conflict", - inputs: &[TxInTemplate::PrevTx("confirmed_genesis", 0)], - outputs: &[TxOutTemplate::new(20000, Some(3))], - anchors: &[block_id!(4, "E")], - ..Default::default() - }, - ], - exp_chain_txs: HashSet::from(["confirmed_genesis", "confirmed_conflict"]), - exp_chain_txouts: HashSet::from([("confirmed_genesis", 0), ("confirmed_conflict", 0)]), - exp_unspents: HashSet::from([("confirmed_conflict", 0)]), - exp_balance: Balance { - confirmed: Amount::from_sat(20000), - ..Default::default() - }, - }, - Scenario { - name: "2 unconfirmed txs with same last_seens conflict", - tx_templates: &[ - TxTemplate { - tx_name: "tx1", - outputs: &[TxOutTemplate::new(40000, Some(0))], - anchors: &[block_id!(1, "B")], - last_seen: None, - ..Default::default() - }, - TxTemplate { - tx_name: "tx_conflict_1", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(20000, Some(2))], - last_seen: Some(300), - ..Default::default() - }, - TxTemplate { - tx_name: "tx_conflict_2", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(30000, Some(3))], - last_seen: Some(300), - ..Default::default() - }, - ], - // the txgraph is going to pick tx_conflict_2 because of higher lexicographical txid - exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]), - exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_2", 0)]), - exp_unspents: HashSet::from([("tx_conflict_2", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::from_sat(30000), - untrusted_pending: Amount::ZERO, - confirmed: Amount::ZERO, - }, - }, - Scenario { - name: "2 unconfirmed txs with different last_seens conflict", - tx_templates: &[ - TxTemplate { - tx_name: "tx1", - inputs: &[TxInTemplate::Bogus], - outputs: 
&[TxOutTemplate::new(10000, Some(0)), TxOutTemplate::new(10000, Some(1))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - TxTemplate { - tx_name: "tx_conflict_1", - inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(20000, Some(2))], - last_seen: Some(200), - ..Default::default() - }, - TxTemplate { - tx_name: "tx_conflict_2", - inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::PrevTx("tx1", 1)], - outputs: &[TxOutTemplate::new(30000, Some(3))], - last_seen: Some(300), - ..Default::default() - }, - ], - exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]), - exp_chain_txouts: HashSet::from([("tx1", 0), ("tx1", 1), ("tx_conflict_2", 0)]), - exp_unspents: HashSet::from([("tx_conflict_2", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::from_sat(30000), - untrusted_pending: Amount::ZERO, - confirmed: Amount::ZERO, - }, - }, - Scenario { - name: "3 unconfirmed txs with different last_seens conflict", - tx_templates: &[ - TxTemplate { - tx_name: "tx1", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - TxTemplate { - tx_name: "tx_conflict_1", - inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(20000, Some(1))], - last_seen: Some(200), - ..Default::default() - }, - TxTemplate { - tx_name: "tx_conflict_2", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(30000, Some(2))], - last_seen: Some(300), - ..Default::default() - }, - TxTemplate { - tx_name: "tx_conflict_3", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(40000, Some(3))], - last_seen: Some(400), - ..Default::default() - }, - ], - exp_chain_txs: HashSet::from(["tx1", "tx_conflict_3"]), - exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_3", 0)]), - exp_unspents: HashSet::from([("tx_conflict_3", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::from_sat(40000), - untrusted_pending: Amount::ZERO, - confirmed: Amount::ZERO, - }, - }, - Scenario { - name: "unconfirmed tx conflicts with tx in orphaned block, orphaned higher last_seen", - tx_templates: &[ - TxTemplate { - tx_name: "tx1", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - TxTemplate { - tx_name: "tx_conflict_1", - inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(20000, Some(1))], - last_seen: Some(200), - ..Default::default() - }, - TxTemplate { - tx_name: "tx_orphaned_conflict", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(30000, Some(2))], - anchors: &[block_id!(4, "Orphaned Block")], - last_seen: Some(300), - }, - ], - exp_chain_txs: HashSet::from(["tx1", "tx_orphaned_conflict"]), - exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_orphaned_conflict", 0)]), - exp_unspents: HashSet::from([("tx_orphaned_conflict", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::from_sat(30000), - untrusted_pending: Amount::ZERO, - confirmed: Amount::ZERO, - }, - }, - Scenario { - name: "unconfirmed tx conflicts with tx in orphaned block, orphaned lower last_seen", - tx_templates: &[ - TxTemplate { - tx_name: "tx1", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - 
TxTemplate { - tx_name: "tx_conflict_1", - inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(20000, Some(1))], - last_seen: Some(200), - ..Default::default() - }, - TxTemplate { - tx_name: "tx_orphaned_conflict", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(30000, Some(2))], - anchors: &[block_id!(4, "Orphaned Block")], - last_seen: Some(100), - }, - ], - exp_chain_txs: HashSet::from(["tx1", "tx_conflict_1"]), - exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_1", 0)]), - exp_unspents: HashSet::from([("tx_conflict_1", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::from_sat(20000), - untrusted_pending: Amount::ZERO, - confirmed: Amount::ZERO, - }, - }, - Scenario { - name: "multiple unconfirmed txs conflict with a confirmed tx", - tx_templates: &[ - TxTemplate { - tx_name: "tx1", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - TxTemplate { - tx_name: "tx_conflict_1", - inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(20000, Some(1))], - last_seen: Some(200), - ..Default::default() - }, - TxTemplate { - tx_name: "tx_conflict_2", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(30000, Some(2))], - last_seen: Some(300), - ..Default::default() - }, - TxTemplate { - tx_name: "tx_conflict_3", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(40000, Some(3))], - last_seen: Some(400), - ..Default::default() - }, - TxTemplate { - tx_name: "tx_confirmed_conflict", - inputs: &[TxInTemplate::PrevTx("tx1", 0)], - outputs: &[TxOutTemplate::new(50000, Some(4))], - anchors: &[block_id!(1, "B")], - ..Default::default() - }, - ], - exp_chain_txs: HashSet::from(["tx1", "tx_confirmed_conflict"]), - exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_confirmed_conflict", 0)]), - exp_unspents: HashSet::from([("tx_confirmed_conflict", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::ZERO, - untrusted_pending: Amount::ZERO, - confirmed: Amount::from_sat(50000), - }, - }, - Scenario { - name: "B and B' spend A and conflict, C spends B, all the transactions are unconfirmed, B' has higher last_seen than B", - tx_templates: &[ - TxTemplate { - tx_name: "A", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - last_seen: Some(22), - ..Default::default() - }, - TxTemplate { - tx_name: "B", - inputs: &[TxInTemplate::PrevTx("A", 0)], - outputs: &[TxOutTemplate::new(20000, Some(1))], - last_seen: Some(23), - ..Default::default() - }, - TxTemplate { - tx_name: "B'", - inputs: &[TxInTemplate::PrevTx("A", 0)], - outputs: &[TxOutTemplate::new(20000, Some(2))], - last_seen: Some(24), - ..Default::default() - }, - TxTemplate { - tx_name: "C", - inputs: &[TxInTemplate::PrevTx("B", 0)], - outputs: &[TxOutTemplate::new(30000, Some(3))], - last_seen: Some(25), - ..Default::default() - }, - ], - // A, B, C will appear in the list methods - // This is because B' has a higher last seen than B, but C has a higher - // last seen than B', so B and C are considered canonical - exp_chain_txs: HashSet::from(["A", "B", "C"]), - exp_chain_txouts: HashSet::from([("A", 0), ("B", 0), ("C", 0)]), - exp_unspents: HashSet::from([("C", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::from_sat(30000), - untrusted_pending: Amount::ZERO, 
- confirmed: Amount::ZERO, - }, - }, - Scenario { - name: "B and B' spend A and conflict, C spends B, A and B' are in best chain", - tx_templates: &[ - TxTemplate { - tx_name: "A", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - TxTemplate { - tx_name: "B", - inputs: &[TxInTemplate::PrevTx("A", 0)], - outputs: &[TxOutTemplate::new(20000, Some(1))], - ..Default::default() - }, - TxTemplate { - tx_name: "B'", - inputs: &[TxInTemplate::PrevTx("A", 0)], - outputs: &[TxOutTemplate::new(20000, Some(2))], - anchors: &[block_id!(4, "E")], - ..Default::default() - }, - TxTemplate { - tx_name: "C", - inputs: &[TxInTemplate::PrevTx("B", 0)], - outputs: &[TxOutTemplate::new(30000, Some(3))], - ..Default::default() - }, - ], - // B and C should not appear in the list methods - exp_chain_txs: HashSet::from(["A", "B'"]), - exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]), - exp_unspents: HashSet::from([("B'", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::ZERO, - untrusted_pending: Amount::ZERO, - confirmed: Amount::from_sat(20000), - }, - }, - Scenario { - name: "B and B' spend A and conflict, C spends B', A and B' are in best chain", - tx_templates: &[ - TxTemplate { - tx_name: "A", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - anchors: &[block_id!(1, "B")], - ..Default::default() - }, - TxTemplate { - tx_name: "B", - inputs: &[TxInTemplate::PrevTx("A", 0)], - outputs: &[TxOutTemplate::new(20000, Some(1))], - last_seen: Some(2), - ..Default::default() - }, - TxTemplate { - tx_name: "B'", - inputs: &[TxInTemplate::PrevTx("A", 0)], - outputs: &[TxOutTemplate::new(20000, Some(2))], - anchors: &[block_id!(4, "E")], - ..Default::default() - }, - TxTemplate { - tx_name: "C", - inputs: &[TxInTemplate::PrevTx("B'", 0)], - outputs: &[TxOutTemplate::new(30000, Some(3))], - last_seen: Some(1), - ..Default::default() - }, - ], - // B should not appear in the list methods - exp_chain_txs: HashSet::from(["A", "B'", "C"]), - exp_chain_txouts: HashSet::from([ - ("A", 0), - ("B'", 0), - ("C", 0), - ]), - exp_unspents: HashSet::from([("C", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::from_sat(30000), - untrusted_pending: Amount::ZERO, - confirmed: Amount::ZERO, - }, - }, - Scenario { - name: "B and B' spend A and conflict, C spends both B and B', A is in best chain", - tx_templates: &[ - TxTemplate { - tx_name: "A", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - TxTemplate { - tx_name: "B", - inputs: &[TxInTemplate::PrevTx("A", 0), TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(20000, Some(1))], - last_seen: Some(200), - ..Default::default() - }, - TxTemplate { - tx_name: "B'", - inputs: &[TxInTemplate::PrevTx("A", 0)], - outputs: &[TxOutTemplate::new(30000, Some(2))], - last_seen: Some(300), - ..Default::default() - }, - TxTemplate { - tx_name: "C", - inputs: &[ - TxInTemplate::PrevTx("B", 0), - TxInTemplate::PrevTx("B'", 0), - ], - outputs: &[TxOutTemplate::new(20000, Some(3))], - ..Default::default() - }, - ], - // C should not appear in the list methods - exp_chain_txs: HashSet::from(["A", "B'"]), - exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]), - exp_unspents: HashSet::from([("B'", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::from_sat(30000), - 
untrusted_pending: Amount::ZERO, - confirmed: Amount::ZERO, - }, - }, - Scenario { - name: "B and B' spend A and conflict, B' is confirmed, C spends both B and B', A is in best chain", - tx_templates: &[ - TxTemplate { - tx_name: "A", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - TxTemplate { - tx_name: "B", - inputs: &[TxInTemplate::PrevTx("A", 0), TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(20000, Some(1))], - last_seen: Some(200), - ..Default::default() - }, - TxTemplate { - tx_name: "B'", - inputs: &[TxInTemplate::PrevTx("A", 0)], - outputs: &[TxOutTemplate::new(50000, Some(4))], - anchors: &[block_id!(1, "B")], - ..Default::default() - }, - TxTemplate { - tx_name: "C", - inputs: &[ - TxInTemplate::PrevTx("B", 0), - TxInTemplate::PrevTx("B'", 0), - ], - outputs: &[TxOutTemplate::new(20000, Some(5))], - ..Default::default() - }, - ], - // C should not appear in the list methods - exp_chain_txs: HashSet::from(["A", "B'"]), - exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]), - exp_unspents: HashSet::from([("B'", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::ZERO, - untrusted_pending: Amount::ZERO, - confirmed: Amount::from_sat(50000), - }, - }, - Scenario { - name: "B and B' spend A and conflict, B' is confirmed, C spends both B and B', D spends C, A is in best chain", - tx_templates: &[ - TxTemplate { - tx_name: "A", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(10000, Some(0))], - anchors: &[block_id!(1, "B")], - last_seen: None, - }, - TxTemplate { - tx_name: "B", - inputs: &[TxInTemplate::PrevTx("A", 0), TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(20000, Some(1))], - last_seen: Some(200), - ..Default::default() - }, - TxTemplate { - tx_name: "B'", - inputs: &[TxInTemplate::PrevTx("A", 0)], - outputs: &[TxOutTemplate::new(50000, Some(4))], - anchors: &[block_id!(1, "B")], - ..Default::default() - }, - TxTemplate { - tx_name: "C", - inputs: &[ - TxInTemplate::PrevTx("B", 0), - TxInTemplate::PrevTx("B'", 0), - ], - outputs: &[TxOutTemplate::new(20000, Some(5))], - ..Default::default() - }, - TxTemplate { - tx_name: "D", - inputs: &[TxInTemplate::PrevTx("C", 0)], - outputs: &[TxOutTemplate::new(20000, Some(6))], - ..Default::default() - }, - ], - // D should not appear in the list methods - exp_chain_txs: HashSet::from(["A", "B'"]), - exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]), - exp_unspents: HashSet::from([("B'", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, - trusted_pending: Amount::ZERO, - untrusted_pending: Amount::ZERO, - confirmed: Amount::from_sat(50000), - }, - }, - Scenario { - name: "transitively confirmed ancestors", - tx_templates: &[ - TxTemplate { - tx_name: "first", - inputs: &[TxInTemplate::Bogus], - outputs: &[TxOutTemplate::new(1000, Some(0))], - ..Default::default() - }, - TxTemplate { - tx_name: "second", - inputs: &[TxInTemplate::PrevTx("first", 0)], - outputs: &[TxOutTemplate::new(900, Some(0))], - ..Default::default() - }, - TxTemplate { - tx_name: "anchored", - inputs: &[TxInTemplate::PrevTx("second", 0)], - outputs: &[TxOutTemplate::new(800, Some(0))], - anchors: &[block_id!(3, "D")], - ..Default::default() - }, - ], - exp_chain_txs: HashSet::from(["first", "second", "anchored"]), - exp_chain_txouts: HashSet::from([("first", 0), ("second", 0), ("anchored", 0)]), - exp_unspents: HashSet::from([("anchored", 0)]), - exp_balance: Balance { - immature: Amount::ZERO, 
-                trusted_pending: Amount::ZERO,
-                untrusted_pending: Amount::ZERO,
-                confirmed: Amount::from_sat(800),
-            }
-        },
-        Scenario {
-            name: "transitively anchored txs should have priority over last seen",
-            tx_templates: &[
-                TxTemplate {
-                    tx_name: "root",
-                    inputs: &[TxInTemplate::Bogus],
-                    outputs: &[TxOutTemplate::new(10_000, Some(0))],
-                    anchors: &[block_id!(1, "B")],
-                    ..Default::default()
-                },
-                TxTemplate {
-                    tx_name: "last_seen_conflict",
-                    inputs: &[TxInTemplate::PrevTx("root", 0)],
-                    outputs: &[TxOutTemplate::new(9900, Some(1))],
-                    last_seen: Some(1000),
-                    ..Default::default()
-                },
-                TxTemplate {
-                    tx_name: "transitively_anchored_conflict",
-                    inputs: &[TxInTemplate::PrevTx("root", 0)],
-                    outputs: &[TxOutTemplate::new(9000, Some(1))],
-                    last_seen: Some(100),
-                    ..Default::default()
-                },
-                TxTemplate {
-                    tx_name: "anchored",
-                    inputs: &[TxInTemplate::PrevTx("transitively_anchored_conflict", 0)],
-                    outputs: &[TxOutTemplate::new(8000, Some(2))],
-                    anchors: &[block_id!(4, "E")],
-                    ..Default::default()
-                },
-            ],
-            exp_chain_txs: HashSet::from(["root", "transitively_anchored_conflict", "anchored"]),
-            exp_chain_txouts: HashSet::from([("root", 0), ("transitively_anchored_conflict", 0), ("anchored", 0)]),
-            exp_unspents: HashSet::from([("anchored", 0)]),
-            exp_balance: Balance {
-                confirmed: Amount::from_sat(8000),
-                ..Default::default()
-            }
-        },
-        Scenario {
-            name: "tx anchored in orphaned block and not seen in mempool should be canon",
-            tx_templates: &[
-                TxTemplate {
-                    tx_name: "root",
-                    inputs: &[TxInTemplate::Bogus],
-                    outputs: &[TxOutTemplate::new(10_000, None)],
-                    anchors: &[block_id!(1, "B")],
-                    ..Default::default()
-                },
-                TxTemplate {
-                    tx_name: "tx",
-                    inputs: &[TxInTemplate::PrevTx("root", 0)],
-                    outputs: &[TxOutTemplate::new(9000, Some(0))],
-                    anchors: &[block_id!(6, "not G")],
-                    ..Default::default()
-                },
-            ],
-            exp_chain_txs: HashSet::from(["root", "tx"]),
-            exp_chain_txouts: HashSet::from([("tx", 0)]),
-            exp_unspents: HashSet::from([("tx", 0)]),
-            exp_balance: Balance { trusted_pending: Amount::from_sat(9000), ..Default::default() }
-        }
-    ];
-
-    for scenario in scenarios {
-        let (tx_graph, spk_index, exp_tx_ids) = init_graph(scenario.tx_templates.iter());
-
-        let txs = tx_graph
-            .list_canonical_txs(&local_chain, chain_tip)
-            .map(|tx| tx.tx_node.txid)
-            .collect::<BTreeSet<_>>();
-        let exp_txs = scenario
-            .exp_chain_txs
-            .iter()
-            .map(|txid| *exp_tx_ids.get(txid).expect("txid must exist"))
-            .collect::<BTreeSet<_>>();
-        assert_eq!(
-            txs, exp_txs,
-            "\n[{}] 'list_canonical_txs' failed",
-            scenario.name
-        );
-
-        let txouts = tx_graph
-            .filter_chain_txouts(
-                &local_chain,
-                chain_tip,
-                spk_index.outpoints().iter().cloned(),
-            )
-            .map(|(_, full_txout)| full_txout.outpoint)
-            .collect::<BTreeSet<_>>();
-        let exp_txouts = scenario
-            .exp_chain_txouts
-            .iter()
-            .map(|(txid, vout)| OutPoint {
-                txid: *exp_tx_ids.get(txid).expect("txid must exist"),
-                vout: *vout,
-            })
-            .collect::<BTreeSet<_>>();
-        assert_eq!(
-            txouts, exp_txouts,
-            "\n[{}] 'filter_chain_txouts' failed",
-            scenario.name
-        );
-
-        let utxos = tx_graph
-            .filter_chain_unspents(
-                &local_chain,
-                chain_tip,
-                spk_index.outpoints().iter().cloned(),
-            )
-            .map(|(_, full_txout)| full_txout.outpoint)
-            .collect::<BTreeSet<_>>();
-        let exp_utxos = scenario
-            .exp_unspents
-            .iter()
-            .map(|(txid, vout)| OutPoint {
-                txid: *exp_tx_ids.get(txid).expect("txid must exist"),
-                vout: *vout,
-            })
-            .collect::<BTreeSet<_>>();
-        assert_eq!(
-            utxos, exp_utxos,
-            "\n[{}] 'filter_chain_unspents' failed",
-            scenario.name
-        );
-
-        let balance = tx_graph.balance(
-            &local_chain,
-            chain_tip,
-
spk_index.outpoints().iter().cloned(), - |_, spk: ScriptBuf| spk_index.index_of_spk(spk).is_some(), - ); - assert_eq!( - balance, scenario.exp_balance, - "\n[{}] 'balance' failed", - scenario.name - ); - } -} diff --git a/crates/core/CHANGELOG.md b/crates/core/CHANGELOG.md deleted file mode 100644 index 6252a80d..00000000 --- a/crates/core/CHANGELOG.md +++ /dev/null @@ -1,20 +0,0 @@ -# Changelog - -All notable changes to this project can be found here and in each release's git tag and can be viewed with `git tag -ln100 "v*"`. See also [DEVELOPMENT_CYCLE.md](../../DEVELOPMENT_CYCLE.md) for more details. - -Contributors do not need to change this file but do need to add changelog details in their PR descriptions. The person making the next release will collect changelog details from included PRs and edit this file prior to each release. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## Unreleased - -- test: add tests for `Merge` trait #1738 - -## [core-0.4.1] - -### Changed - -- Minor updates to fix new rustc 1.83.0 clippy warnings #1776 - -[core-0.4.1]: https://github.com/bitcoindevkit/bdk/releases/tag/core-0.4.1 diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml deleted file mode 100644 index fae5e512..00000000 --- a/crates/core/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "bdk_core" -version = "0.4.1" -edition = "2021" -rust-version = "1.63" -homepage = "https://bitcoindevkit.org" -repository = "https://github.com/bitcoindevkit/bdk" -documentation = "https://docs.rs/bdk_core" -description = "Collection of core structures for Bitcoin Dev Kit." -license = "MIT OR Apache-2.0" -readme = "README.md" - -[dependencies] -bitcoin = { version = "0.32", default-features = false } -serde = { version = "1", optional = true, features = ["derive", "rc"] } -hashbrown = { version = "0.14.5", optional = true, default-features = false, features = ["ahash", "inline-more"] } - -[features] -default = ["std"] -std = ["bitcoin/std"] -serde = ["dep:serde", "bitcoin/serde", "hashbrown?/serde"] - -[dev-dependencies] -bdk_chain = { path = "../chain" } -bdk_testenv = { path = "../testenv", default-features = false } diff --git a/crates/core/README.md b/crates/core/README.md deleted file mode 100644 index 08eae24f..00000000 --- a/crates/core/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# BDK Core - -This crate is a collection of core structures used by the bdk_chain, bdk_wallet, and bdk chain data source crates. diff --git a/crates/core/src/block_id.rs b/crates/core/src/block_id.rs deleted file mode 100644 index 2e64c9cb..00000000 --- a/crates/core/src/block_id.rs +++ /dev/null @@ -1,51 +0,0 @@ -use bitcoin::{hashes::Hash, BlockHash}; - -/// A reference to a block in the canonical chain. -#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)] -#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -pub struct BlockId { - /// The height of the block. - pub height: u32, - /// The hash of the block. 
-    pub hash: BlockHash,
-}
-
-impl Default for BlockId {
-    fn default() -> Self {
-        Self {
-            height: Default::default(),
-            hash: BlockHash::all_zeros(),
-        }
-    }
-}
-
-impl From<(u32, BlockHash)> for BlockId {
-    fn from((height, hash): (u32, BlockHash)) -> Self {
-        Self { height, hash }
-    }
-}
-
-impl From<BlockId> for (u32, BlockHash) {
-    fn from(block_id: BlockId) -> Self {
-        (block_id.height, block_id.hash)
-    }
-}
-
-impl From<(&u32, &BlockHash)> for BlockId {
-    fn from((height, hash): (&u32, &BlockHash)) -> Self {
-        Self {
-            height: *height,
-            hash: *hash,
-        }
-    }
-}
-
-/// Represents the confirmation block and time of a transaction.
-#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
-#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
-pub struct ConfirmationBlockTime {
-    /// The anchor block.
-    pub block_id: BlockId,
-    /// The confirmation time of the transaction being anchored.
-    pub confirmation_time: u64,
-}
diff --git a/crates/core/src/checkpoint.rs b/crates/core/src/checkpoint.rs
deleted file mode 100644
index 9bfdf2ee..00000000
--- a/crates/core/src/checkpoint.rs
+++ /dev/null
@@ -1,264 +0,0 @@
-use core::ops::RangeBounds;
-
-use alloc::sync::Arc;
-use bitcoin::BlockHash;
-
-use crate::BlockId;
-
-/// A checkpoint is a node of a reference-counted linked list of [`BlockId`]s.
-///
-/// Checkpoints are cheaply cloneable and are useful to find the agreement point between two sparse
-/// block chains.
-#[derive(Debug, Clone)]
-pub struct CheckPoint(Arc<CPInner>);
-
-/// The internal contents of [`CheckPoint`].
-#[derive(Debug, Clone)]
-struct CPInner {
    /// Block id (hash and height).
-    block: BlockId,
-    /// Previous checkpoint (if any).
-    prev: Option<Arc<CPInner>>,
-}
-
-/// When a `CPInner` is dropped we need to go back down the chain and manually remove any
-/// no-longer-referenced checkpoints. Letting the default Rust drop mechanism handle this
-/// leads to recursive logic and stack overflows.
-///
-/// https://github.com/bitcoindevkit/bdk/issues/1634
-impl Drop for CPInner {
-    fn drop(&mut self) {
-        // Take out `prev` so its `drop` won't be called when this drop is finished
-        let mut current = self.prev.take();
-        while let Some(arc_node) = current {
-            // Get rid of the Arc around `prev` if we're the only one holding a ref
-            // So the `drop` on it won't be called when the `Arc` is dropped.
-            //
-            // FIXME: When MSRV > 1.70.0 this should use Arc::into_inner which actually guarantees
-            // that no recursive drop calls can happen even with multiple threads.
-            match Arc::try_unwrap(arc_node).ok() {
-                Some(mut node) => {
-                    // Keep going backwards
-                    current = node.prev.take();
-                    // Don't call `drop` on `CPInner` since that risks it becoming recursive.
-                    core::mem::forget(node);
-                }
-                None => break,
-            }
-        }
-    }
-}
-
-impl PartialEq for CheckPoint {
-    fn eq(&self, other: &Self) -> bool {
-        let self_cps = self.iter().map(|cp| cp.block_id());
-        let other_cps = other.iter().map(|cp| cp.block_id());
-        self_cps.eq(other_cps)
-    }
-}
-
-impl CheckPoint {
-    /// Construct a new base block at the front of a linked list.
-    pub fn new(block: BlockId) -> Self {
-        Self(Arc::new(CPInner { block, prev: None }))
-    }
-
-    /// Construct a checkpoint from a list of [`BlockId`]s in ascending height order.
-    ///
-    /// # Errors
-    ///
-    /// This method will error if any of the following occurs:
-    ///
-    /// - The `blocks` iterator is empty, in which case, the error will be `None`.
-    /// - The `blocks` iterator is not in ascending height order.
-    /// - The `blocks` iterator contains multiple [`BlockId`]s of the same height.
-    ///
-    /// The error type is the last successful checkpoint constructed (if any).
-    pub fn from_block_ids(
-        block_ids: impl IntoIterator<Item = BlockId>,
-    ) -> Result<Self, Option<Self>> {
-        let mut blocks = block_ids.into_iter();
-        let mut acc = CheckPoint::new(blocks.next().ok_or(None)?);
-        for id in blocks {
-            acc = acc.push(id).map_err(Some)?;
-        }
-        Ok(acc)
-    }
-
-    /// Construct a checkpoint from the given `header` and block `height`.
-    ///
-    /// If `header` is of the genesis block, the checkpoint won't have a [`prev`] node. Otherwise,
-    /// we return a checkpoint linked with the previous block.
-    ///
-    /// [`prev`]: CheckPoint::prev
-    pub fn from_header(header: &bitcoin::block::Header, height: u32) -> Self {
-        let hash = header.block_hash();
-        let this_block_id = BlockId { height, hash };
-
-        let prev_height = match height.checked_sub(1) {
-            Some(h) => h,
-            None => return Self::new(this_block_id),
-        };
-
-        let prev_block_id = BlockId {
-            height: prev_height,
-            hash: header.prev_blockhash,
-        };
-
-        CheckPoint::new(prev_block_id)
-            .push(this_block_id)
-            .expect("must construct checkpoint")
-    }
-
-    /// Puts another checkpoint onto the linked list representing the blockchain.
-    ///
-    /// Returns an `Err(self)` if the block you are pushing on is not at a greater height than the
-    /// one you are pushing on to.
-    pub fn push(self, block: BlockId) -> Result<Self, Self> {
-        if self.height() < block.height {
-            Ok(Self(Arc::new(CPInner {
-                block,
-                prev: Some(self.0),
-            })))
-        } else {
-            Err(self)
-        }
-    }
-
-    /// Extends the checkpoint linked list by an iterator of block ids.
-    ///
-    /// Returns an `Err(self)` if there is a block which does not have a greater height than the
-    /// previous one.
-    pub fn extend(self, blocks: impl IntoIterator<Item = BlockId>) -> Result<Self, Self> {
-        let mut curr = self.clone();
-        for block in blocks {
-            curr = curr.push(block).map_err(|_| self.clone())?;
-        }
-        Ok(curr)
-    }
-
-    /// Get the [`BlockId`] of the checkpoint.
-    pub fn block_id(&self) -> BlockId {
-        self.0.block
-    }
-
-    /// Get the height of the checkpoint.
-    pub fn height(&self) -> u32 {
-        self.0.block.height
-    }
-
-    /// Get the block hash of the checkpoint.
-    pub fn hash(&self) -> BlockHash {
-        self.0.block.hash
-    }
-
-    /// Get the previous checkpoint in the chain.
-    pub fn prev(&self) -> Option<CheckPoint> {
-        self.0.prev.clone().map(CheckPoint)
-    }
-
-    /// Iterate from this checkpoint in descending height.
-    pub fn iter(&self) -> CheckPointIter {
-        self.clone().into_iter()
-    }
-
-    /// Get the checkpoint at `height`.
-    ///
-    /// Returns `None` if a checkpoint at `height` does not exist.
-    pub fn get(&self, height: u32) -> Option<CheckPoint> {
-        self.range(height..=height).next()
-    }
-
-    /// Iterate checkpoints over a height range.
-    ///
-    /// Note that we always iterate checkpoints in reverse height order (iteration starts at tip
-    /// height).
-    pub fn range<R>(&self, range: R) -> impl Iterator<Item = CheckPoint>
-    where
-        R: RangeBounds<u32>,
-    {
-        let start_bound = range.start_bound().cloned();
-        let end_bound = range.end_bound().cloned();
-        self.iter()
-            .skip_while(move |cp| match end_bound {
-                core::ops::Bound::Included(inc_bound) => cp.height() > inc_bound,
-                core::ops::Bound::Excluded(exc_bound) => cp.height() >= exc_bound,
-                core::ops::Bound::Unbounded => false,
-            })
-            .take_while(move |cp| match start_bound {
-                core::ops::Bound::Included(inc_bound) => cp.height() >= inc_bound,
-                core::ops::Bound::Excluded(exc_bound) => cp.height() > exc_bound,
-                core::ops::Bound::Unbounded => true,
-            })
-    }
-
-    /// Inserts `block_id` at its height within the chain.
-    ///
-    /// The effect of `insert` depends on whether a height already exists. If it doesn't, the
-    /// `block_id` we inserted and all pre-existing blocks higher than it will be re-inserted after
-    /// it. If the height already existed and has a conflicting block hash then it will be purged
-    /// along with all blocks following it. The returned chain will have a tip of the `block_id`
-    /// passed in. Of course, if the `block_id` was already present then this just returns `self`.
-    ///
-    /// # Panics
-    ///
-    /// This panics if called with a genesis block that differs from that of `self`.
-    #[must_use]
-    pub fn insert(self, block_id: BlockId) -> Self {
-        let mut cp = self.clone();
-        let mut tail = vec![];
-        let base = loop {
-            if cp.height() == block_id.height {
-                if cp.hash() == block_id.hash {
-                    return self;
-                }
-                assert_ne!(cp.height(), 0, "cannot replace genesis block");
-                // if we have a conflict we just return the inserted block because the tail is by
-                // implication invalid.
-                tail = vec![];
-                break cp.prev().expect("can't be called on genesis block");
-            }
-
-            if cp.height() < block_id.height {
-                break cp;
-            }
-
-            tail.push(cp.block_id());
-            cp = cp.prev().expect("will break before genesis block");
-        };
-
-        base.extend(core::iter::once(block_id).chain(tail.into_iter().rev()))
-            .expect("tail is in order")
-    }
-
-    /// This method tests for `self` and `other` to have equal internal pointers.
-    pub fn eq_ptr(&self, other: &Self) -> bool {
-        Arc::as_ptr(&self.0) == Arc::as_ptr(&other.0)
-    }
-}
-
-/// Iterates over checkpoints backwards.
-pub struct CheckPointIter {
-    current: Option<Arc<CPInner>>,
-}
-
-impl Iterator for CheckPointIter {
-    type Item = CheckPoint;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        let current = self.current.clone()?;
-        self.current.clone_from(&current.prev);
-        Some(CheckPoint(current))
-    }
-}
-
-impl IntoIterator for CheckPoint {
-    type Item = CheckPoint;
-    type IntoIter = CheckPointIter;
-
-    fn into_iter(self) -> Self::IntoIter {
-        CheckPointIter {
-            current: Some(self.0),
-        }
-    }
-}
diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs
deleted file mode 100644
index 95bebe90..00000000
--- a/crates/core/src/lib.rs
+++ /dev/null
@@ -1,74 +0,0 @@
-//! This crate is a collection of core structures for [Bitcoin Dev Kit].
-
-// only enables the `doc_cfg` feature when the `docsrs` configuration attribute is defined
-#![cfg_attr(docsrs, feature(doc_cfg))]
-#![cfg_attr(
-    docsrs,
-    doc(html_logo_url = "https://github.com/bitcoindevkit/bdk/raw/master/static/bdk.png")
-)]
-#![no_std]
-#![warn(missing_docs)]
-
-pub use bitcoin;
-
-#[allow(unused_imports)]
-#[macro_use]
-extern crate alloc;
-
-#[allow(unused_imports)]
-#[cfg(feature = "std")]
-#[macro_use]
-extern crate std;
-
-#[cfg(feature = "serde")]
-pub extern crate serde;
-
-#[cfg(all(not(feature = "std"), feature = "hashbrown"))]
-extern crate hashbrown;
-
-// When no-std, use `alloc`'s Hash collections. This is activated by default.
-#[cfg(all(not(feature = "std"), not(feature = "hashbrown")))]
-#[doc(hidden)]
-pub mod collections {
-    #![allow(dead_code)]
-    pub type HashSet<T> = alloc::collections::BTreeSet<T>;
-    pub type HashMap<K, V> = alloc::collections::BTreeMap<K, V>;
-    pub use alloc::collections::{btree_map as hash_map, *};
-}
-
-// When we have std, use all of `std`'s collections.
-#[cfg(all(feature = "std", not(feature = "hashbrown")))]
-#[doc(hidden)]
-pub mod collections {
-    pub use std::collections::{hash_map, *};
-}
-
-// With the special feature `hashbrown`, use `hashbrown`'s hash collections, and the rest from `alloc`.
-#[cfg(feature = "hashbrown")]
-#[doc(hidden)]
-pub mod collections {
-    #![allow(dead_code)]
-    pub type HashSet<T> = hashbrown::HashSet<T>;
-    pub type HashMap<K, V> = hashbrown::HashMap<K, V>;
-    pub use alloc::collections::*;
-    pub use hashbrown::hash_map;
-}
-
-/// A tuple of keychain index and `T` representing the indexed value.
-pub type Indexed<T> = (u32, T);
-/// A tuple of keychain `K`, derivation index (`u32`) and a `T` associated with them.
-pub type KeychainIndexed<K, T> = ((K, u32), T);
-
-mod block_id;
-pub use block_id::*;
-
-mod checkpoint;
-pub use checkpoint::*;
-
-mod tx_update;
-pub use tx_update::*;
-
-mod merge;
-pub use merge::*;
-
-pub mod spk_client;
diff --git a/crates/core/src/merge.rs b/crates/core/src/merge.rs
deleted file mode 100644
index 59c0d7f4..00000000
--- a/crates/core/src/merge.rs
+++ /dev/null
@@ -1,82 +0,0 @@
-use crate::alloc::vec::Vec;
-use crate::collections::{BTreeMap, BTreeSet};
-
-/// Trait that makes an object mergeable.
-pub trait Merge: Default {
-    /// Merge another object of the same type onto `self`.
-    fn merge(&mut self, other: Self);
-
-    /// Returns whether the structure is considered empty.
-    fn is_empty(&self) -> bool;
-
-    /// Take the value, replacing it with the default value.
-    fn take(&mut self) -> Option<Self> {
-        if self.is_empty() {
-            None
-        } else {
-            Some(core::mem::take(self))
-        }
-    }
-}
-
-impl<K: Ord, V> Merge for BTreeMap<K, V> {
-    fn merge(&mut self, other: Self) {
-        // We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
-        // Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
-        BTreeMap::extend(self, other)
-    }
-
-    fn is_empty(&self) -> bool {
-        BTreeMap::is_empty(self)
-    }
-}
-
-impl<T: Ord> Merge for BTreeSet<T> {
-    fn merge(&mut self, other: Self) {
-        // We use `extend` instead of `BTreeSet::append` due to performance issues with `append`.
-        // Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
-        BTreeSet::extend(self, other)
-    }
-
-    fn is_empty(&self) -> bool {
-        BTreeSet::is_empty(self)
-    }
-}
-
-impl<T> Merge for Vec<T> {
-    fn merge(&mut self, mut other: Self) {
-        Vec::append(self, &mut other)
-    }
-
-    fn is_empty(&self) -> bool {
-        Vec::is_empty(self)
-    }
-}
-
-macro_rules! impl_merge_for_tuple {
-    ($($a:ident $b:tt)*) => {
-        impl<$($a),*> Merge for ($($a,)*) where $($a: Merge),* {
-
-            fn merge(&mut self, _other: Self) {
-                $(Merge::merge(&mut self.$b, _other.$b) );*
-            }
-
-            fn is_empty(&self) -> bool {
-                $(Merge::is_empty(&self.$b) && )* true
-            }
-        }
-    }
-}
-
-impl_merge_for_tuple!();
-impl_merge_for_tuple!(T0 0);
-impl_merge_for_tuple!(T0 0 T1 1);
-impl_merge_for_tuple!(T0 0 T1 1 T2 2);
-impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3);
-impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4);
-impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5);
-impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6);
-impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7);
-impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7 T8 8);
-impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7 T8 8 T9 9);
-impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7 T8 8 T9 9 T10 10);
diff --git a/crates/core/src/spk_client.rs b/crates/core/src/spk_client.rs
deleted file mode 100644
index b6a8e020..00000000
--- a/crates/core/src/spk_client.rs
+++ /dev/null
@@ -1,632 +0,0 @@
-//! Helper types for spk-based blockchain clients.
-use crate::{
-    alloc::{boxed::Box, collections::VecDeque, vec::Vec},
-    collections::{BTreeMap, HashMap, HashSet},
-    CheckPoint, ConfirmationBlockTime, Indexed,
-};
-use bitcoin::{OutPoint, Script, ScriptBuf, Txid};
-
-type InspectSync<I> = dyn FnMut(SyncItem<I>, SyncProgress) + Send + 'static;
-
-type InspectFullScan<K> = dyn FnMut(K, u32, &Script) + Send + 'static;
-
-/// An item reported to the [`inspect`](SyncRequestBuilder::inspect) closure of [`SyncRequest`].
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub enum SyncItem<'i, I> {
-    /// Script pubkey sync item.
-    Spk(I, &'i Script),
-    /// Txid sync item.
-    Txid(Txid),
-    /// Outpoint sync item.
-    OutPoint(OutPoint),
-}
-
-impl<I: core::fmt::Debug + core::any::Any> core::fmt::Display for SyncItem<'_, I> {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match self {
-            SyncItem::Spk(i, spk) => {
-                if (i as &dyn core::any::Any).is::<()>() {
-                    write!(f, "script '{}'", spk)
-                } else {
-                    write!(f, "script {:?} '{}'", i, spk)
-                }
-            }
-            SyncItem::Txid(txid) => write!(f, "txid '{}'", txid),
-            SyncItem::OutPoint(op) => write!(f, "outpoint '{}'", op),
-        }
-    }
-}
-
-/// The progress of [`SyncRequest`].
-#[derive(Debug, Clone)]
-pub struct SyncProgress {
-    /// Script pubkeys consumed by the request.
-    pub spks_consumed: usize,
-    /// Script pubkeys remaining in the request.
-    pub spks_remaining: usize,
-    /// Txids consumed by the request.
-    pub txids_consumed: usize,
-    /// Txids remaining in the request.
-    pub txids_remaining: usize,
-    /// Outpoints consumed by the request.
-    pub outpoints_consumed: usize,
-    /// Outpoints remaining in the request.
-    pub outpoints_remaining: usize,
-}
-
-impl SyncProgress {
-    /// Total items, consumed and remaining, of the request.
-    pub fn total(&self) -> usize {
-        self.total_spks() + self.total_txids() + self.total_outpoints()
-    }
-
-    /// Total script pubkeys, consumed and remaining, of the request.
-    pub fn total_spks(&self) -> usize {
-        self.spks_consumed + self.spks_remaining
-    }
-
-    /// Total txids, consumed and remaining, of the request.
-    pub fn total_txids(&self) -> usize {
-        self.txids_consumed + self.txids_remaining
-    }
-
-    /// Total outpoints, consumed and remaining, of the request.
-    pub fn total_outpoints(&self) -> usize {
-        self.outpoints_consumed + self.outpoints_remaining
-    }
-
-    /// Total consumed items of the request.
- pub fn consumed(&self) -> usize { - self.spks_consumed + self.txids_consumed + self.outpoints_consumed - } - - /// Total remaining items of the request. - pub fn remaining(&self) -> usize { - self.spks_remaining + self.txids_remaining + self.outpoints_remaining - } -} - -/// [`Script`] with expected [`Txid`] histories. -#[derive(Debug, Clone)] -pub struct SpkWithExpectedTxids { - /// Script pubkey. - pub spk: ScriptBuf, - - /// [`Txid`]s that we expect to appear in the chain source's spk history response. - /// - /// Any transaction listed here that is missing from the spk history response should be - /// considered evicted from the mempool. - pub expected_txids: HashSet, -} - -impl From for SpkWithExpectedTxids { - fn from(spk: ScriptBuf) -> Self { - Self { - spk, - expected_txids: HashSet::new(), - } - } -} - -/// Builds a [`SyncRequest`]. -/// -/// Construct with [`SyncRequest::builder`]. -#[must_use] -pub struct SyncRequestBuilder { - inner: SyncRequest, -} - -impl SyncRequestBuilder<()> { - /// Add [`Script`]s that will be synced against. - pub fn spks(self, spks: impl IntoIterator) -> Self { - self.spks_with_indexes(spks.into_iter().map(|spk| ((), spk))) - } -} - -impl SyncRequestBuilder { - /// Set the initial chain tip for the sync request. - /// - /// This is used to update [`LocalChain`](../../bdk_chain/local_chain/struct.LocalChain.html). - pub fn chain_tip(mut self, cp: CheckPoint) -> Self { - self.inner.chain_tip = Some(cp); - self - } - - /// Add [`Script`]s coupled with associated indexes that will be synced against. - /// - /// # Example - /// - /// Sync revealed script pubkeys obtained from a - /// [`KeychainTxOutIndex`](../../bdk_chain/indexer/keychain_txout/struct.KeychainTxOutIndex.html). - /// - /// ```rust - /// # use bdk_chain::spk_client::SyncRequest; - /// # use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex; - /// # use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; - /// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only(); - /// # let (descriptor_a,_) = Descriptor::::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap(); - /// # let (descriptor_b,_) = Descriptor::::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap(); - /// let mut indexer = KeychainTxOutIndex::<&'static str>::default(); - /// indexer.insert_descriptor("descriptor_a", descriptor_a)?; - /// indexer.insert_descriptor("descriptor_b", descriptor_b)?; - /// - /// /* Assume that the caller does more mutations to the `indexer` here... */ - /// - /// // Reveal spks for "descriptor_a", then build a sync request. Each spk will be indexed with - /// // `u32`, which represents the derivation index of the associated spk from "descriptor_a". - /// let (newly_revealed_spks, _changeset) = indexer - /// .reveal_to_target("descriptor_a", 21) - /// .expect("keychain must exist"); - /// let _request = SyncRequest::builder() - /// .spks_with_indexes(newly_revealed_spks) - /// .build(); - /// - /// // Sync all revealed spks in the indexer. This time, spks may be derived from different - /// // keychains. Each spk will be indexed with `(&'static str, u32)` where `&'static str` is - /// // the keychain identifier and `u32` is the derivation index. 
- /// let all_revealed_spks = indexer.revealed_spks(..); - /// let _request = SyncRequest::builder() - /// .spks_with_indexes(all_revealed_spks) - /// .build(); - /// # Ok::<_, bdk_chain::keychain_txout::InsertDescriptorError<_>>(()) - /// ``` - pub fn spks_with_indexes(mut self, spks: impl IntoIterator) -> Self { - self.inner.spks.extend(spks); - self - } - - /// Add transactions that are expected to exist under the given spks. - /// - /// This is useful for detecting a malicious replacement of an incoming transaction. - pub fn expected_spk_txids(mut self, txs: impl IntoIterator) -> Self { - for (spk, txid) in txs { - self.inner - .spk_expected_txids - .entry(spk) - .or_default() - .insert(txid); - } - self - } - - /// Add [`Txid`]s that will be synced against. - pub fn txids(mut self, txids: impl IntoIterator) -> Self { - self.inner.txids.extend(txids); - self - } - - /// Add [`OutPoint`]s that will be synced against. - pub fn outpoints(mut self, outpoints: impl IntoIterator) -> Self { - self.inner.outpoints.extend(outpoints); - self - } - - /// Set the closure that will inspect every sync item visited. - pub fn inspect(mut self, inspect: F) -> Self - where - F: FnMut(SyncItem, SyncProgress) + Send + 'static, - { - self.inner.inspect = Box::new(inspect); - self - } - - /// Build the [`SyncRequest`]. - pub fn build(self) -> SyncRequest { - self.inner - } -} - -/// Data required to perform a spk-based blockchain client sync. -/// -/// A client sync fetches relevant chain data for a known list of scripts, transaction ids and -/// outpoints. The sync process also updates the chain from the given -/// [`chain_tip`](SyncRequestBuilder::chain_tip) (if provided). -/// -/// ```rust -/// # use bdk_chain::{bitcoin::{hashes::Hash, ScriptBuf}, local_chain::LocalChain}; -/// # use bdk_chain::spk_client::SyncRequest; -/// # let (local_chain, _) = LocalChain::from_genesis_hash(Hash::all_zeros()); -/// # let scripts = [ScriptBuf::default(), ScriptBuf::default()]; -/// // Construct a sync request. -/// let sync_request = SyncRequest::builder() -/// // Provide chain tip of the local wallet. -/// .chain_tip(local_chain.tip()) -/// // Provide list of scripts to scan for transactions against. -/// .spks(scripts) -/// // This is called for every synced item. -/// .inspect(|item, progress| println!("{} (remaining: {})", item, progress.remaining())) -/// // Finish constructing the sync request. -/// .build(); -/// ``` -#[must_use] -pub struct SyncRequest { - start_time: u64, - chain_tip: Option, - spks: VecDeque<(I, ScriptBuf)>, - spks_consumed: usize, - spk_expected_txids: HashMap>, - txids: VecDeque, - txids_consumed: usize, - outpoints: VecDeque, - outpoints_consumed: usize, - inspect: Box>, -} - -impl From> for SyncRequest { - fn from(builder: SyncRequestBuilder) -> Self { - builder.inner - } -} - -impl SyncRequest { - /// Start building [`SyncRequest`] with a given `start_time`. - /// - /// `start_time` specifies the start time of sync. Chain sources can use this value to set - /// [`TxUpdate::seen_ats`](crate::TxUpdate::seen_ats) for mempool transactions. A transaction - /// without any `seen_ats` is assumed to be unseen in the mempool. - /// - /// Use [`SyncRequest::builder`] to use the current timestamp as `start_time` (this requires - /// `feature = "std"`). 
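For context, a minimal sketch of how the builder API documented here composes end to end; the script, txid, and timestamp are placeholder values, not taken from the removed code:

```rust
use bdk_core::bitcoin::{hashes::Hash, ScriptBuf, Txid};
use bdk_core::spk_client::SyncRequest;

fn main() {
    // Placeholder inputs; a real caller would use wallet-derived values.
    let spk = ScriptBuf::new();
    let expected_txid = Txid::all_zeros();

    let request = SyncRequest::builder_at(1_700_000_000)
        .spks([spk.clone()])
        .expected_spk_txids([(spk, expected_txid)])
        .inspect(|item, progress| {
            // Called for every visited sync item with up-to-date progress.
            println!("{item}: {} of {} done", progress.consumed(), progress.total());
        })
        .build();
    assert_eq!(request.start_time(), 1_700_000_000);
}
```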
- pub fn builder_at(start_time: u64) -> SyncRequestBuilder { - SyncRequestBuilder { - inner: Self { - start_time, - chain_tip: None, - spks: VecDeque::new(), - spks_consumed: 0, - spk_expected_txids: HashMap::new(), - txids: VecDeque::new(), - txids_consumed: 0, - outpoints: VecDeque::new(), - outpoints_consumed: 0, - inspect: Box::new(|_, _| ()), - }, - } - } - - /// Start building [`SyncRequest`] with the current timestamp as the `start_time`. - /// - /// Use [`SyncRequest::builder_at`] to manually set the `start_time`, or if `feature = "std"` - /// is not available. - #[cfg(feature = "std")] - #[cfg_attr(docsrs, doc(cfg(feature = "std")))] - pub fn builder() -> SyncRequestBuilder { - let start_time = std::time::UNIX_EPOCH - .elapsed() - .expect("failed to get current timestamp") - .as_secs(); - Self::builder_at(start_time) - } - - /// When the sync-request was initiated. - pub fn start_time(&self) -> u64 { - self.start_time - } - - /// Get the [`SyncProgress`] of this request. - pub fn progress(&self) -> SyncProgress { - SyncProgress { - spks_consumed: self.spks_consumed, - spks_remaining: self.spks.len(), - txids_consumed: self.txids_consumed, - txids_remaining: self.txids.len(), - outpoints_consumed: self.outpoints_consumed, - outpoints_remaining: self.outpoints.len(), - } - } - - /// Get the chain tip [`CheckPoint`] of this request (if any). - pub fn chain_tip(&self) -> Option { - self.chain_tip.clone() - } - - /// Advances the sync request and returns the next [`ScriptBuf`] with corresponding [`Txid`] - /// history. - /// - /// Returns [`None`] when there are no more scripts remaining in the request. - pub fn next_spk_with_expected_txids(&mut self) -> Option { - let (i, next_spk) = self.spks.pop_front()?; - self.spks_consumed += 1; - self._call_inspect(SyncItem::Spk(i, next_spk.as_script())); - let spk_history = self - .spk_expected_txids - .get(&next_spk) - .cloned() - .unwrap_or_default(); - Some(SpkWithExpectedTxids { - spk: next_spk, - expected_txids: spk_history, - }) - } - - /// Advances the sync request and returns the next [`Txid`]. - /// - /// Returns [`None`] when there are no more txids remaining in the request. - pub fn next_txid(&mut self) -> Option { - let txid = self.txids.pop_front()?; - self.txids_consumed += 1; - self._call_inspect(SyncItem::Txid(txid)); - Some(txid) - } - - /// Advances the sync request and returns the next [`OutPoint`]. - /// - /// Returns [`None`] when there are no more outpoints in the request. - pub fn next_outpoint(&mut self) -> Option { - let outpoint = self.outpoints.pop_front()?; - self.outpoints_consumed += 1; - self._call_inspect(SyncItem::OutPoint(outpoint)); - Some(outpoint) - } - - /// Iterate over [`ScriptBuf`]s with corresponding [`Txid`] histories contained in this request. - pub fn iter_spks_with_expected_txids( - &mut self, - ) -> impl ExactSizeIterator + '_ { - SyncIter::::new(self) - } - - /// Iterate over [`Txid`]s contained in this request. - pub fn iter_txids(&mut self) -> impl ExactSizeIterator + '_ { - SyncIter::::new(self) - } - - /// Iterate over [`OutPoint`]s contained in this request. - pub fn iter_outpoints(&mut self) -> impl ExactSizeIterator + '_ { - SyncIter::::new(self) - } - - fn _call_inspect(&mut self, item: SyncItem) { - let progress = self.progress(); - (*self.inspect)(item, progress); - } -} - -/// Data returned from a spk-based blockchain client sync. -/// -/// See also [`SyncRequest`]. -#[must_use] -#[derive(Debug)] -pub struct SyncResponse { - /// Relevant transaction data discovered during the scan. 
- pub tx_update: crate::TxUpdate, - /// Changes to the chain discovered during the scan. - pub chain_update: Option, -} - -impl Default for SyncResponse { - fn default() -> Self { - Self { - tx_update: Default::default(), - chain_update: Default::default(), - } - } -} - -/// Builds a [`FullScanRequest`]. -/// -/// Construct with [`FullScanRequest::builder`]. -#[must_use] -pub struct FullScanRequestBuilder { - inner: FullScanRequest, -} - -impl FullScanRequestBuilder { - /// Set the initial chain tip for the full scan request. - /// - /// This is used to update [`LocalChain`](../../bdk_chain/local_chain/struct.LocalChain.html). - pub fn chain_tip(mut self, tip: CheckPoint) -> Self { - self.inner.chain_tip = Some(tip); - self - } - - /// Set the spk iterator for a given `keychain`. - pub fn spks_for_keychain( - mut self, - keychain: K, - spks: impl IntoIterator> + Send + 'static>, - ) -> Self { - self.inner - .spks_by_keychain - .insert(keychain, Box::new(spks.into_iter())); - self - } - - /// Set the closure that will inspect every sync item visited. - pub fn inspect(mut self, inspect: F) -> Self - where - F: FnMut(K, u32, &Script) + Send + 'static, - { - self.inner.inspect = Box::new(inspect); - self - } - - /// Build the [`FullScanRequest`]. - pub fn build(self) -> FullScanRequest { - self.inner - } -} - -/// Data required to perform a spk-based blockchain client full scan. -/// -/// A client full scan iterates through all the scripts for the given keychains, fetching relevant -/// data until some stop gap number of scripts is found that have no data. This operation is -/// generally only used when importing or restoring previously used keychains in which the list of -/// used scripts is not known. The full scan process also updates the chain from the given -/// [`chain_tip`](FullScanRequestBuilder::chain_tip) (if provided). -#[must_use] -pub struct FullScanRequest { - start_time: u64, - chain_tip: Option, - spks_by_keychain: BTreeMap> + Send>>, - inspect: Box>, -} - -impl From> for FullScanRequest { - fn from(builder: FullScanRequestBuilder) -> Self { - builder.inner - } -} - -impl FullScanRequest { - /// Start building a [`FullScanRequest`] with a given `start_time`. - /// - /// `start_time` specifies the start time of sync. Chain sources can use this value to set - /// [`TxUpdate::seen_ats`](crate::TxUpdate::seen_ats) for mempool transactions. A transaction - /// without any `seen_ats` is assumed to be unseen in the mempool. - /// - /// Use [`FullScanRequest::builder`] to use the current timestamp as `start_time` (this - /// requires `feature = "std`). - pub fn builder_at(start_time: u64) -> FullScanRequestBuilder { - FullScanRequestBuilder { - inner: Self { - start_time, - chain_tip: None, - spks_by_keychain: BTreeMap::new(), - inspect: Box::new(|_, _, _| ()), - }, - } - } - - /// Start building a [`FullScanRequest`] with the current timestamp as the `start_time`. - /// - /// Use [`FullScanRequest::builder_at`] to manually set the `start_time`, or if `feature = - /// "std"` is not available. - #[cfg(feature = "std")] - #[cfg_attr(docsrs, doc(cfg(feature = "std")))] - pub fn builder() -> FullScanRequestBuilder { - let start_time = std::time::UNIX_EPOCH - .elapsed() - .expect("failed to get current timestamp") - .as_secs(); - Self::builder_at(start_time) - } - - /// When the full-scan-request was initiated. - pub fn start_time(&self) -> u64 { - self.start_time - } - - /// Get the chain tip [`CheckPoint`] of this request (if any). 
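For context, a minimal sketch of assembling a full scan request with the builder above; the keychain names and spk iterators are stubs standing in for descriptor-derived scripts:

```rust
use bdk_core::bitcoin::ScriptBuf;
use bdk_core::spk_client::FullScanRequest;

fn main() {
    // Stub spk iterators; real callers plug in descriptor-derived spks.
    let external = (0u32..).map(|i| (i, ScriptBuf::new())).take(10);
    let internal = (0u32..).map(|i| (i, ScriptBuf::new())).take(10);

    let request = FullScanRequest::builder_at(1_700_000_000)
        .spks_for_keychain("external", external)
        .spks_for_keychain("internal", internal)
        .build();
    assert_eq!(request.keychains(), vec!["external", "internal"]);
}
```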
- pub fn chain_tip(&self) -> Option { - self.chain_tip.clone() - } - - /// List all keychains contained in this request. - pub fn keychains(&self) -> Vec { - self.spks_by_keychain.keys().cloned().collect() - } - - /// Advances the full scan request and returns the next indexed [`ScriptBuf`] of the given - /// `keychain`. - pub fn next_spk(&mut self, keychain: K) -> Option> { - self.iter_spks(keychain).next() - } - - /// Iterate over indexed [`ScriptBuf`]s contained in this request of the given `keychain`. - pub fn iter_spks(&mut self, keychain: K) -> impl Iterator> + '_ { - let spks = self.spks_by_keychain.get_mut(&keychain); - let inspect = &mut self.inspect; - KeychainSpkIter { - keychain, - spks, - inspect, - } - } -} - -/// Data returned from a spk-based blockchain client full scan. -/// -/// See also [`FullScanRequest`]. -#[must_use] -#[derive(Debug)] -pub struct FullScanResponse { - /// Relevant transaction data discovered during the scan. - pub tx_update: crate::TxUpdate, - /// Last active indices for the corresponding keychains (`K`). An index is active if it had a - /// transaction associated with the script pubkey at that index. - pub last_active_indices: BTreeMap, - /// Changes to the chain discovered during the scan. - pub chain_update: Option, -} - -impl Default for FullScanResponse { - fn default() -> Self { - Self { - tx_update: Default::default(), - chain_update: Default::default(), - last_active_indices: Default::default(), - } - } -} - -struct KeychainSpkIter<'r, K> { - keychain: K, - spks: Option<&'r mut Box> + Send>>, - inspect: &'r mut Box>, -} - -impl Iterator for KeychainSpkIter<'_, K> { - type Item = Indexed; - - fn next(&mut self) -> Option { - let (i, spk) = self.spks.as_mut()?.next()?; - (*self.inspect)(self.keychain.clone(), i, &spk); - Some((i, spk)) - } -} - -struct SyncIter<'r, I, Item> { - request: &'r mut SyncRequest, - marker: core::marker::PhantomData, -} - -impl<'r, I, Item> SyncIter<'r, I, Item> { - fn new(request: &'r mut SyncRequest) -> Self { - Self { - request, - marker: core::marker::PhantomData, - } - } -} - -impl<'r, I, Item> ExactSizeIterator for SyncIter<'r, I, Item> where SyncIter<'r, I, Item>: Iterator {} - -impl Iterator for SyncIter<'_, I, SpkWithExpectedTxids> { - type Item = SpkWithExpectedTxids; - - fn next(&mut self) -> Option { - self.request.next_spk_with_expected_txids() - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.request.spks.len(); - (remaining, Some(remaining)) - } -} - -impl Iterator for SyncIter<'_, I, Txid> { - type Item = Txid; - - fn next(&mut self) -> Option { - self.request.next_txid() - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.request.txids.len(); - (remaining, Some(remaining)) - } -} - -impl Iterator for SyncIter<'_, I, OutPoint> { - type Item = OutPoint; - - fn next(&mut self) -> Option { - self.request.next_outpoint() - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.request.outpoints.len(); - (remaining, Some(remaining)) - } -} diff --git a/crates/core/src/tx_update.rs b/crates/core/src/tx_update.rs deleted file mode 100644 index 89a224fb..00000000 --- a/crates/core/src/tx_update.rs +++ /dev/null @@ -1,94 +0,0 @@ -use crate::collections::{BTreeMap, BTreeSet, HashSet}; -use alloc::{sync::Arc, vec::Vec}; -use bitcoin::{OutPoint, Transaction, TxOut, Txid}; - -/// Data object used to communicate updates about relevant transactions from some chain data source -/// to the core model (usually a `bdk_chain::TxGraph`). 
-///
-/// ```rust
-/// use bdk_core::TxUpdate;
-/// # use std::sync::Arc;
-/// # use bitcoin::{Transaction, transaction::Version, absolute::LockTime};
-/// # let version = Version::ONE;
-/// # let lock_time = LockTime::ZERO;
-/// # let tx = Arc::new(Transaction { input: vec![], output: vec![], version, lock_time });
-/// # let txid = tx.compute_txid();
-/// # let anchor = ();
-/// let mut tx_update = TxUpdate::default();
-/// tx_update.txs.push(tx);
-/// tx_update.anchors.insert((anchor, txid));
-/// ```
-#[derive(Debug, Clone)]
-#[non_exhaustive]
-pub struct TxUpdate<A> {
-    /// Full transactions. These are transactions that were determined to be relevant to the
-    /// wallet given the request.
-    pub txs: Vec<Arc<Transaction>>,
-
-    /// Floating txouts. These are `TxOut`s whose full transaction is not included in `txs`,
-    /// because only the output itself is relevant. They are often used to help determine the fee
-    /// of a wallet transaction.
-    pub txouts: BTreeMap<OutPoint, TxOut>,
-
-    /// Transaction anchors. An anchor tells us the position in the chain where a transaction was
-    /// confirmed.
-    pub anchors: BTreeSet<(A, Txid)>,
-
-    /// When transactions were seen in the mempool.
-    ///
-    /// An unconfirmed transaction can only be canonical with a `seen_at` value. It is the
-    /// responsibility of the chain-source to include the `seen_at` values for unconfirmed
-    /// (unanchored) transactions.
-    ///
-    /// [`FullScanRequest::start_time`](crate::spk_client::FullScanRequest::start_time) or
-    /// [`SyncRequest::start_time`](crate::spk_client::SyncRequest::start_time) can be used to
-    /// provide the `seen_at` value.
-    pub seen_ats: HashSet<(Txid, u64)>,
-
-    /// When transactions were discovered to be missing (evicted) from the mempool.
-    ///
-    /// [`SyncRequest::start_time`](crate::spk_client::SyncRequest::start_time) can be used to
-    /// provide the `evicted_at` value.
-    pub evicted_ats: HashSet<(Txid, u64)>,
-}
-
-impl<A> Default for TxUpdate<A> {
-    fn default() -> Self {
-        Self {
-            txs: Default::default(),
-            txouts: Default::default(),
-            anchors: Default::default(),
-            seen_ats: Default::default(),
-            evicted_ats: Default::default(),
-        }
-    }
-}
-
-impl<A: Ord> TxUpdate<A> {
-    /// Transforms the [`TxUpdate`] to have `anchors` (`A`) of another type (`A2`).
-    ///
-    /// This takes in a closure with signature `FnMut(A) -> A2` which is called for each anchor to
-    /// transform it.
-    pub fn map_anchors<A2: Ord, F: FnMut(A) -> A2>(self, mut map: F) -> TxUpdate<A2> {
-        TxUpdate {
-            txs: self.txs,
-            txouts: self.txouts,
-            anchors: self
-                .anchors
-                .into_iter()
-                .map(|(a, txid)| (map(a), txid))
-                .collect(),
-            seen_ats: self.seen_ats,
-            evicted_ats: self.evicted_ats,
-        }
-    }
-
-    /// Extend this update with `other`.
-    pub fn extend(&mut self, other: TxUpdate<A>) {
-        self.txs.extend(other.txs);
-        self.txouts.extend(other.txouts);
-        self.anchors.extend(other.anchors);
-        self.seen_ats.extend(other.seen_ats);
-        self.evicted_ats.extend(other.evicted_ats);
-    }
-}
diff --git a/crates/core/tests/test_checkpoint.rs b/crates/core/tests/test_checkpoint.rs
deleted file mode 100644
index a5194e5b..00000000
--- a/crates/core/tests/test_checkpoint.rs
+++ /dev/null
@@ -1,53 +0,0 @@
-use bdk_core::{BlockId, CheckPoint};
-use bdk_testenv::{block_id, hash};
-
-/// Inserting a block that already exists in the checkpoint chain must always succeed.
-#[test]
-fn checkpoint_insert_existing() {
-    let blocks = &[
-        block_id!(0, "genesis"),
-        block_id!(1, "A"),
-        block_id!(2, "B"),
-        block_id!(3, "C"),
-    ];
-
-    // Index `i` allows us to test with chains of different length.
- // Index `j` allows us to test inserting different block heights. - for i in 0..blocks.len() { - let cp_chain = CheckPoint::from_block_ids(blocks[..=i].iter().copied()) - .expect("must construct valid chain"); - - for j in 0..=i { - let block_to_insert = cp_chain - .get(j as u32) - .expect("cp of height must exist") - .block_id(); - let new_cp_chain = cp_chain.clone().insert(block_to_insert); - - assert_eq!( - new_cp_chain, cp_chain, - "must not divert from original chain" - ); - assert!(new_cp_chain.eq_ptr(&cp_chain), "pointers must still match"); - } - } -} - -#[test] -fn checkpoint_destruction_is_sound() { - // Prior to the fix in https://github.com/bitcoindevkit/bdk/pull/1731 - // this could have caused a stack overflow due to drop recursion in Arc. - // We test that a long linked list can clean itself up without blowing - // out the stack. - let mut cp = CheckPoint::new(BlockId { - height: 0, - hash: hash!("g"), - }); - let end = 10_000; - for height in 1u32..end { - let hash = bitcoin::hashes::Hash::hash(height.to_be_bytes().as_slice()); - let block = BlockId { height, hash }; - cp = cp.push(block).unwrap(); - } - assert_eq!(cp.iter().count() as u32, end); -} diff --git a/crates/core/tests/test_merge.rs b/crates/core/tests/test_merge.rs deleted file mode 100644 index 8bfd6a24..00000000 --- a/crates/core/tests/test_merge.rs +++ /dev/null @@ -1,119 +0,0 @@ -use std::collections::{BTreeMap, BTreeSet}; - -use bdk_core::Merge; - -#[test] -fn test_btree_map_merge() { - let mut map1: BTreeMap = BTreeMap::new(); - map1.insert(1, "a"); - let mut map2: BTreeMap = BTreeMap::new(); - map2.insert(2, "b"); - - map1.merge(map2); - - let expected: BTreeMap = BTreeMap::from([(1, "a"), (2, "b")]); - assert_eq!(map1, expected); -} - -#[test] -fn test_btree_set_merge() { - let mut set1: BTreeSet = BTreeSet::new(); - set1.insert(1); - let mut set2: BTreeSet = BTreeSet::new(); - set2.insert(2); - - set1.merge(set2); - - let expected: BTreeSet = BTreeSet::from([1, 2]); - assert_eq!(set1, expected); -} - -#[test] -fn test_vec_merge() { - let mut vec1 = vec![1, 2, 3]; - let vec2 = vec![4, 5, 6]; - - vec1.merge(vec2); - - assert_eq!(vec1, vec![1, 2, 3, 4, 5, 6]); -} - -#[test] -fn test_tuple_merge() { - let mut tuple1 = (vec![1, 2], BTreeSet::from([3])); - let tuple2 = (vec![3, 4], BTreeSet::from([4])); - - tuple1.merge(tuple2); - - let expected_vec = vec![1, 2, 3, 4]; - assert_eq!(tuple1.0, expected_vec); - let expected_set: BTreeSet = BTreeSet::from([3, 4]); - assert_eq!(tuple1.1, expected_set); -} - -#[test] -fn test_is_empty() { - let map: BTreeMap = BTreeMap::new(); - assert!(Merge::is_empty(&map)); - - let set: BTreeSet = BTreeSet::new(); - assert!(Merge::is_empty(&set)); - - let vec: Vec = Vec::new(); - assert!(Merge::is_empty(&vec)); -} -#[test] -fn test_take() { - let mut map: BTreeMap = BTreeMap::new(); - map.insert(1, 1); - let taken_map = Merge::take(&mut map); - assert_eq!(taken_map, Some(BTreeMap::from([(1, 1)]))); - assert!(map.is_empty()); - - let mut set: BTreeSet = BTreeSet::new(); - set.insert(1); - let taken_set = Merge::take(&mut set); - assert_eq!(taken_set, Some(BTreeSet::from([1]))); - assert!(set.is_empty()); - - let mut vec: Vec = vec![1]; - let taken_vec = Merge::take(&mut vec); - assert_eq!(taken_vec, Some(vec![1])); - assert!(vec.is_empty()); -} - -#[test] -fn test_btree_map_merge_conflict() { - let mut map1: BTreeMap = BTreeMap::new(); - map1.insert(1, "a"); - let mut map2: BTreeMap = BTreeMap::new(); - map2.insert(1, "b"); - - map1.merge(map2); - - let expected: BTreeMap = 
BTreeMap::from([(1, "b")]); - assert_eq!(map1, expected); -} - -#[test] -fn test_btree_set_merge_conflict() { - let mut set1: BTreeSet = BTreeSet::new(); - set1.insert(1); - let mut set2: BTreeSet = BTreeSet::new(); - set2.insert(1); - - set1.merge(set2); - - let expected: BTreeSet = BTreeSet::from([1]); - assert_eq!(set1, expected); -} - -#[test] -fn test_vec_merge_duplicates() { - let mut vec1 = vec![1, 2, 3]; - let vec2 = vec![3, 4, 5]; - - vec1.merge(vec2); - - assert_eq!(vec1, vec![1, 2, 3, 3, 4, 5]); -} diff --git a/crates/electrum/CHANGELOG.md b/crates/electrum/CHANGELOG.md deleted file mode 100644 index f89e86ed..00000000 --- a/crates/electrum/CHANGELOG.md +++ /dev/null @@ -1,22 +0,0 @@ -# Changelog - -All notable changes to this project can be found here and in each release's git tag and can be viewed with `git tag -ln100 "v*"`. See also [DEVELOPMENT_CYCLE.md](../../DEVELOPMENT_CYCLE.md) for more details. - -Contributors do not need to change this file but do need to add changelog details in their PR descriptions. The person making the next release will collect changelog details from included PRs and edit this file prior to each release. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [electrum-0.21.0] - -- Bump crate MSRV to 1.75.0 -- deps: bump `electrum-client` to 0.23.0 - -## [electrum-0.20.1] - -### Changed - -- Minor updates to fix new rustc 1.83.0 clippy warnings #1776 - -[electrum-0.20.1]: https://github.com/bitcoindevkit/bdk/releases/tag/electrum-0.20.1 -[electrum-0.21.0]: https://github.com/bitcoindevkit/bdk/releases/tag/electrum-0.21.0 \ No newline at end of file diff --git a/crates/electrum/Cargo.toml b/crates/electrum/Cargo.toml deleted file mode 100644 index e3918c18..00000000 --- a/crates/electrum/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "bdk_electrum" -version = "0.21.0" -edition = "2021" -homepage = "https://bitcoindevkit.org" -repository = "https://github.com/bitcoindevkit/bdk" -documentation = "https://docs.rs/bdk_electrum" -description = "Fetch data from electrum in the form BDK accepts" -license = "MIT OR Apache-2.0" -readme = "README.md" - -[lints] -workspace = true - -[dependencies] -bdk_core = { path = "../core", version = "0.4.1" } -electrum-client = { version = "0.23", features = [ "proxy" ], default-features = false } - -[dev-dependencies] -bdk_testenv = { path = "../testenv" } -bdk_chain = { path = "../chain" } - -[features] -default = ["use-rustls"] -use-rustls = ["electrum-client/use-rustls"] -use-rustls-ring = ["electrum-client/use-rustls-ring"] -use-openssl = ["electrum-client/use-openssl"] - -[[test]] -name = "test_electrum" -required-features = ["use-rustls"] diff --git a/crates/electrum/README.md b/crates/electrum/README.md deleted file mode 100644 index efe7109c..00000000 --- a/crates/electrum/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# BDK Electrum - -BDK Electrum extends [`electrum-client`] to update [`bdk_chain`] structures -from an Electrum server. - -## Minimum Supported Rust Version (MSRV) -This crate has a MSRV of 1.75.0. 
- -To build with MSRV you will need to pin dependencies as follows: -```shell -cargo update -p home --precise "0.5.9" -``` - -[`electrum-client`]: https://docs.rs/electrum-client/ -[`bdk_chain`]: https://docs.rs/bdk-chain/ diff --git a/crates/electrum/src/bdk_electrum_client.rs b/crates/electrum/src/bdk_electrum_client.rs deleted file mode 100644 index fb387bb3..00000000 --- a/crates/electrum/src/bdk_electrum_client.rs +++ /dev/null @@ -1,622 +0,0 @@ -use bdk_core::{ - bitcoin::{block::Header, BlockHash, OutPoint, Transaction, Txid}, - collections::{BTreeMap, HashMap, HashSet}, - spk_client::{ - FullScanRequest, FullScanResponse, SpkWithExpectedTxids, SyncRequest, SyncResponse, - }, - BlockId, CheckPoint, ConfirmationBlockTime, TxUpdate, -}; -use electrum_client::{ElectrumApi, Error, HeaderNotification}; -use std::sync::{Arc, Mutex}; - -/// We include a chain suffix of a certain length for the purpose of robustness. -const CHAIN_SUFFIX_LENGTH: u32 = 8; - -/// Wrapper around an [`electrum_client::ElectrumApi`] which includes an internal in-memory -/// transaction cache to avoid re-fetching already downloaded transactions. -#[derive(Debug)] -pub struct BdkElectrumClient { - /// The internal [`electrum_client::ElectrumApi`] - pub inner: E, - /// The transaction cache - tx_cache: Mutex>>, - /// The header cache - block_header_cache: Mutex>, -} - -impl BdkElectrumClient { - /// Creates a new bdk client from a [`electrum_client::ElectrumApi`] - pub fn new(client: E) -> Self { - Self { - inner: client, - tx_cache: Default::default(), - block_header_cache: Default::default(), - } - } - - /// Inserts transactions into the transaction cache so that the client will not fetch these - /// transactions. - pub fn populate_tx_cache(&self, txs: impl IntoIterator>>) { - let mut tx_cache = self.tx_cache.lock().unwrap(); - for tx in txs { - let tx = tx.into(); - let txid = tx.compute_txid(); - tx_cache.insert(txid, tx); - } - } - - /// Fetch transaction of given `txid`. - /// - /// If it hits the cache it will return the cached version and avoid making the request. - pub fn fetch_tx(&self, txid: Txid) -> Result, Error> { - let tx_cache = self.tx_cache.lock().unwrap(); - - if let Some(tx) = tx_cache.get(&txid) { - return Ok(Arc::clone(tx)); - } - - drop(tx_cache); - - let tx = Arc::new(self.inner.transaction_get(&txid)?); - - self.tx_cache.lock().unwrap().insert(txid, Arc::clone(&tx)); - - Ok(tx) - } - - /// Fetch block header of given `height`. - /// - /// If it hits the cache it will return the cached version and avoid making the request. - fn fetch_header(&self, height: u32) -> Result { - let block_header_cache = self.block_header_cache.lock().unwrap(); - - if let Some(header) = block_header_cache.get(&height) { - return Ok(*header); - } - - drop(block_header_cache); - - self.update_header(height) - } - - /// Update a block header at given `height`. Returns the updated header. - fn update_header(&self, height: u32) -> Result { - let header = self.inner.block_header(height as usize)?; - - self.block_header_cache - .lock() - .unwrap() - .insert(height, header); - - Ok(header) - } - - /// Broadcasts a transaction to the network. - /// - /// This is a re-export of [`ElectrumApi::transaction_broadcast`]. - pub fn transaction_broadcast(&self, tx: &Transaction) -> Result { - self.inner.transaction_broadcast(tx) - } - - /// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and - /// returns updates for [`bdk_chain`] data structures. 
- /// - /// - `request`: struct with data required to perform a spk-based blockchain client full scan, - /// see [`FullScanRequest`]. - /// - `stop_gap`: the full scan for each keychain stops after a gap of script pubkeys with no - /// associated transactions. - /// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch - /// request. - /// - `fetch_prev_txouts`: specifies whether we want previous `TxOut`s for fee calculation. - /// Note that this requires additional calls to the Electrum server, but - /// is necessary for calculating the fee on a transaction if your wallet - /// does not own the inputs. Methods like [`Wallet.calculate_fee`] and - /// [`Wallet.calculate_fee_rate`] will return a - /// [`CalculateFeeError::MissingTxOut`] error if those `TxOut`s are not - /// present in the transaction graph. - /// - /// [`bdk_chain`]: ../bdk_chain/index.html - /// [`CalculateFeeError::MissingTxOut`]: ../bdk_chain/tx_graph/enum.CalculateFeeError.html#variant.MissingTxOut - /// [`Wallet.calculate_fee`]: ../bdk_wallet/struct.Wallet.html#method.calculate_fee - /// [`Wallet.calculate_fee_rate`]: ../bdk_wallet/struct.Wallet.html#method.calculate_fee_rate - pub fn full_scan( - &self, - request: impl Into>, - stop_gap: usize, - batch_size: usize, - fetch_prev_txouts: bool, - ) -> Result, Error> { - let mut request: FullScanRequest = request.into(); - let start_time = request.start_time(); - - let tip_and_latest_blocks = match request.chain_tip() { - Some(chain_tip) => Some(fetch_tip_and_latest_blocks(&self.inner, chain_tip)?), - None => None, - }; - - let mut tx_update = TxUpdate::::default(); - let mut last_active_indices = BTreeMap::::default(); - for keychain in request.keychains() { - let spks = request - .iter_spks(keychain.clone()) - .map(|(spk_i, spk)| (spk_i, SpkWithExpectedTxids::from(spk))); - if let Some(last_active_index) = - self.populate_with_spks(start_time, &mut tx_update, spks, stop_gap, batch_size)? - { - last_active_indices.insert(keychain, last_active_index); - } - } - - // Fetch previous `TxOut`s for fee calculation if flag is enabled. - if fetch_prev_txouts { - self.fetch_prev_txout(&mut tx_update)?; - } - - let chain_update = match tip_and_latest_blocks { - Some((chain_tip, latest_blocks)) => Some(chain_update( - chain_tip, - &latest_blocks, - tx_update.anchors.iter().cloned(), - )?), - _ => None, - }; - - Ok(FullScanResponse { - tx_update, - chain_update, - last_active_indices, - }) - } - - /// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified - /// and returns updates for [`bdk_chain`] data structures. - /// - /// - `request`: struct with data required to perform a spk-based blockchain client sync, - /// see [`SyncRequest`] - /// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch - /// request - /// - `fetch_prev_txouts`: specifies whether we want previous `TxOut`s for fee calculation. - /// Note that this requires additional calls to the Electrum server, but - /// is necessary for calculating the fee on a transaction if your wallet - /// does not own the inputs. Methods like [`Wallet.calculate_fee`] and - /// [`Wallet.calculate_fee_rate`] will return a - /// [`CalculateFeeError::MissingTxOut`] error if those `TxOut`s are not - /// present in the transaction graph. - /// - /// If the scripts to sync are unknown, such as when restoring or importing a keychain that - /// may include scripts that have been used, use [`full_scan`] with the keychain. 
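For context, a minimal sketch of driving both entry points described above; the parameter choices (`stop_gap = 20`, `batch_size = 10`) and request types are illustrative assumptions, not values from the removed code:

```rust
use bdk_core::spk_client::{FullScanRequest, SyncRequest};
use bdk_electrum::{electrum_client, BdkElectrumClient};

fn scan_then_sync(
    client: &BdkElectrumClient<electrum_client::Client>,
    full_scan_request: FullScanRequest<&'static str>,
    sync_request: SyncRequest<u32>,
) -> Result<(), electrum_client::Error> {
    // Restore-from-backup path: stop after 20 unused spks, 10 spks per batch.
    let full = client.full_scan(full_scan_request, 20, 10, false)?;
    let _last_active = full.last_active_indices;

    // Steady-state path: only revisit spks/txids/outpoints we already track.
    let sync = client.sync(sync_request, 10, false)?;
    let _update = sync.tx_update;
    Ok(())
}
```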
-    ///
-    /// [`full_scan`]: Self::full_scan
-    /// [`bdk_chain`]: ../bdk_chain/index.html
-    /// [`CalculateFeeError::MissingTxOut`]: ../bdk_chain/tx_graph/enum.CalculateFeeError.html#variant.MissingTxOut
-    /// [`Wallet.calculate_fee`]: ../bdk_wallet/struct.Wallet.html#method.calculate_fee
-    /// [`Wallet.calculate_fee_rate`]: ../bdk_wallet/struct.Wallet.html#method.calculate_fee_rate
-    pub fn sync<I: 'static>(
-        &self,
-        request: impl Into<SyncRequest<I>>,
-        batch_size: usize,
-        fetch_prev_txouts: bool,
-    ) -> Result<SyncResponse, Error> {
-        let mut request: SyncRequest<I> = request.into();
-        let start_time = request.start_time();
-
-        let tip_and_latest_blocks = match request.chain_tip() {
-            Some(chain_tip) => Some(fetch_tip_and_latest_blocks(&self.inner, chain_tip)?),
-            None => None,
-        };
-
-        let mut tx_update = TxUpdate::<ConfirmationBlockTime>::default();
-        self.populate_with_spks(
-            start_time,
-            &mut tx_update,
-            request
-                .iter_spks_with_expected_txids()
-                .enumerate()
-                .map(|(i, spk)| (i as u32, spk)),
-            usize::MAX,
-            batch_size,
-        )?;
-        self.populate_with_txids(start_time, &mut tx_update, request.iter_txids())?;
-        self.populate_with_outpoints(start_time, &mut tx_update, request.iter_outpoints())?;
-
-        // Fetch previous `TxOut`s for fee calculation if flag is enabled.
-        if fetch_prev_txouts {
-            self.fetch_prev_txout(&mut tx_update)?;
-        }
-
-        let chain_update = match tip_and_latest_blocks {
-            Some((chain_tip, latest_blocks)) => Some(chain_update(
-                chain_tip,
-                &latest_blocks,
-                tx_update.anchors.iter().cloned(),
-            )?),
-            None => None,
-        };
-
-        Ok(SyncResponse {
-            tx_update,
-            chain_update,
-        })
-    }
-
-    /// Populate the `tx_update` with transactions/anchors associated with the given `spks`.
-    ///
-    /// Transactions that contain an output with a requested spk, or spend from an output with a
-    /// requested spk, will be added to `tx_update`. Anchors of the aforementioned transactions
-    /// are also included.
-    fn populate_with_spks<I>(
-        &self,
-        start_time: u64,
-        tx_update: &mut TxUpdate<ConfirmationBlockTime>,
-        mut spks_with_expected_txids: impl Iterator<Item = (I, SpkWithExpectedTxids)>,
-        stop_gap: usize,
-        batch_size: usize,
-    ) -> Result<Option<I>, Error> {
-        let mut unused_spk_count = 0_usize;
-        let mut last_active_index = Option::<I>::None;
-
-        loop {
-            let spks = (0..batch_size)
-                .map_while(|_| spks_with_expected_txids.next())
-                .collect::<Vec<_>>();
-            if spks.is_empty() {
-                return Ok(last_active_index);
-            }
-
-            let spk_histories = self
-                .inner
-                .batch_script_get_history(spks.iter().map(|(_, s)| s.spk.as_script()))?;
-
-            for ((spk_index, spk), spk_history) in spks.into_iter().zip(spk_histories) {
-                if spk_history.is_empty() {
-                    unused_spk_count = unused_spk_count.saturating_add(1);
-                    if unused_spk_count >= stop_gap {
-                        return Ok(last_active_index);
-                    }
-                } else {
-                    last_active_index = Some(spk_index);
-                    unused_spk_count = 0;
-                }
-
-                let spk_history_set = spk_history
-                    .iter()
-                    .map(|res| res.tx_hash)
-                    .collect::<HashSet<_>>();
-
-                tx_update.evicted_ats.extend(
-                    spk.expected_txids
-                        .difference(&spk_history_set)
-                        .map(|&txid| (txid, start_time)),
-                );
-
-                for tx_res in spk_history {
-                    tx_update.txs.push(self.fetch_tx(tx_res.tx_hash)?);
-                    match tx_res.height.try_into() {
-                        // Returned heights 0 & -1 are reserved for unconfirmed txs.
-                        Ok(height) if height > 0 => {
-                            self.validate_merkle_for_anchor(tx_update, tx_res.tx_hash, height)?;
-                        }
-                        _ => {
-                            tx_update.seen_ats.insert((tx_res.tx_hash, start_time));
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    /// Populate the `tx_update` with associated transactions/anchors of `outpoints`.
-    ///
-    /// Transactions in which the outpoint resides, and transactions that spend from the outpoint are
-    /// included.
Anchors of the aforementioned transactions are included. - fn populate_with_outpoints( - &self, - start_time: u64, - tx_update: &mut TxUpdate, - outpoints: impl IntoIterator, - ) -> Result<(), Error> { - for outpoint in outpoints { - let op_txid = outpoint.txid; - let op_tx = self.fetch_tx(op_txid)?; - let op_txout = match op_tx.output.get(outpoint.vout as usize) { - Some(txout) => txout, - None => continue, - }; - debug_assert_eq!(op_tx.compute_txid(), op_txid); - - // attempt to find the following transactions (alongside their chain positions), and - // add to our sparsechain `update`: - let mut has_residing = false; // tx in which the outpoint resides - let mut has_spending = false; // tx that spends the outpoint - for res in self.inner.script_get_history(&op_txout.script_pubkey)? { - if has_residing && has_spending { - break; - } - - if !has_residing && res.tx_hash == op_txid { - has_residing = true; - tx_update.txs.push(Arc::clone(&op_tx)); - match res.height.try_into() { - // Returned heights 0 & -1 are reserved for unconfirmed txs. - Ok(height) if height > 0 => { - self.validate_merkle_for_anchor(tx_update, res.tx_hash, height)?; - } - _ => { - tx_update.seen_ats.insert((res.tx_hash, start_time)); - } - } - } - - if !has_spending && res.tx_hash != op_txid { - let res_tx = self.fetch_tx(res.tx_hash)?; - // we exclude txs/anchors that do not spend our specified outpoint(s) - has_spending = res_tx - .input - .iter() - .any(|txin| txin.previous_output == outpoint); - if !has_spending { - continue; - } - tx_update.txs.push(Arc::clone(&res_tx)); - match res.height.try_into() { - // Returned heights 0 & -1 are reserved for unconfirmed txs. - Ok(height) if height > 0 => { - self.validate_merkle_for_anchor(tx_update, res.tx_hash, height)?; - } - _ => { - tx_update.seen_ats.insert((res.tx_hash, start_time)); - } - } - } - } - } - Ok(()) - } - - /// Populate the `tx_update` with transactions/anchors of the provided `txids`. - fn populate_with_txids( - &self, - start_time: u64, - tx_update: &mut TxUpdate, - txids: impl IntoIterator, - ) -> Result<(), Error> { - for txid in txids { - let tx = match self.fetch_tx(txid) { - Ok(tx) => tx, - Err(electrum_client::Error::Protocol(_)) => continue, - Err(other_err) => return Err(other_err), - }; - - let spk = tx - .output - .first() - .map(|txo| &txo.script_pubkey) - .expect("tx must have an output"); - - // because of restrictions of the Electrum API, we have to use the `script_get_history` - // call to get confirmation status of our transaction - if let Some(r) = self - .inner - .script_get_history(spk)? - .into_iter() - .find(|r| r.tx_hash == txid) - { - match r.height.try_into() { - // Returned heights 0 & -1 are reserved for unconfirmed txs. - Ok(height) if height > 0 => { - self.validate_merkle_for_anchor(tx_update, txid, height)?; - } - _ => { - tx_update.seen_ats.insert((r.tx_hash, start_time)); - } - } - } - - tx_update.txs.push(tx); - } - Ok(()) - } - - // Helper function which checks if a transaction is confirmed by validating the merkle proof. - // An anchor is inserted if the transaction is validated to be in a confirmed block. 
- fn validate_merkle_for_anchor( - &self, - tx_update: &mut TxUpdate, - txid: Txid, - confirmation_height: usize, - ) -> Result<(), Error> { - if let Ok(merkle_res) = self - .inner - .transaction_get_merkle(&txid, confirmation_height) - { - let mut header = self.fetch_header(merkle_res.block_height as u32)?; - let mut is_confirmed_tx = electrum_client::utils::validate_merkle_proof( - &txid, - &header.merkle_root, - &merkle_res, - ); - - // Merkle validation will fail if the header in `block_header_cache` is outdated, so we - // want to check if there is a new header and validate against the new one. - if !is_confirmed_tx { - header = self.update_header(merkle_res.block_height as u32)?; - is_confirmed_tx = electrum_client::utils::validate_merkle_proof( - &txid, - &header.merkle_root, - &merkle_res, - ); - } - - if is_confirmed_tx { - tx_update.anchors.insert(( - ConfirmationBlockTime { - confirmation_time: header.time as u64, - block_id: BlockId { - height: merkle_res.block_height as u32, - hash: header.block_hash(), - }, - }, - txid, - )); - } - } - Ok(()) - } - - // Helper function which fetches the `TxOut`s of our relevant transactions' previous transactions, - // which we do not have by default. This data is needed to calculate the transaction fee. - fn fetch_prev_txout( - &self, - tx_update: &mut TxUpdate, - ) -> Result<(), Error> { - let mut no_dup = HashSet::::new(); - for tx in &tx_update.txs { - if !tx.is_coinbase() && no_dup.insert(tx.compute_txid()) { - for vin in &tx.input { - let outpoint = vin.previous_output; - let vout = outpoint.vout; - let prev_tx = self.fetch_tx(outpoint.txid)?; - let txout = prev_tx.output[vout as usize].clone(); - let _ = tx_update.txouts.insert(outpoint, txout); - } - } - } - Ok(()) - } -} - -/// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`. The latest blocks are -/// fetched to construct checkpoint updates with the proper [`BlockHash`] in case of re-org. -fn fetch_tip_and_latest_blocks( - client: &impl ElectrumApi, - prev_tip: CheckPoint, -) -> Result<(CheckPoint, BTreeMap), Error> { - let HeaderNotification { height, .. } = client.block_headers_subscribe()?; - let new_tip_height = height as u32; - - // If electrum returns a tip height that is lower than our previous tip, then checkpoints do - // not need updating. We just return the previous tip and use that as the point of agreement. - if new_tip_height < prev_tip.height() { - return Ok((prev_tip, BTreeMap::new())); - } - - // Atomically fetch the latest `CHAIN_SUFFIX_LENGTH` count of blocks from Electrum. We use this - // to construct our checkpoint update. - let mut new_blocks = { - let start_height = new_tip_height.saturating_sub(CHAIN_SUFFIX_LENGTH - 1); - let hashes = client - .block_headers(start_height as _, CHAIN_SUFFIX_LENGTH as _)? - .headers - .into_iter() - .map(|h| h.block_hash()); - (start_height..).zip(hashes).collect::>() - }; - - // Find the "point of agreement" (if any). 
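For context, a simplified sketch of the point-of-agreement search performed by the block that follows; unlike the real loop, this assumes every checkpoint height is already present in the fetched map, whereas the code below also pulls missing headers from the server on demand:

```rust
use std::collections::BTreeMap;

use bdk_core::{bitcoin::BlockHash, CheckPoint};

// The point of agreement is the highest checkpoint in `prev_tip` whose hash
// still matches the freshly fetched block at the same height.
fn point_of_agreement(
    prev_tip: &CheckPoint,
    fetched: &BTreeMap<u32, BlockHash>,
) -> Option<CheckPoint> {
    prev_tip
        .iter()
        .find(|cp| fetched.get(&cp.height()) == Some(&cp.hash()))
}
```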
- let agreement_cp = { - let mut agreement_cp = Option::::None; - for cp in prev_tip.iter() { - let cp_block = cp.block_id(); - let hash = match new_blocks.get(&cp_block.height) { - Some(&hash) => hash, - None => { - assert!( - new_tip_height >= cp_block.height, - "already checked that electrum's tip cannot be smaller" - ); - let hash = client.block_header(cp_block.height as _)?.block_hash(); - new_blocks.insert(cp_block.height, hash); - hash - } - }; - if hash == cp_block.hash { - agreement_cp = Some(cp); - break; - } - } - agreement_cp - }; - - let agreement_height = agreement_cp.as_ref().map(CheckPoint::height); - - let new_tip = new_blocks - .iter() - // Prune `new_blocks` to only include blocks that are actually new. - .filter(|(height, _)| Some(*<&u32>::clone(height)) > agreement_height) - .map(|(height, hash)| BlockId { - height: *height, - hash: *hash, - }) - .fold(agreement_cp, |prev_cp, block| { - Some(match prev_cp { - Some(cp) => cp.push(block).expect("must extend checkpoint"), - None => CheckPoint::new(block), - }) - }) - .expect("must have at least one checkpoint"); - - Ok((new_tip, new_blocks)) -} - -// Add a corresponding checkpoint per anchor height if it does not yet exist. Checkpoints should not -// surpass `latest_blocks`. -fn chain_update( - mut tip: CheckPoint, - latest_blocks: &BTreeMap, - anchors: impl Iterator, -) -> Result { - for (anchor, _txid) in anchors { - let height = anchor.block_id.height; - - // Checkpoint uses the `BlockHash` from `latest_blocks` so that the hash will be consistent - // in case of a re-org. - if tip.get(height).is_none() && height <= tip.height() { - let hash = match latest_blocks.get(&height) { - Some(&hash) => hash, - None => anchor.block_id.hash, - }; - tip = tip.insert(BlockId { hash, height }); - } - } - Ok(tip) -} - -#[cfg(test)] -mod test { - use crate::{bdk_electrum_client::TxUpdate, BdkElectrumClient}; - use bdk_chain::bitcoin::{OutPoint, Transaction, TxIn}; - use bdk_core::collections::BTreeMap; - use bdk_testenv::{utils::new_tx, TestEnv}; - use std::sync::Arc; - - #[cfg(feature = "default")] - #[test] - fn test_fetch_prev_txout_with_coinbase() { - let env = TestEnv::new().unwrap(); - let electrum_client = - electrum_client::Client::new(env.electrsd.electrum_url.as_str()).unwrap(); - let client = BdkElectrumClient::new(electrum_client); - - // Create a coinbase transaction. - let coinbase_tx = Transaction { - input: vec![TxIn { - previous_output: OutPoint::null(), - ..Default::default() - }], - ..new_tx(0) - }; - - assert!(coinbase_tx.is_coinbase()); - - // Test that `fetch_prev_txout` does not process coinbase transactions. Calling - // `fetch_prev_txout` on a coinbase transaction will trigger a `fetch_tx` on a transaction - // with a txid of all zeros. If `fetch_prev_txout` attempts to fetch this transaction, this - // assertion will fail. - let mut tx_update = TxUpdate::default(); - tx_update.txs = vec![Arc::new(coinbase_tx)]; - assert!(client.fetch_prev_txout(&mut tx_update).is_ok()); - - // Ensure that the txouts are empty. - assert_eq!(tx_update.txouts, BTreeMap::default()); - } -} diff --git a/crates/electrum/src/lib.rs b/crates/electrum/src/lib.rs deleted file mode 100644 index 8bc87321..00000000 --- a/crates/electrum/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! This crate is used for returning updates from Electrum servers. -//! -//! Updates are returned as either a [`SyncResponse`] (if [`BdkElectrumClient::sync()`] is called), -//! or a [`FullScanResponse`] (if [`BdkElectrumClient::full_scan()`] is called). -//! 
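For context, a minimal sketch of wiring up the client described in this module; the server URL is a placeholder:

```rust
use std::sync::Arc;

use bdk_core::bitcoin::Transaction;
use bdk_electrum::{electrum_client, BdkElectrumClient};

fn connect() -> Result<BdkElectrumClient<electrum_client::Client>, electrum_client::Error> {
    // Placeholder endpoint; point this at a real Electrum server.
    let client = BdkElectrumClient::new(electrum_client::Client::new("tcp://localhost:50001")?);
    // Optionally seed the cache so already-known transactions are not re-fetched.
    client.populate_tx_cache(Vec::<Arc<Transaction>>::new());
    Ok(client)
}
```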
-//! In most cases [`BdkElectrumClient::sync()`] is used to sync the transaction histories of scripts -//! that the application cares about, for example the scripts for all the receive addresses of a -//! Wallet's keychain that it has shown a user. -//! -//! [`BdkElectrumClient::full_scan`] is meant to be used when importing or restoring a keychain -//! where the range of possibly used scripts is not known. In this case it is necessary to scan all -//! keychain scripts until a number (the "stop gap") of unused scripts is discovered. -//! -//! Refer to [`example_electrum`] for a complete example. -//! -//! [`example_electrum`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_electrum -//! [`SyncResponse`]: bdk_core::spk_client::SyncResponse -//! [`FullScanResponse`]: bdk_core::spk_client::FullScanResponse - -#![warn(missing_docs)] - -mod bdk_electrum_client; -pub use bdk_electrum_client::*; - -pub use bdk_core; -pub use electrum_client; diff --git a/crates/electrum/tests/test_electrum.rs b/crates/electrum/tests/test_electrum.rs deleted file mode 100644 index 3c1d1180..00000000 --- a/crates/electrum/tests/test_electrum.rs +++ /dev/null @@ -1,878 +0,0 @@ -use bdk_chain::{ - bitcoin::{hashes::Hash, Address, Amount, ScriptBuf, WScriptHash}, - local_chain::LocalChain, - spk_client::{FullScanRequest, SyncRequest, SyncResponse}, - spk_txout::SpkTxOutIndex, - Balance, ConfirmationBlockTime, IndexedTxGraph, Indexer, Merge, TxGraph, -}; -use bdk_core::bitcoin::{ - key::{Secp256k1, UntweakedPublicKey}, - Network, -}; -use bdk_electrum::BdkElectrumClient; -use bdk_testenv::{ - anyhow, - bitcoincore_rpc::{json::CreateRawTransactionInput, RawTx, RpcApi}, - TestEnv, -}; -use core::time::Duration; -use electrum_client::ElectrumApi; -use std::collections::{BTreeSet, HashMap, HashSet}; -use std::str::FromStr; - -// Batch size for `sync_with_electrum`. -const BATCH_SIZE: usize = 5; - -pub fn get_test_spk() -> ScriptBuf { - const PK_BYTES: &[u8] = &[ - 12, 244, 72, 4, 163, 4, 211, 81, 159, 82, 153, 123, 125, 74, 142, 40, 55, 237, 191, 231, - 31, 114, 89, 165, 83, 141, 8, 203, 93, 240, 53, 101, - ]; - let secp = Secp256k1::new(); - let pk = UntweakedPublicKey::from_slice(PK_BYTES).expect("Must be valid PK"); - ScriptBuf::new_p2tr(&secp, pk, None) -} - -fn get_balance( - recv_chain: &LocalChain, - recv_graph: &IndexedTxGraph>, -) -> anyhow::Result { - let chain_tip = recv_chain.tip().block_id(); - let outpoints = recv_graph.index.outpoints().clone(); - let balance = recv_graph - .graph() - .balance(recv_chain, chain_tip, outpoints, |_, _| true); - Ok(balance) -} - -fn sync_with_electrum( - client: &BdkElectrumClient, - spks: Spks, - chain: &mut LocalChain, - graph: &mut IndexedTxGraph, -) -> anyhow::Result -where - I: Indexer, - I::ChangeSet: Default + Merge, - Spks: IntoIterator, - Spks::IntoIter: ExactSizeIterator + Send + 'static, -{ - let update = client.sync( - SyncRequest::builder().chain_tip(chain.tip()).spks(spks), - BATCH_SIZE, - true, - )?; - - if let Some(chain_update) = update.chain_update.clone() { - let _ = chain - .apply_update(chain_update) - .map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?; - } - let _ = graph.apply_update(update.tx_update.clone()); - - Ok(update) -} - -// Ensure that a wallet can detect a malicious replacement of an incoming transaction. -// -// This checks that both the Electrum chain source and the receiving structures properly track the -// replaced transaction as missing. 
-#[test] -pub fn detect_receive_tx_cancel() -> anyhow::Result<()> { - const SEND_TX_FEE: Amount = Amount::from_sat(1000); - const UNDO_SEND_TX_FEE: Amount = Amount::from_sat(2000); - - let env = TestEnv::new()?; - let rpc_client = env.rpc_client(); - let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?; - let client = BdkElectrumClient::new(electrum_client); - - let mut graph = IndexedTxGraph::::new(SpkTxOutIndex::<()>::default()); - let (chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?); - - // Get receiving address. - let receiver_spk = get_test_spk(); - let receiver_addr = Address::from_script(&receiver_spk, bdk_chain::bitcoin::Network::Regtest)?; - graph.index.insert_spk((), receiver_spk); - - env.mine_blocks(101, None)?; - - // Select a UTXO to use as an input for constructing our test transactions. - let selected_utxo = rpc_client - .list_unspent(None, None, None, Some(false), None)? - .into_iter() - // Find a block reward tx. - .find(|utxo| utxo.amount == Amount::from_int_btc(50)) - .expect("Must find a block reward UTXO"); - - // Derive the sender's address from the selected UTXO. - let sender_spk = selected_utxo.script_pub_key.clone(); - let sender_addr = Address::from_script(&sender_spk, bdk_chain::bitcoin::Network::Regtest) - .expect("Failed to derive address from UTXO"); - - // Setup the common inputs used by both `send_tx` and `undo_send_tx`. - let inputs = [CreateRawTransactionInput { - txid: selected_utxo.txid, - vout: selected_utxo.vout, - sequence: None, - }]; - - // Create and sign the `send_tx` that sends funds to the receiver address. - let send_tx_outputs = HashMap::from([( - receiver_addr.to_string(), - selected_utxo.amount - SEND_TX_FEE, - )]); - let send_tx = rpc_client.create_raw_transaction(&inputs, &send_tx_outputs, None, Some(true))?; - let send_tx = rpc_client - .sign_raw_transaction_with_wallet(send_tx.raw_hex(), None, None)? - .transaction()?; - - // Create and sign the `undo_send_tx` transaction. This redirects funds back to the sender - // address. - let undo_send_outputs = HashMap::from([( - sender_addr.to_string(), - selected_utxo.amount - UNDO_SEND_TX_FEE, - )]); - let undo_send_tx = - rpc_client.create_raw_transaction(&inputs, &undo_send_outputs, None, Some(true))?; - let undo_send_tx = rpc_client - .sign_raw_transaction_with_wallet(undo_send_tx.raw_hex(), None, None)? - .transaction()?; - - // Sync after broadcasting the `send_tx`. Ensure that we detect and receive the `send_tx`. - let send_txid = env.rpc_client().send_raw_transaction(send_tx.raw_hex())?; - env.wait_until_electrum_sees_txid(send_txid, Duration::from_secs(6))?; - let sync_request = SyncRequest::builder() - .chain_tip(chain.tip()) - .spks_with_indexes(graph.index.all_spks().clone()) - .expected_spk_txids(graph.list_expected_spk_txids(&chain, chain.tip().block_id(), ..)); - let sync_response = client.sync(sync_request, BATCH_SIZE, true)?; - assert!( - sync_response - .tx_update - .txs - .iter() - .any(|tx| tx.compute_txid() == send_txid), - "sync response must include the send_tx" - ); - let changeset = graph.apply_update(sync_response.tx_update.clone()); - assert!( - changeset.tx_graph.txs.contains(&send_tx), - "tx graph must deem send_tx relevant and include it" - ); - - // Sync after broadcasting the `undo_send_tx`. Verify that `send_tx` is now missing from the - // mempool. 
- let undo_send_txid = env - .rpc_client() - .send_raw_transaction(undo_send_tx.raw_hex())?; - env.wait_until_electrum_sees_txid(undo_send_txid, Duration::from_secs(6))?; - let sync_request = SyncRequest::builder() - .chain_tip(chain.tip()) - .spks_with_indexes(graph.index.all_spks().clone()) - .expected_spk_txids(graph.list_expected_spk_txids(&chain, chain.tip().block_id(), ..)); - let sync_response = client.sync(sync_request, BATCH_SIZE, true)?; - assert!( - sync_response - .tx_update - .evicted_ats - .iter() - .any(|(txid, _)| *txid == send_txid), - "sync response must track send_tx as missing from mempool" - ); - let changeset = graph.apply_update(sync_response.tx_update.clone()); - assert!( - changeset.tx_graph.last_evicted.contains_key(&send_txid), - "tx graph must track send_tx as missing" - ); - - Ok(()) -} - -/// If an spk history contains a tx that spends another unconfirmed tx (chained mempool history), -/// the Electrum API will return the tx with a negative height. This should succeed and not panic. -#[test] -pub fn chained_mempool_tx_sync() -> anyhow::Result<()> { - let env = TestEnv::new()?; - let rpc_client = env.rpc_client(); - let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?; - - let tracked_addr = rpc_client - .get_new_address(None, None)? - .require_network(Network::Regtest)?; - - env.mine_blocks(100, None)?; - - // First unconfirmed tx. - let txid1 = env.send(&tracked_addr, Amount::from_btc(1.0)?)?; - - // Create second unconfirmed tx that spends the first. - let utxo = rpc_client - .list_unspent(None, Some(0), None, Some(true), None)? - .into_iter() - .find(|utxo| utxo.script_pub_key == tracked_addr.script_pubkey()) - .expect("must find the newly created utxo"); - let tx_that_spends_unconfirmed = rpc_client.create_raw_transaction( - &[CreateRawTransactionInput { - txid: utxo.txid, - vout: utxo.vout, - sequence: None, - }], - &[( - tracked_addr.to_string(), - utxo.amount - Amount::from_sat(1000), - )] - .into(), - None, - None, - )?; - let signed_tx = rpc_client - .sign_raw_transaction_with_wallet(tx_that_spends_unconfirmed.raw_hex(), None, None)? 
- .transaction()?; - let txid2 = rpc_client.send_raw_transaction(signed_tx.raw_hex())?; - - env.wait_until_electrum_sees_txid(signed_tx.compute_txid(), Duration::from_secs(5))?; - - let spk_history = electrum_client.script_get_history(&tracked_addr.script_pubkey())?; - assert!( - spk_history.into_iter().any(|tx_res| tx_res.height < 0), - "must find tx with negative height" - ); - - let client = BdkElectrumClient::new(electrum_client); - let req = SyncRequest::builder() - .spks(core::iter::once(tracked_addr.script_pubkey())) - .build(); - let req_time = req.start_time(); - let response = client.sync(req, 1, false)?; - assert_eq!( - response.tx_update.seen_ats, - [(txid1, req_time), (txid2, req_time)].into(), - "both txids must have `seen_at` time match the request's `start_time`", - ); - - Ok(()) -} - -#[test] -pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> { - let env = TestEnv::new()?; - let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?; - let client = BdkElectrumClient::new(electrum_client); - - let receive_address0 = - Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked(); - let receive_address1 = - Address::from_str("bcrt1qfjg5lv3dvc9az8patec8fjddrs4aqtauadnagr")?.assume_checked(); - - let misc_spks = [ - receive_address0.script_pubkey(), - receive_address1.script_pubkey(), - ]; - - let _block_hashes = env.mine_blocks(101, None)?; - let txid1 = env.bitcoind.client.send_to_address( - &receive_address1, - Amount::from_sat(10000), - None, - None, - None, - None, - Some(1), - None, - )?; - let txid2 = env.bitcoind.client.send_to_address( - &receive_address0, - Amount::from_sat(20000), - None, - None, - None, - None, - Some(1), - None, - )?; - env.mine_blocks(1, None)?; - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - - // use a full checkpoint linked list (since this is not what we are testing) - let cp_tip = env.make_checkpoint_tip(); - - let sync_update = { - let request = SyncRequest::builder() - .chain_tip(cp_tip.clone()) - .spks(misc_spks); - client.sync(request, 1, true)? - }; - - assert!( - { - let update_cps = sync_update - .chain_update - .iter() - .map(|cp| cp.block_id()) - .collect::<BTreeSet<_>>(); - let superset_cps = cp_tip - .iter() - .map(|cp| cp.block_id()) - .collect::<BTreeSet<_>>(); - superset_cps.is_superset(&update_cps) - }, - "update should not alter original checkpoint tip since we already started with all checkpoints", - ); - - let tx_update = sync_update.tx_update; - let updated_graph = { - let mut graph = TxGraph::<ConfirmationBlockTime>::default(); - let _ = graph.apply_update(tx_update.clone()); - graph - }; - // Check to see if we have the floating txouts available from our two created transactions' - // previous outputs in order to calculate transaction fees. - for tx in &tx_update.txs { - // Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the - // floating txouts available from the transactions' previous outputs. - let fee = updated_graph.calculate_fee(tx).expect("Fee must exist"); - - // Retrieve the fee in the transaction data from `bitcoind`. - let tx_fee = env - .bitcoind - .client - .get_transaction(&tx.compute_txid(), None) - .expect("Tx must exist") - .fee - .expect("Fee must exist") - .abs() - .to_unsigned() - .expect("valid `Amount`"); -
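The fee assertions in this test depend on the sync response carrying the prevouts ("floating txouts") of each relevant transaction: a fee is the sum of input values minus the sum of output values, and the input values live in other transactions' outputs. A minimal sketch of that dependency, assuming the `bdk_chain` `TxGraph` API used by the deleted code:

```rust
use bdk_chain::{
    bitcoin::{Amount, OutPoint, Transaction, TxOut},
    ConfirmationBlockTime, TxGraph,
};

/// Sketch: `calculate_fee` only succeeds once the graph knows the value of
/// every output `tx` spends, so insert those prevouts as floating txouts first.
fn fee_with_prevouts(
    tx: &Transaction,
    prevouts: impl IntoIterator<Item = (OutPoint, TxOut)>,
) -> Option<Amount> {
    let mut graph = TxGraph::<ConfirmationBlockTime>::default();
    for (op, txout) in prevouts {
        let _ = graph.insert_txout(op, txout);
    }
    graph.calculate_fee(tx).ok()
}
```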
- // Check that the calculated fee matches the fee from the transaction data. - assert_eq!(fee, tx_fee); - } - - assert_eq!( - tx_update - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect::<HashSet<_>>(), - [txid1, txid2].into(), - "update must include all expected transactions", - ); - Ok(()) -} - -/// Test the bounds of the address scan depending on the `stop_gap`. -#[test] -pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> { - let env = TestEnv::new()?; - let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?; - let client = BdkElectrumClient::new(electrum_client); - let _block_hashes = env.mine_blocks(101, None)?; - - // Now let's test the gap limit. First of all get a chain of 10 addresses. - let addresses = [ - "bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4", - "bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30", - "bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g", - "bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww", - "bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu", - "bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh", - "bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2", - "bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8", - "bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef", - "bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk", - ]; - let addresses: Vec<_> = addresses - .into_iter() - .map(|s| Address::from_str(s).unwrap().assume_checked()) - .collect(); - let spks: Vec<_> = addresses - .iter() - .enumerate() - .map(|(i, addr)| (i as u32, addr.script_pubkey())) - .collect(); - - // Then receive coins on the 4th address. - let txid_4th_addr = env.bitcoind.client.send_to_address( - &addresses[3], - Amount::from_sat(10000), - None, - None, - None, - None, - Some(1), - None, - )?; - env.mine_blocks(1, None)?; - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - - // use a full checkpoint linked list (since this is not what we are testing) - let cp_tip = env.make_checkpoint_tip(); - - // A scan with a stop_gap of 3 won't find the transaction, but a scan with a gap limit of 4 - // will. - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 3, 1, false)? - }; - assert!(full_scan_update.tx_update.txs.is_empty()); - assert!(full_scan_update.last_active_indices.is_empty()); - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 4, 1, false)? - }; - assert_eq!( - full_scan_update - .tx_update - .txs - .first() - .unwrap() - .compute_txid(), - txid_4th_addr - ); - assert_eq!(full_scan_update.last_active_indices[&0], 3); - - // Now receive a coin on the last address. - let txid_last_addr = env.bitcoind.client.send_to_address( - &addresses[addresses.len() - 1], - Amount::from_sat(10000), - None, - None, - None, - None, - Some(1), - None, - )?; - env.mine_blocks(1, None)?; - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - - // A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will. - // The last active index won't be updated in the first case but will in the second one. - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 5, 1, false)?
- }; - let txs: HashSet<_> = full_scan_update - .tx_update - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect(); - assert_eq!(txs.len(), 1); - assert!(txs.contains(&txid_4th_addr)); - assert_eq!(full_scan_update.last_active_indices[&0], 3); - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 6, 1, false)? - }; - let txs: HashSet<_> = full_scan_update - .tx_update - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect(); - assert_eq!(txs.len(), 2); - assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr)); - assert_eq!(full_scan_update.last_active_indices[&0], 9); - - Ok(()) -} - -/// Ensure that [`BdkElectrumClient::sync`] can confirm previously unconfirmed transactions in both -/// reorg and no-reorg situations. After the transaction is confirmed after reorg, check if floating -/// txouts for previous outputs were inserted for transaction fee calculation. -#[test] -fn test_sync() -> anyhow::Result<()> { - const SEND_AMOUNT: Amount = Amount::from_sat(10_000); - - let env = TestEnv::new()?; - let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?; - let client = BdkElectrumClient::new(electrum_client); - - // Setup addresses. - let addr_to_mine = env - .bitcoind - .client - .get_new_address(None, None)? - .assume_checked(); - let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros()); - let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?; - - // Setup receiver. - let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?); - let mut recv_graph = IndexedTxGraph::::new({ - let mut recv_index = SpkTxOutIndex::default(); - recv_index.insert_spk((), spk_to_track.clone()); - recv_index - }); - - // Mine some blocks. - env.mine_blocks(101, Some(addr_to_mine))?; - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - - // Broadcast transaction to mempool. - let txid = env.send(&addr_to_track, SEND_AMOUNT)?; - env.wait_until_electrum_sees_txid(txid, Duration::from_secs(6))?; - - let _ = sync_with_electrum( - &client, - [spk_to_track.clone()], - &mut recv_chain, - &mut recv_graph, - )?; - - // Check for unconfirmed balance when transaction exists only in mempool. - assert_eq!( - get_balance(&recv_chain, &recv_graph)?, - Balance { - trusted_pending: SEND_AMOUNT, - ..Balance::default() - }, - "balance must be correct", - ); - - // Mine block to confirm transaction. - env.mine_blocks(1, None)?; - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - - let _ = sync_with_electrum( - &client, - [spk_to_track.clone()], - &mut recv_chain, - &mut recv_graph, - )?; - - // Check if balance is correct when transaction is confirmed. - assert_eq!( - get_balance(&recv_chain, &recv_graph)?, - Balance { - confirmed: SEND_AMOUNT, - ..Balance::default() - }, - "balance must be correct", - ); - - // Perform reorg on block with confirmed transaction. - env.reorg_empty_blocks(1)?; - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - - let _ = sync_with_electrum( - &client, - [spk_to_track.clone()], - &mut recv_chain, - &mut recv_graph, - )?; - - // Check if balance is correct when transaction returns to mempool. - assert_eq!( - get_balance(&recv_chain, &recv_graph)?, - Balance { - trusted_pending: SEND_AMOUNT, - ..Balance::default() - }, - ); - - // Mine block to confirm transaction again. 
- env.mine_blocks(1, None)?; - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - - let _ = sync_with_electrum(&client, [spk_to_track], &mut recv_chain, &mut recv_graph)?; - - // Check if balance is correct once transaction is confirmed again. - assert_eq!( - get_balance(&recv_chain, &recv_graph)?, - Balance { - confirmed: SEND_AMOUNT, - ..Balance::default() - }, - "balance must be correct", - ); - - // Check to see if we have the floating txouts available from our transactions' previous outputs - // in order to calculate transaction fees. - for tx in recv_graph.graph().full_txs() { - // Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the - // floating txouts available from the transaction's previous outputs. - let fee = recv_graph - .graph() - .calculate_fee(&tx.tx) - .expect("fee must exist"); - - // Retrieve the fee in the transaction data from `bitcoind`. - let tx_fee = env - .bitcoind - .client - .get_transaction(&tx.txid, None) - .expect("Tx must exist") - .fee - .expect("Fee must exist") - .abs() - .to_unsigned() - .expect("valid `Amount`"); - - // Check that the calculated fee matches the fee from the transaction data. - assert_eq!(fee, tx_fee); - } - - Ok(()) -} - -/// Ensure that confirmed txs that are reorged become unconfirmed. -/// -/// 1. Mine 101 blocks. -/// 2. Mine 8 blocks with a confirmed tx in each. -/// 3. Perform 8 separate reorgs on each block with a confirmed tx. -/// 4. Check [`Balance`] after each reorg to ensure unconfirmed amount is correct. -#[test] -fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> { - const REORG_COUNT: usize = 8; - const SEND_AMOUNT: Amount = Amount::from_sat(10_000); - - let env = TestEnv::new()?; - let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?; - let client = BdkElectrumClient::new(electrum_client); - - // Setup addresses. - let addr_to_mine = env - .bitcoind - .client - .get_new_address(None, None)? - .assume_checked(); - let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros()); - let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?; - - // Setup receiver. - let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?); - let mut recv_graph = IndexedTxGraph::::new({ - let mut recv_index = SpkTxOutIndex::default(); - recv_index.insert_spk((), spk_to_track.clone()); - recv_index - }); - - // Mine some blocks. - env.mine_blocks(101, Some(addr_to_mine))?; - - // Create transactions that are tracked by our receiver. - let mut txids = vec![]; - let mut hashes = vec![]; - for _ in 0..REORG_COUNT { - txids.push(env.send(&addr_to_track, SEND_AMOUNT)?); - hashes.extend(env.mine_blocks(1, None)?); - } - - // Sync up to tip. - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - let update = sync_with_electrum( - &client, - [spk_to_track.clone()], - &mut recv_chain, - &mut recv_graph, - )?; - - // Retain a snapshot of all anchors before reorg process. - let initial_anchors = update.tx_update.anchors.clone(); - assert_eq!(initial_anchors.len(), REORG_COUNT); - for i in 0..REORG_COUNT { - let (anchor, txid) = initial_anchors.iter().nth(i).unwrap(); - assert_eq!(anchor.block_id.hash, hashes[i]); - assert_eq!(*txid, txids[i]); - } - - // Check if initial balance is correct. 
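Each mined block above carries exactly one payment to the tracked script, and each payment's anchor points at the block that confirmed it. When a reorg of depth `d` removes those blocks, the anchors for the last `d` payments no longer lie on the best chain, so those amounts fall back from `confirmed` to `trusted_pending`. Restated as a hypothetical helper mirroring the balance assertions below:

```rust
use bdk_chain::bitcoin::Amount;

/// Expected balance split after reorging `depth` of `reorg_count` one-payment
/// blocks: the reorged payments become trusted-pending, the rest stay confirmed.
fn expected_split(send_amount: Amount, reorg_count: usize, depth: usize) -> (Amount, Amount) {
    let trusted_pending = send_amount * depth as u64;
    let confirmed = send_amount * (reorg_count - depth) as u64;
    (trusted_pending, confirmed)
}
```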
- assert_eq!( - get_balance(&recv_chain, &recv_graph)?, - Balance { - confirmed: SEND_AMOUNT * REORG_COUNT as u64, - ..Balance::default() - }, - "initial balance must be correct", - ); - - // Perform reorgs with different depths. - for depth in 1..=REORG_COUNT { - env.reorg_empty_blocks(depth)?; - - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - let update = sync_with_electrum( - &client, - [spk_to_track.clone()], - &mut recv_chain, - &mut recv_graph, - )?; - - // Check that no new anchors are added during current reorg. - assert!(initial_anchors.is_superset(&update.tx_update.anchors)); - - assert_eq!( - get_balance(&recv_chain, &recv_graph)?, - Balance { - trusted_pending: SEND_AMOUNT * depth as u64, - confirmed: SEND_AMOUNT * (REORG_COUNT - depth) as u64, - ..Balance::default() - }, - "reorg_count: {}", - depth, - ); - } - - Ok(()) -} - -#[test] -fn test_sync_with_coinbase() -> anyhow::Result<()> { - let env = TestEnv::new()?; - let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?; - let client = BdkElectrumClient::new(electrum_client); - - // Setup address. - let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros()); - let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?; - - // Setup receiver. - let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?); - let mut recv_graph = IndexedTxGraph::::new({ - let mut recv_index = SpkTxOutIndex::default(); - recv_index.insert_spk((), spk_to_track.clone()); - recv_index - }); - - // Mine some blocks. - env.mine_blocks(101, Some(addr_to_track))?; - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - - // Check to see if electrum syncs properly. - assert!(sync_with_electrum( - &client, - [spk_to_track.clone()], - &mut recv_chain, - &mut recv_graph, - ) - .is_ok()); - - Ok(()) -} - -#[test] -fn test_check_fee_calculation() -> anyhow::Result<()> { - const SEND_AMOUNT: Amount = Amount::from_sat(10_000); - const FEE_AMOUNT: Amount = Amount::from_sat(1650); - let env = TestEnv::new()?; - let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?; - let client = BdkElectrumClient::new(electrum_client); - - let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros()); - let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?; - - // Setup receiver. - let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?); - let mut recv_graph = IndexedTxGraph::::new({ - let mut recv_index = SpkTxOutIndex::default(); - recv_index.insert_spk((), spk_to_track.clone()); - recv_index - }); - - // Mine some blocks. - env.mine_blocks(101, None)?; - - // Send a preliminary tx such that the new utxo in Core's wallet - // becomes the input of the next tx - let new_addr = env - .rpc_client() - .get_new_address(None, None)? - .assume_checked(); - let prev_amt = SEND_AMOUNT + FEE_AMOUNT; - env.send(&new_addr, prev_amt)?; - let prev_block_hash = env.mine_blocks(1, None)?.into_iter().next(); - - let txid = env.send(&addr_to_track, SEND_AMOUNT)?; - - // Mine a block to confirm sent tx. 
- let block_hash = env.mine_blocks(1, None)?.into_iter().next(); - - // Look at the tx we just sent, it should have 1 input and 1 output - let tx = env - .rpc_client() - .get_raw_transaction_info(&txid, block_hash.as_ref())?; - assert_eq!(tx.vin.len(), 1); - assert_eq!(tx.vout.len(), 1); - let vin = &tx.vin[0]; - let prev_txid = vin.txid.unwrap(); - let vout = vin.vout.unwrap(); - let outpoint = bdk_chain::bitcoin::OutPoint::new(prev_txid, vout); - - // Get the txout of the previous tx - let prev_tx = env - .rpc_client() - .get_raw_transaction_info(&prev_txid, prev_block_hash.as_ref())?; - let txout = prev_tx - .vout - .iter() - .find(|txout| txout.value == prev_amt) - .unwrap(); - let script_pubkey = ScriptBuf::from_bytes(txout.script_pub_key.hex.to_vec()); - let txout = bdk_chain::bitcoin::TxOut { - value: txout.value, - script_pubkey, - }; - - // Sync up to tip. - env.wait_until_electrum_sees_block(Duration::from_secs(6))?; - let _ = sync_with_electrum( - &client, - [spk_to_track.clone()], - &mut recv_chain, - &mut recv_graph, - )?; - - // Check the graph update contains the right floating txout - let graph_txout = recv_graph - .graph() - .all_txouts() - .find(|(_op, txout)| txout.value == prev_amt) - .unwrap(); - assert_eq!(graph_txout, (outpoint, &txout)); - - // Check to see if tx is confirmed. - assert_eq!( - get_balance(&recv_chain, &recv_graph)?, - Balance { - confirmed: SEND_AMOUNT, - ..Balance::default() - }, - ); - - for tx in recv_graph.graph().full_txs() { - // Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the - // floating txouts available from the transaction's previous outputs. - let fee = recv_graph - .graph() - .calculate_fee(&tx.tx) - .expect("fee must exist"); - - // Check that the calculated fee matches the initial fee amount - assert_eq!(fee, FEE_AMOUNT); - - // Retrieve the fee in the transaction data from `bitcoind`. - let tx_fee = env - .bitcoind - .client - .get_transaction(&tx.txid, None) - .expect("Tx must exist") - .fee - .expect("Fee must exist") - .abs() - .to_sat() as u64; - - // Check that the calculated fee matches the fee from the transaction data. - assert_eq!(fee, Amount::from_sat(tx_fee)); // 1650sat - } - Ok(()) -} diff --git a/crates/esplora/CHANGELOG.md b/crates/esplora/CHANGELOG.md deleted file mode 100644 index af09ad9c..00000000 --- a/crates/esplora/CHANGELOG.md +++ /dev/null @@ -1,16 +0,0 @@ -# Changelog - -All notable changes to this project can be found here and in each release's git tag and can be viewed with `git tag -ln100 "v*"`. See also [DEVELOPMENT_CYCLE.md](../../DEVELOPMENT_CYCLE.md) for more details. - -Contributors do not need to change this file but do need to add changelog details in their PR descriptions. The person making the next release will collect changelog details from included PRs and edit this file prior to each release. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
- -## [esplora-0.20.1] - -### Changed - -- Minor updates to fix new rustc 1.83.0 clippy warnings #1776 - -[esplora-0.20.1]: https://github.com/bitcoindevkit/bdk/releases/tag/esplora-0.20.1 \ No newline at end of file diff --git a/crates/esplora/Cargo.toml b/crates/esplora/Cargo.toml deleted file mode 100644 index 85054f2e..00000000 --- a/crates/esplora/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "bdk_esplora" -version = "0.20.1" -edition = "2021" -homepage = "https://bitcoindevkit.org" -repository = "https://github.com/bitcoindevkit/bdk" -documentation = "https://docs.rs/bdk_esplora" -description = "Fetch data from esplora in the form that accepts" -license = "MIT OR Apache-2.0" -readme = "README.md" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[lints] -workspace = true - -[dependencies] -bdk_core = { path = "../core", version = "0.4.1", default-features = false } -esplora-client = { version = "0.11.0", default-features = false } -async-trait = { version = "0.1.66", optional = true } -futures = { version = "0.3.26", optional = true } - -[dev-dependencies] -esplora-client = { version = "0.11.0" } -bdk_chain = { path = "../chain" } -bdk_testenv = { path = "../testenv" } -tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] } - -[features] -default = ["std", "async-https", "blocking-https"] -std = ["bdk_core/std"] -tokio = ["esplora-client/tokio"] -async = ["async-trait", "futures", "esplora-client/async"] -async-https = ["async", "esplora-client/async-https"] -async-https-rustls = ["async", "esplora-client/async-https-rustls"] -async-https-native = ["async", "esplora-client/async-https-native"] -blocking = ["esplora-client/blocking"] -blocking-https = ["blocking", "esplora-client/blocking-https"] -blocking-https-rustls = ["blocking", "esplora-client/blocking-https-rustls"] -blocking-https-native = ["blocking", "esplora-client/blocking-https-native"] - -[[test]] -name = "blocking_ext" -required-features = ["blocking"] - -[[test]] -name = "async_ext" -required-features = ["async"] diff --git a/crates/esplora/README.md b/crates/esplora/README.md deleted file mode 100644 index 244deb20..00000000 --- a/crates/esplora/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# BDK Esplora - -BDK Esplora extends [`esplora-client`] (with extension traits: [`EsploraExt`] and -[`EsploraAsyncExt`]) to update [`bdk_chain`] structures from an Esplora server. - -The extension traits are primarily intended to satisfy [`SyncRequest`]s with [`sync`] and -[`FullScanRequest`]s with [`full_scan`]. - -## Usage - -For blocking-only: -```toml -bdk_esplora = { version = "0.19", features = ["blocking"] } -``` - -For async-only: -```toml -bdk_esplora = { version = "0.19", features = ["async"] } -``` - -For async-only (with https): - -You can additionally specify to use either rustls or native-tls, e.g. `async-https-native`, and this applies to both async and blocking features. 
-```toml -bdk_esplora = { version = "0.19", features = ["async-https"] } -``` - -For async-only (with tokio): -```toml -bdk_esplora = { version = "0.19", features = ["async", "tokio"] } -``` - -To use the extension traits: -```rust -// for blocking -#[cfg(feature = "blocking")] -use bdk_esplora::EsploraExt; - -// for async -#[cfg(feature = "async")] -use bdk_esplora::EsploraAsyncExt; -``` - -For full examples, refer to [`example_wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_wallet_esplora_blocking) and [`example_wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_wallet_esplora_async). - -[`esplora-client`]: https://docs.rs/esplora-client/ -[`bdk_chain`]: https://docs.rs/bdk-chain/ -[`EsploraExt`]: crate::EsploraExt -[`EsploraAsyncExt`]: crate::EsploraAsyncExt -[`SyncRequest`]: bdk_core::spk_client::SyncRequest -[`FullScanRequest`]: bdk_core::spk_client::FullScanRequest -[`sync`]: crate::EsploraExt::sync -[`full_scan`]: crate::EsploraExt::full_scan diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs deleted file mode 100644 index 7d8460c5..00000000 --- a/crates/esplora/src/async_ext.rs +++ /dev/null @@ -1,725 +0,0 @@ -use async_trait::async_trait; -use bdk_core::collections::{BTreeMap, BTreeSet, HashSet}; -use bdk_core::spk_client::{ - FullScanRequest, FullScanResponse, SpkWithExpectedTxids, SyncRequest, SyncResponse, -}; -use bdk_core::{ - bitcoin::{BlockHash, OutPoint, Txid}, - BlockId, CheckPoint, ConfirmationBlockTime, Indexed, TxUpdate, -}; -use esplora_client::Sleeper; -use futures::{stream::FuturesOrdered, TryStreamExt}; - -use crate::{insert_anchor_or_seen_at_from_status, insert_prevouts}; - -/// [`esplora_client::Error`] -type Error = Box<esplora_client::Error>; - -/// Trait to extend the functionality of [`esplora_client::AsyncClient`]. -/// -/// Refer to [crate-level documentation](crate) for more. -#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] -#[cfg_attr(not(target_arch = "wasm32"), async_trait)] -pub trait EsploraAsyncExt { - /// Scan keychain scripts for transactions against Esplora, returning an update that can be - /// applied to the receiving structures. - /// - /// `request` provides the data required to perform a script-pubkey-based full scan - /// (see [`FullScanRequest`]). The full scan for each keychain (`K`) stops after a gap of - /// `stop_gap` script pubkeys with no associated transactions. `parallel_requests` specifies - /// the maximum number of HTTP requests to make in parallel. - /// - /// Refer to [crate-level docs](crate) for more. - async fn full_scan<K: Ord + Clone + Send, R: Into<FullScanRequest<K>> + Send>( - &self, - request: R, - stop_gap: usize, - parallel_requests: usize, - ) -> Result<FullScanResponse<K>, Error>; - - /// Sync a set of scripts, txids, and/or outpoints against Esplora. - /// - /// `request` provides the data required to perform a script-pubkey-based sync (see - /// [`SyncRequest`]). `parallel_requests` specifies the maximum number of HTTP requests to make - /// in parallel. - /// - /// Refer to [crate-level docs](crate) for more.
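For orientation while reading the removed trait: a caller builds a `FullScanRequest` (the builder is shown in the deleted Electrum tests above), hands it to `full_scan`, and applies the response to its graph and chain. A hedged usage sketch, assuming the builder-to-request conversion used throughout this diff:

```rust
use bdk_core::{bitcoin::ScriptBuf, spk_client::FullScanRequest};
use bdk_esplora::EsploraAsyncExt;

/// Sketch: scan one keychain's scripts with a stop gap of 10, five requests
/// at a time. `K = u32` here; real callers use their own keychain type.
async fn scan_keychain(
    client: &esplora_client::AsyncClient,
    spks: Vec<(u32, ScriptBuf)>,
) -> Result<(), Box<esplora_client::Error>> {
    let request = FullScanRequest::builder().spks_for_keychain(0u32, spks);
    let response = client.full_scan(request, 10, 5).await?;
    println!("last active indices: {:?}", response.last_active_indices);
    Ok(())
}
```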
- async fn sync> + Send>( - &self, - request: R, - parallel_requests: usize, - ) -> Result; -} - -#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] -#[cfg_attr(not(target_arch = "wasm32"), async_trait)] -impl EsploraAsyncExt for esplora_client::AsyncClient -where - S: Sleeper + Clone + Send + Sync, - S::Sleep: Send, -{ - async fn full_scan> + Send>( - &self, - request: R, - stop_gap: usize, - parallel_requests: usize, - ) -> Result, Error> { - let mut request: FullScanRequest = request.into(); - let start_time = request.start_time(); - let keychains = request.keychains(); - - let chain_tip = request.chain_tip(); - let latest_blocks = if chain_tip.is_some() { - Some(fetch_latest_blocks(self).await?) - } else { - None - }; - - let mut tx_update = TxUpdate::::default(); - let mut inserted_txs = HashSet::::new(); - let mut last_active_indices = BTreeMap::::new(); - for keychain in keychains { - let keychain_spks = request - .iter_spks(keychain.clone()) - .map(|(spk_i, spk)| (spk_i, spk.into())); - let (update, last_active_index) = fetch_txs_with_keychain_spks( - self, - start_time, - &mut inserted_txs, - keychain_spks, - stop_gap, - parallel_requests, - ) - .await?; - tx_update.extend(update); - if let Some(last_active_index) = last_active_index { - last_active_indices.insert(keychain, last_active_index); - } - } - - let chain_update = match (chain_tip, latest_blocks) { - (Some(chain_tip), Some(latest_blocks)) => { - Some(chain_update(self, &latest_blocks, &chain_tip, &tx_update.anchors).await?) - } - _ => None, - }; - - Ok(FullScanResponse { - chain_update, - tx_update, - last_active_indices, - }) - } - - async fn sync> + Send>( - &self, - request: R, - parallel_requests: usize, - ) -> Result { - let mut request: SyncRequest = request.into(); - let start_time = request.start_time(); - - let chain_tip = request.chain_tip(); - let latest_blocks = if chain_tip.is_some() { - Some(fetch_latest_blocks(self).await?) - } else { - None - }; - - let mut tx_update = TxUpdate::::default(); - let mut inserted_txs = HashSet::::new(); - tx_update.extend( - fetch_txs_with_spks( - self, - start_time, - &mut inserted_txs, - request.iter_spks_with_expected_txids(), - parallel_requests, - ) - .await?, - ); - tx_update.extend( - fetch_txs_with_txids( - self, - start_time, - &mut inserted_txs, - request.iter_txids(), - parallel_requests, - ) - .await?, - ); - tx_update.extend( - fetch_txs_with_outpoints( - self, - start_time, - &mut inserted_txs, - request.iter_outpoints(), - parallel_requests, - ) - .await?, - ); - - let chain_update = match (chain_tip, latest_blocks) { - (Some(chain_tip), Some(latest_blocks)) => { - Some(chain_update(self, &latest_blocks, &chain_tip, &tx_update.anchors).await?) - } - _ => None, - }; - - Ok(SyncResponse { - chain_update, - tx_update, - }) - } -} - -/// Fetch latest blocks from Esplora in an atomic call. -/// -/// We want to do this before fetching transactions and anchors as we cannot fetch latest blocks AND -/// transactions atomically, and the checkpoint tip is used to determine last-scanned block (for -/// block-based chain-sources). Therefore it's better to be conservative when setting the tip (use -/// an earlier tip rather than a later tip) otherwise the caller may accidentally skip blocks when -/// alternating between chain-sources. -async fn fetch_latest_blocks( - client: &esplora_client::AsyncClient, -) -> Result, Error> { - Ok(client - .get_blocks(None) - .await? 
- .into_iter() - .map(|b| (b.time.height, b.id)) - .collect()) -} - -/// Used instead of [`esplora_client::AsyncClient::get_block_hash`]. -/// -/// This first checks the previously fetched `latest_blocks` before fetching from Esplora again. -async fn fetch_block<S: Sleeper>( - client: &esplora_client::AsyncClient<S>, - latest_blocks: &BTreeMap<u32, BlockHash>, - height: u32, -) -> Result<Option<BlockHash>, Error> { - if let Some(&hash) = latest_blocks.get(&height) { - return Ok(Some(hash)); - } - - // We avoid fetching blocks higher than previously fetched `latest_blocks` as the local chain - // tip is used to signal for the last-synced-up-to-height. - let &tip_height = latest_blocks - .keys() - .last() - .expect("must have at least one entry"); - if height > tip_height { - return Ok(None); - } - - Ok(Some(client.get_block_hash(height).await?)) -} - -/// Create the [`local_chain::Update`]. -/// -/// We want to have a corresponding checkpoint per anchor height. However, checkpoints fetched -/// should not surpass `latest_blocks`. -async fn chain_update<S: Sleeper>( - client: &esplora_client::AsyncClient<S>, - latest_blocks: &BTreeMap<u32, BlockHash>, - local_tip: &CheckPoint, - anchors: &BTreeSet<(ConfirmationBlockTime, Txid)>, -) -> Result<CheckPoint, Error> { - let mut point_of_agreement = None; - let mut conflicts = vec![]; - for local_cp in local_tip.iter() { - let remote_hash = match fetch_block(client, latest_blocks, local_cp.height()).await? { - Some(hash) => hash, - None => continue, - }; - if remote_hash == local_cp.hash() { - point_of_agreement = Some(local_cp.clone()); - break; - } else { - // it is not strictly necessary to include all the conflicted heights (we do need the - // first one) but it seems prudent to make sure the updated chain's heights are a - // superset of the existing chain after update. - conflicts.push(BlockId { - height: local_cp.height(), - hash: remote_hash, - }); - } - } - - let mut tip = point_of_agreement.expect("remote esplora should have same genesis block"); - - tip = tip - .extend(conflicts.into_iter().rev()) - .expect("evicted are in order"); - - for (anchor, _txid) in anchors { - let height = anchor.block_id.height; - if tip.get(height).is_none() { - let hash = match fetch_block(client, latest_blocks, height).await? { - Some(hash) => hash, - None => continue, - }; - tip = tip.insert(BlockId { height, hash }); - } - } - - // insert the most recent blocks at the tip to make sure we update the tip and make the update - // robust. - for (&height, &hash) in latest_blocks.iter() { - tip = tip.insert(BlockId { height, hash }); - } - - Ok(tip) -} - -/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning -/// `keychain_spks` against Esplora. -/// -/// `keychain_spks` is an *unbounded* indexed-[`ScriptBuf`] iterator that represents scripts -/// derived from a keychain. The scanning logic stops after a `stop_gap` number of consecutive -/// scripts with no transaction history is reached. `parallel_requests` specifies the maximum -/// number of HTTP requests to make in parallel. -/// -/// A [`TxGraph`] (containing the fetched transactions and anchors) and the last active -/// keychain index (if any) are returned. The last active keychain index is the keychain's last -/// script pubkey that contains a non-empty transaction history. -/// -/// Refer to [crate-level docs](crate) for more.
-async fn fetch_txs_with_keychain_spks( - client: &esplora_client::AsyncClient, - start_time: u64, - inserted_txs: &mut HashSet, - mut keychain_spks: I, - stop_gap: usize, - parallel_requests: usize, -) -> Result<(TxUpdate, Option), Error> -where - I: Iterator> + Send, - S: Sleeper + Clone + Send + Sync, -{ - type TxsOfSpkIndex = (u32, Vec, HashSet); - - let mut update = TxUpdate::::default(); - let mut last_index = Option::::None; - let mut last_active_index = Option::::None; - - loop { - let handles = keychain_spks - .by_ref() - .take(parallel_requests) - .map(|(spk_index, spk)| { - let client = client.clone(); - let expected_txids = spk.expected_txids; - let spk = spk.spk; - async move { - let mut last_seen = None; - let mut spk_txs = Vec::new(); - loop { - let txs = client.scripthash_txs(&spk, last_seen).await?; - let tx_count = txs.len(); - last_seen = txs.last().map(|tx| tx.txid); - spk_txs.extend(txs); - if tx_count < 25 { - break; - } - } - let got_txids = spk_txs.iter().map(|tx| tx.txid).collect::>(); - let evicted_txids = expected_txids - .difference(&got_txids) - .copied() - .collect::>(); - Result::::Ok((spk_index, spk_txs, evicted_txids)) - } - }) - .collect::>(); - - if handles.is_empty() { - break; - } - - for (index, txs, evicted) in handles.try_collect::>().await? { - last_index = Some(index); - if !txs.is_empty() { - last_active_index = Some(index); - } - for tx in txs { - if inserted_txs.insert(tx.txid) { - update.txs.push(tx.to_tx().into()); - } - insert_anchor_or_seen_at_from_status(&mut update, start_time, tx.txid, tx.status); - insert_prevouts(&mut update, tx.vin); - } - update - .evicted_ats - .extend(evicted.into_iter().map(|txid| (txid, start_time))); - } - - let last_index = last_index.expect("Must be set since handles wasn't empty."); - let gap_limit_reached = if let Some(i) = last_active_index { - last_index >= i.saturating_add(stop_gap as u32) - } else { - last_index + 1 >= stop_gap as u32 - }; - if gap_limit_reached { - break; - } - } - - Ok((update, last_active_index)) -} - -/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning `spks` -/// against Esplora. -/// -/// Unlike with [`EsploraAsyncExt::fetch_txs_with_keychain_spks`], `spks` must be *bounded* as -/// all contained scripts will be scanned. `parallel_requests` specifies the maximum number of -/// HTTP requests to make in parallel. -/// -/// Refer to [crate-level docs](crate) for more. -async fn fetch_txs_with_spks( - client: &esplora_client::AsyncClient, - start_time: u64, - inserted_txs: &mut HashSet, - spks: I, - parallel_requests: usize, -) -> Result, Error> -where - I: IntoIterator + Send, - I::IntoIter: Send, - S: Sleeper + Clone + Send + Sync, -{ - fetch_txs_with_keychain_spks( - client, - start_time, - inserted_txs, - spks.into_iter().enumerate().map(|(i, spk)| (i as u32, spk)), - usize::MAX, - parallel_requests, - ) - .await - .map(|(update, _)| update) -} - -/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning `txids` -/// against Esplora. -/// -/// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel. -/// -/// Refer to [crate-level docs](crate) for more. -async fn fetch_txs_with_txids( - client: &esplora_client::AsyncClient, - start_time: u64, - inserted_txs: &mut HashSet, - txids: I, - parallel_requests: usize, -) -> Result, Error> -where - I: IntoIterator + Send, - I::IntoIter: Send, - S: Sleeper + Clone + Send + Sync, -{ - let mut update = TxUpdate::::default(); - // Only fetch for non-inserted txs. 
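The stop-gap condition in `fetch_txs_with_keychain_spks` above is worth restating in isolation, since it mixes an `Option` and a saturating add: scanning stops once the scanner has moved `stop_gap` indices past the last script that had history, or `stop_gap` indices from the start if none ever did. The same check as a standalone function:

```rust
/// Standalone restatement of the gap-limit check used in
/// `fetch_txs_with_keychain_spks` above.
fn gap_limit_reached(last_index: u32, last_active_index: Option<u32>, stop_gap: usize) -> bool {
    match last_active_index {
        Some(active) => last_index >= active.saturating_add(stop_gap as u32),
        None => last_index + 1 >= stop_gap as u32,
    }
}
```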
- let mut txids = txids - .into_iter() - .filter(|txid| !inserted_txs.contains(txid)) - .collect::>() - .into_iter(); - loop { - let handles = txids - .by_ref() - .take(parallel_requests) - .map(|txid| { - let client = client.clone(); - async move { client.get_tx_info(&txid).await.map(|t| (txid, t)) } - }) - .collect::>(); - - if handles.is_empty() { - break; - } - - for (txid, tx_info) in handles.try_collect::>().await? { - if let Some(tx_info) = tx_info { - if inserted_txs.insert(txid) { - update.txs.push(tx_info.to_tx().into()); - } - insert_anchor_or_seen_at_from_status(&mut update, start_time, txid, tx_info.status); - insert_prevouts(&mut update, tx_info.vin); - } - } - } - Ok(update) -} - -/// Fetch transactions and [`ConfirmationBlockTime`]s that contain and spend the provided -/// `outpoints`. -/// -/// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel. -/// -/// Refer to [crate-level docs](crate) for more. -async fn fetch_txs_with_outpoints( - client: &esplora_client::AsyncClient, - start_time: u64, - inserted_txs: &mut HashSet, - outpoints: I, - parallel_requests: usize, -) -> Result, Error> -where - I: IntoIterator + Send, - I::IntoIter: Send, - S: Sleeper + Clone + Send + Sync, -{ - let outpoints = outpoints.into_iter().collect::>(); - let mut update = TxUpdate::::default(); - - // make sure txs exists in graph and tx statuses are updated - // TODO: We should maintain a tx cache (like we do with Electrum). - update.extend( - fetch_txs_with_txids( - client, - start_time, - inserted_txs, - outpoints.iter().copied().map(|op| op.txid), - parallel_requests, - ) - .await?, - ); - - // get outpoint spend-statuses - let mut outpoints = outpoints.into_iter(); - let mut missing_txs = Vec::::with_capacity(outpoints.len()); - loop { - let handles = outpoints - .by_ref() - .take(parallel_requests) - .map(|op| { - let client = client.clone(); - async move { client.get_output_status(&op.txid, op.vout as _).await } - }) - .collect::>(); - - if handles.is_empty() { - break; - } - - for op_status in handles.try_collect::>().await?.into_iter().flatten() { - let spend_txid = match op_status.txid { - Some(txid) => txid, - None => continue, - }; - if !inserted_txs.contains(&spend_txid) { - missing_txs.push(spend_txid); - } - if let Some(spend_status) = op_status.status { - insert_anchor_or_seen_at_from_status( - &mut update, - start_time, - spend_txid, - spend_status, - ); - } - } - } - - update.extend( - fetch_txs_with_txids( - client, - start_time, - inserted_txs, - missing_txs, - parallel_requests, - ) - .await?, - ); - Ok(update) -} - -#[cfg(test)] -mod test { - use std::{collections::BTreeSet, time::Duration}; - - use bdk_chain::{ - bitcoin::{hashes::Hash, Txid}, - local_chain::LocalChain, - BlockId, - }; - use bdk_core::ConfirmationBlockTime; - use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv}; - use esplora_client::Builder; - - use crate::async_ext::{chain_update, fetch_latest_blocks}; - - macro_rules! h { - ($index:literal) => {{ - bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes()) - }}; - } - - /// Ensure that update does not remove heights (from original), and all anchor heights are included. - #[tokio::test] - pub async fn test_finalize_chain_update() -> anyhow::Result<()> { - struct TestCase<'a> { - #[allow(dead_code)] - name: &'a str, - /// Initial blockchain height to start the env with. - initial_env_height: u32, - /// Initial checkpoint heights to start with. - initial_cps: &'a [u32], - /// The final blockchain height of the env. 
- final_env_height: u32, - /// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch - /// the blockhash from the env. - anchors: &'a [(u32, Txid)], - } - - let test_cases = [ - TestCase { - name: "chain_extends", - initial_env_height: 60, - initial_cps: &[59, 60], - final_env_height: 90, - anchors: &[], - }, - TestCase { - name: "introduce_older_heights", - initial_env_height: 50, - initial_cps: &[10, 15], - final_env_height: 50, - anchors: &[(11, h!("A")), (14, h!("B"))], - }, - TestCase { - name: "introduce_older_heights_after_chain_extends", - initial_env_height: 50, - initial_cps: &[10, 15], - final_env_height: 100, - anchors: &[(11, h!("A")), (14, h!("B"))], - }, - ]; - - for t in test_cases.into_iter() { - let env = TestEnv::new()?; - let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap()); - let client = Builder::new(base_url.as_str()).build_async()?; - - // set env to `initial_env_height` - if let Some(to_mine) = t - .initial_env_height - .checked_sub(env.make_checkpoint_tip().height()) - { - env.mine_blocks(to_mine as _, None)?; - } - while client.get_height().await? < t.initial_env_height { - std::thread::sleep(Duration::from_millis(10)); - } - - // craft initial `local_chain` - let local_chain = { - let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?); - // force `chain_update_blocking` to add all checkpoints in `t.initial_cps` - let anchors = t - .initial_cps - .iter() - .map(|&height| -> anyhow::Result<_> { - Ok(( - ConfirmationBlockTime { - block_id: BlockId { - height, - hash: env.bitcoind.client.get_block_hash(height as _)?, - }, - confirmation_time: height as _, - }, - Txid::all_zeros(), - )) - }) - .collect::>>()?; - let update = chain_update( - &client, - &fetch_latest_blocks(&client).await?, - &chain.tip(), - &anchors, - ) - .await?; - chain.apply_update(update)?; - chain - }; - - // extend env chain - if let Some(to_mine) = t - .final_env_height - .checked_sub(env.make_checkpoint_tip().height()) - { - env.mine_blocks(to_mine as _, None)?; - } - while client.get_height().await? < t.final_env_height { - std::thread::sleep(Duration::from_millis(10)); - } - - // craft update - let update = { - let anchors = t - .anchors - .iter() - .map(|&(height, txid)| -> anyhow::Result<_> { - Ok(( - ConfirmationBlockTime { - block_id: BlockId { - height, - hash: env.bitcoind.client.get_block_hash(height as _)?, - }, - confirmation_time: height as _, - }, - txid, - )) - }) - .collect::>()?; - chain_update( - &client, - &fetch_latest_blocks(&client).await?, - &local_chain.tip(), - &anchors, - ) - .await? 
- }; - - // apply update - let mut updated_local_chain = local_chain.clone(); - updated_local_chain.apply_update(update)?; - - assert!( - { - let initial_heights = local_chain - .iter_checkpoints() - .map(|cp| cp.height()) - .collect::>(); - let updated_heights = updated_local_chain - .iter_checkpoints() - .map(|cp| cp.height()) - .collect::>(); - updated_heights.is_superset(&initial_heights) - }, - "heights from the initial chain must all be in the updated chain", - ); - - assert!( - { - let exp_anchor_heights = t - .anchors - .iter() - .map(|(h, _)| *h) - .chain(t.initial_cps.iter().copied()) - .collect::>(); - let anchor_heights = updated_local_chain - .iter_checkpoints() - .map(|cp| cp.height()) - .collect::>(); - anchor_heights.is_superset(&exp_anchor_heights) - }, - "anchor heights must all be in updated chain", - ); - } - - Ok(()) - } -} diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs deleted file mode 100644 index bee97fee..00000000 --- a/crates/esplora/src/blocking_ext.rs +++ /dev/null @@ -1,890 +0,0 @@ -use bdk_core::collections::{BTreeMap, BTreeSet, HashSet}; -use bdk_core::spk_client::{ - FullScanRequest, FullScanResponse, SpkWithExpectedTxids, SyncRequest, SyncResponse, -}; -use bdk_core::{ - bitcoin::{BlockHash, OutPoint, Txid}, - BlockId, CheckPoint, ConfirmationBlockTime, Indexed, TxUpdate, -}; -use esplora_client::{OutputStatus, Tx}; -use std::thread::JoinHandle; - -use crate::{insert_anchor_or_seen_at_from_status, insert_prevouts}; - -/// [`esplora_client::Error`] -pub type Error = Box; - -/// Trait to extend the functionality of [`esplora_client::BlockingClient`]. -/// -/// Refer to [crate-level documentation](crate) for more. -pub trait EsploraExt { - /// Scan keychain scripts for transactions against Esplora, returning an update that can be - /// applied to the receiving structures. - /// - /// `request` provides the data required to perform a script-pubkey-based full scan - /// (see [`FullScanRequest`]). The full scan for each keychain (`K`) stops after a gap of - /// `stop_gap` script pubkeys with no associated transactions. `parallel_requests` specifies - /// the maximum number of HTTP requests to make in parallel. - /// - /// Refer to [crate-level docs](crate) for more. - fn full_scan>>( - &self, - request: R, - stop_gap: usize, - parallel_requests: usize, - ) -> Result, Error>; - - /// Sync a set of scripts, txids, and/or outpoints against Esplora. - /// - /// `request` provides the data required to perform a script-pubkey-based sync (see - /// [`SyncRequest`]). `parallel_requests` specifies the maximum number of HTTP requests to make - /// in parallel. - /// - /// Refer to [crate-level docs](crate) for more. - fn sync>>( - &self, - request: R, - parallel_requests: usize, - ) -> Result; -} - -impl EsploraExt for esplora_client::BlockingClient { - fn full_scan>>( - &self, - request: R, - stop_gap: usize, - parallel_requests: usize, - ) -> Result, Error> { - let mut request: FullScanRequest = request.into(); - let start_time = request.start_time(); - - let chain_tip = request.chain_tip(); - let latest_blocks = if chain_tip.is_some() { - Some(fetch_latest_blocks(self)?) 
- } else { - None - }; - - let mut tx_update = TxUpdate::default(); - let mut inserted_txs = HashSet::::new(); - let mut last_active_indices = BTreeMap::::new(); - for keychain in request.keychains() { - let keychain_spks = request - .iter_spks(keychain.clone()) - .map(|(spk_i, spk)| (spk_i, spk.into())); - let (update, last_active_index) = fetch_txs_with_keychain_spks( - self, - start_time, - &mut inserted_txs, - keychain_spks, - stop_gap, - parallel_requests, - )?; - tx_update.extend(update); - if let Some(last_active_index) = last_active_index { - last_active_indices.insert(keychain, last_active_index); - } - } - - let chain_update = match (chain_tip, latest_blocks) { - (Some(chain_tip), Some(latest_blocks)) => Some(chain_update( - self, - &latest_blocks, - &chain_tip, - &tx_update.anchors, - )?), - _ => None, - }; - - Ok(FullScanResponse { - chain_update, - tx_update, - last_active_indices, - }) - } - - fn sync>>( - &self, - request: R, - parallel_requests: usize, - ) -> Result { - let mut request: SyncRequest = request.into(); - let start_time = request.start_time(); - - let chain_tip = request.chain_tip(); - let latest_blocks = if chain_tip.is_some() { - Some(fetch_latest_blocks(self)?) - } else { - None - }; - - let mut tx_update = TxUpdate::::default(); - let mut inserted_txs = HashSet::::new(); - tx_update.extend(fetch_txs_with_spks( - self, - start_time, - &mut inserted_txs, - request.iter_spks_with_expected_txids(), - parallel_requests, - )?); - tx_update.extend(fetch_txs_with_txids( - self, - start_time, - &mut inserted_txs, - request.iter_txids(), - parallel_requests, - )?); - tx_update.extend(fetch_txs_with_outpoints( - self, - start_time, - &mut inserted_txs, - request.iter_outpoints(), - parallel_requests, - )?); - - let chain_update = match (chain_tip, latest_blocks) { - (Some(chain_tip), Some(latest_blocks)) => Some(chain_update( - self, - &latest_blocks, - &chain_tip, - &tx_update.anchors, - )?), - _ => None, - }; - - Ok(SyncResponse { - chain_update, - tx_update, - }) - } -} - -/// Fetch latest blocks from Esplora in an atomic call. -/// -/// We want to do this before fetching transactions and anchors as we cannot fetch latest blocks AND -/// transactions atomically, and the checkpoint tip is used to determine last-scanned block (for -/// block-based chain-sources). Therefore it's better to be conservative when setting the tip (use -/// an earlier tip rather than a later tip) otherwise the caller may accidentally skip blocks when -/// alternating between chain-sources. -fn fetch_latest_blocks( - client: &esplora_client::BlockingClient, -) -> Result, Error> { - Ok(client - .get_blocks(None)? - .into_iter() - .map(|b| (b.time.height, b.id)) - .collect()) -} - -/// Used instead of [`esplora_client::BlockingClient::get_block_hash`]. -/// -/// This first checks the previously fetched `latest_blocks` before fetching from Esplora again. -fn fetch_block( - client: &esplora_client::BlockingClient, - latest_blocks: &BTreeMap, - height: u32, -) -> Result, Error> { - if let Some(&hash) = latest_blocks.get(&height) { - return Ok(Some(hash)); - } - - // We avoid fetching blocks higher than previously fetched `latest_blocks` as the local chain - // tip is used to signal for the last-synced-up-to-height. - let &tip_height = latest_blocks - .keys() - .last() - .expect("must have atleast one entry"); - if height > tip_height { - return Ok(None); - } - - Ok(Some(client.get_block_hash(height)?)) -} - -/// Create the [`local_chain::Update`]. 
-/// -/// We want to have a corresponding checkpoint per anchor height. However, checkpoints fetched -/// should not surpass `latest_blocks`. -fn chain_update( - client: &esplora_client::BlockingClient, - latest_blocks: &BTreeMap, - local_tip: &CheckPoint, - anchors: &BTreeSet<(ConfirmationBlockTime, Txid)>, -) -> Result { - let mut point_of_agreement = None; - let mut conflicts = vec![]; - for local_cp in local_tip.iter() { - let remote_hash = match fetch_block(client, latest_blocks, local_cp.height())? { - Some(hash) => hash, - None => continue, - }; - if remote_hash == local_cp.hash() { - point_of_agreement = Some(local_cp.clone()); - break; - } else { - // it is not strictly necessary to include all the conflicted heights (we do need the - // first one) but it seems prudent to make sure the updated chain's heights are a - // superset of the existing chain after update. - conflicts.push(BlockId { - height: local_cp.height(), - hash: remote_hash, - }); - } - } - - let mut tip = point_of_agreement.expect("remote esplora should have same genesis block"); - - tip = tip - .extend(conflicts.into_iter().rev()) - .expect("evicted are in order"); - - for (anchor, _) in anchors { - let height = anchor.block_id.height; - if tip.get(height).is_none() { - let hash = match fetch_block(client, latest_blocks, height)? { - Some(hash) => hash, - None => continue, - }; - tip = tip.insert(BlockId { height, hash }); - } - } - - // insert the most recent blocks at the tip to make sure we update the tip and make the update - // robust. - for (&height, &hash) in latest_blocks.iter() { - tip = tip.insert(BlockId { height, hash }); - } - - Ok(tip) -} - -fn fetch_txs_with_keychain_spks>>( - client: &esplora_client::BlockingClient, - start_time: u64, - inserted_txs: &mut HashSet, - mut keychain_spks: I, - stop_gap: usize, - parallel_requests: usize, -) -> Result<(TxUpdate, Option), Error> { - type TxsOfSpkIndex = (u32, Vec, HashSet); - - let mut update = TxUpdate::::default(); - let mut last_index = Option::::None; - let mut last_active_index = Option::::None; - - loop { - let handles = keychain_spks - .by_ref() - .take(parallel_requests) - .map(|(spk_index, spk)| { - let client = client.clone(); - let expected_txids = spk.expected_txids; - let spk = spk.spk; - std::thread::spawn(move || -> Result { - let mut last_txid = None; - let mut spk_txs = Vec::new(); - loop { - let txs = client.scripthash_txs(&spk, last_txid)?; - let tx_count = txs.len(); - last_txid = txs.last().map(|tx| tx.txid); - spk_txs.extend(txs); - if tx_count < 25 { - break; - } - } - let got_txids = spk_txs.iter().map(|tx| tx.txid).collect::>(); - let evicted_txids = expected_txids - .difference(&got_txids) - .copied() - .collect::>(); - Ok((spk_index, spk_txs, evicted_txids)) - }) - }) - .collect::>>>(); - - if handles.is_empty() { - break; - } - - for handle in handles { - let (index, txs, evicted) = handle.join().expect("thread must not panic")?; - last_index = Some(index); - if !txs.is_empty() { - last_active_index = Some(index); - } - for tx in txs { - if inserted_txs.insert(tx.txid) { - update.txs.push(tx.to_tx().into()); - } - insert_anchor_or_seen_at_from_status(&mut update, start_time, tx.txid, tx.status); - insert_prevouts(&mut update, tx.vin); - } - update - .evicted_ats - .extend(evicted.into_iter().map(|txid| (txid, start_time))); - } - - let last_index = last_index.expect("Must be set since handles wasn't empty."); - let gap_limit_reached = if let Some(i) = last_active_index { - last_index >= i.saturating_add(stop_gap as u32) - } 
else { - last_index + 1 >= stop_gap as u32 - }; - if gap_limit_reached { - break; - } - } - - Ok((update, last_active_index)) -} - -/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning `spks` -/// against Esplora. -/// -/// Unlike with [`EsploraExt::fetch_txs_with_keychain_spks`], `spks` must be *bounded* as all -/// contained scripts will be scanned. `parallel_requests` specifies the maximum number of HTTP -/// requests to make in parallel. -/// -/// Refer to [crate-level docs](crate) for more. -fn fetch_txs_with_spks>( - client: &esplora_client::BlockingClient, - start_time: u64, - inserted_txs: &mut HashSet, - spks: I, - parallel_requests: usize, -) -> Result, Error> { - fetch_txs_with_keychain_spks( - client, - start_time, - inserted_txs, - spks.into_iter().enumerate().map(|(i, spk)| (i as u32, spk)), - usize::MAX, - parallel_requests, - ) - .map(|(update, _)| update) -} - -/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning `txids` -/// against Esplora. -/// -/// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel. -/// -/// Refer to [crate-level docs](crate) for more. -fn fetch_txs_with_txids>( - client: &esplora_client::BlockingClient, - start_time: u64, - inserted_txs: &mut HashSet, - txids: I, - parallel_requests: usize, -) -> Result, Error> { - let mut update = TxUpdate::::default(); - // Only fetch for non-inserted txs. - let mut txids = txids - .into_iter() - .filter(|txid| !inserted_txs.contains(txid)) - .collect::>() - .into_iter(); - loop { - let handles = txids - .by_ref() - .take(parallel_requests) - .map(|txid| { - let client = client.clone(); - std::thread::spawn(move || { - client - .get_tx_info(&txid) - .map_err(Box::new) - .map(|t| (txid, t)) - }) - }) - .collect::), Error>>>>(); - - if handles.is_empty() { - break; - } - - for handle in handles { - let (txid, tx_info) = handle.join().expect("thread must not panic")?; - if let Some(tx_info) = tx_info { - if inserted_txs.insert(txid) { - update.txs.push(tx_info.to_tx().into()); - } - insert_anchor_or_seen_at_from_status(&mut update, start_time, txid, tx_info.status); - insert_prevouts(&mut update, tx_info.vin); - } - } - } - Ok(update) -} - -/// Fetch transactions and [`ConfirmationBlockTime`]s that contain and spend the provided -/// `outpoints`. -/// -/// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel. -/// -/// Refer to [crate-level docs](crate) for more. -fn fetch_txs_with_outpoints>( - client: &esplora_client::BlockingClient, - start_time: u64, - inserted_txs: &mut HashSet, - outpoints: I, - parallel_requests: usize, -) -> Result, Error> { - let outpoints = outpoints.into_iter().collect::>(); - let mut update = TxUpdate::::default(); - - // make sure txs exists in graph and tx statuses are updated - // TODO: We should maintain a tx cache (like we do with Electrum). 
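The TODO above (repeated from the async module) points at a transaction cache like the one the Electrum client keeps. Purely as a hedged illustration of the idea, not the crate's API, such a cache might look like:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use bdk_core::bitcoin::{Transaction, Txid};

/// Hypothetical tx cache in the spirit of the TODO above: consult the cache
/// before hitting Esplora, insert on miss. Sketch only.
#[derive(Default, Clone)]
struct TxCache(Arc<Mutex<HashMap<Txid, Arc<Transaction>>>>);

impl TxCache {
    fn get_or_insert_with(
        &self,
        txid: Txid,
        fetch: impl FnOnce() -> Result<Transaction, Box<esplora_client::Error>>,
    ) -> Result<Arc<Transaction>, Box<esplora_client::Error>> {
        let mut map = self.0.lock().unwrap();
        if let Some(tx) = map.get(&txid) {
            return Ok(Arc::clone(tx));
        }
        let tx = Arc::new(fetch()?);
        map.insert(txid, Arc::clone(&tx));
        Ok(tx)
    }
}
```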
- update.extend(fetch_txs_with_txids( - client, - start_time, - inserted_txs, - outpoints.iter().map(|op| op.txid), - parallel_requests, - )?); - - // get outpoint spend-statuses - let mut outpoints = outpoints.into_iter(); - let mut missing_txs = Vec::::with_capacity(outpoints.len()); - loop { - let handles = outpoints - .by_ref() - .take(parallel_requests) - .map(|op| { - let client = client.clone(); - std::thread::spawn(move || { - client - .get_output_status(&op.txid, op.vout as _) - .map_err(Box::new) - }) - }) - .collect::, Error>>>>(); - - if handles.is_empty() { - break; - } - - for handle in handles { - if let Some(op_status) = handle.join().expect("thread must not panic")? { - let spend_txid = match op_status.txid { - Some(txid) => txid, - None => continue, - }; - if !inserted_txs.contains(&spend_txid) { - missing_txs.push(spend_txid); - } - if let Some(spend_status) = op_status.status { - insert_anchor_or_seen_at_from_status( - &mut update, - start_time, - spend_txid, - spend_status, - ); - } - } - } - } - - update.extend(fetch_txs_with_txids( - client, - start_time, - inserted_txs, - missing_txs, - parallel_requests, - )?); - Ok(update) -} - -#[cfg(test)] -mod test { - use crate::blocking_ext::{chain_update, fetch_latest_blocks}; - use bdk_chain::bitcoin::hashes::Hash; - use bdk_chain::bitcoin::Txid; - use bdk_chain::local_chain::LocalChain; - use bdk_chain::BlockId; - use bdk_core::ConfirmationBlockTime; - use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv}; - use esplora_client::{BlockHash, Builder}; - use std::collections::{BTreeMap, BTreeSet}; - use std::time::Duration; - - macro_rules! h { - ($index:literal) => {{ - bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes()) - }}; - } - - macro_rules! local_chain { - [ $(($height:expr, $block_hash:expr)), * ] => {{ - #[allow(unused_mut)] - bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect()) - .expect("chain must have genesis block") - }}; - } - - /// Ensure that update does not remove heights (from original), and all anchor heights are included. - #[test] - pub fn test_finalize_chain_update() -> anyhow::Result<()> { - struct TestCase<'a> { - #[allow(dead_code)] - name: &'a str, - /// Initial blockchain height to start the env with. - initial_env_height: u32, - /// Initial checkpoint heights to start with in the local chain. - initial_cps: &'a [u32], - /// The final blockchain height of the env. - final_env_height: u32, - /// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch - /// the blockhash from the env. 
- anchors: &'a [(u32, Txid)], - } - - let test_cases = [ - TestCase { - name: "chain_extends", - initial_env_height: 60, - initial_cps: &[59, 60], - final_env_height: 90, - anchors: &[], - }, - TestCase { - name: "introduce_older_heights", - initial_env_height: 50, - initial_cps: &[10, 15], - final_env_height: 50, - anchors: &[(11, h!("A")), (14, h!("B"))], - }, - TestCase { - name: "introduce_older_heights_after_chain_extends", - initial_env_height: 50, - initial_cps: &[10, 15], - final_env_height: 100, - anchors: &[(11, h!("A")), (14, h!("B"))], - }, - ]; - - for t in test_cases.into_iter() { - let env = TestEnv::new()?; - let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap()); - let client = Builder::new(base_url.as_str()).build_blocking(); - - // set env to `initial_env_height` - if let Some(to_mine) = t - .initial_env_height - .checked_sub(env.make_checkpoint_tip().height()) - { - env.mine_blocks(to_mine as _, None)?; - } - while client.get_height()? < t.initial_env_height { - std::thread::sleep(Duration::from_millis(10)); - } - - // craft initial `local_chain` - let local_chain = { - let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?); - // force `chain_update_blocking` to add all checkpoints in `t.initial_cps` - let anchors = t - .initial_cps - .iter() - .map(|&height| -> anyhow::Result<_> { - Ok(( - ConfirmationBlockTime { - block_id: BlockId { - height, - hash: env.bitcoind.client.get_block_hash(height as _)?, - }, - confirmation_time: height as _, - }, - Txid::all_zeros(), - )) - }) - .collect::>>()?; - let update = chain_update( - &client, - &fetch_latest_blocks(&client)?, - &chain.tip(), - &anchors, - )?; - chain.apply_update(update)?; - chain - }; - - // extend env chain - if let Some(to_mine) = t - .final_env_height - .checked_sub(env.make_checkpoint_tip().height()) - { - env.mine_blocks(to_mine as _, None)?; - } - while client.get_height()? < t.final_env_height { - std::thread::sleep(Duration::from_millis(10)); - } - - // craft update - let update = { - let anchors = t - .anchors - .iter() - .map(|&(height, txid)| -> anyhow::Result<_> { - Ok(( - ConfirmationBlockTime { - block_id: BlockId { - height, - hash: env.bitcoind.client.get_block_hash(height as _)?, - }, - confirmation_time: height as _, - }, - txid, - )) - }) - .collect::>()?; - chain_update( - &client, - &fetch_latest_blocks(&client)?, - &local_chain.tip(), - &anchors, - )? 
- }; - - // apply update - let mut updated_local_chain = local_chain.clone(); - updated_local_chain.apply_update(update)?; - - assert!( - { - let initial_heights = local_chain - .iter_checkpoints() - .map(|cp| cp.height()) - .collect::>(); - let updated_heights = updated_local_chain - .iter_checkpoints() - .map(|cp| cp.height()) - .collect::>(); - updated_heights.is_superset(&initial_heights) - }, - "heights from the initial chain must all be in the updated chain", - ); - - assert!( - { - let exp_anchor_heights = t - .anchors - .iter() - .map(|(h, _)| *h) - .chain(t.initial_cps.iter().copied()) - .collect::>(); - let anchor_heights = updated_local_chain - .iter_checkpoints() - .map(|cp| cp.height()) - .collect::>(); - anchor_heights.is_superset(&exp_anchor_heights) - }, - "anchor heights must all be in updated chain", - ); - } - - Ok(()) - } - - #[test] - fn update_local_chain() -> anyhow::Result<()> { - const TIP_HEIGHT: u32 = 50; - - let env = TestEnv::new()?; - let blocks = { - let bitcoind_client = &env.bitcoind.client; - assert_eq!(bitcoind_client.get_block_count()?, 1); - [ - (0, bitcoind_client.get_block_hash(0)?), - (1, bitcoind_client.get_block_hash(1)?), - ] - .into_iter() - .chain((2..).zip(env.mine_blocks((TIP_HEIGHT - 1) as usize, None)?)) - .collect::>() - }; - // so new blocks can be seen by Electrs - let env = env.reset_electrsd()?; - let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap()); - let client = Builder::new(base_url.as_str()).build_blocking(); - - struct TestCase { - name: &'static str, - /// Original local chain to start off with. - chain: LocalChain, - /// Heights of floating anchors. [`chain_update_blocking`] will request for checkpoints - /// of these heights. - request_heights: &'static [u32], - /// The expected local chain result (heights only). 
- exp_update_heights: &'static [u32], - } - - let test_cases = [ - TestCase { - name: "request_later_blocks", - chain: local_chain![(0, blocks[&0]), (21, blocks[&21])], - request_heights: &[22, 25, 28], - exp_update_heights: &[21, 22, 25, 28], - }, - TestCase { - name: "request_prev_blocks", - chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (5, blocks[&5])], - request_heights: &[4], - exp_update_heights: &[4, 5], - }, - TestCase { - name: "request_prev_blocks_2", - chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (10, blocks[&10])], - request_heights: &[4, 6], - exp_update_heights: &[4, 6, 10], - }, - TestCase { - name: "request_later_and_prev_blocks", - chain: local_chain![(0, blocks[&0]), (7, blocks[&7]), (11, blocks[&11])], - request_heights: &[8, 9, 15], - exp_update_heights: &[8, 9, 11, 15], - }, - TestCase { - name: "request_tip_only", - chain: local_chain![(0, blocks[&0]), (5, blocks[&5]), (49, blocks[&49])], - request_heights: &[TIP_HEIGHT], - exp_update_heights: &[49], - }, - TestCase { - name: "request_nothing", - chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, blocks[&23])], - request_heights: &[], - exp_update_heights: &[23], - }, - TestCase { - name: "request_nothing_during_reorg", - chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, h!("23"))], - request_heights: &[], - exp_update_heights: &[13, 23], - }, - TestCase { - name: "request_nothing_during_reorg_2", - chain: local_chain![ - (0, blocks[&0]), - (21, blocks[&21]), - (22, h!("22")), - (23, h!("23")) - ], - request_heights: &[], - exp_update_heights: &[21, 22, 23], - }, - TestCase { - name: "request_prev_blocks_during_reorg", - chain: local_chain![ - (0, blocks[&0]), - (21, blocks[&21]), - (22, h!("22")), - (23, h!("23")) - ], - request_heights: &[17, 20], - exp_update_heights: &[17, 20, 21, 22, 23], - }, - TestCase { - name: "request_later_blocks_during_reorg", - chain: local_chain![ - (0, blocks[&0]), - (9, blocks[&9]), - (22, h!("22")), - (23, h!("23")) - ], - request_heights: &[25, 27], - exp_update_heights: &[9, 22, 23, 25, 27], - }, - TestCase { - name: "request_later_blocks_during_reorg_2", - chain: local_chain![(0, blocks[&0]), (9, h!("9"))], - request_heights: &[10], - exp_update_heights: &[0, 9, 10], - }, - TestCase { - name: "request_later_and_prev_blocks_during_reorg", - chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (9, h!("9"))], - request_heights: &[8, 11], - exp_update_heights: &[1, 8, 9, 11], - }, - ]; - - for (i, t) in test_cases.into_iter().enumerate() { - let mut chain = t.chain; - - let mock_anchors = t - .request_heights - .iter() - .map(|&h| { - let anchor_blockhash: BlockHash = bdk_chain::bitcoin::hashes::Hash::hash( - &format!("hash_at_height_{}", h).into_bytes(), - ); - let txid: Txid = bdk_chain::bitcoin::hashes::Hash::hash( - &format!("txid_at_height_{}", h).into_bytes(), - ); - let anchor = ConfirmationBlockTime { - block_id: BlockId { - height: h, - hash: anchor_blockhash, - }, - confirmation_time: h as _, - }; - (anchor, txid) - }) - .collect::>(); - let chain_update = chain_update( - &client, - &fetch_latest_blocks(&client)?, - &chain.tip(), - &mock_anchors, - )?; - - let update_blocks = chain_update - .iter() - .map(|cp| cp.block_id()) - .collect::>(); - - let exp_update_blocks = t - .exp_update_heights - .iter() - .map(|&height| { - let hash = blocks[&height]; - BlockId { height, hash } - }) - .chain( - // Electrs Esplora `get_block` call fetches 10 blocks which is included in the - // update - blocks - .range(TIP_HEIGHT - 9..) 
- .map(|(&height, &hash)| BlockId { height, hash }), - ) - .collect::>(); - - assert!( - update_blocks.is_superset(&exp_update_blocks), - "[{}:{}] unexpected update", - i, - t.name - ); - - let _ = chain - .apply_update(chain_update) - .unwrap_or_else(|err| panic!("[{}:{}] update failed to apply: {}", i, t.name, err)); - - // all requested heights must exist in the final chain - for height in t.request_heights { - let exp_blockhash = blocks.get(height).expect("block must exist in bitcoind"); - assert_eq!( - chain.get(*height).map(|cp| cp.hash()), - Some(*exp_blockhash), - "[{}:{}] block {}:{} must exist in final chain", - i, - t.name, - height, - exp_blockhash - ); - } - } - - Ok(()) - } -} diff --git a/crates/esplora/src/lib.rs b/crates/esplora/src/lib.rs deleted file mode 100644 index a4d4b9a5..00000000 --- a/crates/esplora/src/lib.rs +++ /dev/null @@ -1,80 +0,0 @@ -#![doc = include_str!("../README.md")] -//! # Stop Gap -//! -//! [`EsploraExt::full_scan`] takes in a `stop_gap` input which is defined as the maximum number of -//! consecutive unused script pubkeys to scan transactions for before stopping. -//! -//! For example, with a `stop_gap` of 3, `full_scan` will keep scanning until it encounters 3 -//! consecutive script pubkeys with no associated transactions. -//! -//! This follows the same approach as other Bitcoin-related software, -//! such as [Electrum](https://electrum.readthedocs.io/en/latest/faq.html#what-is-the-gap-limit), -//! [BTCPay Server](https://docs.btcpayserver.org/FAQ/Wallet/#the-gap-limit-problem), -//! and [Sparrow](https://www.sparrowwallet.com/docs/faq.html#ive-restored-my-wallet-but-some-of-my-funds-are-missing). -//! -//! A `stop_gap` of 0 will be treated as a `stop_gap` of 1. -//! -//! # Async -//! -//! Just like how [`EsploraExt`] extends the functionality of an -//! [`esplora_client::BlockingClient`], [`EsploraAsyncExt`] is the async version which extends -//! [`esplora_client::AsyncClient`]. - -use bdk_core::bitcoin::{Amount, OutPoint, TxOut, Txid}; -use bdk_core::{BlockId, ConfirmationBlockTime, TxUpdate}; -use esplora_client::TxStatus; - -pub use esplora_client; - -#[cfg(feature = "blocking")] -mod blocking_ext; -#[cfg(feature = "blocking")] -pub use blocking_ext::*; - -#[cfg(feature = "async")] -mod async_ext; -#[cfg(feature = "async")] -pub use async_ext::*; - -fn insert_anchor_or_seen_at_from_status( - update: &mut TxUpdate, - start_time: u64, - txid: Txid, - status: TxStatus, -) { - if let TxStatus { - block_height: Some(height), - block_hash: Some(hash), - block_time: Some(time), - .. - } = status - { - let anchor = ConfirmationBlockTime { - block_id: BlockId { height, hash }, - confirmation_time: time, - }; - update.anchors.insert((anchor, txid)); - } else { - update.seen_ats.insert((txid, start_time)); - } -} - -/// Inserts floating txouts into `tx_graph` using [`Vin`](esplora_client::api::Vin)s returned by -/// Esplora. 
-fn insert_prevouts( - update: &mut TxUpdate, - esplora_inputs: impl IntoIterator, -) { - let prevouts = esplora_inputs - .into_iter() - .filter_map(|vin| Some((vin.txid, vin.vout, vin.prevout?))); - for (prev_txid, prev_vout, prev_txout) in prevouts { - update.txouts.insert( - OutPoint::new(prev_txid, prev_vout), - TxOut { - script_pubkey: prev_txout.scriptpubkey, - value: Amount::from_sat(prev_txout.value), - }, - ); - } -} diff --git a/crates/esplora/tests/async_ext.rs b/crates/esplora/tests/async_ext.rs deleted file mode 100644 index 987f04e4..00000000 --- a/crates/esplora/tests/async_ext.rs +++ /dev/null @@ -1,369 +0,0 @@ -use bdk_chain::bitcoin::{Address, Amount}; -use bdk_chain::local_chain::LocalChain; -use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; -use bdk_chain::spk_txout::SpkTxOutIndex; -use bdk_chain::{ConfirmationBlockTime, IndexedTxGraph, TxGraph}; -use bdk_esplora::EsploraAsyncExt; -use bdk_testenv::bitcoincore_rpc::json::CreateRawTransactionInput; -use bdk_testenv::bitcoincore_rpc::RawTx; -use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv}; -use esplora_client::{self, Builder}; -use std::collections::{BTreeSet, HashMap, HashSet}; -use std::str::FromStr; -use std::thread::sleep; -use std::time::Duration; - -mod common; - -// Ensure that a wallet can detect a malicious replacement of an incoming transaction. -// -// This checks that both the Esplora chain source and the receiving structures properly track the -// replaced transaction as missing. -#[tokio::test] -pub async fn detect_receive_tx_cancel() -> anyhow::Result<()> { - const SEND_TX_FEE: Amount = Amount::from_sat(1000); - const UNDO_SEND_TX_FEE: Amount = Amount::from_sat(2000); - - let env = TestEnv::new()?; - let rpc_client = env.rpc_client(); - let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap()); - let client = Builder::new(base_url.as_str()).build_async()?; - - let mut graph = IndexedTxGraph::::new(SpkTxOutIndex::<()>::default()); - let (chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?); - - // Get receiving address. - let receiver_spk = common::get_test_spk(); - let receiver_addr = Address::from_script(&receiver_spk, bdk_chain::bitcoin::Network::Regtest)?; - graph.index.insert_spk((), receiver_spk); - - env.mine_blocks(101, None)?; - - // Select a UTXO to use as an input for constructing our test transactions. - let selected_utxo = rpc_client - .list_unspent(None, None, None, Some(false), None)? - .into_iter() - // Find a block reward tx. - .find(|utxo| utxo.amount == Amount::from_int_btc(50)) - .expect("Must find a block reward UTXO"); - - // Derive the sender's address from the selected UTXO. - let sender_spk = selected_utxo.script_pub_key.clone(); - let sender_addr = Address::from_script(&sender_spk, bdk_chain::bitcoin::Network::Regtest) - .expect("Failed to derive address from UTXO"); - - // Setup the common inputs used by both `send_tx` and `undo_send_tx`. - let inputs = [CreateRawTransactionInput { - txid: selected_utxo.txid, - vout: selected_utxo.vout, - sequence: None, - }]; - - // Create and sign the `send_tx` that sends funds to the receiver address. - let send_tx_outputs = HashMap::from([( - receiver_addr.to_string(), - selected_utxo.amount - SEND_TX_FEE, - )]); - let send_tx = rpc_client.create_raw_transaction(&inputs, &send_tx_outputs, None, Some(true))?; - let send_tx = rpc_client - .sign_raw_transaction_with_wallet(send_tx.raw_hex(), None, None)? - .transaction()?; - - // Create and sign the `undo_send_tx` transaction. 
This redirects funds back to the sender - // address. - let undo_send_outputs = HashMap::from([( - sender_addr.to_string(), - selected_utxo.amount - UNDO_SEND_TX_FEE, - )]); - let undo_send_tx = - rpc_client.create_raw_transaction(&inputs, &undo_send_outputs, None, Some(true))?; - let undo_send_tx = rpc_client - .sign_raw_transaction_with_wallet(undo_send_tx.raw_hex(), None, None)? - .transaction()?; - - // Sync after broadcasting the `send_tx`. Ensure that we detect and receive the `send_tx`. - let send_txid = env.rpc_client().send_raw_transaction(send_tx.raw_hex())?; - env.wait_until_electrum_sees_txid(send_txid, Duration::from_secs(6))?; - let sync_request = SyncRequest::builder() - .chain_tip(chain.tip()) - .spks_with_indexes(graph.index.all_spks().clone()) - .expected_spk_txids(graph.list_expected_spk_txids(&chain, chain.tip().block_id(), ..)); - let sync_response = client.sync(sync_request, 1).await?; - assert!( - sync_response - .tx_update - .txs - .iter() - .any(|tx| tx.compute_txid() == send_txid), - "sync response must include the send_tx" - ); - let changeset = graph.apply_update(sync_response.tx_update.clone()); - assert!( - changeset.tx_graph.txs.contains(&send_tx), - "tx graph must deem send_tx relevant and include it" - ); - - // Sync after broadcasting the `undo_send_tx`. Verify that `send_tx` is now missing from the - // mempool. - let undo_send_txid = env - .rpc_client() - .send_raw_transaction(undo_send_tx.raw_hex())?; - env.wait_until_electrum_sees_txid(undo_send_txid, Duration::from_secs(6))?; - let sync_request = SyncRequest::builder() - .chain_tip(chain.tip()) - .spks_with_indexes(graph.index.all_spks().clone()) - .expected_spk_txids(graph.list_expected_spk_txids(&chain, chain.tip().block_id(), ..)); - let sync_response = client.sync(sync_request, 1).await?; - assert!( - sync_response - .tx_update - .evicted_ats - .iter() - .any(|(txid, _)| *txid == send_txid), - "sync response must track send_tx as missing from mempool" - ); - let changeset = graph.apply_update(sync_response.tx_update.clone()); - assert!( - changeset.tx_graph.last_evicted.contains_key(&send_txid), - "tx graph must track send_tx as missing" - ); - - Ok(()) -} -#[tokio::test] -pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> { - let env = TestEnv::new()?; - let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap()); - let client = Builder::new(base_url.as_str()).build_async()?; - - let receive_address0 = - Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked(); - let receive_address1 = - Address::from_str("bcrt1qfjg5lv3dvc9az8patec8fjddrs4aqtauadnagr")?.assume_checked(); - - let misc_spks = [ - receive_address0.script_pubkey(), - receive_address1.script_pubkey(), - ]; - - let _block_hashes = env.mine_blocks(101, None)?; - let txid1 = env.bitcoind.client.send_to_address( - &receive_address1, - Amount::from_sat(10000), - None, - None, - None, - None, - Some(1), - None, - )?; - let txid2 = env.bitcoind.client.send_to_address( - &receive_address0, - Amount::from_sat(20000), - None, - None, - None, - None, - Some(1), - None, - )?; - let _block_hashes = env.mine_blocks(1, None)?; - while client.get_height().await.unwrap() < 102 { - sleep(Duration::from_millis(10)) - } - - // use a full checkpoint linked list (since this is not what we are testing) - let cp_tip = env.make_checkpoint_tip(); - - let sync_update = { - let request = SyncRequest::builder() - .chain_tip(cp_tip.clone()) - .spks(misc_spks); - client.sync(request, 1).await? 
- }; - - assert!( - { - let update_cps = sync_update - .chain_update - .iter() - .map(|cp| cp.block_id()) - .collect::>(); - let superset_cps = cp_tip - .iter() - .map(|cp| cp.block_id()) - .collect::>(); - superset_cps.is_superset(&update_cps) - }, - "update should not alter original checkpoint tip since we already started with all checkpoints", - ); - - let tx_update = sync_update.tx_update; - let updated_graph = { - let mut graph = TxGraph::::default(); - let _ = graph.apply_update(tx_update.clone()); - graph - }; - // Check to see if we have the floating txouts available from our two created transactions' - // previous outputs in order to calculate transaction fees. - for tx in &tx_update.txs { - // Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the - // floating txouts available from the transactions' previous outputs. - let fee = updated_graph.calculate_fee(tx).expect("Fee must exist"); - - // Retrieve the fee in the transaction data from `bitcoind`. - let tx_fee = env - .bitcoind - .client - .get_transaction(&tx.compute_txid(), None) - .expect("Tx must exist") - .fee - .expect("Fee must exist") - .abs() - .to_unsigned() - .expect("valid `Amount`"); - - // Check that the calculated fee matches the fee from the transaction data. - assert_eq!(fee, tx_fee); - } - - assert_eq!( - tx_update - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect::>(), - [txid1, txid2].into(), - "update must include all expected transactions" - ); - Ok(()) -} - -/// Test the bounds of the address scan depending on the `stop_gap`. -#[tokio::test] -pub async fn test_async_update_tx_graph_stop_gap() -> anyhow::Result<()> { - let env = TestEnv::new()?; - let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap()); - let client = Builder::new(base_url.as_str()).build_async()?; - let _block_hashes = env.mine_blocks(101, None)?; - - // Now let's test the gap limit. First of all get a chain of 10 addresses. - let addresses = [ - "bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4", - "bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30", - "bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g", - "bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww", - "bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu", - "bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh", - "bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2", - "bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8", - "bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef", - "bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk", - ]; - let addresses: Vec<_> = addresses - .into_iter() - .map(|s| Address::from_str(s).unwrap().assume_checked()) - .collect(); - let spks: Vec<_> = addresses - .iter() - .enumerate() - .map(|(i, addr)| (i as u32, addr.script_pubkey())) - .collect(); - - // Then receive coins on the 4th address. - let txid_4th_addr = env.bitcoind.client.send_to_address( - &addresses[3], - Amount::from_sat(10000), - None, - None, - None, - None, - Some(1), - None, - )?; - let _block_hashes = env.mine_blocks(1, None)?; - while client.get_height().await.unwrap() < 103 { - sleep(Duration::from_millis(10)) - } - - // use a full checkpoint linked list (since this is not what we are testing) - let cp_tip = env.make_checkpoint_tip(); - - // A scan with a gap limit of 3 won't find the transaction, but a scan with a gap limit of 4 - // will. 
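The arithmetic behind that claim is the stop condition shown earlier in the blocking extension. Restated as a standalone sketch, where the `None` arm is taken from that code and the `Some` arm is the natural counterpart (an assumption here, not a quote):

```rust
/// Sketch of the stop-gap rule: stop once `stop_gap` consecutive unused
/// script pubkeys have been scanned past the last active index.
fn gap_limit_reached(last_active: Option<u32>, last_index: u32, stop_gap: usize) -> bool {
    match last_active {
        // `stop_gap` consecutive unused scripts after the last hit.
        Some(active) => last_index >= active.saturating_add(stop_gap as u32),
        // No hits at all: indices 0..=last_index are all unused.
        None => last_index + 1 >= stop_gap as u32,
    }
}

#[test]
fn stop_gap_examples() {
    // Funds sit on the 4th script (index 3).
    assert!(gap_limit_reached(None, 2, 3)); // stop_gap 3: stops after 0, 1, 2
    assert!(!gap_limit_reached(None, 2, 4)); // stop_gap 4: reaches index 3, finds the tx
}
```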
- let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 3, 1).await? - }; - assert!(full_scan_update.tx_update.txs.is_empty()); - assert!(full_scan_update.last_active_indices.is_empty()); - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 4, 1).await? - }; - assert_eq!( - full_scan_update - .tx_update - .txs - .first() - .unwrap() - .compute_txid(), - txid_4th_addr - ); - assert_eq!(full_scan_update.last_active_indices[&0], 3); - - // Now receive a coin on the last address. - let txid_last_addr = env.bitcoind.client.send_to_address( - &addresses[addresses.len() - 1], - Amount::from_sat(10000), - None, - None, - None, - None, - Some(1), - None, - )?; - let _block_hashes = env.mine_blocks(1, None)?; - while client.get_height().await.unwrap() < 104 { - sleep(Duration::from_millis(10)) - } - - // A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will. - // The last active indice won't be updated in the first case but will in the second one. - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 5, 1).await? - }; - let txs: HashSet<_> = full_scan_update - .tx_update - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect(); - assert_eq!(txs.len(), 1); - assert!(txs.contains(&txid_4th_addr)); - assert_eq!(full_scan_update.last_active_indices[&0], 3); - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 6, 1).await? - }; - let txs: HashSet<_> = full_scan_update - .tx_update - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect(); - assert_eq!(txs.len(), 2); - assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr)); - assert_eq!(full_scan_update.last_active_indices[&0], 9); - - Ok(()) -} diff --git a/crates/esplora/tests/blocking_ext.rs b/crates/esplora/tests/blocking_ext.rs deleted file mode 100644 index d6f8c448..00000000 --- a/crates/esplora/tests/blocking_ext.rs +++ /dev/null @@ -1,370 +0,0 @@ -use bdk_chain::bitcoin::{Address, Amount}; -use bdk_chain::local_chain::LocalChain; -use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; -use bdk_chain::spk_txout::SpkTxOutIndex; -use bdk_chain::{ConfirmationBlockTime, IndexedTxGraph, TxGraph}; -use bdk_esplora::EsploraExt; -use bdk_testenv::bitcoincore_rpc::json::CreateRawTransactionInput; -use bdk_testenv::bitcoincore_rpc::RawTx; -use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv}; -use esplora_client::{self, Builder}; -use std::collections::{BTreeSet, HashMap, HashSet}; -use std::str::FromStr; -use std::thread::sleep; -use std::time::Duration; - -mod common; - -// Ensure that a wallet can detect a malicious replacement of an incoming transaction. -// -// This checks that both the Esplora chain source and the receiving structures properly track the -// replaced transaction as missing. 
-#[test] -pub fn detect_receive_tx_cancel() -> anyhow::Result<()> { - const SEND_TX_FEE: Amount = Amount::from_sat(1000); - const UNDO_SEND_TX_FEE: Amount = Amount::from_sat(2000); - - let env = TestEnv::new()?; - let rpc_client = env.rpc_client(); - let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap()); - let client = Builder::new(base_url.as_str()).build_blocking(); - - let mut graph = IndexedTxGraph::::new(SpkTxOutIndex::<()>::default()); - let (chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?); - - // Get receiving address. - let receiver_spk = common::get_test_spk(); - let receiver_addr = Address::from_script(&receiver_spk, bdk_chain::bitcoin::Network::Regtest)?; - graph.index.insert_spk((), receiver_spk); - - env.mine_blocks(101, None)?; - - // Select a UTXO to use as an input for constructing our test transactions. - let selected_utxo = rpc_client - .list_unspent(None, None, None, Some(false), None)? - .into_iter() - // Find a block reward tx. - .find(|utxo| utxo.amount == Amount::from_int_btc(50)) - .expect("Must find a block reward UTXO"); - - // Derive the sender's address from the selected UTXO. - let sender_spk = selected_utxo.script_pub_key.clone(); - let sender_addr = Address::from_script(&sender_spk, bdk_chain::bitcoin::Network::Regtest) - .expect("Failed to derive address from UTXO"); - - // Setup the common inputs used by both `send_tx` and `undo_send_tx`. - let inputs = [CreateRawTransactionInput { - txid: selected_utxo.txid, - vout: selected_utxo.vout, - sequence: None, - }]; - - // Create and sign the `send_tx` that sends funds to the receiver address. - let send_tx_outputs = HashMap::from([( - receiver_addr.to_string(), - selected_utxo.amount - SEND_TX_FEE, - )]); - let send_tx = rpc_client.create_raw_transaction(&inputs, &send_tx_outputs, None, Some(true))?; - let send_tx = rpc_client - .sign_raw_transaction_with_wallet(send_tx.raw_hex(), None, None)? - .transaction()?; - - // Create and sign the `undo_send_tx` transaction. This redirects funds back to the sender - // address. - let undo_send_outputs = HashMap::from([( - sender_addr.to_string(), - selected_utxo.amount - UNDO_SEND_TX_FEE, - )]); - let undo_send_tx = - rpc_client.create_raw_transaction(&inputs, &undo_send_outputs, None, Some(true))?; - let undo_send_tx = rpc_client - .sign_raw_transaction_with_wallet(undo_send_tx.raw_hex(), None, None)? - .transaction()?; - - // Sync after broadcasting the `send_tx`. Ensure that we detect and receive the `send_tx`. - let send_txid = env.rpc_client().send_raw_transaction(send_tx.raw_hex())?; - env.wait_until_electrum_sees_txid(send_txid, Duration::from_secs(6))?; - let sync_request = SyncRequest::builder() - .chain_tip(chain.tip()) - .spks_with_indexes(graph.index.all_spks().clone()) - .expected_spk_txids(graph.list_expected_spk_txids(&chain, chain.tip().block_id(), ..)); - let sync_response = client.sync(sync_request, 1)?; - assert!( - sync_response - .tx_update - .txs - .iter() - .any(|tx| tx.compute_txid() == send_txid), - "sync response must include the send_tx" - ); - let changeset = graph.apply_update(sync_response.tx_update.clone()); - assert!( - changeset.tx_graph.txs.contains(&send_tx), - "tx graph must deem send_tx relevant and include it" - ); - - // Sync after broadcasting the `undo_send_tx`. Verify that `send_tx` is now missing from the - // mempool. 
- let undo_send_txid = env - .rpc_client() - .send_raw_transaction(undo_send_tx.raw_hex())?; - env.wait_until_electrum_sees_txid(undo_send_txid, Duration::from_secs(6))?; - let sync_request = SyncRequest::builder() - .chain_tip(chain.tip()) - .spks_with_indexes(graph.index.all_spks().clone()) - .expected_spk_txids(graph.list_expected_spk_txids(&chain, chain.tip().block_id(), ..)); - let sync_response = client.sync(sync_request, 1)?; - assert!( - sync_response - .tx_update - .evicted_ats - .iter() - .any(|(txid, _)| *txid == send_txid), - "sync response must track send_tx as missing from mempool" - ); - let changeset = graph.apply_update(sync_response.tx_update.clone()); - assert!( - changeset.tx_graph.last_evicted.contains_key(&send_txid), - "tx graph must track send_tx as missing" - ); - - Ok(()) -} - -#[test] -pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> { - let env = TestEnv::new()?; - let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap()); - let client = Builder::new(base_url.as_str()).build_blocking(); - - let receive_address0 = - Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked(); - let receive_address1 = - Address::from_str("bcrt1qfjg5lv3dvc9az8patec8fjddrs4aqtauadnagr")?.assume_checked(); - - let misc_spks = [ - receive_address0.script_pubkey(), - receive_address1.script_pubkey(), - ]; - - let _block_hashes = env.mine_blocks(101, None)?; - let txid1 = env.bitcoind.client.send_to_address( - &receive_address1, - Amount::from_sat(10000), - None, - None, - None, - None, - Some(1), - None, - )?; - let txid2 = env.bitcoind.client.send_to_address( - &receive_address0, - Amount::from_sat(20000), - None, - None, - None, - None, - Some(1), - None, - )?; - let _block_hashes = env.mine_blocks(1, None)?; - while client.get_height().unwrap() < 102 { - sleep(Duration::from_millis(10)) - } - - // use a full checkpoint linked list (since this is not what we are testing) - let cp_tip = env.make_checkpoint_tip(); - - let sync_update = { - let request = SyncRequest::builder() - .chain_tip(cp_tip.clone()) - .spks(misc_spks); - client.sync(request, 1)? - }; - - assert!( - { - let update_cps = sync_update - .chain_update - .iter() - .map(|cp| cp.block_id()) - .collect::>(); - let superset_cps = cp_tip - .iter() - .map(|cp| cp.block_id()) - .collect::>(); - superset_cps.is_superset(&update_cps) - }, - "update should not alter original checkpoint tip since we already started with all checkpoints", - ); - - let tx_update = sync_update.tx_update; - let updated_graph = { - let mut graph = TxGraph::::default(); - let _ = graph.apply_update(tx_update.clone()); - graph - }; - // Check to see if we have the floating txouts available from our two created transactions' - // previous outputs in order to calculate transaction fees. - for tx in &tx_update.txs { - // Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the - // floating txouts available from the transactions' previous outputs. - let fee = updated_graph.calculate_fee(tx).expect("Fee must exist"); - - // Retrieve the fee in the transaction data from `bitcoind`. - let tx_fee = env - .bitcoind - .client - .get_transaction(&tx.compute_txid(), None) - .expect("Tx must exist") - .fee - .expect("Fee must exist") - .abs() - .to_unsigned() - .expect("valid `Amount`"); - - // Check that the calculated fee matches the fee from the transaction data. 
- assert_eq!(fee, tx_fee); - } - - assert_eq!( - tx_update - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect::>(), - [txid1, txid2].into(), - "update must include all expected transactions" - ); - Ok(()) -} - -/// Test the bounds of the address scan depending on the `stop_gap`. -#[test] -pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> { - let env = TestEnv::new()?; - let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap()); - let client = Builder::new(base_url.as_str()).build_blocking(); - let _block_hashes = env.mine_blocks(101, None)?; - - // Now let's test the gap limit. First of all get a chain of 10 addresses. - let addresses = [ - "bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4", - "bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30", - "bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g", - "bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww", - "bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu", - "bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh", - "bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2", - "bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8", - "bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef", - "bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk", - ]; - let addresses: Vec<_> = addresses - .into_iter() - .map(|s| Address::from_str(s).unwrap().assume_checked()) - .collect(); - let spks: Vec<_> = addresses - .iter() - .enumerate() - .map(|(i, addr)| (i as u32, addr.script_pubkey())) - .collect(); - - // Then receive coins on the 4th address. - let txid_4th_addr = env.bitcoind.client.send_to_address( - &addresses[3], - Amount::from_sat(10000), - None, - None, - None, - None, - Some(1), - None, - )?; - let _block_hashes = env.mine_blocks(1, None)?; - while client.get_height().unwrap() < 103 { - sleep(Duration::from_millis(10)) - } - - // use a full checkpoint linked list (since this is not what we are testing) - let cp_tip = env.make_checkpoint_tip(); - - // A scan with a stop_gap of 3 won't find the transaction, but a scan with a gap limit of 4 - // will. - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 3, 1)? - }; - assert!(full_scan_update.tx_update.txs.is_empty()); - assert!(full_scan_update.last_active_indices.is_empty()); - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 4, 1)? - }; - assert_eq!( - full_scan_update - .tx_update - .txs - .first() - .unwrap() - .compute_txid(), - txid_4th_addr - ); - assert_eq!(full_scan_update.last_active_indices[&0], 3); - - // Now receive a coin on the last address. - let txid_last_addr = env.bitcoind.client.send_to_address( - &addresses[addresses.len() - 1], - Amount::from_sat(10000), - None, - None, - None, - None, - Some(1), - None, - )?; - let _block_hashes = env.mine_blocks(1, None)?; - while client.get_height().unwrap() < 104 { - sleep(Duration::from_millis(10)) - } - - // A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will. - // The last active indice won't be updated in the first case but will in the second one. 
- let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 5, 1)? - }; - let txs: HashSet<_> = full_scan_update - .tx_update - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect(); - assert_eq!(txs.len(), 1); - assert!(txs.contains(&txid_4th_addr)); - assert_eq!(full_scan_update.last_active_indices[&0], 3); - let full_scan_update = { - let request = FullScanRequest::builder() - .chain_tip(cp_tip.clone()) - .spks_for_keychain(0, spks.clone()); - client.full_scan(request, 6, 1)? - }; - let txs: HashSet<_> = full_scan_update - .tx_update - .txs - .iter() - .map(|tx| tx.compute_txid()) - .collect(); - assert_eq!(txs.len(), 2); - assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr)); - assert_eq!(full_scan_update.last_active_indices[&0], 9); - - Ok(()) -} diff --git a/crates/esplora/tests/common/mod.rs b/crates/esplora/tests/common/mod.rs deleted file mode 100644 index c629c502..00000000 --- a/crates/esplora/tests/common/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -use bdk_core::bitcoin::key::{Secp256k1, UntweakedPublicKey}; -use bdk_core::bitcoin::ScriptBuf; - -const PK_BYTES: &[u8] = &[ - 12, 244, 72, 4, 163, 4, 211, 81, 159, 82, 153, 123, 125, 74, 142, 40, 55, 237, 191, 231, 31, - 114, 89, 165, 83, 141, 8, 203, 93, 240, 53, 101, -]; - -#[allow(dead_code)] -pub fn get_test_spk() -> ScriptBuf { - let secp = Secp256k1::new(); - let pk = UntweakedPublicKey::from_slice(PK_BYTES).expect("Must be valid PK"); - ScriptBuf::new_p2tr(&secp, pk, None) -} diff --git a/crates/file_store/CHANGELOG.md b/crates/file_store/CHANGELOG.md deleted file mode 100644 index e4079988..00000000 --- a/crates/file_store/CHANGELOG.md +++ /dev/null @@ -1,37 +0,0 @@ -# Changelog - -All notable changes to this project can be found here and in each release's git tag and can be viewed with `git tag -ln100 "v*"`. See also [DEVELOPMENT_CYCLE.md](../../DEVELOPMENT_CYCLE.md) for more details. - -Contributors do not need to change this file but do need to add changelog details in their PR descriptions. The person making the next release will collect changelog details from included PRs and edit this file prior to each release. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [file_store-0.19.0] - -### Added: - -- `StoreError` enum, which includes `Io`, `Bincode` and `InvalidMagicBytes` #1684. -- docs: add "not intended for production" note in `README`. - -### Changed: - -- `Store::create_new` to `Store::create`, with new return type: `Result` -- `Store::open` to `Store::load`, with new return type: `Result<(Self, Option), StoreErrorWithDump>` -- `Store::open_or_create` to `Store::load_or_create`, with new return type: `Result<(Option, Self), StoreErrorWithDump>` -- `Store::aggregate_changesets` to `Store::dump`, with new return type: `Result, StoreErrorWithDump>` -- `FileError` to `StoreError` -- `AggregateChangesetsError` to `StoreErrorWithDump`, which now can include all the variants of `StoreError` in the error field. - -#### Removed: - -- `IterError` deleted. 
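Taken together, the renamed API reads roughly as follows. This is a sketch against the signatures listed above and the `store.rs` sources later in this diff; the magic bytes and changeset type are placeholders:

```rust
use bdk_file_store::{Store, StoreErrorWithDump};
use std::collections::BTreeSet;

const MAGIC: &[u8] = b"bdkfs1111111"; // placeholder magic bytes
type ChangeSet = BTreeSet<String>; // placeholder changeset (any Merge + serde type)

fn demo(path: &std::path::Path) -> Result<(), StoreErrorWithDump<ChangeSet>> {
    // `load_or_create` (formerly `open_or_create`) returns the store plus the
    // aggregate of everything previously persisted, if the file existed.
    let (mut store, _aggregate) = Store::<ChangeSet>::load_or_create(MAGIC, path)?;
    // `append` persists one changeset; `dump` re-aggregates the whole file.
    store.append(&ChangeSet::from(["entry".to_string()]))?;
    let _all: Option<ChangeSet> = store.dump()?;
    Ok(())
}
```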
- -## [file_store-0.18.1] - -### Changed - -- Minor updates to fix new rustc 1.83.0 clippy warnings #1776 - -[file_store-0.18.1]: https://github.com/bitcoindevkit/bdk/releases/tag/file_store-0.18.1 -[file_store-0.19.0]: https://github.com/bitcoindevkit/bdk/releases/tag/file_store-0.19.0 diff --git a/crates/file_store/Cargo.toml b/crates/file_store/Cargo.toml deleted file mode 100644 index 287f34da..00000000 --- a/crates/file_store/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "bdk_file_store" -version = "0.19.0" -edition = "2021" -license = "MIT OR Apache-2.0" -repository = "https://github.com/bitcoindevkit/bdk" -documentation = "https://docs.rs/bdk_file_store" -description = "A simple append-only flat file database for persisting bdk_chain data." -keywords = ["bitcoin", "persist", "persistence", "bdk", "file"] -authors = ["Bitcoin Dev Kit Developers"] -readme = "README.md" - -[lints] -workspace = true - -[dependencies] -bdk_core = { path = "../core", version = "0.4.1", features = ["serde"]} -bincode = { version = "1" } -serde = { version = "1", features = ["derive"] } - -[dev-dependencies] -tempfile = "3" diff --git a/crates/file_store/README.md b/crates/file_store/README.md deleted file mode 100644 index 5c4100b9..00000000 --- a/crates/file_store/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# BDK File Store - -> âš  `bdk_file_store` is a development/testing database. It does not natively support backwards compatible BDK version upgrades so should not be used in production. - -This is a simple append-only flat file database for persisting [`bdk_chain`] changesets. - -The main structure is [`Store`] which works with any [`bdk_chain`] based changesets to persist data into a flat file. - -[`bdk_chain`]:https://docs.rs/bdk_chain/latest/bdk_chain/ diff --git a/crates/file_store/src/entry_iter.rs b/crates/file_store/src/entry_iter.rs deleted file mode 100644 index 8b284f18..00000000 --- a/crates/file_store/src/entry_iter.rs +++ /dev/null @@ -1,83 +0,0 @@ -use crate::StoreError; -use bincode::Options; -use std::{ - fs::File, - io::{self, BufReader, Seek}, - marker::PhantomData, -}; - -use crate::bincode_options; - -/// Iterator over entries in a file store. -/// -/// Reads and returns an entry each time [`next`] is called. If an error occurs while reading the -/// iterator will yield a `Result::Err(_)` instead and then `None` for the next call to `next`. -/// -/// [`next`]: Self::next -pub struct EntryIter<'t, T> { - /// Buffered reader around the file - db_file: BufReader<&'t mut File>, - finished: bool, - /// The file position for the first read of `db_file`. 
- start_pos: Option, - types: PhantomData, -} - -impl<'t, T> EntryIter<'t, T> { - pub fn new(start_pos: u64, db_file: &'t mut File) -> Self { - Self { - db_file: BufReader::new(db_file), - start_pos: Some(start_pos), - finished: false, - types: PhantomData, - } - } -} - -impl Iterator for EntryIter<'_, T> -where - T: serde::de::DeserializeOwned, -{ - type Item = Result; - - fn next(&mut self) -> Option { - if self.finished { - return None; - } - (|| { - if let Some(start) = self.start_pos.take() { - self.db_file.seek(io::SeekFrom::Start(start))?; - } - - let pos_before_read = self.db_file.stream_position()?; - match bincode_options().deserialize_from(&mut self.db_file) { - Ok(changeset) => Ok(Some(changeset)), - Err(e) => { - self.finished = true; - let pos_after_read = self.db_file.stream_position()?; - // allow unexpected EOF if 0 bytes were read - if let bincode::ErrorKind::Io(inner) = &*e { - if inner.kind() == io::ErrorKind::UnexpectedEof - && pos_after_read == pos_before_read - { - return Ok(None); - } - } - self.db_file.seek(io::SeekFrom::Start(pos_before_read))?; - Err(StoreError::Bincode(*e)) - } - } - })() - .transpose() - } -} - -impl Drop for EntryIter<'_, T> { - fn drop(&mut self) { - // This syncs the underlying file's offset with the buffer's position. This way, we - // maintain the correct position to start the next read/write. - if let Ok(pos) = self.db_file.stream_position() { - let _ = self.db_file.get_mut().seek(io::SeekFrom::Start(pos)); - } - } -} diff --git a/crates/file_store/src/lib.rs b/crates/file_store/src/lib.rs deleted file mode 100644 index 8703b1a4..00000000 --- a/crates/file_store/src/lib.rs +++ /dev/null @@ -1,45 +0,0 @@ -#![doc = include_str!("../README.md")] -mod entry_iter; -mod store; -use std::io; - -use bincode::{DefaultOptions, Options}; -pub use entry_iter::*; -pub use store::*; - -pub(crate) fn bincode_options() -> impl bincode::Options { - DefaultOptions::new().with_varint_encoding() -} - -/// Error that occurs due to problems encountered with the file. -#[derive(Debug)] -pub enum StoreError { - /// IO error, this may mean that the file is too short. - Io(io::Error), - /// Magic bytes do not match what is expected. - InvalidMagicBytes { got: Vec, expected: Vec }, - /// Failure to decode data from the file. - Bincode(bincode::ErrorKind), -} - -impl core::fmt::Display for StoreError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - Self::Io(e) => write!(f, "io error trying to read file: {}", e), - Self::InvalidMagicBytes { got, expected } => write!( - f, - "file has invalid magic bytes: expected={:?} got={:?}", - expected, got, - ), - Self::Bincode(e) => write!(f, "bincode error while reading entry {}", e), - } - } -} - -impl From for StoreError { - fn from(value: io::Error) -> Self { - Self::Io(value) - } -} - -impl std::error::Error for StoreError {} diff --git a/crates/file_store/src/store.rs b/crates/file_store/src/store.rs deleted file mode 100644 index d870e530..00000000 --- a/crates/file_store/src/store.rs +++ /dev/null @@ -1,597 +0,0 @@ -use crate::{bincode_options, EntryIter, StoreError}; -use bdk_core::Merge; -use bincode::Options; -use std::{ - fmt::{self, Debug}, - fs::{File, OpenOptions}, - io::{self, Read, Write}, - marker::PhantomData, - path::Path, -}; - -/// Persists an append-only list of changesets (`C`) to a single file. -/// -/// > âš  This is a development/testing database. 
It does not natively support backwards compatible -/// > BDK version upgrades so should not be used in production. -#[derive(Debug)] -pub struct Store { - magic_len: usize, - db_file: File, - marker: PhantomData, -} - -impl Store -where - C: Merge + serde::Serialize + serde::de::DeserializeOwned, -{ - /// Create a new [`Store`] file in write-only mode; error if the file exists. - /// - /// `magic` is the prefixed bytes to write to the new file. This will be checked when loading - /// the [`Store`] in the future with [`load`]. - /// - /// [`load`]: Store::load - pub fn create
<P>
(magic: &[u8], file_path: P) -> Result - where - P: AsRef, - { - let mut f = OpenOptions::new() - .create_new(true) - .read(true) - .write(true) - .truncate(true) - .open(file_path)?; - f.write_all(magic)?; - Ok(Self { - magic_len: magic.len(), - db_file: f, - marker: Default::default(), - }) - } - - /// Load an existing [`Store`]. - /// - /// Use [`create`] to create a new [`Store`]. - /// - /// # Errors - /// - /// If the prefixed bytes of the loaded file do not match the provided `magic`, a - /// [`StoreErrorWithDump`] will be returned with the [`StoreError::InvalidMagicBytes`] error variant in - /// its error field and changeset field set to [`Option::None`] - /// - /// If there exist changesets in the file, [`load`] will try to aggregate them in - /// a single changeset to verify their integrity. If aggregation fails - /// [`StoreErrorWithDump`] will be returned with the [`StoreError::Bincode`] error variant in - /// its error field and the aggregated changeset so far in the changeset field. - /// - /// To get a new working file store from this error use [`Store::create`] and [`Store::append`] - /// to add the aggregated changeset obtained from [`StoreErrorWithDump`]. - /// - /// To analyze the causes of the problem in the original database do not recreate the [`Store`] - /// using the same file path. Not changing the file path will overwrite previous file without - /// being able to recover its original data. - /// - /// # Examples - /// ``` - /// use bdk_file_store::{Store, StoreErrorWithDump}; - /// # use std::fs::OpenOptions; - /// # use bdk_core::Merge; - /// # use std::collections::BTreeSet; - /// # use std::io; - /// # use std::io::SeekFrom; - /// # use std::io::{Seek, Write}; - /// # - /// # fn main() -> io::Result<()> { - /// # const MAGIC_BYTES_LEN: usize = 12; - /// # const MAGIC_BYTES: [u8; MAGIC_BYTES_LEN] = - /// # [98, 100, 107, 102, 115, 49, 49, 49, 49, 49, 49, 49]; - /// # - /// # type TestChangeSet = BTreeSet; - /// # - /// # let temp_dir = tempfile::tempdir()?; - /// # let file_path = temp_dir.path().join("db_file"); - /// # let mut store = Store::::create(&MAGIC_BYTES, &file_path).unwrap(); - /// # let changesets = [ - /// # TestChangeSet::from(["1".into()]), - /// # TestChangeSet::from(["2".into(), "3".into()]), - /// # TestChangeSet::from(["4".into(), "5".into(), "6".into()]), - /// # ]; - /// # - /// # for changeset in &changesets[..] { - /// # store.append(changeset)?; - /// # } - /// # - /// # drop(store); - /// # - /// # // Simulate the file is broken - /// # let mut data = [255_u8; 2000]; - /// # data[..MAGIC_BYTES_LEN].copy_from_slice(&MAGIC_BYTES); - /// # let mut file = OpenOptions::new().append(true).open(file_path.clone())?; - /// # let new_len = file.seek(SeekFrom::End(-2))?; - /// # file.set_len(new_len)?; - /// - /// let (mut new_store, _aggregate_changeset) = - /// match Store::::load(&MAGIC_BYTES, &file_path) { - /// # Ok(_) => panic!("should have errored"), - /// Ok((store, changeset)) => (store, changeset), - /// Err(StoreErrorWithDump { changeset, .. }) => { - /// let new_file_path = file_path.with_extension("backup"); - /// let mut new_store = - /// Store::create(&MAGIC_BYTES, &new_file_path).expect("must create new file"); - /// if let Some(aggregated_changeset) = changeset { - /// new_store.append(&aggregated_changeset)?; - /// } - /// // The following will overwrite the original file. You will loose the corrupted - /// // portion of the original file forever. 
- /// drop(new_store); - /// std::fs::rename(&new_file_path, &file_path)?; - /// Store::load(&MAGIC_BYTES, &file_path).expect("must load new file") - /// } - /// }; - /// # - /// # assert_eq!( - /// # new_store.dump().expect("should dump changeset: {1, 2, 3} "), - /// # changesets[..2].iter().cloned().reduce(|mut acc, cs| { - /// # Merge::merge(&mut acc, cs); - /// # acc - /// # }), - /// # "should recover all changesets", - /// # ); - /// # - /// # Ok(()) - /// # } - /// ``` - /// [`create`]: Store::create - /// [`load`]: Store::load - pub fn load
<P>
(magic: &[u8], file_path: P) -> Result<(Self, Option), StoreErrorWithDump> - where - P: AsRef, - { - let mut f = OpenOptions::new().read(true).write(true).open(file_path)?; - - let mut magic_buf = vec![0_u8; magic.len()]; - f.read_exact(&mut magic_buf)?; - if magic_buf != magic { - return Err(StoreErrorWithDump { - changeset: Option::::None, - error: StoreError::InvalidMagicBytes { - got: magic_buf, - expected: magic.to_vec(), - }, - }); - } - - let mut store = Self { - magic_len: magic.len(), - db_file: f, - marker: Default::default(), - }; - - // Get aggregated changeset - let aggregated_changeset = store.dump()?; - - Ok((store, aggregated_changeset)) - } - - /// Dump the aggregate of all changesets in [`Store`]. - /// - /// # Errors - /// - /// If there exist changesets in the file, [`dump`] will try to aggregate them in a single - /// changeset. If aggregation fails [`StoreErrorWithDump`] will be returned with the - /// [`StoreError::Bincode`] error variant in its error field and the aggregated changeset so - /// far in the changeset field. - /// - /// [`dump`]: Store::dump - pub fn dump(&mut self) -> Result, StoreErrorWithDump> { - EntryIter::new(self.magic_len as u64, &mut self.db_file).try_fold( - Option::::None, - |mut aggregated_changeset: Option, next_changeset| match next_changeset { - Ok(next_changeset) => { - match &mut aggregated_changeset { - Some(aggregated_changeset) => aggregated_changeset.merge(next_changeset), - aggregated_changeset => *aggregated_changeset = Some(next_changeset), - } - Ok(aggregated_changeset) - } - Err(iter_error) => Err(StoreErrorWithDump { - changeset: aggregated_changeset, - error: iter_error, - }), - }, - ) - } - - /// Attempt to load existing [`Store`] file; create it if the file does not exist. - /// - /// Internally, this calls either [`load`] or [`create`]. - /// - /// [`load`]: Store::load - /// [`create`]: Store::create - pub fn load_or_create
<P>
( - magic: &[u8], - file_path: P, - ) -> Result<(Self, Option), StoreErrorWithDump> - where - P: AsRef, - { - if file_path.as_ref().exists() { - Self::load(magic, file_path) - } else { - Self::create(magic, file_path) - .map(|store| (store, Option::::None)) - .map_err(|err: StoreError| StoreErrorWithDump { - changeset: Option::::None, - error: err, - }) - } - } - - /// Append a new changeset to the file. Does nothing if the changeset is empty. Truncation is - /// not needed because file pointer is always moved to the end of the last decodable data from - /// beginning to end. - /// - /// If multiple garbage writes are produced on the file, the next load will only retrieve the - /// first chunk of valid changesets. - /// - /// If garbage data is written and then valid changesets, the next load will still only - /// retrieve the first chunk of valid changesets. The recovery of those valid changesets after - /// the garbage data is responsibility of the user. - pub fn append(&mut self, changeset: &C) -> Result<(), io::Error> { - // no need to write anything if changeset is empty - if changeset.is_empty() { - return Ok(()); - } - - bincode_options() - .serialize_into(&mut self.db_file, changeset) - .map_err(|e| match *e { - bincode::ErrorKind::Io(error) => error, - unexpected_err => panic!("unexpected bincode error: {}", unexpected_err), - })?; - - Ok(()) - } -} - -/// Error type for [`Store::dump`]. -#[derive(Debug)] -pub struct StoreErrorWithDump { - /// The partially-aggregated changeset. - pub changeset: Option, - - /// The [`StoreError`] - pub error: StoreError, -} - -impl From for StoreErrorWithDump { - fn from(value: io::Error) -> Self { - Self { - changeset: Option::::None, - error: StoreError::Io(value), - } - } -} - -impl std::fmt::Display for StoreErrorWithDump { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - std::fmt::Display::fmt(&self.error, f) - } -} - -impl std::error::Error for StoreErrorWithDump {} - -#[cfg(test)] -mod test { - use super::*; - - use std::{ - collections::BTreeSet, - fs, - io::{Seek, Write}, - }; - - const TEST_MAGIC_BYTES_LEN: usize = 12; - const TEST_MAGIC_BYTES: [u8; TEST_MAGIC_BYTES_LEN] = - [98, 100, 107, 102, 115, 49, 49, 49, 49, 49, 49, 49]; - - type TestChangeSet = BTreeSet; - - /// Check behavior of [`Store::create`] and [`Store::load`]. - #[test] - fn construct_store() { - let temp_dir = tempfile::tempdir().unwrap(); - let file_path = temp_dir.path().join("db_file"); - let _ = Store::::load(&TEST_MAGIC_BYTES, &file_path) - .expect_err("must not open as file does not exist yet"); - let _ = Store::::create(&TEST_MAGIC_BYTES, &file_path) - .expect("must create file"); - // cannot create new as file already exists - let _ = Store::::create(&TEST_MAGIC_BYTES, &file_path) - .expect_err("must fail as file already exists now"); - let _ = Store::::load(&TEST_MAGIC_BYTES, &file_path) - .expect("must open as file exists now"); - } - - #[test] - fn load_fails_if_file_is_too_short() { - let tempdir = tempfile::tempdir().unwrap(); - let file_path = tempdir.path().join("db_file"); - fs::write(&file_path, &TEST_MAGIC_BYTES[..TEST_MAGIC_BYTES_LEN - 1]).expect("should write"); - - match Store::::load(&TEST_MAGIC_BYTES, &file_path) { - Err(StoreErrorWithDump { - error: StoreError::Io(e), - .. 
- }) => assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof), - unexpected => panic!("unexpected result: {:?}", unexpected), - }; - } - - #[test] - fn load_fails_if_magic_bytes_are_invalid() { - let invalid_magic_bytes = "ldkfs0000000"; - - let tempdir = tempfile::tempdir().unwrap(); - let file_path = tempdir.path().join("db_file"); - fs::write(&file_path, invalid_magic_bytes.as_bytes()).expect("should write"); - - match Store::::load(&TEST_MAGIC_BYTES, &file_path) { - Err(StoreErrorWithDump { - error: StoreError::InvalidMagicBytes { got, .. }, - .. - }) => { - assert_eq!(got, invalid_magic_bytes.as_bytes()) - } - unexpected => panic!("unexpected result: {:?}", unexpected), - }; - } - - #[test] - fn load_fails_if_undecodable_bytes() { - // initial data to write to file (magic bytes + invalid data) - let mut data = [255_u8; 2000]; - data[..TEST_MAGIC_BYTES_LEN].copy_from_slice(&TEST_MAGIC_BYTES); - - let test_changesets = TestChangeSet::from(["one".into(), "two".into(), "three!".into()]); - - let temp_dir = tempfile::tempdir().unwrap(); - let file_path = temp_dir.path().join("db_file"); - let mut store = - Store::::create(&TEST_MAGIC_BYTES, &file_path).expect("should create"); - store.append(&test_changesets).expect("should append"); - - // Write garbage to file - store.db_file.write_all(&data).expect("should write"); - - drop(store); - - match Store::::load(&TEST_MAGIC_BYTES, file_path) { - Err(StoreErrorWithDump { - changeset, - error: StoreError::Bincode(_), - }) => { - assert_eq!(changeset, Some(test_changesets)) - } - unexpected_res => panic!("unexpected result: {:?}", unexpected_res), - } - } - - #[test] - fn dump_fails_if_undecodable_bytes() { - // initial data to write to file (magic bytes + invalid data) - let mut data = [255_u8; 2000]; - data[..TEST_MAGIC_BYTES_LEN].copy_from_slice(&TEST_MAGIC_BYTES); - - let test_changesets = TestChangeSet::from(["one".into(), "two".into(), "three!".into()]); - - let temp_dir = tempfile::tempdir().unwrap(); - let file_path = temp_dir.path().join("db_file"); - let mut store = - Store::::create(&TEST_MAGIC_BYTES, file_path).expect("should create"); - store.append(&test_changesets).expect("should append"); - - // Write garbage to file - store.db_file.write_all(&data).expect("should write"); - - match store.dump() { - Err(StoreErrorWithDump { - changeset, - error: StoreError::Bincode(_), - }) => { - assert_eq!(changeset, Some(test_changesets)) - } - unexpected_res => panic!("unexpected result: {:?}", unexpected_res), - } - } - - #[test] - fn append() { - let temp_dir = tempfile::tempdir().unwrap(); - let file_path = temp_dir.path().join("db_file"); - - let not_empty_changeset = BTreeSet::from(["hello".to_string(), "world".to_string()]); - - let mut store = - Store::::create(&TEST_MAGIC_BYTES, file_path).expect("must create"); - - store - .append(¬_empty_changeset) - .expect("must append changeset"); - let aggregated_changeset = store - .dump() - .expect("should aggregate") - .expect("should not be empty"); - assert_eq!(not_empty_changeset, aggregated_changeset); - } - - #[test] - fn append_empty_changeset_does_nothing() { - let temp_dir = tempfile::tempdir().unwrap(); - let file_path = temp_dir.path().join("db_file"); - - let empty_changeset = BTreeSet::new(); - - let mut store = - Store::::create(&TEST_MAGIC_BYTES, file_path).expect("must create"); - - store - .append(&empty_changeset) - .expect("must append changeset"); - let aggregated_changeset = store.dump().expect("should aggregate"); - assert_eq!(None, aggregated_changeset); - } - - #[test] 
-    fn load_or_create() {
-        let temp_dir = tempfile::tempdir().unwrap();
-        let file_path = temp_dir.path().join("db_file");
-        let changeset = BTreeSet::from(["hello".to_string(), "world".to_string()]);
-
-        {
-            let (mut store, _) =
-                Store::<TestChangeSet>::load_or_create(&TEST_MAGIC_BYTES, &file_path)
-                    .expect("must create");
-            assert!(file_path.exists());
-            store.append(&changeset).expect("must succeed");
-        }
-
-        {
-            let (_, recovered_changeset) =
-                Store::<TestChangeSet>::load_or_create(&TEST_MAGIC_BYTES, &file_path)
-                    .expect("must load");
-            assert_eq!(recovered_changeset, Some(changeset));
-        }
-    }
-
-    #[test]
-    fn last_write_is_short() {
-        let temp_dir = tempfile::tempdir().unwrap();
-
-        let changesets = [
-            TestChangeSet::from(["1".into()]),
-            TestChangeSet::from(["2".into(), "3".into()]),
-            TestChangeSet::from(["4".into(), "5".into(), "6".into()]),
-        ];
-        let last_changeset = TestChangeSet::from(["7".into(), "8".into(), "9".into()]);
-        let last_changeset_bytes = bincode_options().serialize(&last_changeset).unwrap();
-
-        for short_write_len in 1..last_changeset_bytes.len() - 1 {
-            let file_path = temp_dir.path().join(format!("{}.dat", short_write_len));
-
-            // simulate creating a file, writing data where the last write is incomplete
-            {
-                let mut store =
-                    Store::<TestChangeSet>::create(&TEST_MAGIC_BYTES, &file_path).unwrap();
-                for changeset in &changesets {
-                    store.append(changeset).unwrap();
-                }
-                // this is the incomplete write
-                store
-                    .db_file
-                    .write_all(&last_changeset_bytes[..short_write_len])
-                    .unwrap();
-            }
-
-            // load file again and aggregate changesets
-            // write the last changeset again (this time it succeeds)
-            {
-                let err = Store::<TestChangeSet>::load(&TEST_MAGIC_BYTES, &file_path)
-                    .expect_err("should fail to aggregate");
-                assert_eq!(
-                    err.changeset,
-                    changesets.iter().cloned().reduce(|mut acc, cs| {
-                        Merge::merge(&mut acc, cs);
-                        acc
-                    }),
-                    "should recover all changesets that are written in full",
-                );
-                // Remove file and start again
-                fs::remove_file(&file_path).expect("should remove file");
-                let mut store =
-                    Store::<TestChangeSet>::create(&TEST_MAGIC_BYTES, &file_path).unwrap();
-                for changeset in &changesets {
-                    store.append(changeset).unwrap();
-                }
-                // this is the complete write
-                store
-                    .db_file
-                    .write_all(&last_changeset_bytes)
-                    .expect("should write last changeset in full");
-            }
-
-            // load file again - this time we should successfully aggregate all changesets
-            {
-                let (_, aggregated_changeset) =
-                    Store::<TestChangeSet>::load(&TEST_MAGIC_BYTES, &file_path).unwrap();
-                assert_eq!(
-                    aggregated_changeset,
-                    changesets
-                        .iter()
-                        .cloned()
-                        .chain(core::iter::once(last_changeset.clone()))
-                        .reduce(|mut acc, cs| {
-                            Merge::merge(&mut acc, cs);
-                            acc
-                        }),
-                    "should recover all changesets",
-                );
-            }
-        }
-    }
-
-    #[test]
-    fn test_load_recovers_state_after_last_write() {
-        let temp_dir = tempfile::tempdir().unwrap();
-        let file_path = temp_dir.path().join("db_file");
-        let changeset1 = BTreeSet::from(["hello".to_string(), "world".to_string()]);
-        let changeset2 = BTreeSet::from(["change after write".to_string()]);
-
-        {
-            // create new store
-            let mut store =
-                Store::<TestChangeSet>::create(&TEST_MAGIC_BYTES, &file_path).expect("must create");
-
-            // append first changeset to store
-            store.append(&changeset1).expect("must succeed");
-        }
-
-        {
-            // open store
-            let (mut store, _) = Store::<TestChangeSet>::load(&TEST_MAGIC_BYTES, &file_path)
-                .expect("failed to load store");
-
-            // now append the second changeset
-            store.append(&changeset2).expect("must succeed");
-
-            // Retrieve stored changesets from the database
-            let stored_changesets = store
-                .dump()
-                .expect("must succeed")
-                .expect("must be not empty");
-
-            // expected changeset must be changeset2 + changeset1
-            let mut expected_changeset = changeset2.clone();
-            expected_changeset.extend(changeset1);
-
-            // Assert that stored_changesets matches expected_changeset but not changeset2
-            assert_eq!(stored_changesets, expected_changeset);
-            assert_ne!(stored_changesets, changeset2);
-        }
-
-        // Open the store again to verify file pointer position at the end of the file
-        let (mut store, _) = Store::<TestChangeSet>::load(&TEST_MAGIC_BYTES, &file_path)
-            .expect("should load correctly");
-
-        // get the current position of file pointer just after loading store
-        let current_pointer = store.db_file.stream_position().expect("must succeed");
-
-        // end pointer for the loaded store
-        let expected_pointer = store
-            .db_file
-            .seek(io::SeekFrom::End(0))
-            .expect("must succeed");
-
-        // current position matches EOF
-        assert_eq!(current_pointer, expected_pointer);
-    }
-}
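Taken together, these deleted tests pin down `bdk_file_store`'s recovery contract: changesets written in full always remain readable, and a failed `load` or `dump` still hands back the aggregate changeset that decoded cleanly before the bad bytes. A minimal sketch of how a caller might lean on that contract follows; the `load_or_recover` helper and the trait bounds on `C` are illustrative assumptions, not part of the crate's API:

```rust
use anyhow::Result;
use bdk_file_store::{Store, StoreErrorWithDump};
use std::path::Path;

// Hypothetical helper: try to load, and on a partially corrupt file rebuild it
// from whatever aggregate changeset decoded cleanly (mirroring the deleted
// `last_write_is_short` test). The serde/Merge bounds are assumptions.
fn load_or_recover<C>(magic: &[u8], path: &Path) -> Result<(Store<C>, Option<C>)>
where
    C: bdk_chain::Merge + serde::Serialize + serde::de::DeserializeOwned,
{
    match Store::<C>::load(magic, path) {
        Ok(store_and_changeset) => Ok(store_and_changeset),
        Err(StoreErrorWithDump { changeset, .. }) => {
            // Drop the corrupt tail by recreating the file, then replay the
            // changesets that were recovered in full.
            std::fs::remove_file(path)?;
            let mut store = Store::<C>::create(magic, path)?;
            if let Some(changeset) = &changeset {
                store.append(changeset)?;
            }
            Ok((store, changeset))
        }
    }
}
```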
diff --git a/crates/testenv/src/lib.rs b/crates/testenv/src/lib.rs
deleted file mode 100644
index 2c0f15f6..00000000
--- a/crates/testenv/src/lib.rs
+++ /dev/null
@@ -1,352 +0,0 @@
-pub mod utils;
-
-use bdk_chain::{
-    bitcoin::{
-        address::NetworkChecked, block::Header, hash_types::TxMerkleNode, hashes::Hash,
-        secp256k1::rand::random, transaction, Address, Amount, Block, BlockHash, CompactTarget,
-        ScriptBuf, ScriptHash, Transaction, TxIn, TxOut, Txid,
-    },
-    local_chain::CheckPoint,
-    BlockId,
-};
-use bitcoincore_rpc::{
-    bitcoincore_rpc_json::{GetBlockTemplateModes, GetBlockTemplateRules},
-    RpcApi,
-};
-use electrsd::bitcoind::anyhow::Context;
-
-pub use electrsd;
-pub use electrsd::bitcoind;
-pub use electrsd::bitcoind::anyhow;
-pub use electrsd::bitcoind::bitcoincore_rpc;
-pub use electrsd::electrum_client;
-use electrsd::electrum_client::ElectrumApi;
-use std::time::Duration;
-
-/// Struct for running a regtest environment with a single `bitcoind` node with an `electrs`
-/// instance connected to it.
-pub struct TestEnv {
-    pub bitcoind: electrsd::bitcoind::BitcoinD,
-    pub electrsd: electrsd::ElectrsD,
-}
-
-/// Configuration parameters.
-#[derive(Debug)]
-pub struct Config<'a> {
-    /// [`bitcoind::Conf`]
-    pub bitcoind: bitcoind::Conf<'a>,
-    /// [`electrsd::Conf`]
-    pub electrsd: electrsd::Conf<'a>,
-}
-
-impl Default for Config<'_> {
-    /// Use the default configuration plus set `http_enabled = true` for [`electrsd::Conf`],
-    /// which is required for testing `bdk_esplora`.
-    fn default() -> Self {
-        Self {
-            bitcoind: bitcoind::Conf::default(),
-            electrsd: {
-                let mut conf = electrsd::Conf::default();
-                conf.http_enabled = true;
-                conf
-            },
-        }
-    }
-}
-
-impl TestEnv {
-    /// Construct a new [`TestEnv`] instance with the default configuration used by BDK.
-    pub fn new() -> anyhow::Result<Self> {
-        TestEnv::new_with_config(Config::default())
-    }
-
-    /// Construct a new [`TestEnv`] instance with the provided [`Config`].
-    pub fn new_with_config(config: Config) -> anyhow::Result<Self> {
-        let bitcoind_exe = match std::env::var("BITCOIND_EXE") {
-            Ok(path) => path,
-            Err(_) => bitcoind::downloaded_exe_path().context(
-                "you need to provide an env var BITCOIND_EXE or specify a bitcoind version feature",
-            )?,
-        };
-        let bitcoind = bitcoind::BitcoinD::with_conf(bitcoind_exe, &config.bitcoind)?;
-
-        let electrs_exe = match std::env::var("ELECTRS_EXE") {
-            Ok(path) => path,
-            Err(_) => electrsd::downloaded_exe_path()
-                .context("electrs version feature must be enabled")?,
-        };
-        let electrsd = electrsd::ElectrsD::with_conf(electrs_exe, &bitcoind, &config.electrsd)?;
-
-        Ok(Self { bitcoind, electrsd })
-    }
-
-    /// Exposes the [`ElectrumApi`] calls from the Electrum client.
-    pub fn electrum_client(&self) -> &impl ElectrumApi {
-        &self.electrsd.client
-    }
-
-    /// Exposes the [`RpcApi`] calls from [`bitcoincore_rpc`].
-    pub fn rpc_client(&self) -> &impl RpcApi {
-        &self.bitcoind.client
-    }
-
-    // Reset `electrsd` so that new blocks can be seen.
-    pub fn reset_electrsd(mut self) -> anyhow::Result<Self> {
-        let mut electrsd_conf = electrsd::Conf::default();
-        electrsd_conf.http_enabled = true;
-        let electrsd = match std::env::var_os("ELECTRS_EXE") {
-            Some(env_electrs_exe) => {
-                electrsd::ElectrsD::with_conf(env_electrs_exe, &self.bitcoind, &electrsd_conf)
-            }
-            None => {
-                let electrs_exe = electrsd::downloaded_exe_path()
-                    .expect("electrs version feature must be enabled");
-                electrsd::ElectrsD::with_conf(electrs_exe, &self.bitcoind, &electrsd_conf)
-            }
-        }?;
-        self.electrsd = electrsd;
-        Ok(self)
-    }
-
-    /// Mine `count` blocks, optionally paying the coinbase reward to a given `address`.
-    pub fn mine_blocks(
-        &self,
-        count: usize,
-        address: Option<Address>,
-    ) -> anyhow::Result<Vec<BlockHash>> {
-        let coinbase_address = match address {
-            Some(address) => address,
-            None => self
-                .bitcoind
-                .client
-                .get_new_address(None, None)?
-                .assume_checked(),
-        };
-        let block_hashes = self
-            .bitcoind
-            .client
-            .generate_to_address(count as _, &coinbase_address)?;
-        Ok(block_hashes)
-    }
-
-    /// Mine a block that is guaranteed to be empty even with transactions in the mempool.
-    pub fn mine_empty_block(&self) -> anyhow::Result<(usize, BlockHash)> {
-        let bt = self.bitcoind.client.get_block_template(
-            GetBlockTemplateModes::Template,
-            &[GetBlockTemplateRules::SegWit],
-            &[],
-        )?;
-
-        let txdata = vec![Transaction {
-            version: transaction::Version::ONE,
-            lock_time: bdk_chain::bitcoin::absolute::LockTime::from_height(0)?,
-            input: vec![TxIn {
-                previous_output: bdk_chain::bitcoin::OutPoint::default(),
-                script_sig: ScriptBuf::builder()
-                    .push_int(bt.height as _)
-                    // random number so that re-mining creates a unique block
-                    .push_int(random())
-                    .into_script(),
-                sequence: bdk_chain::bitcoin::Sequence::default(),
-                witness: bdk_chain::bitcoin::Witness::new(),
-            }],
-            output: vec![TxOut {
-                value: Amount::ZERO,
-                script_pubkey: ScriptBuf::new_p2sh(&ScriptHash::all_zeros()),
-            }],
-        }];
-
-        let bits: [u8; 4] = bt
-            .bits
-            .clone()
-            .try_into()
-            .expect("rpc provided us with invalid bits");
-
-        let mut block = Block {
-            header: Header {
-                version: bdk_chain::bitcoin::block::Version::default(),
-                prev_blockhash: bt.previous_block_hash,
-                merkle_root: TxMerkleNode::all_zeros(),
-                time: Ord::max(bt.min_time, std::time::UNIX_EPOCH.elapsed()?.as_secs()) as u32,
-                bits: CompactTarget::from_consensus(u32::from_be_bytes(bits)),
-                nonce: 0,
-            },
-            txdata,
-        };
-
-        block.header.merkle_root = block.compute_merkle_root().expect("must compute");
-
-        for nonce in 0..=u32::MAX {
-            block.header.nonce = nonce;
-            if block.header.target().is_met_by(block.block_hash()) {
-                break;
-            }
-        }
-
-        self.bitcoind.client.submit_block(&block)?;
-        Ok((bt.height as usize, block.block_hash()))
-    }
-
-    /// This method waits for the Electrum notification indicating that a new block has been mined.
-    /// `timeout` is the maximum [`Duration`] we want to wait for a response from Electrsd.
-    pub fn wait_until_electrum_sees_block(&self, timeout: Duration) -> anyhow::Result<()> {
-        self.electrsd.client.block_headers_subscribe()?;
-        let delay = Duration::from_millis(200);
-        let start = std::time::Instant::now();
-
-        while start.elapsed() < timeout {
-            self.electrsd.trigger()?;
-            self.electrsd.client.ping()?;
-            if self.electrsd.client.block_headers_pop()?.is_some() {
-                return Ok(());
-            }
-
-            std::thread::sleep(delay);
-        }
-
-        Err(anyhow::Error::msg(
-            "Timed out waiting for Electrsd to get block header",
-        ))
-    }
-
-    /// This method waits for Electrsd to see a transaction with given `txid`. `timeout` is the
-    /// maximum [`Duration`] we want to wait for a response from Electrsd.
-    pub fn wait_until_electrum_sees_txid(
-        &self,
-        txid: Txid,
-        timeout: Duration,
-    ) -> anyhow::Result<()> {
-        let delay = Duration::from_millis(200);
-        let start = std::time::Instant::now();
-
-        while start.elapsed() < timeout {
-            if self.electrsd.client.transaction_get(&txid).is_ok() {
-                return Ok(());
-            }
-
-            std::thread::sleep(delay);
-        }
-
-        Err(anyhow::Error::msg(
-            "Timed out waiting for Electrsd to get transaction",
-        ))
-    }
-
-    /// Invalidate the last `count` blocks.
-    pub fn invalidate_blocks(&self, count: usize) -> anyhow::Result<()> {
-        let mut hash = self.bitcoind.client.get_best_block_hash()?;
-        for _ in 0..count {
-            let prev_hash = self
-                .bitcoind
-                .client
-                .get_block_info(&hash)?
-                .previousblockhash;
-            self.bitcoind.client.invalidate_block(&hash)?;
-            match prev_hash {
-                Some(prev_hash) => hash = prev_hash,
-                None => break,
-            }
-        }
-        Ok(())
-    }
-
-    /// Reorg the last `count` blocks, replacing them with newly mined blocks.
-    /// Refer to [`TestEnv::mine_empty_block`] for more information.
-    pub fn reorg(&self, count: usize) -> anyhow::Result<Vec<BlockHash>> {
-        let start_height = self.bitcoind.client.get_block_count()?;
-        self.invalidate_blocks(count)?;
-
-        let res = self.mine_blocks(count, None);
-        assert_eq!(
-            self.bitcoind.client.get_block_count()?,
-            start_height,
-            "reorg should not result in height change"
-        );
-        res
-    }
-
-    /// Reorg the last `count` blocks, replacing them with empty blocks.
-    pub fn reorg_empty_blocks(&self, count: usize) -> anyhow::Result<Vec<(usize, BlockHash)>> {
-        let start_height = self.bitcoind.client.get_block_count()?;
-        self.invalidate_blocks(count)?;
-
-        let res = (0..count)
-            .map(|_| self.mine_empty_block())
-            .collect::<Result<Vec<_>, _>>()?;
-        assert_eq!(
-            self.bitcoind.client.get_block_count()?,
-            start_height,
-            "reorg should not result in height change"
-        );
-        Ok(res)
-    }
-
-    /// Send a tx of a given `amount` to a given `address`.
-    pub fn send(&self, address: &Address<NetworkChecked>, amount: Amount) -> anyhow::Result<Txid> {
-        let txid = self
-            .bitcoind
-            .client
-            .send_to_address(address, amount, None, None, None, None, None, None)?;
-        Ok(txid)
-    }
-
-    /// Create a checkpoint linked list of all the blocks in the chain.
-    pub fn make_checkpoint_tip(&self) -> CheckPoint {
-        CheckPoint::from_block_ids((0_u32..).map_while(|height| {
-            self.bitcoind
-                .client
-                .get_block_hash(height as u64)
-                .ok()
-                .map(|hash| BlockId { height, hash })
-        }))
-        .expect("must craft tip")
-    }
-
-    /// Get the genesis hash of the blockchain.
-    pub fn genesis_hash(&self) -> anyhow::Result<BlockHash> {
-        let hash = self.bitcoind.client.get_block_hash(0)?;
-        Ok(hash)
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use crate::TestEnv;
-    use core::time::Duration;
-    use electrsd::bitcoind::{anyhow::Result, bitcoincore_rpc::RpcApi};
-
-    /// This checks that reorgs initiated by `bitcoind` are detected by our `electrsd` instance.
-    #[test]
-    fn test_reorg_is_detected_in_electrsd() -> Result<()> {
-        let env = TestEnv::new()?;
-
-        // Mine some blocks.
-        env.mine_blocks(101, None)?;
-        env.wait_until_electrum_sees_block(Duration::from_secs(6))?;
-        let height = env.bitcoind.client.get_block_count()?;
-        let blocks = (0..=height)
-            .map(|i| env.bitcoind.client.get_block_hash(i))
-            .collect::<Result<Vec<_>, _>>()?;
-
-        // Perform reorg on six blocks.
-        env.reorg(6)?;
-        env.wait_until_electrum_sees_block(Duration::from_secs(6))?;
-        let reorged_height = env.bitcoind.client.get_block_count()?;
-        let reorged_blocks = (0..=height)
-            .map(|i| env.bitcoind.client.get_block_hash(i))
-            .collect::<Result<Vec<_>, _>>()?;
-
-        assert_eq!(height, reorged_height);
-
-        // Block hashes should not be equal on the six reorged blocks.
-        for (i, (block, reorged_block)) in blocks.iter().zip(reorged_blocks.iter()).enumerate() {
-            match i <= height as usize - 6 {
-                true => assert_eq!(block, reorged_block),
-                false => assert_ne!(block, reorged_block),
-            }
-        }
-
-        Ok(())
-    }
-}
diff --git a/crates/testenv/src/utils.rs b/crates/testenv/src/utils.rs
deleted file mode 100644
index 93ca1f21..00000000
--- a/crates/testenv/src/utils.rs
+++ /dev/null
@@ -1,90 +0,0 @@
-use bdk_chain::bitcoin;
-
-#[allow(unused_macros)]
-#[macro_export]
-macro_rules! block_id {
-    ($height:expr, $hash:literal) => {{
-        bdk_chain::BlockId {
-            height: $height,
-            hash: bitcoin::hashes::Hash::hash($hash.as_bytes()),
-        }
-    }};
-}
-
-#[allow(unused_macros)]
-#[macro_export]
-macro_rules! hash {
-    ($index:literal) => {{
-        bitcoin::hashes::Hash::hash($index.as_bytes())
-    }};
-}
-
-#[allow(unused_macros)]
-#[macro_export]
-macro_rules! local_chain {
-    [ $(($height:expr, $hash:expr)), * ] => {{
-        #[allow(unused_mut)]
-        bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect())
-            .expect("chain must have genesis block")
-    }};
-}
-
-#[allow(unused_macros)]
-#[macro_export]
-macro_rules! chain_update {
-    [ $(($height:expr, $hash:expr)), * ] => {{
-        #[allow(unused_mut)]
-        bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect())
-            .expect("chain must have genesis block")
-            .tip()
-    }};
-}
-
-#[allow(unused_macros)]
-#[macro_export]
-macro_rules! changeset {
-    (checkpoints: $($tail:tt)*) => { changeset!(index: TxHeight, checkpoints: $($tail)*) };
-    (
-        index: $ind:ty,
-        checkpoints: [ $(( $height:expr, $cp_to:expr )),* ]
-        $(,txids: [ $(( $txid:expr, $tx_to:expr )),* ])?
-    ) => {{
-        use bdk_chain::collections::BTreeMap;
-
-        #[allow(unused_mut)]
-        bdk_chain::sparse_chain::ChangeSet::<$ind> {
-            checkpoints: {
-                let mut changes = BTreeMap::default();
-                $(changes.insert($height, $cp_to);)*
-                changes
-            },
-            txids: {
-                let mut changes = BTreeMap::default();
-                $($(changes.insert($txid, $tx_to.map(|h: TxHeight| h.into()));)*)?
-                changes
-            }
-        }
-    }};
-}
-
-#[allow(unused)]
-pub fn new_tx(lt: u32) -> bitcoin::Transaction {
-    bitcoin::Transaction {
-        version: bitcoin::transaction::Version::non_standard(0x00),
-        lock_time: bitcoin::absolute::LockTime::from_consensus(lt),
-        input: vec![],
-        output: vec![],
-    }
-}
-
-#[allow(unused)]
-pub const DESCRIPTORS: [&str; 7] = [
-    "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)",
-    "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)",
-    "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0/*)",
-    "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)",
-    "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/1/*)",
-    "wpkh(xprv9s21ZrQH143K4EXURwMHuLS469fFzZyXk7UUpdKfQwhoHcAiYTakpe8pMU2RiEdvrU9McyuE7YDoKcXkoAwEGoK53WBDnKKv2zZbb9BzttX/1/0/*)",
-    // non-wildcard
-    "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)",
-];
diff --git a/docs/adr/0003_canonicalization_algorithm.md b/docs/adr/0003_canonicalization_algorithm.md
deleted file mode 100644
index c3062cbc..00000000
--- a/docs/adr/0003_canonicalization_algorithm.md
+++ /dev/null
@@ -1,83 +0,0 @@
-# Introduce `O(n)` Canonicalization Algorithm
-
-* Status: Proposed
-* Authors: @LLFourn, @evanlinjin
-* Date: 2024-12-01
-* Targeted modules: `bdk_chain`
-* Associated Tickets/PRs: Issue #1665, ~PR #1659~, PR #1670
-
-## Context and Problem Statement
-
-The [2024 Wizardsardine BDK code audit](https://gist.github.com/darosior/4aeb9512d7f1ac7666abc317d6f9453b) uncovered the severity of the performance issues in the original canonicalization logic. The problem is especially severe for wallet histories with many unconfirmed and conflicting transactions. This can be a DoS vector if BDK is used in server-side applications. The time complexity of the original canonicalization logic is $O(n^2)$.
-
-The old canonicalization logic is based on `TxGraph::get_chain_position`. This is called on every transaction included in `TxGraph`, having to traverse backwards and forwards to check that all ancestors do not conflict with anything that is anchored in the best chain and that no conflict has a higher `last-seen` value. Also note that `last-seen` values are transitive, so to determine the *actual* `last-seen` value, we need to iterate through all descendants.
-
-## Considered Options
-
-#### Option 1: Introduce a `canonical_cache` as a parameter to all `get_chain_position`-based methods.
-
-The `canonical_cache` will include both `canonical` and `not_canonical` sets of txids. This avoids revisiting what has already been visited.
-
-**Pros:**
-* Least API and code changes.
-
-**Cons:**
-* The API can be misused. Results can be wildly wrong if the `canonical_cache` parameter is reused across updates to `TxGraph` or the `ChainOracle` impl.
-* Visiting transactions in a certain order may decrease the number of traversals. I.e. if we call `get_chain_position` on transactions with anchors first, `get_chain_position` calls on non-anchored transactions later on won't need to do as much work. Visiting order is not enforced if we stick to a `get_chain_position`-based API.
-
-#### Option 2: Traverse `TxGraph` spends forwards, starting from graph roots.
-
-For this algorithm, we maintain two `txid` sets: `maybe_canonical` and `not_canonical`. Note that these sets are not mutually exclusive since we are traversing spends, and a transaction can have multiple inputs (spends). When we arrive at a transaction via one of its inputs (spends), we may not have checked all of the transaction's other inputs yet, so we cannot be sure that none of its ancestors conflicts with a transaction that is anchored or has a higher last-seen value.
-
-**Pros:**
-* API cannot be misused (as it can in option 1).
-* We can traverse transactions in a pseudo-chronological order.
-
-**Cons:**
-* Duplicate work may have to be done if we have transactions with multiple inputs. We may mark a subset of transactions as `maybe_canonical`, then end up having to mark a majority of those as `not_canonical` later on if a spend of a previously-visited transaction is determined to be a descendant of a `not_canonical` transaction.
-* Does not handle transitively-anchored transactions properly. If a transaction is anchored in the best chain, all of its ancestors are also effectively anchored in the best chain, even though they do not have an explicit anchor attached. To find transitive anchors, we need to traverse backwards. However, this algorithm only traverses forwards.
-
-#### Option 3: Traverse `TxGraph` backwards, starting from transactions with the highest `last-seen` values.
-
-The premise is that transactions with higher last-seen values are most likely to be canonical and not conflict with transactions anchored in the best chain (since they are seen most recently in the mempool).
-
-The algorithm maintains two mutually exclusive `txid` sets: `canonical` and `not_canonical`. A transaction that is included in either of these sets has already been visited and can be skipped. We iterate through all transactions, ordered by descending last-seen values.
-
-For each transaction, we traverse its ancestors, stopping when we hit a confirmed transaction or a transaction that conflicts with a confirmed transaction. If a conflict with a confirmed transaction is found, we can mark that transaction and all of its descendants as `not_canonical`. Otherwise, the entire subset will be `canonical`. If we hit a transaction that is anchored in the best chain, we can mark it and all of its ancestors as `canonical`.
-
-**Pros:**
-* We can efficiently mark large subsets as canonical/not-canonical.
-
-**Cons:**
-* Like option 2, this does not handle transitively-anchored transactions properly.
-
-#### Option 4: Traverse transactions with anchors first.
-
-The algorithm's premise is as follows:
-
-1. If transaction `A` is determined to be canonical, all of `A`'s ancestors must also be canonical.
-2. If transaction `B` is determined to be NOT canonical, all of `B`'s descendants must also be NOT canonical.
-3. If a transaction is anchored in the best chain, it is canonical.
-4. If a transaction conflicts with a canonical transaction, it is NOT canonical.
-5. A transaction with a higher last-seen value has precedence.
-6. Last-seen values are transitive. A transaction's real last-seen value is the max of its own last-seen value and those of all its descendants.
-
-Like Option 3's algorithm, we maintain two mutually-exclusive `txid` sets: `canonical` and `not_canonical`.
-
-Imagine a method `mark_canonical(A)` that is based on premises 1 and 2. This method will mark transaction `A` and all of its ancestors as canonical. For each transaction that is marked canonical, we can iterate all of its conflicts and mark those as `not_canonical`. If a transaction already exists in `canonical` or `not_canonical`, we can break early, avoiding duplicate work.
-
-This algorithm iterates transactions in three runs.
-
-1. Iterate over all transactions with anchors in descending anchor-height order. For any transaction that has an anchor pointing to the best chain, we call `mark_canonical` on it. We iterate in descending-height order to reduce the number of anchors we need to check against the `ChainOracle` (premise 1). The purpose of this run is to populate `not_canonical` with all transactions that directly conflict with anchored transactions and populate `canonical` with all anchored transactions and ancestors of anchored transactions (transitive anchors).
-2. Iterate over all transactions with last-seen values, in descending last-seen order. We can call `mark_canonical` on all of these that do not already exist in `canonical` or `not_canonical`.
-3. Iterate over the remaining transactions that contain anchors (none of which point to the best chain) and have no last-seen value. We treat these transactions in the same way as we do in run 2.
-
-**Pros:**
-* Transitive anchors are handled correctly.
-* We can efficiently mark large subsets as canonical/non-canonical.
-
-**Cons:** none identified so far.
-
-## Decision Outcome
-
-Option 4 is implemented in PR #1670.
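For concreteness, the marking step implied by premises 1, 2 and 4 can be sketched as follows. This is illustrative only and is not the code merged in PR #1670: `Txid` is simplified to a byte array, the `parents` and `conflicts` maps are hypothetical pre-computed inputs, and the descendant walk required by premise 2 is elided.

```rust
use std::collections::{HashMap, HashSet};

type Txid = [u8; 32];

// Schematic sketch of `mark_canonical`: walk a transaction's ancestors
// (premise 1), and mark every direct conflict of a newly canonical tx as not
// canonical (premise 4). Already-visited txids short-circuit, which is what
// keeps the overall pass linear in the number of transactions.
fn mark_canonical(
    txid: Txid,
    parents: &HashMap<Txid, Vec<Txid>>,   // txid -> txids it spends from (assumed input)
    conflicts: &HashMap<Txid, Vec<Txid>>, // txid -> direct conflicts (assumed input)
    canonical: &mut HashSet<Txid>,
    not_canonical: &mut HashSet<Txid>,
) {
    let mut stack = vec![txid];
    while let Some(txid) = stack.pop() {
        // Break early on anything already decided, avoiding duplicate work.
        if not_canonical.contains(&txid) || !canonical.insert(txid) {
            continue;
        }
        // Premise 4: conflicts of a canonical tx are not canonical. A full
        // implementation would also mark each conflict's descendants
        // (premise 2); that traversal is elided here for brevity.
        for &conflict in conflicts.get(&txid).into_iter().flatten() {
            not_canonical.insert(conflict);
        }
        // Premise 1: continue up through the ancestors.
        stack.extend(parents.get(&txid).into_iter().flatten().copied());
    }
}
```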
diff --git a/example-crates/example_bitcoind_rpc_polling/Cargo.toml b/example-crates/example_bitcoind_rpc_polling/Cargo.toml
deleted file mode 100644
index 6728bb13..00000000
--- a/example-crates/example_bitcoind_rpc_polling/Cargo.toml
+++ /dev/null
@@ -1,12 +0,0 @@
-[package]
-name = "example_bitcoind_rpc_polling"
-version = "0.1.0"
-edition = "2021"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-bdk_chain = { path = "../../crates/chain", features = ["serde"] }
-bdk_bitcoind_rpc = { path = "../../crates/bitcoind_rpc" }
-example_cli = { path = "../example_cli" }
-ctrlc = { version = "^2" }
diff --git a/example-crates/example_bitcoind_rpc_polling/README.md b/example-crates/example_bitcoind_rpc_polling/README.md
deleted file mode 100644
index fef82ab1..00000000
--- a/example-crates/example_bitcoind_rpc_polling/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Example RPC CLI
-
-### Simple Regtest Test
-
-1. Start local regtest bitcoind.
-   ```
-   mkdir -p /tmp/regtest/bitcoind
-   bitcoind -regtest -server -fallbackfee=0.0002 -rpcuser=<user> -rpcpassword=<password> -datadir=/tmp/regtest/bitcoind -daemon
-   ```
-2. Create a test bitcoind wallet and set bitcoind env.
-   ```
-   bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<user> -rpcpassword=<password> -named createwallet wallet_name="test"
-   export RPC_URL=127.0.0.1:18443
-   export RPC_USER=<user>
-   export RPC_PASS=<password>
-   ```
-3. Get test bitcoind wallet info.
-   ```
-   bitcoin-cli -rpcwallet="test" -rpcuser=<user> -rpcpassword=<password> -datadir=/tmp/regtest/bitcoind -regtest getwalletinfo
-   ```
-4. Get new test bitcoind wallet address.
-   ```
-   BITCOIND_ADDRESS=$(bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<user> -rpcpassword=<password> getnewaddress)
-   echo $BITCOIND_ADDRESS
-   ```
-5. Generate 101 blocks with reward to test bitcoind wallet address.
-   ```
-   bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<user> -rpcpassword=<password> generatetoaddress 101 $BITCOIND_ADDRESS
-   ```
-6. Verify test bitcoind wallet balance.
-   ```
-   bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<user> -rpcpassword=<password> getbalances
-   ```
-7. Set descriptor env and get address from RPC CLI wallet.
-   ```
-   export DESCRIPTOR="wpkh(tprv8ZgxMBicQKsPfK9BTf82oQkHhawtZv19CorqQKPFeaHDMA4dXYX6eWsJGNJ7VTQXWmoHdrfjCYuDijcRmNFwSKcVhswzqs4fugE8turndGc/1/*)"
-   cargo run -- --network regtest address next
-   ```
-8. Send 5 test bitcoin to RPC CLI wallet.
-   ```
-   bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<user> -rpcpassword=<password> sendtoaddress <address> 5
-   ```
-9. Sync blockchain with RPC CLI wallet.
-   ```
-   cargo run -- --network regtest sync
-   ```
-10. Get RPC CLI wallet unconfirmed balances.
-    ```
-    cargo run -- --network regtest balance
-    ```
-11. Generate 10 blocks with reward to test bitcoind wallet address.
-    ```
-    bitcoin-cli -datadir=/tmp/regtest/bitcoind -rpcuser=<user> -rpcpassword=<password> -regtest generatetoaddress 10 $BITCOIND_ADDRESS
-    ```
-12. Sync the blockchain with RPC CLI wallet.
-    ```
-    cargo run -- --network regtest sync
-    ```
-13. Get RPC CLI wallet confirmed balances.
-    ```
-    cargo run -- --network regtest balance
-    ```
-14. List RPC CLI wallet transaction outputs.
-    ```
-    cargo run -- --network regtest txout list
-    ```
\ No newline at end of file
diff --git a/example-crates/example_bitcoind_rpc_polling/src/main.rs b/example-crates/example_bitcoind_rpc_polling/src/main.rs
deleted file mode 100644
index 83cb25f8..00000000
--- a/example-crates/example_bitcoind_rpc_polling/src/main.rs
+++ /dev/null
@@ -1,370 +0,0 @@
-use std::{
-    path::PathBuf,
-    sync::{
-        atomic::{AtomicBool, Ordering},
-        Arc,
-    },
-    time::{Duration, Instant},
-};
-
-use bdk_bitcoind_rpc::{
-    bitcoincore_rpc::{Auth, Client, RpcApi},
-    Emitter,
-};
-use bdk_chain::{
-    bitcoin::{Block, Transaction},
-    local_chain, Merge,
-};
-use example_cli::{
-    anyhow,
-    clap::{self, Args, Subcommand},
-    ChangeSet, Keychain,
-};
-
-const DB_MAGIC: &[u8] = b"bdk_example_rpc";
-const DB_PATH: &str = ".bdk_example_rpc.db";
-
-/// The mpsc channel bound for emissions from [`Emitter`].
-const CHANNEL_BOUND: usize = 10;
-/// Delay for printing status to stdout.
-const STDOUT_PRINT_DELAY: Duration = Duration::from_secs(6);
-/// Delay between mempool emissions.
-const MEMPOOL_EMIT_DELAY: Duration = Duration::from_secs(30);
-/// Delay for committing to persistence.
-const DB_COMMIT_DELAY: Duration = Duration::from_secs(60);
-
-#[derive(Debug)]
-enum Emission {
-    Block(bdk_bitcoind_rpc::BlockEvent<Block>),
-    Mempool(Vec<(Transaction, u64)>),
-    Tip(u32),
-}
-
-#[derive(Args, Debug, Clone)]
-struct RpcArgs {
-    /// RPC URL
-    #[clap(env = "RPC_URL", long, default_value = "127.0.0.1:8332")]
-    url: String,
-    /// RPC auth cookie file
-    #[clap(env = "RPC_COOKIE", long)]
-    rpc_cookie: Option<PathBuf>,
-    /// RPC auth username
-    #[clap(env = "RPC_USER", long)]
-    rpc_user: Option<String>,
-    /// RPC auth password
-    #[clap(env = "RPC_PASS", long)]
-    rpc_password: Option<String>,
-    /// Starting block height to fall back to if no point of agreement is found
-    #[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")]
-    fallback_height: u32,
-}
-
-impl From<RpcArgs> for Auth {
-    fn from(args: RpcArgs) -> Self {
-        match (args.rpc_cookie, args.rpc_user, args.rpc_password) {
-            (None, None, None) => Self::None,
-            (Some(path), _, _) => Self::CookieFile(path),
-            (_, Some(user), Some(pass)) => Self::UserPass(user, pass),
-            (_, Some(_), None) => panic!("rpc auth: missing rpc_pass"),
-            (_, None, Some(_)) => panic!("rpc auth: missing rpc_user"),
-        }
-    }
-}
-
-impl RpcArgs {
-    fn new_client(&self) -> anyhow::Result<Client> {
-        Ok(Client::new(
-            &self.url,
-            match (&self.rpc_cookie, &self.rpc_user, &self.rpc_password) {
-                (None, None, None) => Auth::None,
-                (Some(path), _, _) => Auth::CookieFile(path.clone()),
-                (_, Some(user), Some(pass)) => Auth::UserPass(user.clone(), pass.clone()),
-                (_, Some(_), None) => panic!("rpc auth: missing rpc_pass"),
-                (_, None, Some(_)) => panic!("rpc auth: missing rpc_user"),
-            },
-        )?)
-    }
-}
-
-#[derive(Subcommand, Debug, Clone)]
-enum RpcCommands {
-    /// Syncs local state with remote state via RPC (starting from last point of agreement) and
-    /// stores/indexes relevant transactions
-    Sync {
-        #[clap(flatten)]
-        rpc_args: RpcArgs,
-    },
-    /// Sync by having the emitter logic in a separate thread
-    Live {
-        #[clap(flatten)]
-        rpc_args: RpcArgs,
-    },
-}
-
-fn main() -> anyhow::Result<()> {
-    let start = Instant::now();
-
-    let example_cli::Init {
-        args,
-        graph,
-        chain,
-        db,
-        network,
-    } = match example_cli::init_or_load::<RpcCommands, RpcArgs>(DB_MAGIC, DB_PATH)? {
-        Some(init) => init,
-        None => return Ok(()),
-    };
-
-    let rpc_cmd = match args.command {
-        example_cli::Commands::ChainSpecific(rpc_cmd) => rpc_cmd,
-        general_cmd => {
-            return example_cli::handle_commands(
-                &graph,
-                &chain,
-                &db,
-                network,
-                |rpc_args, tx| {
-                    let client = rpc_args.new_client()?;
-                    client.send_raw_transaction(tx)?;
-                    Ok(())
-                },
-                general_cmd,
-            );
-        }
-    };
-
-    match rpc_cmd {
-        RpcCommands::Sync { rpc_args } => {
-            let RpcArgs {
-                fallback_height, ..
-            } = rpc_args;
-
-            let chain_tip = chain.lock().unwrap().tip();
-            let rpc_client = rpc_args.new_client()?;
-            let mut emitter = Emitter::new(&rpc_client, chain_tip, fallback_height);
-            let mut db_stage = ChangeSet::default();
-
-            let mut last_db_commit = Instant::now();
-            let mut last_print = Instant::now();
-
-            while let Some(emission) = emitter.next_block()? {
-                let height = emission.block_height();
-
-                let mut chain = chain.lock().unwrap();
-                let mut graph = graph.lock().unwrap();
-
-                let chain_changeset = chain
-                    .apply_update(emission.checkpoint)
-                    .expect("must always apply as we receive blocks in order from emitter");
-                let graph_changeset = graph.apply_block_relevant(&emission.block, height);
-                db_stage.merge(ChangeSet {
-                    local_chain: chain_changeset,
-                    tx_graph: graph_changeset.tx_graph,
-                    indexer: graph_changeset.indexer,
-                    ..Default::default()
-                });
-
-                // commit staged db changes in intervals
-                if last_db_commit.elapsed() >= DB_COMMIT_DELAY {
-                    let db = &mut *db.lock().unwrap();
-                    last_db_commit = Instant::now();
-                    if let Some(changeset) = db_stage.take() {
-                        db.append(&changeset)?;
-                    }
-                    println!(
-                        "[{:>10}s] committed to db (took {}s)",
-                        start.elapsed().as_secs_f32(),
-                        last_db_commit.elapsed().as_secs_f32()
-                    );
-                }
-
-                // print synced-to height and current balance in intervals
-                if last_print.elapsed() >= STDOUT_PRINT_DELAY {
-                    last_print = Instant::now();
-                    let synced_to = chain.tip();
-                    let balance = {
-                        graph.graph().balance(
-                            &*chain,
-                            synced_to.block_id(),
-                            graph.index.outpoints().iter().cloned(),
-                            |(k, _), _| k == &Keychain::Internal,
-                        )
-                    };
-                    println!(
-                        "[{:>10}s] synced to {} @ {} | total: {}",
-                        start.elapsed().as_secs_f32(),
-                        synced_to.hash(),
-                        synced_to.height(),
-                        balance.total()
-                    );
-                }
-            }
-
-            let mempool_txs = emitter.mempool()?;
-            let graph_changeset = graph
-                .lock()
-                .unwrap()
-                .batch_insert_relevant_unconfirmed(mempool_txs);
-            {
-                let db = &mut *db.lock().unwrap();
-                db_stage.merge(ChangeSet {
-                    tx_graph: graph_changeset.tx_graph,
-                    indexer: graph_changeset.indexer,
-                    ..Default::default()
-                });
-                if let Some(changeset) = db_stage.take() {
-                    db.append(&changeset)?;
-                }
-            }
-        }
-        RpcCommands::Live { rpc_args } => {
-            let RpcArgs {
-                fallback_height, ..
-            } = rpc_args;
-            let sigterm_flag = start_ctrlc_handler();
-
-            let last_cp = chain.lock().unwrap().tip();
-
-            println!(
-                "[{:>10}s] starting emitter thread...",
-                start.elapsed().as_secs_f32()
-            );
-            let (tx, rx) = std::sync::mpsc::sync_channel::<Emission>(CHANNEL_BOUND);
-            let emission_jh = std::thread::spawn(move || -> anyhow::Result<()> {
-                let rpc_client = rpc_args.new_client()?;
-                let mut emitter = Emitter::new(&rpc_client, last_cp, fallback_height);
-
-                let mut block_count = rpc_client.get_block_count()? as u32;
-                tx.send(Emission::Tip(block_count))?;
-
-                loop {
-                    match emitter.next_block()? {
-                        Some(block_emission) => {
-                            let height = block_emission.block_height();
-                            if sigterm_flag.load(Ordering::Acquire) {
-                                break;
-                            }
-                            if height > block_count {
-                                block_count = rpc_client.get_block_count()? as u32;
-                                tx.send(Emission::Tip(block_count))?;
-                            }
-                            tx.send(Emission::Block(block_emission))?;
-                        }
-                        None => {
-                            if await_flag(&sigterm_flag, MEMPOOL_EMIT_DELAY) {
-                                break;
-                            }
-                            println!("preparing mempool emission...");
-                            let now = Instant::now();
-                            tx.send(Emission::Mempool(emitter.mempool()?))?;
-                            println!("mempool emission prepared in {}s", now.elapsed().as_secs());
-                            continue;
-                        }
-                    };
-                }
-
-                println!("emitter thread shutting down...");
-                Ok(())
-            });
-
-            let mut tip_height = 0_u32;
-            let mut last_db_commit = Instant::now();
-            let mut last_print = Option::<Instant>::None;
-            let mut db_stage = ChangeSet::default();
-
-            for emission in rx {
-                let mut graph = graph.lock().unwrap();
-                let mut chain = chain.lock().unwrap();
-
-                let (chain_changeset, graph_changeset) = match emission {
-                    Emission::Block(block_emission) => {
-                        let height = block_emission.block_height();
-                        let chain_changeset = chain
-                            .apply_update(block_emission.checkpoint)
-                            .expect("must always apply as we receive blocks in order from emitter");
-                        let graph_changeset =
-                            graph.apply_block_relevant(&block_emission.block, height);
-                        (chain_changeset, graph_changeset)
-                    }
-                    Emission::Mempool(mempool_txs) => {
-                        let graph_changeset = graph.batch_insert_relevant_unconfirmed(mempool_txs);
-                        (local_chain::ChangeSet::default(), graph_changeset)
-                    }
-                    Emission::Tip(h) => {
-                        tip_height = h;
-                        continue;
-                    }
-                };
-
-                db_stage.merge(ChangeSet {
-                    local_chain: chain_changeset,
-                    tx_graph: graph_changeset.tx_graph,
-                    indexer: graph_changeset.indexer,
-                    ..Default::default()
-                });
-
-                if last_db_commit.elapsed() >= DB_COMMIT_DELAY {
-                    let db = &mut *db.lock().unwrap();
-                    last_db_commit = Instant::now();
-                    if let Some(changeset) = db_stage.take() {
-                        db.append(&changeset)?;
-                    }
-                    println!(
-                        "[{:>10}s] committed to db (took {}s)",
-                        start.elapsed().as_secs_f32(),
-                        last_db_commit.elapsed().as_secs_f32()
-                    );
-                }
-
-                if last_print.map_or(Duration::MAX, |i| i.elapsed()) >= STDOUT_PRINT_DELAY {
-                    last_print = Some(Instant::now());
-                    let synced_to = chain.tip();
-                    let balance = {
-                        graph.graph().balance(
-                            &*chain,
-                            synced_to.block_id(),
-                            graph.index.outpoints().iter().cloned(),
-                            |(k, _), _| k == &Keychain::Internal,
-                        )
-                    };
-                    println!(
-                        "[{:>10}s] synced to {} @ {} / {} | total: {}",
-                        start.elapsed().as_secs_f32(),
-                        synced_to.hash(),
-                        synced_to.height(),
-                        tip_height,
-                        balance.total()
-                    );
-                }
-            }
-
-            emission_jh.join().expect("must join emitter thread")?;
-        }
-    }
-
-    Ok(())
-}
-
-#[allow(dead_code)]
-fn start_ctrlc_handler() -> Arc<AtomicBool> {
-    let flag = Arc::new(AtomicBool::new(false));
-    let cloned_flag = flag.clone();
-
-    ctrlc::set_handler(move || cloned_flag.store(true, Ordering::Release));
-
-    flag
-}
-
-#[allow(dead_code)]
-fn await_flag(flag: &AtomicBool, duration: Duration) -> bool {
-    let start = Instant::now();
-    loop {
-        if flag.load(Ordering::Acquire) {
-            return true;
-        }
-        if start.elapsed() >= duration {
-            return false;
-        }
-        std::thread::sleep(Duration::from_secs(1));
-    }
-}
diff --git a/example-crates/example_cli/Cargo.toml b/example-crates/example_cli/Cargo.toml
deleted file mode 100644
index 0a467db8..00000000
--- a/example-crates/example_cli/Cargo.toml
+++ /dev/null
@@ -1,18 +0,0 @@
-[package]
-name = "example_cli"
-version = "0.2.0"
-edition = "2021"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-bdk_chain = { path = "../../crates/chain", features = ["serde", "miniscript"]}
-bdk_coin_select = "0.4"
-bdk_file_store = { path = "../../crates/file_store" }
-bitcoin = { version = "0.32.0", features = ["base64"], default-features = false }
-
-anyhow = "1"
-clap = { version = "4.5.17", features = ["derive", "env"] }
-rand = "0.8"
-serde = { version = "1", features = ["derive"] }
-serde_json = "1.0"
diff --git a/example-crates/example_cli/src/lib.rs b/example-crates/example_cli/src/lib.rs
deleted file mode 100644
index 2cb27849..00000000
--- a/example-crates/example_cli/src/lib.rs
+++ /dev/null
@@ -1,954 +0,0 @@
-use serde_json::json;
-use std::cmp;
-use std::collections::HashMap;
-use std::env;
-use std::fmt;
-use std::str::FromStr;
-use std::sync::Mutex;
-
-use anyhow::bail;
-use anyhow::Context;
-use bdk_chain::bitcoin::{
-    absolute, address::NetworkUnchecked, bip32, consensus, constants, hex::DisplayHex, relative,
-    secp256k1::Secp256k1, transaction, Address, Amount, Network, NetworkKind, PrivateKey, Psbt,
-    PublicKey, Sequence, Transaction, TxIn, TxOut,
-};
-use bdk_chain::miniscript::{
-    descriptor::{DescriptorSecretKey, SinglePubKey},
-    plan::{Assets, Plan},
-    psbt::PsbtExt,
-    Descriptor, DescriptorPublicKey, ForEachKey,
-};
-use bdk_chain::ConfirmationBlockTime;
-use bdk_chain::{
-    indexed_tx_graph,
-    indexer::keychain_txout::{self, KeychainTxOutIndex},
-    local_chain::{self, LocalChain},
-    tx_graph, ChainOracle, DescriptorExt, FullTxOut, IndexedTxGraph, Merge,
-};
-use bdk_coin_select::{
-    metrics::LowestFee, Candidate, ChangePolicy, CoinSelector, DrainWeights, FeeRate, Target,
-    TargetFee, TargetOutputs,
-};
-use bdk_file_store::Store;
-use clap::{Parser, Subcommand};
-use rand::prelude::*;
-
-pub use anyhow;
-pub use clap;
-
-/// Alias for an `IndexedTxGraph` with specific `Anchor` and `Indexer`.
-pub type KeychainTxGraph = IndexedTxGraph<ConfirmationBlockTime, KeychainTxOutIndex<Keychain>>;
-
-/// ChangeSet
-#[derive(Default, Debug, Clone, PartialEq, serde::Deserialize, serde::Serialize)]
-pub struct ChangeSet {
-    /// Descriptor for recipient addresses.
-    pub descriptor: Option<Descriptor<DescriptorPublicKey>>,
-    /// Descriptor for change addresses.
-    pub change_descriptor: Option<Descriptor<DescriptorPublicKey>>,
-    /// Stores the network type of the transaction data.
-    pub network: Option<Network>,
-    /// Changes to the [`LocalChain`].
-    pub local_chain: local_chain::ChangeSet,
-    /// Changes to [`TxGraph`](tx_graph::TxGraph).
-    pub tx_graph: tx_graph::ChangeSet<ConfirmationBlockTime>,
-    /// Changes to [`KeychainTxOutIndex`].
-    pub indexer: keychain_txout::ChangeSet,
-}
-
-#[derive(Parser)]
-#[clap(author, version, about, long_about = None)]
-#[clap(propagate_version = true)]
-pub struct Args<CS: clap::Subcommand, S: clap::Args> {
-    #[clap(subcommand)]
-    pub command: Commands<CS, S>,
-}
-
-#[derive(Subcommand, Debug, Clone)]
-pub enum Commands<CS: clap::Subcommand, S: clap::Args> {
-    /// Initialize a new data store.
-    Init {
-        /// Network
-        #[clap(long, short, default_value = "signet")]
-        network: Network,
-        /// Descriptor
-        #[clap(env = "DESCRIPTOR")]
-        descriptor: String,
-        /// Change descriptor
-        #[clap(long, short, env = "CHANGE_DESCRIPTOR")]
-        change_descriptor: Option<String>,
-    },
-    #[clap(flatten)]
-    ChainSpecific(CS),
-    /// Address generation and inspection.
-    Address {
-        #[clap(subcommand)]
-        addr_cmd: AddressCmd,
-    },
-    /// Get the wallet balance.
-    Balance,
-    /// TxOut related commands.
-    #[clap(name = "txout")]
-    TxOut {
-        #[clap(subcommand)]
-        txout_cmd: TxOutCmd,
-    },
-    /// PSBT operations
-    Psbt {
-        #[clap(subcommand)]
-        psbt_cmd: PsbtCmd<S>,
-    },
-    /// Generate new BIP86 descriptors.
-    Generate {
-        /// Network
-        #[clap(long, short, default_value = "signet")]
-        network: Network,
-    },
-}
-
-#[derive(Subcommand, Debug, Clone)]
-pub enum AddressCmd {
-    /// Get the next unused address.
-    Next,
-    /// Get a new address regardless of the existing unused addresses.
-    New,
-    /// List all addresses
-    List {
-        /// List change addresses
-        #[clap(long)]
-        change: bool,
-    },
-    /// Get last revealed address index for each keychain.
-    Index,
-}
-
-#[derive(Subcommand, Debug, Clone)]
-pub enum TxOutCmd {
-    /// List transaction outputs.
-    List {
-        /// Return only spent outputs.
-        #[clap(short, long)]
-        spent: bool,
-        /// Return only unspent outputs.
-        #[clap(short, long)]
-        unspent: bool,
-        /// Return only confirmed outputs.
-        #[clap(long)]
-        confirmed: bool,
-        /// Return only unconfirmed outputs.
-        #[clap(long)]
-        unconfirmed: bool,
-    },
-}
-
-#[derive(Subcommand, Debug, Clone)]
-pub enum PsbtCmd<S: clap::Args> {
-    /// Create a new PSBT.
-    New {
-        /// Amount to send in satoshis
-        #[clap(required = true)]
-        value: u64,
-        /// Recipient address
-        #[clap(required = true)]
-        address: Address<NetworkUnchecked>,
-        /// Set the feerate of the tx (sat/vbyte)
-        #[clap(long, short, default_value = "1.0")]
-        feerate: Option<f32>,
-        /// Set max absolute timelock (from consensus value)
-        #[clap(long, short)]
-        after: Option<u32>,
-        /// Set max relative timelock (from consensus value)
-        #[clap(long, short)]
-        older: Option<u32>,
-        /// Coin selection algorithm
-        #[clap(long, short, default_value = "bnb")]
-        coin_select: CoinSelectionAlgo,
-        /// Debug print the PSBT
-        #[clap(long, short)]
-        debug: bool,
-    },
-    /// Sign with a hot signer
-    Sign {
-        /// Private descriptor [env: DESCRIPTOR=]
-        #[clap(long, short)]
-        descriptor: Option<String>,
-        /// PSBT
-        #[clap(long, short, required = true)]
-        psbt: String,
-    },
-    /// Extract transaction
-    Extract {
-        /// PSBT
-        #[clap(long, short, required = true)]
-        psbt: String,
-        /// Whether to try broadcasting the tx
-        #[clap(long, short)]
-        broadcast: bool,
-        #[clap(flatten)]
-        chain_specific: S,
-    },
-}
-
-#[derive(
-    Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, serde::Deserialize, serde::Serialize,
-)]
-pub enum Keychain {
-    External,
-    Internal,
-}
-
-impl fmt::Display for Keychain {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            Keychain::External => write!(f, "external"),
-            Keychain::Internal => write!(f, "internal"),
-        }
-    }
-}
-
-#[derive(Clone, Debug, Default)]
-pub enum CoinSelectionAlgo {
-    LargestFirst,
-    SmallestFirst,
-    OldestFirst,
-    NewestFirst,
-    #[default]
-    BranchAndBound,
-}
-
-impl FromStr for CoinSelectionAlgo {
-    type Err = anyhow::Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        use CoinSelectionAlgo::*;
-        Ok(match s {
-            "largest-first" => LargestFirst,
-            "smallest-first" => SmallestFirst,
-            "oldest-first" => OldestFirst,
-            "newest-first" => NewestFirst,
-            "bnb" => BranchAndBound,
-            unknown => bail!("unknown coin selection algorithm '{}'", unknown),
-        })
-    }
-}
-
-impl fmt::Display for CoinSelectionAlgo {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        use CoinSelectionAlgo::*;
-        write!(
-            f,
-            "{}",
-            match self {
-                LargestFirst => "largest-first",
-                SmallestFirst => "smallest-first",
-                OldestFirst => "oldest-first",
-                NewestFirst => "newest-first",
-                BranchAndBound => "bnb",
-            }
-        )
-    }
-}
-
-// Records changes to the internal keychain when we
-// have to include a change output during tx creation.
-#[derive(Debug)]
-pub struct ChangeInfo {
-    pub change_keychain: Keychain,
-    pub indexer: keychain_txout::ChangeSet,
-    pub index: u32,
-}
-
-pub fn create_tx<O: ChainOracle>(
-    graph: &mut KeychainTxGraph,
-    chain: &O,
-    assets: &Assets,
-    cs_algorithm: CoinSelectionAlgo,
-    address: Address,
-    value: u64,
-    feerate: f32,
-) -> anyhow::Result<(Psbt, Option<ChangeInfo>)>
-where
-    O::Error: std::error::Error + Send + Sync + 'static,
-{
-    let mut changeset = keychain_txout::ChangeSet::default();
-
-    // get planned utxos
-    let mut plan_utxos = planned_utxos(graph, chain, assets)?;
-
-    // sort utxos if cs-algo requires it
-    match cs_algorithm {
-        CoinSelectionAlgo::LargestFirst => {
-            plan_utxos.sort_by_key(|(_, utxo)| cmp::Reverse(utxo.txout.value))
-        }
-        CoinSelectionAlgo::SmallestFirst => plan_utxos.sort_by_key(|(_, utxo)| utxo.txout.value),
-        CoinSelectionAlgo::OldestFirst => plan_utxos.sort_by_key(|(_, utxo)| utxo.chain_position),
-        CoinSelectionAlgo::NewestFirst => {
-            plan_utxos.sort_by_key(|(_, utxo)| cmp::Reverse(utxo.chain_position))
-        }
-        CoinSelectionAlgo::BranchAndBound => plan_utxos.shuffle(&mut thread_rng()),
-    }
-
-    // build candidate set
-    let candidates: Vec<Candidate> = plan_utxos
-        .iter()
-        .map(|(plan, utxo)| {
-            Candidate::new(
-                utxo.txout.value.to_sat(),
-                plan.satisfaction_weight() as u64,
-                plan.witness_version().is_some(),
-            )
-        })
-        .collect();
-
-    // create recipient output(s)
-    let mut outputs = vec![TxOut {
-        value: Amount::from_sat(value),
-        script_pubkey: address.script_pubkey(),
-    }];
-
-    let (change_keychain, _) = graph
-        .index
-        .keychains()
-        .last()
-        .expect("must have a keychain");
-
-    let ((change_index, change_script), index_changeset) = graph
-        .index
-        .next_unused_spk(change_keychain)
-        .expect("Must exist");
-    changeset.merge(index_changeset);
-
-    let mut change_output = TxOut {
-        value: Amount::ZERO,
-        script_pubkey: change_script,
-    };
-
-    let change_desc = graph
-        .index
-        .keychains()
-        .find(|(k, _)| k == &change_keychain)
-        .expect("must exist")
-        .1;
-
-    let min_drain_value = change_desc.dust_value().to_sat();
-
-    let target = Target {
-        outputs: TargetOutputs::fund_outputs(
-            outputs
-                .iter()
-                .map(|output| (output.weight().to_wu(), output.value.to_sat())),
-        ),
-        fee: TargetFee {
-            rate: FeeRate::from_sat_per_vb(feerate),
-            ..Default::default()
-        },
-    };
-
-    let change_policy = ChangePolicy {
-        min_value: min_drain_value,
-        drain_weights: DrainWeights::TR_KEYSPEND,
-    };
-
-    // run coin selection
-    let mut selector = CoinSelector::new(&candidates);
-    match cs_algorithm {
-        CoinSelectionAlgo::BranchAndBound => {
-            let metric = LowestFee {
-                target,
-                long_term_feerate: FeeRate::from_sat_per_vb(10.0),
-                change_policy,
-            };
-            match selector.run_bnb(metric, 10_000) {
-                Ok(_) => {}
-                Err(_) => selector
-                    .select_until_target_met(target)
-                    .context("selecting coins")?,
-            }
-        }
-        _ => selector
-            .select_until_target_met(target)
-            .context("selecting coins")?,
-    }
-
-    // get the selected plan utxos
-    let selected: Vec<_> = selector.apply_selection(&plan_utxos).collect();
-
-    // if the selection tells us to use change and the change value is sufficient, we add it as an output
-    let mut change_info = Option::<ChangeInfo>::None;
-    let drain = selector.drain(target, change_policy);
-    if drain.value > min_drain_value {
-        change_output.value = Amount::from_sat(drain.value);
-        outputs.push(change_output);
-        change_info = Some(ChangeInfo {
-            change_keychain,
-            indexer: changeset,
-            index: change_index,
-        });
-        outputs.shuffle(&mut thread_rng());
-    }
-
-    let unsigned_tx = Transaction {
-        version: transaction::Version::TWO,
-        lock_time: assets
-            .absolute_timelock
-            .unwrap_or(absolute::LockTime::from_height(
-                chain.get_chain_tip()?.height,
-            )?),
-        input: selected
-            .iter()
-            .map(|(plan, utxo)| TxIn {
-                previous_output: utxo.outpoint,
-                sequence: plan
-                    .relative_timelock
-                    .map_or(Sequence::ENABLE_RBF_NO_LOCKTIME, Sequence::from),
-                ..Default::default()
-            })
-            .collect(),
-        output: outputs,
-    };
-
-    // update psbt with plan
-    let mut psbt = Psbt::from_unsigned_tx(unsigned_tx)?;
-    for (i, (plan, utxo)) in selected.iter().enumerate() {
-        let psbt_input = &mut psbt.inputs[i];
-        plan.update_psbt_input(psbt_input);
-        psbt_input.witness_utxo = Some(utxo.txout.clone());
-    }
-
-    Ok((psbt, change_info))
-}
-
-// Alias the elements of `planned_utxos`
-pub type PlanUtxo = (Plan, FullTxOut<ConfirmationBlockTime>);
-
-pub fn planned_utxos<O: ChainOracle>(
-    graph: &KeychainTxGraph,
-    chain: &O,
-    assets: &Assets,
-) -> Result<Vec<PlanUtxo>, O::Error> {
-    let chain_tip = chain.get_chain_tip()?;
-    let outpoints = graph.index.outpoints();
-    graph
-        .graph()
-        .try_filter_chain_unspents(chain, chain_tip, outpoints.iter().cloned())?
-        .filter_map(|((k, i), full_txo)| -> Option<Result<PlanUtxo, O::Error>> {
-            let desc = graph
-                .index
-                .keychains()
-                .find(|(keychain, _)| *keychain == k)
-                .expect("keychain must exist")
-                .1
-                .at_derivation_index(i)
-                .expect("i can't be hardened");
-
-            let plan = desc.plan(assets).ok()?;
-
-            Some(Ok((plan, full_txo)))
-        })
-        .collect()
-}
-
-pub fn handle_commands<CS: clap::Subcommand, S: clap::Args>(
-    graph: &Mutex<KeychainTxGraph>,
-    chain: &Mutex<LocalChain>,
-    db: &Mutex<Store<ChangeSet>>,
-    network: Network,
-    broadcast_fn: impl FnOnce(S, &Transaction) -> anyhow::Result<()>,
-    cmd: Commands<CS, S>,
-) -> anyhow::Result<()> {
-    match cmd {
-        Commands::Init { .. } => unreachable!("handled by init command"),
-        Commands::Generate { .. } => unreachable!("handled by generate command"),
-        Commands::ChainSpecific(_) => unreachable!("example code should handle this!"),
-        Commands::Address { addr_cmd } => {
-            let graph = &mut *graph.lock().unwrap();
-            let index = &mut graph.index;
-
-            match addr_cmd {
-                AddressCmd::Next | AddressCmd::New => {
-                    let spk_chooser = match addr_cmd {
-                        AddressCmd::Next => KeychainTxOutIndex::next_unused_spk,
-                        AddressCmd::New => KeychainTxOutIndex::reveal_next_spk,
-                        _ => unreachable!("only these two variants exist in match arm"),
-                    };
-
-                    let ((spk_i, spk), index_changeset) =
-                        spk_chooser(index, Keychain::External).expect("Must exist");
-                    let db = &mut *db.lock().unwrap();
-                    db.append(&ChangeSet {
-                        indexer: index_changeset,
-                        ..Default::default()
-                    })?;
-                    let addr = Address::from_script(spk.as_script(), network)?;
-                    println!("[address @ {}] {}", spk_i, addr);
-                    Ok(())
-                }
-                AddressCmd::Index => {
-                    for (keychain, derivation_index) in index.last_revealed_indices() {
-                        println!("{:?}: {}", keychain, derivation_index);
-                    }
-                    Ok(())
-                }
-                AddressCmd::List { change } => {
-                    let target_keychain = match change {
-                        true => Keychain::Internal,
-                        false => Keychain::External,
-                    };
-                    for (spk_i, spk) in index.revealed_keychain_spks(target_keychain) {
-                        let address = Address::from_script(spk.as_script(), network)
-                            .expect("should always be able to derive address");
-                        println!(
-                            "{:?} {} used:{}",
-                            spk_i,
-                            address,
-                            index.is_used(target_keychain, spk_i)
-                        );
-                    }
-                    Ok(())
-                }
-            }
-        }
-        Commands::Balance => {
-            let graph = &*graph.lock().unwrap();
-            let chain = &*chain.lock().unwrap();
-            fn print_balances<'a>(
-                title_str: &'a str,
-                items: impl IntoIterator<Item = (&'a str, Amount)>,
-            ) {
-                println!("{}:", title_str);
-                for (name, amount) in items.into_iter() {
-                    println!("    {:<10} {:>12} sats", name, amount.to_sat())
-                }
-            }
-
-            let balance = graph.graph().try_balance(
-                chain,
-                chain.get_chain_tip()?,
-                graph.index.outpoints().iter().cloned(),
-                |(k, _), _| k == &Keychain::Internal,
-            )?;
-
-            let confirmed_total = balance.confirmed + balance.immature;
-            let unconfirmed_total = balance.untrusted_pending + balance.trusted_pending;
-
-            print_balances(
-                "confirmed",
-                [
-                    ("total", confirmed_total),
-                    ("spendable", balance.confirmed),
-                    ("immature", balance.immature),
-                ],
-            );
-            print_balances(
-                "unconfirmed",
-                [
-                    ("total", unconfirmed_total),
-                    ("trusted", balance.trusted_pending),
-                    ("untrusted", balance.untrusted_pending),
-                ],
-            );
-
-            Ok(())
-        }
-        Commands::TxOut { txout_cmd } => {
-            let graph = &*graph.lock().unwrap();
-            let chain = &*chain.lock().unwrap();
-            let chain_tip = chain.get_chain_tip()?;
-            let outpoints = graph.index.outpoints();
-
-            match txout_cmd {
-                TxOutCmd::List {
-                    spent,
-                    unspent,
-                    confirmed,
-                    unconfirmed,
-                } => {
-                    let txouts = graph
-                        .graph()
-                        .try_filter_chain_txouts(chain, chain_tip, outpoints.iter().cloned())?
-                        .filter(|(_, full_txo)| match (spent, unspent) {
-                            (true, false) => full_txo.spent_by.is_some(),
-                            (false, true) => full_txo.spent_by.is_none(),
-                            _ => true,
-                        })
-                        .filter(|(_, full_txo)| match (confirmed, unconfirmed) {
-                            (true, false) => full_txo.chain_position.is_confirmed(),
-                            (false, true) => !full_txo.chain_position.is_confirmed(),
-                            _ => true,
-                        })
-                        .collect::<Vec<_>>();
-
-                    for (spk_i, full_txo) in txouts {
-                        let addr = Address::from_script(&full_txo.txout.script_pubkey, network)?;
-                        println!(
-                            "{:?} {} {} {} spent:{:?}",
-                            spk_i, full_txo.txout.value, full_txo.outpoint, addr, full_txo.spent_by
-                        )
-                    }
-                    Ok(())
-                }
-            }
-        }
-        Commands::Psbt { psbt_cmd } => match psbt_cmd {
-            PsbtCmd::New {
-                value,
-                address,
-                feerate,
-                after,
-                older,
-                coin_select,
-                debug,
-            } => {
-                let address = address.require_network(network)?;
-
-                let (psbt, change_info) = {
-                    let mut graph = graph.lock().unwrap();
-                    let chain = chain.lock().unwrap();
-
-                    // collect assets we can sign for
-                    let mut pks = vec![];
-                    for (_, desc) in graph.index.keychains() {
-                        desc.for_each_key(|k| {
-                            pks.push(k.clone());
-                            true
-                        });
-                    }
-                    let mut assets = Assets::new().add(pks);
-                    if let Some(n) = after {
-                        assets = assets.after(absolute::LockTime::from_consensus(n));
-                    }
-                    if let Some(n) = older {
-                        assets = assets.older(relative::LockTime::from_consensus(n)?);
-                    }
-
-                    create_tx(
-                        &mut graph,
-                        &*chain,
-                        &assets,
-                        coin_select,
-                        address,
-                        value,
-                        feerate.expect("must have feerate"),
-                    )?
-                };
-
-                if let Some(ChangeInfo {
-                    change_keychain,
-                    indexer,
-                    index,
-                }) = change_info
-                {
-                    // We must first persist to disk the fact that we've got a new address from the
-                    // change keychain so future scans will find the tx we're about to broadcast.
-                    // If we're unable to persist this, then we don't want to broadcast.
-                    {
-                        let db = &mut *db.lock().unwrap();
-                        db.append(&ChangeSet {
-                            indexer,
-                            ..Default::default()
-                        })?;
-                    }
-
-                    // We don't want other callers/threads to use this address while we're using it
-                    // but we also don't want to scan the tx we just created because it's not
-                    // technically in the blockchain yet.
-            PsbtCmd::Sign { psbt, descriptor } => {
-                let mut psbt = Psbt::from_str(&psbt)?;
-
-                let desc_str = match descriptor {
-                    Some(s) => s,
-                    None => env::var("DESCRIPTOR").context("unable to sign")?,
-                };
-
-                let secp = Secp256k1::new();
-                let (_, keymap) = Descriptor::parse_descriptor(&secp, &desc_str)?;
-                if keymap.is_empty() {
-                    bail!("unable to sign")
-                }
-
-                // note: we're only looking at the first entry in the keymap
-                // the idea is to find something that impls `GetKey`
-                let sign_res = match keymap.iter().next().expect("not empty") {
-                    (DescriptorPublicKey::Single(single_pub), DescriptorSecretKey::Single(prv)) => {
-                        let pk = match single_pub.key {
-                            SinglePubKey::FullKey(pk) => pk,
-                            SinglePubKey::XOnly(_) => unimplemented!("single xonly pubkey"),
-                        };
-                        let keys: HashMap<PublicKey, PrivateKey> = [(pk, prv.key)].into();
-                        psbt.sign(&keys, &secp)
-                    }
-                    (_, DescriptorSecretKey::XPrv(k)) => psbt.sign(&k.xkey, &secp),
-                    _ => unimplemented!("multi xkey signer"),
-                };
-
-                let _ = sign_res
-                    .map_err(|errors| anyhow::anyhow!("failed to sign PSBT {:?}", errors))?;
-
-                let mut obj = serde_json::Map::new();
-                obj.insert("psbt".to_string(), json!(psbt.to_string()));
-                println!("{}", serde_json::to_string_pretty(&obj)?);
-
-                Ok(())
-            }
-            PsbtCmd::Extract {
-                broadcast,
-                chain_specific,
-                psbt,
-            } => {
-                let mut psbt = Psbt::from_str(&psbt)?;
-                psbt.finalize_mut(&Secp256k1::new())
-                    .map_err(|errors| anyhow::anyhow!("failed to finalize PSBT {errors:?}"))?;
-
-                let tx = psbt.extract_tx()?;
-
-                if broadcast {
-                    let mut graph = graph.lock().unwrap();
-
-                    match broadcast_fn(chain_specific, &tx) {
-                        Ok(_) => {
-                            println!("Broadcasted Tx: {}", tx.compute_txid());
-
-                            let changeset = graph.insert_tx(tx);
-
-                            // We know the tx is at least unconfirmed now. Note if persisting here fails,
-                            // it's not a big deal since we can always find it again from the
-                            // blockchain.
-                            db.lock().unwrap().append(&ChangeSet {
-                                tx_graph: changeset.tx_graph,
-                                indexer: changeset.indexer,
-                                ..Default::default()
-                            })?;
-                        }
-                        Err(e) => {
-                            // We failed to broadcast, so allow our change address to be used in the future
-                            let (change_keychain, _) = graph
-                                .index
-                                .keychains()
-                                .last()
-                                .expect("must have a keychain");
-                            let change_index = tx.output.iter().find_map(|txout| {
-                                let spk = txout.script_pubkey.clone();
-                                match graph.index.index_of_spk(spk) {
-                                    Some(&(keychain, index)) if keychain == change_keychain => {
-                                        Some((keychain, index))
-                                    }
-                                    _ => None,
-                                }
-                            });
-                            if let Some((keychain, index)) = change_index {
-                                graph.index.unmark_used(keychain, index);
-                            }
-                            bail!(e);
-                        }
-                    }
-                } else {
-                    // encode raw tx hex
-                    let hex = consensus::serialize(&tx).to_lower_hex_string();
-                    let mut obj = serde_json::Map::new();
-                    obj.insert("tx".to_string(), json!(hex));
-                    println!("{}", serde_json::to_string_pretty(&obj)?);
-                }
-
-                Ok(())
-            }
-        },
-    }
-}
-
-/// The initial state returned by [`init_or_load`].
-pub struct Init<CS: clap::Subcommand, S: clap::Args> {
-    /// CLI args
-    pub args: Args<CS, S>,
-    /// Indexed graph
-    pub graph: Mutex<KeychainTxGraph>,
-    /// Local chain
-    pub chain: Mutex<LocalChain>,
-    /// Database
-    pub db: Mutex<Store<ChangeSet>>,
-    /// Network
-    pub network: Network,
-}
-
-/// Loads from persistence or creates new
-pub fn init_or_load<CS: clap::Subcommand, S: clap::Args>(
-    db_magic: &[u8],
-    db_path: &str,
-) -> anyhow::Result<Option<Init<CS, S>>> {
-    let args = Args::<CS, S>::parse();
-
-    match args.command {
-        // initialize new db
-        Commands::Init { .. } => initialize::<CS, S>(args, db_magic, db_path).map(|_| None),
-        // generate keys
-        Commands::Generate { network } => generate_bip86_helper(network).map(|_| None),
-        // try load
-        _ => {
-            let (db, changeset) =
-                Store::<ChangeSet>::load(db_magic, db_path).context("could not open file store")?;
-
-            let changeset = changeset.expect("should not be empty");
-
-            let network = changeset.network.expect("changeset network");
-
-            let chain = Mutex::new({
-                let (mut chain, _) =
-                    LocalChain::from_genesis_hash(constants::genesis_block(network).block_hash());
-                chain.apply_changeset(&changeset.local_chain)?;
-                chain
-            });
-
-            let graph = Mutex::new({
-                // insert descriptors and apply loaded changeset
-                let mut index = KeychainTxOutIndex::default();
-                if let Some(desc) = changeset.descriptor {
-                    index.insert_descriptor(Keychain::External, desc)?;
-                }
-                if let Some(change_desc) = changeset.change_descriptor {
-                    index.insert_descriptor(Keychain::Internal, change_desc)?;
-                }
-                let mut graph = KeychainTxGraph::new(index);
-                graph.apply_changeset(indexed_tx_graph::ChangeSet {
-                    tx_graph: changeset.tx_graph,
-                    indexer: changeset.indexer,
-                });
-                graph
-            });
-
-            let db = Mutex::new(db);
-
-            Ok(Some(Init {
-                args,
-                graph,
-                chain,
-                db,
-                network,
-            }))
-        }
-    }
-}
-
-/// Initialize db backend.
-fn initialize<CS, S>(args: Args<CS, S>, db_magic: &[u8], db_path: &str) -> anyhow::Result<()>
-where
-    CS: clap::Subcommand,
-    S: clap::Args,
-{
-    if let Commands::Init {
-        network,
-        descriptor,
-        change_descriptor,
-    } = args.command
-    {
-        let mut changeset = ChangeSet::default();
-
-        // parse descriptors
-        let secp = Secp256k1::new();
-        let mut index = KeychainTxOutIndex::default();
-        let (descriptor, _) =
-            Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &descriptor)?;
-        let _ = index.insert_descriptor(Keychain::External, descriptor.clone())?;
-        changeset.descriptor = Some(descriptor);
-
-        if let Some(desc) = change_descriptor {
-            let (change_descriptor, _) =
-                Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &desc)?;
-            let _ = index.insert_descriptor(Keychain::Internal, change_descriptor.clone())?;
-            changeset.change_descriptor = Some(change_descriptor);
-        }
-
-        // create new
-        let (_, chain_changeset) =
-            LocalChain::from_genesis_hash(constants::genesis_block(network).block_hash());
-        changeset.network = Some(network);
-        changeset.local_chain = chain_changeset;
-        let mut db = Store::<ChangeSet>::create(db_magic, db_path)?;
-        db.append(&changeset)?;
-        println!("New database {db_path}");
-    }
-
-    Ok(())
-}
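
For reference, `initialize` and `init_or_load` give the file store a simple append-only lifecycle: create once, append incremental changesets, and aggregate them on load. A minimal round-trip sketch, not part of the diff, using only the `Store` calls that appear above; the magic bytes, path, and `changeset` value are hypothetical:

    // Sketch only: magic, path, and `changeset` are placeholders.
    let magic: &[u8] = b"example_magic";
    let mut db = Store::<ChangeSet>::create(magic, "example.db")?;
    db.append(&changeset)?; // persist one incremental changeset
    drop(db);
    // A later run re-opens the store and receives the aggregate changeset.
    let (_db, aggregate) = Store::<ChangeSet>::load(magic, "example.db")?;
    assert!(aggregate.is_some());
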
-fn generate_bip86_helper(network: impl Into<NetworkKind>) -> anyhow::Result<()> {
-    let secp = Secp256k1::new();
-    let mut seed = [0x00; 32];
-    thread_rng().fill_bytes(&mut seed);
-
-    let m = bip32::Xpriv::new_master(network, &seed)?;
-    let fp = m.fingerprint(&secp);
-    let path = if m.network.is_mainnet() {
-        "86h/0h/0h"
-    } else {
-        "86h/1h/0h"
-    };
-
-    let descriptors: Vec<String> = [0, 1]
-        .iter()
-        .map(|i| format!("tr([{fp}]{m}/{path}/{i}/*)"))
-        .collect();
-    let external_desc = &descriptors[0];
-    let internal_desc = &descriptors[1];
-    let (descriptor, keymap) =
-        <Descriptor<DescriptorPublicKey>>::parse_descriptor(&secp, external_desc)?;
-    let (internal_descriptor, internal_keymap) =
-        <Descriptor<DescriptorPublicKey>>::parse_descriptor(&secp, internal_desc)?;
-    println!("Public");
-    println!("{}", descriptor);
-    println!("{}", internal_descriptor);
-    println!("\nPrivate");
-    println!("{}", descriptor.to_string_with_secret(&keymap));
-    println!(
-        "{}",
-        internal_descriptor.to_string_with_secret(&internal_keymap)
-    );
-
-    Ok(())
-}
-
-impl Merge for ChangeSet {
-    fn merge(&mut self, other: Self) {
-        if other.descriptor.is_some() {
-            self.descriptor = other.descriptor;
-        }
-        if other.change_descriptor.is_some() {
-            self.change_descriptor = other.change_descriptor;
-        }
-        if other.network.is_some() {
-            self.network = other.network;
-        }
-        Merge::merge(&mut self.local_chain, other.local_chain);
-        Merge::merge(&mut self.tx_graph, other.tx_graph);
-        Merge::merge(&mut self.indexer, other.indexer);
-    }
-
-    fn is_empty(&self) -> bool {
-        self.descriptor.is_none()
-            && self.change_descriptor.is_none()
-            && self.network.is_none()
-            && self.local_chain.is_empty()
-            && self.tx_graph.is_empty()
-            && self.indexer.is_empty()
-    }
-}
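
For reference, this `Merge` implementation makes the scalar fields last-write-wins while the component changesets merge recursively. A minimal sketch of the resulting behavior, not part of the diff:

    // Sketch only.
    let mut a = ChangeSet {
        network: Some(Network::Testnet),
        ..Default::default()
    };
    let b = ChangeSet {
        network: Some(Network::Bitcoin),
        ..Default::default()
    };
    a.merge(b);
    assert_eq!(a.network, Some(Network::Bitcoin)); // later scalar value wins
    assert!(!a.is_empty());
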
diff --git a/example-crates/example_electrum/Cargo.toml b/example-crates/example_electrum/Cargo.toml
deleted file mode 100644
index 9dcd5400..00000000
--- a/example-crates/example_electrum/Cargo.toml
+++ /dev/null
@@ -1,11 +0,0 @@
-[package]
-name = "example_electrum"
-version = "0.2.0"
-edition = "2021"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-bdk_chain = { path = "../../crates/chain", features = ["serde"] }
-bdk_electrum = { path = "../../crates/electrum" }
-example_cli = { path = "../example_cli" }
diff --git a/example-crates/example_electrum/src/main.rs b/example-crates/example_electrum/src/main.rs
deleted file mode 100644
index b6c93a1d..00000000
--- a/example-crates/example_electrum/src/main.rs
+++ /dev/null
@@ -1,288 +0,0 @@
-use std::io::{self, Write};
-
-use bdk_chain::{
-    bitcoin::Network,
-    collections::BTreeSet,
-    indexed_tx_graph,
-    spk_client::{FullScanRequest, SyncRequest},
-    ConfirmationBlockTime, Merge,
-};
-use bdk_electrum::{
-    electrum_client::{self, Client, ElectrumApi},
-    BdkElectrumClient,
-};
-use example_cli::{
-    self,
-    anyhow::{self, Context},
-    clap::{self, Parser, Subcommand},
-    ChangeSet, Keychain,
-};
-
-const DB_MAGIC: &[u8] = b"bdk_example_electrum";
-const DB_PATH: &str = ".bdk_example_electrum.db";
-
-#[derive(Subcommand, Debug, Clone)]
-enum ElectrumCommands {
-    /// Scans the addresses in the wallet using the electrum API.
-    Scan {
-        /// When a gap this large has been found for a keychain, it will stop.
-        #[clap(long, default_value = "5")]
-        stop_gap: usize,
-        #[clap(flatten)]
-        scan_options: ScanOptions,
-        #[clap(flatten)]
-        electrum_args: ElectrumArgs,
-    },
-    /// Scans particular addresses using the electrum API.
-    Sync {
-        /// Scan all the unused addresses.
-        #[clap(long)]
-        unused_spks: bool,
-        /// Scan every address that you have derived.
-        #[clap(long)]
-        all_spks: bool,
-        /// Scan unspent outpoints for spends or changes to the confirmation status of the residing tx.
-        #[clap(long)]
-        utxos: bool,
-        /// Scan unconfirmed transactions for updates.
-        #[clap(long)]
-        unconfirmed: bool,
-        #[clap(flatten)]
-        scan_options: ScanOptions,
-        #[clap(flatten)]
-        electrum_args: ElectrumArgs,
-    },
-}
-
-impl ElectrumCommands {
-    fn electrum_args(&self) -> ElectrumArgs {
-        match self {
-            ElectrumCommands::Scan { electrum_args, .. } => electrum_args.clone(),
-            ElectrumCommands::Sync { electrum_args, .. } => electrum_args.clone(),
-        }
-    }
-}
-
-#[derive(clap::Args, Debug, Clone)]
-pub struct ElectrumArgs {
-    /// The electrum url to use to connect to. If not provided it will use a default electrum server
-    /// for your chosen network.
-    electrum_url: Option<String>,
-}
-
-impl ElectrumArgs {
-    pub fn client(&self, network: Network) -> anyhow::Result<Client> {
-        let electrum_url = self.electrum_url.as_deref().unwrap_or(match network {
-            Network::Bitcoin => "ssl://electrum.blockstream.info:50002",
-            Network::Testnet => "ssl://electrum.blockstream.info:60002",
-            Network::Regtest => "tcp://localhost:60401",
-            Network::Signet => "tcp://signet-electrumx.wakiyamap.dev:50001",
-            _ => panic!("Unknown network"),
-        });
-        let config = electrum_client::Config::builder()
-            .validate_domain(matches!(network, Network::Bitcoin))
-            .build();
-
-        Ok(electrum_client::Client::from_config(electrum_url, config)?)
-    }
-}
-
-#[derive(Parser, Debug, Clone, PartialEq)]
-pub struct ScanOptions {
-    /// Set batch size for each script_history call to electrum client.
-    #[clap(long, default_value = "25")]
-    pub batch_size: usize,
-}
-
-fn main() -> anyhow::Result<()> {
-    let example_cli::Init {
-        args,
-        graph,
-        chain,
-        db,
-        network,
-    } = match example_cli::init_or_load::<ElectrumCommands, ElectrumArgs>(DB_MAGIC, DB_PATH)? {
-        Some(init) => init,
-        None => return Ok(()),
-    };
-
-    let electrum_cmd = match &args.command {
-        example_cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd,
-        general_cmd => {
-            return example_cli::handle_commands(
-                &graph,
-                &chain,
-                &db,
-                network,
-                |electrum_args, tx| {
-                    let client = electrum_args.client(network)?;
-                    client.transaction_broadcast(tx)?;
-                    Ok(())
-                },
-                general_cmd.clone(),
-            );
-        }
-    };
-
-    let client = BdkElectrumClient::new(electrum_cmd.electrum_args().client(network)?);
-
-    // Tell the electrum client about the txs we've already got locally so it doesn't re-download them
-    client.populate_tx_cache(
-        graph
-            .lock()
-            .unwrap()
-            .graph()
-            .full_txs()
-            .map(|tx_node| tx_node.tx),
-    );
-
-    let (chain_update, tx_update, keychain_update) = match electrum_cmd.clone() {
-        ElectrumCommands::Scan {
-            stop_gap,
-            scan_options,
-            ..
-        } => {
-            let request = {
-                let graph = &*graph.lock().unwrap();
-                let chain = &*chain.lock().unwrap();
-
-                FullScanRequest::builder()
-                    .chain_tip(chain.tip())
-                    .spks_for_keychain(
-                        Keychain::External,
-                        graph
-                            .index
-                            .unbounded_spk_iter(Keychain::External)
-                            .into_iter()
-                            .flatten(),
-                    )
-                    .spks_for_keychain(
-                        Keychain::Internal,
-                        graph
-                            .index
-                            .unbounded_spk_iter(Keychain::Internal)
-                            .into_iter()
-                            .flatten(),
-                    )
-                    .inspect({
-                        let mut once = BTreeSet::new();
-                        move |k, spk_i, _| {
-                            if once.insert(k) {
-                                eprint!("\nScanning {}: {} ", k, spk_i);
-                            } else {
-                                eprint!("{} ", spk_i);
-                            }
-                            io::stdout().flush().expect("must flush");
-                        }
-                    })
-            };
-
-            let res = client
-                .full_scan::<_>(request, stop_gap, scan_options.batch_size, false)
-                .context("scanning the blockchain")?;
-            (
-                res.chain_update,
-                res.tx_update,
-                Some(res.last_active_indices),
-            )
-        }
-        ElectrumCommands::Sync {
-            mut unused_spks,
-            all_spks,
-            mut utxos,
-            mut unconfirmed,
-            scan_options,
-            ..
-        } => {
-            // Get a short lock on the tracker to get the spks we're interested in
-            let graph = graph.lock().unwrap();
-            let chain = chain.lock().unwrap();
-
-            if !(all_spks || unused_spks || utxos || unconfirmed) {
-                unused_spks = true;
-                unconfirmed = true;
-                utxos = true;
-            } else if all_spks {
-                unused_spks = false;
-            }
-
-            let chain_tip = chain.tip();
-            let mut request =
-                SyncRequest::builder()
-                    .chain_tip(chain_tip.clone())
-                    .inspect(|item, progress| {
-                        let pc = (100 * progress.consumed()) as f32 / progress.total() as f32;
-                        eprintln!("[ SCANNING {:03.0}% ] {}", pc, item);
-                    });
-
-            request = request.expected_spk_txids(graph.list_expected_spk_txids(
-                &*chain,
-                chain_tip.block_id(),
-                ..,
-            ));
-            if all_spks {
-                request = request.spks_with_indexes(graph.index.revealed_spks(..));
-            }
-            if unused_spks {
-                request = request.spks_with_indexes(graph.index.unused_spks());
-            }
-            if utxos {
-                let init_outpoints = graph.index.outpoints();
-                request = request.outpoints(
-                    graph
-                        .graph()
-                        .filter_chain_unspents(
-                            &*chain,
-                            chain_tip.block_id(),
-                            init_outpoints.iter().cloned(),
-                        )
-                        .map(|(_, utxo)| utxo.outpoint),
-                );
-            };
-            if unconfirmed {
-                request = request.txids(
-                    graph
-                        .graph()
-                        .list_canonical_txs(&*chain, chain_tip.block_id())
-                        .filter(|canonical_tx| !canonical_tx.chain_position.is_confirmed())
-                        .map(|canonical_tx| canonical_tx.tx_node.txid),
-                );
-            }
-
-            let res = client
-                .sync(request, scan_options.batch_size, false)
-                .context("scanning the blockchain")?;
-
-            // drop lock on graph and chain
-            drop((graph, chain));
-
-            (res.chain_update, res.tx_update, None)
-        }
-    };
-
-    let db_changeset = {
-        let mut chain = chain.lock().unwrap();
-        let mut graph = graph.lock().unwrap();
-
-        let chain_changeset = chain.apply_update(chain_update.expect("request has chain tip"))?;
-
-        let mut indexed_tx_graph_changeset =
-            indexed_tx_graph::ChangeSet::<ConfirmationBlockTime, _>::default();
-        if let Some(keychain_update) = keychain_update {
-            let keychain_changeset = graph.index.reveal_to_target_multi(&keychain_update);
-            indexed_tx_graph_changeset.merge(keychain_changeset.into());
-        }
-        indexed_tx_graph_changeset.merge(graph.apply_update(tx_update));
-
-        ChangeSet {
-            local_chain: chain_changeset,
-            tx_graph: indexed_tx_graph_changeset.tx_graph,
-            indexer: indexed_tx_graph_changeset.indexer,
-            ..Default::default()
-        }
-    };
-
-    let mut db = db.lock().unwrap();
-    db.append(&db_changeset)?;
-    Ok(())
-}
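
For reference: the electrum flow removed here survives at the `Wallet` level in the retained examples/example_wallet_electrum. A minimal sketch, not part of the diff, assuming `wallet` is a loaded `bdk_wallet::Wallet`; the server URL, stop gap, and batch size are placeholders:

    // Sketch only: URL and scan parameters are placeholders.
    use bdk_electrum::{electrum_client, BdkElectrumClient};

    let client = BdkElectrumClient::new(electrum_client::Client::new(
        "ssl://electrum.blockstream.info:60002",
    )?);
    let request = wallet.start_full_scan().build();
    let update = client.full_scan(request, 5, 25, false)?; // stop_gap, batch_size, fetch_prev_txouts
    wallet.apply_update(update)?;
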
diff --git a/example-crates/example_esplora/Cargo.toml b/example-crates/example_esplora/Cargo.toml
deleted file mode 100644
index ccad862e..00000000
--- a/example-crates/example_esplora/Cargo.toml
+++ /dev/null
@@ -1,12 +0,0 @@
-[package]
-name = "example_esplora"
-version = "0.1.0"
-edition = "2021"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-bdk_chain = { path = "../../crates/chain", features = ["serde"] }
-bdk_esplora = { path = "../../crates/esplora", features = ["blocking"] }
-example_cli = { path = "../example_cli" }
-
diff --git a/example-crates/example_esplora/src/main.rs b/example-crates/example_esplora/src/main.rs
deleted file mode 100644
index 8ef39c2f..00000000
--- a/example-crates/example_esplora/src/main.rs
+++ /dev/null
@@ -1,292 +0,0 @@
-use core::f32;
-use std::{
-    collections::BTreeSet,
-    io::{self, Write},
-};
-
-use bdk_chain::{
-    bitcoin::Network,
-    keychain_txout::FullScanRequestBuilderExt,
-    spk_client::{FullScanRequest, SyncRequest},
-    Merge,
-};
-use bdk_esplora::{esplora_client, EsploraExt};
-use example_cli::{
-    anyhow::{self, Context},
-    clap::{self, Parser, Subcommand},
-    ChangeSet, Keychain,
-};
-
-const DB_MAGIC: &[u8] = b"bdk_example_esplora";
-const DB_PATH: &str = ".bdk_example_esplora.db";
-
-#[derive(Subcommand, Debug, Clone)]
-enum EsploraCommands {
-    /// Scans the addresses in the wallet using the esplora API.
-    Scan {
-        /// When a gap this large has been found for a keychain, it will stop.
-        #[clap(long, short = 'g', default_value = "10")]
-        stop_gap: usize,
-        #[clap(flatten)]
-        scan_options: ScanOptions,
-        #[clap(flatten)]
-        esplora_args: EsploraArgs,
-    },
-    /// Scan for particular addresses and unconfirmed transactions using the esplora API.
-    Sync {
-        /// Scan all the unused addresses.
-        #[clap(long)]
-        unused_spks: bool,
-        /// Scan every address that you have derived.
-        #[clap(long)]
-        all_spks: bool,
-        /// Scan unspent outpoints for spends or changes to the confirmation status of the residing tx.
-        #[clap(long)]
-        utxos: bool,
-        /// Scan unconfirmed transactions for updates.
-        #[clap(long)]
-        unconfirmed: bool,
-        #[clap(flatten)]
-        scan_options: ScanOptions,
-        #[clap(flatten)]
-        esplora_args: EsploraArgs,
-    },
-}
-
-impl EsploraCommands {
-    fn esplora_args(&self) -> EsploraArgs {
-        match self {
-            EsploraCommands::Scan { esplora_args, .. } => esplora_args.clone(),
-            EsploraCommands::Sync { esplora_args, .. } => esplora_args.clone(),
-        }
-    }
-}
-
-#[derive(clap::Args, Debug, Clone)]
-pub struct EsploraArgs {
-    /// The esplora url endpoint to connect to.
-    #[clap(long, short = 'u', env = "ESPLORA_SERVER")]
-    esplora_url: Option<String>,
-}
-
-impl EsploraArgs {
-    pub fn client(&self, network: Network) -> anyhow::Result<esplora_client::BlockingClient> {
-        let esplora_url = self.esplora_url.as_deref().unwrap_or(match network {
-            Network::Bitcoin => "https://blockstream.info/api",
-            Network::Testnet => "https://blockstream.info/testnet/api",
-            Network::Regtest => "http://localhost:3002",
-            Network::Signet => "http://signet.bitcoindevkit.net",
-            _ => panic!("unsupported network"),
-        });
-
-        let client = esplora_client::Builder::new(esplora_url).build_blocking();
-        Ok(client)
-    }
-}
-
-#[derive(Parser, Debug, Clone, PartialEq)]
-pub struct ScanOptions {
-    /// Max number of concurrent esplora server requests.
-    #[clap(long, default_value = "2")]
-    pub parallel_requests: usize,
-}
-
-fn main() -> anyhow::Result<()> {
-    let example_cli::Init {
-        args,
-        graph,
-        chain,
-        db,
-        network,
-    } = match example_cli::init_or_load::<EsploraCommands, EsploraArgs>(DB_MAGIC, DB_PATH)? {
-        Some(init) => init,
-        None => return Ok(()),
-    };
-
-    let esplora_cmd = match &args.command {
-        // These are commands that are handled by this example (sync, scan).
-        example_cli::Commands::ChainSpecific(esplora_cmd) => esplora_cmd,
-        // These are general commands handled by example_cli. Execute the cmd and return.
-        general_cmd => {
-            return example_cli::handle_commands(
-                &graph,
-                &chain,
-                &db,
-                network,
-                |esplora_args, tx| {
-                    let client = esplora_args.client(network)?;
-                    client
-                        .broadcast(tx)
-                        .map(|_| ())
-                        .map_err(anyhow::Error::from)
-                },
-                general_cmd.clone(),
-            );
-        }
-    };
-
-    let client = esplora_cmd.esplora_args().client(network)?;
-    // Prepare the `IndexedTxGraph` and `LocalChain` updates based on whether we are scanning or
-    // syncing.
-    //
-    // Scanning: We are iterating through spks of all keychains and scanning for transactions for
-    // each spk. We start with the lowest derivation index spk and stop scanning after `stop_gap`
-    // number of consecutive spks have no transaction history. A Scan is done in situations of
-    // wallet restoration. It is a special case. Applications should use "sync" style updates
-    // after an initial scan.
-    //
-    // Syncing: We only check for specified spks, utxos and txids to update their confirmation
-    // status or fetch missing transactions.
-    let (local_chain_changeset, indexed_tx_graph_changeset) = match &esplora_cmd {
-        EsploraCommands::Scan {
-            stop_gap,
-            scan_options,
-            ..
-        } => {
-            let request = {
-                let chain_tip = chain.lock().expect("mutex must not be poisoned").tip();
-                let indexed_graph = &*graph.lock().expect("mutex must not be poisoned");
-                FullScanRequest::builder()
-                    .chain_tip(chain_tip)
-                    .spks_from_indexer(&indexed_graph.index)
-                    .inspect({
-                        let mut once = BTreeSet::<Keychain>::new();
-                        move |keychain, spk_i, _| {
-                            if once.insert(keychain) {
-                                eprint!("\nscanning {}: ", keychain);
-                            }
-                            eprint!("{} ", spk_i);
-                            // Flush early to ensure we print at every iteration.
-                            let _ = io::stderr().flush();
-                        }
-                    })
-                    .build()
-            };
-
-            // The client scans keychain spks for transaction histories, stopping after `stop_gap`
-            // is reached. It returns a `TxGraph` update (`tx_update`) and a structure that
-            // represents the last active spk derivation indices of keychains
-            // (`keychain_indices_update`).
-            let update = client
-                .full_scan(request, *stop_gap, scan_options.parallel_requests)
-                .context("scanning for transactions")?;
-
-            let mut graph = graph.lock().expect("mutex must not be poisoned");
-            let mut chain = chain.lock().expect("mutex must not be poisoned");
-            // Because we did a stop gap based scan we are likely to have some updates to our
-            // derivation indices. Usually before a scan you are on a fresh wallet with no
-            // addresses derived, so we need to derive up to the last active addresses the scan
-            // found before adding the transactions.
-            (
-                chain.apply_update(update.chain_update.expect("request included chain tip"))?,
-                {
-                    let index_changeset = graph
-                        .index
-                        .reveal_to_target_multi(&update.last_active_indices);
-                    let mut indexed_tx_graph_changeset = graph.apply_update(update.tx_update);
-                    indexed_tx_graph_changeset.merge(index_changeset.into());
-                    indexed_tx_graph_changeset
-                },
-            )
-        }
-        EsploraCommands::Sync {
-            mut unused_spks,
-            all_spks,
-            mut utxos,
-            mut unconfirmed,
-            scan_options,
-            ..
-        } => {
-            if !(*all_spks || unused_spks || utxos || unconfirmed) {
-                // If nothing is specifically selected, we select everything (except all spks).
-                unused_spks = true;
-                unconfirmed = true;
-                utxos = true;
-            } else if *all_spks {
-                // If all spks is selected, we don't need to also select unused spks (as unused spks
-                // is a subset of all spks).
-                unused_spks = false;
-            }
-
-            let local_tip = chain.lock().expect("mutex must not be poisoned").tip();
-            // Spks, outpoints and txids we want updates on will be accumulated here.
-            let mut request =
-                SyncRequest::builder()
-                    .chain_tip(local_tip.clone())
-                    .inspect(|item, progress| {
-                        let pc = (100 * progress.consumed()) as f32 / progress.total() as f32;
-                        eprintln!("[ SCANNING {:03.0}% ] {}", pc, item);
-                        // Flush early to ensure we print at every iteration.
-                        let _ = io::stderr().flush();
-                    });
-
-            // Get a short lock on the structures to get spks, utxos, and txs that we are interested
-            // in.
-            {
-                let graph = graph.lock().unwrap();
-                let chain = chain.lock().unwrap();
-                request = request.expected_spk_txids(graph.list_expected_spk_txids(
-                    &*chain,
-                    local_tip.block_id(),
-                    ..,
-                ));
-                if *all_spks {
-                    request = request.spks_with_indexes(graph.index.revealed_spks(..));
-                }
-                if unused_spks {
-                    request = request.spks_with_indexes(graph.index.unused_spks());
-                }
-                if utxos {
-                    // We want to search for whether the UTXO is spent, and spent by which
-                    // transaction. We provide the outpoint of the UTXO to
-                    // `EsploraExt::update_tx_graph_without_keychain`.
-                    let init_outpoints = graph.index.outpoints();
-                    request = request.outpoints(
-                        graph
-                            .graph()
-                            .filter_chain_unspents(
-                                &*chain,
-                                local_tip.block_id(),
-                                init_outpoints.iter().cloned(),
-                            )
-                            .map(|(_, utxo)| utxo.outpoint),
-                    );
-                };
-                if unconfirmed {
-                    // We want to search for whether the unconfirmed transaction is now confirmed.
-                    // We provide the unconfirmed txids to
-                    // `EsploraExt::update_tx_graph_without_keychain`.
-                    request = request.txids(
-                        graph
-                            .graph()
-                            .list_canonical_txs(&*chain, local_tip.block_id())
-                            .filter(|canonical_tx| !canonical_tx.chain_position.is_confirmed())
-                            .map(|canonical_tx| canonical_tx.tx_node.txid),
-                    );
-                }
-            }
-
-            let update = client.sync(request, scan_options.parallel_requests)?;
-
-            (
-                chain
-                    .lock()
-                    .unwrap()
-                    .apply_update(update.chain_update.expect("request has chain tip"))?,
-                graph.lock().unwrap().apply_update(update.tx_update),
-            )
-        }
-    };
-
-    println!();
-
-    // We persist the changes
-    let mut db = db.lock().unwrap();
-    db.append(&ChangeSet {
-        local_chain: local_chain_changeset,
-        tx_graph: indexed_tx_graph_changeset.tx_graph,
-        indexer: indexed_tx_graph_changeset.indexer,
-        ..Default::default()
-    })?;
-    Ok(())
-}
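
Likewise, the "sync"-style update described in the deleted comments survives in the retained examples/example_wallet_esplora_blocking, which only queries the script pubkeys the wallet has already revealed. A minimal sketch, not part of the diff, again assuming a loaded `bdk_wallet::Wallet`; the URL and parallelism are placeholders:

    // Sketch only: URL and parallelism are placeholders.
    use bdk_esplora::{esplora_client, EsploraExt};

    let client = esplora_client::Builder::new("https://blockstream.info/testnet/api")
        .build_blocking();
    let request = wallet.start_sync_with_revealed_spks().build();
    let update = client.sync(request, 2)?; // parallel_requests
    wallet.apply_update(update)?;
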
diff --git a/example-crates/example_wallet_electrum/Cargo.toml b/examples/example_wallet_electrum/Cargo.toml
similarity index 66%
rename from example-crates/example_wallet_electrum/Cargo.toml
rename to examples/example_wallet_electrum/Cargo.toml
index 946cd05a..9e0e4605 100644
--- a/example-crates/example_wallet_electrum/Cargo.toml
+++ b/examples/example_wallet_electrum/Cargo.toml
@@ -4,6 +4,6 @@ version = "0.2.0"
 edition = "2021"
 
 [dependencies]
-bdk_wallet = { path = "../../crates/wallet", features = ["file_store"] }
+bdk_wallet = { path = "../../wallet", features = ["file_store"] }
 bdk_electrum = { version = "0.21" }
 anyhow = "1"
diff --git a/example-crates/example_wallet_electrum/src/main.rs b/examples/example_wallet_electrum/src/main.rs
similarity index 100%
rename from example-crates/example_wallet_electrum/src/main.rs
rename to examples/example_wallet_electrum/src/main.rs
diff --git a/example-crates/example_wallet_esplora_async/Cargo.toml b/examples/example_wallet_esplora_async/Cargo.toml
similarity index 83%
rename from example-crates/example_wallet_esplora_async/Cargo.toml
rename to examples/example_wallet_esplora_async/Cargo.toml
index 21b27beb..089105d7 100644
--- a/example-crates/example_wallet_esplora_async/Cargo.toml
+++ b/examples/example_wallet_esplora_async/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-bdk_wallet = { path = "../../crates/wallet", features = ["rusqlite"] }
+bdk_wallet = { path = "../../wallet", features = ["rusqlite"] }
 bdk_esplora = { version = "0.20", features = ["async-https", "tokio"] }
 tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
 anyhow = "1"
diff --git a/example-crates/example_wallet_esplora_async/src/main.rs b/examples/example_wallet_esplora_async/src/main.rs
similarity index 100%
rename from example-crates/example_wallet_esplora_async/src/main.rs
rename to examples/example_wallet_esplora_async/src/main.rs
diff --git a/example-crates/example_wallet_esplora_blocking/Cargo.toml b/examples/example_wallet_esplora_blocking/Cargo.toml
similarity index 79%
rename from example-crates/example_wallet_esplora_blocking/Cargo.toml
rename to examples/example_wallet_esplora_blocking/Cargo.toml
index ec8cb542..b8bf20c4 100644
--- a/example-crates/example_wallet_esplora_blocking/Cargo.toml
+++ b/examples/example_wallet_esplora_blocking/Cargo.toml
@@ -7,6 +7,6 @@ publish = false
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-bdk_wallet = { path = "../../crates/wallet", features = ["file_store"] }
+bdk_wallet = { path = "../../wallet", features = ["file_store"] }
 bdk_esplora = { version = "0.20", features = ["blocking"] }
 anyhow = "1"
diff --git a/example-crates/example_wallet_esplora_blocking/src/main.rs b/examples/example_wallet_esplora_blocking/src/main.rs
similarity index 100%
rename from example-crates/example_wallet_esplora_blocking/src/main.rs
rename to examples/example_wallet_esplora_blocking/src/main.rs
diff --git a/example-crates/example_wallet_rpc/Cargo.toml b/examples/example_wallet_rpc/Cargo.toml
similarity index 81%
rename from example-crates/example_wallet_rpc/Cargo.toml
rename to examples/example_wallet_rpc/Cargo.toml
index b20447e7..546bc64e 100644
--- a/example-crates/example_wallet_rpc/Cargo.toml
+++ b/examples/example_wallet_rpc/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-bdk_wallet = { path = "../../crates/wallet", features = ["file_store"] }
+bdk_wallet = { path = "../../wallet", features = ["file_store"] }
 bdk_bitcoind_rpc = { version = "0.18" }
 anyhow = "1"
diff --git a/example-crates/example_wallet_rpc/README.md b/examples/example_wallet_rpc/README.md
similarity index 100%
rename from example-crates/example_wallet_rpc/README.md
rename to examples/example_wallet_rpc/README.md
diff --git a/example-crates/example_wallet_rpc/src/main.rs b/examples/example_wallet_rpc/src/main.rs
similarity index 100%
rename from example-crates/example_wallet_rpc/src/main.rs
rename to examples/example_wallet_rpc/src/main.rs
diff --git a/nursery/README.md b/nursery/README.md
deleted file mode 100644
index 97591628..00000000
--- a/nursery/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Bitcoin Dev Kit Nursery
-
-This is a directory for crates that are experimental and have not been released yet.
-Keep in mind that they may never be released.
-Things in `/example-crates` may use them to demonstrate how things might look in the future.
diff --git a/crates/wallet/CHANGELOG.md b/wallet/CHANGELOG.md
similarity index 100%
rename from crates/wallet/CHANGELOG.md
rename to wallet/CHANGELOG.md
diff --git a/crates/wallet/Cargo.toml b/wallet/Cargo.toml
similarity index 94%
rename from crates/wallet/Cargo.toml
rename to wallet/Cargo.toml
index 66202088..4d917c82 100644
--- a/crates/wallet/Cargo.toml
+++ b/wallet/Cargo.toml
@@ -2,8 +2,8 @@
 name = "bdk_wallet"
 homepage = "https://bitcoindevkit.org"
 version = "1.2.0"
-repository = "https://github.com/bitcoindevkit/bdk"
-documentation = "https://docs.rs/bdk"
+repository = "https://github.com/bitcoindevkit/bdk_wallet"
+documentation = "https://docs.rs/bdk_wallet"
 description = "A modern, lightweight, descriptor-based wallet library"
 keywords = ["bitcoin", "wallet", "descriptor", "psbt"]
 readme = "README.md"
diff --git a/crates/wallet/README.md b/wallet/README.md
similarity index 94%
rename from crates/wallet/README.md
rename to wallet/README.md
index 0f4a1db8..bf5b644d 100644
--- a/crates/wallet/README.md
+++ b/wallet/README.md
@@ -50,10 +50,10 @@ that the `Wallet` can use to update its view of the chain.
 
 **Examples**
 
-* [`example-crates/example_wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_wallet_esplora_async)
-* [`example-crates/example_wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_wallet_esplora_blocking)
-* [`example-crates/example_wallet_electrum`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_wallet_electrum)
-* [`example-crates/example_wallet_rpc`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_wallet_rpc)
+* [`examples/example_wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/examples/example_wallet_esplora_async)
+* [`examples/example_wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/examples/example_wallet_esplora_blocking)
+* [`examples/example_wallet_electrum`](https://github.com/bitcoindevkit/bdk/tree/master/examples/example_wallet_electrum)
+* [`examples/example_wallet_rpc`](https://github.com/bitcoindevkit/bdk/tree/master/examples/example_wallet_rpc)
 
 ## Persistence
diff --git a/crates/wallet/examples/compiler.rs b/wallet/examples/compiler.rs
similarity index 100%
rename from crates/wallet/examples/compiler.rs
rename to wallet/examples/compiler.rs
diff --git a/crates/wallet/examples/mnemonic_to_descriptors.rs b/wallet/examples/mnemonic_to_descriptors.rs
similarity index 100%
rename from crates/wallet/examples/mnemonic_to_descriptors.rs
rename to wallet/examples/mnemonic_to_descriptors.rs
diff --git a/crates/wallet/examples/policy.rs b/wallet/examples/policy.rs
similarity index 100%
rename from crates/wallet/examples/policy.rs
rename to wallet/examples/policy.rs
diff --git a/crates/wallet/src/descriptor/checksum.rs b/wallet/src/descriptor/checksum.rs
similarity index 100%
rename from crates/wallet/src/descriptor/checksum.rs
rename to wallet/src/descriptor/checksum.rs
diff --git a/crates/wallet/src/descriptor/dsl.rs b/wallet/src/descriptor/dsl.rs
similarity index 100%
rename from crates/wallet/src/descriptor/dsl.rs
rename to wallet/src/descriptor/dsl.rs
diff --git a/crates/wallet/src/descriptor/error.rs b/wallet/src/descriptor/error.rs
similarity index 100%
rename from crates/wallet/src/descriptor/error.rs
rename to wallet/src/descriptor/error.rs
diff --git a/crates/wallet/src/descriptor/mod.rs b/wallet/src/descriptor/mod.rs
similarity index 100%
rename from crates/wallet/src/descriptor/mod.rs
rename to wallet/src/descriptor/mod.rs
diff --git a/crates/wallet/src/descriptor/policy.rs b/wallet/src/descriptor/policy.rs
similarity index 100%
rename from crates/wallet/src/descriptor/policy.rs
rename to wallet/src/descriptor/policy.rs
diff --git a/crates/wallet/src/descriptor/template.rs b/wallet/src/descriptor/template.rs
similarity index 100%
rename from crates/wallet/src/descriptor/template.rs
rename to wallet/src/descriptor/template.rs
diff --git a/crates/wallet/src/keys/bip39.rs b/wallet/src/keys/bip39.rs
similarity index 100%
rename from crates/wallet/src/keys/bip39.rs
rename to wallet/src/keys/bip39.rs
diff --git a/crates/wallet/src/keys/mod.rs b/wallet/src/keys/mod.rs
similarity index 100%
rename from crates/wallet/src/keys/mod.rs
rename to wallet/src/keys/mod.rs
diff --git a/crates/wallet/src/lib.rs b/wallet/src/lib.rs
similarity index 100%
rename from crates/wallet/src/lib.rs
rename to wallet/src/lib.rs
diff --git a/crates/wallet/src/psbt/mod.rs b/wallet/src/psbt/mod.rs
similarity index 100%
rename from crates/wallet/src/psbt/mod.rs
rename to wallet/src/psbt/mod.rs
diff --git a/crates/wallet/src/test_utils.rs b/wallet/src/test_utils.rs
similarity index 100%
rename from crates/wallet/src/test_utils.rs
rename to wallet/src/test_utils.rs
diff --git a/crates/wallet/src/types.rs b/wallet/src/types.rs
similarity index 100%
rename from crates/wallet/src/types.rs
rename to wallet/src/types.rs
diff --git a/crates/wallet/src/wallet/changeset.rs b/wallet/src/wallet/changeset.rs
similarity index 100%
rename from crates/wallet/src/wallet/changeset.rs
rename to wallet/src/wallet/changeset.rs
diff --git a/crates/wallet/src/wallet/coin_selection.rs b/wallet/src/wallet/coin_selection.rs
similarity index 100%
rename from crates/wallet/src/wallet/coin_selection.rs
rename to wallet/src/wallet/coin_selection.rs
diff --git a/crates/wallet/src/wallet/error.rs b/wallet/src/wallet/error.rs
similarity index 100%
rename from crates/wallet/src/wallet/error.rs
rename to wallet/src/wallet/error.rs
diff --git a/crates/wallet/src/wallet/export.rs b/wallet/src/wallet/export.rs
similarity index 100%
rename from crates/wallet/src/wallet/export.rs
rename to wallet/src/wallet/export.rs
diff --git a/crates/wallet/src/wallet/mod.rs b/wallet/src/wallet/mod.rs
similarity index 100%
rename from crates/wallet/src/wallet/mod.rs
rename to wallet/src/wallet/mod.rs
diff --git a/crates/wallet/src/wallet/params.rs b/wallet/src/wallet/params.rs
similarity index 100%
rename from crates/wallet/src/wallet/params.rs
rename to wallet/src/wallet/params.rs
diff --git a/crates/wallet/src/wallet/persisted.rs b/wallet/src/wallet/persisted.rs
similarity index 100%
rename from crates/wallet/src/wallet/persisted.rs
rename to wallet/src/wallet/persisted.rs
diff --git a/crates/wallet/src/wallet/signer.rs b/wallet/src/wallet/signer.rs
similarity index 100%
rename from crates/wallet/src/wallet/signer.rs
rename to wallet/src/wallet/signer.rs
diff --git a/crates/wallet/src/wallet/tx_builder.rs b/wallet/src/wallet/tx_builder.rs
similarity index 100%
rename from crates/wallet/src/wallet/tx_builder.rs
rename to wallet/src/wallet/tx_builder.rs
diff --git a/crates/wallet/src/wallet/utils.rs b/wallet/src/wallet/utils.rs
similarity index 100%
rename from crates/wallet/src/wallet/utils.rs
rename to wallet/src/wallet/utils.rs
diff --git a/crates/wallet/tests/psbt.rs b/wallet/tests/psbt.rs
similarity index 100%
rename from crates/wallet/tests/psbt.rs
rename to wallet/tests/psbt.rs
diff --git a/crates/wallet/tests/wallet.rs b/wallet/tests/wallet.rs
similarity index 100%
rename from crates/wallet/tests/wallet.rs
rename to wallet/tests/wallet.rs