diff --git a/Cargo.lock b/Cargo.lock
index 94baaf33..ea7493de 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1835,7 +1835,7 @@ checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0"
 [[package]]
 name = "encoder-standard"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/da-codec#2cfec8c99547b68dc64e2b020fa2b83cfb9c0e99"
+source = "git+https://github.com/scroll-tech/da-codec#b4cce5c5d17845fc6f4f6ec422d559470a09dca9"
 dependencies = [
  "zstd",
 ]
@@ -4341,7 +4341,7 @@ dependencies = [
 [[package]]
 name = "reth-chainspec"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-chains",
  "alloy-consensus",
@@ -4361,7 +4361,7 @@ dependencies = [
 [[package]]
 name = "reth-codecs"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -4379,7 +4379,7 @@ dependencies = [
 [[package]]
 name = "reth-codecs-derive"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "convert_case",
  "proc-macro2",
@@ -4390,7 +4390,7 @@ dependencies = [
 [[package]]
 name = "reth-db-models"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -4400,7 +4400,7 @@ dependencies = [
 [[package]]
 name = "reth-ethereum-forks"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-eip2124",
  "alloy-hardforks",
@@ -4412,7 +4412,7 @@ dependencies = [
 [[package]]
 name = "reth-ethereum-primitives"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -4427,7 +4427,7 @@ dependencies = [
 [[package]]
 name = "reth-evm"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -4449,7 +4449,7 @@ dependencies = [
 [[package]]
 name = "reth-evm-ethereum"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -4469,7 +4469,7 @@ dependencies = [
 [[package]]
 name = "reth-execution-errors"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-evm",
  "alloy-primitives",
@@ -4482,7 +4482,7 @@ dependencies = [
 [[package]]
 name = "reth-execution-types"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -4498,7 +4498,7 @@ dependencies = [
 [[package]]
 name = "reth-network-peers"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -4510,7 +4510,7 @@ dependencies = [
 [[package]]
 name = "reth-primitives"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "once_cell",
@@ -4523,7 +4523,7 @@ dependencies = [
 [[package]]
 name = "reth-primitives-traits"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -4551,7 +4551,7 @@ dependencies = [
 [[package]]
 name = "reth-prune-types"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-primitives",
  "derive_more",
@@ -4561,7 +4561,7 @@ dependencies = [
 [[package]]
 name = "reth-scroll-chainspec"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-chains",
  "alloy-consensus",
@@ -4586,7 +4586,7 @@ dependencies = [
 [[package]]
 name = "reth-scroll-evm"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -4616,7 +4616,7 @@ dependencies = [
 [[package]]
 name = "reth-scroll-forks"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
"alloy-chains", "alloy-primitives", @@ -4630,7 +4630,7 @@ dependencies = [ [[package]] name = "reth-scroll-primitives" version = "1.6.0" -source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66" +source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397" dependencies = [ "alloy-consensus", "alloy-eips", @@ -4647,7 +4647,7 @@ dependencies = [ [[package]] name = "reth-stages-types" version = "1.6.0" -source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66" +source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397" dependencies = [ "alloy-primitives", "reth-trie-common", @@ -4656,7 +4656,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" version = "1.6.0" -source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66" +source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397" dependencies = [ "alloy-primitives", "derive_more", @@ -4667,7 +4667,7 @@ dependencies = [ [[package]] name = "reth-storage-api" version = "1.6.0" -source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66" +source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397" dependencies = [ "alloy-consensus", "alloy-eips", @@ -4689,7 +4689,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" version = "1.6.0" -source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66" +source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397" dependencies = [ "alloy-eips", "alloy-primitives", @@ -4705,7 +4705,7 @@ dependencies = [ [[package]] name = "reth-trie" version = "1.6.0" -source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66" +source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397" dependencies = [ "alloy-consensus", "alloy-eips", @@ -4727,7 +4727,7 @@ dependencies = [ [[package]] name = "reth-trie-common" version = "1.6.0" -source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66" +source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -4743,7 +4743,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" version = "1.6.0" -source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66" +source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4759,7 +4759,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" version = "1.6.0" -source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66" +source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397" dependencies = [ "zstd", ] 
@@ -4934,7 +4934,7 @@ dependencies = [
 [[package]]
 name = "revm-scroll"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/scroll-revm#720ee7802e5ad695ac1f8699bbab9c9f2424417f"
+source = "git+https://github.com/scroll-tech/scroll-revm#59d400f1a0b616d7b97a24da1cb5b8dcb8006f4b"
 dependencies = [
  "auto_impl",
  "enumn",
@@ -5249,9 +5249,11 @@ dependencies = [
 name = "sbv-core"
 version = "2.0.0"
 dependencies = [
+ "auto_impl",
  "cfg-if",
  "ctor",
  "itertools 0.14.0",
+ "reth-primitives-traits",
  "rkyv",
  "rstest",
  "sbv-helpers",
@@ -5261,6 +5263,7 @@ dependencies = [
  "sbv-trie",
  "serde",
  "serde_json",
+ "serde_with",
  "thiserror",
  "tracing",
  "tracing-subscriber 0.3.19",
@@ -5332,7 +5335,6 @@ dependencies = [
  "scroll-alloy-rpc-types",
  "serde",
  "serde_json",
- "serde_with",
  "tracing",
 ]
 
@@ -5342,12 +5344,12 @@ version = "2.0.0"
 dependencies = [
  "alloy-rlp",
  "alloy-trie",
- "auto_impl",
  "reth-trie",
- "reth-trie-sparse",
+ "rlp",
  "sbv-helpers",
  "sbv-kv",
  "sbv-primitives",
+ "serde",
  "thiserror",
  "tracing",
 ]
@@ -5361,6 +5363,7 @@ dependencies = [
  "alloy-transport",
  "async-trait",
  "futures",
+ "sbv-core",
  "sbv-primitives",
  "serde",
  "thiserror",
@@ -5408,7 +5411,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 [[package]]
 name = "scroll-alloy-consensus"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5424,7 +5427,7 @@ dependencies = [
 [[package]]
 name = "scroll-alloy-evm"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5442,7 +5445,7 @@ dependencies = [
 [[package]]
 name = "scroll-alloy-hardforks"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-hardforks",
  "auto_impl",
@@ -5452,7 +5455,7 @@ dependencies = [
 [[package]]
 name = "scroll-alloy-network"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-network",
@@ -5467,7 +5470,7 @@ dependencies = [
 [[package]]
 name = "scroll-alloy-rpc-types"
 version = "1.6.0"
-source = "git+https://github.com/scroll-tech/reth?rev=a6f827a71c5523d1d32e579456dfdab2cb003c66#a6f827a71c5523d1d32e579456dfdab2cb003c66"
+source = "git+https://github.com/scroll-tech/reth?branch=scroll#d485f4718a0c93f3fc1ec453b8ddde6ce1ebe397"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
diff --git a/Cargo.toml b/Cargo.toml
index bad0820d..eaaaaaba 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -44,25 +44,43 @@ alloy-evm = { version = "0.17", default-features = false }
 revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", default-features = false }
 
-reth-chainspec = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-evm = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-evm-ethereum = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-ethereum-forks = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-execution-types = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-primitives = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-primitives-traits = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-trie = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-trie-sparse = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-
-reth-scroll-chainspec = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-scroll-evm = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-scroll-forks = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-reth-scroll-primitives = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-
-scroll-alloy-evm = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-scroll-alloy-consensus = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-scroll-alloy-rpc-types = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
-scroll-alloy-network = { git = "https://github.com/scroll-tech/reth", rev = "a6f827a71c5523d1d32e579456dfdab2cb003c66", default-features = false }
+reth-chainspec = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-evm = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-evm-ethereum = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-ethereum-forks = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-execution-types = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-primitives = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-primitives-traits = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-trie = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+
+reth-scroll-chainspec = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-scroll-evm = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-scroll-forks = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+reth-scroll-primitives = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+
+scroll-alloy-evm = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+scroll-alloy-consensus = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+scroll-alloy-rpc-types = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+scroll-alloy-network = { git = "https://github.com/scroll-tech/reth", branch = "scroll", default-features = false }
+
+#reth-chainspec = { path = "../reth/crates/chainspec", default-features = false }
+#reth-evm = { path = "../reth/crates/evm/evm", default-features = false }
+#reth-evm-ethereum = { path = "../reth/crates/ethereum/evm", default-features = false }
+#reth-ethereum-forks = { path = "../reth/crates/ethereum/hardforks", default-features = false }
+#reth-execution-types = { path = "../reth/crates/evm/execution-types", default-features = false }
+#reth-primitives = { path = "../reth/crates/primitives", default-features = false }
+#reth-primitives-traits = { path = "../reth/crates/primitives-traits", default-features = false }
+#reth-trie = { path = "../reth/crates/trie/trie", default-features = false }
+#
+#reth-scroll-chainspec = { path = "../reth/crates/scroll/chainspec", default-features = false }
+#reth-scroll-evm = { path = "../reth/crates/scroll/evm", default-features = false }
+#reth-scroll-forks = { path = "../reth/crates/scroll/hardforks", default-features = false }
+#reth-scroll-primitives = { path = "../reth/crates/scroll/primitives", default-features = false }
+#
+#scroll-alloy-evm = { path = "../reth/crates/scroll/alloy/evm", default-features = false }
+#scroll-alloy-consensus = { path = "../reth/crates/scroll/alloy/consensus", default-features = false }
+#scroll-alloy-rpc-types = { path = "../reth/crates/scroll/alloy/rpc-types", default-features = false }
+#scroll-alloy-network = { path = "../reth/crates/scroll/alloy/network", default-features = false }
 
 openvm-ecc-guest = { git = "https://github.com/openvm-org/openvm.git", rev = "51d0aa69694fd33def3c433e48228a5675e4fd97" }
 openvm-keccak256 = { git = "https://github.com/openvm-org/openvm.git", rev = "51d0aa69694fd33def3c433e48228a5675e4fd97" }
diff --git a/crates/bin/Cargo.toml b/crates/bin/Cargo.toml
index 7df92ea7..9c87f5a9 100644
--- a/crates/bin/Cargo.toml
+++ b/crates/bin/Cargo.toml
@@ -45,4 +45,4 @@ tracing-subscriber.workspace = true
 [features]
 default = ["dev"]
 dev = ["dep:tracing", "sbv/dev", "dep:tracing-subscriber"]
-scroll = ["sbv/scroll"]
+scroll = ["sbv/scroll", "sbv/scroll-compress-ratio"]
diff --git a/crates/bin/src/helpers/verifier.rs b/crates/bin/src/helpers/verifier.rs
index f0a916cc..8287fe51 100644
--- a/crates/bin/src/helpers/verifier.rs
+++ b/crates/bin/src/helpers/verifier.rs
@@ -4,8 +4,9 @@ use sbv::{
     core::{
         VerificationError,
         verifier::{self, VerifyResult},
+        witness::BlockWitness,
     },
-    primitives::{chainspec::ChainSpec, types::BlockWitness},
+    primitives::chainspec::ChainSpec,
 };
 use std::{
     env,
@@ -21,16 +22,8 @@ pub fn verify_catch_panics(
     let block_number = witness.header.number;
 
     catch_unwind(AssertUnwindSafe(|| {
-        verifier::run(
-            vec![witness],
-            chain_spec,
-            #[cfg(feature = "scroll")]
-            verifier::StateCommitMode::Block,
-            #[cfg(feature = "scroll")]
-            None::<Vec<Vec<U256>>>,
-        )
-        .inspect_err(|e| {
-            if let VerificationError::BlockRootMismatch { bundle_state, .. } = e {
+        verifier::run_host(&[witness], chain_spec).inspect_err(|e| {
+            if let VerificationError::RootMismatch { bundle_state, .. } = e {
                 let dump_dir = env::temp_dir()
                     .join("dumps")
                     .join(format!("{chain_id}-{block_number}"));
diff --git a/crates/bin/src/run.rs b/crates/bin/src/run.rs
index b4ae4e5e..bc320f37 100644
--- a/crates/bin/src/run.rs
+++ b/crates/bin/src/run.rs
@@ -2,11 +2,10 @@ use crate::helpers::verifier::*;
 use clap::Args;
 use eyre::ContextCompat;
 use sbv::{
-    core::verifier::VerifyResult,
+    core::{verifier::VerifyResult, witness::BlockWitness},
     primitives::{
         chainspec::{Chain, build_chain_spec_force_hardfork, get_chain_spec},
         hardforks::Hardfork,
-        types::BlockWitness,
     },
 };
 use std::path::PathBuf;
diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml
index 27384285..8da8cf1e 100644
--- a/crates/core/Cargo.toml
+++ b/crates/core/Cargo.toml
@@ -13,12 +13,15 @@ repository.workspace = true
 workspace = true
 
 [dependencies]
+auto_impl.workspace = true
 thiserror.workspace = true
 tracing = { workspace = true, optional = true }
-serde = { workspace = true, optional = true }
+serde.workspace = true
 rkyv = { workspace = true, optional = true }
 cfg-if.workspace = true
 itertools.workspace = true
+serde_with.workspace = true
+reth-primitives-traits = { workspace = true, features = ["serde", "serde-bincode-compat"] }
 
 sbv-primitives = { workspace = true, features = [
     "chainspec",
@@ -43,11 +46,11 @@ sbv-primitives.workspace = true
 rkyv = ["dep:rkyv", "sbv-primitives/rkyv"]
 
 scroll = [
-    "dep:serde",
     "sbv-primitives/scroll-chainspec",
     "sbv-primitives/scroll-reth-evm-types",
     "sbv-precompile/scroll",
 ]
+scroll-compress-ratio = ["sbv-primitives/scroll-compress-ratio"]
 
 dev = ["dep:tracing", "sbv-primitives/dev", "sbv-trie/dev"]
 
 # sp1 related
diff --git a/crates/core/src/database.rs b/crates/core/src/database.rs
index e25c601e..c0c7951d 100644
--- a/crates/core/src/database.rs
+++ b/crates/core/src/database.rs
@@ -1,4 +1,5 @@
 use sbv_kv::{HashMap, KeyValueStoreGet};
+pub use sbv_primitives::types::revm::database::DatabaseRef;
 use sbv_primitives::{
     Address, B256, Bytes, U256,
     types::revm::{
@@ -7,22 +8,18 @@ use sbv_primitives::{
     },
 };
 use sbv_trie::PartialStateTrie;
-use std::{cell::RefCell, fmt};
-
-pub use sbv_primitives::types::revm::database::DatabaseRef;
+use std::{cell::RefCell, collections::BTreeMap, fmt};
 
 /// A database that consists of account and storage information.
-pub struct EvmDatabase<CodeDb, NodesProvider, BlockHashProvider> {
+pub struct EvmDatabase<CodeDb, BlockHashProvider> {
     /// Map of code hash to bytecode.
     pub(crate) code_db: CodeDb,
     /// Cache of analyzed code
     analyzed_code_cache: RefCell<HashMap<B256, Option<Bytecode>>>,
-    /// Provider of trie nodes
-    pub(crate) nodes_provider: NodesProvider,
-    /// Provider of block hashes
-    block_hashes: BlockHashProvider,
     /// partial merkle patricia trie
     pub(crate) state: PartialStateTrie,
+    /// Provider of block hashes
+    block_hashes: BlockHashProvider,
 }
 
 /// Database error.
@@ -42,56 +39,28 @@ pub enum DatabaseError {
 
 type Result<T> = std::result::Result<T, DatabaseError>;
 
-impl<CodeDb, NodesProvider, BlockHashProvider> fmt::Debug
-    for EvmDatabase<CodeDb, NodesProvider, BlockHashProvider>
-{
+impl<CodeDb, BlockHashProvider> fmt::Debug for EvmDatabase<CodeDb, BlockHashProvider> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         f.debug_struct("EvmDatabase").finish()
     }
 }
 
-impl<
-    CodeDb: KeyValueStoreGet<B256, Bytes>,
-    NodesProvider: KeyValueStoreGet<B256, Bytes>,
-    BlockHashProvider: KeyValueStoreGet<u64, B256>,
-> EvmDatabase<CodeDb, NodesProvider, BlockHashProvider>
+impl<CodeDb: KeyValueStoreGet<B256, Bytes>, BlockHashProvider: KeyValueStoreGet<u64, B256>>
+    EvmDatabase<CodeDb, BlockHashProvider>
 {
     /// Initialize an EVM database from a zkTrie root.
-    pub fn new_from_root(
-        code_db: CodeDb,
-        state_root_before: B256,
-        nodes_provider: NodesProvider,
-        block_hashes: BlockHashProvider,
-    ) -> Result<Self> {
-        dev_trace!("open trie from root {:?}", state_root_before);
-
-        let state = cycle_track!(
-            PartialStateTrie::open(&nodes_provider, state_root_before),
-            "PartialStateTrie::open"
-        )?;
-
-        Ok(EvmDatabase {
+    pub fn new(code_db: CodeDb, state: PartialStateTrie, block_hashes: BlockHashProvider) -> Self {
+        EvmDatabase {
             code_db,
             analyzed_code_cache: Default::default(),
-            nodes_provider,
             block_hashes,
             state,
-        })
+        }
     }
 
     /// Update changes to the database.
-    pub fn update<'a, P: KeyValueStoreGet<B256, Bytes>>(
-        &mut self,
-        nodes_provider: P,
-        post_state: impl IntoIterator<Item = (&'a Address, &'a BundleAccount)>,
-    ) -> Result<()> {
-        self.state.update(nodes_provider, post_state)?;
-        Ok(())
-    }
-
-    /// Commit changes and return the new state root.
-    pub fn commit_changes(&mut self) -> B256 {
-        self.state.commit_state()
+    pub fn commit(&mut self, post_state: BTreeMap<Address, BundleAccount>) -> Result<B256> {
+        Ok(self.state.update(post_state)?)
     }
 
     /// Get the withdrawal trie root of scroll.
@@ -123,11 +92,8 @@ impl<
     }
 }
 
-impl<
-    CodeDb: KeyValueStoreGet<B256, Bytes>,
-    NodesProvider: KeyValueStoreGet<B256, Bytes>,
-    BlockHashProvider: KeyValueStoreGet<u64, B256>,
-> DatabaseRef for EvmDatabase<CodeDb, NodesProvider, BlockHashProvider>
+impl<CodeDb: KeyValueStoreGet<B256, Bytes>, BlockHashProvider: KeyValueStoreGet<u64, B256>>
+    DatabaseRef for EvmDatabase<CodeDb, BlockHashProvider>
 {
     type Error = DatabaseError;
 
@@ -166,10 +132,7 @@ impl<
     /// Get storage value of address at index.
     fn storage_ref(&self, address: Address, index: U256) -> Result<U256> {
         dev_trace!("get storage of {:?} at index {:?}", address, index);
-        Ok(self
-            .state
-            .get_storage(&self.nodes_provider, address, index)?
-            .unwrap_or(U256::ZERO))
+        Ok(self.state.get_storage(address, index)?)
     }
 
     /// Get block hash by block number.
diff --git a/crates/core/src/error.rs b/crates/core/src/error.rs
index f2cd6e33..c72836c4 100644
--- a/crates/core/src/error.rs
+++ b/crates/core/src/error.rs
@@ -17,6 +17,9 @@ pub enum VerificationError {
     /// The witnesses are not sequential.
     #[error("witnesses are not sequential")]
     NonSequentialWitnesses,
+    /// Error while building tries from witness states.
+    #[error(transparent)]
+    FromWitness(#[from] sbv_trie::FromWitnessError),
     /// Error while recovering signer from an ECDSA signature.
     #[error("invalid signature: {0}")]
     InvalidSignature(#[from] SignatureError),
@@ -30,7 +33,7 @@ pub enum VerificationError {
     #[error(
         "state root in witness doesn't match with state root executed: expected {expected}, actual {actual}"
     )]
-    BlockRootMismatch {
+    RootMismatch {
         /// Root after in trace
         expected: B256,
         /// Root after in revm
@@ -39,37 +42,21 @@ pub enum VerificationError {
         #[cfg(not(target_os = "zkvm"))]
         bundle_state: Box<BundleState>,
     },
-    /// Root mismatch error
-    #[error(
-        "state root in last witness doesn't match with state root executed: expected {expected}, actual {actual}"
-    )]
-    ChunkRootMismatch {
-        /// Root after in trace
-        expected: B256,
-        /// Root after in revm
-        actual: B256,
-    },
 }
 
 impl VerificationError {
-    /// Create a new [VerificationError::BlockRootMismatch] variant.
+    /// Create a new [VerificationError::RootMismatch] variant.
     #[inline]
-    pub fn block_root_mismatch(
+    pub fn root_mismatch(
         expected: B256,
         actual: B256,
         #[cfg(not(target_os = "zkvm"))] bundle_state: impl Into<Box<BundleState>>,
     ) -> Self {
-        VerificationError::BlockRootMismatch {
+        VerificationError::RootMismatch {
             expected,
             actual,
             #[cfg(not(target_os = "zkvm"))]
             bundle_state: bundle_state.into(),
         }
    }
-
-    /// Create a new [VerificationError::ChunkRootMismatch] variant.
-    #[inline]
-    pub fn chunk_root_mismatch(expected: B256, actual: B256) -> Self {
-        VerificationError::ChunkRootMismatch { expected, actual }
-    }
 }
diff --git a/crates/core/src/executor/ethereum.rs b/crates/core/src/executor/ethereum.rs
index 0a228904..53cb9c4a 100644
--- a/crates/core/src/executor/ethereum.rs
+++ b/crates/core/src/executor/ethereum.rs
@@ -33,19 +33,17 @@ pub type EvmConfig = EthEvmConfig;
 
 /// EVM executor that handles the block.
 #[derive(Debug)]
-pub struct EvmExecutor<'a, CodeDb, NodesProvider, BlockHashProvider> {
+pub struct EvmExecutor<'a, CodeDb, BlockHashProvider> {
     chain_spec: Arc<ChainSpec>,
-    db: &'a EvmDatabase<CodeDb, NodesProvider, BlockHashProvider>,
+    db: &'a EvmDatabase<CodeDb, BlockHashProvider>,
     block: &'a RecoveredBlock<Block>,
 }
 
-impl<'a, CodeDb, NodesProvider, BlockHashProvider>
-    EvmExecutor<'a, CodeDb, NodesProvider, BlockHashProvider>
-{
+impl<'a, CodeDb, BlockHashProvider> EvmExecutor<'a, CodeDb, BlockHashProvider> {
     /// Create a new EVM executor
     pub fn new(
         chain_spec: Arc<ChainSpec>,
-        db: &'a EvmDatabase<CodeDb, NodesProvider, BlockHashProvider>,
+        db: &'a EvmDatabase<CodeDb, BlockHashProvider>,
         block: &'a RecoveredBlock<Block>,
     ) -> Self {
         Self {
@@ -56,11 +54,8 @@ impl<'a, CodeDb, NodesProvider, BlockHashProvider>
     }
 }
 
-impl<
-    CodeDb: KeyValueStoreGet<B256, Bytes>,
-    NodesProvider: KeyValueStoreGet<B256, Bytes>,
-    BlockHashProvider: KeyValueStoreGet<u64, B256>,
-> EvmExecutor<'_, CodeDb, NodesProvider, BlockHashProvider>
+impl<CodeDb: KeyValueStoreGet<B256, Bytes>, BlockHashProvider: KeyValueStoreGet<u64, B256>>
+    EvmExecutor<'_, CodeDb, BlockHashProvider>
 {
     /// Handle the block with the given witness
     pub fn execute(self) -> Result<BlockExecutionOutput<Receipt>, VerificationError> {
diff --git a/crates/core/src/executor/scroll.rs b/crates/core/src/executor/scroll.rs
index 431ea924..3800fc47 100644
--- a/crates/core/src/executor/scroll.rs
+++ b/crates/core/src/executor/scroll.rs
@@ -20,20 +20,20 @@ pub type EvmConfig =
 
 /// EVM executor that handles the block.
 #[derive(Debug)]
-pub struct EvmExecutor<'a, CodeDb, NodesProvider, BlockHashProvider, CompressionRatios> {
+pub struct EvmExecutor<'a, CodeDb, BlockHashProvider, CompressionRatios> {
     chain_spec: Arc<ChainSpec>,
-    db: &'a EvmDatabase<CodeDb, NodesProvider, BlockHashProvider>,
+    db: &'a EvmDatabase<CodeDb, BlockHashProvider>,
     block: &'a RecoveredBlock<Block>,
     compression_ratios: Option<CompressionRatios>,
 }
 
-impl<'a, CodeDb, NodesProvider, BlockHashProvider, CompressionRatios>
-    EvmExecutor<'a, CodeDb, NodesProvider, BlockHashProvider, CompressionRatios>
+impl<'a, CodeDb, BlockHashProvider, CompressionRatios>
+    EvmExecutor<'a, CodeDb, BlockHashProvider, CompressionRatios>
 {
     /// Create a new EVM executor
     pub fn new(
         chain_spec: Arc<ChainSpec>,
-        db: &'a EvmDatabase<CodeDb, NodesProvider, BlockHashProvider>,
+        db: &'a EvmDatabase<CodeDb, BlockHashProvider>,
         block: &'a RecoveredBlock<Block>,
         compression_ratios: Option<CompressionRatios>,
     ) -> Self {
@@ -48,10 +48,9 @@ impl<'a, CodeDb, NodesProvider, BlockHashProvider, CompressionRatios>
 
 impl<
     CodeDb: KeyValueStoreGet<B256, Bytes>,
-    NodesProvider: KeyValueStoreGet<B256, Bytes>,
     BlockHashProvider: KeyValueStoreGet<u64, B256>,
     CompressionRatios: IntoIterator<Item = U256>,
-> EvmExecutor<'_, CodeDb, NodesProvider, BlockHashProvider, CompressionRatios>
+> EvmExecutor<'_, CodeDb, BlockHashProvider, CompressionRatios>
 {
     /// Handle the block with the given witness
     pub fn execute(self) -> Result<BlockExecutionOutput<Receipt>, VerificationError> {
diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs
index 0aa7b598..a3f38692 100644
--- a/crates/core/src/lib.rs
+++ b/crates/core/src/lib.rs
@@ -3,6 +3,10 @@
 #[macro_use]
 extern crate sbv_helpers;
 
+/// Witness type
+pub mod witness;
+pub use witness::BlockWitness;
+
 mod database;
 pub use database::{DatabaseError, DatabaseRef, EvmDatabase};
diff --git a/crates/core/src/verifier/ethereum.rs b/crates/core/src/verifier/ethereum.rs
index c56038e7..abb86241 100644
--- a/crates/core/src/verifier/ethereum.rs
+++ b/crates/core/src/verifier/ethereum.rs
@@ -1,15 +1,15 @@
-use crate::{EvmDatabase, EvmExecutor, VerificationError};
+use crate::{
+    BlockWitness, EvmDatabase, EvmExecutor, VerificationError,
+    witness::{BlockWitnessChunkExt, BlockWitnessExt},
+};
+use itertools::Itertools;
 use sbv_kv::nohash::NoHashMap;
 use sbv_primitives::{
     B256, Bytes,
     chainspec::ChainSpec,
-    ext::{BlockWitnessChunkExt, BlockWitnessExt},
-    types::{
-        BlockWitness,
-        reth::primitives::{Block, RecoveredBlock},
-    },
+    types::reth::primitives::{Block, RecoveredBlock},
 };
-use sbv_trie::BlockWitnessTrieExt;
+use sbv_trie::PartialStateTrie;
 use std::{collections::BTreeMap, sync::Arc};
 
 /// Result of the block witness verification process.
@@ -25,10 +25,23 @@ pub struct VerifyResult {
     pub gas_used: u64,
 }
 
+/// Verify the block witness and return the gas used.
+pub fn run_host(
+    witnesses: &[BlockWitness],
+    chain_spec: Arc<ChainSpec>,
+) -> Result<VerifyResult, VerificationError> {
+    let cached_trie = PartialStateTrie::new(
+        witnesses[0].prev_state_root,
+        witnesses.iter().flat_map(|w| w.states.iter()),
+    )?;
+    run(witnesses, chain_spec, cached_trie)
+}
+
 /// Verify the block witness and return the gas used.
 pub fn run(
-    witnesses: Vec<BlockWitness>,
+    witnesses: &[BlockWitness],
     chain_spec: Arc<ChainSpec>,
+    cached_trie: PartialStateTrie,
 ) -> Result<VerifyResult, VerificationError> {
     if witnesses.is_empty() {
         return Err(VerificationError::EmptyWitnesses);
@@ -39,38 +52,42 @@ pub fn run(
     if !witnesses.has_seq_block_number() {
         return Err(VerificationError::NonSequentialWitnesses);
     }
+    if !witnesses.has_seq_state_root() {
+        return Err(VerificationError::NonSequentialWitnesses);
+    }
 
-    let (code_db, nodes_provider, block_hash_provider) = make_providers(&witnesses);
-    let nodes_provider = manually_drop_on_zkvm!(nodes_provider);
+    let (code_db, block_hash_provider) = make_providers(witnesses);
 
     let pre_state_root = witnesses[0].prev_state_root;
     let blocks = witnesses
-        .into_iter()
+        .iter()
         .map(|w| {
             dev_trace!("{w:#?}");
-            w.into_reth_block()
+            w.build_reth_block()
        })
         .collect::<Result<Vec<RecoveredBlock<Block>>, _>>()?;
+    if !blocks
+        .iter()
+        .tuple_windows()
+        .all(|(a, b)| a.hash() == b.header().parent_hash)
+    {
+        return Err(VerificationError::NonSequentialWitnesses);
+    }
 
-    let mut db = manually_drop_on_zkvm!(EvmDatabase::new_from_root(
-        code_db,
-        pre_state_root,
-        &nodes_provider,
-        block_hash_provider,
-    )?);
+    let mut db =
+        manually_drop_on_zkvm!(EvmDatabase::new(code_db, cached_trie, block_hash_provider));
 
     let mut gas_used = 0;
     let mut post_state_root = B256::ZERO;
 
     for block in blocks.iter() {
-        let output = EvmExecutor::new(chain_spec.clone(), &db, block).execute()?;
+        let executor = EvmExecutor::new(chain_spec.clone(), &db, block);
+        let output = executor.execute()?;
         gas_used += output.gas_used;
 
-        db.update(
-            &nodes_provider,
-            BTreeMap::from_iter(output.state.state.clone()).iter(),
-        )?;
+        #[cfg(not(target_os = "zkvm"))]
+        let state_for_debug = output.state.clone();
 
-        post_state_root = db.commit_changes();
+        post_state_root = db.commit(BTreeMap::from_iter(output.state.state.clone()))?;
         if block.state_root != post_state_root {
             dev_error!(
                 "Block #{} root mismatch: root after in trace = {:x}, root after in reth = {:x}",
@@ -78,11 +95,11 @@ pub fn run(
                 block.state_root,
                 post_state_root
             );
-            return Err(VerificationError::block_root_mismatch(
+            return Err(VerificationError::root_mismatch(
                 block.state_root,
                 post_state_root,
                 #[cfg(not(target_os = "zkvm"))]
-                output.state,
+                state_for_debug,
            ));
        }
        dev_info!("Block #{} verified successfully", block.number);
@@ -97,11 +114,10 @@ pub fn run(
 }
 
 type CodeDb = NoHashMap<B256, Bytes>;
-type NodesProvider = NoHashMap<B256, Bytes>;
 type BlockHashProvider = NoHashMap<u64, B256>;
 
 /// Create the providers needed for the EVM executor from a list of witnesses.
-fn make_providers(witnesses: &[BlockWitness]) -> (CodeDb, NodesProvider, BlockHashProvider) {
+fn make_providers(witnesses: &[BlockWitness]) -> (CodeDb, BlockHashProvider) {
     let code_db = {
         // build code db
         let num_codes = witnesses.iter().map(|w| w.codes.len()).sum();
@@ -110,13 +126,6 @@ fn make_providers(witnesses: &[BlockWitness]) -> (CodeDb, NodesProvider, BlockHa
         witnesses.import_codes(&mut code_db);
         code_db
     };
-    let nodes_provider = {
-        let num_states = witnesses.iter().map(|w| w.states.len()).sum();
-        let mut nodes_provider =
-            NoHashMap::<B256, Bytes>::with_capacity_and_hasher(num_states, Default::default());
-        witnesses.import_nodes(&mut nodes_provider);
-        nodes_provider
-    };
     let block_hash_provider = {
         let num_blocks = witnesses.iter().map(|w| w.block_hashes.len()).sum();
         let mut block_hash_provider =
@@ -125,7 +134,7 @@ fn make_providers(witnesses: &[BlockWitness]) -> (CodeDb, NodesProvider, BlockHa
         block_hash_provider
     };
 
-    (code_db, nodes_provider, block_hash_provider)
+    (code_db, block_hash_provider)
 }
 
 // FIXME: fetch new traces
diff --git a/crates/core/src/verifier/scroll.rs b/crates/core/src/verifier/scroll.rs
index e6f1c8d0..76f397e9 100644
--- a/crates/core/src/verifier/scroll.rs
+++ b/crates/core/src/verifier/scroll.rs
@@ -1,16 +1,15 @@
-use crate::{DatabaseError, EvmDatabase, EvmExecutor, VerificationError};
+use crate::{
+    BlockWitness, EvmDatabase, EvmExecutor, VerificationError,
+    witness::{BlockWitnessChunkExt, BlockWitnessExt},
+};
 use itertools::Itertools;
 use sbv_kv::{nohash::NoHashMap, null::NullProvider};
 use sbv_primitives::{
     B256, Bytes, U256,
     chainspec::ChainSpec,
-    ext::{BlockWitnessChunkExt, BlockWitnessExt},
-    types::{
-        BlockWitness,
-        reth::primitives::{Block, RecoveredBlock},
-    },
+    types::reth::primitives::{Block, RecoveredBlock},
 };
-use sbv_trie::BlockWitnessTrieExt;
+use sbv_trie::PartialStateTrie;
 use std::{collections::BTreeMap, sync::Arc};
 
 /// State commit mode for the block witness verification process.
@@ -44,14 +43,28 @@ pub struct VerifyResult {
     pub gas_used: u64,
 }
 
+/// Verify the block witness and return the gas used.
+pub fn run_host(
+    witnesses: &[BlockWitness],
+    chain_spec: Arc<ChainSpec>,
+) -> Result<VerifyResult, VerificationError> {
+    let compression_ratios = witnesses
+        .iter()
+        .map(|block| block.compression_ratios())
+        .collect::<Vec<_>>();
+    let cached_trie = PartialStateTrie::new(
+        witnesses[0].prev_state_root,
+        witnesses.iter().flat_map(|w| w.states.iter()),
+    )?;
+    run(witnesses, chain_spec, compression_ratios, cached_trie)
+}
+
 /// Verify the block witness and return the gas used.
 pub fn run(
-    witnesses: Vec<BlockWitness>,
+    witnesses: &[BlockWitness],
     chain_spec: Arc<ChainSpec>,
-    state_commit_mode: StateCommitMode,
-    compression_ratios: Option<
-        impl IntoIterator<Item = impl IntoIterator<Item = impl Into<U256>>> + Clone,
-    >,
+    compression_ratios: Vec<Vec<U256>>,
+    cached_trie: PartialStateTrie,
 ) -> Result<VerifyResult, VerificationError> {
     if witnesses.is_empty() {
         return Err(VerificationError::EmptyWitnesses);
@@ -62,197 +75,88 @@ pub fn run(
     if !witnesses.has_seq_block_number() {
         return Err(VerificationError::NonSequentialWitnesses);
     }
+    if !witnesses.has_seq_state_root() {
+        return Err(VerificationError::NonSequentialWitnesses);
+    }
 
-    let (code_db, nodes_provider) = make_providers(&witnesses);
-    let code_db = manually_drop_on_zkvm!(code_db);
-    let nodes_provider = manually_drop_on_zkvm!(nodes_provider);
-
-    let pre_state_root = witnesses[0].prev_state_root;
-    let blocks = witnesses
-        .into_iter()
-        .map(|w| {
-            dev_trace!("{w:#?}");
-            w.into_reth_block()
-        })
-        .collect::<Result<Vec<RecoveredBlock<Block>>, _>>()?;
-
-    let mut args = ExecuteInnerArgs {
-        code_db: &code_db,
-        nodes_provider: &nodes_provider,
-        pre_state_root,
-        blocks: &blocks,
-        chain_spec: chain_spec.clone(),
-        defer_commit: true,
-        compression_ratios,
-    };
-
-    let result = match state_commit_mode {
-        StateCommitMode::Chunk | StateCommitMode::Block => {
-            args.defer_commit = matches!(state_commit_mode, StateCommitMode::Chunk);
-            execute(args)?
-        }
-        StateCommitMode::Auto => match execute(args.clone()) {
-            Ok(result) => result,
-            Err(VerificationError::Database(DatabaseError::PartialStateTrie(_e))) => {
-                dev_warn!(
-                    "Failed to execute with defer commit enabled: {_e}; retrying with defer commit disabled"
-                );
-                #[cfg(target_os = "zkvm")]
-                {
-                    println!("failed to update db: {_e}; retrying with defer commit disabled");
-                }
-                args.defer_commit = false;
-                execute(args)?
-            }
-            Err(e) => return Err(e),
-        },
-    };
-
-    let (post_state_root, withdraw_root, gas_used) = result;
-
-    Ok(VerifyResult {
-        blocks,
-        pre_state_root,
-        post_state_root,
-        withdraw_root,
-        gas_used,
-    })
-}
-
-type CodeDb = NoHashMap<B256, Bytes>;
-type NodesProvider = NoHashMap<B256, Bytes>;
-
-/// Create the providers needed for the EVM executor from a list of witnesses.
-#[inline]
-fn make_providers(witnesses: &[BlockWitness]) -> (CodeDb, NodesProvider) {
     let code_db = {
         // build code db
         let num_codes = witnesses.iter().map(|w| w.codes.len()).sum();
         let mut code_db =
             NoHashMap::<B256, Bytes>::with_capacity_and_hasher(num_codes, Default::default());
         witnesses.import_codes(&mut code_db);
-        code_db
-    };
-    let nodes_provider = {
-        let num_states = witnesses.iter().map(|w| w.states.len()).sum();
-        let mut nodes_provider =
-            NoHashMap::<B256, Bytes>::with_capacity_and_hasher(num_states, Default::default());
-        witnesses.import_nodes(&mut nodes_provider);
-        nodes_provider
+        manually_drop_on_zkvm!(code_db)
     };
-    (code_db, nodes_provider)
-}
 
+    let pre_state_root = witnesses[0].prev_state_root;
+    let post_state_root = witnesses.last().unwrap().header.state_root;
 
-#[derive(Clone)]
-pub(super) struct ExecuteInnerArgs<'a, I> {
-    pub(super) code_db: &'a CodeDb,
-    pub(super) nodes_provider: &'a NodesProvider,
-    pub(super) pre_state_root: B256,
-    pub(super) blocks: &'a [RecoveredBlock<Block>],
-    pub(super) chain_spec: Arc<ChainSpec>,
-    pub(super) defer_commit: bool,
-    pub(super) compression_ratios: Option<I>,
-}
+    let blocks = witnesses
+        .iter()
+        .map(|w| {
+            dev_trace!("{w:#?}");
+            w.build_reth_block()
+        })
+        .collect::<Result<Vec<RecoveredBlock<Block>>, _>>()?;
+    if !blocks
+        .iter()
+        .tuple_windows()
+        .all(|(a, b)| a.hash() == b.header().parent_hash)
+    {
+        return Err(VerificationError::NonSequentialWitnesses);
+    }
 
-#[inline]
-fn execute<II, I, R>(
-    ExecuteInnerArgs {
-        code_db,
-        nodes_provider,
-        pre_state_root,
-        blocks,
-        chain_spec,
-        defer_commit,
-        compression_ratios,
-    }: ExecuteInnerArgs<II>,
-) -> Result<(B256, B256, u64), VerificationError>
-where
-    II: IntoIterator<Item = I>,
-    I: IntoIterator<Item = R>,
-    R: Into<U256>,
-{
     let mut gas_used = 0;
-
-    let mut db = manually_drop_on_zkvm!(EvmDatabase::new_from_root(
-        code_db,
-        pre_state_root,
-        nodes_provider,
-        NullProvider,
-    )?);
+    let mut db = EvmDatabase::new(code_db, cached_trie, NullProvider);
 
     let mut execute_block = |block, compression_ratio| -> Result<(), VerificationError> {
-        let output = manually_drop_on_zkvm!(
-            EvmExecutor::new(chain_spec.clone(), &db, block, compression_ratio).execute()?
-        );
-
+        let executor = EvmExecutor::new(chain_spec.clone(), &db, block, compression_ratio);
+        let output = executor.execute()?;
         gas_used += output.gas_used;
 
-        db.update(
-            nodes_provider,
-            BTreeMap::from_iter(output.state.state.clone()).iter(),
-        )?;
-
-        if !defer_commit {
-            let post_state_root = db.commit_changes();
-            if block.state_root != post_state_root {
-                dev_error!(
-                    "Block #{} root mismatch: root after in trace = {:x}, root after in reth = {:x}",
-                    block.number,
-                    block.state_root,
-                    post_state_root
-                );
-                return Err(VerificationError::block_root_mismatch(
-                    block.state_root,
-                    post_state_root,
-                    #[cfg(not(target_os = "zkvm"))]
-                    output.state,
-                ));
-            }
-            dev_info!("Block #{} verified successfully", block.number);
-        } else {
-            dev_info!("Block #{} executed successfully", block.number);
+        #[cfg(not(target_os = "zkvm"))]
+        let state_for_debug = output.state.clone();
+
+        let post_state_root = db.commit(BTreeMap::from_iter(output.state.state))?;
+        if block.state_root != post_state_root {
+            dev_error!(
+                "Block #{} root mismatch: root after in trace = {:x}, root after in reth = {:x}",
+                block.number,
+                block.state_root,
+                post_state_root
+            );
+            return Err(VerificationError::root_mismatch(
+                block.state_root,
+                post_state_root,
+                #[cfg(not(target_os = "zkvm"))]
+                state_for_debug,
+            ));
        }
         Ok(())
     };
 
-    if let Some(compression_ratios) = compression_ratios {
-        for (block, compression_ratios) in blocks.iter().zip_eq(compression_ratios) {
-            execute_block(
-                block,
-                Some(compression_ratios.into_iter().map(|u| u.into())),
-            )?;
-        }
-    } else {
-        for block in blocks {
-            execute_block(block, None)?;
-        }
+    for (block, compression_ratios) in blocks.iter().zip_eq(compression_ratios) {
+        execute_block(block, Some(compression_ratios))?;
     }
 
-    let post_state_root = db.commit_changes();
-    let expected_state_root = blocks.last().unwrap().state_root;
-    if expected_state_root != post_state_root {
-        dev_error!(
-            "Final state root mismatch: expected {expected_state_root:x}, found {post_state_root:x}",
-        );
-        return Err(VerificationError::chunk_root_mismatch(
-            expected_state_root,
-            post_state_root,
-        ));
-    }
     let withdraw_root = db.withdraw_root()?;
-    Ok((post_state_root, withdraw_root, gas_used))
+
+    Ok(VerifyResult {
+        blocks,
+        pre_state_root,
+        post_state_root,
+        withdraw_root,
+        gas_used,
+    })
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
     use sbv_primitives::{
-        U256,
         chainspec::{Chain, build_chain_spec_force_hardfork},
         hardforks::Hardfork,
-        types::BlockWitness,
     };
 
     #[rstest::rstest]
@@ -264,13 +168,7 @@ mod tests {
         let witness: BlockWitness = serde_json::from_str(witness_json).unwrap();
         let chain_spec =
             build_chain_spec_force_hardfork(Chain::from_id(witness.chain_id), Hardfork::EuclidV2);
-        run(
-            vec![witness],
-            chain_spec,
-            StateCommitMode::Block,
-            None::<Vec<Vec<U256>>>,
-        )
-        .unwrap();
+        run_host(&[witness], chain_spec).unwrap();
     }
 
     #[rstest::rstest]
@@ -282,12 +180,6 @@ mod tests {
         let witness: BlockWitness = serde_json::from_str(witness_json).unwrap();
         let chain_spec =
             build_chain_spec_force_hardfork(Chain::from_id(witness.chain_id), Hardfork::Feynman);
-        run(
-            vec![witness],
-            chain_spec,
-            StateCommitMode::Block,
-            None::<Vec<Vec<U256>>>,
-        )
-        .unwrap();
+        run_host(&[witness], chain_spec).unwrap();
     }
 }
diff --git a/crates/core/src/witness.rs b/crates/core/src/witness.rs
new file mode 100644
index 00000000..a64ac726
--- /dev/null
+++ b/crates/core/src/witness.rs
@@ -0,0 +1,291 @@
+use auto_impl::auto_impl;
+use itertools::Itertools;
+use reth_primitives_traits::serde_bincode_compat::BincodeReprFor;
+use sbv_kv::KeyValueStore;
+use sbv_primitives::{
+    B256, Bytes, ChainId, SignatureError, U256, keccak256,
+    types::{
+        Header,
+        consensus::{SignerRecoverable, TxEnvelope},
+        eips::eip4895::Withdrawals,
+        reth::primitives::{Block, BlockBody, RecoveredBlock, SealedBlock},
+    },
+};
+
+/// Witness for a block.
+#[serde_with::serde_as]
+#[derive(Debug, Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
+pub struct BlockWitness {
+    /// Chain id
+    pub chain_id: ChainId,
+    /// Block header representation.
+    #[serde_as(as = "BincodeReprFor<'_, Header>")]
+    pub header: Header,
+    /// State trie root before the block.
+    pub prev_state_root: B256,
+    /// Transactions in the block.
+    #[serde_as(as = "Vec<BincodeReprFor<'_, TxEnvelope>>")]
+    pub transactions: Vec<TxEnvelope>,
+    /// Withdrawals in the block.
+    pub withdrawals: Option<Withdrawals>,
+    /// Last 256 ancestor block hashes.
+    #[cfg(not(feature = "scroll"))]
+    pub block_hashes: Vec<B256>,
+    /// Rlp encoded state trie nodes.
+    #[serde(default)]
+    pub states: Vec<Bytes>,
+    /// Code bytecodes
+    pub codes: Vec<Bytes>,
+}
+
+impl BlockWitness {
+    /// Calculates compression ratios for all transactions in the block witness.
+    ///
+    /// # Panics
+    ///
+    /// Panics if called without the "scroll-compress-ratio" feature enabled, as this
+    /// functionality is not intended to be used in guest environments.
+    pub fn compression_ratios(&self) -> Vec<U256> {
+        #[cfg(feature = "scroll-compress-ratio")]
+        {
+            use sbv_primitives::types::consensus::Transaction;
+
+            self.transactions
+                .iter()
+                .map(|tx| sbv_primitives::types::evm::compute_compression_ratio(&tx.input()))
+                .collect()
+        }
+        #[cfg(not(feature = "scroll-compress-ratio"))]
+        {
+            unimplemented!("compression ratios must be computed on the host, not in the guest");
+        }
+    }
+
+    /// Converts the BlockWitness into a legacy BlockWitness.
+    pub fn into_legacy(self) -> sbv_primitives::legacy_types::BlockWitness {
+        sbv_primitives::legacy_types::BlockWitness {
+            chain_id: self.chain_id,
+            header: self.header.into(),
+            pre_state_root: self.prev_state_root,
+            transaction: self.transactions.into_iter().map(Into::into).collect(),
+            withdrawals: self
+                .withdrawals
+                .map(|w| w.into_iter().map(Into::into).collect()),
+            #[cfg(not(feature = "scroll"))]
+            block_hashes: self.block_hashes,
+            states: self.states,
+            codes: self.codes,
+        }
+    }
+
+    /// Build a recovered reth block from the witness.
+    pub fn build_reth_block(&self) -> Result<RecoveredBlock<Block>, SignatureError> {
+        let senders = self
+            .transactions
+            .iter()
+            .map(|tx| tx.recover_signer())
+            .collect::<Result<Vec<_>, _>>()
+            .expect("Failed to recover signer");
+
+        let body = BlockBody {
+            transactions: self.transactions.clone(),
+            ommers: vec![],
+            withdrawals: self.withdrawals.clone(),
+        };
+        let block = RecoveredBlock::new_sealed(
+            SealedBlock::seal_slow(Block {
+                header: self.header.clone(),
+                body,
+            }),
+            senders,
+        );
+
+        Ok(block)
+    }
+}
+
+impl From<sbv_primitives::legacy_types::BlockWitness> for BlockWitness {
+    fn from(legacy: sbv_primitives::legacy_types::BlockWitness) -> Self {
+        Self {
+            chain_id: legacy.chain_id,
+            header: legacy.header.into(),
+            prev_state_root: legacy.pre_state_root,
+            transactions: legacy
+                .transaction
+                .into_iter()
+                .map(|t| t.try_into().unwrap())
+                .collect(),
+            withdrawals: legacy
+                .withdrawals
+                .map(|w| Withdrawals::new(w.into_iter().map(Into::into).collect())),
+            #[cfg(not(feature = "scroll"))]
+            block_hashes: legacy.block_hashes,
+            states: legacy.states,
+            codes: legacy.codes,
+        }
+    }
+}
+
+/// BlockWitnessExt trait
+#[auto_impl(&, &mut, Box, Rc, Arc)]
+pub trait BlockWitnessExt {
+    /// Import codes into code db
+    fn import_codes<CodeDb: KeyValueStore<B256, Bytes>>(&self, code_db: CodeDb);
+    /// Import block hashes into block hash provider
+    #[cfg(not(feature = "scroll"))]
+    fn import_block_hashes<BlockHashProvider: KeyValueStore<u64, B256>>(
+        &self,
+        block_hashes: BlockHashProvider,
+    );
+}
+
+impl BlockWitnessExt for BlockWitness {
+    fn import_codes<CodeDb: KeyValueStore<B256, Bytes>>(&self, mut code_db: CodeDb) {
+        for code in self.codes.iter() {
+            let code = code.as_ref();
+            let code_hash = cycle_track!(keccak256(code), "keccak256");
+            code_db.or_insert_with(code_hash, || Bytes::copy_from_slice(code))
+        }
+    }
+
+    #[cfg(not(feature = "scroll"))]
+    fn import_block_hashes<BlockHashProvider: KeyValueStore<u64, B256>>(
+        &self,
+        mut block_hashes: BlockHashProvider,
+    ) {
+        let block_number = self.header.number;
+        for (i, hash) in self.block_hashes.iter().enumerate() {
+            let block_number = block_number
+                .checked_sub(i as u64 + 1)
+                .expect("block number underflow");
+            block_hashes.insert(block_number, *hash)
+        }
+    }
+}
+
+impl BlockWitnessExt for [BlockWitness] {
+    fn import_codes<CodeDb: KeyValueStore<B256, Bytes>>(&self, mut code_db: CodeDb) {
+        for code in self.iter().flat_map(|w| w.codes.iter()) {
+            let code = code.as_ref();
+            let code_hash = cycle_track!(keccak256(code), "keccak256");
+            code_db.or_insert_with(code_hash, || Bytes::copy_from_slice(code))
+        }
+    }
+
+    #[cfg(not(feature = "scroll"))]
+    fn import_block_hashes<BlockHashProvider: KeyValueStore<u64, B256>>(
+        &self,
+        mut block_hashes: BlockHashProvider,
+    ) {
+        for witness in self.iter() {
+            let block_number = witness.header.number;
+            for (i, hash) in witness.block_hashes.iter().enumerate() {
+                let block_number = block_number
+                    .checked_sub(i as u64 + 1)
+                    .expect("block number underflow");
+                block_hashes.insert(block_number, *hash)
+            }
+        }
+    }
+}
+
+/// BlockWitnessChunkExt trait
+#[auto_impl(&, &mut, Box, Rc, Arc)]
+pub trait BlockWitnessChunkExt {
+    /// Get the chain id.
+    fn chain_id(&self) -> ChainId;
+    /// Get the previous state root.
+    fn prev_state_root(&self) -> B256;
+    /// Check if all witnesses have the same chain id.
+    fn has_same_chain_id(&self) -> bool;
+    /// Check if all witnesses have a sequence block number.
+    fn has_seq_block_number(&self) -> bool;
+    /// Check if all witnesses have a sequence state root.
+    fn has_seq_state_root(&self) -> bool;
+}
+
+impl BlockWitnessChunkExt for [BlockWitness] {
+    #[inline(always)]
+    fn chain_id(&self) -> ChainId {
+        debug_assert!(self.has_same_chain_id(), "chain id mismatch");
+        self.first().expect("empty witnesses").chain_id
+    }
+
+    #[inline(always)]
+    fn prev_state_root(&self) -> B256 {
+        self.first().expect("empty witnesses").prev_state_root
+    }
+
+    #[inline(always)]
+    fn has_same_chain_id(&self) -> bool {
+        self.iter()
+            .tuple_windows()
+            .all(|(a, b)| a.chain_id == b.chain_id)
+    }
+
+    #[inline(always)]
+    fn has_seq_block_number(&self) -> bool {
+        self.iter()
+            .tuple_windows()
+            .all(|(a, b)| a.header.number + 1 == b.header.number)
+    }
+
+    #[inline(always)]
+    fn has_seq_state_root(&self) -> bool {
+        self.iter()
+            .tuple_windows()
+            .all(|(a, b)| a.header.state_root == b.prev_state_root)
+    }
+}
+
+#[cfg(test)]
+#[cfg(feature = "scroll")]
+mod tests {
+    use super::*;
+    use std::{collections::BTreeSet, ffi::OsStr, path::PathBuf};
+
+    #[rstest::rstest]
+    fn serde_scroll_blocks_roundtrip(
+        #[files("../../testdata/scroll_witness/**/*.json")]
+        #[mode = path]
+        path: PathBuf,
+    ) {
+        let file_content = std::fs::read_to_string(path).unwrap();
+        let witness: BlockWitness = serde_json::from_str(&file_content).unwrap();
+        let serialized = serde_json::to_string(&witness).unwrap();
+        let deserialized: BlockWitness = serde_json::from_str(&serialized).unwrap();
+        assert_eq!(witness, deserialized);
+    }
+
+    #[rstest::rstest]
+    fn serde_scroll_blocks_legacy_compatibility(
+        #[files("../../testdata/scroll_witness/**/*.json")]
+        #[mode = path]
+        path: PathBuf,
+    ) {
+        let file_content = std::fs::read_to_string(&path).unwrap();
+        let witness: BlockWitness = serde_json::from_str(&file_content).unwrap();
+
+        let base_dir = path
+            .ancestors()
+            .find(|p| p.file_name().unwrap() == OsStr::new("testdata"))
+            .unwrap();
+        let filename = path.file_name().unwrap();
+        let hardfork = path.parent().unwrap().file_name().unwrap();
+        let legacy_path = base_dir
+            .join("legacy")
+            .join("scroll_witness")
+            .join(hardfork)
+            .join(filename);
+        let legacy_content = std::fs::read_to_string(legacy_path).unwrap();
+        let mut legacy_witness: sbv_primitives::legacy_types::BlockWitness =
+            serde_json::from_str(&legacy_content).unwrap();
+        legacy_witness.states = Vec::from_iter(BTreeSet::from_iter(legacy_witness.states));
+        legacy_witness.codes = Vec::from_iter(BTreeSet::from_iter(legacy_witness.codes));
+
+        let mut legacy_converted = witness.into_legacy();
+        legacy_converted.states = Vec::from_iter(BTreeSet::from_iter(legacy_converted.states));
+        legacy_converted.codes = Vec::from_iter(BTreeSet::from_iter(legacy_converted.codes));
+        assert_eq!(legacy_converted, legacy_witness);
+    }
+}
diff --git a/crates/kv/src/imps/std_collections.rs b/crates/kv/src/imps/std_collections.rs
index c1edbe94..77d9cf8d 100644
--- a/crates/kv/src/imps/std_collections.rs
+++ b/crates/kv/src/imps/std_collections.rs
@@ -1,4 +1,4 @@
-use crate::{HashMap, KeyValueStore, KeyValueStoreGet, KeyValueStoreInsert};
+use crate::{HashMap, KeyValueStore, KeyValueStoreGet, KeyValueStoreInsert, KeyValueStoreRemove};
 use core::hash::{BuildHasher, Hash};
 use std::{borrow::Borrow, collections::BTreeMap};
 
@@ -21,6 +21,16 @@ impl<K: Ord + Hash + Eq, V, S: BuildHasher> KeyValueStoreGet<K, V> for HashMap<
     }
 }
 
+impl<K: Ord + Hash + Eq, V, S: BuildHasher> KeyValueStoreRemove<K, V> for HashMap<K, V, S> {
+    fn remove<Q>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+        Q: Ord + Hash + Eq + ?Sized,
+    {
+        HashMap::remove(self, k)
+    }
+}
+
 impl<K: Ord + Hash + Eq, V, S: BuildHasher> KeyValueStore<K, V> for HashMap<K, V, S> {}
 
 impl<K: Ord, V> KeyValueStoreInsert<K, V> for BTreeMap<K, V> {
@@ -42,4 +52,14 @@ impl<K: Ord, V> KeyValueStoreGet<K, V> for BTreeMap<K, V> {
     }
 }
 
+impl<K: Ord, V> KeyValueStoreRemove<K, V> for BTreeMap<K, V> {
+    fn remove<Q>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+        Q: Ord + Hash + Eq + ?Sized,
+    {
+        BTreeMap::remove(self, k)
+    }
+}
+
 impl<K: Ord, V> KeyValueStore<K, V> for BTreeMap<K, V> {}
diff --git a/crates/kv/src/lib.rs b/crates/kv/src/lib.rs
index 665f0e6a..94d5a2b4 100644
--- a/crates/kv/src/lib.rs
+++ b/crates/kv/src/lib.rs
@@ -10,7 +10,7 @@ pub use imps::{nohash, null};
 /// HashMap
 pub type HashMap = hashbrown::HashMap;
 /// HashSet
-pub type HashSet = hashbrown::HashSet;
+pub type HashSet = hashbrown::HashSet;
 
 /// Key-Value store insert trait
 #[auto_impl(&mut, Box)]
@@ -31,6 +31,16 @@ pub trait KeyValueStoreGet<K, V> {
         Q: Ord + Hash + Eq + ?Sized;
 }
 
+/// Key-Value store remove trait
+#[auto_impl(&mut)]
+pub trait KeyValueStoreRemove<K, V> {
+    /// Remove value by key
+    fn remove<Q>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+        Q: Ord + Hash + Eq + ?Sized;
+}
+
 /// Key-Value store trait
 #[auto_impl(&, &mut, Box, Rc, Arc)]
 pub trait KeyValueStore<K, V>:
diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml
index 92d98409..9633bf95 100644
--- a/crates/primitives/Cargo.toml
+++ b/crates/primitives/Cargo.toml
@@ -17,7 +17,6 @@ auto_impl.workspace = true
 itertools.workspace = true
 rkyv = { workspace = true, optional = true }
 serde.workspace = true
-serde_with.workspace = true
 tracing = { workspace = true, optional = true }
 
 alloy-consensus.workspace = true
diff --git a/crates/primitives/src/ext.rs b/crates/primitives/src/ext.rs
deleted file mode 100644
index f6115b10..00000000
--- a/crates/primitives/src/ext.rs
+++ /dev/null
@@ -1,108 +0,0 @@
-use crate::{B256, Bytes, keccak256, types::BlockWitness};
-use auto_impl::auto_impl;
-use itertools::Itertools;
-use sbv_helpers::cycle_track;
-use sbv_kv::KeyValueStore;
-
-/// BlockWitnessExt trait
-#[auto_impl(&, &mut, Box, Rc, Arc)]
-pub trait BlockWitnessExt {
-    /// Import codes into code db
-    fn import_codes<CodeDb: KeyValueStore<B256, Bytes>>(&self, code_db: CodeDb);
-    /// Import block hashes into block hash provider
-    #[cfg(not(feature = "scroll"))]
-    fn import_block_hashes<BlockHashProvider: KeyValueStore<u64, B256>>(
-        &self,
-        block_hashes: BlockHashProvider,
-    );
-}
-
-/// BlockWitnessCodeExt trait
-#[auto_impl(&, &mut, Box, Rc, Arc)]
-pub trait BlockWitnessChunkExt {
-    /// Get the chain id.
-    fn chain_id(&self) -> crate::ChainId;
-    /// Get the previous state root.
-    fn prev_state_root(&self) -> B256;
-    /// Check if all witnesses have the same chain id.
-    fn has_same_chain_id(&self) -> bool;
-    /// Check if all witnesses have a sequence block number.
- fn has_seq_block_number(&self) -> bool; -} - -impl BlockWitnessExt for BlockWitness { - fn import_codes>(&self, mut code_db: CodeDb) { - for code in self.codes.iter() { - let code = code.as_ref(); - let code_hash = cycle_track!(keccak256(code), "keccak256"); - code_db.or_insert_with(code_hash, || Bytes::copy_from_slice(code)) - } - } - - #[cfg(not(feature = "scroll"))] - fn import_block_hashes>( - &self, - mut block_hashes: BlockHashProvider, - ) { - let block_number = self.header.number; - for (i, hash) in self.block_hashes.iter().enumerate() { - let block_number = block_number - .checked_sub(i as u64 + 1) - .expect("block number underflow"); - block_hashes.insert(block_number, *hash) - } - } -} - -impl BlockWitnessExt for [BlockWitness] { - fn import_codes>(&self, mut code_db: CodeDb) { - for code in self.iter().flat_map(|w| w.codes.iter()) { - let code = code.as_ref(); - let code_hash = cycle_track!(keccak256(code), "keccak256"); - code_db.or_insert_with(code_hash, || Bytes::copy_from_slice(code)) - } - } - - #[cfg(not(feature = "scroll"))] - fn import_block_hashes>( - &self, - mut block_hashes: BlockHashProvider, - ) { - for witness in self.iter() { - let block_number = witness.header.number; - for (i, hash) in witness.block_hashes.iter().enumerate() { - let block_number = block_number - .checked_sub(i as u64 + 1) - .expect("block number underflow"); - block_hashes.insert(block_number, *hash) - } - } - } -} - -impl BlockWitnessChunkExt for [BlockWitness] { - #[inline(always)] - fn chain_id(&self) -> crate::ChainId { - debug_assert!(self.has_same_chain_id(), "chain id mismatch"); - self.first().expect("empty witnesses").chain_id - } - - #[inline(always)] - fn prev_state_root(&self) -> B256 { - self.first().expect("empty witnesses").prev_state_root - } - - #[inline(always)] - fn has_same_chain_id(&self) -> bool { - self.iter() - .tuple_windows() - .all(|(a, b)| a.chain_id == b.chain_id) - } - - #[inline(always)] - fn has_seq_block_number(&self) -> bool { - self.iter() - .tuple_windows() - .all(|(a, b)| a.header.number + 1 == b.header.number) - } -} diff --git a/crates/primitives/src/legacy_types.rs b/crates/primitives/src/legacy_types.rs index 026e9c64..e0377057 100644 --- a/crates/primitives/src/legacy_types.rs +++ b/crates/primitives/src/legacy_types.rs @@ -12,37 +12,3 @@ pub use signature::Signature; pub use transaction::Transaction; pub use withdrawal::Withdrawal; pub use witness::BlockWitness; - -#[cfg(test)] -mod tests { - use super::*; - - #[rstest::rstest] - #[cfg(not(feature = "scroll"))] - fn serde_scroll_legacy_blocks_roundtrip( - #[files("../../testdata/legacy/holesky_witness/**/*.json")] - #[mode = str] - witness_json: &str, - ) { - let witness: BlockWitness = serde_json::from_str(witness_json).unwrap(); - let serialized = serde_json::to_string(&witness).unwrap(); - let deserialized: BlockWitness = serde_json::from_str(&serialized).unwrap(); - assert_eq!(witness, deserialized); - } - - #[rstest::rstest] - #[cfg(feature = "scroll")] - fn serde_scroll_legacy_blocks_roundtrip( - #[files("../../testdata/legacy/scroll_witness/**/*.json")] - #[mode = str] - witness_json: &str, - ) { - let witness: BlockWitness = serde_json::from_str(witness_json).unwrap(); - let serialized = serde_json::to_string(&witness).unwrap(); - let deserialized: BlockWitness = serde_json::from_str(&serialized).unwrap(); - assert_eq!(witness, deserialized); - let current_witness = deserialized.into_current(); - let converted = current_witness.clone().into_legacy(); - assert_eq!(witness, converted); - } -} 
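The new `BlockWitnessChunkExt` impl earlier in this diff (including the added `has_seq_state_root` check) validates a chunk by comparing adjacent witnesses pairwise with `itertools::tuple_windows`. A minimal, self-contained sketch of the pattern follows; the `Witness` struct is a hypothetical stand-in for `BlockWitness` (where `number` and `state_root` actually live on `header`), and only the `itertools` crate is assumed:

```rust
// Sketch of the pairwise chunk-continuity checks used by `BlockWitnessChunkExt`.
use itertools::Itertools;

struct Witness {
    chain_id: u64,
    number: u64,
    prev_state_root: [u8; 32],
    state_root: [u8; 32],
}

/// A chunk is well-formed when every adjacent pair agrees on the chain id,
/// has consecutive block numbers, and chains state roots together.
fn is_well_formed_chunk(chunk: &[Witness]) -> bool {
    chunk.iter().tuple_windows().all(|(a, b)| {
        a.chain_id == b.chain_id
            && a.number + 1 == b.number
            && a.state_root == b.prev_state_root
    })
}

fn main() {
    let w0 = Witness { chain_id: 1, number: 10, prev_state_root: [0; 32], state_root: [1; 32] };
    let w1 = Witness { chain_id: 1, number: 11, prev_state_root: [1; 32], state_root: [2; 32] };
    assert!(is_well_formed_chunk(&[w0, w1]));
}
```

The state-root chaining is the piece this diff adds: `has_seq_state_root` requires each witness's post-state root to equal the next witness's `prev_state_root`, which the deleted trait did not check.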
diff --git a/crates/primitives/src/legacy_types/witness.rs b/crates/primitives/src/legacy_types/witness.rs index fc00ba7f..b51ae916 100644 --- a/crates/primitives/src/legacy_types/witness.rs +++ b/crates/primitives/src/legacy_types/witness.rs @@ -1,7 +1,6 @@ use crate::{ B256, Bytes, ChainId, legacy_types::{BlockHeader, Transaction, Withdrawal}, - types::eips::eip4895::Withdrawals, }; /// Witness for a block. @@ -39,26 +38,3 @@ pub struct BlockWitness { #[cfg_attr(feature = "rkyv", rkyv(attr(doc = "Code bytecodes")))] pub codes: Vec, } - -impl BlockWitness { - /// Converts the legacy `BlockWitness` into a current `BlockWitness`. - pub fn into_current(self) -> crate::types::BlockWitness { - crate::types::BlockWitness { - chain_id: self.chain_id, - header: self.header.into(), - prev_state_root: self.pre_state_root, - transactions: self - .transaction - .into_iter() - .map(|t| t.try_into().unwrap()) - .collect(), - withdrawals: self - .withdrawals - .map(|w| Withdrawals::new(w.into_iter().map(Into::into).collect())), - #[cfg(not(feature = "scroll"))] - block_hashes: self.block_hashes, - states: self.states, - codes: self.codes, - } - } -} diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 7ca16296..7f930af0 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -4,9 +4,6 @@ #[cfg(feature = "chainspec")] pub mod chainspec; -/// Extension Traits -pub mod ext; - /// Ethereum fork types #[cfg(feature = "hardforks")] pub mod hardforks { diff --git a/crates/primitives/src/types.rs b/crates/primitives/src/types.rs index f27eee50..3deda023 100644 --- a/crates/primitives/src/types.rs +++ b/crates/primitives/src/types.rs @@ -66,7 +66,7 @@ pub mod revm { pub mod reth { /// Re-export types from `reth-primitives-types` pub mod primitives { - pub use reth_primitives::RecoveredBlock; + pub use reth_primitives::{RecoveredBlock, SealedBlock}; #[cfg(not(feature = "scroll"))] pub use reth_primitives::{Block, BlockBody, EthPrimitives, Receipt, TransactionSigned}; @@ -116,160 +116,3 @@ pub mod rpc { /// Block representation for RPC. pub type Block = alloy_rpc_types_eth::Block; } - -/// Witness type -pub mod witness { - use crate::{ - B256, Bytes, ChainId, SignatureError, U256, - types::{ - Header, - consensus::{SignerRecoverable, TxEnvelope}, - eips::eip4895::Withdrawals, - reth::primitives::{Block, BlockBody, RecoveredBlock}, - }, - }; - use reth_primitives_traits::serde_bincode_compat::BincodeReprFor; - - /// Witness for a block. - #[serde_with::serde_as] - #[derive(Debug, Clone, Hash, Eq, PartialEq, serde::Serialize, serde::Deserialize)] - pub struct BlockWitness { - /// Chain id - pub chain_id: ChainId, - /// Block header representation. - #[serde_as(as = "BincodeReprFor<'_, Header>")] - pub header: Header, - /// State trie root before the block. - pub prev_state_root: B256, - /// Transactions in the block. - #[serde_as(as = "Vec>")] - pub transactions: Vec, - /// Withdrawals in the block. - pub withdrawals: Option, - /// Last 256 Ancestor block hashes. - #[cfg(not(feature = "scroll"))] - pub block_hashes: Vec, - /// Rlp encoded state trie nodes. - pub states: Vec, - /// Code bytecodes - pub codes: Vec, - } - - impl BlockWitness { - /// Calculates compression ratios for all transactions in the block witness. - /// - /// # Panics - /// - /// Panics if called without the "scroll-compress-ratio" feature enabled, as this - /// functionality is not intended to be used in guest environments. 
- pub fn compression_ratios(&self) -> Vec { - #[cfg(feature = "scroll-compress-ratio")] - { - use crate::types::consensus::Transaction; - - self.transactions - .iter() - .map(|tx| crate::types::evm::compute_compression_ratio(&tx.input())) - .collect() - } - #[cfg(not(feature = "scroll-compress-ratio"))] - { - unimplemented!("you should not build ChunkWitness in guest?"); - } - } - - /// Converts the `BlockWitness` into a legacy `BlockWitness`. - pub fn into_legacy(self) -> crate::legacy_types::BlockWitness { - crate::legacy_types::BlockWitness { - chain_id: self.chain_id, - header: self.header.into(), - pre_state_root: self.prev_state_root, - transaction: self.transactions.into_iter().map(Into::into).collect(), - withdrawals: self - .withdrawals - .map(|w| w.into_iter().map(Into::into).collect()), - #[cfg(not(feature = "scroll"))] - block_hashes: self.block_hashes, - states: self.states, - codes: self.codes, - } - } - - /// Build a reth block - pub fn into_reth_block(self) -> Result, SignatureError> { - let senders = self - .transactions - .iter() - .map(|tx| tx.recover_signer()) - .collect::, _>>() - .expect("Failed to recover signer"); - - let body = BlockBody { - transactions: self.transactions, - ommers: vec![], - withdrawals: self.withdrawals, - }; - - Ok(RecoveredBlock::new_unhashed( - Block { - header: self.header, - body, - }, - senders, - )) - } - } -} -pub use witness::BlockWitness; - -#[cfg(test)] -#[cfg(feature = "scroll")] -mod tests { - use super::*; - use std::{collections::BTreeSet, ffi::OsStr, path::PathBuf}; - - #[rstest::rstest] - fn serde_scroll_blocks_roundtrip( - #[files("../../testdata/scroll_witness/**/*.json")] - #[mode = path] - path: PathBuf, - ) { - let file_content = std::fs::read_to_string(path).unwrap(); - let witness: BlockWitness = serde_json::from_str(&file_content).unwrap(); - let serialized = serde_json::to_string(&witness).unwrap(); - let deserialized: BlockWitness = serde_json::from_str(&serialized).unwrap(); - assert_eq!(witness, deserialized); - } - - #[rstest::rstest] - fn serde_scroll_blocks_legacy_compatibility( - #[files("../../testdata/scroll_witness/**/*.json")] - #[mode = path] - path: PathBuf, - ) { - let file_content = std::fs::read_to_string(&path).unwrap(); - let witness: BlockWitness = serde_json::from_str(&file_content).unwrap(); - - let base_dir = path - .ancestors() - .find(|p| p.file_name().unwrap() == OsStr::new("testdata")) - .unwrap(); - let filename = path.file_name().unwrap(); - let harfork = path.parent().unwrap().file_name().unwrap(); - let legacy_path = base_dir - .join("legacy") - .join("scroll_witness") - .join(harfork) - .join(filename); - let legacy_content = std::fs::read_to_string(legacy_path).unwrap(); - let mut legacy_witness: crate::legacy_types::BlockWitness = - serde_json::from_str(&legacy_content).unwrap(); - legacy_witness.states = Vec::from_iter(BTreeSet::from_iter(legacy_witness.states)); - legacy_witness.codes = Vec::from_iter(BTreeSet::from_iter(legacy_witness.codes)); - - let mut legacy_converted = witness.into_legacy(); - legacy_converted.states = Vec::from_iter(BTreeSet::from_iter(legacy_converted.states)); - legacy_converted.codes = Vec::from_iter(BTreeSet::from_iter(legacy_converted.codes)); - assert_eq!(legacy_converted, legacy_witness); - } -} diff --git a/crates/sbv/Cargo.toml b/crates/sbv/Cargo.toml index 107cfafb..1634de17 100644 --- a/crates/sbv/Cargo.toml +++ b/crates/sbv/Cargo.toml @@ -24,6 +24,7 @@ c-kzg = ["sbv-primitives/c-kzg"] kzg-rs = ["sbv-primitives/kzg-rs"] scroll = ["sbv-core/scroll", 
"sbv-primitives/scroll-all", "sbv-utils/scroll"] +scroll-compress-ratio = ["sbv-core/scroll-compress-ratio"] dev = ["sbv-core/dev", "sbv-primitives/dev", "sbv-trie/dev"] # sp1 related diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml index f2c6ad5f..604443ae 100644 --- a/crates/trie/Cargo.toml +++ b/crates/trie/Cargo.toml @@ -12,11 +12,11 @@ repository.workspace = true workspace = true [dependencies] -auto_impl.workspace = true alloy-rlp.workspace = true alloy-trie.workspace = true reth-trie.workspace = true -reth-trie-sparse.workspace = true +serde = { workspace = true, features = ["derive"] } +rlp = "0.5" sbv-primitives = { workspace = true, features = ["revm-types"] } sbv-kv.workspace = true diff --git a/crates/trie/src/execution_witness.rs b/crates/trie/src/execution_witness.rs new file mode 100644 index 00000000..6c238dad --- /dev/null +++ b/crates/trie/src/execution_witness.rs @@ -0,0 +1,166 @@ +//! This is copied and modified from https://github.com/succinctlabs/rsp +//! crates/mpt/src/execution_witness.rs rev@2a99f35a9b81452eb53af3848e50addfd481363c +//! Under MIT license +use crate::mpt::{MptNode, MptNodeData, MptNodeReference, resolve_nodes}; +use alloy_rlp::Decodable; +use reth_trie::TrieAccount; +use sbv_kv::{HashMap, nohash::NoHashMap}; +use sbv_primitives::{B256, Bytes, keccak256}; + +/// Partial state trie error +#[derive(thiserror::Error, Debug)] +pub enum FromWitnessError { + /// rlp error + #[error("rlp error: {0}")] + Rlp(#[from] alloy_rlp::Error), + /// trie error + #[error(transparent)] + Trie(#[from] crate::mpt::Error), + /// missing storage trie witness + #[error( + "missing storage trie witness for {hashed_address:?} with storage root {storage_root:?}" + )] + MissingStorageTrie { + /// The keccak256 hash of the account address + hashed_address: B256, + /// The storage root of the account + storage_root: B256, + }, + /// state trie validation error + #[error("mismatched state root: expected {expected:?}, got {actual:?}")] + StateTrieValidation { + /// The expected state root hash + expected: B256, + /// The actual computed state root hash + actual: B256, + }, + /// missing account in state trie + #[error("account not found in state trie")] + MissingAccount, + /// storage trie validation error + #[error( + "mismatched storage root for address hash {hashed_address:?}: expected {expected_hash:?}, got {actual_hash:?}" + )] + StorageTrieValidation { + /// The keccak256 hash of the account address + hashed_address: B256, + /// The expected storage root hash from the account + expected_hash: B256, + /// The actual computed storage root hash + actual_hash: B256, + }, +} + +// Builds tries from the witness state. +// +// NOTE: This method should be called outside zkVM! In general, you construct tries, then +// validate them inside zkVM. +pub(crate) fn build_validated_tries<'a, I>( + prev_state_root: B256, + states: I, +) -> Result<(MptNode, NoHashMap), FromWitnessError> +where + I: IntoIterator, +{ + // Step 1: Decode all RLP-encoded trie nodes and index by hash + // IMPORTANT: Witness state contains both *state trie* nodes and *storage tries* nodes! 
+ let mut node_map = HashMap::::default(); + let mut node_by_hash = NoHashMap::::default(); + let mut root_node: Option = None; + + for encoded in states.into_iter() { + let node = MptNode::decode(&mut encoded.as_ref())?; + let hash = keccak256(encoded); + if hash == prev_state_root { + root_node = Some(node.clone()); + } + node_by_hash.insert(hash, node.clone()); + node_map.insert(node.reference(), node); + } + + // Step 2: Use root_node or fallback to Digest + let root = root_node.unwrap_or_else(|| MptNodeData::Digest(prev_state_root).into()); + + // Build state trie. + let mut raw_storage_tries = Vec::with_capacity(node_by_hash.len()); + let state_trie = resolve_nodes(&root, &node_map); + + state_trie.for_each_leaves(|key, mut value| { + let account = TrieAccount::decode(&mut value).unwrap(); + let hashed_address = B256::from_slice(key); + raw_storage_tries.push((hashed_address, account.storage_root)); + }); + + // Step 3: Build storage tries per account efficiently + let mut storage_tries = NoHashMap::::with_capacity_and_hasher( + raw_storage_tries.len(), + Default::default(), + ); + + for (hashed_address, storage_root) in raw_storage_tries { + let root_node = match node_by_hash.get(&storage_root).cloned() { + Some(node) => node, + None => { + // An execution witness can include an account leaf (with non-empty storageRoot), + // but omit its entire storage trie when that account's storage was + // NOT touched during the block. + continue; + } + }; + let storage_trie = resolve_nodes(&root_node, &node_map); + + if storage_trie.is_digest() { + return Err(FromWitnessError::MissingStorageTrie { + hashed_address, + storage_root, + }); + } + + // Insert resolved storage trie. + storage_tries.insert(hashed_address, storage_trie); + } + + // Step 3a: Verify that state_trie was built correctly - confirm tree hash with pre state root. + validate_state_trie(&state_trie, prev_state_root)?; + + // Step 3b: Verify that each storage trie matches the declared storage_root in the state trie. + validate_storage_tries(&state_trie, &storage_tries)?; + + Ok((state_trie, storage_tries)) +} + +// Validate that state_trie was built correctly - confirm tree hash with prev state root. +fn validate_state_trie(state_trie: &MptNode, pre_state_root: B256) -> Result<(), FromWitnessError> { + if state_trie.hash() != pre_state_root { + return Err(FromWitnessError::StateTrieValidation { + expected: pre_state_root, + actual: state_trie.hash(), + }); + } + Ok(()) +} + +// Validates that each storage trie matches the declared storage_root in the state trie. +fn validate_storage_tries( + state_trie: &MptNode, + storage_tries: &NoHashMap, +) -> Result<(), FromWitnessError> { + for (hashed_address, storage_trie) in storage_tries.iter() { + let account = state_trie + .get_rlp::(hashed_address.as_slice())? 
+ .ok_or(FromWitnessError::MissingAccount)?; + + let storage_root = account.storage_root; + let actual_hash = storage_trie.hash(); + + if storage_root != actual_hash { + return Err(FromWitnessError::StorageTrieValidation { + hashed_address: *hashed_address, + expected_hash: storage_root, + actual_hash, + }); + } + } + + Ok(()) +} diff --git a/crates/trie/src/lib.rs b/crates/trie/src/lib.rs index 6c24b9da..c6313bde 100644 --- a/crates/trie/src/lib.rs +++ b/crates/trie/src/lib.rs @@ -2,230 +2,141 @@ #[macro_use] extern crate sbv_helpers; -use alloy_rlp::{Decodable, Encodable, encode_fixed_size}; -use alloy_trie::{ - EMPTY_ROOT_HASH, Nibbles, TrieMask, - nodes::{CHILD_INDEX_RANGE, RlpNode}, -}; -use auto_impl::auto_impl; -use reth_trie::TRIE_ACCOUNT_RLP_MAX_SIZE; -use reth_trie_sparse::{ - SerialSparseTrie, SparseTrieInterface, TrieMasks, errors::SparseTrieError, - provider::DefaultTrieNodeProvider, -}; -use sbv_kv::{HashMap, nohash::NoHashMap}; -use sbv_primitives::{ - Address, B256, Bytes, U256, keccak256, - types::{BlockWitness, revm::database::BundleAccount}, -}; -use std::{cell::RefCell, collections::BTreeMap, fmt::Debug}; +use crate::mpt::MptNode; +use alloy_trie::{EMPTY_ROOT_HASH, TrieAccount}; +use sbv_kv::nohash::NoHashMap; +use sbv_primitives::{Address, B256, Bytes, U256, keccak256, types::revm::database::BundleAccount}; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; -pub use alloy_trie::{TrieAccount, nodes::TrieNode}; -pub use reth_trie::{KeccakKeyHasher, KeyHasher}; - -/// Extension trait for BlockWitness -#[auto_impl(&, &mut, Box, Rc, Arc)] -pub trait BlockWitnessTrieExt { - /// Import nodes into a KeyValueStore - fn import_nodes>(&self, provider: &mut P); -} - -impl BlockWitnessTrieExt for BlockWitness { - fn import_nodes>(&self, provider: &mut P) { - for state in self.states.iter() { - let node_hash = cycle_track!(keccak256(state.as_ref()), "keccak256"); - provider.insert(node_hash, state.clone()); - } - } -} - -impl BlockWitnessTrieExt for [BlockWitness] { - fn import_nodes>(&self, provider: &mut P) { - for w in self.iter() { - for state in w.states.iter() { - let node_hash = cycle_track!(keccak256(state.as_ref()), "keccak256"); - provider.insert(node_hash, state.clone()); - } - } - } -} +mod execution_witness; +mod mpt; +pub use execution_witness::FromWitnessError; /// A partial trie that can be updated -#[derive(Debug)] +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct PartialStateTrie { - state: SerialSparseTrie, - /// hashed address -> storage root - storage_roots: RefCell>, - /// hashed address -> storage tire - storage_tries: RefCell>>, - /// shared rlp buffer - rlp_buffer: Vec, + state_trie: MptNode, + storage_tries: NoHashMap, } /// Partial state trie error #[derive(thiserror::Error, Debug)] pub enum PartialStateTrieError { - /// reth sparse trie error + /// mpt error #[error("error occurred in reth_trie_sparse: {0}")] - Impl(String), // FIXME: wtf, why `SparseTrieError` they don't require Sync? 
-    /// an error occurred while previously try to open the storage trie
-    #[error("an error occurred while previously try to open the storage trie")]
-    PreviousError,
-    /// missing trie witness for node
-    #[error("missing trie witness for node: {0}")]
-    MissingWitness(B256),
-    /// rlp error
-    #[error(transparent)]
-    Rlp(#[from] alloy_rlp::Error),
-    /// extra data in the leaf
-    #[error("{0}")]
-    ExtraData(&'static str),
+    /// mpt error
+    #[error("mpt error: {0}")]
+    Impl(#[from] mpt::Error),
 }
 
-type Result = std::result::Result;
-
 impl PartialStateTrie {
-    /// Open a partial trie from a root node
-    pub fn open>(
-        nodes_provider: &P,
-        root: B256,
-    ) -> Result {
-        let state = cycle_track!(open_trie(nodes_provider, root), "open_trie")?;
+    /// Create a partial state trie from a previous state root and an iterator of
+    /// RLP-encoded MPT nodes
+    pub fn new<'a, I>(
+        prev_state_root: B256,
+        states: I,
+    ) -> Result<Self, FromWitnessError>
+    where
+        I: IntoIterator<Item = &'a Bytes>,
+    {
+        let (state_trie, storage_tries) =
+            execution_witness::build_validated_tries(prev_state_root, states)?;
         Ok(PartialStateTrie {
-            state,
-            storage_roots: RefCell::new(HashMap::with_capacity_and_hasher(256, Default::default())),
-            storage_tries: RefCell::new(HashMap::with_capacity_and_hasher(256, Default::default())),
-            rlp_buffer: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), // pre-allocate 128 bytes
+            state_trie,
+            storage_tries,
        })
    }
 
-    /// Get account
-    #[cfg_attr(
-        feature = "dev",
-        tracing::instrument(level = tracing::Level::TRACE, skip(self), ret)
-    )]
-    pub fn get_account(&self, address: Address) -> Result> {
-        let path = Nibbles::unpack(keccak256(address));
-        let Some(value) = self.state.get_leaf_value(&path) else {
-            return Ok(None);
-        };
-        let account = TrieAccount::decode(&mut value.as_ref())?;
-        self.storage_roots
-            .borrow_mut()
-            .insert(address, account.storage_root);
-        Ok(Some(account))
+    /// Get account by address
+    #[inline]
+    pub fn get_account(
+        &self,
+        address: Address,
+    ) -> Result<Option<TrieAccount>, PartialStateTrieError> {
+        let hashed_address = keccak256(address);
+        let account = self.state_trie.get_rlp::<TrieAccount>(&*hashed_address)?;
+
+        Ok(account)
    }
 
-    /// Get storage
-    #[cfg_attr(
-        feature = "dev",
-        tracing::instrument(level = tracing::Level::TRACE, skip(self, nodes_provider), ret, err)
-    )]
-    pub fn get_storage>(
+    /// Get the storage value of an account at a specific slot.
+    pub fn get_storage(
         &self,
-        nodes_provider: &P,
         address: Address,
         index: U256,
-    ) -> Result> {
-        let Some(storage_root) = self.storage_roots.borrow().get(&address).copied() else {
-            return Ok(None);
-        };
-        let path = Nibbles::unpack(keccak256(index.to_be_bytes::<{ U256::BYTES }>()));
+    ) -> Result<U256, PartialStateTrieError> {
+        let hashed_address = keccak256(address);
+
+        // Usual case: the account's storage trie is present in the witness.
+        if let Some(storage_trie) = self.storage_tries.get(&hashed_address) {
+            let key = keccak256(index.to_be_bytes::<32>());
+            let value = storage_trie.get_rlp::<U256>(&*key)?.unwrap_or_default();
+            return Ok(value);
+        }
 
-        let mut tries = self.storage_tries.borrow_mut();
-        let storage_trie = tries
-            .entry(address)
-            .or_insert_with(|| {
-                dev_trace!("open storage trie of {address} at {storage_root}");
-                open_trie(nodes_provider, storage_root).inspect_err(|_e| {
-                    dev_error!(
-                        "failed to open storage trie of {address} at {storage_root}, cause: {_e}"
-                    )
-                })
-            })
-            .as_mut()
-            .map_err(|_| PartialStateTrieError::PreviousError)?;
-        let Some(value) = storage_trie.get_leaf_value(&path) else {
-            return Ok(None);
-        };
-        let slot = U256::decode(&mut value.as_ref())?;
-        Ok(Some(slot))
-    }
+        // Storage trie is not present in the witness, validate that the witness is complete.
+        dev_trace!("storage trie of {address} absent from witness");
+        let account = self.state_trie.get_rlp::<TrieAccount>(&*hashed_address)?;
+        match account {
+            Some(account) => {
+                if account.storage_root != EMPTY_ROOT_HASH {
+                    unreachable!("pre-built storage trie shall be present");
+                }
+            }
+            None => {
+                dev_trace!("account of {address} absent from state trie");
+                todo!("Validate that account witness is valid");
+            }
+        }
 
-    /// Commit state changes and calculate the new state root
-    #[must_use]
-    #[cfg_attr(feature = "dev", tracing::instrument(level = tracing::Level::TRACE, skip_all, ret))]
-    pub fn commit_state(&mut self) -> B256 {
-        self.state.root()
+        // The account doesn't exist or has an empty storage root.
+        Ok(U256::ZERO)
    }
 
-    /// Update the trie with the new state
-    #[cfg_attr(feature = "dev", tracing::instrument(level = tracing::Level::TRACE, skip_all, err))]
-    pub fn update<'a, P: sbv_kv::KeyValueStoreGet>(
+    /// Mutate the trie with account diffs from the post-execution [`BundleAccount`]s and
+    /// return the new state root.
+    pub fn update(
         &mut self,
-        nodes_provider: P,
-        post_state: impl IntoIterator,
-    ) -> Result<()> {
+        post_state: BTreeMap<Address, BundleAccount>,
+    ) -> Result<B256, PartialStateTrieError> {
         for (address, account) in post_state.into_iter() {
             dev_trace!("update account: {address} {:?}", account.info);
 
-            let account_path = Nibbles::unpack(keccak256(address));
+            let address_hash = keccak256(address);
 
             if account.was_destroyed() {
-                self.state
-                    .remove_leaf(&account_path, DefaultTrieNodeProvider)?;
+                self.state_trie.delete(&*address_hash)?;
                 continue;
             }
 
+            let original_account = self.state_trie.get_rlp::<TrieAccount>(&*address_hash)?;
+            let original_storage_root = original_account
+                .as_ref()
+                .map(|acc| acc.storage_root)
+                .unwrap_or(EMPTY_ROOT_HASH);
+
             let storage_root = if !account.storage.is_empty() {
                 dev_trace!("non-empty storage, trie needs to be updated");
-                let trie = self
-                    .storage_tries
-                    .get_mut()
-                    .entry(*address)
-                    .or_insert_with(|| {
-                        let storage_root = self
-                            .storage_roots
-                            .get_mut()
-                            .get(address)
-                            .copied()
-                            .unwrap_or(EMPTY_ROOT_HASH);
-                        dev_trace!("open storage trie of {address} at {storage_root}");
-                        open_trie(&nodes_provider, storage_root)
-                            .inspect_err(|_e| {
-                                dev_error!(
-                                    "failed to open storage trie of {address} at {storage_root}, cause: {_e}"
-                                )
-                            })
-                    })
-                    .as_mut()
-                    .map_err(|_| PartialStateTrieError::PreviousError)?;
-                dev_trace!("opened storage trie of {address} at {}", trie.root());
+                let storage_trie = self.storage_tries.entry(address_hash).or_default();
+                debug_assert_eq!(storage_trie.hash(), original_storage_root);
+                dev_trace!(
+                    "opened storage trie of {address} at {}",
+                    storage_trie.hash()
+                );
 
                 for (key, slot) in BTreeMap::from_iter(account.storage.clone()) {
                     let key_hash = keccak256(key.to_be_bytes::<{ U256::BYTES }>());
-                    let path = Nibbles::unpack(key_hash);
-
                     dev_trace!(
                         "update storage of {address}: {key:#064X}={:#064X}, key_hash={key_hash}",
                         slot.present_value
                     );
 
                     if slot.present_value.is_zero() {
-                        trie.remove_leaf(&path, DefaultTrieNodeProvider)?;
+                        storage_trie.delete(&*key_hash)?;
                     } else {
-                        let value = encode_fixed_size(&slot.present_value);
-                        trie.update_leaf(path, value.to_vec(), DefaultTrieNodeProvider)?;
+                        storage_trie.insert_rlp(&*key_hash, slot.present_value)?;
                     }
                 }
 
-                trie.root()
+                storage_trie.hash()
             } else {
-                dev_trace!("empty storage, skip trie update");
-                self.storage_roots
-                    .get_mut()
-                    .get(address)
-                    .copied()
-                    .unwrap_or(EMPTY_ROOT_HASH)
+                original_storage_root
             };
 
             dev_trace!("current storage root: {storage_root}");
@@ -237,110 +148,9 @@ impl PartialStateTrie {
                 code_hash: info.code_hash,
             };
             dev_trace!("update account: {address} {:?}", account);
-            self.rlp_buffer.clear();
-            account.encode(&mut self.rlp_buffer);
-            self.state.update_leaf(
-                account_path,
-                self.rlp_buffer.clone(),
-                DefaultTrieNodeProvider,
-            )?;
+            self.state_trie.insert_rlp(&*address_hash, account)?;
         }
 
-        Ok(())
-    }
-}
-
-#[inline(always)]
-fn open_trie>(
-    nodes_provider: &P,
-    root: B256,
-) -> Result {
-    if root == EMPTY_ROOT_HASH {
-        return Ok(SerialSparseTrie::default());
-    }
-    let root_node = nodes_provider
-        .get(&root)
-        .ok_or(PartialStateTrieError::MissingWitness(root))?;
-    let root = TrieNode::decode(&mut root_node.as_ref())?;
-    let mut trie = SerialSparseTrie::from_root(root.clone(), TrieMasks::none(), false)?;
-    cycle_track!(
-        traverse_import_partial_trie(Nibbles::default(), root, nodes_provider, &mut trie),
-        "traverse_import_partial_trie"
-    )?;
-    Ok(trie)
-}
-
-#[inline(always)]
-fn traverse_import_partial_trie>(
-    path: Nibbles,
-    node: TrieNode,
-    nodes: &P,
-    trie: &mut SerialSparseTrie,
-) ->
Result<()> { - match node { - TrieNode::EmptyRoot => trie.reveal_node(path, node, TrieMasks::none())?, - TrieNode::Branch(ref branch) => { - let mut stack_ptr = branch.as_ref().first_child_index(); - let mut hash_mask = TrieMask::default(); - let mut tree_mask = TrieMask::default(); - - for idx in CHILD_INDEX_RANGE { - if branch.state_mask.is_bit_set(idx) { - let mut child_path = path; - child_path.push(idx); - let child_node = decode_rlp_node(nodes, &branch.stack[stack_ptr])?; - stack_ptr += 1; - - if let Some(child_node) = child_node { - traverse_import_partial_trie(child_path, child_node, nodes, trie)?; - tree_mask.set_bit(idx); - } else { - hash_mask.set_bit(idx); - } - } - } - - let trie_mask = TrieMasks { - hash_mask: Some(hash_mask), - tree_mask: Some(tree_mask), - }; - trie.reveal_node(path, node, trie_mask)?; - } - TrieNode::Leaf(_) => trie.reveal_node(path, node, TrieMasks::none())?, - TrieNode::Extension(ref extension) => { - let mut child_path = path; - child_path.extend(&extension.key); - - if let Some(child_node) = decode_rlp_node(nodes, &extension.child)? { - traverse_import_partial_trie(child_path, child_node, nodes, trie)?; - } - trie.reveal_node(path, node, TrieMasks::none())?; - } - }; - - Ok(()) -} - -#[inline(always)] -fn decode_rlp_node>( - nodes_provider: P, - node: &RlpNode, -) -> Result> { - if node.len() == B256::len_bytes() + 1 { - let hash = B256::from_slice(&node[1..]); - let Some(node_bytes) = nodes_provider.get(&hash) else { - return Ok(None); - }; - Ok(Some(TrieNode::decode(&mut node_bytes.as_ref())?)) - } else { - let mut buf = node.as_ref(); - Ok(Some(TrieNode::decode(&mut buf)?)) - } -} - -impl From for PartialStateTrieError { - #[inline] - fn from(value: SparseTrieError) -> Self { - PartialStateTrieError::Impl(format!("{value:?}")) + Ok(self.state_trie.hash()) } } diff --git a/crates/trie/src/mpt.rs b/crates/trie/src/mpt.rs new file mode 100644 index 00000000..d4a93047 --- /dev/null +++ b/crates/trie/src/mpt.rs @@ -0,0 +1,1034 @@ +//! This is copied and modified from https://github.com/succinctlabs/rsp +//! crates/mpt/src/mpt.rs rev@2a99f35a9b81452eb53af3848e50addfd481363c +//! Under MIT license +// This code is modified from the original implementation of Zeth. +// +// Reference: https://github.com/risc0/zeth +// +// Copyright 2023 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use alloy_rlp::{Decodable, EMPTY_STRING_CODE, Encodable, Header}; +use alloy_trie::EMPTY_ROOT_HASH; +use reth_trie::Nibbles; +use sbv_kv::HashMap; +use sbv_primitives::{B256, keccak256}; +use serde::{Deserialize, Serialize}; +use std::{cmp, fmt::Debug, iter, mem, sync::Mutex}; + +pub trait RlpBytes { + /// Returns the RLP-encoding. 
+ fn to_rlp(&self) -> Vec; +} + +impl RlpBytes for T +where + T: Encodable, +{ + #[inline] + fn to_rlp(&self) -> Vec { + let rlp_length = self.length(); + let mut out = Vec::with_capacity(rlp_length); + self.encode(&mut out); + debug_assert_eq!(out.len(), rlp_length); + out + } +} + +/// Represents the root node of a sparse Merkle Patricia Trie. +/// +/// The "sparse" nature of this trie allows for truncation of certain unneeded parts, +/// representing them by their node hash. This design choice is particularly useful for +/// optimizing storage. However, operations targeting a truncated part will fail and +/// return an error. Another distinction of this implementation is that branches cannot +/// store values, aligning with the construction of MPTs in Ethereum. +#[derive(Debug, Default, Serialize, Deserialize)] +pub struct MptNode { + /// The type and data of the node. + data: MptNodeData, + /// Cache for a previously computed reference of this node. This is skipped during + /// serialization. + #[serde(skip)] + cached_reference: Mutex>, +} + +impl Ord for MptNode { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.data.cmp(&other.data) + } +} + +impl PartialOrd for MptNode { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Eq for MptNode {} + +impl PartialEq for MptNode { + fn eq(&self, other: &Self) -> bool { + self.data == other.data + } +} + +impl Clone for MptNode { + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + cached_reference: Mutex::new(self.cached_reference.lock().unwrap().clone()), + } + } +} + +/// Represents custom error types for the sparse Merkle Patricia Trie (MPT). +/// +/// These errors cover various scenarios that can occur during trie operations, such as +/// encountering unresolved nodes, finding values in branches where they shouldn't be, and +/// issues related to RLP (Recursive Length Prefix) encoding and decoding. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Triggered when an operation reaches an unresolved node. The associated `B256` + /// value provides details about the unresolved node. + #[error("reached an unresolved node: {0:#}")] + NodeNotResolved(B256), + /// Occurs when a value is unexpectedly found in a branch node. + #[error("branch node with value")] + ValueInBranch, + /// Represents errors related to the RLP encoding and decoding using the `alloy_rlp` + /// library. + #[error("RLP error")] + Rlp(#[from] alloy_rlp::Error), +} + +/// Represents the various types of data that can be stored within a node in the sparse +/// Merkle Patricia Trie (MPT). +/// +/// Each node in the trie can be of one of several types, each with its own specific data +/// structure. This enum provides a clear and type-safe way to represent the data +/// associated with each node type. +#[derive(Clone, Debug, Default, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)] +pub enum MptNodeData { + /// Represents an empty trie node. + #[default] + Null, + /// A node that can have up to 16 children. Each child is an optional boxed [MptNode]. + Branch([Option>; 16]), + /// A leaf node that contains a key and a value, both represented as byte vectors. + Leaf(Vec, Vec), + /// A node that has exactly one child and is used to represent a shared prefix of + /// several keys. + Extension(Vec, Box), + /// Represents a sub-trie by its hash, allowing for efficient storage of large + /// sub-tries without storing their entire content. 
+ Digest(B256), +} + +/// Represents the ways in which one node can reference another node inside the sparse +/// Merkle Patricia Trie (MPT). +/// +/// Nodes in the MPT can reference other nodes either directly through their byte +/// representation or indirectly through a hash of their encoding. This enum provides a +/// clear and type-safe way to represent these references. +#[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd, Serialize, Deserialize)] +pub enum MptNodeReference { + /// Represents a direct reference to another node using its byte encoding. Typically + /// used for short encodings that are less than 32 bytes in length. + Bytes(Vec), + /// Represents an indirect reference to another node using the Keccak hash of its long + /// encoding. Used for encodings that are not less than 32 bytes in length. + Digest(B256), +} + +/// Provides a conversion from [MptNodeData] to [MptNode]. +/// +/// This implementation allows for conversion from [MptNodeData] to [MptNode], +/// initializing the `data` field with the provided value and setting the +/// `cached_reference` field to `None`. +impl From for MptNode { + fn from(value: MptNodeData) -> Self { + Self { + data: value, + cached_reference: Mutex::new(None), + } + } +} + +/// Provides encoding functionalities for the `MptNode` type. +/// +/// This implementation allows for the serialization of an [MptNode] into its RLP-encoded +/// form. The encoding is done based on the type of node data ([MptNodeData]) it holds. +impl Encodable for MptNode { + /// Encodes the node into the provided `out` buffer. + /// + /// The encoding is done using the Recursive Length Prefix (RLP) encoding scheme. The + /// method handles different node data types and encodes them accordingly. + #[inline] + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + match &self.data { + MptNodeData::Null => { + out.put_u8(EMPTY_STRING_CODE); + } + MptNodeData::Branch(nodes) => { + Header { + list: true, + payload_length: self.payload_length(), + } + .encode(out); + nodes.iter().for_each(|child| match child { + Some(node) => node.reference_encode(out), + None => out.put_u8(EMPTY_STRING_CODE), + }); + // in the MPT reference, branches have values so always add empty value + out.put_u8(EMPTY_STRING_CODE); + } + MptNodeData::Leaf(prefix, value) => { + Header { + list: true, + payload_length: self.payload_length(), + } + .encode(out); + prefix.as_slice().encode(out); + value.as_slice().encode(out); + } + MptNodeData::Extension(prefix, node) => { + Header { + list: true, + payload_length: self.payload_length(), + } + .encode(out); + prefix.as_slice().encode(out); + node.reference_encode(out); + } + MptNodeData::Digest(digest) => { + digest.encode(out); + } + } + } + + /// Returns the length of the encoded node in bytes. + /// + /// This method calculates the length of the RLP-encoded node. It's useful for + /// determining the size requirements for storage or transmission. + #[inline] + fn length(&self) -> usize { + let payload_length = self.payload_length(); + payload_length + alloy_rlp::length_of_length(payload_length) + } +} + +/// Provides decoding functionalities for the [MptNode] type. +/// +/// This implementation allows for the deserialization of an RLP-encoded [MptNode] back +/// into its original form. The decoding is done based on the prototype of the RLP data, +/// ensuring that the node is reconstructed accurately. 
+impl Decodable for MptNode {
+    #[inline]
+    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
+        let mut items = match Header::decode_raw(buf)? {
+            alloy_rlp::PayloadView::List(list) => list,
+            alloy_rlp::PayloadView::String(val) => {
+                return if val.is_empty() {
+                    Ok(MptNodeData::Null.into())
+                } else if val.len() == 32 {
+                    Ok(MptNodeData::Digest(B256::from_slice(val)).into())
+                } else {
+                    Err(alloy_rlp::Error::Custom("invalid digest"))
+                };
+            }
+        };
+
+        // A valid number of trie node items is either 17 (branch node)
+        // or 2 (extension or leaf node).
+        match items.len() {
+            17 => {
+                let mut node_list = Vec::with_capacity(16);
+                for item in items.iter().take(16) {
+                    if *item == [EMPTY_STRING_CODE] {
+                        node_list.push(None);
+                    } else {
+                        node_list.push(Some(Box::new(MptNode::decode(&mut &**item)?)));
+                    }
+                }
+                if items[16] != [EMPTY_STRING_CODE] {
+                    return Err(alloy_rlp::Error::Custom(
+                        "branch node values are not supported",
+                    ));
+                }
+                Ok(MptNodeData::Branch(node_list.try_into().unwrap()).into())
+            }
+            2 => {
+                let path = Header::decode_bytes(&mut &*items[0], false)?;
+                let prefix = path[0];
+                if (prefix & (2 << 4)) == 0 {
+                    let node = MptNode::decode(&mut items[1])?;
+                    Ok(MptNodeData::Extension(path.to_vec(), Box::new(node)).into())
+                } else {
+                    let value = Header::decode_bytes(&mut &*items[1], false)?;
+                    Ok(MptNodeData::Leaf(path.to_vec(), value.to_vec()).into())
+                }
+            }
+            _ => Err(alloy_rlp::Error::Custom(
+                "invalid number of items in the list",
+            )),
+        }
+    }
+}
+
+/// Represents a node in the sparse Merkle Patricia Trie (MPT).
+///
+/// The [MptNode] type encapsulates the data and functionalities associated with a node in
+/// the MPT. It provides methods for manipulating the trie, such as inserting, deleting,
+/// and retrieving values, as well as utility methods for encoding, decoding, and
+/// debugging.
+impl MptNode {
+    /// Retrieves the underlying data of the node.
+    ///
+    /// This method provides a reference to the node's data, allowing for inspection and
+    /// manipulation.
+    #[inline]
+    pub fn as_data(&self) -> &MptNodeData {
+        &self.data
+    }
+
+    /// Retrieves the [MptNodeReference] reference of the node when it's referenced inside
+    /// another node.
+    ///
+    /// This method provides a way to obtain a compact representation of the node for
+    /// storage or transmission purposes.
+    #[inline]
+    pub fn reference(&self) -> MptNodeReference {
+        self.cached_reference
+            .lock()
+            .unwrap()
+            .get_or_insert_with(|| self.calc_reference())
+            .clone()
+    }
+
+    /// Visits every reachable leaf, calling `f` with the leaf's packed key nibbles and
+    /// its raw value.
+    pub fn for_each_leaves<F: FnMut(&[u8], &[u8])>(&self, mut f: F) {
+        let mut stack = vec![(self, Nibbles::default())];
+
+        while let Some((node, path)) = stack.pop() {
+            match node.as_data() {
+                MptNodeData::Null | MptNodeData::Digest(_) => (),
+                MptNodeData::Branch(branch) => {
+                    for (i, n) in branch
+                        .iter()
+                        .enumerate()
+                        .filter_map(|(i, n)| n.as_ref().map(|n| (i, n)))
+                    {
+                        let mut new_path = path;
+                        new_path.push(i as u8);
+                        stack.push((n, new_path));
+                    }
+                }
+                MptNodeData::Leaf(prefix, value) => {
+                    let mut full_path = path;
+                    full_path.extend(&Nibbles::from_nibbles(prefix_nibs(prefix)));
+                    f(&full_path.pack(), value)
+                }
+                MptNodeData::Extension(prefix, node) => {
+                    let mut new_path = path;
+                    new_path.extend(&Nibbles::from_nibbles(prefix_nibs(prefix)));
+                    stack.push((node, new_path));
+                }
+            }
+        }
+    }
+
+    /// Computes and returns the 256-bit hash of the node.
+ /// + /// This method provides a unique identifier for the node based on its content. + #[inline] + pub fn hash(&self) -> B256 { + match self.data { + MptNodeData::Null => EMPTY_ROOT_HASH, + _ => match self.reference() { + MptNodeReference::Digest(digest) => digest, + MptNodeReference::Bytes(bytes) => keccak256(bytes), + }, + } + } + + /// Encodes the [MptNodeReference] of this node into the `out` buffer. + fn reference_encode(&self, out: &mut dyn alloy_rlp::BufMut) { + match self.reference() { + // if the reference is an RLP-encoded byte slice, copy it directly + MptNodeReference::Bytes(bytes) => out.put_slice(&bytes), + // if the reference is a digest, RLP-encode it with its fixed known length + MptNodeReference::Digest(digest) => { + out.put_u8(alloy_rlp::EMPTY_STRING_CODE + 32); + out.put_slice(digest.as_slice()); + } + } + } + + /// Returns the length of the encoded [MptNodeReference] of this node. + fn reference_length(&self) -> usize { + match self.reference() { + MptNodeReference::Bytes(bytes) => bytes.len(), + MptNodeReference::Digest(_) => 1 + 32, + } + } + + fn calc_reference(&self) -> MptNodeReference { + match &self.data { + MptNodeData::Null => MptNodeReference::Bytes(vec![alloy_rlp::EMPTY_STRING_CODE]), + MptNodeData::Digest(digest) => MptNodeReference::Digest(*digest), + _ => { + let encoded = alloy_rlp::encode(self); + if encoded.len() < 32 { + MptNodeReference::Bytes(encoded) + } else { + MptNodeReference::Digest(keccak256(encoded)) + } + } + } + } + + /// Determines if the trie is empty. + /// + /// This method checks if the node represents an empty trie, i.e., it doesn't contain + /// any key-value pairs. + #[inline] + pub fn is_empty(&self) -> bool { + matches!(&self.data, MptNodeData::Null) + } + + /// Determines if the node represents a digest. + /// + /// A digest is a compact representation of a sub-trie, represented by its hash. + #[inline] + pub fn is_digest(&self) -> bool { + matches!(&self.data, MptNodeData::Digest(_)) + } + + /// Retrieves the value associated with a given key in the trie. + /// + /// If the key is not present in the trie, this method returns `None`. Otherwise, it + /// returns a reference to the associated value. If [None] is returned, the key is + /// provably not in the trie. + #[inline] + pub fn get(&self, key: &[u8]) -> Result, Error> { + self.get_internal(&to_nibs(key)) + } + + /// Retrieves the RLP-decoded value corresponding to the key. + /// + /// If the key is not present in the trie, this method returns `None`. Otherwise, it + /// returns the RLP-decoded value. + #[inline] + pub fn get_rlp(&self, key: &[u8]) -> Result, Error> { + match self.get(key)? { + Some(mut bytes) => Ok(Some(T::decode(&mut bytes)?)), + None => Ok(None), + } + } + + fn get_internal(&self, key_nibs: &[u8]) -> Result, Error> { + match &self.data { + MptNodeData::Null => Ok(None), + MptNodeData::Branch(nodes) => { + if let Some((i, tail)) = key_nibs.split_first() { + match nodes[*i as usize] { + Some(ref node) => node.get_internal(tail), + None => Ok(None), + } + } else { + Ok(None) + } + } + MptNodeData::Leaf(prefix, value) => { + if prefix_nibs(prefix) == key_nibs { + Ok(Some(value)) + } else { + Ok(None) + } + } + MptNodeData::Extension(prefix, node) => { + if let Some(tail) = key_nibs.strip_prefix(prefix_nibs(prefix).as_slice()) { + node.get_internal(tail) + } else { + Ok(None) + } + } + MptNodeData::Digest(digest) => Err(Error::NodeNotResolved(*digest)), + } + } + + /// Removes a key from the trie. 
+ /// + /// This method attempts to remove a key-value pair from the trie. If the key is + /// present, it returns `true`. Otherwise, it returns `false`. + #[inline] + pub fn delete(&mut self, key: &[u8]) -> Result { + self.delete_internal(&to_nibs(key)) + } + + fn delete_internal(&mut self, key_nibs: &[u8]) -> Result { + match &mut self.data { + MptNodeData::Null => return Ok(false), + MptNodeData::Branch(children) => { + if let Some((i, tail)) = key_nibs.split_first() { + let child = &mut children[*i as usize]; + match child { + Some(node) => { + if !node.delete_internal(tail)? { + return Ok(false); + } + // if the node is now empty, remove it + if node.is_empty() { + *child = None; + } + } + None => return Ok(false), + } + } else { + return Err(Error::ValueInBranch); + } + + let mut remaining = children.iter_mut().enumerate().filter(|(_, n)| n.is_some()); + // there will always be at least one remaining node + let (index, node) = remaining.next().unwrap(); + // if there is only exactly one node left, we need to convert the branch + if remaining.next().is_none() { + let mut orphan = node.take().unwrap(); + match &mut orphan.data { + // if the orphan is a leaf, prepend the corresponding nib to it + MptNodeData::Leaf(prefix, orphan_value) => { + let new_nibs: Vec<_> = + iter::once(index as u8).chain(prefix_nibs(prefix)).collect(); + self.data = MptNodeData::Leaf( + to_encoded_path(&new_nibs, true), + mem::take(orphan_value), + ); + } + // if the orphan is an extension, prepend the corresponding nib to it + MptNodeData::Extension(prefix, orphan_child) => { + let new_nibs: Vec<_> = + iter::once(index as u8).chain(prefix_nibs(prefix)).collect(); + self.data = MptNodeData::Extension( + to_encoded_path(&new_nibs, false), + mem::take(orphan_child), + ); + } + // if the orphan is a branch or digest, convert to an extension + MptNodeData::Branch(_) | MptNodeData::Digest(_) => { + self.data = MptNodeData::Extension( + to_encoded_path(&[index as u8], false), + orphan, + ); + } + MptNodeData::Null => unreachable!(), + } + } + } + MptNodeData::Leaf(prefix, _) => { + if prefix_nibs(prefix) != key_nibs { + return Ok(false); + } + self.data = MptNodeData::Null; + } + MptNodeData::Extension(prefix, child) => { + let mut self_nibs = prefix_nibs(prefix); + if let Some(tail) = key_nibs.strip_prefix(self_nibs.as_slice()) { + if !child.delete_internal(tail)? { + return Ok(false); + } + } else { + return Ok(false); + } + + // an extension can only point to a branch or a digest; since it's sub trie was + // modified, we need to make sure that this property still holds + match &mut child.data { + // if the child is empty, remove the extension + MptNodeData::Null => { + self.data = MptNodeData::Null; + } + // for a leaf, replace the extension with the extended leaf + MptNodeData::Leaf(prefix, value) => { + self_nibs.extend(prefix_nibs(prefix)); + self.data = + MptNodeData::Leaf(to_encoded_path(&self_nibs, true), mem::take(value)); + } + // for an extension, replace the extension with the extended extension + MptNodeData::Extension(prefix, node) => { + self_nibs.extend(prefix_nibs(prefix)); + self.data = MptNodeData::Extension( + to_encoded_path(&self_nibs, false), + mem::take(node), + ); + } + // for a branch or digest, the extension is still correct + MptNodeData::Branch(_) | MptNodeData::Digest(_) => {} + } + } + MptNodeData::Digest(digest) => return Err(Error::NodeNotResolved(*digest)), + }; + + self.invalidate_ref_cache(); + Ok(true) + } + + /// Inserts an RLP-encoded value into the trie. 
+ /// + /// This method inserts a value that's been encoded using RLP into the trie. + #[inline] + pub fn insert_rlp(&mut self, key: &[u8], value: impl Encodable) -> Result { + self.insert_internal(&to_nibs(key), value.to_rlp()) + } + + fn insert_internal(&mut self, key_nibs: &[u8], value: Vec) -> Result { + match &mut self.data { + MptNodeData::Null => { + self.data = MptNodeData::Leaf(to_encoded_path(key_nibs, true), value); + } + MptNodeData::Branch(children) => { + if let Some((i, tail)) = key_nibs.split_first() { + let child = &mut children[*i as usize]; + match child { + Some(node) => { + if !node.insert_internal(tail, value)? { + return Ok(false); + } + } + // if the corresponding child is empty, insert a new leaf + None => { + *child = Some(Box::new( + MptNodeData::Leaf(to_encoded_path(tail, true), value).into(), + )); + } + } + } else { + return Err(Error::ValueInBranch); + } + } + MptNodeData::Leaf(prefix, old_value) => { + let self_nibs = prefix_nibs(prefix); + let common_len = lcp(&self_nibs, key_nibs); + if common_len == self_nibs.len() && common_len == key_nibs.len() { + // if self_nibs == key_nibs, update the value if it is different + if old_value == &value { + return Ok(false); + } + *old_value = value; + } else if common_len == self_nibs.len() || common_len == key_nibs.len() { + return Err(Error::ValueInBranch); + } else { + let split_point = common_len + 1; + // otherwise, create a branch with two children + let mut children: [Option>; 16] = Default::default(); + + children[self_nibs[common_len] as usize] = Some(Box::new( + MptNodeData::Leaf( + to_encoded_path(&self_nibs[split_point..], true), + mem::take(old_value), + ) + .into(), + )); + children[key_nibs[common_len] as usize] = Some(Box::new( + MptNodeData::Leaf(to_encoded_path(&key_nibs[split_point..], true), value) + .into(), + )); + + let branch = MptNodeData::Branch(children); + if common_len > 0 { + // create parent extension for new branch + self.data = MptNodeData::Extension( + to_encoded_path(&self_nibs[..common_len], false), + Box::new(branch.into()), + ); + } else { + self.data = branch; + } + } + } + MptNodeData::Extension(prefix, existing_child) => { + let self_nibs = prefix_nibs(prefix); + let common_len = lcp(&self_nibs, key_nibs); + if common_len == self_nibs.len() { + // traverse down for update + if !existing_child.insert_internal(&key_nibs[common_len..], value)? 
{ + return Ok(false); + } + } else if common_len == key_nibs.len() { + return Err(Error::ValueInBranch); + } else { + let split_point = common_len + 1; + // otherwise, create a branch with two children + let mut children: [Option>; 16] = Default::default(); + + children[self_nibs[common_len] as usize] = if split_point < self_nibs.len() { + Some(Box::new( + MptNodeData::Extension( + to_encoded_path(&self_nibs[split_point..], false), + mem::take(existing_child), + ) + .into(), + )) + } else { + Some(mem::take(existing_child)) + }; + children[key_nibs[common_len] as usize] = Some(Box::new( + MptNodeData::Leaf(to_encoded_path(&key_nibs[split_point..], true), value) + .into(), + )); + + let branch = MptNodeData::Branch(children); + if common_len > 0 { + // Create parent extension for new branch + self.data = MptNodeData::Extension( + to_encoded_path(&self_nibs[..common_len], false), + Box::new(branch.into()), + ); + } else { + self.data = branch; + } + } + } + MptNodeData::Digest(digest) => return Err(Error::NodeNotResolved(*digest)), + }; + + self.invalidate_ref_cache(); + Ok(true) + } + + fn invalidate_ref_cache(&mut self) { + self.cached_reference.lock().unwrap().take(); + } + + /// Returns the length of the RLP payload of the node. + fn payload_length(&self) -> usize { + match &self.data { + MptNodeData::Null => 0, + MptNodeData::Branch(nodes) => { + 1 + nodes + .iter() + .map(|child| child.as_ref().map_or(1, |node| node.reference_length())) + .sum::() + } + MptNodeData::Leaf(prefix, value) => { + prefix.as_slice().length() + value.as_slice().length() + } + MptNodeData::Extension(prefix, node) => { + prefix.as_slice().length() + node.reference_length() + } + MptNodeData::Digest(_) => 32, + } + } +} + +/// Converts a byte slice into a vector of nibbles. +/// +/// A nibble is 4 bits or half of an 8-bit byte. This function takes each byte from the +/// input slice, splits it into two nibbles, and appends them to the resulting vector. +pub fn to_nibs(slice: &[u8]) -> Vec { + let mut result = Vec::with_capacity(2 * slice.len()); + for byte in slice { + result.push(byte >> 4); + result.push(byte & 0xf); + } + result +} + +/// Encodes a slice of nibbles into a vector of bytes, with an additional prefix to +/// indicate the type of node (leaf or extension). +/// +/// The function starts by determining the type of node based on the `is_leaf` parameter. +/// If the node is a leaf, the prefix is set to `0x20`. If the length of the nibbles is +/// odd, the prefix is adjusted and the first nibble is incorporated into it. +/// +/// The remaining nibbles are then combined into bytes, with each pair of nibbles forming +/// a single byte. The resulting vector starts with the prefix, followed by the encoded +/// bytes. +pub fn to_encoded_path(mut nibs: &[u8], is_leaf: bool) -> Vec { + let mut prefix = (is_leaf as u8) * 0x20; + if nibs.len() % 2 != 0 { + prefix += 0x10 + nibs[0]; + nibs = &nibs[1..]; + } + iter::once(prefix) + .chain(nibs.chunks_exact(2).map(|byte| (byte[0] << 4) + byte[1])) + .collect() +} + +/// Returns the length of the common prefix. 
+fn lcp(a: &[u8], b: &[u8]) -> usize { + for (i, (a, b)) in iter::zip(a, b).enumerate() { + if a != b { + return i; + } + } + cmp::min(a.len(), b.len()) +} + +fn prefix_nibs(prefix: &[u8]) -> Vec { + let (extension, tail) = prefix.split_first().unwrap(); + // the first bit of the first nibble denotes the parity + let is_odd = extension & (1 << 4) != 0; + + let mut result = Vec::with_capacity(2 * tail.len() + is_odd as usize); + // for odd lengths, the second nibble contains the first element + if is_odd { + result.push(extension & 0xf); + } + for nib in tail { + result.push(nib >> 4); + result.push(nib & 0xf); + } + result +} + +/// Creates a new MPT trie where all the digests contained in `node_store` are resolved. +pub fn resolve_nodes(root: &MptNode, node_store: &HashMap) -> MptNode { + let trie = match root.as_data() { + MptNodeData::Null | MptNodeData::Leaf(_, _) => root.clone(), + MptNodeData::Branch(children) => { + let children: Vec<_> = children + .iter() + .map(|child| { + child + .as_ref() + .map(|node| Box::new(resolve_nodes(node, node_store))) + }) + .collect(); + MptNodeData::Branch(children.try_into().unwrap()).into() + } + MptNodeData::Extension(prefix, target) => { + MptNodeData::Extension(prefix.clone(), Box::new(resolve_nodes(target, node_store))) + .into() + } + MptNodeData::Digest(digest) => { + if let Some(node) = node_store.get(&MptNodeReference::Digest(*digest)) { + resolve_nodes(node, node_store) + } else { + root.clone() + } + } + }; + // the root hash must not change + debug_assert_eq!(root.hash(), trie.hash()); + + trie +} + +#[cfg(test)] +mod tests { + use super::*; + use sbv_primitives::alloy_primitives::hex; + + #[test] + pub fn test_trie_pointer_no_keccak() { + let cases = [ + ("do", "verb"), + ("dog", "puppy"), + ("doge", "coin"), + ("horse", "stallion"), + ]; + for (k, v) in cases { + let node: MptNode = + MptNodeData::Leaf(k.as_bytes().to_vec(), v.as_bytes().to_vec()).into(); + assert!( + matches!(node.reference(),MptNodeReference::Bytes(bytes) if bytes == node.to_rlp().to_vec()) + ); + } + } + + #[test] + pub fn test_to_encoded_path() { + // extension node with an even path length + let nibbles = vec![0x0a, 0x0b, 0x0c, 0x0d]; + assert_eq!(to_encoded_path(&nibbles, false), vec![0x00, 0xab, 0xcd]); + // extension node with an odd path length + let nibbles = vec![0x0a, 0x0b, 0x0c]; + assert_eq!(to_encoded_path(&nibbles, false), vec![0x1a, 0xbc]); + // leaf node with an even path length + let nibbles = vec![0x0a, 0x0b, 0x0c, 0x0d]; + assert_eq!(to_encoded_path(&nibbles, true), vec![0x20, 0xab, 0xcd]); + // leaf node with an odd path length + let nibbles = vec![0x0a, 0x0b, 0x0c]; + assert_eq!(to_encoded_path(&nibbles, true), vec![0x3a, 0xbc]); + } + + #[test] + pub fn test_lcp() { + let cases = [ + (vec![], vec![], 0), + (vec![0xa], vec![0xa], 1), + (vec![0xa, 0xb], vec![0xa, 0xc], 1), + (vec![0xa, 0xb], vec![0xa, 0xb], 2), + (vec![0xa, 0xb], vec![0xa, 0xb, 0xc], 2), + (vec![0xa, 0xb, 0xc], vec![0xa, 0xb, 0xc], 3), + (vec![0xa, 0xb, 0xc], vec![0xa, 0xb, 0xc, 0xd], 3), + (vec![0xa, 0xb, 0xc, 0xd], vec![0xa, 0xb, 0xc, 0xd], 4), + ]; + for (a, b, cpl) in cases { + assert_eq!(lcp(&a, &b), cpl) + } + } + + #[test] + pub fn test_empty() { + let trie = MptNode::default(); + + assert!(trie.is_empty()); + assert_eq!(trie.reference(), MptNodeReference::Bytes(vec![0x80])); + let expected = hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"); + assert_eq!(expected, trie.hash().0); + + // test RLP encoding + let mut out = Vec::new(); + 
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use sbv_primitives::alloy_primitives::hex;
+
+    #[test]
+    pub fn test_trie_pointer_no_keccak() {
+        let cases = [
+            ("do", "verb"),
+            ("dog", "puppy"),
+            ("doge", "coin"),
+            ("horse", "stallion"),
+        ];
+        for (k, v) in cases {
+            let node: MptNode =
+                MptNodeData::Leaf(k.as_bytes().to_vec(), v.as_bytes().to_vec()).into();
+            assert!(
+                matches!(node.reference(), MptNodeReference::Bytes(bytes) if bytes == node.to_rlp().to_vec())
+            );
+        }
+    }
+
+    #[test]
+    pub fn test_to_encoded_path() {
+        // extension node with an even path length
+        let nibbles = vec![0x0a, 0x0b, 0x0c, 0x0d];
+        assert_eq!(to_encoded_path(&nibbles, false), vec![0x00, 0xab, 0xcd]);
+        // extension node with an odd path length
+        let nibbles = vec![0x0a, 0x0b, 0x0c];
+        assert_eq!(to_encoded_path(&nibbles, false), vec![0x1a, 0xbc]);
+        // leaf node with an even path length
+        let nibbles = vec![0x0a, 0x0b, 0x0c, 0x0d];
+        assert_eq!(to_encoded_path(&nibbles, true), vec![0x20, 0xab, 0xcd]);
+        // leaf node with an odd path length
+        let nibbles = vec![0x0a, 0x0b, 0x0c];
+        assert_eq!(to_encoded_path(&nibbles, true), vec![0x3a, 0xbc]);
+    }
+
+    #[test]
+    pub fn test_lcp() {
+        let cases = [
+            (vec![], vec![], 0),
+            (vec![0xa], vec![0xa], 1),
+            (vec![0xa, 0xb], vec![0xa, 0xc], 1),
+            (vec![0xa, 0xb], vec![0xa, 0xb], 2),
+            (vec![0xa, 0xb], vec![0xa, 0xb, 0xc], 2),
+            (vec![0xa, 0xb, 0xc], vec![0xa, 0xb, 0xc], 3),
+            (vec![0xa, 0xb, 0xc], vec![0xa, 0xb, 0xc, 0xd], 3),
+            (vec![0xa, 0xb, 0xc, 0xd], vec![0xa, 0xb, 0xc, 0xd], 4),
+        ];
+        for (a, b, cpl) in cases {
+            assert_eq!(lcp(&a, &b), cpl)
+        }
+    }
+
+    #[test]
+    pub fn test_empty() {
+        let trie = MptNode::default();
+
+        assert!(trie.is_empty());
+        assert_eq!(trie.reference(), MptNodeReference::Bytes(vec![0x80]));
+        let expected = hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421");
+        assert_eq!(expected, trie.hash().0);
+
+        // test RLP encoding
+        let mut out = Vec::new();
+        trie.encode(&mut out);
+        assert_eq!(out, vec![0x80]);
+        assert_eq!(trie.length(), out.len());
+        let decoded = MptNode::decode(&mut &*out).unwrap();
+        assert_eq!(trie.hash(), decoded.hash());
+    }
+
+    #[test]
+    pub fn test_tiny() {
+        // trie consisting of an extension, a branch and two leaves
+        let mut trie = MptNode::default();
+        trie.insert_rlp(b"a", 0u8).unwrap();
+        trie.insert_rlp(b"b", 1u8).unwrap();
+
+        assert!(!trie.is_empty());
+        let exp_rlp = hex!("d816d680c3208180c220018080808080808080808080808080");
+        assert_eq!(trie.reference(), MptNodeReference::Bytes(exp_rlp.to_vec()));
+        let exp_hash = hex!("6fbf23d6ec055dd143ff50d558559770005ff44ae1d41276f1bd83affab6dd3b");
+        assert_eq!(trie.hash().0, exp_hash);
+
+        // test RLP encoding
+        let mut out = Vec::new();
+        trie.encode(&mut out);
+        assert_eq!(out, exp_rlp.to_vec());
+        assert_eq!(trie.length(), out.len());
+        let decoded = MptNode::decode(&mut &*out).unwrap();
+        assert_eq!(trie.hash(), decoded.hash());
+    }
+
+    #[test]
+    pub fn test_partial() {
+        let mut trie = MptNode::default();
+        trie.insert_rlp(b"aa", 0u8).unwrap();
+        trie.insert_rlp(b"ab", 1u8).unwrap();
+        trie.insert_rlp(b"ba", 2u8).unwrap();
+
+        let exp_hash = trie.hash();
+
+        // replace one node with its digest
+        let MptNodeData::Extension(_, node) = &mut trie.data else {
+            panic!("extension expected")
+        };
+        **node = MptNodeData::Digest(node.hash()).into();
+        assert!(node.is_digest());
+
+        let trie = MptNode::decode(&mut &*trie.to_rlp()).unwrap();
+        assert_eq!(trie.hash(), exp_hash);
+
+        // lookups should fail
+        trie.get(b"aa").unwrap_err();
+        trie.get(b"a0").unwrap_err();
+    }
+
+    #[test]
+    pub fn test_keccak_trie() {
+        const N: usize = 512;
+
+        // insert
+        let mut trie = MptNode::default();
+        for i in 0..N {
+            assert!(trie.insert_rlp(&*keccak256(i.to_be_bytes()), i).unwrap());
+
+            // check the hash against a trie built in reverse
+            let mut reference = MptNode::default();
+            for j in (0..=i).rev() {
+                reference
+                    .insert_rlp(&*keccak256(j.to_be_bytes()), j)
+                    .unwrap();
+            }
+            assert_eq!(trie.hash(), reference.hash());
+        }
+
+        let expected = hex!("7310027edebdd1f7c950a7fb3413d551e85dff150d45aca4198c2f6315f9b4a7");
+        assert_eq!(trie.hash().0, expected);
+
+        // get
+        for i in 0..N {
+            assert_eq!(trie.get_rlp(&*keccak256(i.to_be_bytes())).unwrap(), Some(i));
+            assert!(
+                trie.get(&*keccak256((i + N).to_be_bytes()))
+                    .unwrap()
+                    .is_none()
+            );
+        }
+
+        // delete
+        for i in 0..N {
+            assert!(trie.delete(&*keccak256(i.to_be_bytes())).unwrap());
+
+            let mut reference = MptNode::default();
+            for j in ((i + 1)..N).rev() {
+                reference
+                    .insert_rlp(&*keccak256(j.to_be_bytes()), j)
+                    .unwrap();
+            }
+            assert_eq!(trie.hash(), reference.hash());
+        }
+        assert!(trie.is_empty());
+    }
+
+    #[test]
+    pub fn test_index_trie() {
+        const N: usize = 512;
+
+        // insert
+        let mut trie = MptNode::default();
+        for i in 0..N {
+            assert!(trie.insert_rlp(&i.to_rlp(), i).unwrap());
+
+            // check the hash against a trie built in reverse
+            let mut reference = MptNode::default();
+            for j in (0..=i).rev() {
+                reference.insert_rlp(&j.to_rlp(), j).unwrap();
+            }
+            assert_eq!(trie.hash(), reference.hash());
+
+            // try RLP roundtrip
+            let decoded = MptNode::decode(&mut &*trie.to_rlp()).unwrap();
+            assert_eq!(trie.hash(), decoded.hash());
+        }
+
+        // get
+        for i in 0..N {
+            assert_eq!(trie.get_rlp(&i.to_rlp()).unwrap(), Some(i));
+            assert!(trie.get(&(i + N).to_rlp()).unwrap().is_none());
+        }
+
+        // delete
+        for i in 0..N {
+            assert!(trie.delete(&i.to_rlp()).unwrap());
+
+            let mut reference = MptNode::default();
+            for j in ((i + 1)..N).rev() {
+                reference.insert_rlp(&j.to_rlp(), j).unwrap();
+            }
+            assert_eq!(trie.hash(), reference.hash());
+        }
+        assert!(trie.is_empty());
+    }
+}
diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml
index 839be680..c8d6898a 100644
--- a/crates/utils/Cargo.toml
+++ b/crates/utils/Cargo.toml
@@ -23,9 +23,11 @@ thiserror.workspace = true
 serde = { workspace = true, features = ["derive"] }
 
 sbv-primitives = { workspace = true, features = ["network-types"] }
+sbv-core.workspace = true
 
 [features]
 scroll = [
     "sbv-primitives/scroll-network-types",
     "sbv-primitives/scroll-compress-ratio",
+    "sbv-core/scroll",
 ]
diff --git a/crates/utils/src/rpc.rs b/crates/utils/src/rpc.rs
index fc446d39..a76ea8d4 100644
--- a/crates/utils/src/rpc.rs
+++ b/crates/utils/src/rpc.rs
@@ -3,11 +3,12 @@
 use crate::witness::WitnessBuilder;
 use alloy_provider::Provider;
 use alloy_transport::TransportResult;
+use sbv_core::witness::BlockWitness;
 use sbv_primitives::{
     B256, BlockNumber, Bytes, ChainId,
     alloy_primitives::map::B256HashMap,
     types::{
-        BlockWitness, Network,
+        Network,
         eips::BlockNumberOrTag,
         rpc::{Block, ExecutionWitness},
     },
diff --git a/crates/utils/src/witness.rs b/crates/utils/src/witness.rs
index 765d81a2..33efdc9a 100644
--- a/crates/utils/src/witness.rs
+++ b/crates/utils/src/witness.rs
@@ -1,11 +1,9 @@
 //! Witness builder.
+use sbv_core::witness::BlockWitness;
 
 use sbv_primitives::{
     B256, ChainId,
-    types::{
-        BlockWitness,
-        rpc::{Block as RpcBlock, ExecutionWitness},
-    },
+    types::rpc::{Block as RpcBlock, ExecutionWitness},
 };
 
 /// Block witness builder.