diff --git a/go.mod b/go.mod index 15e45c4eb6e6..d1a2581dd9fb 100644 --- a/go.mod +++ b/go.mod @@ -21,15 +21,16 @@ require ( github.com/DataDog/zstd v1.5.2 github.com/StephenButtolph/canoto v0.17.2 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/coreth v0.15.4-rc.3 + github.com/ava-labs/coreth v0.15.4-rc.3.0.20251001215137-83dbf26810fb github.com/ava-labs/ledger-avalanche-go v1.1.0 - github.com/ava-labs/libevm v1.13.14-0.3.0.rc.6 + github.com/ava-labs/libevm v1.13.15-0.20250925141903-414b1f5dffea github.com/btcsuite/btcd/btcutil v1.1.3 github.com/cespare/xxhash/v2 v2.3.0 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/compose-spec/compose-go v1.20.2 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 github.com/google/btree v1.1.2 + github.com/google/go-cmp v0.7.0 github.com/google/renameio/v2 v2.0.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 @@ -54,7 +55,7 @@ require ( github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 github.com/spf13/viper v1.20.1 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 github.com/supranational/blst v0.3.14 github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a github.com/thepudds/fzgen v0.4.3 @@ -68,14 +69,14 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/mock v0.5.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.41.0 + golang.org/x/crypto v0.42.0 golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e golang.org/x/mod v0.28.0 - golang.org/x/net v0.43.0 - golang.org/x/sync v0.16.0 - golang.org/x/term v0.34.0 + golang.org/x/net v0.44.0 + golang.org/x/sync v0.17.0 + golang.org/x/term v0.35.0 golang.org/x/time v0.12.0 - golang.org/x/tools v0.36.0 + golang.org/x/tools v0.37.0 gonum.org/v1/gonum v0.16.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c google.golang.org/grpc v1.75.0 @@ -195,8 +196,8 @@ require ( go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/text v0.28.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/text v0.29.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 3a1258c72cf6..1dfa6f9412a7 100644 --- a/go.sum +++ b/go.sum @@ -32,14 +32,14 @@ github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ava-labs/coreth v0.15.4-rc.3 h1:v33OOerxpGIKa1MpljXMBB3Yljy23xzsez3E/dn7TzY= -github.com/ava-labs/coreth v0.15.4-rc.3/go.mod h1:Esb0FK+KJr6co7rrhtBWsmSMXEL5JWelEsijlqAHdq0= +github.com/ava-labs/coreth v0.15.4-rc.3.0.20251001215137-83dbf26810fb h1:FAypJeUihR3aOLHysSaN8o8aSTyrdarkpY7/DzBAyso= +github.com/ava-labs/coreth v0.15.4-rc.3.0.20251001215137-83dbf26810fb/go.mod h1:LDklDYGhgqnorUFERizRUv49SqwRU97Gewd0qve4kwk= github.com/ava-labs/firewood-go-ethhash/ffi v0.0.12 h1:aMcrLbpJ/dyu2kZDf/Di/4JIWsUcYPyTDKymiHpejt0= github.com/ava-labs/firewood-go-ethhash/ffi v0.0.12/go.mod h1:cq89ua3iiZ5wPBALTEQS5eG8DIZcs7ov6OiL4YR1BVY= 
github.com/ava-labs/ledger-avalanche-go v1.1.0 h1:OkscKtb/gX20HBt8RyAtwXLrQnCEls5SzWGieE7NoNM= github.com/ava-labs/ledger-avalanche-go v1.1.0/go.mod h1:mAlG9ptnPjvNoLGLHXnM3slGY8ewvBJtJNVTEjG8KvI= -github.com/ava-labs/libevm v1.13.14-0.3.0.rc.6 h1:tyM659nDOknwTeU4A0fUVsGNIU7k0v738wYN92nqs/Y= -github.com/ava-labs/libevm v1.13.14-0.3.0.rc.6/go.mod h1:zP/DOcABRWargBmUWv1jXplyWNcfmBy9cxr0lw3LW3g= +github.com/ava-labs/libevm v1.13.15-0.20250925141903-414b1f5dffea h1:NSyRY0eT1EzD+PRlVB9y6iA6ulp1DxB76dys9W//UuQ= +github.com/ava-labs/libevm v1.13.15-0.20250925141903-414b1f5dffea/go.mod h1:ivRC/KojP8sai7j8WnpXIReQpcRklL2bIzoysnjpARQ= github.com/ava-labs/simplex v0.0.0-20250919142550-9cdfff10fd19 h1:S6oFasZsplNmw8B2S8cMJQMa62nT5ZKGzZRdCpd+5qQ= github.com/ava-labs/simplex v0.0.0-20250919142550-9cdfff10fd19/go.mod h1:GVzumIo3zR23/qGRN2AdnVkIPHcKMq/D89EGWZfMGQ0= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= @@ -532,8 +532,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo= @@ -623,8 +623,8 @@ golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= @@ -663,8 +663,8 @@ golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= 
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= @@ -676,8 +676,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -717,12 +717,12 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -730,8 +730,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/text v0.29.0 
h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= @@ -750,8 +750,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vms/evm/sync/block/syncer.go b/vms/evm/sync/block/syncer.go new file mode 100644 index 000000000000..3dc152c52a6f --- /dev/null +++ b/vms/evm/sync/block/syncer.go @@ -0,0 +1,114 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package block + +import ( + "context" + "errors" + "fmt" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/log" + + "github.com/ava-labs/avalanchego/vms/evm/sync" + syncclient "github.com/ava-labs/avalanchego/vms/evm/sync/client" +) + +const blocksPerRequest = 32 + +var ( + _ sync.StateSyncer = (*Syncer)(nil) + errBlocksToFetchRequired = errors.New("blocksToFetch must be > 0") + errFromHashRequired = errors.New("fromHash must be non-zero when fromHeight > 0") +) + +type Syncer struct { + db ethdb.Database + client syncclient.SyncClient + fromHash common.Hash + fromHeight uint64 + blocksToFetch uint64 +} + +func NewSyncer(client syncclient.SyncClient, db ethdb.Database, fromHash common.Hash, fromHeight uint64, blocksToFetch uint64) (*Syncer, error) { + if blocksToFetch == 0 { + return nil, errBlocksToFetchRequired + } + + if (fromHash == common.Hash{}) && fromHeight > 0 { + return nil, errFromHashRequired + } + + return &Syncer{ + client: client, + db: db, + fromHash: fromHash, + fromHeight: fromHeight, + blocksToFetch: blocksToFetch, + }, nil +} + +// Name returns the human-readable name for this sync task. +func (*Syncer) Name() string { + return "Block Syncer" +} + +// ID returns the stable identifier for this sync task. +func (*Syncer) ID() string { + return "state_block_sync" +} + +// Sync fetches (up to) BlocksToFetch blocks from peers +// using SyncClient and writes them to disk. +// the process begins with FromHash and it fetches parents recursively. +// fetching starts from the first ancestor not found on disk +// +// TODO: We could inspect the database more accurately to ensure we never fetch +// any blocks that are locally available. 
+// We could also prevent over-requesting blocks, if the number of blocks needed +// to be fetched isn't a multiple of blocksPerRequest. +func (s *Syncer) Sync(ctx context.Context) error { + nextHash := s.fromHash + nextHeight := s.fromHeight + blocksToFetch := s.blocksToFetch + + // first, check for blocks already available on disk so we don't + // request them from peers. + for blocksToFetch > 0 { + blk := rawdb.ReadBlock(s.db, nextHash, nextHeight) + if blk == nil { + // block was not found + break + } + + // block exists + nextHash = blk.ParentHash() + nextHeight-- + blocksToFetch-- + } + + // get any blocks we couldn't find on disk from peers and write + // them to disk. + batch := s.db.NewBatch() + for fetched := uint64(0); fetched < blocksToFetch && (nextHash != common.Hash{}); { + log.Info("fetching blocks from peer", "fetched", fetched, "total", blocksToFetch) + blocks, err := s.client.GetBlocks(ctx, nextHash, nextHeight, blocksPerRequest) + if err != nil { + return fmt.Errorf("could not get blocks from peer: err: %w, nextHash: %s, fetched: %d", err, nextHash, fetched) + } + for _, block := range blocks { + rawdb.WriteBlock(batch, block) + rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64()) + + fetched++ + nextHash = block.ParentHash() + nextHeight-- + } + } + + log.Info("fetched blocks from peer", "total", blocksToFetch) + return batch.Write() +} diff --git a/vms/evm/sync/block/syncer_test.go b/vms/evm/sync/block/syncer_test.go new file mode 100644 index 000000000000..53eedcf8c207 --- /dev/null +++ b/vms/evm/sync/block/syncer_test.go @@ -0,0 +1,240 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package block + +import ( + "context" + "fmt" + "math/big" + "testing" + + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/crypto" + "github.com/ava-labs/libevm/ethdb" + ethparams "github.com/ava-labs/libevm/params" + "github.com/stretchr/testify/require" + + syncclient "github.com/ava-labs/avalanchego/vms/evm/sync/client" + "github.com/ava-labs/avalanchego/vms/evm/sync/handlers" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" + "github.com/ava-labs/avalanchego/vms/evm/sync/stats" +) + +func TestBlockSyncer_ParameterizedTests(t *testing.T) { + tests := []struct { + name string + numBlocks int + prePopulateBlocks []int + fromHeight uint64 + blocksToFetch uint64 + expectedBlocks []int + verifyZeroBlocksReceived bool + }{ + { + name: "normal case - all blocks retrieved from network", + numBlocks: 10, + fromHeight: 5, + blocksToFetch: 3, + expectedBlocks: []int{3, 4, 5}, + }, + { + name: "all blocks already available", + numBlocks: 10, + prePopulateBlocks: []int{3, 4, 5}, + fromHeight: 5, + blocksToFetch: 3, + expectedBlocks: []int{3, 4, 5}, + verifyZeroBlocksReceived: true, + }, + { + name: "some blocks already available", + numBlocks: 10, + prePopulateBlocks: []int{4, 5}, + fromHeight: 5, + blocksToFetch: 3, + expectedBlocks: []int{3, 4, 5}, + }, + { + name: "most recent block missing", + numBlocks: 10, + prePopulateBlocks: []int{3, 4}, + fromHeight: 5, + blocksToFetch: 3, + expectedBlocks: []int{3, 4, 5}, + }, + { + name: "edge case - from height 1", + numBlocks: 10, + fromHeight: 1, + blocksToFetch: 1, + expectedBlocks: []int{1}, + }, + { + name: "single block sync", + numBlocks: 
10,
+			fromHeight:     7,
+			blocksToFetch:  1,
+			expectedBlocks: []int{7},
+		},
+		{
+			name:           "large sync - many blocks",
+			numBlocks:      50,
+			fromHeight:     40,
+			blocksToFetch:  35,
+			expectedBlocks: []int{6, 10, 20, 30, 40},
+		},
+		{
+			name:           "fetch genesis block",
+			numBlocks:      10,
+			fromHeight:     10,
+			blocksToFetch:  30,
+			expectedBlocks: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			env := newTestEnvironment(t, tt.numBlocks)
+			require.NoError(t, env.prePopulateBlocks(tt.prePopulateBlocks))
+
+			syncer, err := env.createSyncer(tt.fromHeight, tt.blocksToFetch)
+			require.NoError(t, err)
+
+			require.NoError(t, syncer.Sync(context.Background()))
+
+			env.verifyBlocksInDB(t, tt.expectedBlocks)
+
+			if tt.verifyZeroBlocksReceived {
+				// SyncClient should not have received any block requests since all blocks were on disk
+				require.Zero(t, env.client.BlocksReceived())
+			}
+		})
+	}
+}
+
+func TestBlockSyncer_ContextCancellation(t *testing.T) {
+	env := newTestEnvironment(t, 10)
+	syncer, err := env.createSyncer(5, 3)
+	require.NoError(t, err)
+
+	// Immediately cancel the context to simulate cancellation.
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+	err = syncer.Sync(ctx)
+	require.ErrorIs(t, err, context.Canceled)
+}
+
+// testEnvironment provides an abstraction for setting up block syncer tests
+type testEnvironment struct {
+	chainDB ethdb.Database
+	client  *syncclient.TestClient
+	blocks  []*types.Block
+}
+
+// newTestEnvironment creates a new test environment with generated blocks
+func newTestEnvironment(t *testing.T, numBlocks int) *testEnvironment {
+	t.Helper()
+
+	var (
+		key, _         = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr           = crypto.PubkeyToAddress(key.PublicKey)
+		genesisBalance = big.NewInt(1000000000)
+		signer         = types.HomesteadSigner{}
+	)
+
+	// Ensure that key has some funds in the genesis block.
+	gspec := &core.Genesis{
+		Config: &params.ChainConfig{HomesteadBlock: new(big.Int)},
+		Alloc:  types.GenesisAlloc{addr: {Balance: genesisBalance}},
+	}
+	engine := dummy.NewETHFaker()
+
+	_, blocks, _, err := core.GenerateChainWithGenesis(gspec, engine, numBlocks, 0, func(_ int, gen *core.BlockGen) {
+		// Generate a transaction to create a unique block
+		tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr), addr, big.NewInt(10), ethparams.TxGas, nil, nil), signer, key)
+		gen.AddTx(tx)
+	})
+	require.NoError(t, err)
+
+	// The genesis block is not included in the blocks slice, so we need to prepend it.
+	blocks = append([]*types.Block{gspec.ToBlock()}, blocks...)
+
+	blockProvider := &handlers.TestBlockProvider{GetBlockFn: func(hash common.Hash, height uint64) *types.Block {
+		if height >= uint64(len(blocks)) {
+			return nil
+		}
+		block := blocks[height]
+		if block.Hash() != hash {
+			return nil
+		}
+		return block
+	}}
+
+	blockHandler := handlers.NewBlockRequestHandler(
+		blockProvider,
+		message.Codec,
+		stats.NewNoopHandlerStats(),
+	)
+
+	return &testEnvironment{
+		chainDB: rawdb.NewMemoryDatabase(),
+		blocks:  blocks,
+		client: syncclient.NewTestClient(
+			message.Codec,
+			nil,
+			nil,
+			blockHandler,
+		),
+	}
+}
+
+// prePopulateBlocks writes some blocks to the database before syncing (by block height)
+func (e *testEnvironment) prePopulateBlocks(blockHeights []int) error {
+	batch := e.chainDB.NewBatch()
+	for _, height := range blockHeights {
+		if height < len(e.blocks) {
+			// blocks[height] is the block at that height (the genesis block was prepended at index 0).
+			block := e.blocks[height]
+			rawdb.WriteBlock(batch, block)
+			rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
+		}
+	}
+	return batch.Write()
+}
+
+// createSyncer creates a block syncer with the given configuration
+func (e *testEnvironment) createSyncer(fromHeight uint64, blocksToFetch uint64) (*Syncer, error) {
+	if fromHeight > uint64(len(e.blocks)) {
+		return nil, fmt.Errorf("fromHeight %d exceeds available blocks %d", fromHeight, len(e.blocks))
+	}
+
+	return NewSyncer(
+		e.client,
+		e.chainDB,
+		e.blocks[fromHeight].Hash(),
+		fromHeight,
+		blocksToFetch,
+	)
+}
+
+// verifyBlocksInDB checks that the expected blocks are present in the database (by block height)
+func (e *testEnvironment) verifyBlocksInDB(t *testing.T, expectedBlockHeights []int) {
+	t.Helper()
+
+	// Verify expected blocks are present
+	for _, height := range expectedBlockHeights {
+		if height >= len(e.blocks) {
+			continue
+		}
+		block := e.blocks[height]
+		dbBlock := rawdb.ReadBlock(e.chainDB, block.Hash(), block.NumberU64())
+		require.NotNil(t, dbBlock, "Block %d should be in database", height)
+		require.Equal(t, block.Hash(), dbBlock.Hash(), "Block %d hash mismatch", height)
+	}
+}
diff --git a/vms/evm/sync/client/client.go b/vms/evm/sync/client/client.go
new file mode 100644
index 000000000000..67ee16f55e65
--- /dev/null
+++ b/vms/evm/sync/client/client.go
@@ -0,0 +1,361 @@
+// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package client
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	"github.com/ava-labs/coreth/network"
+	"github.com/ava-labs/libevm/common"
+	"github.com/ava-labs/libevm/core/rawdb"
+	"github.com/ava-labs/libevm/core/types"
+	"github.com/ava-labs/libevm/crypto"
+	"github.com/ava-labs/libevm/ethdb"
+	"github.com/ava-labs/libevm/log"
+	ethparams "github.com/ava-labs/libevm/params"
+	"github.com/ava-labs/libevm/trie"
+
+	"github.com/ava-labs/avalanchego/codec"
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/version"
+	"github.com/ava-labs/avalanchego/vms/evm/sync/message"
+	"github.com/ava-labs/avalanchego/vms/evm/sync/stats"
+)
+
+const (
+	failedRequestSleepInterval = 10 * time.Millisecond
+
+	epsilon = 1e-6 // small amount to add to time to avoid division by 0
+)
+
+var (
+	StateSyncVersion = &version.Application{
+		Major: 1,
+		Minor: 7,
+		Patch: 13,
+	}
+	errEmptyResponse          = errors.New("empty response")
+	errTooManyBlocks          = errors.New("response contains more blocks than requested")
+	errHashMismatch           = errors.New("hash does not match expected value")
+	errInvalidRangeProof      = errors.New("failed to verify range proof")
+	errTooManyLeaves          = errors.New("response contains more than requested leaves")
+	errUnmarshalResponse      = errors.New("failed to unmarshal response")
+	errInvalidCodeResponseLen = errors.New("number of code bytes in response does not match requested hashes")
+	errMaxCodeSizeExceeded    = errors.New("max code size exceeded")
+)
+var _ SyncClient = (*Client)(nil)
+
+// SyncClient synchronously fetches data from the network to fulfill state sync requests.
+// Failed requests are retried until the request's context expires.
+type SyncClient interface {
+	// GetLeafs synchronously sends the given request, returning a parsed LeafsResponse or error
+	// Note: this verifies the response including the range proofs.
+ GetLeafs(ctx context.Context, request message.LeafsRequest) (message.LeafsResponse, error) + + // GetBlocks synchronously retrieves blocks starting with specified common.Hash and height up to specified parents + // specified range from height to height-parents is inclusive + GetBlocks(ctx context.Context, blockHash common.Hash, height uint64, parents uint16) ([]*types.Block, error) + + // GetCode synchronously retrieves code associated with the given hashes + GetCode(ctx context.Context, hashes []common.Hash) ([][]byte, error) +} + +// parseResponseFn parses given response bytes in context of specified request +// Validates response in context of the request +// Ensures the returned interface matches the expected response type of the request +// Returns the number of elements in the response (specific to the response type, used in metrics) +type parseResponseFn func(codec codec.Manager, request message.Request, response []byte) (any, int, error) + +type Client struct { + networkClient network.SyncedNetworkClient + codec codec.Manager + stateSyncNodes []ids.NodeID + stateSyncNodeIdx uint32 + stats stats.ClientSyncerStats + blockParser EthBlockParser +} + +type Config struct { + NetworkClient network.SyncedNetworkClient + Codec codec.Manager + Stats stats.ClientSyncerStats + StateSyncNodeIDs []ids.NodeID + BlockParser EthBlockParser +} + +type EthBlockParser interface { + ParseEthBlock(b []byte) (*types.Block, error) +} + +func NewClient(config *Config) *Client { + return &Client{ + networkClient: config.NetworkClient, + codec: config.Codec, + stats: config.Stats, + stateSyncNodes: config.StateSyncNodeIDs, + blockParser: config.BlockParser, + } +} + +// GetLeafs synchronously retrieves leafs as per given [message.LeafsRequest] +// Retries when: +// - response bytes could not be unmarshalled to [message.LeafsResponse] +// - response keys do not correspond to the requested range. +// - response does not contain a valid merkle proof. +func (c *Client) GetLeafs(ctx context.Context, req message.LeafsRequest) (message.LeafsResponse, error) { + data, err := c.get(ctx, req, parseLeafsResponse) + if err != nil { + return message.LeafsResponse{}, err + } + + return data.(message.LeafsResponse), nil +} + +// parseLeafsResponse validates given object as message.LeafsResponse +// assumes reqIntf is of type message.LeafsRequest +// returns a non-nil error if the request should be retried +// returns error when: +// - response bytes could not be unmarshalled into message.LeafsResponse +// - number of response keys is not equal to the response values +// - first and last key in the response is not within the requested start and end range +// - response keys are not in increasing order +// - proof validation failed +func parseLeafsResponse(codec codec.Manager, reqIntf message.Request, data []byte) (any, int, error) { + var leafsResponse message.LeafsResponse + if _, err := codec.Unmarshal(data, &leafsResponse); err != nil { + return nil, 0, err + } + + leafsRequest := reqIntf.(message.LeafsRequest) + + // Ensure the response does not contain more than the maximum requested number of leaves. 
+ if len(leafsResponse.Keys) > int(leafsRequest.Limit) || len(leafsResponse.Vals) > int(leafsRequest.Limit) { + return nil, 0, fmt.Errorf("%w: (%d) > %d)", errTooManyLeaves, len(leafsResponse.Keys), leafsRequest.Limit) + } + + // An empty response (no more keys) requires a merkle proof + if len(leafsResponse.Keys) == 0 && len(leafsResponse.ProofVals) == 0 { + return nil, 0, errors.New("empty key response must include merkle proof") + } + + var proof ethdb.Database + // Populate proof when ProofVals are present in the response. Its ok to pass it as nil to the trie.VerifyRangeProof + // function as it will assert that all the leaves belonging to the specified root are present. + if len(leafsResponse.ProofVals) > 0 { + proof = rawdb.NewMemoryDatabase() + defer proof.Close() + for _, proofVal := range leafsResponse.ProofVals { + proofKey := crypto.Keccak256(proofVal) + if err := proof.Put(proofKey, proofVal); err != nil { + return nil, 0, err + } + } + } + + firstKey := leafsRequest.Start + if len(leafsResponse.Keys) > 0 { + lastKey := leafsResponse.Keys[len(leafsResponse.Keys)-1] + + if firstKey == nil { + firstKey = bytes.Repeat([]byte{0x00}, len(lastKey)) + } + } + + // VerifyRangeProof verifies that the key-value pairs included in [leafResponse] are all of the keys within the range from start + // to the last key returned. + // Also ensures the keys are in monotonically increasing order + more, err := trie.VerifyRangeProof(leafsRequest.Root, firstKey, leafsResponse.Keys, leafsResponse.Vals, proof) + if err != nil { + return nil, 0, fmt.Errorf("%w due to %w", errInvalidRangeProof, err) + } + + // Set the [More] flag to indicate if there are more leaves to the right of the last key in the response + // that needs to be fetched. + leafsResponse.More = more + + return leafsResponse, len(leafsResponse.Keys), nil +} + +func (c *Client) GetBlocks(ctx context.Context, hash common.Hash, height uint64, parents uint16) ([]*types.Block, error) { + req := message.BlockRequest{ + Hash: hash, + Height: height, + Parents: parents, + } + + data, err := c.get(ctx, req, c.parseBlocks) + if err != nil { + return nil, fmt.Errorf("could not get blocks (%s) due to %w", hash, err) + } + + return data.(types.Blocks), nil +} + +// parseBlocks validates given object as message.BlockResponse +// assumes req is of type message.BlockRequest +// returns types.Blocks as any +// returns a non-nil error if the request should be retried +func (c *Client) parseBlocks(codec codec.Manager, req message.Request, data []byte) (any, int, error) { + var response message.BlockResponse + if _, err := codec.Unmarshal(data, &response); err != nil { + return nil, 0, fmt.Errorf("%w: %w", errUnmarshalResponse, err) + } + if len(response.Blocks) == 0 { + return nil, 0, errEmptyResponse + } + blockRequest := req.(message.BlockRequest) + numParentsRequested := blockRequest.Parents + if len(response.Blocks) > int(numParentsRequested) { + return nil, 0, errTooManyBlocks + } + + hash := blockRequest.Hash + + // attempt to decode blocks + blocks := make(types.Blocks, len(response.Blocks)) + for i, blkBytes := range response.Blocks { + block, err := c.blockParser.ParseEthBlock(blkBytes) + if err != nil { + return nil, 0, fmt.Errorf("%w: %w", errUnmarshalResponse, err) + } + + if block.Hash() != hash { + return nil, 0, fmt.Errorf("%w for block: (got %v) (expected %v)", errHashMismatch, block.Hash(), hash) + } + + blocks[i] = block + hash = block.ParentHash() + } + + // return decoded blocks + return blocks, len(blocks), nil +} + +func (c *Client) 
GetCode(ctx context.Context, hashes []common.Hash) ([][]byte, error) { + req := message.NewCodeRequest(hashes) + + data, err := c.get(ctx, req, parseCode) + if err != nil { + return nil, fmt.Errorf("could not get code (%s): %w", req, err) + } + + return data.([][]byte), nil +} + +// parseCode validates given object as a code object +// assumes req is of type message.CodeRequest +// returns a non-nil error if the request should be retried +func parseCode(codec codec.Manager, req message.Request, data []byte) (any, int, error) { + var response message.CodeResponse + if _, err := codec.Unmarshal(data, &response); err != nil { + return nil, 0, err + } + + codeRequest := req.(message.CodeRequest) + if len(response.Data) != len(codeRequest.Hashes) { + return nil, 0, fmt.Errorf("%w (got %d) (requested %d)", errInvalidCodeResponseLen, len(response.Data), len(codeRequest.Hashes)) + } + + totalBytes := 0 + for i, code := range response.Data { + if len(code) > ethparams.MaxCodeSize { + return nil, 0, fmt.Errorf("%w: (hash %s) (size %d)", errMaxCodeSizeExceeded, codeRequest.Hashes[i], len(code)) + } + + hash := crypto.Keccak256Hash(code) + if hash != codeRequest.Hashes[i] { + return nil, 0, fmt.Errorf("%w for code at index %d: (got %v) (expected %v)", errHashMismatch, i, hash, codeRequest.Hashes[i]) + } + totalBytes += len(code) + } + + return response.Data, totalBytes, nil +} + +// get submits given request and blockingly returns with either a parsed response object or an error +// if [ctx] expires before the Client can successfully retrieve a valid response. +// Retries if there is a network error or if the [parseResponseFn] returns an error indicating an invalid response. +// Returns the parsed interface returned from [parseFn]. +// Thread safe +func (c *Client) get(ctx context.Context, request message.Request, parseFn parseResponseFn) (any, error) { + // marshal the request into requestBytes + requestBytes, err := message.RequestToBytes(c.codec, request) + if err != nil { + return nil, err + } + + metric, err := c.stats.GetMetric(request) + if err != nil { + return nil, err + } + var ( + responseIntf any + numElements int + lastErr error + ) + // Loop until the context is cancelled or we get a valid response. + for attempt := 0; ; attempt++ { + // If the context has finished, return the context error early. + if ctxErr := ctx.Err(); ctxErr != nil { + if lastErr != nil { + return nil, fmt.Errorf("request failed after %d attempts with last error %w and ctx error %w", attempt, lastErr, ctxErr) + } else { + return nil, ctxErr + } + } + + metric.IncRequested() + + var ( + response []byte + nodeID ids.NodeID + start = time.Now() + ) + if len(c.stateSyncNodes) == 0 { + response, nodeID, err = c.networkClient.SendSyncedAppRequestAny(ctx, StateSyncVersion, requestBytes) + } else { + // get the next nodeID using the nodeIdx offset. If we're out of nodes, loop back to 0 + // we do this every attempt to ensure we get a different node each time if possible. + nodeIdx := atomic.AddUint32(&c.stateSyncNodeIdx, 1) + nodeID = c.stateSyncNodes[nodeIdx%uint32(len(c.stateSyncNodes))] + + response, err = c.networkClient.SendSyncedAppRequest(ctx, nodeID, requestBytes) + } + metric.UpdateRequestLatency(time.Since(start)) + + if err != nil { + ctx := make([]any, 0, 8) + if nodeID != ids.EmptyNodeID { + ctx = append(ctx, "nodeID", nodeID) + } + ctx = append(ctx, "attempt", attempt, "request", request, "err", err) + log.Debug("request failed, retrying", ctx...) 
+ metric.IncFailed() + c.networkClient.TrackBandwidth(nodeID, 0) + time.Sleep(failedRequestSleepInterval) + continue + } else { + responseIntf, numElements, err = parseFn(c.codec, request, response) + if err != nil { + lastErr = err + log.Debug("could not validate response, retrying", "nodeID", nodeID, "attempt", attempt, "request", request, "err", err) + c.networkClient.TrackBandwidth(nodeID, 0) + metric.IncFailed() + metric.IncInvalidResponse() + continue + } + + bandwidth := float64(len(response)) / (time.Since(start).Seconds() + epsilon) + c.networkClient.TrackBandwidth(nodeID, bandwidth) + metric.IncSucceeded() + metric.IncReceived(int64(numElements)) + return responseIntf, nil + } + } +} diff --git a/vms/evm/sync/client/client_test.go b/vms/evm/sync/client/client_test.go new file mode 100644 index 000000000000..dd6301f8670c --- /dev/null +++ b/vms/evm/sync/client/client_test.go @@ -0,0 +1,769 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package client + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "testing" + + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/crypto" + ethparams "github.com/ava-labs/libevm/params" + "github.com/ava-labs/libevm/triedb" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/evm/sync/handlers" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" + "github.com/ava-labs/avalanchego/vms/evm/sync/statesynctest" + "github.com/ava-labs/avalanchego/vms/evm/sync/stats" +) + +func TestGetCode(t *testing.T) { + testNetClient := &testNetwork{} + + tests := map[string]struct { + setupRequest func() (requestHashes []common.Hash, testResponse message.CodeResponse, expectedCode [][]byte) + expectedErr error + }{ + "normal": { + setupRequest: func() ([]common.Hash, message.CodeResponse, [][]byte) { + code := []byte("this is the code") + codeHash := crypto.Keccak256Hash(code) + codeSlices := [][]byte{code} + return []common.Hash{codeHash}, message.CodeResponse{ + Data: codeSlices, + }, codeSlices + }, + expectedErr: nil, + }, + "unexpected code bytes": { + setupRequest: func() (requestHashes []common.Hash, testResponse message.CodeResponse, expectedCode [][]byte) { + return []common.Hash{{1}}, message.CodeResponse{ + Data: [][]byte{{1}}, + }, nil + }, + expectedErr: errHashMismatch, + }, + "too many code elements returned": { + setupRequest: func() (requestHashes []common.Hash, testResponse message.CodeResponse, expectedCode [][]byte) { + return []common.Hash{{1}}, message.CodeResponse{ + Data: [][]byte{{1}, {2}}, + }, nil + }, + expectedErr: errInvalidCodeResponseLen, + }, + "too few code elements returned": { + setupRequest: func() (requestHashes []common.Hash, testResponse message.CodeResponse, expectedCode [][]byte) { + return []common.Hash{{1}}, message.CodeResponse{ + Data: [][]byte{}, + }, nil + }, + expectedErr: errInvalidCodeResponseLen, + }, + "code size is too large": { + setupRequest: func() (requestHashes []common.Hash, testResponse message.CodeResponse, expectedCode [][]byte) { + oversizedCode := make([]byte, ethparams.MaxCodeSize+1) + codeHash := crypto.Keccak256Hash(oversizedCode) + return []common.Hash{codeHash}, message.CodeResponse{ + Data: [][]byte{oversizedCode}, + }, nil + }, + 
expectedErr: errMaxCodeSizeExceeded, + }, + } + + stateSyncClient := NewClient(&Config{ + NetworkClient: testNetClient, + Codec: message.Codec, + Stats: stats.NewNoOpStats(), + StateSyncNodeIDs: nil, + BlockParser: newTestBlockParser(), + }) + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + codeHashes, res, expectedCode := test.setupRequest() + + responseBytes, err := message.Codec.Marshal(message.Version, res) + require.NoError(t, err) + // Dirty hack required because the client will re-request if it encounters + // an error. + attempted := false + if test.expectedErr == nil { + testNetClient.testResponse(1, nil, responseBytes) + } else { + testNetClient.testResponse(2, func() { + // Cancel before the second attempt is processed. + if attempted { + cancel() + } + attempted = true + }, responseBytes) + } + + codeBytes, err := stateSyncClient.GetCode(ctx, codeHashes) + require.ErrorIs(t, err, test.expectedErr) + // If we expected an error, verify retry behavior and return + if test.expectedErr != nil { + require.Equal(t, uint(2), testNetClient.numCalls) + return + } + // Otherwise, require that the result is as expected + require.Len(t, codeBytes, len(expectedCode)) + for i, code := range codeBytes { + require.Equal(t, expectedCode[i], code) + } + require.Equal(t, uint(1), testNetClient.numCalls) + }) + } +} + +func TestGetBlocks(t *testing.T) { + gspec := &core.Genesis{ + Config: params.TestChainConfig, + } + memdb := rawdb.NewMemoryDatabase() + tdb := triedb.NewDatabase(memdb, nil) + genesis := gspec.MustCommit(memdb, tdb) + engine := dummy.NewETHFaker() + numBlocks := 110 + blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, numBlocks, 0, func(_ int, _ *core.BlockGen) {}) + require.NoError(t, err) + require.Len(t, blocks, numBlocks) + + // Construct client + testNetClient := &testNetwork{} + stateSyncClient := NewClient(&Config{ + NetworkClient: testNetClient, + Codec: message.Codec, + Stats: stats.NewNoOpStats(), + StateSyncNodeIDs: nil, + BlockParser: newTestBlockParser(), + }) + + blocksRequestHandler := handlers.NewBlockRequestHandler(buildGetter(blocks), message.Codec, stats.NewNoopHandlerStats()) + + // encodeBlockSlice takes a slice of blocks that are ordered in increasing height order + // and returns a slice of byte slices with those blocks encoded in reverse order + encodeBlockSlice := func(blocks []*types.Block) [][]byte { + blockBytes := make([][]byte, 0, len(blocks)) + for i := len(blocks) - 1; i >= 0; i-- { + buf := new(bytes.Buffer) + require.NoError(t, blocks[i].EncodeRLP(buf)) + blockBytes = append(blockBytes, buf.Bytes()) + } + + return blockBytes + } + tests := map[string]struct { + request message.BlockRequest + getResponse func(t *testing.T, request message.BlockRequest) []byte + assertResponse func(t *testing.T, response []*types.Block) + expectedErr error + }{ + "normal resonse": { + request: message.BlockRequest{ + Hash: blocks[100].Hash(), + Height: 100, + Parents: 16, + }, + getResponse: func(t *testing.T, request message.BlockRequest) []byte { + response, err := blocksRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response, "Failed to generate valid response") + + return response + }, + assertResponse: func(t *testing.T, response []*types.Block) { + require.Len(t, response, 16) + }, + }, + "fewer than requested blocks": { + request: 
message.BlockRequest{ + Hash: blocks[100].Hash(), + Height: 100, + Parents: 16, + }, + getResponse: func(t *testing.T, request message.BlockRequest) []byte { + request.Parents -= 5 + response, err := blocksRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + + return response + }, + // If the server returns fewer than requested blocks, we should consider it valid + assertResponse: func(t *testing.T, response []*types.Block) { + require.Len(t, response, 11) + }, + }, + "gibberish response": { + request: message.BlockRequest{ + Hash: blocks[100].Hash(), + Height: 100, + Parents: 16, + }, + getResponse: func(_ *testing.T, _ message.BlockRequest) []byte { + return []byte("gibberish") + }, + expectedErr: errUnmarshalResponse, + }, + "invalid value replacing block": { + request: message.BlockRequest{ + Hash: blocks[100].Hash(), + Height: 100, + Parents: 16, + }, + getResponse: func(t *testing.T, request message.BlockRequest) []byte { + response, err := blocksRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + var blockResponse message.BlockResponse + _, err = message.Codec.Unmarshal(response, &blockResponse) + require.NoError(t, err) + // Replace middle value with garbage data + blockResponse.Blocks[10] = []byte("invalid value replacing block bytes") + responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) + require.NoError(t, err) + + return responseBytes + }, + expectedErr: errUnmarshalResponse, + }, + "incorrect starting point": { + request: message.BlockRequest{ + Hash: blocks[100].Hash(), + Height: 100, + Parents: 16, + }, + getResponse: func(t *testing.T, _ message.BlockRequest) []byte { + response, err := blocksRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, message.BlockRequest{ + Hash: blocks[99].Hash(), + Height: 99, + Parents: 16, + }) + require.NoError(t, err) + require.NotEmpty(t, response) + + return response + }, + expectedErr: errHashMismatch, + }, + "missing link in between blocks": { + request: message.BlockRequest{ + Hash: blocks[100].Hash(), + Height: 100, + Parents: 16, + }, + getResponse: func(t *testing.T, _ message.BlockRequest) []byte { + // Encode blocks with a missing link + blks := make([]*types.Block, 0) + blks = append(blks, blocks[84:89]...) + blks = append(blks, blocks[90:101]...) 
+ blockBytes := encodeBlockSlice(blks) + + blockResponse := message.BlockResponse{ + Blocks: blockBytes, + } + responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) + require.NoError(t, err) + + return responseBytes + }, + expectedErr: errHashMismatch, + }, + "no blocks": { + request: message.BlockRequest{ + Hash: blocks[100].Hash(), + Height: 100, + Parents: 16, + }, + getResponse: func(t *testing.T, _ message.BlockRequest) []byte { + blockResponse := message.BlockResponse{ + Blocks: nil, + } + responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) + require.NoError(t, err) + + return responseBytes + }, + expectedErr: errEmptyResponse, + }, + "more than requested blocks": { + request: message.BlockRequest{ + Hash: blocks[100].Hash(), + Height: 100, + Parents: 16, + }, + getResponse: func(t *testing.T, _ message.BlockRequest) []byte { + blockBytes := encodeBlockSlice(blocks[80:100]) + + blockResponse := message.BlockResponse{ + Blocks: blockBytes, + } + responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) + require.NoError(t, err) + + return responseBytes + }, + expectedErr: errTooManyBlocks, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + responseBytes := test.getResponse(t, test.request) + if test.expectedErr == nil { + testNetClient.testResponse(1, nil, responseBytes) + } else { + attempted := false + testNetClient.testResponse(2, func() { + if attempted { + cancel() + } + attempted = true + }, responseBytes) + } + + blockResponse, err := stateSyncClient.GetBlocks(ctx, test.request.Hash, test.request.Height, test.request.Parents) + if test.expectedErr != nil { + require.ErrorIs(t, err, test.expectedErr) + return + } + require.NoError(t, err) + + test.assertResponse(t, blockResponse) + }) + } +} + +func buildGetter(blocks []*types.Block) handlers.BlockProvider { + return &handlers.TestBlockProvider{ + GetBlockFn: func(blockHash common.Hash, blockHeight uint64) *types.Block { + requestedBlock := blocks[blockHeight] + if requestedBlock.Hash() != blockHash { + fmt.Printf("ERROR height=%d, hash=%s, parentHash=%s, reqHash=%s\n", blockHeight, blockHash, requestedBlock.ParentHash(), requestedBlock.Hash()) + return nil + } + return requestedBlock + }, + } +} + +func TestGetLeafs(t *testing.T) { + const leafsLimit = 1024 + + trieDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil) + largeTrieRoot, largeTrieKeys, _ := statesynctest.GenerateTrie(t, rand.Reader, trieDB, 100_000, common.HashLength) + smallTrieRoot, _, _ := statesynctest.GenerateTrie(t, rand.Reader, trieDB, leafsLimit, common.HashLength) + + handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, message.Codec, stats.NewNoopHandlerStats()) + client := NewClient(&Config{ + NetworkClient: &testNetwork{}, + Codec: message.Codec, + Stats: stats.NewNoOpStats(), + StateSyncNodeIDs: nil, + BlockParser: newTestBlockParser(), + }) + + tests := map[string]struct { + request message.LeafsRequest + getResponse func(t *testing.T, request message.LeafsRequest) []byte + requireResponse func(t *testing.T, response message.LeafsResponse) + expectedErr error + }{ + "full response for small (single request) trie": { + request: message.LeafsRequest{ + Root: smallTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + 
getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + + return response + }, + requireResponse: func(t *testing.T, response message.LeafsResponse) { + require.False(t, response.More) + require.Len(t, response.Keys, leafsLimit) + require.Len(t, response.Vals, leafsLimit) + }, + }, + "too many leaves in response": { + request: message.LeafsRequest{ + Root: smallTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit / 2, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + modifiedRequest := request + modifiedRequest.Limit = leafsLimit + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, modifiedRequest) + require.NoError(t, err) + require.NotEmpty(t, response) + + return response + }, + expectedErr: errTooManyLeaves, + }, + "partial response to request for entire trie (full leaf limit)": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + + return response + }, + requireResponse: func(t *testing.T, response message.LeafsResponse) { + require.True(t, response.More) + require.Len(t, response.Keys, leafsLimit) + require.Len(t, response.Vals, leafsLimit) + }, + }, + "partial response to request for middle range of trie (full leaf limit)": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: largeTrieKeys[1000], + End: largeTrieKeys[99000], + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + + return response + }, + requireResponse: func(t *testing.T, response message.LeafsResponse) { + require.True(t, response.More) + require.Len(t, response.Keys, leafsLimit) + require.Len(t, response.Vals, leafsLimit) + }, + }, + "full response from near end of trie to end of trie (less than leaf limit)": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: largeTrieKeys[len(largeTrieKeys)-30], // Set start 30 keys from the end of the large trie + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + + return response + }, + requireResponse: func(t *testing.T, response message.LeafsResponse) { + require.False(t, response.More) + require.Len(t, response.Keys, 30) + require.Len(t, response.Vals, 30) + }, + }, + "full response for intermediate range of trie (less than leaf limit)": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: largeTrieKeys[1000], // Set the range for 1000 
leafs in an intermediate range of the trie + End: largeTrieKeys[1099], // (inclusive range) + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + + return response + }, + requireResponse: func(t *testing.T, response message.LeafsResponse) { + require.True(t, response.More) + require.Len(t, response.Keys, 100) + require.Len(t, response.Vals, 100) + }, + }, + "removed first key in response": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + + var leafResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) + leafResponse.Keys = leafResponse.Keys[1:] + leafResponse.Vals = leafResponse.Vals[1:] + + modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + require.NoError(t, err) + return modifiedResponse + }, + expectedErr: errInvalidRangeProof, + }, + "removed first key in response and replaced proof": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + var leafResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) + modifiedRequest := request + modifiedRequest.Start = leafResponse.Keys[1] + modifiedResponse, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 2, modifiedRequest) + require.NoError(t, err) + return modifiedResponse + }, + expectedErr: errInvalidRangeProof, + }, + "removed last key in response": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + var leafResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) + leafResponse.Keys = leafResponse.Keys[:len(leafResponse.Keys)-2] + leafResponse.Vals = leafResponse.Vals[:len(leafResponse.Vals)-2] + + modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + require.NoError(t, err) + return modifiedResponse + }, + expectedErr: errInvalidRangeProof, + }, + "removed key from middle of response": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: 
bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + var leafResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) + // Remove middle key-value pair response + leafResponse.Keys = append(leafResponse.Keys[:100], leafResponse.Keys[101:]...) + leafResponse.Vals = append(leafResponse.Vals[:100], leafResponse.Vals[101:]...) + + modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + require.NoError(t, err) + return modifiedResponse + }, + expectedErr: errInvalidRangeProof, + }, + "corrupted value in middle of response": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + var leafResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) + // Remove middle key-value pair response + leafResponse.Vals[100] = []byte("garbage value data") + + modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + require.NoError(t, err) + return modifiedResponse + }, + expectedErr: errInvalidRangeProof, + }, + "all proof keys removed from response": { + request: message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: leafsLimit, + NodeType: message.StateTrieNode, + }, + getResponse: func(t *testing.T, request message.LeafsRequest) []byte { + response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + require.NotEmpty(t, response) + + var leafResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) + // Remove the proof + leafResponse.ProofVals = nil + + modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + require.NoError(t, err) + return modifiedResponse + }, + expectedErr: errInvalidRangeProof, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + responseBytes := test.getResponse(t, test.request) + + response, _, err := parseLeafsResponse(client.codec, test.request, responseBytes) + require.ErrorIs(t, err, test.expectedErr) + if test.expectedErr != nil { + return + } + + leafsResponse, ok := response.(message.LeafsResponse) + require.True(t, ok, "expected leafs response") + test.requireResponse(t, leafsResponse) + }) + } +} + +func TestGetLeafsRetries(t *testing.T) { + trieDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil) + root, _, _ := statesynctest.GenerateTrie(t, rand.Reader, trieDB, 100_000, common.HashLength) + + handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, message.Codec, stats.NewNoopHandlerStats()) + testNetClient := &testNetwork{} + + const maxAttempts = 8 + client := NewClient(&Config{ + NetworkClient: testNetClient, + 
Codec: message.Codec, + Stats: stats.NewNoOpStats(), + StateSyncNodeIDs: nil, + BlockParser: newTestBlockParser(), + }) + + request := message.LeafsRequest{ + Root: root, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: 1024, + NodeType: message.StateTrieNode, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + goodResponse, responseErr := handler.OnLeafsRequest(ctx, ids.GenerateTestNodeID(), 1, request) + require.NoError(t, responseErr) + testNetClient.testResponse(1, nil, goodResponse) + + res, err := client.GetLeafs(ctx, request) + require.NoError(t, err) + require.Len(t, res.Keys, 1024) + require.Len(t, res.Vals, 1024) + + // Succeeds within the allotted number of attempts + invalidResponse := []byte("invalid response") + testNetClient.testResponses(nil, invalidResponse, invalidResponse, goodResponse) + + res, err = client.GetLeafs(ctx, request) + require.NoError(t, err) + require.Len(t, res.Keys, 1024) + require.Len(t, res.Vals, 1024) + + // Test that GetLeafs stops after the context is cancelled + numAttempts := 0 + testNetClient.testResponse(maxAttempts, func() { + numAttempts++ + if numAttempts >= maxAttempts { + cancel() + } + }, invalidResponse) + _, err = client.GetLeafs(ctx, request) + require.ErrorIs(t, err, context.Canceled) +} + +func TestStateSyncNodes(t *testing.T) { + testNetClient := &testNetwork{} + + stateSyncNodes := []ids.NodeID{ + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + } + client := NewClient(&Config{ + NetworkClient: testNetClient, + Codec: message.Codec, + Stats: stats.NewNoOpStats(), + StateSyncNodeIDs: stateSyncNodes, + BlockParser: newTestBlockParser(), + }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + attempt := 0 + responses := [][]byte{{1}, {2}, {3}, {4}} + testNetClient.testResponses(func() { + attempt++ + if attempt >= 4 { + cancel() + } + }, responses...) + + // send some request, doesn't matter what it is because we're testing the interaction with state sync nodes here + response, err := client.GetLeafs(ctx, message.LeafsRequest{}) + require.ErrorIs(t, err, context.Canceled) + require.Empty(t, response) + + // require all nodes were called + require.Contains(t, testNetClient.nodesRequested, stateSyncNodes[0]) + require.Contains(t, testNetClient.nodesRequested, stateSyncNodes[1]) + require.Contains(t, testNetClient.nodesRequested, stateSyncNodes[2]) + require.Contains(t, testNetClient.nodesRequested, stateSyncNodes[3]) +} diff --git a/vms/evm/sync/client/test_client.go b/vms/evm/sync/client/test_client.go new file mode 100644 index 000000000000..5658c5cfb4e8 --- /dev/null +++ b/vms/evm/sync/client/test_client.go @@ -0,0 +1,159 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
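+// This file provides TestClient, an in-process implementation of SyncClient that
+// serves requests directly from the handlers package, for use in tests.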
+ +package client + +import ( + "context" + "fmt" + "sync/atomic" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/rlp" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/evm/sync/handlers" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" +) + +var ( + _ SyncClient = (*TestClient)(nil) + _ EthBlockParser = (*testBlockParser)(nil) +) + +type TestClient struct { + codec codec.Manager + leafsHandler handlers.LeafRequestHandler + leavesReceived int32 + codesHandler *handlers.CodeRequestHandler + codeReceived int32 + blocksHandler *handlers.BlockRequestHandler + blocksReceived int32 + // GetLeafsIntercept is called on every GetLeafs request if set to a non-nil callback. + // The returned response will be returned by TestClient to the caller. + GetLeafsIntercept func(req message.LeafsRequest, res message.LeafsResponse) (message.LeafsResponse, error) + // GetCodesIntercept is called on every GetCode request if set to a non-nil callback. + // The returned response will be returned by TestClient to the caller. + GetCodeIntercept func(hashes []common.Hash, codeBytes [][]byte) ([][]byte, error) + // GetBlocksIntercept is called on every GetBlocks request if set to a non-nil callback. + // The returned response will be returned by TestClient to the caller. + GetBlocksIntercept func(blockReq message.BlockRequest, blocks types.Blocks) (types.Blocks, error) +} + +func NewTestClient( + codec codec.Manager, + leafHandler handlers.LeafRequestHandler, + codesHandler *handlers.CodeRequestHandler, + blocksHandler *handlers.BlockRequestHandler, +) *TestClient { + return &TestClient{ + codec: codec, + leafsHandler: leafHandler, + codesHandler: codesHandler, + blocksHandler: blocksHandler, + } +} + +func (ml *TestClient) GetLeafs(ctx context.Context, request message.LeafsRequest) (message.LeafsResponse, error) { + response, err := ml.leafsHandler.OnLeafsRequest(ctx, ids.GenerateTestNodeID(), 1, request) + if err != nil { + return message.LeafsResponse{}, err + } + + leafResponseIntf, numLeaves, err := parseLeafsResponse(ml.codec, request, response) + if err != nil { + return message.LeafsResponse{}, err + } + leafsResponse := leafResponseIntf.(message.LeafsResponse) + if ml.GetLeafsIntercept != nil { + leafsResponse, err = ml.GetLeafsIntercept(request, leafsResponse) + } + // Increment the number of leaves received by the test client + atomic.AddInt32(&ml.leavesReceived, int32(numLeaves)) + return leafsResponse, err +} + +func (ml *TestClient) LeavesReceived() int32 { + return atomic.LoadInt32(&ml.leavesReceived) +} + +func (ml *TestClient) GetCode(ctx context.Context, hashes []common.Hash) ([][]byte, error) { + if ml.codesHandler == nil { + panic("no code handler for test client") + } + request := message.CodeRequest{Hashes: hashes} + response, err := ml.codesHandler.OnCodeRequest(ctx, ids.GenerateTestNodeID(), 1, request) + if err != nil { + return nil, err + } + + codeBytesIntf, lenCode, err := parseCode(ml.codec, request, response) + if err != nil { + return nil, err + } + code := codeBytesIntf.([][]byte) + if ml.GetCodeIntercept != nil { + code, err = ml.GetCodeIntercept(hashes, code) + } + if err == nil { + atomic.AddInt32(&ml.codeReceived, int32(lenCode)) + } + return code, err +} + +func (ml *TestClient) CodeReceived() int32 { + return atomic.LoadInt32(&ml.codeReceived) +} + +func (ml *TestClient) GetBlocks(ctx context.Context, blockHash common.Hash, height uint64, 
numParents uint16) ([]*types.Block, error) { + if ml.blocksHandler == nil { + panic("no blocks handler for test client") + } + request := message.BlockRequest{ + Hash: blockHash, + Height: height, + Parents: numParents, + } + response, err := ml.blocksHandler.OnBlockRequest(ctx, ids.GenerateTestNodeID(), 1, request) + if err != nil { + return nil, err + } + // Actual client retries until the context is canceled. + if response == nil { + <-ctx.Done() + return nil, ctx.Err() + } + + client := &Client{blockParser: newTestBlockParser()} // Hack to avoid duplicate code + blocksRes, numBlocks, err := client.parseBlocks(ml.codec, request, response) + if err != nil { + return nil, err + } + blocks := blocksRes.(types.Blocks) + if ml.GetBlocksIntercept != nil { + blocks, err = ml.GetBlocksIntercept(request, blocks) + } + atomic.AddInt32(&ml.blocksReceived, int32(numBlocks)) + return blocks, err +} + +func (ml *TestClient) BlocksReceived() int32 { + return atomic.LoadInt32(&ml.blocksReceived) +} + +type testBlockParser struct{} + +func newTestBlockParser() *testBlockParser { + return &testBlockParser{} +} + +func (*testBlockParser) ParseEthBlock(b []byte) (*types.Block, error) { + block := new(types.Block) + if err := rlp.DecodeBytes(b, block); err != nil { + return nil, fmt.Errorf("%w: %w", errUnmarshalResponse, err) + } + + return block, nil +} diff --git a/vms/evm/sync/client/test_network.go b/vms/evm/sync/client/test_network.go new file mode 100644 index 000000000000..439bc831da14 --- /dev/null +++ b/vms/evm/sync/client/test_network.go @@ -0,0 +1,90 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package client + +import ( + "context" + "errors" + + "github.com/ava-labs/coreth/network" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/version" +) + +var _ network.SyncedNetworkClient = (*testNetwork)(nil) + +type testNetwork struct { + // captured request data + numCalls uint + + // response testing for RequestAny and Request calls + response [][]byte + callback func() // callback is called prior to processing each test call + requestErr []error + nodesRequested []ids.NodeID +} + +func (t *testNetwork) SendSyncedAppRequestAny(_ context.Context, _ *version.Application, _ []byte) ([]byte, ids.NodeID, error) { + if len(t.response) == 0 { + return nil, ids.EmptyNodeID, errors.New("no tested response to return in testNetwork") + } + + response, err := t.processTest() + return response, ids.EmptyNodeID, err +} + +func (t *testNetwork) SendSyncedAppRequest(_ context.Context, nodeID ids.NodeID, _ []byte) ([]byte, error) { + if len(t.response) == 0 { + return nil, errors.New("no tested response to return in testNetwork") + } + + t.nodesRequested = append(t.nodesRequested, nodeID) + + return t.processTest() +} + +func (t *testNetwork) processTest() ([]byte, error) { + t.numCalls++ + + if t.callback != nil { + t.callback() + } + + response := t.response[0] + if len(t.response) > 1 { + t.response = t.response[1:] + } else { + t.response = nil + } + + var err error + if len(t.requestErr) > 0 { + err = t.requestErr[0] + t.requestErr = t.requestErr[1:] + } + + return response, err +} + +func (*testNetwork) Gossip([]byte) error { + panic("not implemented") // we don't care about this function for this test +} + +func (t *testNetwork) testResponse(times uint8, callback func(), response []byte) { + t.response = make([][]byte, times) + for i := uint8(0); i < times; i++ { + t.response[i] = response + } + t.callback = 
callback + t.numCalls = 0 +} + +func (t *testNetwork) testResponses(callback func(), responses ...[]byte) { + t.response = responses + t.callback = callback + t.numCalls = 0 +} + +func (*testNetwork) TrackBandwidth(ids.NodeID, float64) {} diff --git a/vms/evm/sync/code/queue.go b/vms/evm/sync/code/queue.go new file mode 100644 index 000000000000..0e294b092322 --- /dev/null +++ b/vms/evm/sync/code/queue.go @@ -0,0 +1,222 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package code + +import ( + "errors" + "fmt" + "sync" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/libevm/options" + + "github.com/ava-labs/avalanchego/vms/evm/sync/customrawdb" +) + +const defaultQueueCapacity = 5000 + +var ( + errFailedToAddCodeHashesToQueue = errors.New("failed to add code hashes to queue") + errFailedToFinalizeCodeQueue = errors.New("failed to finalize code queue") +) + +// Queue implements the producer side of code fetching. +// It accepts code hashes, persists durable "to-fetch" markers (idempotent per hash), +// and enqueues the hashes as-is onto an internal channel consumed by the code syncer. +// The queue does not perform in-memory deduplication or local-code checks - that is +// the responsibility of the consumer. +type Queue struct { + db ethdb.Database + quit <-chan struct{} + + // `in` and `out` MUST be the same channel. We need to be able to set `in` + // to nil after closing, to avoid a send-after-close, but + // [CodeQueue.CodeHashes] MUST NOT return a nil channel otherwise consumers + // will block permanently. + in chan<- common.Hash // Invariant: open or nil, but never closed + out <-chan common.Hash // Invariant: never nil + chanLock sync.RWMutex + closeChanOnce sync.Once // See usage in [CodeQueue.closeOutChannelOnce] + + capacity int +} + +type QueueOption = options.Option[Queue] + +// WithCapacity overrides the queue buffer capacity. +func WithCapacity(n int) QueueOption { + return options.Func[Queue](func(q *Queue) { + if n > 0 { + q.capacity = n + } + }) +} + +// NewQueue creates a new code queue applying optional functional options. +// The `quit` channel, if non-nil, MUST eventually be closed to avoid leaking a +// goroutine. +func NewQueue(db ethdb.Database, quit <-chan struct{}, opts ...QueueOption) (*Queue, error) { + // Create with defaults, then apply options. + q := &Queue{ + db: db, + quit: quit, + capacity: defaultQueueCapacity, + } + options.ApplyTo(q, opts...) + + ch := make(chan common.Hash, q.capacity) + q.in = ch + q.out = ch + + if quit != nil { + // Close the output channel on early shutdown to unblock consumers. + go func() { + <-q.quit + q.closeChannelOnce() + }() + } + + // Always initialize eagerly. + if err := q.init(); err != nil { + return nil, err + } + return q, nil +} + +// CodeHashes returns the receive-only channel of code hashes to consume. +func (q *Queue) CodeHashes() <-chan common.Hash { + return q.out +} + +func (q *Queue) closeChannelOnce() bool { + var done bool + q.closeChanOnce.Do(func() { + q.chanLock.Lock() + defer q.chanLock.Unlock() + + close(q.in) + // [CodeQueue.AddCode] takes a read lock before accessing `in` and we + // want it to block instead of allowing a send-after-close. Calling + // AddCode() after Finalize() isn't valid, and calling it after `quit` + // is closed will be picked up by the `select` so a nil alternative case + // is desirable. 
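+		// Note: because AddCode sends while holding chanLock for reading, acquiring
+		// the write lock above already waited for any in-flight enqueue to finish,
+		// so no send can race the close.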
+ q.in = nil + done = true + }) + return done +} + +// AddCode persists and enqueues new code hashes. +// Persists idempotent "to-fetch" markers for all inputs and enqueues them as-is. +// Returns errAddCodeAfterFinalize after a clean finalize and errFailedToAddCodeHashesToQueue on early quit. +func (q *Queue) AddCode(codeHashes []common.Hash) error { + if len(codeHashes) == 0 { + return nil + } + + // Mark this enqueue as in-flight immediately so shutdown paths wait for us + // before closing the output channel. + q.chanLock.RLock() + defer q.chanLock.RUnlock() + if q.in == nil { + // Although this will happen anyway once the `select` is reached, + // bailing early avoids unnecessary database writes. + return errFailedToAddCodeHashesToQueue + } + + batch := q.db.NewBatch() + // Persist all input hashes as to-fetch markers. Consumer will dedupe and skip + // already-present code. Persisting all enables consumer-side retry. + // Note: markers are keyed by code hash, so repeated persists overwrite the same + // key rather than growing DB usage. The consumer deletes the marker after + // fulfilling the request (or when it detects code is already present). + for _, codeHash := range codeHashes { + customrawdb.AddCodeToFetch(batch, codeHash) + } + + if err := batch.Write(); err != nil { + return fmt.Errorf("failed to write batch of code to fetch markers due to: %w", err) + } + + for _, h := range codeHashes { + select { + case q.in <- h: // guaranteed to be open or nil, but never closed + case <-q.quit: + return errFailedToAddCodeHashesToQueue + } + } + return nil +} + +// Finalize signals that no further code hashes will be added. +// Waits for in-flight enqueues to complete, then closes the output channel. +// If the queue was already closed due to early quit, returns errFailedToFinalizeCodeQueue. +func (q *Queue) Finalize() error { + if !q.closeChannelOnce() { + return errFailedToFinalizeCodeQueue + } + return nil +} + +// init enqueues any persisted code markers found on disk. +func (q *Queue) init() error { + // Recover any persisted code markers and enqueue them. + // Note: dbCodeHashes are already present as "to-fetch" markers. addCode will + // re-persist them, which is a trivial redundancy that happens only on resume + // (e.g., after restart). We accept this to keep the code simple. + dbCodeHashes, err := recoverUnfetchedCodeHashes(q.db) + if err != nil { + return fmt.Errorf("unable to recover previous sync state: %w", err) + } + if err := q.AddCode(dbCodeHashes); err != nil { + return fmt.Errorf("unable to resume previous sync: %w", err) + } + + return nil +} + +// recoverUnfetchedCodeHashes cleans out any codeToFetch markers from the database that are no longer +// needed and returns any outstanding markers to the queue. +func recoverUnfetchedCodeHashes(db ethdb.Database) ([]common.Hash, error) { + it := customrawdb.NewCodeToFetchIterator(db) + defer it.Release() + + batch := db.NewBatch() + var codeHashes []common.Hash + + for it.Next() { + codeHash := common.BytesToHash(it.Key()[len(customrawdb.CodeToFetchPrefix):]) + + // If we already have the codeHash, delete the marker from the database and continue. + if !rawdb.HasCode(db, codeHash) { + codeHashes = append(codeHashes, codeHash) + continue + } + + customrawdb.DeleteCodeToFetch(batch, codeHash) + if batch.ValueSize() < ethdb.IdealBatchSize { + continue + } + + // Write the batch to disk if it has reached the ideal batch size. 
+ if err := batch.Write(); err != nil { + return nil, fmt.Errorf("failed to write batch removing old code markers: %w", err) + } + batch.Reset() + } + + if err := it.Error(); err != nil { + return nil, fmt.Errorf("failed to iterate code entries to fetch: %w", err) + } + + if batch.ValueSize() > 0 { + if err := batch.Write(); err != nil { + return nil, fmt.Errorf("failed to write batch removing old code markers: %w", err) + } + } + + return codeHashes, nil +} diff --git a/vms/evm/sync/code/queue_test.go b/vms/evm/sync/code/queue_test.go new file mode 100644 index 000000000000..f861d8e3c784 --- /dev/null +++ b/vms/evm/sync/code/queue_test.go @@ -0,0 +1,246 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package code + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/crypto" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + "go.uber.org/goleak" + + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/evm/sync/customrawdb" +) + +func TestCodeQueue(t *testing.T) { + hashes := make([]common.Hash, 256) + for i := range hashes { + hashes[i] = crypto.Keccak256Hash([]byte{byte(i)}) + } + + tests := []struct { + name string + alreadyToFetch set.Set[common.Hash] + alreadyHave map[common.Hash][]byte + addCode [][]common.Hash + want []common.Hash + quitInsteadOfFinalize bool + addCodeAfter []common.Hash + }{ + { + name: "multiple_calls_to_addcode", + addCode: [][]common.Hash{ + hashes[:20], + hashes[20:35], + hashes[35:42], + hashes[42:], + }, + want: hashes, + }, + { + name: "allow_duplicates", + addCode: [][]common.Hash{{hashes[0], hashes[0]}}, + want: []common.Hash{hashes[0], hashes[0]}, + }, + { + name: "AddCode_empty", + addCode: [][]common.Hash{{}}, + want: nil, + }, + { + name: "init_resumes_from_db", + alreadyToFetch: set.Of(hashes[1]), + want: []common.Hash{hashes[1]}, + }, + { + name: "deduplication_in_consumer", + alreadyHave: map[common.Hash][]byte{hashes[42]: {42}}, + // It is the consumer's responsibility, not the queue's, to check + // the database. 
+ addCode: [][]common.Hash{{hashes[42]}}, + want: []common.Hash{hashes[42]}, + }, + { + name: "external_shutdown_via_quit_channel", + quitInsteadOfFinalize: true, + addCodeAfter: []common.Hash{hashes[11]}, + want: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer goleak.VerifyNone(t, goleak.IgnoreCurrent()) + + db := rawdb.NewMemoryDatabase() + for hash, code := range tt.alreadyHave { + rawdb.WriteCode(db, hash, code) + } + for hash := range tt.alreadyToFetch { + customrawdb.AddCodeToFetch(db, hash) + } + + quit := make(chan struct{}) + q, err := NewQueue(db, quit) + require.NoError(t, err, "NewQueue()") + + recvDone := make(chan struct{}) + go func() { + for _, add := range tt.addCode { + require.NoErrorf(t, q.AddCode(add), "%T.AddCode(%v)", q, add) + } + + if tt.quitInsteadOfFinalize { + close(quit) + <-recvDone + require.ErrorIsf(t, q.AddCode(tt.addCodeAfter), errFailedToAddCodeHashesToQueue, "%T.AddCode() after `quit` channel closed", q) + } else { + require.NoErrorf(t, q.Finalize(), "%T.Finalize()", q) + // Avoid leaking the internal goroutine + close(quit) + } + + t.Run("after_quit_or_Finalize", func(t *testing.T) { + <-recvDone + ch := q.CodeHashes() + require.NotNilf(t, ch, "%T.CodeHashes()", q) + for range ch { + require.FailNowf(t, "Unexpected receive", "%T.CodeHashes()", q) + } + }) + }() + + var got []common.Hash + for h := range q.CodeHashes() { + got = append(got, h) + } + close(recvDone) + require.Emptyf(t, cmp.Diff(tt.want, got), "Diff (-want +got) of values received from %T.CodeHashes()", q) + + t.Run("restart_with_same_db", func(t *testing.T) { + q, err := NewQueue(db, nil, WithCapacity(len(tt.want))) + require.NoError(t, err, "NewQueue([reused db])") + require.NoErrorf(t, q.Finalize(), "%T.Finalize() immediately after creation", q) + + got := make(set.Set[common.Hash]) + for h := range q.CodeHashes() { + got.Add(h) + } + + // Unlike newly added code hashes, the initialisation function + // checks for existing code when recovering from the database. + // The order can't be maintained. + want := set.Of(tt.want...) + for hash := range tt.alreadyHave { + want.Remove(hash) + } + + require.ElementsMatchf(t, want.List(), got.List(), "All received on %T.CodeHashes() after restart", q) + }) + }) + } +} + +// Test that Finalize waits for in-flight AddCode calls to complete before closing the channel. +func TestCodeQueue_FinalizeWaitsForInflightAddCodeCalls(t *testing.T) { + const capacity = 1 + db := rawdb.NewMemoryDatabase() + q, err := NewQueue(db, nil, WithCapacity(capacity)) + require.NoError(t, err, "NewQueue()") + + // Prepare multiple distinct hashes to exceed the buffer and cause AddCode to block on enqueue. + hashes := make([]common.Hash, capacity+2) + for i := range hashes { + hashes[i] = crypto.Keccak256Hash([]byte(fmt.Sprintf("code-%d", i))) + } + + addDone := make(chan error, 1) + go func() { + addDone <- q.AddCode(hashes) + }() + + // Read the first enqueued hash to ensure AddCode is actively enqueuing and will block on the next send. + var got []common.Hash //nolint:prealloc + got = append(got, <-q.CodeHashes()) + + // Call Finalize concurrently - it should block until AddCode returns. + finalized := make(chan struct{}) + go func() { + require.NoError(t, q.Finalize(), "Finalize()") + close(finalized) + }() + + // Finalize should not complete yet because AddCode is still enqueuing (buffer=1 and we haven't drained). 
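+	// Only the timeout arm below is expected to fire; selecting either of the
+	// other arms means a goroutine returned too early and fails the test.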
+ select { + case <-finalized: + require.FailNow(t, "Finalize returned before in-flight AddCode completed") + case <-addDone: + require.FailNow(t, "AddCode returned before enqueuing all hashes") + case <-time.After(100 * time.Millisecond): + // TODO(powerslider) once we're using Go 1.25 and the `synctest` package + // is generally available, use it here instead of an arbitrary amount of + // time. Without this, we have no way to guarantee that Finalize() and + // AddCode() are actually blocked. + } + + // Drain remaining enqueued hashes; this will unblock AddCode so it can finish. + for h := range q.CodeHashes() { + got = append(got, h) + } + require.Equal(t, hashes, got) + + // Now AddCode should complete without error, and Finalize should return and close the channel. + require.NoError(t, <-addDone, "AddCode()") + <-finalized +} + +func TestQuitAndAddCodeRace(t *testing.T) { + { + q := new(Queue) + // Before the introduction of these fields, this test would panic. + _ = []any{&q.closeChanOnce, &q.chanLock} + } + for range 10_000 { + t.Run("", func(t *testing.T) { + t.Parallel() + + quit := make(chan struct{}) + q, err := NewQueue(rawdb.NewMemoryDatabase(), quit) + require.NoError(t, err) + + var ready, finished sync.WaitGroup + ready.Add(2) + finished.Add(2) + start := make(chan struct{}) + + go func() { + defer finished.Done() + + ready.Done() + <-start + close(quit) + }() + + go func() { + defer finished.Done() + + in := []common.Hash{{}} + ready.Done() + <-start + _ = q.AddCode(in) + }() + + ready.Wait() + close(start) + finished.Wait() + }) + } +} diff --git a/vms/evm/sync/code/syncer.go b/vms/evm/sync/code/syncer.go new file mode 100644 index 000000000000..3d5ba447b2c9 --- /dev/null +++ b/vms/evm/sync/code/syncer.go @@ -0,0 +1,197 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package code + +import ( + "context" + "fmt" + "sync" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/libevm/options" + "golang.org/x/sync/errgroup" + + syncpkg "github.com/ava-labs/avalanchego/vms/evm/sync" + statesyncclient "github.com/ava-labs/avalanchego/vms/evm/sync/client" + "github.com/ava-labs/avalanchego/vms/evm/sync/customrawdb" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" +) + +const defaultNumCodeFetchingWorkers = 5 + +var _ syncpkg.StateSyncer = (*Syncer)(nil) + +// Syncer syncs code bytes from the network in a separate thread. +// It consumes code hashes from a queue and persists code into the DB. +// Outstanding requests are tracked via durable "to-fetch" markers in the DB for recovery. +// The syncer performs in-flight deduplication and skips locally-present code before issuing requests. +type Syncer struct { + db ethdb.Database + client statesyncclient.SyncClient + // Channel of incoming code hash requests provided by the fetcher. + codeHashes <-chan common.Hash + + // Config options. + numWorkers int + codeHashesPerReq int // best-effort target size - final batch may be smaller + + // inFlight tracks code hashes currently being processed to dedupe work + // across workers and across repeated queue submissions. + inFlight sync.Map // key: common.Hash, value: struct{} +} + +// codeSyncerConfig carries construction-time options for code syncer. +type codeSyncerConfig struct { + numWorkers int + codeHashesPerReq int +} + +// SyncerOption configures CodeSyncer at construction time. 
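+// Options are applied by NewSyncer. As an illustrative sketch (where client, db,
+// and queue stand in for a statesyncclient.SyncClient, an ethdb.Database, and a
+// *Queue from this package):
+//
+//	syncer, err := NewSyncer(client, db, queue.CodeHashes(), WithNumWorkers(8))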
+type SyncerOption = options.Option[codeSyncerConfig] + +// WithNumWorkers overrides the number of concurrent workers. +func WithNumWorkers(n int) SyncerOption { + return options.Func[codeSyncerConfig](func(c *codeSyncerConfig) { + if n > 0 { + c.numWorkers = n + } + }) +} + +// WithCodeHashesPerRequest sets the best-effort target batch size per request. +// The final batch may contain fewer than the configured number if insufficient +// hashes remain when the channel is closed. +func WithCodeHashesPerRequest(n int) SyncerOption { + return options.Func[codeSyncerConfig](func(c *codeSyncerConfig) { + if n > 0 { + c.codeHashesPerReq = n + } + }) +} + +// NewSyncer allows external packages (e.g., registry wiring) to create a code syncer +// that consumes hashes from a provided fetcher queue. +func NewSyncer(client statesyncclient.SyncClient, db ethdb.Database, codeHashes <-chan common.Hash, opts ...SyncerOption) (*Syncer, error) { + cfg := codeSyncerConfig{ + numWorkers: defaultNumCodeFetchingWorkers, + codeHashesPerReq: message.MaxCodeHashesPerRequest, + } + options.ApplyTo(&cfg, opts...) + + return &Syncer{ + db: db, + client: client, + codeHashes: codeHashes, + numWorkers: cfg.numWorkers, + codeHashesPerReq: cfg.codeHashesPerReq, + }, nil +} + +// Name returns the human-readable name for this sync task. +func (*Syncer) Name() string { + return "Code Syncer" +} + +// ID returns the stable identifier for this sync task. +func (*Syncer) ID() string { + return "state_code_sync" +} + +// Sync starts the worker thread and populates the code hashes queue with active work. +// Blocks until all outstanding code requests from a previous sync have been +// fetched and the code channel has been closed, or the context is cancelled. +func (c *Syncer) Sync(ctx context.Context) error { + eg, egCtx := errgroup.WithContext(ctx) + + // Start NumCodeFetchingWorkers threads to fetch code from the network. + for range c.numWorkers { + eg.Go(func() error { return c.work(egCtx) }) + } + + return eg.Wait() +} + +// work fulfills any incoming requests from the producer channel by fetching code bytes from the network +// and fulfilling them by updating the database. +func (c *Syncer) work(ctx context.Context) error { + codeHashes := make([]common.Hash, 0, message.MaxCodeHashesPerRequest) + + for { + select { + case <-ctx.Done(): // If ctx is done, set the error to the ctx error since work has been cancelled. + return ctx.Err() + case codeHash, ok := <-c.codeHashes: + // If there are no more [codeHashes], fulfill a last code request for any [codeHashes] previously + // read from the channel, then return. + if !ok { + if len(codeHashes) > 0 { + return c.fulfillCodeRequest(ctx, codeHashes) + } + return nil + } + + // Deduplicate in-flight code hashes across workers first to avoid + // racing repeated HasCode() checks for the same hash. + if _, loaded := c.inFlight.LoadOrStore(codeHash, struct{}{}); loaded { + continue + } + + // After acquiring responsibility for this hash, re-check whether the code + // is already present locally. If so, clean up and release responsibility. + if rawdb.HasCode(c.db, codeHash) { + // Best-effort cleanup of stale marker. + batch := c.db.NewBatch() + customrawdb.DeleteCodeToFetch(batch, codeHash) + + if err := batch.Write(); err != nil { + return fmt.Errorf("failed to write batch for stale code marker: %w", err) + } + // Release in-flight ownership since no network fetch is needed. 
+				c.inFlight.Delete(codeHash)
+				continue
+			}
+
+			codeHashes = append(codeHashes, codeHash)
+			// Try to batch up to [codeHashesPerReq] code hashes into a single request when more work remains.
+			if len(codeHashes) < c.codeHashesPerReq {
+				continue
+			}
+			if err := c.fulfillCodeRequest(ctx, codeHashes); err != nil {
+				return err
+			}
+
+			// Reset the codeHashes array
+			codeHashes = codeHashes[:0]
+		}
+	}
+}
+
+// fulfillCodeRequest sends a request for [codeHashes], writes the result to the database, and
+// marks the work as complete.
+// codeHashes should not be empty or contain duplicate hashes.
+// Returns an error if one is encountered, signaling the worker thread to terminate.
+func (c *Syncer) fulfillCodeRequest(ctx context.Context, codeHashes []common.Hash) error {
+	codeByteSlices, err := c.client.GetCode(ctx, codeHashes)
+	if err != nil {
+		return err
+	}
+
+	batch := c.db.NewBatch()
+	for i, codeHash := range codeHashes {
+		customrawdb.DeleteCodeToFetch(batch, codeHash)
+		rawdb.WriteCode(batch, codeHash, codeByteSlices[i])
+	}
+
+	if err := batch.Write(); err != nil {
+		return fmt.Errorf("failed to write batch for fulfilled code requests: %w", err)
+	}
+	// After successfully committing to the database, release in-flight ownership
+	// so that subsequent work for these hashes can be considered again if needed.
+	for _, codeHash := range codeHashes {
+		c.inFlight.Delete(codeHash)
+	}
+	return nil
+}
diff --git a/vms/evm/sync/code/syncer_test.go b/vms/evm/sync/code/syncer_test.go
new file mode 100644
index 000000000000..a74318bf12fb
--- /dev/null
+++ b/vms/evm/sync/code/syncer_test.go
@@ -0,0 +1,171 @@
+// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package code
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/ava-labs/coreth/utils/utilstest"
+	"github.com/ava-labs/libevm/common"
+	"github.com/ava-labs/libevm/core/rawdb"
+	"github.com/ava-labs/libevm/crypto"
+	"github.com/ava-labs/libevm/ethdb"
+	"github.com/ava-labs/libevm/ethdb/memorydb"
+	"github.com/stretchr/testify/require"
+
+	"github.com/ava-labs/avalanchego/utils"
+	statesyncclient "github.com/ava-labs/avalanchego/vms/evm/sync/client"
+	"github.com/ava-labs/avalanchego/vms/evm/sync/customrawdb"
+	"github.com/ava-labs/avalanchego/vms/evm/sync/handlers"
+	"github.com/ava-labs/avalanchego/vms/evm/sync/message"
+	"github.com/ava-labs/avalanchego/vms/evm/sync/stats"
+)
+
+type syncerTest struct {
+	clientDB          ethdb.Database
+	queueCapacity     int
+	codeRequestHashes [][]common.Hash
+	codeByteSlices    [][]byte
+	getCodeIntercept  func(hashes []common.Hash, codeBytes [][]byte) ([][]byte, error)
+	err               error
+}
+
+func testCodeSyncer(t *testing.T, test syncerTest) {
+	// Set up serverDB
+	serverDB := memorydb.New()
+
+	codeHashes := make([]common.Hash, 0, len(test.codeByteSlices))
+	for _, codeBytes := range test.codeByteSlices {
+		codeHash := crypto.Keccak256Hash(codeBytes)
+		rawdb.WriteCode(serverDB, codeHash, codeBytes)
+		codeHashes = append(codeHashes, codeHash)
+	}
+
+	// Set up mockClient
+	codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, stats.NewNoopHandlerStats())
+	mockClient := statesyncclient.NewTestClient(message.Codec, nil, codeRequestHandler, nil)
+	mockClient.GetCodeIntercept = test.getCodeIntercept
+
+	clientDB := test.clientDB
+	if clientDB == nil {
+		clientDB = rawdb.NewMemoryDatabase()
+	}
+
+	codeQueue, err := NewQueue(
+		clientDB,
+		make(chan struct{}),
+		WithCapacity(test.queueCapacity),
+	)
+	require.NoError(t, err)
+
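+	// Hand the queue's receive channel to the syncer so it consumes the hashes added below.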
+ codeSyncer, err := NewSyncer( + mockClient, + clientDB, + codeQueue.CodeHashes(), + ) + require.NoError(t, err) + go func() { + for _, codeHashes := range test.codeRequestHashes { + if err := codeQueue.AddCode(codeHashes); err != nil { + require.ErrorIs(t, err, test.err) + } + } + if err := codeQueue.Finalize(); err != nil { + require.ErrorIs(t, err, test.err) + } + }() + + ctx, cancel := utilstest.NewTestContext(t) + t.Cleanup(cancel) + + // Run the sync and handle expected error. + err = codeSyncer.Sync(ctx) + require.ErrorIs(t, err, test.err) + if err != nil { + return // don't check the state + } + + // Assert that the client synced the code correctly. + for i, codeHash := range codeHashes { + codeBytes := rawdb.ReadCode(clientDB, codeHash) + require.Equal(t, test.codeByteSlices[i], codeBytes) + } +} + +func TestCodeSyncerSingleCodeHash(t *testing.T) { + codeBytes := utils.RandomBytes(100) + codeHash := crypto.Keccak256Hash(codeBytes) + testCodeSyncer(t, syncerTest{ + codeRequestHashes: [][]common.Hash{{codeHash}}, + codeByteSlices: [][]byte{codeBytes}, + }) +} + +func TestCodeSyncerManyCodeHashes(t *testing.T) { + numCodeSlices := 5000 + codeHashes := make([]common.Hash, 0, numCodeSlices) + codeByteSlices := make([][]byte, 0, numCodeSlices) + for i := 0; i < numCodeSlices; i++ { + codeBytes := utils.RandomBytes(100) + codeHash := crypto.Keccak256Hash(codeBytes) + codeHashes = append(codeHashes, codeHash) + codeByteSlices = append(codeByteSlices, codeBytes) + } + + testCodeSyncer(t, syncerTest{ + queueCapacity: 10, + codeRequestHashes: [][]common.Hash{codeHashes[0:100], codeHashes[100:2000], codeHashes[2000:2005], codeHashes[2005:]}, + codeByteSlices: codeByteSlices, + }) +} + +func TestCodeSyncerRequestErrors(t *testing.T) { + codeBytes := utils.RandomBytes(100) + codeHash := crypto.Keccak256Hash(codeBytes) + err := errors.New("dummy error") + testCodeSyncer(t, syncerTest{ + codeRequestHashes: [][]common.Hash{{codeHash}}, + codeByteSlices: [][]byte{codeBytes}, + getCodeIntercept: func([]common.Hash, [][]byte) ([][]byte, error) { + return nil, err + }, + err: err, + }) +} + +func TestCodeSyncerAddsInProgressCodeHashes(t *testing.T) { + codeBytes := utils.RandomBytes(100) + codeHash := crypto.Keccak256Hash(codeBytes) + clientDB := rawdb.NewMemoryDatabase() + customrawdb.AddCodeToFetch(clientDB, codeHash) + testCodeSyncer(t, syncerTest{ + clientDB: clientDB, + codeRequestHashes: nil, + codeByteSlices: [][]byte{codeBytes}, + }) +} + +func TestCodeSyncerAddsMoreInProgressThanQueueSize(t *testing.T) { + numCodeSlices := 100 + codeHashes := make([]common.Hash, 0, numCodeSlices) + codeByteSlices := make([][]byte, 0, numCodeSlices) + for i := 0; i < numCodeSlices; i++ { + codeBytes := utils.RandomBytes(100) + codeHash := crypto.Keccak256Hash(codeBytes) + codeHashes = append(codeHashes, codeHash) + codeByteSlices = append(codeByteSlices, codeBytes) + } + + db := rawdb.NewMemoryDatabase() + for _, codeHash := range codeHashes { + customrawdb.AddCodeToFetch(db, codeHash) + } + + testCodeSyncer(t, syncerTest{ + clientDB: db, + codeRequestHashes: nil, + codeByteSlices: codeByteSlices, + }) +} diff --git a/vms/evm/sync/customrawdb/accessors_snapshot_ext.go b/vms/evm/sync/customrawdb/accessors_snapshot_ext.go new file mode 100644 index 000000000000..cd1ab1a92651 --- /dev/null +++ b/vms/evm/sync/customrawdb/accessors_snapshot_ext.go @@ -0,0 +1,46 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package customrawdb + +import ( + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/log" +) + +// ReadSnapshotBlockHash retrieves the hash of the block whose state is contained in +// the persisted snapshot. +func ReadSnapshotBlockHash(db ethdb.KeyValueReader) common.Hash { + data, _ := db.Get(snapshotBlockHashKey) + if len(data) != common.HashLength { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// WriteSnapshotBlockHash stores the root of the block whose state is contained in +// the persisted snapshot. +func WriteSnapshotBlockHash(db ethdb.KeyValueWriter, blockHash common.Hash) { + if err := db.Put(snapshotBlockHashKey, blockHash[:]); err != nil { + log.Crit("Failed to store snapshot block hash", "err", err) + } +} + +// DeleteSnapshotBlockHash deletes the hash of the block whose state is contained in +// the persisted snapshot. Since snapshots are not immutable, this method can +// be used during updates, so a crash or failure will mark the entire snapshot +// invalid. +func DeleteSnapshotBlockHash(db ethdb.KeyValueWriter) { + if err := db.Delete(snapshotBlockHashKey); err != nil { + log.Crit("Failed to remove snapshot block hash", "err", err) + } +} + +// IterateAccountSnapshots returns an iterator for walking all of the accounts in the snapshot +func IterateAccountSnapshots(db ethdb.Iteratee) ethdb.Iterator { + it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil) + keyLen := len(rawdb.SnapshotAccountPrefix) + common.HashLength + return rawdb.NewKeyLengthIterator(it, keyLen) +} diff --git a/vms/evm/sync/customrawdb/accessors_state_sync.go b/vms/evm/sync/customrawdb/accessors_state_sync.go new file mode 100644 index 000000000000..67f41771c33c --- /dev/null +++ b/vms/evm/sync/customrawdb/accessors_state_sync.go @@ -0,0 +1,223 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "encoding/binary" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/log" + + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +// ReadSyncRoot reads the root corresponding to the main trie of an in-progress +// sync and returns common.Hash{} if no in-progress sync was found. +func ReadSyncRoot(db ethdb.KeyValueReader) (common.Hash, error) { + has, err := db.Has(syncRootKey) + if err != nil || !has { + return common.Hash{}, err + } + root, err := db.Get(syncRootKey) + if err != nil { + return common.Hash{}, err + } + return common.BytesToHash(root), nil +} + +// WriteSyncRoot writes root as the root of the main trie of the in-progress sync. +func WriteSyncRoot(db ethdb.KeyValueWriter, root common.Hash) error { + return db.Put(syncRootKey, root[:]) +} + +// AddCodeToFetch adds a marker that we need to fetch the code for `hash`. +func AddCodeToFetch(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Put(codeToFetchKey(hash), nil); err != nil { + log.Crit("Failed to put code to fetch", "codeHash", hash, "err", err) + } +} + +// DeleteCodeToFetch removes the marker that the code corresponding to `hash` needs to be fetched. 
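+// It is the counterpart of AddCodeToFetch and is typically called once the code
+// has been fetched and written (see the code syncer).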
+func DeleteCodeToFetch(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(codeToFetchKey(hash)); err != nil { + log.Crit("Failed to delete code to fetch", "codeHash", hash, "err", err) + } +} + +// NewCodeToFetchIterator returns a KeyLength iterator over all code +// hashes that are pending syncing. It is the caller's responsibility to +// unpack the key and call Release on the returned iterator. +func NewCodeToFetchIterator(db ethdb.Iteratee) ethdb.Iterator { + return rawdb.NewKeyLengthIterator( + db.NewIterator(CodeToFetchPrefix, nil), + codeToFetchKeyLength, + ) +} + +func codeToFetchKey(hash common.Hash) []byte { + codeToFetchKey := make([]byte, codeToFetchKeyLength) + copy(codeToFetchKey, CodeToFetchPrefix) + copy(codeToFetchKey[len(CodeToFetchPrefix):], hash[:]) + return codeToFetchKey +} + +// NewSyncSegmentsIterator returns a KeyLength iterator over all trie segments +// added for root. It is the caller's responsibility to unpack the key and call +// Release on the returned iterator. +func NewSyncSegmentsIterator(db ethdb.Iteratee, root common.Hash) ethdb.Iterator { + segmentsPrefix := make([]byte, len(syncSegmentsPrefix)+common.HashLength) + copy(segmentsPrefix, syncSegmentsPrefix) + copy(segmentsPrefix[len(syncSegmentsPrefix):], root[:]) + + return rawdb.NewKeyLengthIterator( + db.NewIterator(segmentsPrefix, nil), + syncSegmentsKeyLength, + ) +} + +// WriteSyncSegment adds a trie segment for root at the given start position. +func WriteSyncSegment(db ethdb.KeyValueWriter, root common.Hash, start common.Hash) error { + return db.Put(packSyncSegmentKey(root, start), []byte{0x01}) +} + +// ClearSyncSegments removes segment markers for root from db +func ClearSyncSegments(db ethdb.KeyValueStore, root common.Hash) error { + segmentsPrefix := make([]byte, len(syncSegmentsPrefix)+common.HashLength) + copy(segmentsPrefix, syncSegmentsPrefix) + copy(segmentsPrefix[len(syncSegmentsPrefix):], root[:]) + return clearPrefix(db, segmentsPrefix, syncSegmentsKeyLength) +} + +// ClearAllSyncSegments removes all segment markers from db +func ClearAllSyncSegments(db ethdb.KeyValueStore) error { + return clearPrefix(db, syncSegmentsPrefix, syncSegmentsKeyLength) +} + +// UnpackSyncSegmentKey returns the root and start position for a trie segment +// key returned from NewSyncSegmentsIterator. +func UnpackSyncSegmentKey(keyBytes []byte) (common.Hash, []byte) { + keyBytes = keyBytes[len(syncSegmentsPrefix):] // skip prefix + root := common.BytesToHash(keyBytes[:common.HashLength]) + start := keyBytes[common.HashLength:] + return root, start +} + +// packSyncSegmentKey packs root and account into a key for storage in db. +func packSyncSegmentKey(root common.Hash, start common.Hash) []byte { + bytes := make([]byte, syncSegmentsKeyLength) + copy(bytes, syncSegmentsPrefix) + copy(bytes[len(syncSegmentsPrefix):], root[:]) + copy(bytes[len(syncSegmentsPrefix)+common.HashLength:], start.Bytes()) + return bytes +} + +// NewSyncStorageTriesIterator returns a KeyLength iterator over all storage tries +// added for syncing (beginning at seek). It is the caller's responsibility to unpack +// the key and call Release on the returned iterator. +func NewSyncStorageTriesIterator(db ethdb.Iteratee, seek []byte) ethdb.Iterator { + return rawdb.NewKeyLengthIterator(db.NewIterator(syncStorageTriesPrefix, seek), syncStorageTriesKeyLength) +} + +// WriteSyncStorageTrie adds a storage trie for account (with the given root) to be synced. 
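+// The markers for a given root are removed with ClearSyncStorageTrie once that
+// trie has finished syncing.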
+func WriteSyncStorageTrie(db ethdb.KeyValueWriter, root common.Hash, account common.Hash) error { + return db.Put(packSyncStorageTrieKey(root, account), []byte{0x01}) +} + +// ClearSyncStorageTrie removes all storage trie accounts (with the given root) from db. +// Intended for use when the trie with root has completed syncing. +func ClearSyncStorageTrie(db ethdb.KeyValueStore, root common.Hash) error { + accountsPrefix := make([]byte, len(syncStorageTriesPrefix)+common.HashLength) + copy(accountsPrefix, syncStorageTriesPrefix) + copy(accountsPrefix[len(syncStorageTriesPrefix):], root[:]) + return clearPrefix(db, accountsPrefix, syncStorageTriesKeyLength) +} + +// ClearAllSyncStorageTries removes all storage tries added for syncing from db +func ClearAllSyncStorageTries(db ethdb.KeyValueStore) error { + return clearPrefix(db, syncStorageTriesPrefix, syncStorageTriesKeyLength) +} + +// UnpackSyncStorageTrieKey returns the root and account for a storage trie +// key returned from NewSyncStorageTriesIterator. +func UnpackSyncStorageTrieKey(keyBytes []byte) (common.Hash, common.Hash) { + keyBytes = keyBytes[len(syncStorageTriesPrefix):] // skip prefix + root := common.BytesToHash(keyBytes[:common.HashLength]) + account := common.BytesToHash(keyBytes[common.HashLength:]) + return root, account +} + +// packSyncStorageTrieKey packs root and account into a key for storage in db. +func packSyncStorageTrieKey(root common.Hash, account common.Hash) []byte { + bytes := make([]byte, 0, syncStorageTriesKeyLength) + bytes = append(bytes, syncStorageTriesPrefix...) + bytes = append(bytes, root[:]...) + bytes = append(bytes, account[:]...) + return bytes +} + +// WriteSyncPerformed logs an entry in `db` indicating the VM state synced to `blockNumber`. +func WriteSyncPerformed(db ethdb.KeyValueWriter, blockNumber uint64) error { + syncPerformedPrefixLen := len(syncPerformedPrefix) + bytes := make([]byte, syncPerformedPrefixLen+wrappers.LongLen) + copy(bytes[:syncPerformedPrefixLen], syncPerformedPrefix) + binary.BigEndian.PutUint64(bytes[syncPerformedPrefixLen:], blockNumber) + return db.Put(bytes, []byte{0x01}) +} + +// NewSyncPerformedIterator returns an iterator over all block numbers the VM +// has state synced to. +func NewSyncPerformedIterator(db ethdb.Iteratee) ethdb.Iterator { + return rawdb.NewKeyLengthIterator(db.NewIterator(syncPerformedPrefix, nil), syncPerformedKeyLength) +} + +// UnpackSyncPerformedKey returns the block number from keys the iterator returned +// from NewSyncPerformedIterator. +func UnpackSyncPerformedKey(key []byte) uint64 { + return binary.BigEndian.Uint64(key[len(syncPerformedPrefix):]) +} + +// GetLatestSyncPerformed returns the latest block number state synced performed to. +func GetLatestSyncPerformed(db ethdb.Iteratee) uint64 { + it := NewSyncPerformedIterator(db) + defer it.Release() + + var latestSyncPerformed uint64 + for it.Next() { + syncPerformed := UnpackSyncPerformedKey(it.Key()) + if syncPerformed > latestSyncPerformed { + latestSyncPerformed = syncPerformed + } + } + return latestSyncPerformed +} + +// clearPrefix removes all keys in db that begin with prefix and match an +// expected key length. `keyLen` must include the length of the prefix. 
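+// For example, ClearAllSyncSegments calls clearPrefix(db, syncSegmentsPrefix, syncSegmentsKeyLength).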
+func clearPrefix(db ethdb.KeyValueStore, prefix []byte, keyLen int) error { + it := db.NewIterator(prefix, nil) + defer it.Release() + + batch := db.NewBatch() + for it.Next() { + key := common.CopyBytes(it.Key()) + if len(key) != keyLen { + continue + } + if err := batch.Delete(key); err != nil { + return err + } + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + return err + } + batch.Reset() + } + } + if err := it.Error(); err != nil { + return err + } + return batch.Write() +} diff --git a/vms/evm/sync/customrawdb/accessors_state_sync_test.go b/vms/evm/sync/customrawdb/accessors_state_sync_test.go new file mode 100644 index 000000000000..9c689a1cf06f --- /dev/null +++ b/vms/evm/sync/customrawdb/accessors_state_sync_test.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "slices" + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/stretchr/testify/require" +) + +func TestClearPrefix(t *testing.T) { + db := rawdb.NewMemoryDatabase() + // add a key that should be cleared + require.NoError(t, WriteSyncSegment(db, common.Hash{1}, common.Hash{})) + + // add a key that should not be cleared + key := slices.Concat(syncSegmentsPrefix, []byte("foo")) + require.NoError(t, db.Put(key, []byte("bar"))) + + require.NoError(t, ClearAllSyncSegments(db)) + + count := 0 + it := db.NewIterator(syncSegmentsPrefix, nil) + defer it.Release() + for it.Next() { + count++ + } + require.NoError(t, it.Error()) + require.Equal(t, 1, count) +} diff --git a/vms/evm/sync/customrawdb/database_ext.go b/vms/evm/sync/customrawdb/database_ext.go new file mode 100644 index 000000000000..c301a5bfff54 --- /dev/null +++ b/vms/evm/sync/customrawdb/database_ext.go @@ -0,0 +1,80 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "bytes" + "fmt" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" +) + +// InspectDatabase traverses the entire database and checks the size +// of all different categories of data. +func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { + type stat = rawdb.DatabaseStat + stats := []struct { + name string + keyLen int + keyPrefix []byte + stat *stat + }{ + {"Trie segments", syncSegmentsKeyLength, syncSegmentsPrefix, &stat{}}, + {"Storage tries to fetch", syncStorageTriesKeyLength, syncStorageTriesPrefix, &stat{}}, + {"Code to fetch", codeToFetchKeyLength, CodeToFetchPrefix, &stat{}}, + {"Block numbers synced to", syncPerformedKeyLength, syncPerformedPrefix, &stat{}}, + } + + options := []rawdb.InspectDatabaseOption{ + rawdb.WithDatabaseMetadataKeys(func(key []byte) bool { + return bytes.Equal(key, snapshotBlockHashKey) || + bytes.Equal(key, syncRootKey) + }), + rawdb.WithDatabaseStatRecorder(func(key []byte, size common.StorageSize) bool { + for _, s := range stats { + if len(key) == s.keyLen && bytes.HasPrefix(key, s.keyPrefix) { + s.stat.Add(size) + return true + } + } + return false + }), + rawdb.WithDatabaseStatsTransformer(func(rows [][]string) [][]string { + newRows := make([][]string, 0, len(rows)) + for _, row := range rows { + switch db, cat := row[0], row[1]; { + // Discard rows specific to libevm (geth) but irrelevant to coreth. 
+ case db == "Key-Value store" && (cat == "Difficulties" || cat == "Beacon sync headers"): + case db == "Ancient store (Chain)": + default: + newRows = append(newRows, row) + } + } + for _, s := range stats { + newRows = append(newRows, []string{"State sync", s.name, s.stat.Size(), s.stat.Count()}) + } + return newRows + }), + } + + return rawdb.InspectDatabase(db, keyPrefix, keyStart, options...) +} + +// ParseStateSchemeExt parses the state scheme from the provided string. +func ParseStateSchemeExt(provided string, disk ethdb.Database) (string, error) { + // Check for custom scheme + if provided == FirewoodScheme { + if diskScheme := rawdb.ReadStateScheme(disk); diskScheme != "" { + // Valid scheme on disk mismatched + return "", fmt.Errorf("state scheme %s already set on disk, can't use Firewood", diskScheme) + } + // If no conflicting scheme is found, is valid. + return FirewoodScheme, nil + } + + // Check for valid eth scheme + return rawdb.ParseStateScheme(provided, disk) +} diff --git a/vms/evm/sync/customrawdb/database_ext_test.go b/vms/evm/sync/customrawdb/database_ext_test.go new file mode 100644 index 000000000000..2b786b7f8177 --- /dev/null +++ b/vms/evm/sync/customrawdb/database_ext_test.go @@ -0,0 +1,135 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "fmt" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" +) + +func ExampleInspectDatabase() { + db := &stubDatabase{ + iterator: &stubIterator{}, + } + + // Extra metadata keys: (17 + 32) + (12 + 32) = 93 bytes + WriteSnapshotBlockHash(db, common.Hash{}) + rawdb.WriteSnapshotRoot(db, common.Hash{}) + // Trie segments: (77 + 2) + 1 = 80 bytes + _ = WriteSyncSegment(db, common.Hash{}, common.Hash{}) + // Storage tries to fetch: 76 + 1 = 77 bytes + _ = WriteSyncStorageTrie(db, common.Hash{}, common.Hash{}) + // Code to fetch: 34 + 0 = 34 bytes + AddCodeToFetch(db, common.Hash{}) + // Block numbers synced to: 22 + 1 = 23 bytes + _ = WriteSyncPerformed(db, 0) + + keyPrefix := []byte(nil) + keyStart := []byte(nil) + + err := InspectDatabase(db, keyPrefix, keyStart) + if err != nil { + fmt.Println(err) + } + // Output: + // +-----------------+-------------------------+----------+-------+ + // | DATABASE | CATEGORY | SIZE | ITEMS | + // +-----------------+-------------------------+----------+-------+ + // | Key-Value store | Headers | 0.00 B | 0 | + // | Key-Value store | Bodies | 0.00 B | 0 | + // | Key-Value store | Receipt lists | 0.00 B | 0 | + // | Key-Value store | Block number->hash | 0.00 B | 0 | + // | Key-Value store | Block hash->number | 0.00 B | 0 | + // | Key-Value store | Transaction index | 0.00 B | 0 | + // | Key-Value store | Bloombit index | 0.00 B | 0 | + // | Key-Value store | Contract codes | 0.00 B | 0 | + // | Key-Value store | Hash trie nodes | 0.00 B | 0 | + // | Key-Value store | Path trie state lookups | 0.00 B | 0 | + // | Key-Value store | Path trie account nodes | 0.00 B | 0 | + // | Key-Value store | Path trie storage nodes | 0.00 B | 0 | + // | Key-Value store | Trie preimages | 0.00 B | 0 | + // | Key-Value store | Account snapshot | 0.00 B | 0 | + // | Key-Value store | Storage snapshot | 0.00 B | 0 | + // | Key-Value store | Clique snapshots | 0.00 B | 0 | + // | Key-Value store | Singleton metadata | 93.00 B | 2 | + // | Light client | CHT trie nodes | 0.00 B | 0 | + // | Light client | Bloom trie nodes | 0.00 B | 0 | + // | State 
sync | Trie segments | 78.00 B | 1 | + // | State sync | Storage tries to fetch | 77.00 B | 1 | + // | State sync | Code to fetch | 34.00 B | 1 | + // | State sync | Block numbers synced to | 23.00 B | 1 | + // +-----------------+-------------------------+----------+-------+ + // | TOTAL | 305.00 B | | + // +-----------------+-------------------------+----------+-------+ +} + +type stubDatabase struct { + ethdb.Database + iterator *stubIterator +} + +func (s *stubDatabase) NewIterator(_, _ []byte) ethdb.Iterator { + return s.iterator +} + +// AncientSize is used in [InspectDatabase] to determine the ancient sizes. +func (*stubDatabase) AncientSize(string) (uint64, error) { + return 0, nil +} + +func (*stubDatabase) Ancients() (uint64, error) { + return 0, nil +} + +func (*stubDatabase) Tail() (uint64, error) { + return 0, nil +} + +func (s *stubDatabase) Put(key, value []byte) error { + s.iterator.kvs = append(s.iterator.kvs, keyValue{key: key, value: value}) + return nil +} + +func (*stubDatabase) Get([]byte) ([]byte, error) { + return nil, nil +} + +func (*stubDatabase) ReadAncients(func(ethdb.AncientReaderOp) error) error { + return nil +} + +type stubIterator struct { + ethdb.Iterator + i int // see [stubIterator.pos] + kvs []keyValue +} + +type keyValue struct { + key []byte + value []byte +} + +// pos returns the true iterator position, which is otherwise off by one because +// Next() is called _before_ usage. +func (s *stubIterator) pos() int { + return s.i - 1 +} + +func (s *stubIterator) Next() bool { + s.i++ + return s.pos() < len(s.kvs) +} + +func (*stubIterator) Release() {} + +func (s *stubIterator) Key() []byte { + return s.kvs[s.pos()].key +} + +func (s *stubIterator) Value() []byte { + return s.kvs[s.pos()].value +} diff --git a/vms/evm/sync/customrawdb/schema_ext.go b/vms/evm/sync/customrawdb/schema_ext.go new file mode 100644 index 000000000000..ed3f66f6200c --- /dev/null +++ b/vms/evm/sync/customrawdb/schema_ext.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "github.com/ava-labs/libevm/common" + + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +var ( + // snapshotBlockHashKey tracks the block hash of the last snapshot. + snapshotBlockHashKey = []byte("SnapshotBlockHash") + // offlinePruningKey tracks runs of offline pruning + offlinePruningKey = []byte("OfflinePruning") + // populateMissingTriesKey tracks runs of trie backfills + populateMissingTriesKey = []byte("PopulateMissingTries") + // pruningDisabledKey tracks whether the node has ever run in archival mode + // to ensure that a user does not accidentally corrupt an archival node. + pruningDisabledKey = []byte("PruningDisabled") + // acceptorTipKey tracks the tip of the last accepted block that has been fully processed. + acceptorTipKey = []byte("AcceptorTipKey") +) + +// State sync progress keys and prefixes +var ( + // syncRootKey indicates the root of the main account trie currently being synced + syncRootKey = []byte("sync_root") + // syncStorageTriesPrefix is the prefix for storage tries that need to be fetched. + // syncStorageTriesPrefix + trie root + account hash: indicates a storage trie must be fetched for the account + syncStorageTriesPrefix = []byte("sync_storage") + // syncSegmentsPrefix is the prefix for segments. 
+ // syncSegmentsPrefix + trie root + 32-byte start key: indicates the trie at root has a segment starting at the specified key + syncSegmentsPrefix = []byte("sync_segments") + // CodeToFetchPrefix is the prefix for code hashes that need to be fetched. + // CodeToFetchPrefix + code hash -> empty value tracks the outstanding code hashes we need to fetch. + CodeToFetchPrefix = []byte("CP") +) + +// State sync progress key lengths +var ( + syncStorageTriesKeyLength = len(syncStorageTriesPrefix) + 2*common.HashLength + syncSegmentsKeyLength = len(syncSegmentsPrefix) + 2*common.HashLength + codeToFetchKeyLength = len(CodeToFetchPrefix) + common.HashLength +) + +// State sync metadata +var ( + syncPerformedPrefix = []byte("sync_performed") + // syncPerformedKeyLength is the length of the key for the sync performed metadata key, + // and is equal to [syncPerformedPrefix] + block number as uint64. + syncPerformedKeyLength = len(syncPerformedPrefix) + wrappers.LongLen +) + +var FirewoodScheme = "firewood" diff --git a/vms/evm/sync/handlers/block_request.go b/vms/evm/sync/handlers/block_request.go new file mode 100644 index 000000000000..0abf1d287be3 --- /dev/null +++ b/vms/evm/sync/handlers/block_request.go @@ -0,0 +1,119 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package handlers + +import ( + "bytes" + "context" + "time" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/log" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" + "github.com/ava-labs/avalanchego/vms/evm/sync/stats" +) + +const ( + // parentLimit specifies how many parents to retrieve and send given a starting hash + // This value overrides any specified limit in blockRequest.Parents if it is greater than this value + parentLimit = uint16(64) + targetMessageByteSize = units.MiB - units.KiB // Target total block bytes slightly under original network codec max size of 1MB +) + +// BlockRequestHandler is a peer.RequestHandler for message.BlockRequest +// serving requested blocks starting at specified hash +type BlockRequestHandler struct { + stats stats.BlockRequestHandlerStats + blockProvider BlockProvider + codec codec.Manager +} + +func NewBlockRequestHandler(blockProvider BlockProvider, codec codec.Manager, handlerStats stats.BlockRequestHandlerStats) *BlockRequestHandler { + return &BlockRequestHandler{ + blockProvider: blockProvider, + codec: codec, + stats: handlerStats, + } +} + +// OnBlockRequest handles incoming message.BlockRequest, returning blocks as requested +// Never returns error +// Expects returned errors to be treated as FATAL +// Returns empty response or subset of requested blocks if ctx expires during fetch +// Assumes ctx is active +func (b *BlockRequestHandler) OnBlockRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, blockRequest message.BlockRequest) ([]byte, error) { + startTime := time.Now() + b.stats.IncBlockRequest() + + // override given Parents limit if it is greater than parentLimit + parents := blockRequest.Parents + if parents > parentLimit { + parents = parentLimit + } + blocks := make([][]byte, 0, parents) + totalBytes := 0 + + // ensure metrics are captured properly on all return paths + defer func() { + b.stats.UpdateBlockRequestProcessingTime(time.Since(startTime)) + b.stats.UpdateBlocksReturned(uint16(len(blocks))) + }() + + hash := blockRequest.Hash + 
height := blockRequest.Height + for i := 0; i < int(parents); i++ { + // we return whatever we have until ctx errors, limit is exceeded, or we reach the genesis block + // this will happen either when the ctx is cancelled or we hit the ctx deadline + if ctx.Err() != nil { + break + } + + if (hash == common.Hash{}) { + break + } + + block := b.blockProvider.GetBlock(hash, height) + if block == nil { + b.stats.IncMissingBlockHash() + break + } + + buf := new(bytes.Buffer) + if err := block.EncodeRLP(buf); err != nil { + log.Error("failed to RLP encode block", "hash", block.Hash(), "height", block.NumberU64(), "err", err) + return nil, nil + } + + if buf.Len()+totalBytes > targetMessageByteSize && len(blocks) > 0 { + log.Debug("Skipping block due to max total bytes size", "totalBlockDataSize", totalBytes, "blockSize", buf.Len(), "maxTotalBytesSize", targetMessageByteSize) + break + } + + blocks = append(blocks, buf.Bytes()) + totalBytes += buf.Len() + hash = block.ParentHash() + height-- + } + + if len(blocks) == 0 { + // drop this request + log.Debug("no requested blocks found, dropping request", "nodeID", nodeID, "requestID", requestID, "hash", blockRequest.Hash, "parents", blockRequest.Parents) + return nil, nil + } + + response := message.BlockResponse{ + Blocks: blocks, + } + responseBytes, err := b.codec.Marshal(message.Version, response) + if err != nil { + log.Error("failed to marshal BlockResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "hash", blockRequest.Hash, "parents", blockRequest.Parents, "blocksLen", len(response.Blocks), "err", err) + return nil, nil + } + + return responseBytes, nil +} diff --git a/vms/evm/sync/handlers/block_request_test.go b/vms/evm/sync/handlers/block_request_test.go new file mode 100644 index 000000000000..c24e563f0ae9 --- /dev/null +++ b/vms/evm/sync/handlers/block_request_test.go @@ -0,0 +1,265 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package handlers + +import ( + "context" + "math/big" + "testing" + + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/crypto" + "github.com/ava-labs/libevm/rlp" + "github.com/ava-labs/libevm/triedb" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" + "github.com/ava-labs/avalanchego/vms/evm/sync/statesynctest" + "github.com/ava-labs/avalanchego/vms/evm/sync/stats" +) + +type blockRequestTest struct { + name string + + // starting block, specify either Index or (hash+height) + startBlockIndex int + startBlockHash common.Hash + startBlockHeight uint64 + + requestedParents uint16 + expectedBlocks int + expectNilResponse bool + assertResponse func(t testing.TB, stats *statesynctest.TestHandlerStats, b []byte) +} + +func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks []*types.Block) { + testHandlerStats := &statesynctest.TestHandlerStats{} + + // convert into map + blocksDB := make(map[common.Hash]*types.Block, len(blocks)) + for _, blk := range blocks { + blocksDB[blk.Hash()] = blk + } + blockProvider := &TestBlockProvider{ + GetBlockFn: func(hash common.Hash, height uint64) *types.Block { + blk, ok := blocksDB[hash] + if !ok || blk.NumberU64() != height { + return nil + } + return blk + }, + } + blockRequestHandler := NewBlockRequestHandler(blockProvider, message.Codec, testHandlerStats) + + var blockRequest message.BlockRequest + if test.startBlockHash != (common.Hash{}) { + blockRequest.Hash = test.startBlockHash + blockRequest.Height = test.startBlockHeight + } else { + startingBlock := blocks[test.startBlockIndex] + blockRequest.Hash = startingBlock.Hash() + blockRequest.Height = startingBlock.NumberU64() + } + blockRequest.Parents = test.requestedParents + + responseBytes, err := blockRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, blockRequest) + require.NoError(t, err) + if test.assertResponse != nil { + test.assertResponse(t, testHandlerStats, responseBytes) + } + + if test.expectNilResponse { + require.Nil(t, responseBytes) + return + } + + require.NotEmpty(t, responseBytes) + + var response message.BlockResponse + _, err = message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) + require.Len(t, response.Blocks, test.expectedBlocks) + + for _, blockBytes := range response.Blocks { + block := new(types.Block) + require.NoError(t, rlp.DecodeBytes(blockBytes, block)) + require.GreaterOrEqual(t, test.startBlockIndex, 0) + require.Equal(t, blocks[test.startBlockIndex].Hash(), block.Hash()) + test.startBlockIndex-- + } + testHandlerStats.Reset() +} + +func TestBlockRequestHandler(t *testing.T) { + gspec := &core.Genesis{ + Config: params.TestChainConfig, + } + memdb := rawdb.NewMemoryDatabase() + tdb := triedb.NewDatabase(memdb, nil) + genesis := gspec.MustCommit(memdb, tdb) + engine := dummy.NewETHFaker() + blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 96, 0, func(_ int, _ *core.BlockGen) {}) + require.NoError(t, err) + require.Len(t, blocks, 96) + + tests := []blockRequestTest{ + { + name: "handler_returns_blocks_as_requested", + startBlockIndex: 64, + requestedParents: 32, + expectedBlocks: 32, + }, + { + name: 
"handler_caps_blocks_parent_limit", + startBlockIndex: 95, + requestedParents: 96, + expectedBlocks: 64, + }, + { + name: "handler_handles_genesis", + startBlockIndex: 0, + requestedParents: 64, + expectedBlocks: 1, + }, + { + name: "handler_unknown_block", + startBlockHash: common.BytesToHash([]byte("some block pls k thx bye")), + startBlockHeight: 1_000_000, + requestedParents: 64, + expectNilResponse: true, + assertResponse: func(t testing.TB, testHandlerStats *statesynctest.TestHandlerStats, _ []byte) { + require.Equal(t, uint32(1), testHandlerStats.MissingBlockHashCount) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + executeBlockRequestTest(t, test, blocks) + }) + } +} + +func TestBlockRequestHandlerLargeBlocks(t *testing.T) { + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + funds = big.NewInt(1000000000000000000) + gspec = &core.Genesis{ + Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, + Alloc: types.GenesisAlloc{addr1: {Balance: funds}}, + } + signer = types.LatestSigner(gspec.Config) + ) + memdb := rawdb.NewMemoryDatabase() + tdb := triedb.NewDatabase(memdb, nil) + genesis := gspec.MustCommit(memdb, tdb) + engine := dummy.NewETHFaker() + blocks, _, err := core.GenerateChain(gspec.Config, genesis, engine, memdb, 96, 0, func(i int, b *core.BlockGen) { + var data []byte + switch { + case i <= 32: + data = make([]byte, units.MiB) + default: + data = make([]byte, units.MiB/16) + } + tx, err := types.SignTx(types.NewTransaction(b.TxNonce(addr1), addr1, big.NewInt(10000), 4_215_304, nil, data), signer, key1) + require.NoError(t, err) + b.AddTx(tx) + }) + require.NoError(t, err) + require.Len(t, blocks, 96) + + tests := []blockRequestTest{ + { + name: "handler_returns_blocks_as_requested", + startBlockIndex: 64, + requestedParents: 10, + expectedBlocks: 10, + }, + { + name: "handler_caps_blocks_size_limit", + startBlockIndex: 64, + requestedParents: 16, + expectedBlocks: 15, + }, + { + name: "handler_caps_blocks_size_limit_on_first_block", + startBlockIndex: 32, + requestedParents: 10, + expectedBlocks: 1, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + executeBlockRequestTest(t, test, blocks) + }) + } +} + +func TestBlockRequestHandlerCtxExpires(t *testing.T) { + gspec := &core.Genesis{ + Config: params.TestChainConfig, + } + memdb := rawdb.NewMemoryDatabase() + tdb := triedb.NewDatabase(memdb, nil) + genesis := gspec.MustCommit(memdb, tdb) + engine := dummy.NewETHFaker() + blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 11, 0, func(_ int, _ *core.BlockGen) {}) + require.NoError(t, err) + + require.Len(t, blocks, 11) + + // convert into map + blocksDB := make(map[common.Hash]*types.Block, 11) + for _, blk := range blocks { + blocksDB[blk.Hash()] = blk + } + + cancelAfterNumRequests := 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + blockRequestCallCount := 0 + blockProvider := &TestBlockProvider{ + GetBlockFn: func(hash common.Hash, height uint64) *types.Block { + blockRequestCallCount++ + // cancel ctx after the 2nd call to simulate ctx expiring due to deadline exceeding + if blockRequestCallCount >= cancelAfterNumRequests { + cancel() + } + blk, ok := blocksDB[hash] + if !ok || blk.NumberU64() != height { + return nil + } + return blk + }, + } + blockRequestHandler := NewBlockRequestHandler(blockProvider, message.Codec, 
stats.NewNoopHandlerStats()) + + responseBytes, err := blockRequestHandler.OnBlockRequest(ctx, ids.GenerateTestNodeID(), 1, message.BlockRequest{ + Hash: blocks[10].Hash(), + Height: blocks[10].NumberU64(), + Parents: uint16(8), + }) + require.NoError(t, err) + require.NotEmpty(t, responseBytes) + + var response message.BlockResponse + _, err = message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) + // requested 8 blocks, received cancelAfterNumRequests because of timeout + require.Len(t, response.Blocks, cancelAfterNumRequests) + + for i, blockBytes := range response.Blocks { + block := new(types.Block) + require.NoError(t, rlp.DecodeBytes(blockBytes, block)) + require.Equal(t, blocks[len(blocks)-i-1].Hash(), block.Hash()) + } +} diff --git a/vms/evm/sync/handlers/code_request.go b/vms/evm/sync/handlers/code_request.go new file mode 100644 index 000000000000..1e47ea96e4b5 --- /dev/null +++ b/vms/evm/sync/handlers/code_request.go @@ -0,0 +1,94 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package handlers + +import ( + "context" + "time" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/log" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" + "github.com/ava-labs/avalanchego/vms/evm/sync/stats" +) + +// CodeRequestHandler is a peer.RequestHandler for message.CodeRequest +// serving requested contract code bytes +type CodeRequestHandler struct { + codeReader ethdb.KeyValueReader + codec codec.Manager + stats stats.CodeRequestHandlerStats +} + +func NewCodeRequestHandler(codeReader ethdb.KeyValueReader, codec codec.Manager, stats stats.CodeRequestHandlerStats) *CodeRequestHandler { + handler := &CodeRequestHandler{ + codeReader: codeReader, + codec: codec, + stats: stats, + } + return handler +} + +// OnCodeRequest handles request to retrieve contract code by its hash in message.CodeRequest +// Never returns error +// Returns nothing if code hash is not found +// Expects returned errors to be treated as FATAL +// Assumes ctx is active +func (n *CodeRequestHandler) OnCodeRequest(_ context.Context, nodeID ids.NodeID, requestID uint32, codeRequest message.CodeRequest) ([]byte, error) { + startTime := time.Now() + n.stats.IncCodeRequest() + + // always report code read time metric + defer func() { + n.stats.UpdateCodeReadTime(time.Since(startTime)) + }() + + if len(codeRequest.Hashes) > message.MaxCodeHashesPerRequest { + n.stats.IncTooManyHashesRequested() + log.Debug("too many hashes requested, dropping request", "nodeID", nodeID, "requestID", requestID, "numHashes", len(codeRequest.Hashes)) + return nil, nil + } + if !isUnique(codeRequest.Hashes) { + n.stats.IncDuplicateHashesRequested() + log.Debug("duplicate code hashes requested, dropping request", "nodeID", nodeID, "requestID", requestID) + return nil, nil + } + + codeBytes := make([][]byte, len(codeRequest.Hashes)) + totalBytes := 0 + for i, hash := range codeRequest.Hashes { + codeBytes[i] = rawdb.ReadCode(n.codeReader, hash) + if len(codeBytes[i]) == 0 { + n.stats.IncMissingCodeHash() + log.Debug("requested code not found, dropping request", "nodeID", nodeID, "requestID", requestID, "hash", hash) + return nil, nil + } + totalBytes += len(codeBytes[i]) + } + + codeResponse := message.CodeResponse{Data: codeBytes} + responseBytes, err := 
n.codec.Marshal(message.Version, codeResponse) + if err != nil { + log.Error("could not marshal CodeResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "request", codeRequest, "err", err) + return nil, nil + } + n.stats.UpdateCodeBytesReturned(uint32(totalBytes)) + return responseBytes, nil +} + +func isUnique(hashes []common.Hash) bool { + seen := make(map[common.Hash]struct{}) + for _, hash := range hashes { + if _, found := seen[hash]; found { + return false + } + seen[hash] = struct{}{} + } + return true +} diff --git a/vms/evm/sync/handlers/code_request_test.go b/vms/evm/sync/handlers/code_request_test.go new file mode 100644 index 000000000000..b3d343f15bb8 --- /dev/null +++ b/vms/evm/sync/handlers/code_request_test.go @@ -0,0 +1,112 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package handlers + +import ( + "context" + "crypto/rand" + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/crypto" + "github.com/ava-labs/libevm/ethdb/memorydb" + ethparams "github.com/ava-labs/libevm/params" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" + "github.com/ava-labs/avalanchego/vms/evm/sync/statesynctest" +) + +func TestCodeRequestHandler(t *testing.T) { + database := memorydb.New() + + codeBytes := []byte("some code goes here") + codeHash := crypto.Keccak256Hash(codeBytes) + rawdb.WriteCode(database, codeHash, codeBytes) + + maxSizeCodeBytes := make([]byte, ethparams.MaxCodeSize) + n, err := rand.Read(maxSizeCodeBytes) + require.NoError(t, err) + require.Equal(t, ethparams.MaxCodeSize, n) + maxSizeCodeHash := crypto.Keccak256Hash(maxSizeCodeBytes) + rawdb.WriteCode(database, maxSizeCodeHash, maxSizeCodeBytes) + + testHandlerStats := &statesynctest.TestHandlerStats{} + codeRequestHandler := NewCodeRequestHandler(database, message.Codec, testHandlerStats) + + tests := map[string]struct { + setup func() (request message.CodeRequest, expectedCodeResponse [][]byte) + verifyStats func(t *testing.T) + }{ + "normal": { + setup: func() (request message.CodeRequest, expectedCodeResponse [][]byte) { + return message.CodeRequest{ + Hashes: []common.Hash{codeHash}, + }, [][]byte{codeBytes} + }, + verifyStats: func(t *testing.T) { + require.Equal(t, uint32(1), testHandlerStats.CodeRequestCount) + require.Equal(t, uint32(len(codeBytes)), testHandlerStats.CodeBytesReturnedSum) + }, + }, + "duplicate hashes": { + setup: func() (request message.CodeRequest, expectedCodeResponse [][]byte) { + return message.CodeRequest{ + Hashes: []common.Hash{codeHash, codeHash}, + }, nil + }, + verifyStats: func(t *testing.T) { + require.Equal(t, uint32(1), testHandlerStats.DuplicateHashesRequested) + }, + }, + "too many hashes": { + setup: func() (request message.CodeRequest, expectedCodeResponse [][]byte) { + return message.CodeRequest{ + Hashes: []common.Hash{{1}, {2}, {3}, {4}, {5}, {6}}, + }, nil + }, + verifyStats: func(t *testing.T) { + require.Equal(t, uint32(1), testHandlerStats.TooManyHashesRequested) + }, + }, + "max size code handled": { + setup: func() (request message.CodeRequest, expectedCodeResponse [][]byte) { + return message.CodeRequest{ + Hashes: []common.Hash{maxSizeCodeHash}, + }, [][]byte{maxSizeCodeBytes} + }, + verifyStats: func(t *testing.T) { + require.Equal(t, uint32(1), testHandlerStats.CodeRequestCount) + require.Equal(t, 
uint32(ethparams.MaxCodeSize), testHandlerStats.CodeBytesReturnedSum) + }, + }, + } + + for name, test := range tests { + // Reset stats before each test + testHandlerStats.Reset() + + t.Run(name, func(t *testing.T) { + request, expectedResponse := test.setup() + responseBytes, err := codeRequestHandler.OnCodeRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) + require.NoError(t, err) + + // If the expected response is empty, require that the handler returns an empty response and return early. + if len(expectedResponse) == 0 { + require.Empty(t, responseBytes, "expected response to be empty") + return + } + var response message.CodeResponse + _, err = message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) + require.Len(t, response.Data, len(expectedResponse)) + for i, code := range expectedResponse { + require.Equal(t, code, response.Data[i], "code bytes mismatch at index %d", i) + } + test.verifyStats(t) + }) + } +} diff --git a/vms/evm/sync/handlers/handler.go b/vms/evm/sync/handlers/handler.go new file mode 100644 index 000000000000..967bcb8e3624 --- /dev/null +++ b/vms/evm/sync/handlers/handler.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package handlers + +import ( + "github.com/ava-labs/coreth/core/state/snapshot" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/types" +) + +type BlockProvider interface { + GetBlock(common.Hash, uint64) *types.Block +} + +type SnapshotProvider interface { + Snapshots() *snapshot.Tree +} + +type SyncDataProvider interface { + BlockProvider + SnapshotProvider +} diff --git a/vms/evm/sync/handlers/leafs_request.go b/vms/evm/sync/handlers/leafs_request.go new file mode 100644 index 000000000000..f9c4fef7527e --- /dev/null +++ b/vms/evm/sync/handlers/leafs_request.go @@ -0,0 +1,474 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package handlers + +import ( + "bytes" + "context" + "time" + + "github.com/ava-labs/coreth/core/state/snapshot" + "github.com/ava-labs/coreth/sync/syncutils" + "github.com/ava-labs/coreth/utils" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/ethdb/memorydb" + "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/trie" + "github.com/ava-labs/libevm/triedb" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" + "github.com/ava-labs/avalanchego/vms/evm/sync/stats" +) + +var _ LeafRequestHandler = (*leafsRequestHandler)(nil) + +const ( + // Maximum number of leaves to return in a message.LeafsResponse + // This parameter overrides any other Limit specified + // in message.LeafsRequest if it is greater than this value + maxLeavesLimit = uint16(1024) + + // Maximum percent of the time left to deadline to spend on optimistically + // reading the snapshot to find the response + maxSnapshotReadTimePercent = 75 + + segmentLen = 64 // divide data from snapshot to segments of this size +) + +type LeafRequestHandler interface { + OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) +} + +// leafsRequestHandler is a peer.RequestHandler for types.LeafsRequest +// serving requested trie data +type leafsRequestHandler struct { + trieDB *triedb.Database + snapshotProvider SnapshotProvider + codec codec.Manager + stats stats.LeafsRequestHandlerStats + trieKeyLength int +} + +func NewLeafsRequestHandler(trieDB *triedb.Database, trieKeyLength int, snapshotProvider SnapshotProvider, codec codec.Manager, syncerStats stats.LeafsRequestHandlerStats) *leafsRequestHandler { + return &leafsRequestHandler{ + trieDB: trieDB, + snapshotProvider: snapshotProvider, + codec: codec, + stats: syncerStats, + trieKeyLength: trieKeyLength, + } +} + +// OnLeafsRequest returns encoded message.LeafsResponse for a given message.LeafsRequest +// Returns leaves with proofs for specified (Start-End) (both inclusive) ranges +// Returned message.LeafsResponse may contain partial leaves within requested Start and End range if: +// - ctx expired while fetching leafs +// - number of leaves read is greater than Limit (message.LeafsRequest) +// Specified Limit in message.LeafsRequest is overridden to maxLeavesLimit if it is greater than maxLeavesLimit +// Expects returned errors to be treated as FATAL +// Never returns errors +// Returns nothing if NodeType is invalid or requested trie root is not found +// Assumes ctx is active +func (lrh *leafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { + startTime := time.Now() + lrh.stats.IncLeafsRequest() + + if (len(leafsRequest.End) > 0 && bytes.Compare(leafsRequest.Start, leafsRequest.End) > 0) || + leafsRequest.Root == (common.Hash{}) || + leafsRequest.Root == types.EmptyRootHash || + leafsRequest.Limit == 0 { + log.Debug("invalid leafs request, dropping request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest) + lrh.stats.IncInvalidLeafsRequest() + return nil, nil + } + if (len(leafsRequest.Start) != 0 && len(leafsRequest.Start) != lrh.trieKeyLength) || + (len(leafsRequest.End) != 0 && len(leafsRequest.End) != lrh.trieKeyLength) { + log.Debug("invalid length for leafs request range, dropping request", "startLen", 
len(leafsRequest.Start), "endLen", len(leafsRequest.End), "expected", lrh.trieKeyLength) + lrh.stats.IncInvalidLeafsRequest() + return nil, nil + } + + // TODO: We should know the state root that accounts correspond to, + // as this information will be necessary to access storage tries when + // the trie is path based. + // stateRoot := common.Hash{} + t, err := trie.New(trie.TrieID(leafsRequest.Root), lrh.trieDB) + if err != nil { + log.Debug("error opening trie when processing request, dropping request", "nodeID", nodeID, "requestID", requestID, "root", leafsRequest.Root, "err", err) + lrh.stats.IncMissingRoot() + return nil, nil + } + // override limit if it is greater than the configured maxLeavesLimit + limit := leafsRequest.Limit + if limit > maxLeavesLimit { + limit = maxLeavesLimit + } + + var leafsResponse message.LeafsResponse + leafsResponse.Keys = make([][]byte, 0, limit) + leafsResponse.Vals = make([][]byte, 0, limit) + + responseBuilder := &responseBuilder{ + request: &leafsRequest, + response: &leafsResponse, + t: t, + keyLength: lrh.trieKeyLength, + limit: limit, + stats: lrh.stats, + } + // pass snapshot to responseBuilder if non-nil snapshot getter provided + if lrh.snapshotProvider != nil { + responseBuilder.snap = lrh.snapshotProvider.Snapshots() + } + err = responseBuilder.handleRequest(ctx) + + // ensure metrics are captured properly on all return paths + defer func() { + lrh.stats.UpdateLeafsRequestProcessingTime(time.Since(startTime)) + lrh.stats.UpdateLeafsReturned(uint16(len(leafsResponse.Keys))) + lrh.stats.UpdateRangeProofValsReturned(int64(len(leafsResponse.ProofVals))) + lrh.stats.UpdateGenerateRangeProofTime(responseBuilder.proofTime) + lrh.stats.UpdateReadLeafsTime(responseBuilder.trieReadTime) + }() + if err != nil { + log.Debug("failed to serve leafs request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "err", err) + return nil, nil + } + if len(leafsResponse.Keys) == 0 && ctx.Err() != nil { + log.Debug("context err set before any leafs were iterated", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "ctxErr", ctx.Err()) + return nil, nil + } + + responseBytes, err := lrh.codec.Marshal(message.Version, leafsResponse) + if err != nil { + log.Debug("failed to marshal LeafsResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "request", leafsRequest, "err", err) + return nil, nil + } + + log.Debug("handled leafsRequest", "time", time.Since(startTime), "leafs", len(leafsResponse.Keys), "proofLen", len(leafsResponse.ProofVals)) + return responseBytes, nil +} + +type responseBuilder struct { + request *message.LeafsRequest + response *message.LeafsResponse + t *trie.Trie + snap *snapshot.Tree + keyLength int + limit uint16 + + // stats + trieReadTime time.Duration + proofTime time.Duration + stats stats.LeafsRequestHandlerStats +} + +func (rb *responseBuilder) handleRequest(ctx context.Context) error { + // Read from snapshot if a [snapshot.Tree] was provided in initialization + if rb.snap != nil { + if done, err := rb.fillFromSnapshot(ctx); err != nil { + return err + } else if done { + return nil + } + // reset the proof if we will iterate the trie further + rb.response.ProofVals = nil + } + + if len(rb.response.Keys) < int(rb.limit) { + // more indicates whether there are more leaves in the trie + more, err := rb.fillFromTrie(ctx, rb.request.End) + if err != nil { + rb.stats.IncTrieError() + return err + } + if len(rb.request.Start) == 0 && !more { + // omit proof via early return + return nil + } + } 
+ + // Generate the proof and add it to the response. + proof, err := rb.generateRangeProof(rb.request.Start, rb.response.Keys) + if err != nil { + rb.stats.IncProofError() + return err + } + defer proof.Close() // closing memdb does not error + + rb.response.ProofVals, err = iterateVals(proof) + if err != nil { + rb.stats.IncProofError() + return err + } + return nil +} + +// fillFromSnapshot reads data from snapshot and returns true if the response is complete. +// Otherwise, the caller should attempt to iterate the trie and determine if a range proof +// should be added to the response. +func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) { + snapshotReadStart := time.Now() + rb.stats.IncSnapshotReadAttempt() + + // Optimistically read leafs from the snapshot, assuming they have not been + // modified since the requested root. If this assumption can be verified with + // range proofs and data from the trie, we can skip iterating the trie as + // an optimization. + // Since we are performing this read optimistically, we use a separate context + // with reduced timeout so there is enough time to read the trie if the snapshot + // read does not contain up-to-date data. + snapCtx := ctx + if deadline, ok := ctx.Deadline(); ok { + timeTillDeadline := time.Until(deadline) + bufferedDeadline := time.Now().Add(timeTillDeadline * maxSnapshotReadTimePercent / 100) + + var cancel context.CancelFunc + snapCtx, cancel = context.WithDeadline(ctx, bufferedDeadline) + defer cancel() + } + snapKeys, snapVals, err := rb.readLeafsFromSnapshot(snapCtx) + // Update read snapshot time here, so that we include the case that an error occurred. + rb.stats.UpdateSnapshotReadTime(time.Since(snapshotReadStart)) + if err != nil { + rb.stats.IncSnapshotReadError() + return false, err + } + + // Check if the entire range read from the snapshot is valid according to the trie. + proof, ok, more, err := rb.isRangeValid(snapKeys, snapVals, false) + if err != nil { + rb.stats.IncProofError() + return false, err + } + defer proof.Close() // closing memdb does not error + if ok { + rb.response.Keys, rb.response.Vals = snapKeys, snapVals + if len(rb.request.Start) == 0 && !more { + // omit proof via early return + rb.stats.IncSnapshotReadSuccess() + return true, nil + } + rb.response.ProofVals, err = iterateVals(proof) + if err != nil { + rb.stats.IncProofError() + return false, err + } + rb.stats.IncSnapshotReadSuccess() + return !more, nil + } + // The data from the snapshot could not be validated as a whole. It is still likely + // most of the data from the snapshot is useable, so we try to validate smaller + // segments of the data and use them in the response. 
+ hasGap := false + for i := 0; i < len(snapKeys); i += segmentLen { + segmentEnd := min(i+segmentLen, len(snapKeys)) + proof, ok, _, err := rb.isRangeValid(snapKeys[i:segmentEnd], snapVals[i:segmentEnd], hasGap) + if err != nil { + rb.stats.IncProofError() + return false, err + } + _ = proof.Close() // we don't need this proof + if !ok { + // segment is not valid + rb.stats.IncSnapshotSegmentInvalid() + hasGap = true + continue + } + + // segment is valid + rb.stats.IncSnapshotSegmentValid() + if hasGap { + // if there is a gap between valid segments, fill the gap with data from the trie + _, err := rb.fillFromTrie(ctx, snapKeys[i]) + if err != nil { + rb.stats.IncTrieError() + return false, err + } + if len(rb.response.Keys) >= int(rb.limit) || ctx.Err() != nil { + break + } + // remove the last key added since it is snapKeys[i] and will be added back + // Note: this is safe because we were able to verify the range proof that + // shows snapKeys[i] is part of the trie. + rb.response.Keys = rb.response.Keys[:len(rb.response.Keys)-1] + rb.response.Vals = rb.response.Vals[:len(rb.response.Vals)-1] + } + hasGap = false + // all the key/vals in the segment are valid, but possibly shorten segmentEnd + // here to respect limit. this is necessary in case the number of leafs we read + // from the trie is more than the length of a segment which cannot be validated. limit + segmentEnd = min(segmentEnd, i+int(rb.limit)-len(rb.response.Keys)) + rb.response.Keys = append(rb.response.Keys, snapKeys[i:segmentEnd]...) + rb.response.Vals = append(rb.response.Vals, snapVals[i:segmentEnd]...) + + if len(rb.response.Keys) >= int(rb.limit) { + break + } + } + return false, nil +} + +// generateRangeProof returns a range proof for the range specified by [start] and [keys] using [t]. +func (rb *responseBuilder) generateRangeProof(start []byte, keys [][]byte) (*memorydb.Database, error) { + proof := memorydb.New() + startTime := time.Now() + defer func() { rb.proofTime += time.Since(startTime) }() + + // If [start] is empty, populate it with the appropriate length key starting at 0. + if len(start) == 0 { + start = bytes.Repeat([]byte{0x00}, rb.keyLength) + } + + if err := rb.t.Prove(start, proof); err != nil { + _ = proof.Close() // closing memdb does not error + return nil, err + } + if len(keys) > 0 { + // If there is a non-zero number of keys, set [end] for the range proof to the last key. + end := keys[len(keys)-1] + if err := rb.t.Prove(end, proof); err != nil { + _ = proof.Close() // closing memdb does not error + return nil, err + } + } + return proof, nil +} + +// verifyRangeProof verifies the provided range proof with [keys/vals], starting at [start]. +// Returns a boolean indicating if there are more leaves to the right of the last key in the trie and a nil error if the range proof is successfully verified. +func (rb *responseBuilder) verifyRangeProof(keys, vals [][]byte, start []byte, proof *memorydb.Database) (bool, error) { + startTime := time.Now() + defer func() { rb.proofTime += time.Since(startTime) }() + + // If [start] is empty, populate it with the appropriate length key starting at 0. 
+ if len(start) == 0 { + start = bytes.Repeat([]byte{0x00}, rb.keyLength) + } + return trie.VerifyRangeProof(rb.request.Root, start, keys, vals, proof) +} + +// iterateVals returns the values contained in [db] +func iterateVals(db *memorydb.Database) ([][]byte, error) { + if db == nil { + return nil, nil + } + // iterate db into [][]byte and return + it := db.NewIterator(nil, nil) + defer it.Release() + + vals := make([][]byte, 0, db.Len()) + for it.Next() { + vals = append(vals, it.Value()) + } + + return vals, it.Error() +} + +// isRangeValid generates and verifies a range proof, returning true if keys/vals are +// part of the trie. If [hasGap] is true, the range is validated independent of the +// existing response. If [hasGap] is false, the range proof begins at a key which +// guarantees the range can be appended to the response. +// Additionally returns a boolean indicating if there are more leaves in the trie. +func (rb *responseBuilder) isRangeValid(keys, vals [][]byte, hasGap bool) (*memorydb.Database, bool, bool, error) { + var startKey []byte + if hasGap { + startKey = keys[0] + } else { + startKey = rb.nextKey() + } + + proof, err := rb.generateRangeProof(startKey, keys) + if err != nil { + return nil, false, false, err + } + more, proofErr := rb.verifyRangeProof(keys, vals, startKey, proof) + return proof, proofErr == nil, more, nil +} + +// nextKey returns the nextKey that could potentially be part of the response. +func (rb *responseBuilder) nextKey() []byte { + if len(rb.response.Keys) == 0 { + return rb.request.Start + } + nextKey := common.CopyBytes(rb.response.Keys[len(rb.response.Keys)-1]) + utils.IncrOne(nextKey) + return nextKey +} + +// fillFromTrie iterates key/values from the response builder's trie and appends +// them to the response. Iteration begins from the last key already in the response, +// or the request start if the response is empty. Iteration ends at [end] or if +// the number of leafs reaches the builder's limit. +// Returns true if there are more keys in the trie. +func (rb *responseBuilder) fillFromTrie(ctx context.Context, end []byte) (bool, error) { + startTime := time.Now() + defer func() { rb.trieReadTime += time.Since(startTime) }() + + // create iterator to iterate the trie + nodeIt, err := rb.t.NodeIterator(rb.nextKey()) + if err != nil { + return false, err + } + it := trie.NewIterator(nodeIt) + more := false + for it.Next() { + // if we're at the end, break this loop + if len(end) > 0 && bytes.Compare(it.Key, end) > 0 { + more = true + break + } + + // If we've returned enough data or run out of time, set the more flag and exit + // this flag will determine if the proof is generated or not + if len(rb.response.Keys) >= int(rb.limit) || ctx.Err() != nil { + more = true + break + } + + // append key/vals to the response + rb.response.Keys = append(rb.response.Keys, it.Key) + rb.response.Vals = append(rb.response.Vals, it.Value) + } + return more, it.Err +} + +// readLeafsFromSnapshot iterates the storage snapshot of the requested account +// (or the main account trie if account is empty). Returns up to [rb.limit] key/value +// pairs for keys that are in the request's range (inclusive). +func (rb *responseBuilder) readLeafsFromSnapshot(ctx context.Context) ([][]byte, [][]byte, error) { + var ( + snapIt ethdb.Iterator + startHash = common.BytesToHash(rb.request.Start) + keys = make([][]byte, 0, rb.limit) + vals = make([][]byte, 0, rb.limit) + ) + + // Get an iterator into the storage or the main account snapshot. 
+ if rb.request.Account == (common.Hash{}) { + snapIt = &syncutils.AccountIterator{AccountIterator: rb.snap.DiskAccountIterator(startHash)} + } else { + snapIt = &syncutils.StorageIterator{StorageIterator: rb.snap.DiskStorageIterator(rb.request.Account, startHash)} + } + defer snapIt.Release() + for snapIt.Next() { + // if we're at the end, break this loop + if len(rb.request.End) > 0 && bytes.Compare(snapIt.Key(), rb.request.End) > 0 { + break + } + // If we've returned enough data or run out of time, set the more flag and exit + // this flag will determine if the proof is generated or not + if len(keys) >= int(rb.limit) || ctx.Err() != nil { + break + } + + keys = append(keys, snapIt.Key()) + vals = append(vals, snapIt.Value()) + } + return keys, vals, snapIt.Error() +} diff --git a/vms/evm/sync/handlers/leafs_request_test.go b/vms/evm/sync/handlers/leafs_request_test.go new file mode 100644 index 000000000000..f399f3474682 --- /dev/null +++ b/vms/evm/sync/handlers/leafs_request_test.go @@ -0,0 +1,708 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package handlers + +import ( + "bytes" + "context" + "crypto/rand" + "testing" + + "github.com/ava-labs/coreth/core/state/snapshot" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/crypto" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/trie" + "github.com/ava-labs/libevm/triedb" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/evm/sync/message" + "github.com/ava-labs/avalanchego/vms/evm/sync/statesynctest" +) + +func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { + r := rand.Reader + testHandlerStats := &statesynctest.TestHandlerStats{} + memdb := rawdb.NewMemoryDatabase() + trieDB := triedb.NewDatabase(memdb, nil) + + corruptedTrieRoot, _, _ := statesynctest.GenerateTrie(t, r, trieDB, 100, common.HashLength) + tr, err := trie.New(trie.TrieID(corruptedTrieRoot), trieDB) + require.NoError(t, err) + // Corrupt [corruptedTrieRoot] + statesynctest.CorruptTrie(t, memdb, tr, 5) + + largeTrieRoot, largeTrieKeys, _ := statesynctest.GenerateTrie(t, r, trieDB, 10_000, common.HashLength) + smallTrieRoot, _, _ := statesynctest.GenerateTrie(t, r, trieDB, 500, common.HashLength) + accountTrieRoot, accounts := statesynctest.FillAccounts( + t, + trieDB, + common.Hash{}, + 10_000, + func(_ *testing.T, i int, acc types.StateAccount) types.StateAccount { + // set the storage trie root for two accounts + switch i { + case 0: + acc.Root = largeTrieRoot + case 1: + acc.Root = smallTrieRoot + } + + return acc + }) + + // find the hash of the account we set to have a storage + var ( + largeStorageAccount common.Hash + smallStorageAccount common.Hash + ) + for key, account := range accounts { + if account.Root == largeTrieRoot { + largeStorageAccount = crypto.Keccak256Hash(key.Address[:]) + } + if account.Root == smallTrieRoot { + smallStorageAccount = crypto.Keccak256Hash(key.Address[:]) + } + if (largeStorageAccount != common.Hash{}) && (smallStorageAccount != common.Hash{}) { + // we can break if we found both accounts of interest to the test + break + } + } + snapshotProvider := &TestSnapshotProvider{} + leafsHandler := NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, snapshotProvider, message.Codec, testHandlerStats) + snapConfig := snapshot.Config{ + CacheSize: 64, + AsyncBuild: false, 
+ NoBuild: false, + SkipVerify: true, + } + + tests := map[string]struct { + prepareTestFn func() (context.Context, message.LeafsRequest) + requireResponseFn func(*testing.T, message.LeafsRequest, []byte, error) + }{ + "zero limit dropped": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: 0, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + require.Equal(t, uint32(1), testHandlerStats.InvalidLeafsRequestCount) + }, + }, + "empty root dropped": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: common.Hash{}, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + require.Equal(t, uint32(1), testHandlerStats.InvalidLeafsRequestCount) + }, + }, + "bad start len dropped": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: common.Hash{}, + Start: bytes.Repeat([]byte{0x00}, common.HashLength+2), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + require.Equal(t, uint32(1), testHandlerStats.InvalidLeafsRequestCount) + }, + }, + "bad end len dropped": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: common.Hash{}, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength-1), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + require.Equal(t, uint32(1), testHandlerStats.InvalidLeafsRequestCount) + }, + }, + "empty storage root dropped": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: types.EmptyRootHash, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + require.Equal(t, uint32(1), testHandlerStats.InvalidLeafsRequestCount) + }, + }, + "missing root dropped": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: common.BytesToHash([]byte("something is missing here...")), + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ 
message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + require.Equal(t, uint32(1), testHandlerStats.MissingRootCount) + }, + }, + "corrupted trie drops request": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: corruptedTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + require.Equal(t, uint32(1), testHandlerStats.TrieErrorCount) + }, + }, + "cancelled context dropped": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + return ctx, message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + }, + }, + "nil start and end range returns entire trie": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: smallTrieRoot, + Start: nil, + End: nil, + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, 500) + require.Len(t, leafsResponse.Vals, 500) + require.Empty(t, leafsResponse.ProofVals) + }, + }, + "nil end range treated like greatest possible value": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: smallTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: nil, + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, 500) + require.Len(t, leafsResponse.Vals, 500) + }, + }, + "end greater than start dropped": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + return ctx, message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0xbb}, common.HashLength), + End: bytes.Repeat([]byte{0xaa}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + require.Equal(t, uint32(1), testHandlerStats.InvalidLeafsRequestCount) + }, + }, + "invalid node type dropped": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + return ctx, message.LeafsRequest{ + Root: 
largeTrieRoot, + Start: bytes.Repeat([]byte{0xbb}, common.HashLength), + End: bytes.Repeat([]byte{0xaa}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.NodeType(11), + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.Nil(t, response) + require.NoError(t, err) + }, + }, + "max leaves overridden": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit * 10, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, int(maxLeavesLimit)) + require.Len(t, leafsResponse.Vals, int(maxLeavesLimit)) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + }, + }, + "full range with nil start": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: largeTrieRoot, + Start: nil, + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, int(maxLeavesLimit)) + require.Len(t, leafsResponse.Vals, int(maxLeavesLimit)) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + requireRangeProofIsValid(t, &request, &leafsResponse, true) + }, + }, + "full range with 0x00 start": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0x00}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, int(maxLeavesLimit)) + require.Len(t, leafsResponse.Vals, int(maxLeavesLimit)) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + requireRangeProofIsValid(t, &request, &leafsResponse, true) + }, + }, + "partial mid range": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + startKey := largeTrieKeys[1_000] + startKey[31]++ // exclude start key from response + endKey := largeTrieKeys[1_040] // include end key in response + return context.Background(), message.LeafsRequest{ + Root: largeTrieRoot, + Start: startKey, + End: endKey, + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, 
request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, 40) + require.Len(t, leafsResponse.Vals, 40) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + requireRangeProofIsValid(t, &request, &leafsResponse, true) + }, + }, + "partial end range": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: largeTrieRoot, + Start: largeTrieKeys[9_400], + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, 600) + require.Len(t, leafsResponse.Vals, 600) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + requireRangeProofIsValid(t, &request, &leafsResponse, false) + }, + }, + "final end range": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: largeTrieRoot, + Start: bytes.Repeat([]byte{0xff}, common.HashLength), + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Empty(t, leafsResponse.Keys) + require.Empty(t, leafsResponse.Vals) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + requireRangeProofIsValid(t, &request, &leafsResponse, false) + }, + }, + "small trie root": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + return context.Background(), message.LeafsRequest{ + Root: smallTrieRoot, + Start: nil, + End: bytes.Repeat([]byte{0xff}, common.HashLength), + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NotEmpty(t, response) + require.NoError(t, err) + + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + + require.Len(t, leafsResponse.Keys, 500) + require.Len(t, leafsResponse.Vals, 500) + require.Empty(t, leafsResponse.ProofVals) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + requireRangeProofIsValid(t, &request, &leafsResponse, false) + }, + }, + "account data served from snapshot": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + require.NoError(t, err) + snapshotProvider.Snapshot = snap + return context.Background(), 
message.LeafsRequest{ + Root: accountTrieRoot, + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, int(maxLeavesLimit)) + require.Len(t, leafsResponse.Vals, int(maxLeavesLimit)) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + require.Equal(t, uint32(1), testHandlerStats.SnapshotReadAttemptCount) + require.Equal(t, uint32(1), testHandlerStats.SnapshotReadSuccessCount) + requireRangeProofIsValid(t, &request, &leafsResponse, true) + }, + }, + "partial account data served from snapshot": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + require.NoError(t, err) + snapshotProvider.Snapshot = snap + it := snap.DiskAccountIterator(common.Hash{}) + defer it.Release() + i := 0 + for it.Next() { + if i > int(maxLeavesLimit) { + // no need to modify beyond the request limit + break + } + // modify one entry of 1 in 4 segments + if i%(segmentLen*4) == 0 { + acc, err := types.FullAccount(it.Account()) + require.NoError(t, err) + acc.Nonce++ + slimAccRLP := types.SlimAccountRLP(*acc) + rawdb.WriteAccountSnapshot(memdb, it.Hash(), slimAccRLP) + } + i++ + } + + return context.Background(), message.LeafsRequest{ + Root: accountTrieRoot, + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, int(maxLeavesLimit)) + require.Len(t, leafsResponse.Vals, int(maxLeavesLimit)) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + require.Equal(t, uint32(1), testHandlerStats.SnapshotReadAttemptCount) + require.Equal(t, uint32(0), testHandlerStats.SnapshotReadSuccessCount) + requireRangeProofIsValid(t, &request, &leafsResponse, true) + + // expect 1/4th of segments to be invalid + numSegments := maxLeavesLimit / segmentLen + require.Equal(t, uint32(numSegments/4), testHandlerStats.SnapshotSegmentInvalidCount) + require.Equal(t, uint32(3*numSegments/4), testHandlerStats.SnapshotSegmentValidCount) + }, + }, + "storage data served from snapshot": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + require.NoError(t, err) + snapshotProvider.Snapshot = snap + return context.Background(), message.LeafsRequest{ + Root: largeTrieRoot, + Account: largeStorageAccount, + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, int(maxLeavesLimit)) + require.Len(t, leafsResponse.Vals, int(maxLeavesLimit)) + 
require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + require.Equal(t, uint32(1), testHandlerStats.SnapshotReadAttemptCount) + require.Equal(t, uint32(1), testHandlerStats.SnapshotReadSuccessCount) + requireRangeProofIsValid(t, &request, &leafsResponse, true) + }, + }, + "partial storage data served from snapshot": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + require.NoError(t, err) + snapshotProvider.Snapshot = snap + it := snap.DiskStorageIterator(largeStorageAccount, common.Hash{}) + defer it.Release() + i := 0 + for it.Next() { + if i > int(maxLeavesLimit) { + // no need to modify beyond the request limit + break + } + // modify one entry of 1 in 4 segments + if i%(segmentLen*4) == 0 { + randomBytes := make([]byte, 5) + _, err := r.Read(randomBytes) + require.NoError(t, err) + rawdb.WriteStorageSnapshot(memdb, largeStorageAccount, it.Hash(), randomBytes) + } + i++ + } + + return context.Background(), message.LeafsRequest{ + Root: largeTrieRoot, + Account: largeStorageAccount, + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, int(maxLeavesLimit)) + require.Len(t, leafsResponse.Vals, int(maxLeavesLimit)) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + require.Equal(t, uint32(1), testHandlerStats.SnapshotReadAttemptCount) + require.Equal(t, uint32(0), testHandlerStats.SnapshotReadSuccessCount) + requireRangeProofIsValid(t, &request, &leafsResponse, true) + + // expect 1/4th of segments to be invalid + numSegments := maxLeavesLimit / segmentLen + require.Equal(t, uint32(numSegments/4), testHandlerStats.SnapshotSegmentInvalidCount) + require.Equal(t, uint32(3*numSegments/4), testHandlerStats.SnapshotSegmentValidCount) + }, + }, + "last snapshot key removed": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + require.NoError(t, err) + snapshotProvider.Snapshot = snap + it := snap.DiskStorageIterator(smallStorageAccount, common.Hash{}) + defer it.Release() + var lastKey common.Hash + for it.Next() { + lastKey = it.Hash() + } + rawdb.DeleteStorageSnapshot(memdb, smallStorageAccount, lastKey) + + return context.Background(), message.LeafsRequest{ + Root: smallTrieRoot, + Account: smallStorageAccount, + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, 500) + require.Len(t, leafsResponse.Vals, 500) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + require.Equal(t, uint32(1), testHandlerStats.SnapshotReadAttemptCount) + require.Equal(t, uint32(1), 
testHandlerStats.SnapshotReadSuccessCount) + requireRangeProofIsValid(t, &request, &leafsResponse, false) + }, + }, + "request last key when removed from snapshot": { + prepareTestFn: func() (context.Context, message.LeafsRequest) { + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + require.NoError(t, err) + snapshotProvider.Snapshot = snap + it := snap.DiskStorageIterator(smallStorageAccount, common.Hash{}) + defer it.Release() + var lastKey common.Hash + for it.Next() { + lastKey = it.Hash() + } + rawdb.DeleteStorageSnapshot(memdb, smallStorageAccount, lastKey) + + return context.Background(), message.LeafsRequest{ + Root: smallTrieRoot, + Account: smallStorageAccount, + Start: lastKey[:], + Limit: maxLeavesLimit, + NodeType: message.StateTrieNode, + } + }, + requireResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { + require.NoError(t, err) + var leafsResponse message.LeafsResponse + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) + require.Len(t, leafsResponse.Keys, 1) + require.Len(t, leafsResponse.Vals, 1) + require.Equal(t, uint32(1), testHandlerStats.LeafsRequestCount) + require.Equal(t, uint32(len(leafsResponse.Keys)), testHandlerStats.LeafsReturnedSum) + require.Equal(t, uint32(1), testHandlerStats.SnapshotReadAttemptCount) + require.Equal(t, uint32(0), testHandlerStats.SnapshotReadSuccessCount) + requireRangeProofIsValid(t, &request, &leafsResponse, false) + }, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ctx, request := test.prepareTestFn() + t.Cleanup(func() { + <-snapshot.WipeSnapshot(memdb, true) + testHandlerStats.Reset() + snapshotProvider.Snapshot = nil // reset the snapshot to nil + }) + + response, err := leafsHandler.OnLeafsRequest(ctx, ids.GenerateTestNodeID(), 1, request) + test.requireResponseFn(t, request, response, err) + }) + } +} + +func requireRangeProofIsValid(t *testing.T, request *message.LeafsRequest, response *message.LeafsResponse, expectMore bool) { + t.Helper() + + var start []byte + if len(request.Start) == 0 { + start = bytes.Repeat([]byte{0x00}, common.HashLength) + } else { + start = request.Start + } + + var proof ethdb.Database + if len(response.ProofVals) > 0 { + proof = rawdb.NewMemoryDatabase() + defer proof.Close() + for _, proofVal := range response.ProofVals { + proofKey := crypto.Keccak256(proofVal) + require.NoError(t, proof.Put(proofKey, proofVal)) + } + } + + more, err := trie.VerifyRangeProof(request.Root, start, response.Keys, response.Vals, proof) + require.NoError(t, err) + require.Equal(t, expectMore, more) +} diff --git a/vms/evm/sync/handlers/test_providers.go b/vms/evm/sync/handlers/test_providers.go new file mode 100644 index 000000000000..8ebea9f77b9c --- /dev/null +++ b/vms/evm/sync/handlers/test_providers.go @@ -0,0 +1,31 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package handlers + +import ( + "github.com/ava-labs/coreth/core/state/snapshot" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/types" +) + +var ( + _ BlockProvider = (*TestBlockProvider)(nil) + _ SnapshotProvider = (*TestSnapshotProvider)(nil) +) + +type TestBlockProvider struct { + GetBlockFn func(common.Hash, uint64) *types.Block +} + +func (t *TestBlockProvider) GetBlock(hash common.Hash, number uint64) *types.Block { + return t.GetBlockFn(hash, number) +} + +type TestSnapshotProvider struct { + Snapshot *snapshot.Tree +} + +func (t *TestSnapshotProvider) Snapshots() *snapshot.Tree { + return t.Snapshot +} diff --git a/vms/evm/sync/message/block_request.go b/vms/evm/sync/message/block_request.go new file mode 100644 index 000000000000..f14bc88992cd --- /dev/null +++ b/vms/evm/sync/message/block_request.go @@ -0,0 +1,41 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "fmt" + + "github.com/ava-labs/libevm/common" + + "github.com/ava-labs/avalanchego/ids" +) + +var _ Request = (*BlockRequest)(nil) + +// BlockRequest is a request to retrieve up to Parents blocks starting from Hash, ordered from newest to oldest +type BlockRequest struct { + Hash common.Hash `serialize:"true"` + Height uint64 `serialize:"true"` + Parents uint16 `serialize:"true"` +} + +func (b BlockRequest) String() string { + return fmt.Sprintf( + "BlockRequest(Hash=%s, Height=%d, Parents=%d)", + b.Hash, b.Height, b.Parents, + ) +} + +func (b BlockRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, handler RequestHandler) ([]byte, error) { + return handler.HandleBlockRequest(ctx, nodeID, requestID, b) +} + +// BlockResponse is a response to a BlockRequest +// Blocks is a slice of RLP-encoded blocks starting with the block +// requested in BlockRequest.Hash. The next block is the parent, etc. +// handler: handlers.BlockRequestHandler +type BlockResponse struct { + Blocks [][]byte `serialize:"true"` +} diff --git a/vms/evm/sync/message/block_request_test.go b/vms/evm/sync/message/block_request_test.go new file mode 100644 index 000000000000..92420bd5d5d7 --- /dev/null +++ b/vms/evm/sync/message/block_request_test.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "encoding/base64" + "math/rand" + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/stretchr/testify/require" +) + +// TestMarshalBlockRequest requires that the structure or serialization logic hasn't changed, primarily to +// ensure compatibility with the network.
+func TestMarshalBlockRequest(t *testing.T) { + blockRequest := BlockRequest{ + Hash: common.BytesToHash([]byte("some hash is here yo")), + Height: 1337, + Parents: 64, + } + + base64BlockRequest := "AAAAAAAAAAAAAAAAAABzb21lIGhhc2ggaXMgaGVyZSB5bwAAAAAAAAU5AEA=" + + blockRequestBytes, err := Codec.Marshal(Version, blockRequest) + require.NoError(t, err) + require.Equal(t, base64BlockRequest, base64.StdEncoding.EncodeToString(blockRequestBytes)) + + var b BlockRequest + _, err = Codec.Unmarshal(blockRequestBytes, &b) + require.NoError(t, err) + require.Equal(t, blockRequest.Hash, b.Hash) + require.Equal(t, blockRequest.Height, b.Height) + require.Equal(t, blockRequest.Parents, b.Parents) +} + +// TestMarshalBlockResponse requires that the structure or serialization logic hasn't changed, primarily to +// ensure compatibility with the network. +func TestMarshalBlockResponse(t *testing.T) { + // create some random bytes + // set seed to ensure deterministic random behaviour + r := rand.New(rand.NewSource(1)) //nolint:gosec // deterministic bytes for golden assertion + blocksBytes := make([][]byte, 32) + for i := range blocksBytes { + blocksBytes[i] = make([]byte, r.Intn(32)+32) + _, err := r.Read(blocksBytes[i]) + require.NoError(t, err) + } + + blockResponse := BlockResponse{ + Blocks: blocksBytes, + } + + base64BlockResponse := "AAAAAAAgAAAAIU8WP18PmmIdcpVmx00QA3xNe7sEB9HixkmBhVrYaB0NhgAAADnR6ZTSxCKs0gigByk5SH9pmeudGKRHhARdh/PGfPInRumVr1olNnlRuqL/bNRxxIPxX7kLrbN8WCEAAAA6tmgLTnyLdjobHUnUlVyEhiFjJSU/7HON16nii/khEZwWDwcCRIYVu9oIMT9qjrZo0gv1BZh1kh5migAAACtb3yx/xIRo0tbFL1BU4tCDa/hMcXTLdHY2TMPb2Wiw9xcu2FeUuzWLDDtSAAAAO12heG+f69ehnQ97usvgJVqlt9RL7ED4TIkrm//UNimwIjvupfT3Q5H0RdFa/UKUBAN09pJLmMv4cT+NAAAAMpYtJOLK/Mrjph+1hrFDI6a8j5598dkpMz/5k5M76m9bOvbeA3Q2bEcZ5DobBn2JvH8BAAAAOfHxekxyFaO1OeseWEnGB327VyL1cXoomiZvl2R5gZmOvqicC0s3OXARXoLtb0ElyPpzEeTX3vqSLQAAACc2zU8kq/ffhmuqVgODZ61hRd4e6PSosJk+vfiIOgrYvpw5eLBIg+UAAAAkahVqnexqQOmh0AfwM8KCMGG90Oqln45NpkMBBSINCyloi3NLAAAAKI6gENd8luqAp6Zl9gb2pjt/Pf0lZ8GJeeTWDyZobZvy+ybJAf81TN4AAAA8FgfuKbpk+Eq0PKDG5rkcH9O+iZBDQXnTr0SRo2kBLbktGE/DnRc0/1cWQolTu2hl/PkrDDoXyQKL6ZFOAAAAMwl50YMDVvKlTD3qsqS0R11jr76PtWmHx39YGFJvGBS+gjNQ6rE5NfMdhEhFF+kkrveK4QAAADhRwAdVkgww7CmjcDk0v1CijaECl13tp351hXnqPf5BNqv3UrO4Jx0D6USzyds2a3UEX479adIq5QAAADpBGUfLVbzqQGsy1hCL1oWE9X43yqxuM/6qMmOjmUNwJLqcmxRniidPAakQrilfbvv+X1q/RMzeJjtWAAAAKAZjPn05Bp8BojnENlhUw69/a0HWMfkrmo0S9BJXMl//My91drBiBVYAAAAqMEo+Pq6QGlJyDahcoeSzjq8/RMbG74Ni8vVPwA4J1vwlZAhUwV38rKqKAAAAOyzszlo6lLTTOKUUPmNAjYcksM8/rhej95vhBy+2PDXWBCxBYPOO6eKp8/tP+wAZtFTVIrX/oXYEGT+4AAAAMpZnz1PD9SDIibeb9QTPtXx2ASMtWJuszqnW4mPiXCd0HT9sYsu7FdmvvL9/faQasECOAAAALzk4vxd0rOdwmk8JHpqD/erg7FXrIzqbU5TLPHhWtUbTE8ijtMHA4FRH9Lo3DrNtAAAAPLz97PUi4qbx7Qr+wfjiD6q+32sWLnF9OnSKWGd6DFY0j4khomaxHQ8zTGL+UrpTrxl3nLKUi2Vw/6C3cwAAADqWPBMK15dRJSEPDvHDFAkPB8eab1ccJG8+msC3QT7xEL1YsAznO/9wb3/0tvRAkKMnEfMgjk5LictRAAAAJ2XOZAA98kaJKNWiO5ynQPgMk4LZxgNK0pYMeWUD4c4iFyX1DK8fvwAAADtcR6U9v459yvyeE4ZHpLRO1LzpZO1H90qllEaM7TI8t28NP6xHbJ+wP8kij7roj9WAZjoEVLaDEiB/CgAAADc7WExi1QJ84VpPClglDY+1Dnfyv08BUuXUlDWAf51Ll75vt3lwRmpWJv4zQIz56I4seXQIoy0pAAAAKkFrryBqmDIJgsharXA4SFnAWksTodWy9b/vWm7ZLaSCyqlWjltv6dip3QAAAC7Z6wkne1AJRMvoAKCxUn6mRymoYdL2SXoyNcN/QZJ3nsHZazscVCT84LcnsDByAAAAI+ZAq8lEj93rIZHZRcBHZ6+Eev0O212IV7eZrLGOSv+r4wN/AAAAL/7MQW5zTTc8Xr68nNzFlbzOPHvT2N+T+rfhJd3rr+ZaMb1dQeLSzpwrF4kvD+oZAAAAMTGikNy/poQG6HcHP/CINOGXpANKpIr6P4W4picIyuu6yIC1uJuT2lOBAWRAIQTmSLYAAAA1ImobDzE6id38RUxfj3KsibOLGfU3hMGem+rAPIdaJ9sCneN643pCMYgTSHaFkpNZyoxeuU4AAAA9FS3Br0LquOKSXG2u5N5e+fnc8I38vQK4CAk5hYWSig995QvhptwdV2joU3mI/dz
lYum5SMkYu6PpM+XEAAAAAC3Nrne6HSWbGIpLIchvvCPXKLRTR+raZQryTFbQgAqGkTMgiKgFvVXERuJesHU=" + + blockResponseBytes, err := Codec.Marshal(Version, blockResponse) + require.NoError(t, err) + require.Equal(t, base64BlockResponse, base64.StdEncoding.EncodeToString(blockResponseBytes)) + + var b BlockResponse + _, err = Codec.Unmarshal(blockResponseBytes, &b) + require.NoError(t, err) + require.Equal(t, blockResponse.Blocks, b.Blocks) +} diff --git a/vms/evm/sync/message/block_sync_summary.go b/vms/evm/sync/message/block_sync_summary.go new file mode 100644 index 000000000000..aedb630e19d3 --- /dev/null +++ b/vms/evm/sync/message/block_sync_summary.go @@ -0,0 +1,82 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "fmt" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/crypto" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +var _ Syncable = (*BlockSyncSummary)(nil) + +// BlockSyncSummary provides the information necessary to sync a node starting +// at the given block. +type BlockSyncSummary struct { + BlockNumber uint64 `serialize:"true"` + BlockHash common.Hash `serialize:"true"` + BlockRoot common.Hash `serialize:"true"` + + summaryID ids.ID + bytes []byte + acceptImpl AcceptImplFn +} + +func NewBlockSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash) (*BlockSyncSummary, error) { + // We intentionally do not use the acceptImpl here and leave it for the parser to set. + summary := BlockSyncSummary{ + BlockNumber: blockNumber, + BlockHash: blockHash, + BlockRoot: blockRoot, + } + bytes, err := Codec.Marshal(Version, &summary) + if err != nil { + return nil, fmt.Errorf("failed to marshal syncable summary: %w", err) + } + + summary.bytes = bytes + summaryID, err := ids.ToID(crypto.Keccak256(bytes)) + if err != nil { + return nil, fmt.Errorf("failed to compute summary ID: %w", err) + } + summary.summaryID = summaryID + + return &summary, nil +} + +func (s *BlockSyncSummary) GetBlockHash() common.Hash { + return s.BlockHash +} + +func (s *BlockSyncSummary) GetBlockRoot() common.Hash { + return s.BlockRoot +} + +func (s *BlockSyncSummary) Bytes() []byte { + return s.bytes +} + +func (s *BlockSyncSummary) Height() uint64 { + return s.BlockNumber +} + +func (s *BlockSyncSummary) ID() ids.ID { + return s.summaryID +} + +func (s *BlockSyncSummary) String() string { + return fmt.Sprintf("BlockSyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot) +} + +func (s *BlockSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { + if s.acceptImpl == nil { + return block.StateSyncSkipped, fmt.Errorf("accept implementation not specified for summary: %s", s) + } + return s.acceptImpl(s) +} diff --git a/vms/evm/sync/message/block_sync_summary_parser.go b/vms/evm/sync/message/block_sync_summary_parser.go new file mode 100644 index 000000000000..abeed28d36c2 --- /dev/null +++ b/vms/evm/sync/message/block_sync_summary_parser.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package message + +import ( + "fmt" + + "github.com/ava-labs/libevm/crypto" + + "github.com/ava-labs/avalanchego/ids" +) + +type BlockSyncSummaryParser struct{} + +func NewBlockSyncSummaryParser() *BlockSyncSummaryParser { + return &BlockSyncSummaryParser{} +} + +func (*BlockSyncSummaryParser) Parse(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) { + summary := BlockSyncSummary{} + if _, err := Codec.Unmarshal(summaryBytes, &summary); err != nil { + return nil, fmt.Errorf("failed to parse syncable summary: %w", err) + } + + summary.bytes = summaryBytes + summaryID, err := ids.ToID(crypto.Keccak256(summaryBytes)) + if err != nil { + return nil, fmt.Errorf("failed to compute summary ID: %w", err) + } + summary.summaryID = summaryID + summary.acceptImpl = acceptImpl + return &summary, nil +} diff --git a/vms/evm/sync/message/block_sync_summary_provider.go b/vms/evm/sync/message/block_sync_summary_provider.go new file mode 100644 index 000000000000..98eece22b0e6 --- /dev/null +++ b/vms/evm/sync/message/block_sync_summary_provider.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "github.com/ava-labs/libevm/core/types" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +type BlockSyncSummaryProvider struct{} + +// StateSummaryAtBlock returns the block state summary at [block] if valid. +func (*BlockSyncSummaryProvider) StateSummaryAtBlock(blk *types.Block) (block.StateSummary, error) { + return NewBlockSyncSummary(blk.Hash(), blk.NumberU64(), blk.Root()) +} diff --git a/vms/evm/sync/message/block_sync_summary_test.go b/vms/evm/sync/message/block_sync_summary_test.go new file mode 100644 index 000000000000..5321a14b6f70 --- /dev/null +++ b/vms/evm/sync/message/block_sync_summary_test.go @@ -0,0 +1,45 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package message + +import ( + "context" + "encoding/base64" + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +func TestMarshalBlockSyncSummary(t *testing.T) { + blockSyncSummary, err := NewBlockSyncSummary(common.Hash{1}, 2, common.Hash{3}) + require.NoError(t, err) + + require.Equal(t, common.Hash{1}, blockSyncSummary.GetBlockHash()) + require.Equal(t, uint64(2), blockSyncSummary.Height()) + require.Equal(t, common.Hash{3}, blockSyncSummary.GetBlockRoot()) + + expectedBase64Bytes := "AAAAAAAAAAAAAgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + require.Equal(t, expectedBase64Bytes, base64.StdEncoding.EncodeToString(blockSyncSummary.Bytes())) + + parser := NewBlockSyncSummaryParser() + called := false + acceptImplTest := func(Syncable) (block.StateSyncMode, error) { + called = true + return block.StateSyncSkipped, nil + } + s, err := parser.Parse(blockSyncSummary.Bytes(), acceptImplTest) + require.NoError(t, err) + require.Equal(t, blockSyncSummary.GetBlockHash(), s.GetBlockHash()) + require.Equal(t, blockSyncSummary.Height(), s.Height()) + require.Equal(t, blockSyncSummary.GetBlockRoot(), s.GetBlockRoot()) + require.Equal(t, blockSyncSummary.Bytes(), s.Bytes()) + + mode, err := s.Accept(context.TODO()) + require.NoError(t, err) + require.Equal(t, block.StateSyncSkipped, mode) + require.True(t, called) +} diff --git a/vms/evm/sync/message/code_request.go b/vms/evm/sync/message/code_request.go new file mode 100644 index 000000000000..be59623c4351 --- /dev/null +++ b/vms/evm/sync/message/code_request.go @@ -0,0 +1,48 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "fmt" + "strings" + + "github.com/ava-labs/libevm/common" + + "github.com/ava-labs/avalanchego/ids" +) + +var _ Request = CodeRequest{} + +// CodeRequest is a request to retrieve a contract code with specified Hash +type CodeRequest struct { + // Hashes is a list of contract code hashes + Hashes []common.Hash `serialize:"true"` +} + +func (c CodeRequest) String() string { + hashStrs := make([]string, len(c.Hashes)) + for i, hash := range c.Hashes { + hashStrs[i] = hash.String() + } + return fmt.Sprintf("CodeRequest(Hashes=%s)", strings.Join(hashStrs, ", ")) +} + +func (c CodeRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, handler RequestHandler) ([]byte, error) { + return handler.HandleCodeRequest(ctx, nodeID, requestID, c) +} + +func NewCodeRequest(hashes []common.Hash) CodeRequest { + return CodeRequest{ + Hashes: hashes, + } +} + +// CodeResponse is a response to a CodeRequest +// crypto.Keccak256Hash of each element in Data is expected to equal +// the corresponding element in CodeRequest.Hashes +// handler: handlers.CodeRequestHandler +type CodeResponse struct { + Data [][]byte `serialize:"true"` +} diff --git a/vms/evm/sync/message/code_request_test.go b/vms/evm/sync/message/code_request_test.go new file mode 100644 index 000000000000..a4e823f84d84 --- /dev/null +++ b/vms/evm/sync/message/code_request_test.go @@ -0,0 +1,58 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package message + +import ( + "encoding/base64" + "math/rand" + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/stretchr/testify/require" +) + +// TestMarshalCodeRequest requires that the structure or serialization logic hasn't changed, primarily to +// ensure compatibility with the network. +func TestMarshalCodeRequest(t *testing.T) { + codeRequest := CodeRequest{ + Hashes: []common.Hash{common.BytesToHash([]byte("some code pls"))}, + } + + base64CodeRequest := "AAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAHNvbWUgY29kZSBwbHM=" + + codeRequestBytes, err := Codec.Marshal(Version, codeRequest) + require.NoError(t, err) + require.Equal(t, base64CodeRequest, base64.StdEncoding.EncodeToString(codeRequestBytes)) + + var c CodeRequest + _, err = Codec.Unmarshal(codeRequestBytes, &c) + require.NoError(t, err) + require.Equal(t, codeRequest.Hashes, c.Hashes) +} + +// TestMarshalCodeResponse requires that the structure or serialization logic hasn't changed, primarily to +// ensure compatibility with the network. +func TestMarshalCodeResponse(t *testing.T) { + // generate some random code data + // set random seed for deterministic randomness + codeData := make([]byte, 50) + r := rand.New(rand.NewSource(1)) //nolint:gosec // deterministic bytes for golden assertion + _, err := r.Read(codeData) + require.NoError(t, err) + + codeResponse := CodeResponse{ + Data: [][]byte{codeData}, + } + + base64CodeResponse := "AAAAAAABAAAAMlL9/AchgmVPFj9fD5piHXKVZsdNEAN8TXu7BAfR4sZJgYVa2GgdDYbR6R4AFnk5y2aU" + + codeResponseBytes, err := Codec.Marshal(Version, codeResponse) + require.NoError(t, err) + require.Equal(t, base64CodeResponse, base64.StdEncoding.EncodeToString(codeResponseBytes)) + + var c CodeResponse + _, err = Codec.Unmarshal(codeResponseBytes, &c) + require.NoError(t, err) + require.Equal(t, codeResponse.Data, c.Data) +} diff --git a/vms/evm/sync/message/codec.go b/vms/evm/sync/message/codec.go new file mode 100644 index 000000000000..aee9a70775d1 --- /dev/null +++ b/vms/evm/sync/message/codec.go @@ -0,0 +1,46 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +const ( + Version = uint16(0) + maxMessageSize = 2*units.MiB - 64*units.KiB // Subtract 64 KiB from p2p network cap to leave room for encoding overhead from AvalancheGo +) + +var Codec codec.Manager + +func init() { + Codec = codec.NewManager(maxMessageSize) + c := linearcodec.NewDefault() + + errs := wrappers.Errs{} + // Gossip types and sync summary type removed from codec + c.SkipRegistrations(3) + errs.Add( + // state sync types + c.RegisterType(BlockRequest{}), + c.RegisterType(BlockResponse{}), + c.RegisterType(LeafsRequest{}), + c.RegisterType(LeafsResponse{}), + c.RegisterType(CodeRequest{}), + c.RegisterType(CodeResponse{}), + ) + + // Deprecated Warp request/response types are skipped + // See https://github.com/ava-labs/coreth/pull/999 + c.SkipRegistrations(3) + + errs.Add(Codec.RegisterCodec(Version, c)) + + if errs.Errored() { + panic(errs.Err) + } +} diff --git a/vms/evm/sync/message/handler.go b/vms/evm/sync/message/handler.go new file mode 100644 index 000000000000..42a5319249c9 --- /dev/null +++ b/vms/evm/sync/message/handler.go @@ -0,0 +1,45 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" +) + +var _ RequestHandler = NoopRequestHandler{} + +// RequestHandler interface handles incoming requests from peers +// Must have methods in the format handleType(context.Context, ids.NodeID, uint32, request Type) error +// so that the Request object of relevant Type can invoke its respective handle method +// on this struct. +type RequestHandler interface { + HandleLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) + HandleBlockRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, request BlockRequest) ([]byte, error) + HandleCodeRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, codeRequest CodeRequest) ([]byte, error) +} + +// ResponseHandler handles the response for a sent request +// Only one of OnResponse or OnFailure is called for a given requestID, not both +type ResponseHandler interface { + // OnResponse is invoked when the peer responded to a request + OnResponse(response []byte) error + // OnFailure is invoked when there was a failure in processing a request + OnFailure() error +} + +type NoopRequestHandler struct{} + +func (NoopRequestHandler) HandleLeafsRequest(context.Context, ids.NodeID, uint32, LeafsRequest) ([]byte, error) { + return nil, nil +} + +func (NoopRequestHandler) HandleBlockRequest(context.Context, ids.NodeID, uint32, BlockRequest) ([]byte, error) { + return nil, nil +} + +func (NoopRequestHandler) HandleCodeRequest(context.Context, ids.NodeID, uint32, CodeRequest) ([]byte, error) { + return nil, nil +} diff --git a/vms/evm/sync/message/leafs_request.go b/vms/evm/sync/message/leafs_request.go new file mode 100644 index 000000000000..842c80c21ad0 --- /dev/null +++ b/vms/evm/sync/message/leafs_request.go @@ -0,0 +1,77 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "fmt" + + "github.com/ava-labs/libevm/common" + + "github.com/ava-labs/avalanchego/ids" +) + +const MaxCodeHashesPerRequest = 5 + +var _ Request = LeafsRequest{} + +// NodeType outlines the trie that a leaf node belongs to +// handlers.LeafsRequestHandler uses this information to determine +// which trie type to fetch the information from +type NodeType uint8 + +const ( + StateTrieNode = NodeType(1) + StateTrieKeyLength = common.HashLength +) + +// LeafsRequest is a request to receive trie leaves at the specified Root within the Start and End byte range +// Limit is the maximum number of leaves to return, starting at Start +// NodeType outlines which trie to read from state/atomic.
+type LeafsRequest struct { + Root common.Hash `serialize:"true"` + Account common.Hash `serialize:"true"` + Start []byte `serialize:"true"` + End []byte `serialize:"true"` + Limit uint16 `serialize:"true"` + NodeType NodeType `serialize:"true"` +} + +func (l LeafsRequest) String() string { + return fmt.Sprintf( + "LeafsRequest(Root=%s, Account=%s, Start=%s, End=%s, Limit=%d, NodeType=%d)", + l.Root, l.Account, common.Bytes2Hex(l.Start), common.Bytes2Hex(l.End), l.Limit, l.NodeType, + ) +} + +func (l LeafsRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, handler RequestHandler) ([]byte, error) { + return handler.HandleLeafsRequest(ctx, nodeID, requestID, l) +} + +// LeafsResponse is a response to a LeafsRequest +// Keys must be within LeafsRequest.Start and LeafsRequest.End and sorted in lexicographical order. +// +// ProofVals must be non-empty and contain a valid range proof unless the key-value pairs in the +// response are the entire trie. +// If the key-value pairs make up the entire trie, ProofVals should be empty since the root will be +// sufficient to prove that the leaves are included in the trie. +// +// More is a flag set in the client after verifying the response, which indicates if the last key-value +// pair in the response has any more elements to its right within the trie. +type LeafsResponse struct { + // Keys and Vals provides the key-value pairs in the trie in the response. + Keys [][]byte `serialize:"true"` + Vals [][]byte `serialize:"true"` + + // More indicates if there are more leaves to the right of the last value in this response. + // + // This is not serialized since it is set in the client after verifying the response via + // VerifyRangeProof and determining if there are in fact more leaves to the right of the + // last value in this response. + More bool + + // ProofVals contain the edge merkle-proofs for the range of keys included in the response. + // The keys for the proof are simply the keccak256 hashes of the values, so they are not included in the response to save bandwidth. + ProofVals [][]byte `serialize:"true"` +} diff --git a/vms/evm/sync/message/leafs_request_test.go b/vms/evm/sync/message/leafs_request_test.go new file mode 100644 index 000000000000..2e168c915bc2 --- /dev/null +++ b/vms/evm/sync/message/leafs_request_test.go @@ -0,0 +1,106 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "encoding/base64" + "math/rand" + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/stretchr/testify/require" +) + +// TestMarshalLeafsRequest requires that the structure or serialization logic hasn't changed, primarily to +// ensure compatibility with the network. 
+func TestMarshalLeafsRequest(t *testing.T) { + startBytes := make([]byte, common.HashLength) + endBytes := make([]byte, common.HashLength) + + r := rand.New(rand.NewSource(1)) //nolint:gosec // deterministic bytes for golden assertion + _, err := r.Read(startBytes) + require.NoError(t, err) + _, err = r.Read(endBytes) + require.NoError(t, err) + require.NotEmpty(t, startBytes) + require.NotEmpty(t, endBytes) + + leafsRequest := LeafsRequest{ + Root: common.BytesToHash([]byte("im ROOTing for ya")), + Start: startBytes, + End: endBytes, + Limit: 1024, + NodeType: StateTrieNode, + } + + base64LeafsRequest := "AAAAAAAAAAAAAAAAAAAAAABpbSBST09UaW5nIGZvciB5YQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIFL9/AchgmVPFj9fD5piHXKVZsdNEAN8TXu7BAfR4sZJAAAAIIGFWthoHQ2G0ekeABZ5OctmlNLEIqzSCKAHKTlIf2mZBAAB" + + leafsRequestBytes, err := Codec.Marshal(Version, leafsRequest) + require.NoError(t, err) + require.Equal(t, base64LeafsRequest, base64.StdEncoding.EncodeToString(leafsRequestBytes)) + + var l LeafsRequest + _, err = Codec.Unmarshal(leafsRequestBytes, &l) + require.NoError(t, err) + require.Equal(t, leafsRequest.Root, l.Root) + require.Equal(t, leafsRequest.Start, l.Start) + require.Equal(t, leafsRequest.End, l.End) + require.Equal(t, leafsRequest.Limit, l.Limit) + require.Equal(t, leafsRequest.NodeType, l.NodeType) +} + +// TestMarshalLeafsResponse requires that the structure or serialization logic hasn't changed, primarily to +// ensure compatibility with the network. +func TestMarshalLeafsResponse(t *testing.T) { + keysBytes := make([][]byte, 16) + valsBytes := make([][]byte, 16) + r := rand.New(rand.NewSource(1)) //nolint:gosec // deterministic bytes for golden assertion + for i := range keysBytes { + keysBytes[i] = make([]byte, common.HashLength) + n := r.Intn(8) + valsBytes[i] = make([]byte, n+8) + _, err := r.Read(keysBytes[i]) + require.NoError(t, err) + + _, err = r.Read(valsBytes[i]) + require.NoError(t, err) + } + + nextKey := make([]byte, common.HashLength) + if _, e := r.Read(nextKey); e != nil { + require.NoError(t, e) + } + require.NotEmpty(t, nextKey) + + proofVals := make([][]byte, 4) + r2 := rand.New(rand.NewSource(2)) //nolint:gosec // deterministic bytes for golden assertion + for i := range proofVals { + n := r2.Intn(8) + proofVals[i] = make([]byte, n+8) + if _, e := r2.Read(proofVals[i]); e != nil { + require.NoError(t, e) + } + } + + leafsResponse := LeafsResponse{ + Keys: keysBytes, + Vals: valsBytes, + More: true, + ProofVals: proofVals, + } + + base64LeafsResponse := 
"AAAAAAAQAAAAIE8WP18PmmIdcpVmx00QA3xNe7sEB9HixkmBhVrYaB0NAAAAIGagByk5SH9pmeudGKRHhARdh/PGfPInRumVr1olNnlRAAAAIK2zfFghtmgLTnyLdjobHUnUlVyEhiFjJSU/7HON16niAAAAIIYVu9oIMfUFmHWSHmaKW98sf8SERZLSVyvNBmjS1sUvAAAAIHHb2Wiw9xcu2FeUuzWLDDtSXaF4b5//CUJ52xlE69ehAAAAIPhMiSs77qX090OR9EXRWv1ClAQDdPaSS5jL+HE/jZYtAAAAIMr8yuOmvI+effHZKTM/+ZOTO+pvWzr23gN0NmxHGeQ6AAAAIBZZpE856x5YScYHfbtXIvVxeiiaJm+XZHmBmY6+qJwLAAAAIHOq53hmZ/fpNs1PJKv334ZrqlYDg2etYUXeHuj0qLCZAAAAIHiN5WOvpGfUnexqQOmh0AfwM8KCMGG90Oqln45NpkMBAAAAIKAQ13yW6oCnpmX2BvamO389/SVnwYl55NYPJmhtm/L7AAAAIAfuKbpk+Eq0PKDG5rkcH9O+iZBDQXnTr0SRo2kBLbktAAAAILsXyQKL6ZFOt2ScbJNHgAl50YMDVvKlTD3qsqS0R11jAAAAIOqxOTXzHYRIRRfpJK73iuFRwAdVklg2twdYhWUMMOwpAAAAIHnqPf5BNqv3UrO4Jx0D6USzyds2a3UEX479adIq5UEZAAAAIDLWEMqsbjP+qjJjo5lDcCS6nJsUZ4onTwGpEK4pX277AAAAEAAAAAmG0ekeABZ5OcsAAAAMuqL/bNRxxIPxX7kLAAAACov5IRGcFg8HAkQAAAAIUFTi0INr+EwAAAAOnQ97usvgJVqlt9RL7EAAAAAJfI0BkZLCQiTiAAAACxsGfYm8fwHx9XOYAAAADUs3OXARXoLtb0ElyPoAAAAKPr34iDoK2L6cOQAAAAoFIg0LKWiLc0uOAAAACCbJAf81TN4WAAAADBhPw50XNP9XFkKJUwAAAAuvvo+1aYfHf1gYUgAAAAqjcDk0v1CijaECAAAADkfLVT12lCZ670686kBrAAAADf5fWr9EzN4mO1YGYz4AAAAEAAAACm8xRMCqTO1W29kAAAAIZ9wol8oW4YsAAAAOaGugcKI9oAJrZhCPutAAAAAPhENjuCNqN/goPvsnNn9u" + + leafsResponseBytes, err := Codec.Marshal(Version, leafsResponse) + require.NoError(t, err) + require.Equal(t, base64LeafsResponse, base64.StdEncoding.EncodeToString(leafsResponseBytes)) + + var l LeafsResponse + _, err = Codec.Unmarshal(leafsResponseBytes, &l) + require.NoError(t, err) + require.Equal(t, leafsResponse.Keys, l.Keys) + require.Equal(t, leafsResponse.Vals, l.Vals) + require.False(t, l.More) // make sure it is not serialized + require.Equal(t, leafsResponse.ProofVals, l.ProofVals) +} diff --git a/vms/evm/sync/message/request.go b/vms/evm/sync/message/request.go new file mode 100644 index 000000000000..b4149311390a --- /dev/null +++ b/vms/evm/sync/message/request.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" +) + +// Request represents a Network request type. +type Request interface { + // Stringer enables requests to implement String() for logging. + fmt.Stringer + + // Handle allows `Request` to call respective methods on handler to handle + // this particular request type. + Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, handler RequestHandler) ([]byte, error) +} + +// RequestToBytes marshals the given request object into bytes. +func RequestToBytes(codec codec.Manager, request Request) ([]byte, error) { + return codec.Marshal(Version, &request) +} diff --git a/vms/evm/sync/message/syncable.go b/vms/evm/sync/message/syncable.go new file mode 100644 index 000000000000..3a3690b8f9c9 --- /dev/null +++ b/vms/evm/sync/message/syncable.go @@ -0,0 +1,22 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package message + +import ( + "github.com/ava-labs/libevm/common" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +type Syncable interface { + block.StateSummary + GetBlockHash() common.Hash + GetBlockRoot() common.Hash +} + +type SyncableParser interface { + Parse(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) +} + +type AcceptImplFn func(Syncable) (block.StateSyncMode, error) diff --git a/vms/evm/sync/statesynctest/test_stats.go b/vms/evm/sync/statesynctest/test_stats.go new file mode 100644 index 000000000000..b0685f16b6b8 --- /dev/null +++ b/vms/evm/sync/statesynctest/test_stats.go @@ -0,0 +1,234 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package statesynctest + +import ( + "sync" + "time" + + "github.com/ava-labs/coreth/sync/handlers/stats" +) + +var _ stats.HandlerStats = (*TestHandlerStats)(nil) + +// TestHandlerStats is test for capturing and asserting on handler metrics in test +type TestHandlerStats struct { + lock sync.Mutex + + BlockRequestCount, + MissingBlockHashCount, + BlocksReturnedSum uint32 + BlockRequestProcessingTimeSum time.Duration + + CodeRequestCount, + MissingCodeHashCount, + TooManyHashesRequested, + DuplicateHashesRequested, + CodeBytesReturnedSum uint32 + CodeReadTimeSum time.Duration + + LeafsRequestCount, + InvalidLeafsRequestCount, + LeafsReturnedSum, + MissingRootCount, + TrieErrorCount, + ProofErrorCount, + SnapshotReadErrorCount, + SnapshotReadAttemptCount, + SnapshotReadSuccessCount, + SnapshotSegmentValidCount, + SnapshotSegmentInvalidCount uint32 + ProofValsReturned int64 + LeafsReadTime, + SnapshotReadTime, + GenerateRangeProofTime, + LeafRequestProcessingTimeSum time.Duration +} + +func (m *TestHandlerStats) Reset() { + m.lock.Lock() + defer m.lock.Unlock() + m.BlockRequestCount = 0 + m.MissingBlockHashCount = 0 + m.BlocksReturnedSum = 0 + m.BlockRequestProcessingTimeSum = 0 + m.CodeRequestCount = 0 + m.MissingCodeHashCount = 0 + m.TooManyHashesRequested = 0 + m.DuplicateHashesRequested = 0 + m.CodeBytesReturnedSum = 0 + m.CodeReadTimeSum = 0 + m.LeafsRequestCount = 0 + m.InvalidLeafsRequestCount = 0 + m.LeafsReturnedSum = 0 + m.MissingRootCount = 0 + m.TrieErrorCount = 0 + m.ProofErrorCount = 0 + m.SnapshotReadErrorCount = 0 + m.SnapshotReadAttemptCount = 0 + m.SnapshotReadSuccessCount = 0 + m.SnapshotSegmentValidCount = 0 + m.SnapshotSegmentInvalidCount = 0 + m.ProofValsReturned = 0 + m.LeafsReadTime = 0 + m.SnapshotReadTime = 0 + m.GenerateRangeProofTime = 0 + m.LeafRequestProcessingTimeSum = 0 +} + +func (m *TestHandlerStats) IncBlockRequest() { + m.lock.Lock() + defer m.lock.Unlock() + m.BlockRequestCount++ +} + +func (m *TestHandlerStats) IncMissingBlockHash() { + m.lock.Lock() + defer m.lock.Unlock() + m.MissingBlockHashCount++ +} + +func (m *TestHandlerStats) UpdateBlocksReturned(num uint16) { + m.lock.Lock() + defer m.lock.Unlock() + m.BlocksReturnedSum += uint32(num) +} + +func (m *TestHandlerStats) UpdateBlockRequestProcessingTime(duration time.Duration) { + m.lock.Lock() + defer m.lock.Unlock() + m.BlockRequestProcessingTimeSum += duration +} + +func (m *TestHandlerStats) IncCodeRequest() { + m.lock.Lock() + defer m.lock.Unlock() + m.CodeRequestCount++ +} + +func (m *TestHandlerStats) IncMissingCodeHash() { + m.lock.Lock() + defer m.lock.Unlock() + m.MissingCodeHashCount++ +} + +func (m *TestHandlerStats) IncTooManyHashesRequested() { + m.lock.Lock() + defer m.lock.Unlock() + m.TooManyHashesRequested++ +} + +func (m 
*TestHandlerStats) IncDuplicateHashesRequested() { + m.lock.Lock() + defer m.lock.Unlock() + m.DuplicateHashesRequested++ +} + +func (m *TestHandlerStats) UpdateCodeReadTime(duration time.Duration) { + m.lock.Lock() + defer m.lock.Unlock() + m.CodeReadTimeSum += duration +} + +func (m *TestHandlerStats) UpdateCodeBytesReturned(bytes uint32) { + m.lock.Lock() + defer m.lock.Unlock() + m.CodeBytesReturnedSum += bytes +} + +func (m *TestHandlerStats) IncLeafsRequest() { + m.lock.Lock() + defer m.lock.Unlock() + m.LeafsRequestCount++ +} + +func (m *TestHandlerStats) IncInvalidLeafsRequest() { + m.lock.Lock() + defer m.lock.Unlock() + m.InvalidLeafsRequestCount++ +} + +func (m *TestHandlerStats) UpdateLeafsReturned(numLeafs uint16) { + m.lock.Lock() + defer m.lock.Unlock() + m.LeafsReturnedSum += uint32(numLeafs) +} + +func (m *TestHandlerStats) UpdateLeafsRequestProcessingTime(duration time.Duration) { + m.lock.Lock() + defer m.lock.Unlock() + m.LeafRequestProcessingTimeSum += duration +} + +func (m *TestHandlerStats) UpdateReadLeafsTime(duration time.Duration) { + m.lock.Lock() + defer m.lock.Unlock() + m.LeafsReadTime += duration +} + +func (m *TestHandlerStats) UpdateGenerateRangeProofTime(duration time.Duration) { + m.lock.Lock() + defer m.lock.Unlock() + m.GenerateRangeProofTime += duration +} + +func (m *TestHandlerStats) UpdateSnapshotReadTime(duration time.Duration) { + m.lock.Lock() + defer m.lock.Unlock() + m.SnapshotReadTime += duration +} + +func (m *TestHandlerStats) UpdateRangeProofValsReturned(numProofVals int64) { + m.lock.Lock() + defer m.lock.Unlock() + m.ProofValsReturned += numProofVals +} + +func (m *TestHandlerStats) IncMissingRoot() { + m.lock.Lock() + defer m.lock.Unlock() + m.MissingRootCount++ +} + +func (m *TestHandlerStats) IncTrieError() { + m.lock.Lock() + defer m.lock.Unlock() + m.TrieErrorCount++ +} + +func (m *TestHandlerStats) IncProofError() { + m.lock.Lock() + defer m.lock.Unlock() + m.ProofErrorCount++ +} + +func (m *TestHandlerStats) IncSnapshotReadError() { + m.lock.Lock() + defer m.lock.Unlock() + m.SnapshotReadErrorCount++ +} + +func (m *TestHandlerStats) IncSnapshotReadAttempt() { + m.lock.Lock() + defer m.lock.Unlock() + m.SnapshotReadAttemptCount++ +} + +func (m *TestHandlerStats) IncSnapshotReadSuccess() { + m.lock.Lock() + defer m.lock.Unlock() + m.SnapshotReadSuccessCount++ +} + +func (m *TestHandlerStats) IncSnapshotSegmentValid() { + m.lock.Lock() + defer m.lock.Unlock() + m.SnapshotSegmentValidCount++ +} + +func (m *TestHandlerStats) IncSnapshotSegmentInvalid() { + m.lock.Lock() + defer m.lock.Unlock() + m.SnapshotSegmentInvalidCount++ +} diff --git a/vms/evm/sync/statesynctest/test_trie.go b/vms/evm/sync/statesynctest/test_trie.go new file mode 100644 index 000000000000..8d9ae1aae81a --- /dev/null +++ b/vms/evm/sync/statesynctest/test_trie.go @@ -0,0 +1,136 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package statesynctest + +import ( + "crypto/rand" + "encoding/binary" + "io" + "math/big" + "testing" + + "github.com/ava-labs/coreth/utils/utilstest" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/rlp" + "github.com/ava-labs/libevm/trie" + "github.com/ava-labs/libevm/trie/trienode" + "github.com/ava-labs/libevm/triedb" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +// GenerateTrie creates a trie with [numKeys] random key-value pairs inside of [trieDB]. +// Returns the root of the generated trie, the slice of keys inserted into the trie in lexicographical +// order, and the slice of corresponding values. +// GenerateTrie reads from [r] +func GenerateTrie(t *testing.T, r io.Reader, trieDB *triedb.Database, numKeys int, keySize int) (common.Hash, [][]byte, [][]byte) { + require.GreaterOrEqual(t, keySize, wrappers.LongLen+1, "key size must be at least 9 bytes (8 bytes for uint64 and 1 random byte)") + return FillTrie(t, r, 0, numKeys, keySize, trieDB, types.EmptyRootHash) +} + +// FillTrie fills a given trie with [numKeys] random keys, each of size [keySize] +// returns inserted keys and values +func FillTrie(t *testing.T, r io.Reader, start, numKeys int, keySize int, trieDB *triedb.Database, root common.Hash) (common.Hash, [][]byte, [][]byte) { + testTrie, err := trie.New(trie.TrieID(root), trieDB) + require.NoError(t, err) + + keys := make([][]byte, 0, numKeys) + values := make([][]byte, 0, numKeys) + + // Generate key-value pairs + for i := start; i < numKeys; i++ { + key := make([]byte, keySize) + binary.BigEndian.PutUint64(key[:wrappers.LongLen], uint64(i+1)) + _, err := io.ReadFull(r, key[wrappers.LongLen:]) + require.NoError(t, err) + + nBig, err := rand.Int(rand.Reader, big.NewInt(128)) + require.NoError(t, err) + value := make([]byte, int(nBig.Int64())+128) // min 128 bytes, max 256 bytes + _, err = io.ReadFull(r, value) + require.NoError(t, err) + + testTrie.MustUpdate(key, value) + + keys = append(keys, key) + values = append(values, value) + } + + // Commit the root to [trieDB] + nextRoot, nodes, err := testTrie.Commit(false) + require.NoError(t, err) + require.NoError(t, trieDB.Update(nextRoot, root, 0, trienode.NewWithNodeSet(nodes), nil)) + require.NoError(t, trieDB.Commit(nextRoot, false)) + + return nextRoot, keys, values +} + +// CorruptTrie deletes every [n]th trie node from the trie given by [tr] from the underlying [db]. +// Assumes [tr] can be iterated without issue. +func CorruptTrie(t *testing.T, diskdb ethdb.Batcher, tr *trie.Trie, n int) { + // Delete some trie nodes + batch := diskdb.NewBatch() + nodeIt, err := tr.NodeIterator(nil) + require.NoError(t, err) + count := 0 + for nodeIt.Next(true) { + count++ + if count%n == 0 && nodeIt.Hash() != (common.Hash{}) { + require.NoError(t, batch.Delete(nodeIt.Hash().Bytes())) + } + } + require.NoError(t, nodeIt.Error()) + require.NoError(t, batch.Write()) +} + +// FillAccounts adds [numAccounts] randomly generated accounts to the secure trie at [root] and commits it to [trieDB]. +// [onAccount] is called if non-nil (so the caller can modify the account before it is stored in the secure trie). +// returns the new trie root and a map of funded keys to StateAccount structs. 
+func FillAccounts( + t *testing.T, trieDB *triedb.Database, root common.Hash, numAccounts int, + onAccount func(*testing.T, int, types.StateAccount) types.StateAccount, +) (common.Hash, map[*utilstest.Key]*types.StateAccount) { + var ( + minBalance = uint256.NewInt(3000000000000000000) + randBalance = uint256.NewInt(1000000000000000000) + maxNonce = 10 + accounts = make(map[*utilstest.Key]*types.StateAccount, numAccounts) + ) + + tr, err := trie.NewStateTrie(trie.TrieID(root), trieDB) + require.NoError(t, err) + + for i := range numAccounts { + // random nonce in [0, maxNonce) + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(maxNonce))) + require.NoError(t, err) + acc := types.StateAccount{ + Nonce: uint64(nBig.Int64()), + Balance: new(uint256.Int).Add(minBalance, randBalance), + CodeHash: types.EmptyCodeHash[:], + Root: types.EmptyRootHash, + } + if onAccount != nil { + acc = onAccount(t, i, acc) + } + + accBytes, err := rlp.EncodeToBytes(&acc) + require.NoError(t, err) + + key := utilstest.NewKey(t) + tr.MustUpdate(key.Address[:], accBytes) + accounts[key] = &acc + } + + newRoot, nodes, err := tr.Commit(false) + require.NoError(t, err) + require.NoError(t, trieDB.Update(newRoot, root, 0, trienode.NewWithNodeSet(nodes), nil)) + require.NoError(t, trieDB.Commit(newRoot, false)) + + return newRoot, accounts +} diff --git a/vms/evm/sync/stats/handler_stats.go b/vms/evm/sync/stats/handler_stats.go new file mode 100644 index 000000000000..2a03ca9d766c --- /dev/null +++ b/vms/evm/sync/stats/handler_stats.go @@ -0,0 +1,244 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package stats + +import ( + "time" + + "github.com/ava-labs/libevm/metrics" +) + +// HandlerStats reports prometheus metrics for the state sync handlers +type HandlerStats interface { + BlockRequestHandlerStats + CodeRequestHandlerStats + LeafsRequestHandlerStats +} + +type BlockRequestHandlerStats interface { + IncBlockRequest() + IncMissingBlockHash() + UpdateBlocksReturned(num uint16) + UpdateBlockRequestProcessingTime(duration time.Duration) +} + +type CodeRequestHandlerStats interface { + IncCodeRequest() + IncMissingCodeHash() + IncTooManyHashesRequested() + IncDuplicateHashesRequested() + UpdateCodeReadTime(duration time.Duration) + UpdateCodeBytesReturned(bytes uint32) +} + +type LeafsRequestHandlerStats interface { + IncLeafsRequest() + IncInvalidLeafsRequest() + UpdateLeafsReturned(numLeafs uint16) + UpdateLeafsRequestProcessingTime(duration time.Duration) + UpdateReadLeafsTime(duration time.Duration) + UpdateSnapshotReadTime(duration time.Duration) + UpdateGenerateRangeProofTime(duration time.Duration) + UpdateRangeProofValsReturned(numProofVals int64) + IncMissingRoot() + IncTrieError() + IncProofError() + IncSnapshotReadError() + IncSnapshotReadAttempt() + IncSnapshotReadSuccess() + IncSnapshotSegmentValid() + IncSnapshotSegmentInvalid() +} + +type handlerStats struct { + // BlockRequestHandler metrics + blockRequest metrics.Counter + missingBlockHash metrics.Counter + blocksReturned metrics.Histogram + blockRequestProcessingTime metrics.Timer + + // CodeRequestHandler stats + codeRequest metrics.Counter + missingCodeHash metrics.Counter + tooManyHashesRequested metrics.Counter + duplicateHashesRequested metrics.Counter + codeBytesReturned metrics.Histogram + codeReadDuration metrics.Timer + + // LeafsRequestHandler stats + leafsRequest metrics.Counter + invalidLeafsRequest metrics.Counter + leafsReturned metrics.Histogram + 
leafsRequestProcessingTime metrics.Timer + leafsReadTime metrics.Timer + snapshotReadTime metrics.Timer + generateRangeProofTime metrics.Timer + proofValsReturned metrics.Histogram + missingRoot metrics.Counter + trieError metrics.Counter + proofError metrics.Counter + snapshotReadError metrics.Counter + snapshotReadAttempt metrics.Counter + snapshotReadSuccess metrics.Counter + snapshotSegmentValid metrics.Counter + snapshotSegmentInvalid metrics.Counter +} + +func (h *handlerStats) IncBlockRequest() { + h.blockRequest.Inc(1) +} + +func (h *handlerStats) IncMissingBlockHash() { + h.missingBlockHash.Inc(1) +} + +func (h *handlerStats) UpdateBlocksReturned(num uint16) { + h.blocksReturned.Update(int64(num)) +} + +func (h *handlerStats) UpdateBlockRequestProcessingTime(duration time.Duration) { + h.blockRequestProcessingTime.Update(duration) +} + +func (h *handlerStats) IncCodeRequest() { + h.codeRequest.Inc(1) +} + +func (h *handlerStats) IncMissingCodeHash() { + h.missingCodeHash.Inc(1) +} + +func (h *handlerStats) IncTooManyHashesRequested() { + h.tooManyHashesRequested.Inc(1) +} + +func (h *handlerStats) IncDuplicateHashesRequested() { + h.duplicateHashesRequested.Inc(1) +} + +func (h *handlerStats) UpdateCodeReadTime(duration time.Duration) { + h.codeReadDuration.Update(duration) +} + +func (h *handlerStats) UpdateCodeBytesReturned(bytesLen uint32) { + h.codeBytesReturned.Update(int64(bytesLen)) +} + +func (h *handlerStats) IncLeafsRequest() { + h.leafsRequest.Inc(1) +} + +func (h *handlerStats) IncInvalidLeafsRequest() { + h.invalidLeafsRequest.Inc(1) +} + +func (h *handlerStats) UpdateLeafsRequestProcessingTime(duration time.Duration) { + h.leafsRequestProcessingTime.Update(duration) +} + +func (h *handlerStats) UpdateLeafsReturned(numLeafs uint16) { + h.leafsReturned.Update(int64(numLeafs)) +} + +func (h *handlerStats) UpdateReadLeafsTime(duration time.Duration) { + h.leafsReadTime.Update(duration) +} + +func (h *handlerStats) UpdateSnapshotReadTime(duration time.Duration) { + h.snapshotReadTime.Update(duration) +} + +func (h *handlerStats) UpdateGenerateRangeProofTime(duration time.Duration) { + h.generateRangeProofTime.Update(duration) +} + +func (h *handlerStats) UpdateRangeProofValsReturned(numProofVals int64) { + h.proofValsReturned.Update(numProofVals) +} + +func (h *handlerStats) IncMissingRoot() { h.missingRoot.Inc(1) } +func (h *handlerStats) IncTrieError() { h.trieError.Inc(1) } +func (h *handlerStats) IncProofError() { h.proofError.Inc(1) } +func (h *handlerStats) IncSnapshotReadError() { h.snapshotReadError.Inc(1) } +func (h *handlerStats) IncSnapshotReadAttempt() { h.snapshotReadAttempt.Inc(1) } +func (h *handlerStats) IncSnapshotReadSuccess() { h.snapshotReadSuccess.Inc(1) } +func (h *handlerStats) IncSnapshotSegmentValid() { h.snapshotSegmentValid.Inc(1) } +func (h *handlerStats) IncSnapshotSegmentInvalid() { h.snapshotSegmentInvalid.Inc(1) } + +// GetOrRegisterHandlerStats returns a [HandlerStats] to track state sync handler metrics. +// If `enabled` is false, a no-op implementation is returned. +// if `enabled` is true, calling this multiple times will return the same registered metrics. 
+func GetOrRegisterHandlerStats(enabled bool) HandlerStats { + if !enabled { + return NewNoopHandlerStats() + } + return &handlerStats{ + // initialize block request stats + blockRequest: metrics.GetOrRegisterCounter("block_request_count", nil), + missingBlockHash: metrics.GetOrRegisterCounter("block_request_missing_block_hash", nil), + blocksReturned: metrics.GetOrRegisterHistogram("block_request_total_blocks", nil, metrics.NewExpDecaySample(1028, 0.015)), + blockRequestProcessingTime: metrics.GetOrRegisterTimer("block_request_processing_time", nil), + + // initialize code request stats + codeRequest: metrics.GetOrRegisterCounter("code_request_count", nil), + missingCodeHash: metrics.GetOrRegisterCounter("code_request_missing_code_hash", nil), + tooManyHashesRequested: metrics.GetOrRegisterCounter("code_request_too_many_hashes", nil), + duplicateHashesRequested: metrics.GetOrRegisterCounter("code_request_duplicate_hashes", nil), + codeReadDuration: metrics.GetOrRegisterTimer("code_request_read_time", nil), + codeBytesReturned: metrics.GetOrRegisterHistogram("code_request_bytes_returned", nil, metrics.NewExpDecaySample(1028, 0.015)), + + // initialize leafs request stats + leafsRequest: metrics.GetOrRegisterCounter("leafs_request_count", nil), + invalidLeafsRequest: metrics.GetOrRegisterCounter("leafs_request_invalid", nil), + leafsRequestProcessingTime: metrics.GetOrRegisterTimer("leafs_request_processing_time", nil), + leafsReturned: metrics.GetOrRegisterHistogram("leafs_request_total_leafs", nil, metrics.NewExpDecaySample(1028, 0.015)), + leafsReadTime: metrics.GetOrRegisterTimer("leafs_request_read_time", nil), + snapshotReadTime: metrics.GetOrRegisterTimer("leafs_request_snapshot_read_time", nil), + generateRangeProofTime: metrics.GetOrRegisterTimer("leafs_request_generate_range_proof_time", nil), + proofValsReturned: metrics.GetOrRegisterHistogram("leafs_request_proof_vals_returned", nil, metrics.NewExpDecaySample(1028, 0.015)), + missingRoot: metrics.GetOrRegisterCounter("leafs_request_missing_root", nil), + trieError: metrics.GetOrRegisterCounter("leafs_request_trie_error", nil), + proofError: metrics.GetOrRegisterCounter("leafs_request_proof_error", nil), + snapshotReadError: metrics.GetOrRegisterCounter("leafs_request_snapshot_read_error", nil), + snapshotReadAttempt: metrics.GetOrRegisterCounter("leafs_request_snapshot_read_attempt", nil), + snapshotReadSuccess: metrics.GetOrRegisterCounter("leafs_request_snapshot_read_success", nil), + snapshotSegmentValid: metrics.GetOrRegisterCounter("leafs_request_snapshot_segment_valid", nil), + snapshotSegmentInvalid: metrics.GetOrRegisterCounter("leafs_request_snapshot_segment_invalid", nil), + } +} + +// no op implementation +type noopHandlerStats struct{} + +func NewNoopHandlerStats() HandlerStats { + return &noopHandlerStats{} +} + +// all operations are no-ops +func (*noopHandlerStats) IncBlockRequest() {} +func (*noopHandlerStats) IncMissingBlockHash() {} +func (*noopHandlerStats) UpdateBlocksReturned(uint16) {} +func (*noopHandlerStats) UpdateBlockRequestProcessingTime(time.Duration) {} +func (*noopHandlerStats) IncCodeRequest() {} +func (*noopHandlerStats) IncMissingCodeHash() {} +func (*noopHandlerStats) IncTooManyHashesRequested() {} +func (*noopHandlerStats) IncDuplicateHashesRequested() {} +func (*noopHandlerStats) UpdateCodeReadTime(time.Duration) {} +func (*noopHandlerStats) UpdateCodeBytesReturned(uint32) {} +func (*noopHandlerStats) IncLeafsRequest() {} +func (*noopHandlerStats) IncInvalidLeafsRequest() {} +func 
(*noopHandlerStats) UpdateLeafsRequestProcessingTime(time.Duration) {} +func (*noopHandlerStats) UpdateLeafsReturned(uint16) {} +func (*noopHandlerStats) UpdateReadLeafsTime(_ time.Duration) {} +func (*noopHandlerStats) UpdateSnapshotReadTime(_ time.Duration) {} +func (*noopHandlerStats) UpdateGenerateRangeProofTime(_ time.Duration) {} +func (*noopHandlerStats) UpdateRangeProofValsReturned(_ int64) {} +func (*noopHandlerStats) IncMissingRoot() {} +func (*noopHandlerStats) IncTrieError() {} +func (*noopHandlerStats) IncProofError() {} +func (*noopHandlerStats) IncSnapshotReadError() {} +func (*noopHandlerStats) IncSnapshotReadAttempt() {} +func (*noopHandlerStats) IncSnapshotReadSuccess() {} +func (*noopHandlerStats) IncSnapshotSegmentValid() {} +func (*noopHandlerStats) IncSnapshotSegmentInvalid() {} diff --git a/vms/evm/sync/stats/syncer_stats.go b/vms/evm/sync/stats/syncer_stats.go new file mode 100644 index 000000000000..6bba3344e755 --- /dev/null +++ b/vms/evm/sync/stats/syncer_stats.go @@ -0,0 +1,135 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package stats + +import ( + "fmt" + "time" + + "github.com/ava-labs/libevm/metrics" + + "github.com/ava-labs/avalanchego/vms/evm/sync/message" +) + +var ( + _ ClientSyncerStats = (*clientSyncerStats)(nil) + _ ClientSyncerStats = (*noopStats)(nil) +) + +type ClientSyncerStats interface { + GetMetric(message.Request) (MessageMetric, error) +} + +type MessageMetric interface { + IncRequested() + IncSucceeded() + IncFailed() + IncInvalidResponse() + IncReceived(int64) + UpdateRequestLatency(time.Duration) +} + +type messageMetric struct { + requested metrics.Counter // Number of times a request has been sent + succeeded metrics.Counter // Number of times a request has succeeded + failed metrics.Counter // Number of times a request failed (does not include invalid responses) + invalidResponse metrics.Counter // Number of times a request failed due to an invalid response + received metrics.Counter // Number of items that have been received + + requestLatency metrics.Timer // Latency for this request +} + +func NewMessageMetric(name string) MessageMetric { + return &messageMetric{ + requested: metrics.GetOrRegisterCounter(name+"_requested", nil), + succeeded: metrics.GetOrRegisterCounter(name+"_succeeded", nil), + failed: metrics.GetOrRegisterCounter(name+"_failed", nil), + invalidResponse: metrics.GetOrRegisterCounter(name+"_invalid_response", nil), + received: metrics.GetOrRegisterCounter(name+"_received", nil), + requestLatency: metrics.GetOrRegisterTimer(name+"_request_latency", nil), + } +} + +func (m *messageMetric) IncRequested() { + m.requested.Inc(1) +} + +func (m *messageMetric) IncSucceeded() { + m.succeeded.Inc(1) +} + +func (m *messageMetric) IncFailed() { + m.failed.Inc(1) +} + +func (m *messageMetric) IncInvalidResponse() { + m.invalidResponse.Inc(1) +} + +func (m *messageMetric) IncReceived(size int64) { + m.received.Inc(size) +} + +func (m *messageMetric) UpdateRequestLatency(duration time.Duration) { + m.requestLatency.Update(duration) +} + +type clientSyncerStats struct { + leafMetrics map[message.NodeType]MessageMetric + codeRequestMetric, + blockRequestMetric MessageMetric +} + +// NewClientSyncerStats returns stats for the client syncer +func NewClientSyncerStats(leafMetricNames map[message.NodeType]string) *clientSyncerStats { + leafMetrics := make(map[message.NodeType]MessageMetric, len(leafMetricNames)) + for nodeType, name := range leafMetricNames { + 
leafMetrics[nodeType] = NewMessageMetric(name) + } + return &clientSyncerStats{ + leafMetrics: leafMetrics, + codeRequestMetric: NewMessageMetric("sync_code"), + blockRequestMetric: NewMessageMetric("sync_blocks"), + } +} + +// GetMetric returns the appropriate message metric for the given request +func (c *clientSyncerStats) GetMetric(msgIntf message.Request) (MessageMetric, error) { + switch msg := msgIntf.(type) { + case message.BlockRequest: + return c.blockRequestMetric, nil + case message.CodeRequest: + return c.codeRequestMetric, nil + case message.LeafsRequest: + metric, ok := c.leafMetrics[msg.NodeType] + if !ok { + return nil, fmt.Errorf("invalid leafs request for node type: %T", msg.NodeType) + } + return metric, nil + default: + return nil, fmt.Errorf("attempted to get metric for invalid request with type %T", msg) + } +} + +// no-op implementation of ClientSyncerStats +type noopStats struct { + noop noopMsgMetric +} + +type noopMsgMetric struct{} + +func (noopMsgMetric) IncRequested() {} +func (noopMsgMetric) IncSucceeded() {} +func (noopMsgMetric) IncFailed() {} +func (noopMsgMetric) IncInvalidResponse() {} +func (noopMsgMetric) IncReceived(int64) {} +func (noopMsgMetric) UpdateRequestLatency(time.Duration) {} + +func NewNoOpStats() ClientSyncerStats { + return &noopStats{} +} + +func (n noopStats) GetMetric(_ message.Request) (MessageMetric, error) { + return n.noop, nil +} diff --git a/vms/evm/sync/types.go b/vms/evm/sync/types.go new file mode 100644 index 000000000000..f04658622f0e --- /dev/null +++ b/vms/evm/sync/types.go @@ -0,0 +1,24 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "context" +) + +// StateSyncer is the common interface for all sync operations. +// This provides a unified interface for atomic state sync and state trie sync. +type StateSyncer interface { + // Sync completes the full sync operation, returning any errors encountered. + // The sync will respect context cancellation. + Sync(ctx context.Context) error + + // Name returns a human-readable name for this syncer implementation. + Name() string + + // ID returns a stable, machine-oriented identifier (e.g., "state_block_sync", "state_code_sync", + // "state_evm_state_sync", "state_atomic_sync"). Implementations should ensure this is unique and + // stable across renames for logging/metrics/deduplication. + ID() string +}
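
Note (not part of the diff): a minimal usage sketch of the client syncer stats added above. The metric name "sync_state_trie_leaves" and the helper recordLeafRequest are hypothetical; only NewClientSyncerStats, GetMetric, message.LeafsRequest, and IncRequested come from this change.

package example

import (
	"github.com/ava-labs/avalanchego/vms/evm/sync/message"
	"github.com/ava-labs/avalanchego/vms/evm/sync/stats"
)

// recordLeafRequest is a hypothetical helper showing how a syncer might look up
// and bump the metric associated with an outgoing LeafsRequest.
func recordLeafRequest() error {
	// "sync_state_trie_leaves" is an assumed metric name, not defined in this diff.
	syncStats := stats.NewClientSyncerStats(map[message.NodeType]string{
		message.StateTrieNode: "sync_state_trie_leaves",
	})

	metric, err := syncStats.GetMetric(message.LeafsRequest{NodeType: message.StateTrieNode})
	if err != nil {
		return err
	}
	metric.IncRequested() // count the request before it is sent
	return nil
}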
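
Note (not part of the diff): a minimal sketch of a type satisfying the StateSyncer interface defined in vms/evm/sync/types.go. The noopSyncer type and package name are hypothetical; "state_block_sync" is one of the example IDs listed in the interface documentation.

package example

import (
	"context"

	evmsync "github.com/ava-labs/avalanchego/vms/evm/sync"
)

var _ evmsync.StateSyncer = (*noopSyncer)(nil)

// noopSyncer is a hypothetical, do-nothing implementation of StateSyncer.
type noopSyncer struct{}

// Sync respects context cancellation, as the interface requires, and otherwise does nothing.
func (*noopSyncer) Sync(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	// A real syncer would fetch, verify, and commit state here.
	return nil
}

// Name returns a human-readable name for logging.
func (*noopSyncer) Name() string { return "noop syncer" }

// ID returns a stable, machine-oriented identifier for metrics and deduplication.
func (*noopSyncer) ID() string { return "state_block_sync" }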