diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index 47f95461fb..80af79528d 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -159,6 +159,20 @@ AllTests-mainnet + atSlot sanity OK + parent sanity OK ``` +## BlocksRangeBuffer test suite +```diff ++ Add and query blocks test [backward] OK ++ Add and query blocks test [forward] OK ++ Block insertion test [backward] OK ++ Block insertion test [forward] OK ++ Buffer advance test [backward] OK ++ Buffer advance test [forward] OK ++ Buffer invalidate test [backward] OK ++ Buffer invalidate test [forward] OK ++ Range peek real test cases [forward] OK ++ Range peek test [backward] OK ++ Range peek test [forward] OK +``` ## ColumnMap test suite ```diff + and() operation test OK @@ -990,29 +1004,19 @@ AllTests-mainnet ## SyncManager test suite ```diff + [SyncManager] groupBlobs() test OK -+ [SyncQueue# & Backward] Combination of missing parent and good blocks [3 peers] test OK -+ [SyncQueue# & Backward] Empty responses should not advance queue until other peers will no OK -+ [SyncQueue# & Backward] Empty responses should not be accounted [3 peers] test OK -+ [SyncQueue# & Backward] Failure request push test OK -+ [SyncQueue# & Backward] Invalid block [3 peers] test OK -+ [SyncQueue# & Backward] Smoke [3 peers] test OK -+ [SyncQueue# & Backward] Smoke [single peer] test OK -+ [SyncQueue# & Backward] Unviable block [3 peers] test OK -+ [SyncQueue# & Backward] epochFilter() test OK -+ [SyncQueue# & Forward] Combination of missing parent and good blocks [3 peers] test OK -+ [SyncQueue# & Forward] Empty responses should not advance queue until other peers will not OK -+ [SyncQueue# & Forward] Empty responses should not be accounted [3 peers] test OK -+ [SyncQueue# & Forward] Failure request push test OK -+ [SyncQueue# & Forward] Invalid block [3 peers] test OK -+ [SyncQueue# & Forward] Smoke [3 peers] test OK -+ [SyncQueue# & Forward] Smoke [single peer] test OK -+ [SyncQueue# & Forward] Unviable block [3 peers] test OK -+ [SyncQueue# & Forward] epochFilter() test OK ++ [SyncQueue#Backward] Combination of missing parent and good blocks [3 peers] test OK ++ [SyncQueue#Backward] Empty responses should not advance queue until other peers will not c OK ++ [SyncQueue#Backward] Empty responses should not be accounted [3 peers] test OK ++ [SyncQueue#Backward] Failure request push test OK ++ [SyncQueue#Backward] Invalid block [3 peers] test OK + [SyncQueue#Backward] Missing parent and exponential rewind [3 peers] test OK ++ [SyncQueue#Backward] Smoke [3 peers] test OK ++ [SyncQueue#Backward] Smoke [single peer] test OK ++ [SyncQueue#Backward] Unviable block [3 peers] test OK ++ [SyncQueue#Backward] epochFilter() test OK + [SyncQueue#Backward] getRewindPoint() test OK + [SyncQueue#Forward] Missing parent and exponential rewind [3 peers] test OK + [SyncQueue#Forward] getRewindPoint() test OK -+ [SyncQueue] checkBlobsResponse() test OK + [SyncQueue] checkResponse() test OK + [SyncQueue] hasEndGap() test OK ``` diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 185781793c..7f5ee238ff 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -28,7 +28,7 @@ import ./spec/datatypes/[base, altair], ./spec/eth2_apis/dynamic_fee_recipients, ./spec/signatures_batch, - ./sync/[sync_manager, request_manager, sync_types, validator_custody], + ./sync/[sync_overseer2, sync_manager, request_manager, sync_types, validator_custody], ./validators/[ action_tracker, message_router, validator_monitor, validator_pool, 
keystore_management], @@ -41,13 +41,14 @@ export eth2_network, el_manager, request_manager, sync_manager, eth2_processor, optimistic_processor, blockchain_dag, block_quarantine, base, message_router, validator_monitor, validator_pool, - consensus_manager, dynamic_fee_recipients, sync_types + consensus_manager, dynamic_fee_recipients, sync_types, sync_overseer2 type EventBus* = object headQueue*: AsyncEventQueue[HeadChangeInfoObject] blocksQueue*: AsyncEventQueue[EventBeaconBlockObject] blockGossipQueue*: AsyncEventQueue[EventBeaconBlockGossipObject] + blockGossipPeerQueue*: AsyncEventQueue[EventBeaconBlockGossipPeerObject] phase0AttestQueue*: AsyncEventQueue[phase0.Attestation] singleAttestQueue*: AsyncEventQueue[SingleAttestation] exitQueue*: AsyncEventQueue[SignedVoluntaryExit] @@ -100,7 +101,7 @@ type syncManager*: SyncManager[Peer, PeerId] backfiller*: SyncManager[Peer, PeerId] untrustedManager*: SyncManager[Peer, PeerId] - syncOverseer*: SyncOverseerRef + syncOverseer*: SyncOverseerRef2 processor*: ref Eth2Processor batchVerifier*: ref BatchVerifier blockProcessor*: ref BlockProcessor diff --git a/beacon_chain/beacon_node_light_client.nim b/beacon_chain/beacon_node_light_client.nim index 58ab9cb9b2..edd223182a 100644 --- a/beacon_chain/beacon_node_light_client.nim +++ b/beacon_chain/beacon_node_light_client.nim @@ -54,7 +54,7 @@ proc initLightClient*( optimisticProcessor = initOptimisticProcessor( cfg.timeParams, getBeaconTime, optimisticHandler) - shouldInhibitSync = func(): bool = + shouldInhibitSync = proc(): bool = if isNil(node.syncOverseer): false else: diff --git a/beacon_chain/consensus_object_pools/block_pools_types.nim b/beacon_chain/consensus_object_pools/block_pools_types.nim index 9e66ffc781..25611f4deb 100644 --- a/beacon_chain/consensus_object_pools/block_pools_types.nim +++ b/beacon_chain/consensus_object_pools/block_pools_types.nim @@ -11,8 +11,7 @@ import # Standard library std/[tables, hashes], # Status libraries - chronicles, - results, + chronicles, libp2p/peerid, results, # Internals ../spec/[signatures_batch, forks, helpers], ".."/[beacon_chain_db, era_db], @@ -46,6 +45,9 @@ type Duplicate ## We've seen this value already, can't add again + MissingSidecars + ## We do not have sidecars at the moment + OnBlockCallback* = proc(data: ForkedTrustedSignedBeaconBlock) {.gcsafe, raises: [].} OnBlockGossipCallback* = @@ -336,6 +338,30 @@ type slot*: Slot block_root* {.serializedFieldName: "block".}: Eth2Digest + EventBeaconBlockGossipPeerObject* = object + blck*: ForkedSignedBeaconBlock + src*: PeerId + +template OnBlockAddedCallback*(kind: static ConsensusFork): auto = + when kind == ConsensusFork.Gloas: + typedesc[OnGloasBlockAdded] + elif kind == ConsensusFork.Fulu: + typedesc[OnFuluBlockAdded] + elif kind == ConsensusFork.Electra: + typedesc[OnElectraBlockAdded] + elif kind == ConsensusFork.Deneb: + typedesc[OnDenebBlockAdded] + elif kind == ConsensusFork.Capella: + typedesc[OnCapellaBlockAdded] + elif kind == ConsensusFork.Bellatrix: + typedesc[OnBellatrixBlockAdded] + elif kind == ConsensusFork.Altair: + typedesc[OnAltairBlockAdded] + elif kind == ConsensusFork.Phase0: + typedesc[OnPhase0BlockAdded] + else: + static: raiseAssert "Unreachable" + template timeParams*(dag: ChainDAGRef): TimeParams = dag.cfg.timeParams @@ -470,3 +496,13 @@ func init*(t: typedesc[EventBeaconBlockGossipObject], slot: forkyBlck.message.slot, block_root: forkyBlck.root ) + +func init*( + t: typedesc[EventBeaconBlockGossipPeerObject], + v: ForkySignedBeaconBlock, + s: PeerId +): 
EventBeaconBlockGossipPeerObject = + EventBeaconBlockGossipPeerObject( + blck: ForkedSignedBeaconBlock.init(v), + src: s + ) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index c3a5bd11e1..eb0f24ab87 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -24,7 +24,7 @@ import ./spec/datatypes/[altair, bellatrix, phase0], ./spec/[ engine_authentication, weak_subjectivity, peerdas_helpers], - ./sync/[sync_protocol, light_client_protocol, sync_overseer, validator_custody], + ./sync/[sync_protocol, light_client_protocol, sync_overseer2, validator_custody], ./validators/[keystore_management, beacon_validators], ./[ beacon_node, beacon_node_light_client, buildinfo, deposits, @@ -535,56 +535,57 @@ proc initFullNode( {SyncManagerFlag.NoGenesisSync} else: {} - syncManager = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, - dag.cfg.FULU_FORK_EPOCH, - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, - SyncQueueKind.Forward, getLocalHeadSlot, - getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, - getFrontfillSlot, isWithinWeakSubjectivityPeriod, - dag.tail.slot, blockVerifier, forkAtEpoch, - shutdownEvent = node.shutdownEvent, - flags = syncManagerFlags) - backfiller = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, - dag.cfg.FULU_FORK_EPOCH, - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, - SyncQueueKind.Backward, getLocalHeadSlot, - getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, - getFrontfillSlot, isWithinWeakSubjectivityPeriod, - dag.backfill.slot, blockVerifier, forkAtEpoch, maxHeadAge = 0, - shutdownEvent = node.shutdownEvent, - flags = syncManagerFlags) - clistPivotSlot = - if clist.tail.isSome(): - clist.tail.get().blck.slot() - else: - getLocalWallSlot() +# syncManager = newSyncManager[Peer, PeerId]( +# node.network.peerPool, +# dag.cfg.DENEB_FORK_EPOCH, +# dag.cfg.FULU_FORK_EPOCH, +# dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, +# dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, +# SyncQueueKind.Forward, getLocalHeadSlot, +# getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, +# getFrontfillSlot, isWithinWeakSubjectivityPeriod, +# dag.tail.slot, blockVerifier, forkAtEpoch, +# shutdownEvent = node.shutdownEvent, +# flags = syncManagerFlags) +# backfiller = newSyncManager[Peer, PeerId]( +# node.network.peerPool, +# dag.cfg.DENEB_FORK_EPOCH, +# dag.cfg.FULU_FORK_EPOCH, +# dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, +# dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, +# SyncQueueKind.Backward, getLocalHeadSlot, +# getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, +# getFrontfillSlot, isWithinWeakSubjectivityPeriod, +# dag.backfill.slot, blockVerifier, forkAtEpoch, maxHeadAge = 0, +# shutdownEvent = node.shutdownEvent, +# flags = syncManagerFlags) +# clistPivotSlot = +# if clist.tail.isSome(): +# clist.tail.get().blck.slot() +# else: +# getLocalWallSlot() eaSlot = dag.head.slot - untrustedManager = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, - dag.cfg.FULU_FORK_EPOCH, - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, - SyncQueueKind.Backward, getLocalHeadSlot, - getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getUntrustedBackfillSlot, - getFrontfillSlot, isWithinWeakSubjectivityPeriod, - clistPivotSlot, untrustedBlockVerifier, forkAtEpoch, maxHeadAge = 0, - 
shutdownEvent = node.shutdownEvent, - flags = syncManagerFlags) + erSlot = dag.head.slot +# untrustedManager = newSyncManager[Peer, PeerId]( +# node.network.peerPool, +# dag.cfg.DENEB_FORK_EPOCH, +# dag.cfg.FULU_FORK_EPOCH, +# dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, +# dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, +# SyncQueueKind.Backward, getLocalHeadSlot, +# getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getUntrustedBackfillSlot, +# getFrontfillSlot, isWithinWeakSubjectivityPeriod, +# clistPivotSlot, untrustedBlockVerifier, forkAtEpoch, maxHeadAge = 0, +# shutdownEvent = node.shutdownEvent, +# flags = syncManagerFlags) router = (ref MessageRouter)( processor: processor, network: node.network) - requestManager = RequestManager.init( - node.network, supernode, custodyColumns, - dag.cfg.DENEB_FORK_EPOCH, getBeaconTime, (proc(): bool = syncManager.inProgress), - quarantine, blobQuarantine, dataColumnQuarantine, rmanBlockVerifier, - rmanBlockLoader, rmanBlobLoader, rmanDataColumnLoader) +# requestManager = RequestManager.init( +# node.network, supernode, custodyColumns, +# dag.cfg.DENEB_FORK_EPOCH, getBeaconTime, (proc(): bool = syncManager.inProgress), +# quarantine, blobQuarantine, dataColumnQuarantine, rmanBlockVerifier, +# rmanBlockLoader, rmanBlobLoader, rmanDataColumnLoader) validatorCustody = ValidatorCustodyRef.init(node.network, dag, custodyColumns, dataColumnQuarantine) @@ -647,22 +648,18 @@ proc initFullNode( node.batchVerifier = batchVerifier node.blockProcessor = blockProcessor node.consensusManager = consensusManager - node.requestManager = requestManager node.validatorCustody = validatorCustody - node.syncManager = syncManager - node.backfiller = backfiller - node.untrustedManager = untrustedManager - node.syncOverseer = SyncOverseerRef.new(node.consensusManager, - node.validatorMonitor, - config, - getBeaconTime, - node.list, - node.beaconClock, - node.eventBus.optFinHeaderUpdateQueue, - node.network.peerPool, - node.batchVerifier, - syncManager, backfiller, - untrustedManager) + # node.requestManager = requestManager + # node.syncManager = syncManager + # node.backfiller = backfiller + # node.untrustedManager = untrustedManager + node.syncOverseer = + SyncOverseerRef2.new(node.network, node.consensusManager, config, + getBeaconTime, node.beaconClock, blockProcessor, + quarantine, blobQuarantine, dataColumnQuarantine, + node.eventBus.blockGossipPeerQueue, + node.eventBus.blocksQueue, + node.eventBus.finalQueue) node.router = router await node.addValidators() @@ -766,6 +763,7 @@ proc init*( headQueue: newAsyncEventQueue[HeadChangeInfoObject](), blocksQueue: newAsyncEventQueue[EventBeaconBlockObject](), blockGossipQueue: newAsyncEventQueue[EventBeaconBlockGossipObject](), + blockGossipPeerQueue: newAsyncEventQueue[EventBeaconBlockGossipPeerObject](), phase0AttestQueue: newAsyncEventQueue[phase0.Attestation](), singleAttestQueue: newAsyncEventQueue[SingleAttestation](), exitQueue: newAsyncEventQueue[SignedVoluntaryExit](), @@ -1813,8 +1811,6 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = .pruneAfterFinalization( node.dag.finalizedHead.slot.epoch() ) - node.processor.blobQuarantine[].pruneAfterFinalization( - node.dag.finalizedHead.slot.epoch(), node.dag.needsBackfill()) node.processor.quarantine[].pruneAfterFinalization( node.dag.finalizedHead.slot.epoch(), node.dag.needsBackfill()) @@ -2149,7 +2145,7 @@ proc onSlotStart(node: BeaconNode, wallTime: BeaconTime, node.consensusManager[].updateHead(wallSlot) await node.handleValidatorDuties(lastSlot, wallSlot) - 
node.requestManager.switchToColumnLoop() + # node.requestManager.switchToColumnLoop() await onSlotEnd(node, wallSlot) # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#registration-dissemination @@ -2289,20 +2285,27 @@ proc installMessageValidators(node: BeaconNode) = # beacon_block # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#beacon_block - node.network.addValidator( - getBeaconBlocksTopic(digest), proc ( - signedBlock: consensusFork.SignedBeaconBlock, - src: PeerId, - ): ValidationResult = - if node.shouldSyncOptimistically(node.currentSlot): - toValidationResult( - node.optimisticProcessor.processSignedBeaconBlock( - signedBlock)) - else: - toValidationResult( - node.processor[].processSignedBeaconBlock( - MsgSource.gossip, signedBlock))) - + when consensusFork >= ConsensusFork.Gloas: + debugGloasComment " " + else: + node.network.addValidator( + getBeaconBlocksTopic(digest), proc ( + signedBlock: consensusFork.SignedBeaconBlock, + src: PeerId, + ): ValidationResult = + if node.shouldSyncOptimistically(node.currentSlot): + toValidationResult( + node.optimisticProcessor.processSignedBeaconBlock( + signedBlock)) + else: + let res = + toValidationResult( + node.processor[].processSignedBeaconBlock( + MsgSource.gossip, signedBlock)) + if res == ValidationResult.Accept: + node.eventBus.blockGossipPeerQueue.emit( + EventBeaconBlockGossipPeerObject.init(signedBlock, src)) + res) # beacon_attestation_{subnet_id} # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#beacon_attestation_subnet_id @@ -2571,7 +2574,7 @@ proc run*(node: BeaconNode, stopper: StopFuture) {.raises: [CatchableError].} = wallSlot = wallTime.slotOrZero(node.dag.timeParams) node.startLightClient() - node.requestManager.start() + # node.requestManager.start() node.syncOverseer.start() waitFor node.updateGossipStatus(wallSlot) diff --git a/beacon_chain/rpc/rest_node_api.nim b/beacon_chain/rpc/rest_node_api.nim index 95e2438636..c8295ff175 100644 --- a/beacon_chain/rpc/rest_node_api.nim +++ b/beacon_chain/rpc/rest_node_api.nim @@ -260,14 +260,17 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) = # https://ethereum.github.io/beacon-APIs/#/Node/getSyncingStatus router.api2(MethodGet, "/eth/v1/node/syncing") do () -> RestApiResponse: let - wallSlot = node.currentSlot headSlot = node.dag.head.slot - distance = wallSlot - headSlot + distance = + if isNil(node.syncOverseer): + 0'u64 + else: + node.syncOverseer.syncDistance() isSyncing = - if isNil(node.syncManager): + if isNil(node.syncOverseer): false else: - node.syncManager.inProgress + node.syncOverseer.syncInProgress() isOptimistic = if node.currentSlot().epoch() >= node.dag.cfg.BELLATRIX_FORK_EPOCH: Opt.some(not node.dag.head.executionValid) @@ -290,9 +293,13 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) = router.api2(MethodGet, "/eth/v1/node/health") do () -> RestApiResponse: # TODO: Add ability to detect node's issues and return 503 error according # to specification. 
+ if isNil(node.syncOverseer): + return RestApiResponse.response(Http200) + let status = - if node.syncManager.inProgress: + if node.syncOverseer.syncInProgress(): Http206 else: Http200 + RestApiResponse.response(status) diff --git a/beacon_chain/spec/forks.nim b/beacon_chain/spec/forks.nim index dd1b73e554..9751d3bd00 100644 --- a/beacon_chain/spec/forks.nim +++ b/beacon_chain/spec/forks.nim @@ -1313,6 +1313,10 @@ template slot*(x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): Slot = withBlck(x): forkyBlck.message.slot +template parent_root*(x: ForkedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock): Eth2Digest = + withBlck(x): forkyBlck.message.parent_root + template shortLog*(x: ForkedBeaconBlock): auto = withBlck(x): shortLog(forkyBlck) diff --git a/beacon_chain/sync/block_buffer.nim b/beacon_chain/sync/block_buffer.nim new file mode 100644 index 0000000000..1e6b194b66 --- /dev/null +++ b/beacon_chain/sync/block_buffer.nim @@ -0,0 +1,429 @@ +# beacon_chain +# Copyright (c) 2018-2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import + # Std lib + std/tables, + # Status libs + results, + ../consensus_object_pools/block_pools_types, + ../sync/sync_queue, + ../spec/forks + +type + BlocksRangeBuffer* = object + direction: SyncQueueKind + blocks: seq[ref ForkedSignedBeaconBlock] + roots: Table[Eth2Digest, ref ForkedSignedBeaconBlock] + maxBufferSize: int + + BlocksRootBuffer* = object + roots: Table[Eth2Digest, ref ForkedSignedBeaconBlock] + +func startSlot*(buffer: BlocksRangeBuffer): Slot = + buffer.blocks[0][].slot + +func lastSlot*(buffer: BlocksRangeBuffer): Slot = + buffer.blocks[^1][].slot + +func startBlock*(buffer: BlocksRangeBuffer): ref ForkedSignedBeaconBlock = + buffer.blocks[0] + +func lastBlock*(buffer: BlocksRangeBuffer): ref ForkedSignedBeaconBlock = + buffer.blocks[^1] + +func shortLog*(buffer: BlocksRangeBuffer): string = + if len(buffer.blocks) == 0: + return "[empty]" + "[" & $buffer.startSlot & ":" & $buffer.lastSlot & "]/" & $len(buffer.blocks) + +func getIndex(buffer: BlocksRangeBuffer, slot: Slot): Opt[int] = + case buffer.direction + of SyncQueueKind.Forward: + if (slot < buffer.startSlot): + return Opt.none(int) + let res = uint64(slot - buffer.startSlot) + if res >= lenu64(buffer.blocks): + return Opt.none(int) + # `int` conversion is safe here, because we compared `res` value with + # length of `blocks` sequence. + Opt.some(int(res)) + of SyncQueueKind.Backward: + if (slot > buffer.startSlot): + return Opt.none(int) + let res = uint64(buffer.startSlot - slot) + if res >= lenu64(buffer.blocks): + return Opt.none(int) + # `int` conversion is safe here, because we compared `res` value with + # length of `blocks` sequence. 
+ Opt.some(int(res)) + +func toSlot(buffer: BlocksRangeBuffer, index: int): Opt[Slot] = + if (index < 0) or (index >= len(buffer.blocks)): + return Opt.none(Slot) + case buffer.direction + of SyncQueueKind.Forward: + Opt.some(buffer.startSlot + uint64(index)) + of SyncQueueKind.Backward: + Opt.some(buffer.startSlot - uint64(index)) + +func `[]`*( + buffer: BlocksRangeBuffer, + root: Eth2Digest +): ref ForkedSignedBeaconBlock = + buffer.roots.getOrDefault(root) + +func `[]`*( + buffer: BlocksRangeBuffer, + slot: Slot +): ref ForkedSignedBeaconBlock = + if len(buffer.blocks) == 0: + return nil + let index = buffer.getIndex(slot).valueOr: + return nil + let blck = buffer.blocks[index] + if blck[].slot != slot: + return nil + blck + +template isNew(buffer: BlocksRangeBuffer, s: Slot): bool = + case buffer.direction + of SyncQueueKind.Forward: + buffer.lastSlot < s + of SyncQueueKind.Backward: + buffer.lastSlot > s + +func fillGap( + buffer: var BlocksRangeBuffer, + slot: Slot +) = + let lastBlock = buffer.lastBlock + case buffer.direction + of SyncQueueKind.Forward: + let count = int(slot - lastBlock[].slot) - 1 + for i in 0 ..< count: + buffer.blocks.add(lastBlock) + of SyncQueueKind.Backward: + let count = int(lastBlock[].slot - slot) - 1 + for i in 0 ..< count: + buffer.blocks.add(lastBlock) + +func resetBuffer(buffer: var BlocksRangeBuffer, count: int) = + for index in count ..< len(buffer.blocks): + let blck = buffer.blocks[index] + buffer.roots.del(blck[].root) + buffer.blocks[index] = nil + buffer.blocks.setLen(count) + +func before(buffer: BlocksRangeBuffer, slota, slotb: Slot): bool = + case buffer.direction + of SyncQueueKind.Forward: + slota < slotb + of SyncQueueKind.Backward: + slota > slotb + +func beforeOrEq(buffer: BlocksRangeBuffer, slota, slotb: Slot): bool = + case buffer.direction + of SyncQueueKind.Forward: + slota <= slotb + of SyncQueueKind.Backward: + slota >= slotb + +func after(buffer: BlocksRangeBuffer, slota, slotb: Slot): bool = + case buffer.direction + of SyncQueueKind.Forward: + slota > slotb + of SyncQueueKind.Backward: + slota < slotb + +func prev(buffer: BlocksRangeBuffer, slot: Slot): Slot = + case buffer.direction + of SyncQueueKind.Forward: + if slot == GENESIS_SLOT: + return slot + slot - 1 + of SyncQueueKind.Backward: + if slot == FAR_FUTURE_SLOT: + return FAR_FUTURE_SLOT + slot + 1 + +func checkRoots( + buffer: BlocksRangeBuffer, + newBlock, lastBlock: ref ForkedSignedBeaconBlock +): bool = + case buffer.direction + of SyncQueueKind.Forward: + lastBlock[].root() == newBlock[].parent_root() + of SyncQueueKind.Backward: + newBlock[].root() == lastBlock[].parent_root() + +proc add*( + buffer: var BlocksRangeBuffer, + blck: ref ForkedSignedBeaconBlock +): Result[void, VerifierError] = + doAssert(not(isNil(blck)), "Block should not be nil at this point!") + + let + (blockSlot, blockRoot, blockParentRoot) = + withBlck(blck[]): + (forkyBlck.message.slot, forkyBlck.root, forkyBlck.message.parent_root) + + if len(buffer.blocks) == 0: + buffer.blocks.add(blck) + buffer.roots[blockRoot] = blck + return ok() + + if buffer.before(blockSlot, buffer.startSlot): + buffer.resetBuffer(0) + buffer.blocks.add(blck) + buffer.roots[blockRoot] = blck + return ok() + + if buffer.isNew(blockSlot): + # This is new block + let lastBlock = buffer.blocks[^1] + if not(buffer.checkRoots(blck, lastBlock)): + return err(VerifierError.MissingParent) + buffer.fillGap(blockSlot) + buffer.blocks.add(blck) + buffer.roots[blockRoot] = blck + ok() + else: + # Block replacement + let + index = 
buffer.getIndex(blockSlot).get() + innerBlock = buffer.blocks[index] + if (innerBlock[].slot == blockSlot) and (innerBlock[].root == blockRoot) and + (innerBlock[].parent_root == blockParentRoot): + return err(VerifierError.Duplicate) + if index == 0: + buffer.resetBuffer(0) + buffer.blocks.add(blck) + buffer.roots[blockRoot] = blck + return ok() + + let prevBlock = buffer.blocks[index - 1] + if not(buffer.checkRoots(blck, prevBlock)): + return err(VerifierError.MissingParent) + buffer.resetBuffer(index) + buffer.blocks.add(blck) + buffer.roots[blockRoot] = blck + ok() + +iterator blocks( + buffer: BlocksRangeBuffer, + index, count: int +): ref ForkedSignedBeaconBlock = + case buffer.direction + of SyncQueueKind.Forward: + let lastIndex = min(len(buffer.blocks) - 1, index + count - 1) + for i in countup(index, lastIndex): + let blck = buffer.blocks[i] + if blck[].slot == buffer.toSlot(i).get(): + yield blck + of SyncQueueKind.Backward: + let lastIndex = max(0, index - count + 1) + for i in countdown(index, lastIndex): + if buffer.blocks[i][].slot == buffer.toSlot(i).get(): + let blck = buffer.blocks[i] + if blck[].slot == buffer.toSlot(i).get(): + yield blck + +func contains*(buffer: BlocksRangeBuffer, srange: SyncRange): bool = + doAssert(srange.count > 0) + if len(buffer.blocks) == 0: + return false + if (srange.last_slot() < buffer.startSlot()) or + (srange.start_slot() > buffer.lastSlot()): + return false + true + +func peekRange*( + buffer: BlocksRangeBuffer, + srange: SyncRange +): seq[ref ForkedSignedBeaconBlock] = + var res: seq[ref ForkedSignedBeaconBlock] + + if len(buffer.blocks) == 0: + return res + + let + (startSlot, lastSlot, ecount) = + case buffer.direction + of SyncQueueKind.Forward: + if srange.start_slot() > buffer.lastSlot: + return res + if srange.last_slot() < buffer.startSlot: + return res + let + slota = + if srange.start_slot() <= buffer.startSlot: + buffer.startSlot + else: + srange.start_slot() + startGap = slota - srange.start_slot() + slotb = slota + uint64(srange.count - 1) - startGap + (slota, slotb, int(slotb - slota + 1)) + of SyncQueueKind.Backward: + if srange.start_slot() > buffer.startSlot: + return res + if srange.last_slot() < buffer.lastSlot: + return res + let + slota = + if srange.start_slot() <= buffer.lastSlot: + buffer.lastSlot + else: + srange.start_slot() + lastGap = slota - srange.start_slot() + slotb = slota + uint64(srange.count - 1) - lastGap + (slota, slotb, int(slotb - slota + 1)) + startIndex = buffer.getIndex(startSlot).valueOr: + return res + + for blck in buffer.blocks(startIndex, ecount): + if len(res) == 0: + res.add(blck) + else: + if res[^1] != blck: + res.add(blck) + if blck[].slot == lastSlot: + break + res + +func getNonEmptyIndex( + buffer: BlocksRangeBuffer, + slot: Slot, + forward: bool +): Opt[int] = + var res = ? 
buffer.getIndex(slot) + if buffer.blocks[res][].slot == slot: + return Opt.some(res) + if forward: + for index in countup(res, len(buffer.blocks) - 1): + if buffer.blocks[index][].slot == buffer.toSlot(index).get(): + return Opt.some(index) + else: + for index in countdown(res, 0): + if buffer.blocks[index][].slot == buffer.toSlot(index).get(): + return Opt.some(index) + Opt.none(int) + +proc advance*( + buffer: var BlocksRangeBuffer, + slot: Slot +) = + if len(buffer.blocks) == 0: + return + if buffer.beforeOrEq(slot, buffer.startSlot): + return + if buffer.after(slot, buffer.lastSlot): + buffer.resetBuffer(0) + return + let startIndex = buffer.getNonEmptyIndex(slot, true).valueOr: + buffer.resetBuffer(0) + return + + var count = 0 + for index in startIndex ..< len(buffer.blocks): + let blck = buffer.blocks[count] + buffer.roots.del(blck[].root) + buffer.blocks[count] = buffer.blocks[index] + inc(count) + buffer.resetBuffer(count) + +proc invalidate*( + buffer: var BlocksRangeBuffer, + slot: Slot +) = + if len(buffer.blocks) == 0: + return + if buffer.beforeOrEq(slot, buffer.startSlot): + buffer.resetBuffer(0) + return + if buffer.after(slot, buffer.lastSlot): + return + + let startIndex = buffer.getNonEmptyIndex(buffer.prev(slot), false).valueOr: + buffer.resetBuffer(0) + return + + buffer.resetBuffer(startIndex + 1) + +proc add*( + buffer: var BlocksRootBuffer, + blck: ref ForkedSignedBeaconBlock +) = + buffer.roots[blck[].root] = blck + +proc add*( + buffer: var BlocksRootBuffer, + blcks: openArray[ref ForkedSignedBeaconBlock] +) = + for blck in blcks: + buffer.roots[blck[].root] = blck + +func popRoot*( + buffer: var BlocksRootBuffer, + root: Eth2Digest +): ref ForkedSignedBeaconBlock = + var res: ref ForkedSignedBeaconBlock + discard buffer.roots.pop(root, res) + res + +func remove*( + buffer: var BlocksRootBuffer, + root: Eth2Digest +) = + buffer.roots.del(root) + +func getOrDefault*( + buffer: BlocksRootBuffer, + root: Eth2Digest +): ref ForkedSignedBeaconBlock = + buffer.roots.getOrDefault(root) + +func len*(buffer: BlocksRootBuffer): int = + len(buffer.roots) + +func len*(buffer: BlocksRangeBuffer): int = + len(buffer.blocks) + +func almostFull*(buffer: BlocksRangeBuffer): bool = + # len(buffer.blocks) >= 2/3 * maxBufferSize + len(buffer.blocks) >= 2 * (buffer.maxBufferSize div 3) + +func reset*(buffer: var BlocksRangeBuffer) = + buffer.resetBuffer(0) + +func init*( + t: typedesc[BlocksRangeBuffer], + kind: SyncQueueKind, +): BlocksRangeBuffer = + BlocksRangeBuffer( + direction: kind, + ) + +func init*( + t: typedesc[BlocksRangeBuffer], + kind: SyncQueueKind, + maxBufferSize: int, +): BlocksRangeBuffer = + doAssert(maxBufferSize > 0, "Buffer size could not be negative or zero") + BlocksRangeBuffer( + direction: kind, + maxBufferSize: maxBufferSize, + ) + +func new*( + t: typedesc[BlocksRangeBuffer], + kind: SyncQueueKind, + maxBufferSize: int +): ref BlocksRangeBuffer = + newClone BlocksRangeBuffer.init(kind, maxBufferSize) diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim index 3a77aa1ccd..fb2ed6dc95 100644 --- a/beacon_chain/sync/request_manager.nim +++ b/beacon_chain/sync/request_manager.nim @@ -218,6 +218,10 @@ proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest]) {.async: # Ignoring because these errors could occur due to the # concurrent/parallel requests we made. 
discard + of VerifierError.MissingSidecars: + # We downloading only blocks, so its possible to get this error, + # because sidecars will be downloaded later. + discard of VerifierError.UnviableFork: # If they're working a different fork, we'll want to descore them # but also process the other blocks (in case we can register the diff --git a/beacon_chain/sync/response_utils.nim b/beacon_chain/sync/response_utils.nim new file mode 100644 index 0000000000..f8afccf484 --- /dev/null +++ b/beacon_chain/sync/response_utils.nim @@ -0,0 +1,393 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import std/[sequtils, strutils], + results, + ../spec/[helpers, forks, peerdas_helpers, column_map], + ../spec/datatypes/[deneb, electra, fulu], + ../consensus_object_pools/blob_quarantine, + ./sync_queue + +export results + +type + SidecarType = BlobSidecar | fulu.DataColumnSidecar + SidecarResponseRecord*[T: SidecarType] = object + block_root*: Eth2Digest + sidecar*: ref T + + BlobSidecarResponseRecord* = + SidecarResponseRecord[BlobSidecar] + DataColumnSidecarResponseRecord* = + SidecarResponseRecord[fulu.DataColumnSidecar] + +func shortLog*[T: SidecarType]( + a: openArray[SidecarResponseRecord[T]] +): string = + "[" & a.mapIt(shortLog(it.block_root) & "/" & $it.sidecar[].index).join(",") & "]" + +func groupSidecars*( + srange: SyncRange, + blobs: openArray[ref BlobSidecar] +): Result[seq[BlobSidecarResponseRecord], cstring] = + # We do not do signature verifications here, just because it will be done + # later by block_processor. So the only thing we validating is that we + # received sidecars in proper order and in proper range. + var + grouped: seq[BlobSidecarResponseRecord] + slot = srange.start_slot() + + for sidecar in blobs: + let + block_root = hash_tree_root(sidecar[].signed_block_header.message) + block_slot = sidecar[].signed_block_header.message.slot + + if (block_slot < slot) or (block_slot > srange.last_slot()): + return err("Invalid blob sidecar slot") + + slot = block_slot + + if len(grouped) != 0: + if grouped[^1].block_root == block_root: + if grouped[^1].sidecar[].index >= uint64(sidecar[].index): + return err("Invalid index order of blob sidecars") + + # TODO (cheatfate): Batch verification could improve performance here. + sidecar[].verify_blob_sidecar_inclusion_proof().isOkOr: + return err("BlobSidecar: inclusion proof not valid") + + grouped.add( + BlobSidecarResponseRecord(block_root: block_root, sidecar: sidecar)) + + ok(grouped) + +func groupSidecars*( + srange: SyncRange, + map: ColumnMap, + columns: openArray[ref fulu.DataColumnSidecar] +): Result[seq[DataColumnSidecarResponseRecord], cstring] = + # We do not do signature verifications here, just because it will be done + # later by block_processor. So the only thing we validating is that we + # received sidecars in proper order and in proper range. 
+ var + grouped: seq[DataColumnSidecarResponseRecord] + slot = srange.start_slot() + + for sidecar in columns: + let + block_root = hash_tree_root(sidecar[].signed_block_header.message) + block_slot = sidecar[].signed_block_header.message.slot + + if block_slot < slot or block_slot > srange.last_slot(): + return err("Invalid data column sidecar slot") + if sidecar[].index notin map: + return err("Invalid data column index") + + slot = block_slot + if len(grouped) != 0: + if grouped[^1].block_root == block_root: + if uint64(grouped[^1].sidecar[].index) >= uint64(sidecar[].index): + return err("Invalid order of data column sidecars") + + # TODO (cheatfate): Batch verification could improve performance here. + ? verify_data_column_sidecar_inclusion_proof(sidecar[]) + + grouped.add( + DataColumnSidecarResponseRecord(block_root: block_root, sidecar: sidecar)) + + ok(grouped) + +func groupSidecars*( + idents: openArray[BlobIdentifier], + blobs: openArray[ref BlobSidecar] +): Result[seq[BlobSidecarResponseRecord], cstring] = + # Cannot respond more than what I have asked + if len(blobs) > len(idents): + return err("Number of blobs received is greater than number of requested") + + var + checks = idents.toHashSet() + grouped: seq[BlobSidecarResponseRecord] + + for sidecar in blobs: + let + block_root = hash_tree_root(sidecar[].signed_block_header.message) + sidecarIdent = + BlobIdentifier(block_root: block_root, index: sidecar[].index) + + if checks.missingOrExcl(sidecarIdent): + return err("Received blobs outside the request") + + # TODO (cheatfate): Batch verification could improve performance here. + sidecar[].verify_blob_sidecar_inclusion_proof().isOkOr: + return err("BlobSidecar: inclusion proof not valid") + + grouped.add( + BlobSidecarResponseRecord(block_root: block_root, sidecar: sidecar)) + + ok(grouped) + +func groupSidecars*( + idents: openArray[DataColumnsByRootIdentifier], + columnsRequested: int, + columns: openArray[ref fulu.DataColumnSidecar] +): Result[seq[DataColumnSidecarResponseRecord], cstring] = + if len(columns) > columnsRequested: + return err( + "Number of data columns received is greater than number of requested") + + var + checks = + block: + var res: HashSet[DataColumnIdentifier] + for rident in idents: + for rindex in rident.indices: + res.incl( + DataColumnIdentifier( + block_root: rident.block_root, index: rindex)) + res + grouped: seq[DataColumnSidecarResponseRecord] + + for sidecar in columns: + let + block_root = hash_tree_root(sidecar[].signed_block_header.message) + sidecarIdent = + DataColumnIdentifier(block_root: block_root, index: sidecar[].index) + + if checks.missingOrExcl(sidecarIdent): + return err("Received data column outside the request") + + # TODO (cheatfate): Batch verification could improve performance here. + ? 
verify_data_column_sidecar_inclusion_proof(sidecar[]) + + grouped.add( + DataColumnSidecarResponseRecord(block_root: block_root, sidecar: sidecar)) + + ok(grouped) + +func validateBlocks*( + blocks: openArray[ref ForkedSignedBeaconBlock], + sidecars: openArray[BlobSidecarResponseRecord] +): Result[int, cstring] = + var sindex = 0 + for blck in blocks: + withBlck(blck[]): + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + let blobsCount = len(forkyBlck.message.body.blob_kzg_commitments) + if blobsCount == 0: + continue + if (sindex >= len(sidecars)) or (sindex + blobsCount > len(sidecars)): + return err("Not enough blob sidecars") + for index in 0 ..< blobsCount: + let record = sidecars[sindex + index] + if record.block_root != forkyBlck.root: + return err("Some blob sidecars missing for block") + if record.sidecar[].index != BlobIndex(index): + return err("Some blob sidecars sent in wrong order for block") + sindex += blobsCount + else: + return err("Found block with incorrect fork") + + ok(sindex) + +func validateBlocks*( + blocks: openArray[ref ForkedSignedBeaconBlock], + sidecars: openArray[DataColumnSidecarResponseRecord], + map: ColumnMap +): Result[int, cstring] = + let mapCount = len(map) + var sindex = 0 + for blck in blocks: + withBlck(blck[]): + when consensusFork == ConsensusFork.Fulu: + let columnsCount = len(forkyBlck.message.body.blob_kzg_commitments) + if columnsCount == 0: + continue + if (sindex >= len(sidecars)) or (sindex + mapCount > len(sidecars)): + return err("Not enough data column sidecars") + for index in 0 ..< mapCount: + let record = sidecars[sindex + index] + if record.block_root != forkyBlck.root: + return err("Some data column sidecars missing for block") + sindex += mapCount + else: + return err("Found block with incorrect fork") + + ok(sindex) + +func checkResponse*( + srange: SyncRange, + blocks: openArray[ref ForkedSignedBeaconBlock] +): Result[void, cstring] = + ## This procedure checks peer's getBlockByRange() response. + if len(blocks) == 0: + return ok() + + if lenu64(blocks) > srange.count: + return err("Number of received blocks greater than number of requested") + + var + slot = FAR_FUTURE_SLOT + root: Eth2Digest + + for blk in blocks: + let block_slot = blk[].slot() + if block_slot notin srange: + return err("Some of the blocks are outside the requested range") + if slot != FAR_FUTURE_SLOT: + if slot >= block_slot: + return err("Incorrect order or duplicate blocks found") + if blk[].parent_root() != root: + return err("Incorrect order or chain of blocks, invalid parent_root") + root = blk[].root() + slot = blk[].slot() + ok() + +func checkResponse*( + roots: openArray[Eth2Digest], + blocks: openArray[ref ForkedSignedBeaconBlock] +): Result[void, cstring] = + ## This procedure checks peer's getBlocksByRoot() response. 
+ var checks = @roots + if len(blocks) == 0: + return ok() + if len(blocks) > len(roots): + return err("Number of received blocks greater than number of requested") + for blk in blocks: + let res = checks.find(blk[].root) + if res == -1: + return err("Unexpected block root encountered") + checks.del(res) + ok() + +func getShortMap*[T]( + request: SyncRequest[T], + blobs: openArray[BlobSidecarResponseRecord] +): string = + let sidecars = blobs.mapIt(it.sidecar) + getShortMap(request, sidecars) + +when isMainModule: + type + BlobsDataItem = tuple[slot: int, index: int] + + TestData = object + blocks: seq[ref ForkedSignedBeaconBlock] + blobs: seq[BlobSidecarResponseRecord] + + const + BlocksData = [ + #1253024, + 1253025, 1253026, 1253027, 1253028, 1253029, + #1253030, + 1253031, 1253032, 1253033, 1253034, 1253035, + #1253036, + 1253037, 1253038, 1253039, 1253040, 1253041, + #1253042, 1253043, + 1253044, 1253045, 1253046, + #1253047, + 1253048, 1253049, 1253050, 1253051, 1253052, 1253053, 1253054, + #1253055 + ] + + BlobsData = [ + (slot: 1253025, index: 0), (slot: 1253025, index: 1), (slot: 1253025, index: 2), (slot: 1253025, index: 3), + (slot: 1253026, index: 0),(slot: 1253026, index: 1), + (slot: 1253027, index: 0),(slot: 1253027, index: 1),(slot: 1253027, index: 2), + (slot: 1253028, index: 0),(slot: 1253028, index: 1),(slot: 1253028, index: 2),(slot: 1253028, index: 3), + (slot: 1253029, index: 0),(slot: 1253029, index: 1),(slot: 1253029, index: 2),(slot: 1253029, index: 3), + (slot: 1253031, index: 0),(slot: 1253031, index: 1),(slot: 1253031, index: 2), + (slot: 1253032, index: 0),(slot: 1253032, index: 1),(slot: 1253032, index: 2), + (slot: 1253033, index: 0),(slot: 1253033, index: 1),(slot: 1253033, index: 2),(slot: 1253033, index: 3), + (slot: 1253035, index: 0),(slot: 1253035, index: 1),(slot: 1253035, index: 2),(slot: 1253035, index: 3), + (slot: 1253037, index: 0),(slot: 1253037, index: 1),(slot: 1253037, index: 2),(slot: 1253037, index: 3), + (slot: 1253038, index: 0),(slot: 1253038, index: 1),(slot: 1253038, index: 2),(slot: 1253038, index: 3),(slot: 1253038, index: 4), + (slot: 1253039, index: 0), + (slot: 1253040, index: 0),(slot: 1253040, index: 1),(slot: 1253040, index: 2),(slot: 1253040, index: 3), + (slot: 1253041, index: 0),(slot: 1253041, index: 1),(slot: 1253041, index: 2),(slot: 1253041, index: 3),(slot: 1253041, index: 4), + (slot: 1253044, index: 0),(slot: 1253044, index: 1),(slot: 1253044, index: 2),(slot: 1253044, index: 3), + (slot: 1253045, index: 0),(slot: 1253045, index: 1),(slot: 1253045, index: 2), + (slot: 1253046, index: 0),(slot: 1253046, index: 1),(slot: 1253046, index: 2),(slot: 1253046, index: 3), + (slot: 1253048, index: 0),(slot: 1253048, index: 1),(slot: 1253048, index: 2), + (slot: 1253049, index: 0),(slot: 1253049, index: 1),(slot: 1253049, index: 2), + (slot: 1253050, index: 0),(slot: 1253050, index: 1),(slot: 1253050, index: 2), + (slot: 1253051, index: 0),(slot: 1253051, index: 1),(slot: 1253051, index: 2),(slot: 1253051, index: 3), + (slot: 1253052, index: 0),(slot: 1253052, index: 1),(slot: 1253052, index: 2), + (slot: 1253053, index: 0),(slot: 1253053, index: 1), + (slot: 1253054, index: 0),(slot: 1253054, index: 1),(slot: 1253054, index: 2),(slot: 1253054, index: 3) + ] + + func createBlobSidecar( + data: tuple[slot: int, index: int] + ): ref BlobSidecar = + newClone BlobSidecar( + index: BlobIndex(data.index), + signed_block_header: SignedBeaconBlockHeader( + message: BeaconBlockHeader(slot: Slot(data.slot)))) + + func createRoot(root: 
int): Eth2Digest = + var res = Eth2Digest() + res.data[0] = byte(root and 0xFF) + res + + func createBlockWithBlobs(root, slot, count: int): ref ForkedSignedBeaconBlock = + newClone ForkedSignedBeaconBlock.init(deneb.SignedBeaconBlock( + message: deneb.BeaconBlock( + slot: Slot(slot), + body: deneb.BeaconBlockBody( + blob_kzg_commitments: List[KzgCommitment, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK](newSeq[KzgCommitment](count)) + ) + ), + root: createRoot(root))) + + func createBlockWithoutBlobs(root, slot: int): ref ForkedSignedBeaconBlock = + newClone ForkedSignedBeaconBlock.init(deneb.SignedBeaconBlock( + message: deneb.BeaconBlock(slot: Slot(slot)), + root: createRoot(root))) + + func find*(b: openArray[BlobsDataItem], a: int): int = + for index, item in b.pairs(): + if item.slot == a: + return index + -1 + + func getBlobRecords*( + b: openArray[BlobsDataItem], + r, a: int + ): seq[BlobSidecarResponseRecord] = + var res: seq[BlobSidecarResponseRecord] + let slot = b[a].slot + for index in a ..< len(b): + if b[index].slot != slot: + break + res.add(BlobSidecarResponseRecord( + block_root: createRoot(r), + sidecar: createBlobSidecar((slot, b[index].index)))) + res + + func createTestData( + blockSlots: openArray[int], + blobsData: openArray[BlobsDataItem] + ): TestData = + var res: TestData + for index, slot in blockSlots.pairs(): + let index = blobsData.find(slot) + if index == -1: + res.blocks.add( + createBlockWithoutBlobs(index, slot)) + else: + let records = blobsData.getBlobRecords(index, index) + res.blocks.add( + createBlockWithBlobs(index, slot, len(records))) + res.blobs.add(records) + res + + let data = createTestData(BlocksData, BlobsData) + + echo validateBlocks(data.blocks, data.blobs) diff --git a/beacon_chain/sync/sync_dag.nim b/beacon_chain/sync/sync_dag.nim new file mode 100644 index 0000000000..2904421f43 --- /dev/null +++ b/beacon_chain/sync/sync_dag.nim @@ -0,0 +1,285 @@ +# beacon_chain +# Copyright (c) 2018-2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+{.push raises: [].} + +import std/[sets, tables, strutils] +import stew/base10, chronos, chronicles, results +import ../spec/[forks, block_id, column_map] +import ./sync_queue + +type + DagEntryFlag* {.pure.} = enum + Local, Unviable, Finalized, Pending, MissingSidecars + + SyncDagEntryRef* = ref object + blockId*: BlockId + parent*: SyncDagEntryRef + flags*: set[DagEntryFlag] + + PeerEntryRef*[A] = ref object + peer*: A + pendingRoots*: Deque[Eth2Digest] + maxBlocksPerRequest*: int + maxSidecarsPerRequest*: int + columnsMap*: Opt[ColumnMap] + peerLoopFut*: Future[void].Raising([]) + + SyncDag*[A, B] = object + roots*: Table[Eth2Digest, SyncDagEntryRef] + slots*: Table[Slot, HashSet[Eth2Digest]] + peers*: Table[B, PeerEntryRef[A]] + lastSlot*: Slot + +const + EmptyBlockId* = BlockId(slot: FAR_FUTURE_SLOT) + +func toBlockId*(checkpoint: Checkpoint): BlockId = + BlockId(root: checkpoint.root, slot: checkpoint.epoch.start_slot()) + +func init*( + t: typedesc[SyncDagEntryRef], + blockId: BlockId +): SyncDagEntryRef = + SyncDagEntryRef( + blockId: blockId, + flags: {DagEntryFlag.Pending}) + +func init*( + t: typedesc[SyncDagEntryRef], + root: Eth2Digest +): SyncDagEntryRef = + SyncDagEntryRef( + blockId: BlockId(root: root, slot: FAR_FUTURE_SLOT), + flags: {DagEntryFlag.Pending}) + +func init*( + t: typedesc[SyncDagEntryRef], + checkpoint: Checkpoint +): SyncDagEntryRef = + SyncDagEntryRef( + blockId: checkpoint.toBlockId(), + flags: {DagEntryFlag.Finalized, DagEntryFlag.Pending}) + +func init*[T]( + t: typedesc[PeerEntryRef], + peer: T, +): PeerEntryRef[T] = + PeerEntryRef[T]( + pendingRoots: initDeque[Eth2Digest](16), + peer: peer, + maxBlocksPerRequest: 2, + maxSidecarsPerRequest: 16 + ) + +func init*[T]( + t: typedesc[PeerEntryRef], + peer: T, + columns: ColumnMap +): PeerEntryRef[T] = + PeerEntryRef[T]( + pendingRoots: initDeque[Eth2Digest](16), + peer: peer, + maxBlocksPerRequest: 2, + maxSidecarsPerRequest: 16, + columnsMap: Opt.some(columns) + ) + +iterator parents*(entry: SyncDagEntryRef): SyncDagEntryRef = + doAssert(not(isNil(entry)), "Entry should not be nil") + var currentEntry = entry + while true: + if isNil(currentEntry.parent): + break + yield currentEntry.parent + currentEntry = currentEntry.parent + +proc getPendingParent*( + entry: SyncDagEntryRef +): Opt[SyncDagEntryRef] = + for currentEntry in entry.parents(): + if DagEntryFlag.Finalized in currentEntry.flags: + # We reach finalized root, so this is finish. 
+ return Opt.none(SyncDagEntryRef) + if isNil(currentEntry.parent): + # Entry missing parent root, so its good candidate + return Opt.some(currentEntry) + Opt.some(entry) + +proc getPendingParentRoot*( + entry: SyncDagEntryRef +): Opt[Eth2Digest] = + let res = getPendingParent(entry).valueOr: + return Opt.none(Eth2Digest) + Opt.some(res.blockId.root) + +proc getFinalizedParent*( + entry: SyncDagEntryRef +): Opt[SyncDagEntryRef] = + for currentEntry in entry.parents(): + if DagEntryFlag.Finalized in currentEntry.flags: + return Opt.some(currentEntry) + Opt.none(SyncDagEntryRef) + +proc getFinalizedParent*[A, B]( + sdag: SyncDag[A, B], + root: Eth2Digest +): Opt[SyncDagEntryRef] = + let entry = sdag.roots.getOrDefault(root) + if isNil(entry): + return Opt.none(SyncDagEntryRef) + getFinalizedParent(entry) + +proc updateSlot*[A, B]( + sdag: var SyncDag[A, B], + slot: Slot, + root: Eth2Digest +) = + sdag.slots.mgetOrPut(slot, default(HashSet[Eth2Digest])).incl(root) + if sdag.lastSlot < slot: + sdag.lastSlot = slot + +proc shortLog*(a: set[DagEntryFlag]): string = + var res = "" + if DagEntryFlag.Pending in a: + res.add("P") + if DagEntryFlag.Finalized in a: + res.add("F") + if DagEntryFlag.MissingSidecars in a: + res.add("M") + if DagEntryFlag.Unviable in a: + res.add("U") + res + +func getRootItem( + root: Eth2Digest, + slot: Slot, + flags: set[DagEntryFlag] +): string = + $slot & "@" & shortLog(root) & "[" & shortLog(flags) & "]" + +proc getRootMap*[A, B](sdag: SyncDag[A, B], root: Eth2Digest): string = + let entry = sdag.roots.getOrDefault(root) + if isNil(entry): + return "" + var res: seq[string] + res.add(getRootItem(entry.blockId.root, entry.blockId.slot, entry.flags)) + for centry in entry.parents(): + res.add(getRootItem(centry.blockId.root, centry.blockId.slot, centry.flags)) + if DagEntryFlag.Finalized in centry.flags: + break + res.join(",") + +func getShortRootMap*[A, B](sdag: SyncDag[A, B], root: Eth2Digest): string = + var + missingSidecars = 0 + pendingBlocks = 0 + count = 0 + let entry = sdag.roots.getOrDefault(root) + if isNil(entry): + return "" + var res: seq[string] + res.add(getRootItem(entry.blockId.root, entry.blockId.slot, entry.flags)) + inc(count) + if DagEntryFlag.Pending in entry.flags: + inc(pendingBlocks) + if DagEntryFlag.MissingSidecars in entry.flags: + inc(missingSidecars) + for centry in entry.parents(): + if DagEntryFlag.Pending in centry.flags: + inc(pendingBlocks) + if DagEntryFlag.MissingSidecars in centry.flags: + inc(missingSidecars) + inc(count) + res.add(getRootItem(centry.blockId.root, centry.blockId.slot, centry.flags)) + if DagEntryFlag.Finalized in centry.flags: + break + res[^1] & "..." & res[0] & + "[P:" & $pendingBlocks & "/M:" & $missingSidecars & " of " & $count & "]" + +proc updateRoot*[A, B]( + sdag: var SyncDag[A, B], + root: Eth2Digest, + slot: Slot, + parent_root: Eth2Digest, + sidecarsMissed: bool +): Opt[Eth2Digest] = + let entry = sdag.roots.getOrDefault(root) + if isNil(entry): + # This could happen, when data from peer come later than pruning has been + # made. + return Opt.none(Eth2Digest) + + let + bid = BlockId(root: parent_root, slot: GENESIS_SLOT) + parentEntry = sdag.roots.mgetOrPut(parent_root, SyncDagEntryRef.init(bid)) + + # It is possible that data is already in SyncDag, because different peers + # could follow same history and we could receive equal data from 2 peers. + if DagEntryFlag.Pending in entry.flags: + # Only update entry's data if it was in `Pending` state. 
+ entry.flags.excl(DagEntryFlag.Pending) + if sidecarsMissed: + entry.flags.incl(DagEntryFlag.MissingSidecars) + entry.blockId.slot = slot + entry.parent = parentEntry + sdag.updateSlot(slot, root) + + if DagEntryFlag.Finalized in entry.flags: + # If we downloaded finalized checkpoint's root block - update `epochs` + # table. + entry.parent = nil + return Opt.none(Eth2Digest) + + if (DagEntryFlag.Pending notin parentEntry.flags) and + (DagEntryFlag.Finalized in parentEntry.flags): + # Our parent is finalized entry, so we should not continue anymore. + return Opt.none(Eth2Digest) + + if DagEntryFlag.Pending in parentEntry.flags: + # Parent entry is still in `pending` state, so we return `parent_root` + # as missing. + Opt.some(parent_root) + else: + # Parent entry is already present and has its own parent, so we need + # to find last pending root. + let + res = getPendingParentRoot(parentEntry) + resentry = + if res.isSome(): + shortLog(res.get()) + else: + "" + res + +proc prune*[A, B]( + sdag: var SyncDag[A, B], + epoch: Epoch +) = + var + entriesToDelete: seq[Eth2Digest] + slotsToDelete: seq[Slot] + + for cslot, roots in sdag.slots.pairs(): + if cslot.epoch() < epoch: + slotsToDelete.add(cslot) + entriesToDelete.add(roots.toSeq()) + + for slot in slotsToDelete: + sdag.slots.del(slot) + + var entry: SyncDagEntryRef = nil + for item in entriesToDelete: + if sdag.roots.pop(item, entry): + entry.parent = nil + entry = nil + +proc init*( + t: typedesc[SyncDag], + A: typedesc, + B: typedesc +): SyncDag[A, B] = + SyncDag[A, B]() diff --git a/beacon_chain/sync/sync_manager.nim b/beacon_chain/sync/sync_manager.nim index a57118cdfb..669e1cd1e7 100644 --- a/beacon_chain/sync/sync_manager.nim +++ b/beacon_chain/sync/sync_manager.nim @@ -15,7 +15,7 @@ import ../networking/[peer_pool, peer_scores, eth2_network], ../gossip_processing/block_processor, ../beacon_clock, - "."/[sync_protocol, sync_queue] + "."/[sync_protocol, sync_queue, response_utils] export phase0, altair, merge, chronos, chronicles, results, helpers, peer_scores, sync_queue, forks, sync_protocol @@ -461,12 +461,12 @@ proc getSyncBlockData[A, B]( sync_ident = man.ident, topics = "syncman" - if len(blobData) > 0: - let blobSlots = mapIt(blobData, it[].signed_block_header.message.slot) - checkBlobsResponse( - sr, blobSlots, man.MAX_BLOBS_PER_BLOCK_ELECTRA).isOkOr: - peer.updateScore(PeerScoreBadResponse) - return err("Incorrect blobs sequence received, reason: " & $error) + # if len(blobData) > 0: + # let blobSlots = mapIt(blobData, it[].signed_block_header.message.slot) + # checkBlobsResponse( + # sr, blobSlots, man.MAX_BLOBS_PER_BLOCK_ELECTRA).isOkOr: + # peer.updateScore(PeerScoreBadResponse) + # return err("Incorrect blobs sequence received, reason: " & $error) let groupedBlobs = groupBlobs(blocks.asSeq(), blobData).valueOr: peer.updateScore(PeerScoreNoValues) diff --git a/beacon_chain/sync/sync_overseer2.nim b/beacon_chain/sync/sync_overseer2.nim new file mode 100644 index 0000000000..c6f83ae8c2 --- /dev/null +++ b/beacon_chain/sync/sync_overseer2.nim @@ -0,0 +1,2754 @@ +# beacon_chain +# Copyright (c) 2018-2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+{.push raises: [].} + +import std/[sequtils, strutils, sets, algorithm] +import chronos, chronicles, results +import + ../spec/eth2_apis/rest_types, + ../spec/[helpers, forks, network, peerdas_helpers, column_map], + ../networking/[peer_pool, eth2_network], + ../consensus_object_pools/[consensus_manager, block_pools_types, + blockchain_dag, block_quarantine, blob_quarantine], + ../gossip_processing/block_processor, + ../[beacon_clock], + ./[sync_types, sync_dag, sync_queue, sync_protocol, response_utils, + block_buffer] + +from ../consensus_object_pools/spec_cache import get_attesting_indices +from nimcrypto/utils import isFullZero + +export sync_types + +logScope: + topics = "sync" + +const + SyncDeviationSlotsCount = 1 + ## Number of slot allowed for deviation to continue backfilling job + RootSyncEpochsActivationCount = 10'u64 + ## Number of epochs before latest known finalized epoch, when root sync + ## starts working. + ConcurrentRequestsCount* = 3 + ## Number of requests performed by single peer in one syncing step + RepeatingFailuresCount* = 2 + ## Number of repeating errors before starting rewind process. + StatusStalePeriod* = 5 + ## Number of slots before peer's status information could be stale. + GenesisCheckpoint = Checkpoint(root: Eth2Digest(), epoch: GENESIS_EPOCH) + +func shortLog(optblkid: Opt[BlockId]): string = + if optblkid.isNone(): + "" + else: + shortLog(optblkid.get()) + +func shortLog(digests: openArray[Eth2Digest]): string = + "[" & digests.mapIt(shortLog(it)).join(",") & "]" + +func shortLog(blocks: openArray[ref ForkedSignedBeaconBlock]): string = + "[" & blocks.mapIt( + "(slot: " & $it[].slot() & ", root: " & shortLog(it[].root) & ")"). + join(",") & "]" + +func shortLog(bids: openArray[BlockId]): string = + "[" & bids.mapIt( + "(slot: " & $it.slot & ", root: " & shortLog(it.root) & ")").join(",") & "]" + +func shortLog(blobs: Opt[seq[ref BlobSidecar]]): string = + if blobs.isNone(): + "" + else: + $len(blobs.get()) + +func shortLog(cols: Opt[seq[ref fulu.DataColumnSidecar]]): string = + if cols.isNone(): + "" + else: + $len(cols.get()) + +func slimLog(blobs: openArray[ref BlobSidecar]): string = + "[" & blobs.mapIt( + "(slot: " & $it[].signed_block_header.message.slot & + ", index: " & $it[].index & ")").join(",") & "]" + +func slimLog(columns: openArray[ref fulu.DataColumnSidecar]): string = + "[" & columns.mapIt( + "(slot: " & $it[].signed_block_header.message.slot & + ", index: " & $it[].index & ")").join(",") & "]" + +template blobsCount(blck: ForkedSignedBeaconBlock): int = + withBlck(blck): + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + len(forkyBlck.message.body.blob_kzg_commitments) + else: + 0 + +func slimLog(blck: ref ForkedSignedBeaconBlock): string = + "(fork: " & $blck.kind & + ",slot: " & $blck[].slot() & + ",root: " & shortLog(blck[].root()) & + ",parent_root: " & shortLog(blck[].parent_root()) & + ",blobs_count: " & $blck[].blobsCount() & + ")" + +func slimLog(blocks: openArray[ref ForkedSignedBeaconBlock]): string = + "[" & blocks.mapIt(slimLog(it)).join(",") & "]" + +proc getEaSlotLog(peer: Peer): string = + let res = peer.getEarliestAvailableSlot().valueOr: + return "" + $res + +func isGenesis(checkpoint: Checkpoint): bool = + (checkpoint.epoch == GenesisCheckpoint.epoch) and + (checkpoint.root == GenesisCheckpoint.root) + +func increaseBlocksCount( + overseer: SyncOverseerRef2, + blocksCount: var int, + fork: ConsensusFork +) = + # We increase by 1/4, but not bigger than fork's limit value. 
+ let + maxCount = + case fork + of ConsensusFork.Phase0 .. ConsensusFork.Fulu: + int(MAX_REQUEST_BLOCKS_DENEB) + of ConsensusFork.Gloas: + int(MAX_REQUEST_BLOCKS_DENEB) + res = blocksCount + max(1, blocksCount div 4) + + if res > maxCount: + blocksCount = maxCount + else: + blocksCount = res + +func increaseSidecarsCount( + overseer: SyncOverseerRef2, + sidecarsCount: var int, + fork: ConsensusFork +) = + # We increase by 1/4, but not bigger than fork's limit value. + let + cfg = overseer.consensusManager.dag.cfg + maxCount = + case fork + of ConsensusFork.Phase0 .. ConsensusFork.Deneb: + int(cfg.MAX_REQUEST_BLOB_SIDECARS) + of ConsensusFork.Electra: + int(cfg.MAX_REQUEST_BLOB_SIDECARS_ELECTRA) + of ConsensusFork.Fulu: + int(cfg.MAX_REQUEST_DATA_COLUMN_SIDECARS) + of ConsensusFork.Gloas: + int(cfg.MAX_REQUEST_DATA_COLUMN_SIDECARS) + + res = sidecarsCount + max(1, sidecarsCount div 4) + + if res > maxCount: + sidecarsCount = maxCount + else: + sidecarsCount = res + +func decreaseSidecarsCount(sidecarsCount: var int) = + if sidecarsCount == 1: + return + sidecarsCount = sidecarsCount div 2 + +func decreaseBlocksCount(blocksCount: var int) = + if blocksCount == 1: + return + blocksCount = blocksCount div 2 + +func getColumnsDistribution( + overseer: SyncOverseerRef2 +): string = + var res: seq[string] + let custodyMap = overseer.columnQuarantine[].custodyMap + if len(overseer.columnsState.distribution) == 0: + return "[]" + for index in custodyMap: + let count = overseer.columnsState.distribution.getOrDefault(index, 0) + res.add($uint64(index) & ": " & $count) + "[" & res.join(", ") & "]" + +func getColumnsFillRate( + overseer: SyncOverseerRef2 +): string = + let custodyMap = overseer.columnQuarantine[].custodyMap + if len(overseer.columnsState.distribution) == 0: + return "0.00%" + var columns = 0 + for index in custodyMap: + let tmp = overseer.columnsState.distribution.getOrDefault(index, 0) + if tmp > 0: inc(columns) + let + columnsCount = + if len(custodyMap) == NUMBER_OF_COLUMNS: + NUMBER_OF_COLUMNS div 2 + 1 + else: + len(custodyMap) + fillRate = (float(columns) * 100.0) / float(columnsCount) + fillRate.formatBiggestFloat(ffDecimal, 2) & "%" + +func getMissingColumnsLog( + overseer: SyncOverseerRef2, + blocks: openArray[ref ForkedSignedBeaconBlock] +): string = + var res: seq[string] + for blck in blocks: + withBlck(blck[]): + when consensusFork == ConsensusFork.Fulu: + let map = + overseer.columnQuarantine[].getMissingColumnsMap( + forkyBlck.root, forkyBlck) + res.add($map) + else: + raiseAssert "Unsupported fork" + "[ " & res.join(",") & " ]" + +func getMissingSidecarsLog( + overseer: SyncOverseerRef2, + blck: ref ForkedSignedBeaconBlock, + fullSupernodeLog = false +): string = + var res = "[" + withBlck(blck[]): + when consensusFork < ConsensusFork.Deneb: + discard + elif consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + let indices = + overseer.blobQuarantine[].getMissingSidecarIndices( + forkyBlck.root, forkyBlck) + if len(indices) > 0: + res.add(indices.mapIt($uint8(it)).join(",")) + elif consensusFork == ConsensusFork.Fulu: + let indices = + overseer.columnQuarantine[].getMissingSidecarIndices( + forkyBlck.root, forkyBlck) + if len(indices) > 0: + if overseer.config.peerdasSupernode: + if fullSupernodeLog: + res.add(indices.mapIt($uint8(it)).join(",")) + else: + let superCount = (NUMBER_OF_COLUMNS div 2) - 1 + res.add($len(indices)) + res.add(" of ") + res.add($superCount) + else: + res.add(indices.mapIt($uint8(it)).join(",")) + else: + raiseAssert "Unsupported 
fork" + res.add("]") + res + +func consensusForkAtEpoch( + overseer: SyncOverseerRef2, + epoch: Epoch +): ConsensusFork = + overseer.consensusManager.dag.cfg.consensusForkAtEpoch(epoch) + +template contains*( + buffer: BlocksRangeBuffer, + request: SyncRequest[Peer] +): bool = + buffer.contains(request.data.slot, request.data.count) + +func getSidecarsHorizon( + overseer: SyncOverseerRef2, + fork: ConsensusFork +): uint64 = + let dag = overseer.consensusManager.dag + if fork < ConsensusFork.Deneb: + raiseAssert "Incorrect fork" + elif fork < ConsensusFork.Fulu: + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS * SLOTS_PER_EPOCH + elif fork == ConsensusFork.Fulu: + dag.cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS * SLOTS_PER_EPOCH + else: + raiseAssert "Unsupported fork" + +func getBlobsHorizon(overseer: SyncOverseerRef2): Epoch = + let + dag = overseer.consensusManager.dag + currentEpoch = dag.finalizedHead.slot.epoch() + horizon = dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + tempEpoch = + if currentEpoch < horizon: + GENESIS_EPOCH + else: + currentEpoch - horizon + tempFork = overseer.consensusForkAtEpoch(tempEpoch) + + if tempFork < ConsensusFork.Deneb: + dag.cfg.DENEB_FORK_EPOCH + else: + tempEpoch + +func getColumnsHorizon(overseer: SyncOverseerRef2): Epoch = + let + dag = overseer.consensusManager.dag + currentEpoch = dag.finalizedHead.slot.epoch() + horizon = dag.cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS + tempEpoch = + if currentEpoch < horizon: + GENESIS_EPOCH + else: + currentEpoch - horizon + tempFork = overseer.consensusForkAtEpoch(tempEpoch) + + if tempFork < ConsensusFork.Fulu: + dag.cfg.FULU_FORK_EPOCH + else: + tempEpoch + +proc shouldGetBlobs(overseer: SyncOverseerRef2, slot: Slot): bool = + if overseer.config.historyMode == HistoryMode.Archive: + let dag = overseer.consensusManager.dag + if slot.epoch() >= dag.cfg.DENEB_FORK_EPOCH: + return true + return false + slot.epoch() >= overseer.getBlobsHorizon() + +proc shouldGetColumns(overseer: SyncOverseerRef2, slot: Slot): bool = + if overseer.config.historyMode == HistoryMode.Archive: + let dag = overseer.consensusManager.dag + if slot.epoch() >= dag.cfg.FULU_FORK_EPOCH: + return true + return false + slot.epoch() >= overseer.getColumnsHorizon() + +proc getPeerColumns(overseer: SyncOverseerRef2, peer: Peer): seq[ColumnIndex] = + let + cfg = overseer.consensusManager.dag.cfg + nodeId = peer.fetchNodeIdFromPeerId() + custodyGroupCount = peer.lookupCgcFromPeer() + # TODO: get_custody_groups performs O(custodyGroupCount) eth2digest operations + cfg.get_custody_groups(nodeId, custodyGroupCount) + +proc getPeerColumnMap( + overseer: SyncOverseerRef2, + peer: Peer +): ColumnMap = + let + cfg = overseer.consensusManager.dag.cfg + nodeId = peer.fetchNodeIdFromPeerId() + custodyGroupCount = peer.lookupCgcFromPeer() + ColumnMap.init(cfg.get_custody_groups(nodeId, custodyGroupCount)) + +proc getPeerColumnMap( + overseer: SyncOverseerRef2, + peerEntry: PeerEntryRef +): ColumnMap = + if peerEntry.columnsMap.isNone(): + let map = overseer.getPeerColumnMap(peerEntry.peer) + peerEntry.columnsMap = Opt.some(map) + map + else: + peerEntry.columnsMap.get() + +proc checkDataAvailable( + overseer: SyncOverseerRef2, + peer: Peer, + direction: SyncQueueKind, + srange: SyncRange +): bool = + let eaSlot = peer.getEarliestAvailableSlot().valueOr: + return true + case direction + of SyncQueueKind.Forward: + srange.start_slot() >= eaSlot + of SyncQueueKind.Backward: + srange.last_slot() >= eaSlot + +proc startPeer( + overseer: 
SyncOverseerRef2, peer: Peer): Future[void] {.async: (raises: []).} + +func getFrontfillSlot(overseer: SyncOverseerRef2): Slot = + let dag = overseer.consensusManager.dag + max(dag.frontfill.get(BlockId()).slot, dag.horizon) + +func getLastAddedBackfillSlot(overseer: SyncOverseerRef2): Slot = + let dag = overseer.consensusManager.dag + if dag.backfill.parent_root != dag.tail.root: + dag.backfill.slot + else: + dag.tail.slot + +func getMissingIndicesLog( + overseer: SyncOverseerRef2, + blck: ref ForkedSignedBeaconBlock +): string = + withBlck(blck[]): + when consensusFork < ConsensusFork.Deneb: + raiseAssert "Invalid fork" + elif consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + let indices = + overseer.blobQuarantine[].getMissingSidecarIndices( + forkyBlck.root, forkyBlck) + indexLog(indices) + elif consensusFork == ConsensusFork.Fulu: + let indices = + overseer.columnQuarantine[].getMissingSidecarIndices( + forkyBlck.root, forkyBlck) + indexLog(indices) + else: + raiseAssert "Unsupported fork" + +proc getForwardSidecarStartSlot(overseer: SyncOverseerRef2): Slot = + let + dag = overseer.consensusManager.dag + checkpoint = overseer.lastSeenCheckpoint.get() + lastSlot = checkpoint.epoch.start_slot() + consensusFork = consensusForkAtEpoch(dag.cfg, checkpoint.epoch) + + if consensusFork < ConsensusFork.Deneb: + return max(dag.finalizedHead.slot, dag.cfg.DENEB_FORK_EPOCH.start_slot()) + + let horizon = overseer.getSidecarsHorizon(consensusFork) + if lastSlot < horizon: + max(dag.finalizedHead.slot, GENESIS_SLOT) + else: + max(dag.finalizedHead.slot, lastSlot - horizon) + +proc getBackfillSidecarFinalSlot(overseer: SyncOverseerRef2): Slot = + let + dag = overseer.consensusManager.dag + backfillSlot = overseer.getLastAddedBackfillSlot() + currentSlot = overseer.beaconClock.currentSlot() + consensusFork = consensusForkAtEpoch(dag.cfg, currentSlot.epoch()) + + if consensusFork < ConsensusFork.Deneb: + return min(backfillSlot, (dag.cfg.DENEB_FORK_EPOCH).start_slot) + + let horizon = overseer.getSidecarsHorizon(consensusFork) + if dag.finalizedHead.slot < horizon: + min(backfillSlot, GENESIS_SLOT) + else: + min(backfillSlot, dag.finalizedHead.slot - horizon) + +proc createQueues( + overseer: SyncOverseerRef2 +) = + let + dag = overseer.consensusManager.dag + checkpoint = overseer.lastSeenCheckpoint.get() + + func getFirstSlotAtFinalizedEpoch(): Slot = + dag.finalizedHead.slot + + func getLastAddedBackfillSlot(): Slot = + overseer.getLastAddedBackfillSlot() + + func forkAtEpoch(epoch: Epoch): ConsensusFork = + consensusForkAtEpoch(dag.cfg, epoch) + + template declareBlockVerifier( + procName: untyped, + direction: static SyncQueueKind + ): untyped = + proc `procName`( + signedBlock: ref ForkedSignedBeaconBlock, + maybeFinalized: bool + ): Future[Result[void, VerifierError]] {. + async: (raises: [CancelledError]).} = + doAssert(not(isNil(signedBlock)), "Block reference should not be nil") + withBlck(signedBlock[]): + when consensusFork < ConsensusFork.Deneb: + (await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, noSidecars, + maybeFinalized = maybeFinalized)) + elif consensusFork < ConsensusFork.Fulu: + if overseer.shouldGetBlobs(forkyBlck.message.slot): + # We add all the blocks to BlockBuffer, just to avoid BlockProcessor + # `MissingParent` errors which could be generated, because some of + # the blocks was added to BlockBuffer and some of the blocks + # transferred to BlockProcessor. 
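The buffering rule described in the comment above (and applied in the verifier bodies that follow) reduces to a small routing decision per block; a sketch with assumed names, where `wantsSidecars` stands in for `shouldGetBlobs`/`shouldGetColumns` and `commitments` for the block's blob KZG commitment count:

```nim
type
  BlockRoute = enum
    routeBuffer      ## keep in the range block buffer until sidecars arrive
    routeNoSidecars  ## hand over with an explicitly empty sidecar set
    routeUnchecked   ## hand over without sidecar information

func routeBlock(wantsSidecars: bool, commitments: int): BlockRoute =
  if wantsSidecars:
    # Everything is buffered, even commitment-free blocks, so the processor
    # never sees a parent gap between buffered and forwarded blocks.
    routeBuffer
  elif commitments > 0:
    routeUnchecked
  else:
    routeNoSidecars

when isMainModule:
  doAssert routeBlock(wantsSidecars = true, commitments = 0) == routeBuffer
  doAssert routeBlock(wantsSidecars = false, commitments = 3) == routeUnchecked
  doAssert routeBlock(wantsSidecars = false, commitments = 0) == routeNoSidecars
```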
+ debug "Block buffered", + fork = consensusFork, + block_root = forkyBlck.root, + blck = shortLog(forkyBlck), + verifier = "block" + + when direction == SyncQueueKind.Forward: + overseer.fblockBuffer.add(signedBlock) + elif direction == SyncQueueKind.Backward: + overseer.bblockBuffer.add(signedBlock) + else: + let commitmentsLen = + len(forkyBlck.message.body.blob_kzg_commitments) + + if commitmentsLen > 0: + (await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, Opt.none(BlobSidecars), + maybeFinalized = maybeFinalized)) + else: + (await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, Opt.some(default(BlobSidecars)), + maybeFinalized = maybeFinalized)) + + elif consensusFork == ConsensusFork.Fulu: + if overseer.shouldGetColumns(forkyBlck.message.slot): + # We add all the blocks to BlockBuffer, just to avoid BlockProcessor + # `MissingParent` errors which could be generated, because some of + # the blocks was added to BlockBuffer and some of the blocks + # transferred to BlockProcessor. + debug "Block buffered", + fork = consensusFork, + block_root = forkyBlck.root, + blck = shortLog(forkyBlck), + verifier = "block" + + when direction == SyncQueueKind.Forward: + overseer.fblockBuffer.add(signedBlock) + elif direction == SyncQueueKind.Backward: + overseer.bblockBuffer.add(signedBlock) + else: + let commitmentsLen = + len(forkyBlck.message.body.blob_kzg_commitments) + + if commitmentsLen > 0: + (await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, Opt.none(fulu.DataColumnSidecars), + maybeFinalized = maybeFinalized)) + else: + (await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, + Opt.some(default(fulu.DataColumnSidecars)), + maybeFinalized = maybeFinalized)) + else: + raiseAssert "Unsupported fork" + + declareBlockVerifier(forwardBlockVerifier, SyncQueueKind.Forward) + declareBlockVerifier(backwardBlockVerifier, SyncQueueKind.Backward) + + proc sidecarsVerifier( + signedBlock: ref ForkedSignedBeaconBlock, + maybeFinalized: bool + ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = + doAssert(not(isNil(signedBlock)), "Block reference should not be nil") + withBlck(signedBlock[]): + when consensusFork < ConsensusFork.Deneb: + raiseAssert "Incorrect block consensus fork" + elif consensusFork < ConsensusFork.Fulu: + let + commitmentsLen = len(forkyBlck.message.body.blob_kzg_commitments) + bres = + if commitmentsLen > 0: + if overseer.shouldGetBlobs(forkyBlck.message.slot): + let res = overseer.blobQuarantine[].popSidecars(forkyBlck) + if res.isNone(): + debug "Block verification failed, because sidecars missing", + fork = consensusFork, + block_root = signedBlock[].root, + blck = shortLog(forkyBlck), + verifier = "sidecar" + return err(VerifierError.MissingSidecars) + res + else: + Opt.none(seq[ref BlobSidecar]) + else: + Opt.some(default(seq[ref BlobSidecar])) + + (await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, bres, + maybeFinalized = maybeFinalized)) + elif consensusFork == ConsensusFork.Fulu: + let + commitmentsLen = len(forkyBlck.message.body.blob_kzg_commitments) + cres = + if commitmentsLen > 0: + if overseer.shouldGetColumns(forkyBlck.message.slot): + let res = overseer.columnQuarantine[].popSidecars(forkyBlck) + if res.isNone(): + debug "Block verification failed, because sidecars missing", + fork = consensusFork, + block_root = signedBlock[].root, + blck = shortLog(forkyBlck), + missing_sidecars = + overseer.getMissingIndicesLog(signedBlock), + verifier = "sidecar" + return 
err(VerifierError.MissingSidecars) + res + else: + Opt.none(seq[ref fulu.DataColumnSidecar]) + else: + Opt.some(default(seq[ref fulu.DataColumnSidecar])) + + (await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, cres, + maybeFinalized = maybeFinalized)) + else: + raiseAssert "Unsupported fork" + + let + localHead = dag.finalizedHead.slot + backfillSlot = overseer.getLastAddedBackfillSlot() + frontfillSlot = overseer.getFrontfillSlot() + + overseer.fqueue = + SyncQueue.init( + Peer, SyncQueueKind.Forward, + localHead, checkpoint.epoch.start_slot(), + uint64(overseer.blocksChunkSize), + ConcurrentRequestsCount, + RepeatingFailuresCount, + getFirstSlotAtFinalizedEpoch, + forwardBlockVerifier, forkAtEpoch, "fblock") + overseer.fsqueue = + SyncQueue.init( + Peer, SyncQueueKind.Forward, + overseer.getForwardSidecarStartSlot(), + checkpoint.epoch.start_slot(), + uint64(overseer.blocksChunkSize), + ConcurrentRequestsCount, + RepeatingFailuresCount, + getFirstSlotAtFinalizedEpoch, + sidecarsVerifier, forkAtEpoch, "fsidecar") + overseer.bqueue = + if dag.needsBackfill(): + SyncQueue.init( + Peer, SyncQueueKind.Backward, + backfillSlot, frontfillSlot, + uint64(overseer.blocksChunkSize), + ConcurrentRequestsCount, + RepeatingFailuresCount, + getLastAddedBackfillSlot, + backwardBlockVerifier, forkAtEpoch, "bblock") + else: + nil + overseer.bsqueue = + if dag.needsBackfill(): + SyncQueue.init( + Peer, SyncQueueKind.Backward, + backfillSlot, + overseer.getBackfillSidecarFinalSlot(), + uint64(overseer.blocksChunkSize), + ConcurrentRequestsCount, + RepeatingFailuresCount, + getLastAddedBackfillSlot, + sidecarsVerifier, forkAtEpoch, "bsidecar") + else: + nil + +proc updateQueues( + overseer: SyncOverseerRef2 +) = + let + dag = overseer.consensusManager.dag + checkpoint = overseer.lastSeenCheckpoint.get() + localHead = dag.finalizedHead.slot + + if overseer.fqueue.running(): + # Forward syncing is in progress. + overseer.fqueue.updateLastSlot(checkpoint.epoch.start_slot()) + else: + # Forward sync is not active, but we keep it up-to date. + overseer.fqueue.reset(localHead, checkpoint.epoch.start_slot()) + + if overseer.fsqueue.running(): + # Forward syncing is in progress. + overseer.fsqueue.updateLastSlot(checkpoint.epoch.start_slot()) + else: + # Forward sync is not active, but we keep it up-to date. 
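The sidecar queues created above start from a horizon-clamped slot (`getForwardSidecarStartSlot` / `getBackfillSidecarFinalSlot`); a minimal sketch of that clamping for the forward case, using an assumed retention period instead of the real `MIN_EPOCHS_FOR_*_SIDECARS_REQUESTS` constants:

```nim
const
  slotsPerEpoch = 32'u64
  retentionEpochs = 4096'u64          # assumed sidecar retention period

func sidecarStartSlot(finalizedSlot, targetSlot: uint64): uint64 =
  ## Start fetching sidecars no earlier than the retention horizon behind the
  ## sync target, but never before the locally finalized slot.
  let horizon = retentionEpochs * slotsPerEpoch
  if targetSlot < horizon:
    finalizedSlot                     # the whole chain is within retention
  else:
    max(finalizedSlot, targetSlot - horizon)

when isMainModule:
  doAssert sidecarStartSlot(finalizedSlot = 10, targetSlot = 1000) == 10'u64
  doAssert sidecarStartSlot(finalizedSlot = 10, targetSlot = 200_000) == 68_928'u64
```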
+ overseer.fsqueue.reset( + overseer.getForwardSidecarStartSlot(), checkpoint.epoch.start_slot()) + + if not(isNil(overseer.bqueue)): + if not(overseer.bqueue.running()): + let + startSlot = dag.backfill.slot + finishSlot = + if dag.horizon >= startSlot: + startSlot + else: + dag.horizon + overseer.bqueue.reset(startSlot, finishSlot) + + if not(isNil(overseer.bsqueue)): + if not(overseer.bsqueue.running()): + let + startSlot = dag.backfill.slot + finishSlot = overseer.getBackfillSidecarFinalSlot() + overseer.bsqueue.reset(startSlot, finishSlot) + +proc updateColumnStatistics( + overseer: SyncOverseerRef2, + peer: Peer, + map: ColumnMap, + remove: bool +) = + let + custodyMap = overseer.columnQuarantine[].custodyMap + difference = custodyMap and map + + if remove: + if len(difference) > 0: + dec(overseer.columnsState.usefulCount) + for index in difference: + overseer.columnsState.distribution.mgetOrPut(index, 0).dec() + else: + dec(overseer.columnsState.uselessCount) + else: + if len(difference) > 0: + inc(overseer.columnsState.usefulCount) + for index in difference: + overseer.columnsState.distribution.mgetOrPut(index, 0).inc() + else: + inc(overseer.columnsState.uselessCount) + +proc initPeer( + overseer: SyncOverseerRef2, + peer: Peer, +): PeerEntryRef[Peer] = + let dag = overseer.consensusManager.dag + if dag.head.slot.epoch >= dag.cfg.FULU_FORK_EPOCH: + let map = overseer.getPeerColumnMap(peer) + overseer.updateColumnStatistics(peer, map, false) + overseer.sdag.peers.mgetOrPut( + peer.getKey(), PeerEntryRef.init(peer, map)) + else: + overseer.sdag.peers.mgetOrPut(peer.getKey(), PeerEntryRef.init(peer)) + +proc updatePeer(overseer: SyncOverseerRef2, peer: Peer) = + let + blockId = + peer.getHeadBlockId() + checkpoint = + peer.getFinalizedCheckpoint() + hentry = + overseer.sdag.roots.mgetOrPut( + blockId.root, SyncDagEntryRef.init(blockId)) + fentry = + if checkpoint.isGenesis(): + nil + else: + overseer.sdag.roots.mgetOrPut( + checkpoint.root, SyncDagEntryRef.init(checkpoint)) + missingHeadRoot = + if DagEntryFlag.Pending in hentry.flags: + # Missing parent situation + Opt.some(hentry.blockId.root) + else: + # Parent is present, so we searching for first missing one. + let root = getPendingParentRoot(hentry) + if root.isSome() and (root.get() == GenesisCheckpoint.root): + Opt.none(Eth2Digest) + else: + root + missingFinalizedRoot = + if not(isNil(fentry)) and (DagEntryFlag.Pending in fentry.flags): + # Missing parent situation + Opt.some(fentry.blockId.root) + else: + Opt.none(Eth2Digest) + pendingRoots = + block: + var res: seq[Eth2Digest] + if missingHeadRoot.isSome(): res.add(missingHeadRoot.get()) + if missingFinalizedRoot.isSome(): res.add(missingFinalizedRoot.get()) + res + + if overseer.lastSeenCheckpoint.isNone(): + overseer.lastSeenCheckpoint = Opt.some(checkpoint) + overseer.createQueues() + else: + if checkpoint.epoch > overseer.lastSeenCheckpoint.get().epoch: + overseer.lastSeenCheckpoint = Opt.some(checkpoint) + overseer.updateQueues() + + if overseer.lastSeenHead.isNone(): + overseer.lastSeenHead = Opt.some(blockId) + else: + if blockId.slot > overseer.lastSeenHead.get().slot: + overseer.lastSeenHead = Opt.some(blockId) + + let entry = overseer.sdag.peers.getOrDefault(peer.getKey()) + if isNil(entry): + return + + for root in pendingRoots: + entry.pendingRoots.addLast(root) + + if not(isNil(fentry)) and (DagEntryFlag.Pending notin fentry.flags): + # Finalized root is already present in SyncDag. 
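`updatePeer` above picks at most one root to request per peer by walking parent links from the announced head until it reaches an entry that is still pending; a simplified sketch using `std/options` and omitting the finalized/genesis special cases:

```nim
import std/options

type
  Entry = ref object
    root: string
    pending: bool
    parent: Entry            # nil until the parent block has been seen

func firstPendingRoot(head: Entry): Option[string] =
  ## Walk parent links from the head entry and return the first root that is
  ## still pending, i.e. the next block to request by root.
  var cur = head
  while cur != nil:
    if cur.pending:
      return some(cur.root)
    cur = cur.parent
  none(string)

when isMainModule:
  let
    grandparent = Entry(root: "c", pending: true)            # not seen yet
    parent = Entry(root: "b", pending: false, parent: grandparent)
    head = Entry(root: "a", pending: false, parent: parent)
  doAssert firstPendingRoot(head) == some("c")
  grandparent.pending = false                                # block arrived
  doAssert firstPendingRoot(head).isNone()
```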
+ fentry.flags.incl(DagEntryFlag.Finalized) + +proc updatePeer( + overseer: SyncOverseerRef2, + peerId: PeerId, + peerMustPresent: bool, + block_slot: Slot, + block_root: Eth2Digest, + block_parent_root: Eth2Digest, + sidecarsMissed: bool +) = + let peerEntry = overseer.sdag.peers.getOrDefault(peerId) + if isNil(peerEntry) and peerMustPresent: + return + + let + missingParentRoot = + overseer.sdag.updateRoot(block_root, block_slot, block_parent_root, + sidecarsMissed) + + if missingParentRoot.isSome() and + (missingParentRoot.get() != GenesisCheckpoint.root): + if not(isNil(peerEntry)): + peerEntry.pendingRoots.addLast(missingParentRoot.get()) + else: + if missingParentRoot.get() == block_parent_root: + # We only change global `missingRoots` if we got a block without + # parent. + let bid = BlockId(slot: block_slot, root: block_root) + debug "Peer is anonymous, adding root to global missing roots table", + bid = shortLog(bid), parent_root = shortLog(block_parent_root) + overseer.missingRoots.incl(missingParentRoot.get()) + +proc updatePeer( + overseer: SyncOverseerRef2, + peerId: PeerId, + peerMustPresent: bool, + blck: ref ForkedSignedBeaconBlock, + missingSidecars: bool +) = + let (slot, root, parentRoot) = + withBlck(blck[]): + (forkyBlck.message.slot, forkyBlck.root, forkyBlck.message.parent_root) + overseer.updatePeer( + peerId, peerMustPresent, slot, root, parentRoot, missingSidecars) + +proc updatePeer( + overseer: SyncOverseerRef2, + peerId: PeerId, + peerMustPresent: bool, + blck: ForkedSignedBeaconBlock, + missingSidecars: bool +) = + let (slot, root, parentRoot) = + withBlck(blck): + (forkyBlck.message.slot, forkyBlck.root, forkyBlck.message.parent_root) + overseer.updatePeer( + peerId, peerMustPresent, slot, root, parentRoot, missingSidecars) + +func finalizedDistance*( + overseer: SyncOverseerRef2 +): Opt[uint64] = + let + dag = overseer.consensusManager.dag + checkpoint = getStateField(dag.headState, finalized_checkpoint) + + if overseer.lastSeenCheckpoint.isNone(): + return Opt.none(uint64) + + let lastSeenEpoch = overseer.lastSeenCheckpoint.get().epoch + if lastSeenEpoch > checkpoint.epoch: + Opt.some(lastSeenEpoch - checkpoint.epoch) + else: + Opt.some(0'u64) + +func backfillDistance*( + overseer: SyncOverseerRef2 +): uint64 = + let + dag = overseer.consensusManager.dag + + if dag.backfill.slot <= dag.horizon: + 0'u64 + else: + dag.backfill.slot - dag.horizon + +proc networkSyncDistance*( + overseer: SyncOverseerRef2 +): Opt[uint64] = + let + dag = overseer.consensusManager.dag + localHead = dag.head.slot + + if overseer.lastSeenHead.isNone(): + return Opt.none(uint64) + + let lastSeenHead = overseer.lastSeenHead.get().slot + if lastSeenHead > localHead: + Opt.some(lastSeenHead - localHead) + else: + Opt.some(0'u64) + +proc wallSyncDistance*( + overseer: SyncOverseerRef2 +): uint64 = + let + dag = overseer.consensusManager.dag + wallSlot = overseer.beaconClock.currentSlot() + headSlot = dag.head.slot + wallSlot - headSlot + +proc finalizedDistance*( + overseer: SyncOverseerRef2, + peer: Peer +): uint64 = + let + dag = overseer.consensusManager.dag + checkpoint = getStateField(dag.headState, finalized_checkpoint) + peerCheckpoint = peer.getFinalizedCheckpoint() + + if peerCheckpoint.epoch > checkpoint.epoch: + peerCheckpoint.epoch - checkpoint.epoch + else: + 0'u64 + +proc syncDistance*( + overseer: SyncOverseerRef2, + peer: Peer +): uint64 = + let + dag = overseer.consensusManager.dag + localHead = dag.head.slot + peerHead = peer.getHeadBlockId().slot + + if peerHead > 
localHead: + peerHead - localHead + else: + 0'u64 + +proc verifyBlock( + overseer: SyncOverseerRef2, + signedBlock: ref ForkedSignedBeaconBlock, + maybeFinalized: bool +): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = + withBlck(signedBlock[]): + when consensusFork > ConsensusFork.Fulu: + raiseAssert "Unsupported fork" + elif consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + if overseer.shouldGetBlobs(forkyBlck.message.slot): + let bres = + overseer.blobQuarantine[].popSidecars(forkyBlck.root, forkyBlck) + if bres.isSome(): + let res = + await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, bres, + maybeFinalized = maybeFinalized) + if res.isErr() and (res.error == VerifierError.MissingParent): + # In this case block will be stored in quarantine, so we need to + # preserve blobs in blob quarantine. + overseer.blobQuarantine[].put(forkyBlck.root, bres.get()) + res + else: + overseer.rblockBuffer.add(signedBlock) + Result[void, VerifierError].err(VerifierError.MissingSidecars) + else: + await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, Opt.none(BlobSidecars), + maybeFinalized = maybeFinalized) + elif consensusFork == ConsensusFork.Fulu: + if overseer.shouldGetColumns(forkyBlck.message.slot): + let cres = + overseer.columnQuarantine[].popSidecars(forkyBlck.root, forkyBlck) + if cres.isSome(): + let res = + await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, cres, + maybeFinalized = maybeFinalized) + if res.isErr() and (res.error == VerifierError.MissingParent): + # In this case block will be stored in quarantine, so we need to + # preserve columns in column quarantine. + overseer.columnQuarantine[].put(forkyBlck.root, cres.get()) + res + else: + overseer.rblockBuffer.add(signedBlock) + Result[void, VerifierError].err(VerifierError.MissingSidecars) + else: + await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, Opt.none(fulu.DataColumnSidecars), + maybeFinalized = maybeFinalized) + else: + await overseer.blockProcessor.addBlock( + MsgSource.sync, forkyBlck, noSidecars, maybeFinalized = maybeFinalized) + +proc getStatusPeriod*( + overseer: SyncOverseerRef2, + peer: Peer +): chronos.Duration = + let + dag = overseer.consensusManager.dag + localHead = dag.head.bid + peerHead = peer.getHeadBlockId() + peerFinalizedCheckpoint = peer.getFinalizedCheckpoint() + secondsPerSlot = int(dag.cfg.timeParams.SLOT_DURATION.seconds) + + if peerFinalizedCheckpoint.epoch < overseer.lastSeenCheckpoint.get.epoch: + # Peer is not in sync with the network. + return chronos.seconds(10 * secondsPerSlot) + + if localHead.slot.epoch() < peerFinalizedCheckpoint.epoch: + # We are behind peer's finalized checkpoint, performing forward syncing. + # 10 slots (mainnet: 2.minutes) + return chronos.seconds(10 * secondsPerSlot) + + if (localHead.slot >= peerHead.slot) and + (localHead.slot < overseer.lastSeenHead.get.slot): + # Peer's head slot is behind ours, but we still not in sync with network. + # So we need to refresh status information immediately. + return chronos.seconds(0) + + if peerHead.slot < overseer.lastSeenHead.get.slot: + # Peer's head is behind network's peer head. + return chronos.seconds(1 * secondsPerSlot) + + if localHead.slot == overseer.lastSeenHead.get.slot: + # Node is optimistically synced + return chronos.seconds(5 * secondsPerSlot) + + # Node is almost synced, but still behind peer's head. 
+ chronos.seconds(1 * secondsPerSlot) + +func getMissingSidecarsRoots(entry: SyncDagEntryRef): seq[BlockId] = + var res: seq[BlockId] + if DagEntryFlag.MissingSidecars in entry.flags: + res.add(entry.blockId) + for currentEntry in entry.parents(): + if DagEntryFlag.MissingSidecars in currentEntry.flags: + res.add(currentEntry.blockId) + if DagEntryFlag.Finalized in currentEntry.flags: + break + res.reversed() + +func cleanMissingSidecarsRoots(entry: SyncDagEntryRef) = + if DagEntryFlag.MissingSidecars in entry.flags: + entry.flags.excl(DagEntryFlag.MissingSidecars) + for currentEntry in entry.parents(): + entry.flags.excl(DagEntryFlag.MissingSidecars) + +func getBlock( + blocks: openArray[ref ForkedSignedBeaconBlock], + root: Eth2Digest, + slot: Slot +): ref ForkedSignedBeaconBlock = + for blck in blocks: + if (blck[].root == root) and (blck[].slot == slot): + return blck + nil + +proc doPeerPause( + overseer: SyncOverseerRef2, + peer: Peer, + loopTime: chronos.Moment +): Future[bool] {.async: (raises: [CancelledError]).} = + let + dag = overseer.consensusManager.dag + timeParams = dag.cfg.timeParams + peerHead = peer.getHeadBlockId() + peerEntry = + block: + let res = overseer.sdag.peers.getOrDefault(peer.getKey()) + if isNil(res): return + res + hentry = + block: + let res = overseer.sdag.roots.getOrDefault(peerHead.root) + if isNil(res): return + res + + logScope: + peer = peer + peer_head = shortLog(peerHead) + peer_finalized_head = shortLog(peer.getFinalizedCheckpoint()) + peer_ea_slot = getEaSlotLog(peer) + peer_agent = $peer.getRemoteAgent() + peer_score = peer.getScore() + peer_speed = peer.netKbps() + + var doSleep = false + + if overseer.finalizedDistance().get() > 1'u64: + ## We are in forward syncing. + if dag.finalizedHead.slot.epoch >= peer.getFinalizedCheckpoint().epoch: + doSleep = true + else: + ## We are in sync or almost in sync. 
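`getStatusPeriod` above chooses how often to refresh a peer's status from a few distance checks; a condensed sketch of the tiers, expressed in slots with illustrative parameter names (the real code works with epochs and `chronos` durations):

```nim
func statusPeriodSlots(localHead, peerHead, lastSeenHead: uint64,
                       peerIsStale, forwardSyncing: bool): uint64 =
  if peerIsStale or forwardSyncing:
    10        # peer behind the network, or we are still forward syncing
  elif localHead >= peerHead and localHead < lastSeenHead:
    0         # peer looks behind us while we still lag the network: ask now
  elif peerHead < lastSeenHead:
    1         # peer lags the best head seen so far
  elif localHead == lastSeenHead:
    5         # optimistically synced
  else:
    1         # almost synced, still behind this peer

when isMainModule:
  doAssert statusPeriodSlots(100, 90, 120, false, false) == 0'u64
  doAssert statusPeriodSlots(120, 120, 120, false, false) == 5'u64
```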
+ if len(peerEntry.pendingRoots) == 0 and + len(overseer.missingRoots) == 0 and + len(getMissingSidecarsRoots(hentry)) == 0: + doSleep = true + + if not(dag.needsBackfill()): + doSleep = true + + if not(doSleep) and (Moment.now() - loopTime) < 50.milliseconds: + debug "Endless loop detected for peer" + doSleep = true + + if doSleep: + let + currentTime = overseer.beaconClock.now() + currentSlot = overseer.beaconClock.currentSlot() + timeToSlot = + if overseer.syncDistance(peer) == 0: + let + next = currentSlot + 1 + nanos = + (next.start_beacon_time(timeParams) - currentTime).nanoseconds + if nanos <= 0: + ZeroDuration + else: + nanoseconds(nanos) + else: + 1.seconds + peerFut = peer.getFuture().join() + + debug "Peer is entering sleeping state", sleep_time = timeToSlot + discard await race(sleepAsync(timeToSlot), peerFut) + if peerFut.finished(): + return false + + true + +proc doPeerUpdateStatus( + overseer: SyncOverseerRef2, + peer: Peer +): Future[bool] {.async: (raises: [CancelledError]).} = + let + dag = overseer.consensusManager.dag + peerHead = peer.getHeadBlockId() + peerFinalizedCheckpoint = peer.getFinalizedCheckpoint() + peerStatusAge = Moment.now() - peer.getStatusLastTime() + statusPeriod = overseer.getStatusPeriod(peer) + + logScope: + peer = peer + peer_head = shortLog(peerHead) + peer_finalized_head = shortLog(peerFinalizedCheckpoint) + status_age = peerStatusAge + status_period = statusPeriod + + if peerStatusAge < statusPeriod: + # Peer's status information is still relevant + return true + + debug "Requesting fresh status information from peer" + + if not(await peer.updateStatus()): + debug "Failed to obtain fresh status information from peer" + peer.updateScore(PeerScoreNoStatus) + return false + + let + newPeerHead = peer.getHeadBlockId() + + if peerHead.slot >= newPeerHead.slot: + let stalePeriod = + (dag.cfg.timeParams.SLOT_DURATION * StatusStalePeriod) + if peerStatusAge >= stalePeriod: + peer.updateScore(PeerScoreStaleStatus) + debug "Peer's status information is stale" + else: + # Updating data structures about newly received Peer's status information. + overseer.updatePeer(peer) + peer.updateScore(PeerScoreGoodStatus) + debug "Peer status information updated" + + true + +proc doPeerUpdateRoots( + overseer: SyncOverseerRef2, + peer: Peer, +): Future[bool] {.async: (raises: [CancelledError]).} = + let + peerEntry = + block: + let res = overseer.sdag.peers.getOrDefault(peer.getKey()) + if isNil(res): return false + res + var + roots = + block: + var + res: seq[Eth2Digest] + counter = 0 + # Add peer missing roots + while counter < peerEntry.maxBlocksPerRequest: + if len(peerEntry.pendingRoots) > 0: + res.add(peerEntry.pendingRoots.popFirst()) + inc(counter) + else: + break + # Add global missing roots. + for item in overseer.missingRoots: + if counter < peerEntry.maxBlocksPerRequest: + res.add(item) + inc(counter) + else: + break + res + + template restoreRoots() = + # We should return all the roots back to the pending queue. + for index in countdown(len(roots) - 1, 0): + peerEntry.pendingRoots.addFirst(roots[index]) + + template removeRoot(root: Eth2Digest) = + let index = roots.find(root) + if index >= 0: + # We perform O(n) delete to keep order of roots. 
+ roots.delete(index) + overseer.missingRoots.excl(root) + + logScope: + peer = peer + block_roots = shortLog(roots) + roots_count = len(roots) + max_blocks_per_request = peerEntry.maxBlocksPerRequest + peer_agent = $peer.getRemoteAgent() + peer_score = peer.getScore() + peer_speed = peer.netKbps() + data_type = "blocks" + + if len(roots) == 0: + debug "No pending roots available for peer" + return true + + debug "Requesting blocks by root from peer" + + let + blocks = + try: + (await beaconBlocksByRoot_v2(peer, BlockRootsList roots)).valueOr: + debug "Blocks by root request failed", reason = error, version = 2 + peer.updateScore(PeerScoreNoValues) + return false + except CancelledError as exc: + restoreRoots() + raise exc + + debug "Received blocks by root on request", + blocks = slimLog(blocks.asSeq()), blocks_count = len(blocks) + + checkResponse(roots, blocks.asSeq()).isOkOr: + restoreRoots() + debug "Incorrect blocks by root received", + blocks = slimLog(blocks.asSeq()), blocks_count = len(blocks), + reason = $error + peer.updateScore(PeerScoreBadResponse) + return false + + debug "Blocks by root passed response validation", + blocks = slimLog(blocks.asSeq()), blocks_count = len(blocks) + + if len(roots) > len(blocks): + # Number of requested roots is bigger than number of received blocks. + if len(roots) == 1: + restoreRoots() + debug "Empty response received for single root request", + blocks = slimLog(blocks.asSeq()), blocks_count = len(blocks) + peer.updateScore(PeerScoreBadResponse) + return false + peerEntry.maxBlocksPerRequest.decreaseBlocksCount() + else: + let consensusFork = blocks[0][].kind + overseer.increaseBlocksCount( + peerEntry.maxBlocksPerRequest, consensusFork) + + for signedBlock in blocks.asSeq(): + # maybeFinalized = false because we are working in range `>finalizedEpoch`. 
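The `maxBlocksPerRequest` adjustment applied just above (grow by a quarter after a complete response, halve after a short one, clamp to the fork limit) can be sketched as follows, with an assumed cap value:

```nim
func grow(count: var int, maxCount: int) =
  count = min(maxCount, count + max(1, count div 4))

func shrink(count: var int) =
  if count > 1:
    count = count div 2

when isMainModule:
  var perRequest = 8
  grow(perRequest, maxCount = 128)  # 8 -> 10
  shrink(perRequest)                # 10 -> 5
  shrink(perRequest)                # 5 -> 2
  doAssert perRequest == 2
```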
+ let res = + try: + await overseer.verifyBlock(signedBlock, maybeFinalized = false) + except CancelledError as exc: + restoreRoots() + raise exc + + if res.isErr() and (res.error == VerifierError.Invalid): + debug "Block verification NOT passed", + fork = signedBlock[].kind, + block_root = shortLog(signedBlock[].root), + reason = $res.error + restoreRoots() + peer.updateScore(PeerScoreBadResponse) + return false + let missingSidecars = + if res.isErr() and (res.error == VerifierError.MissingSidecars): + true + else: + false + if res.isErr(): + if missingSidecars: + debug "Block missing sidecars", + fork = signedBlock[].kind, + missing_sidecars = overseer.getMissingIndicesLog(signedBlock), + reason = $res.error + else: + debug "Block verification passed", + fork = signedBlock[].kind, + block_root = shortLog(signedBlock[].root), + reason = $res.error + else: + debug "Block verification passed", + fork = signedBlock[].kind, + block_root = shortLog(signedBlock[].root), + reason = "ok" + # Update SyncDAG with block + overseer.updatePeer(peer.getKey(), true, signedBlock, missingSidecars) + removeRoot(signedBlock[].root) + + true + +proc doPeerUpdateRootsSidecars( + overseer: SyncOverseerRef2, + peer: Peer +): Future[bool] {.async: (raises: [CancelledError]).} = + let + dag = overseer.consensusManager.dag + peerEntry = + block: + let res = overseer.sdag.peers.getOrDefault(peer.getKey()) + if isNil(res): return false + res + peerHead = peer.getHeadBlockId() + headEntry = + block: + let res = overseer.sdag.roots.getOrDefault(peerHead.root) + if isNil(res): return false + res + bids = headEntry.getMissingSidecarsRoots() + + var + emptyBlobBlocks: seq[ref ForkedSignedBeaconBlock] + emptyColumnBlocks: seq[ref ForkedSignedBeaconBlock] + blobRoots: seq[BlobIdentifier] + columnRoots: seq[DataColumnsByRootIdentifier] + columnsCount = 0 + peerColumns: seq[ColumnIndex] + + logScope: + peer = peer + max_blocks_per_request = peerEntry.maxBlocksPerRequest + max_sidecars_per_request = peerEntry.maxSidecarsPerRequest + peer_agent = $peer.getRemoteAgent() + peer_score = peer.getScore() + peer_speed = peer.netKbps() + + if len(bids) == 0: + debug "No pending sidecars available for peer" + return true + + debug "Preparing sidecars by root for peer", + block_ids = shortLog(bids), block_ids_count = len(bids) + + for bid in bids: + let signedBlock = + block: + var res: ref ForkedSignedBeaconBlock + res = overseer.rblockBuffer.getOrDefault(bid.root) + if isNil(res): + let qres = + try: + newClone overseer.blockQuarantine.sidecarless[bid.root] + except KeyError: + nil + if isNil(qres): + continue + res = qres + res + + withBlck(signedBlock[]): + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + let requests = + overseer.blobQuarantine[].fetchMissingSidecars( + bid.root, forkyBlck) + emptyBlobBlocks.add(signedBlock) + for request in requests: + blobRoots.add(request) + if len(blobRoots) >= peerEntry.maxSidecarsPerRequest: + break + elif consensusFork == ConsensusFork.Fulu: + let + peerMap = overseer.getPeerColumnMap(peerEntry) + request = + overseer.columnQuarantine[].fetchMissingSidecars( + bid.root, forkyBlck, peerMap) + if len(request.indices) > 0: + # len(request.indices) == 0 when we already have data column sidecars + # which peer could provide. + emptyColumnBlocks.add(signedBlock) + columnRoots.add(request) + columnsCount.inc(len(request.indices)) + if columnsCount >= peerEntry.maxSidecarsPerRequest: + break + elif consensusFork in ConsensusFork.Phase0 .. 
ConsensusFork.Capella: + raiseAssert "Should not be happen!" + else: + raiseAssert "Unsupported fork" + + ## + ## Blob sidecars processing + ## + if len(blobRoots) > 0: + logScope: + roots = shortLog(blobRoots) + roots_count = len(blobRoots) + data_type = "blobs" + + debug "Requesting blob sidecars by root from peer" + + let + blobSidecars = + (await blobSidecarsByRoot(peer, BlobIdentifierList blobRoots, + maxResponseItems = len(blobRoots))).valueOr: + debug "Blobs by root request failed", reason = error + peer.updateScore(PeerScoreNoValues) + return false + + debug "Received blob sidecars by root on request", + blobs = slimLog(blobSidecars.asSeq()), blobs_count = len(blobSidecars) + + let + records = + groupSidecars(blobRoots, blobSidecars.asSeq()).valueOr: + debug "Response to blobs by root is incorrect", + blobs = slimLog(blobSidecars.asSeq()), + blobs_count = len(blobSidecars), reason = error + peer.updateScore(PeerScoreBadResponse) + return false + + for record in records: + overseer.blobQuarantine[].put(record.block_root, record.sidecar) + + if len(records) < len(blobRoots): + if len(blobRoots) == 1: + debug "Empty response received for single root request", + blobs = slimLog(blobSidecars.asSeq()), + blobs_count = len(blobSidecars) + peer.updateScore(PeerScoreBadResponse) + return false + # Number of received sidecars is less than number of requested. + peerEntry.maxSidecarsPerRequest.decreaseSidecarsCount() + else: + overseer.increaseSidecarsCount( + peerEntry.maxSidecarsPerRequest, ConsensusFork.Electra) + + debug "Processing block and sidecars by root", + blocks = slimLog(emptyBlobBlocks) + + for signedBlock in emptyBlobBlocks: + debug "Processing block by root", blck = slimLog(signedBlock) + withBlck(signedBlock[]): + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + let entry = overseer.sdag.roots.getOrDefault(forkyBlck.root) + if not(isNil(entry)) and (DagEntryFlag.MissingSidecars in entry.flags): + let res = await overseer.verifyBlock(signedBlock, false) + if res.isErr(): + debug "Block processor response", reason = res.error, + blck = slimLog(signedBlock) + case res.error + of VerifierError.Invalid: + peer.updateScore(PeerScoreBadResponse) + entry.flags.excl(DagEntryFlag.MissingSidecars) + overseer.rblockBuffer.remove(forkyBlck.root) + return false + of VerifierError.UnviableFork: + peer.updateScore(PeerScoreUnviableFork) + entry.flags.incl(DagEntryFlag.Unviable) + entry.flags.excl(DagEntryFlag.MissingSidecars) + overseer.rblockBuffer.remove(forkyBlck.root) + return false + of VerifierError.MissingParent, VerifierError.Duplicate: + # This flags means that we have sidecars. + entry.flags.excl(DagEntryFlag.MissingSidecars) + peer.updateScore(PeerScoreGoodValues) + overseer.rblockBuffer.remove(forkyBlck.root) + of VerifierError.MissingSidecars: + # We still missing sidecars. + discard + else: + debug "Block processor response", reason = "ok", + blck = slimLog(signedBlock) + overseer.rblockBuffer.remove(forkyBlck.root) + peer.updateScore(PeerScoreGoodValues) + entry.flags.excl(DagEntryFlag.MissingSidecars) + else: + raiseAssert "Should not be happen!" + + ## + ## Data column sidecars processing. 
+ ## + if len(columnRoots) > 0: + logScope: + head = shortLog(dag.head) + roots = shortLog(columnRoots) + roots_count = columnsCount + peer_map = overseer.getPeerColumnMap(peerEntry) + data_type = "columns" + + debug "Requesting data column sidecars by root from peer" + + let + consensusFork = ConsensusFork.Fulu + # This `consensusFork` is only used for request sidecars amount + # adjustments. + columnSidecars = + (await dataColumnSidecarsByRoot(peer, + DataColumnsByRootIdentifierList columnRoots)).valueOr: + debug "Data columns by root request failed", reason = error + peer.updateScore(PeerScoreNoValues) + return false + + debug "Received data column sidecars by root on request", + columns = slimLog(columnSidecars.asSeq()), + columns_count = len(columnSidecars) + + let + records = + groupSidecars( + columnRoots, columnsCount, columnSidecars.asSeq()).valueOr: + debug "Response to data columns by root is incorrect", + columns = slimLog(columnSidecars.asSeq()), + columns_count = len(columnSidecars), reason = error + peer.updateScore(PeerScoreBadResponse) + return false + + for record in records: + overseer.columnQuarantine[].put(record.block_root, record.sidecar) + + if len(records) < columnsCount: + if columnsCount == 1: + debug "Empty response received for single root request", + columns = slimLog(columnSidecars.asSeq()), + columns_count = len(columnSidecars) + peer.updateScore(PeerScoreBadResponse) + return false + # Number of received sidecars is less than number of requested. + peerEntry.maxSidecarsPerRequest.decreaseSidecarsCount() + else: + overseer.increaseSidecarsCount( + peerEntry.maxSidecarsPerRequest, consensusFork) + + debug "Processing block and sidecars by root", + blocks = slimLog(emptyBlobBlocks) + + for signedBlock in emptyColumnBlocks: + debug "Processing block and sidecars by root", blck = slimLog(signedBlock) + withBlck(signedBlock[]): + when consensusFork == ConsensusFork.Fulu: + let entry = overseer.sdag.roots.getOrDefault(forkyBlck.root) + if not(isNil(entry)) and (DagEntryFlag.MissingSidecars in entry.flags): + let res = await overseer.verifyBlock(signedBlock, false) + if res.isErr(): + debug "Block and sidecars by root processor response", + reason = res.error, blck = slimLog(signedBlock) + case res.error + of VerifierError.Invalid: + entry.flags.excl(DagEntryFlag.MissingSidecars) + peer.updateScore(PeerScoreBadResponse) + overseer.rblockBuffer.remove(forkyBlck.root) + return false + of VerifierError.UnviableFork: + entry.flags.excl(DagEntryFlag.MissingSidecars) + peer.updateScore(PeerScoreUnviableFork) + entry.flags.incl(DagEntryFlag.Unviable) + overseer.rblockBuffer.remove(forkyBlck.root) + return false + of VerifierError.MissingParent, VerifierError.Duplicate: + # This flags means that we have sidecars. + entry.flags.excl(DagEntryFlag.MissingSidecars) + overseer.rblockBuffer.remove(forkyBlck.root) + of VerifierError.MissingSidecars: + # We still missing sidecars. + discard + else: + debug "Block and sidecars by root processor response", + reason = "ok", blck = slimLog(signedBlock) + overseer.rblockBuffer.remove(forkyBlck.root) + peer.updateScore(PeerScoreGoodValues) + entry.flags.excl(DagEntryFlag.MissingSidecars) + else: + raiseAssert "Should not be happen!" 
+ true + +template bsqueue( + overseer: SyncOverseerRef2, + direction: SyncQueueKind +): untyped = + case direction + of SyncQueueKind.Forward: + overseer.fqueue + of SyncQueueKind.Backward: + overseer.bqueue + +template ssqueue( + overseer: SyncOverseerRef2, + direction: SyncQueueKind +): untyped = + case direction + of SyncQueueKind.Forward: + overseer.fsqueue + of SyncQueueKind.Backward: + overseer.bsqueue + +template sbuffer( + overseer: SyncOverseerRef2, + direction: SyncQueueKind +): var BlocksRangeBuffer = + case direction + of SyncQueueKind.Forward: + overseer.fblockBuffer + of SyncQueueKind.Backward: + overseer.bblockBuffer + +proc doRangeSyncStep( + overseer: SyncOverseerRef2, + peer: Peer, + direction: SyncQueueKind +): Future[bool] {.async: (raises: [CancelledError]).} = + if isNil(overseer.sdag.peers.getOrDefault(peer.getKey())): + return false + + let + dag = overseer.consensusManager.dag + checkpoint = peer.getFinalizedCheckpoint() + + let request = + overseer.bsqueue(direction).pop(checkpoint.epoch.start_slot(), peer) + + logScope: + peer = peer + request = request + head = shortLog(dag.head) + block_buffer = shortLog(overseer.sbuffer(direction)) + blocks_queue = shortLog(overseer.bsqueue(direction)) + sidecars_queue = shortLog(overseer.ssqueue(direction)) + peer_checkpoint = shortLog(checkpoint) + peer_head = shortLog(peer.getHeadBlockId()) + peer_ea_slot = getEaSlotLog(peer) + peer_agent = $peer.getRemoteAgent() + peer_score = peer.getScore() + peer_speed = peer.netKbps() + direction = direction + + debug "New blocks range request" + + if request.isEmpty(): + debug "Empty request received from blocks queue" + return true + + try: + let + blocks = + (await beaconBlocksByRange_v2( + peer, request.data.slot, request.data.count, 1'u64)).valueOr: + debug "Failed to get block range from peer", reason = error + overseer.bsqueue(direction).push(request) + return false + + debug "Received blocks range on request", + blocks_count = len(blocks), + blocks_map = getShortMap(request, blocks.toSeq()) + + checkResponse(request.data, blocks.asSeq()).isOkOr: + debug "Incorrect range of blocks received", + blocks_count = len(blocks), + blocks_map = getShortMap(request, blocks.toSeq()), reason = $error + peer.updateScore(PeerScoreBadResponse) + overseer.bsqueue(direction).push(request) + return false + + debug "Sending blocks range to processor", + blocks_count = len(blocks), + blocks_map = getShortMap(request, blocks.asSeq()), + blocks = slimLog(blocks.asSeq()) + + let resp = + await overseer.bsqueue(direction).push( + request, blocks.asSeq(), maybeFinalized = true) + + debug "Blocks queue response", + code = resp.code, count = resp.count, blck = shortLog(resp.blck), + blocks_count = len(blocks), + blocks_map = getShortMap(request, blocks.asSeq()), + blocks = slimLog(blocks.asSeq()), + block_buffer = shortLog(overseer.sbuffer(direction)), + blocks_queue = shortLog(overseer.bsqueue(direction)), + sidecars_queue = shortLog(overseer.bsqueue(direction)) + + if resp.count > 0: + peer.updateScore(PeerScoreGoodValues) + true + elif resp.count == 0: + true + else: + let rewindPoint = overseer.bsqueue(direction).inpSlot + + logScope: + code = resp.code + count = resp.count + rewind_point = rewindPoint + blck = shortLog(resp.blck) + + let before = shortLog(overseer.sbuffer(direction)) + case direction + of SyncQueueKind.Forward: + overseer.fblockBuffer.invalidate(rewindPoint) + of SyncQueueKind.Backward: + overseer.bblockBuffer.invalidate(rewindPoint) + debug "Blocks queue rewind detected, 
invalidating block buffer", + block_buffer_before = before + false + + except CancelledError as exc: + overseer.bsqueue(direction).push(request) + raise exc + +proc doRangeSidecarsStep( + overseer: SyncOverseerRef2, + peer: Peer, + direction: SyncQueueKind +): Future[bool] {.async: (raises: [CancelledError]).} = + let + dag = overseer.consensusManager.dag + checkpoint = peer.getFinalizedCheckpoint() + peerEntry = + block: + let res = overseer.sdag.peers.getOrDefault(peer.getKey()) + if isNil(res): return false + res + + block: + let + blockSlot = overseer.bsqueue(direction).inpSlot + blockRange = + SyncRange.init(blockSlot, uint64(overseer.blocksChunkSize)) + + logScope: + peer = peer + block_slot = blockSlot + block_range = $block_range + head = shortLog(dag.head) + block_buffer = shortLog(overseer.sbuffer(direction)) + blocks_queue = shortLog(overseer.bsqueue(direction)) + sidecars_queue = shortLog(overseer.ssqueue(direction)) + peer_checkpoint = shortLog(checkpoint) + peer_head = shortLog(peer.getHeadBlockId()) + direction = direction + + let notInRange = + case direction + of SyncQueueKind.Forward: + blockRange.last_slot < overseer.ssqueue(direction).startSlot + of SyncQueueKind.Backward: + blockRange.last_slot < overseer.ssqueue(direction).finalSlot + if notInRange: + debug "Sidecars queue is not in range, skipping step" + return true + + let request = + overseer.ssqueue(direction).pop(checkpoint.epoch.start_slot(), peer) + + logScope: + peer = peer + request = request + head = shortLog(dag.head) + block_buffer = shortLog(overseer.sbuffer(direction)) + blocks_queue = shortLog(overseer.bsqueue(direction)) + sidecars_queue = shortLog(overseer.ssqueue(direction)) + blob_quarantine = shortLog(overseer.blobQuarantine[]) + column_quarantine = shortLog(overseer.columnQuarantine[]) + peer_checkpoint = shortLog(checkpoint) + peer_head = shortLog(peer.getHeadBlockId()) + peer_ea_slot = getEaSlotLog(peer) + peer_agent = $peer.getRemoteAgent() + peer_score = peer.getScore() + peer_speed = peer.netKbps() + direction = direction + + debug "New sidecars range request" + + if request.isEmpty(): + debug "Empty request received from sidecars queue" + return true + + if not(overseer.checkDataAvailable(peer, direction, request.data)): + debug "Request cannot be satisfied by the peer", + peer_ea_slot = peer.getEarliestAvailableSlot().get() + peer.updateScore(PeerScoreNoValues) + overseer.ssqueue(direction).push(request) + return true + + let consensusFork = dag.cfg.consensusForkAtEpoch( + request.data.start_slot().epoch) + + let resp = + case consensusFork + of ConsensusFork.Phase0 .. 
ConsensusFork.Capella: + SyncPushResponse() + of ConsensusFork.Deneb, ConsensusFork.Electra: + try: + let + data = + (await blobSidecarsByRange(peer, request.data.slot, + request.data.count, maxResponseItems = + (request.data.count * + dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA).Limit) + ).valueOr: + peer.updateScore(PeerScoreNoValues) + debug "Failed to receive blob sidecars range on request", + reason = $error + overseer.ssqueue(direction).push(request) + return false + + debug "Received blob sidecars range from peer", + blobs_map = getShortMap(request, data.toSeq()) + + let + grouped = groupSidecars(request.data, data.asSeq()).valueOr: + peer.updateScore(PeerScoreBadResponse) + debug "Received invalid blob sidecars range", + reason = $error, blobs_count = len(data), + blobs = slimLog(data.asSeq()) + overseer.ssqueue(direction).push(request) + return false + blocks = overseer.sbuffer(direction).peekRange(request.data) + + # Early detection of empty response. + sindex = validateBlocks(blocks, grouped).valueOr: + peer.updateScore(PeerScoreMissingValues) + debug "Received non-complete blob sidecars range", + reason = $error, blobs_count = len(data), + blobs_map = getShortMap(request, grouped), + blocks_map = getShortMap(request, blocks), + block_blobs_map = getBlockBlobsMap(request, blocks), + blobs = slimLog(data.asSeq()), + blocks = slimLog(blocks) + overseer.ssqueue(direction).push(request) + return false + + if (len(blocks) == 0) and (len(grouped) > 0): + # Case when we have no blocks, but a lot of blobs. + debug "Received blobs range that, do not have corresponding blocks " & + "range" + overseer.ssqueue(direction).push(request) + return false + + if sindex != len(grouped): + let missing = + block: + var res: seq[Eth2Digest] + for item in grouped.toOpenArray(sindex, len(grouped) - 1): + if (len(res) == 0) or (res[^1] != item.block_root): + res.add(item.block_root) + "[" & res.mapIt(shortLog(it)).join(",") & "]" + debug "Received blobs range indicates that some blocks in " & + "corresponding range are missing", missing_blocks = missing + + for record in grouped: + overseer.blobQuarantine[].put(record.block_root, record.sidecar) + + debug "Sending sidecars range to processor", + blobs_map = getShortMap(request, grouped), + blocks_count = len(blocks), + blocks_map = getShortMap(request, blocks), + blocks = slimLog(blocks), + blobs = slimLog(data.asSeq()) + + let res = await overseer.ssqueue(direction).push( + request, blocks, maybeFinalized = true) + + debug "Sidecars queue response", + code = res.code, count = res.count, blck = shortLog(res.blck), + blobs_map = getShortMap(request, grouped), + blocks_count = len(blocks), + blocks_map = getShortMap(request, blocks), + blocks = slimLog(blocks), + blobs = slimLog(data.asSeq()) + + # In case we not advance - we should cleanup blob/column quarantines on + # fatal errors. + if res.count <= 0: + if res.code in [SyncProcessError.Invalid, + SyncProcessError.UnviableFork]: + for signed in blocks: + overseer.blobQuarantine[].remove(signed[].root) + res + + except CancelledError as exc: + overseer.ssqueue(direction).push(request) + raise exc + + of ConsensusFork.Fulu: + try: + let + blocks = overseer.sbuffer(direction).peekRange(request.data) + custodyMap = overseer.columnQuarantine[].custodyMap + peerMap = overseer.getPeerColumnMap(peerEntry) + intersectMap = custodyMap and peerMap + + # Here we perform check if remote peer has compatible columns or not. 
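The compatibility check set up here and applied in the lines that follow reduces to two set intersections: the peer must overlap our custody columns, and that overlap must cover something we still miss. A sketch with `HashSet` standing in for `ColumnMap`:

```nim
import std/sets

func peerIsUseful(custody, peerHas, missing: HashSet[uint8]): bool =
  let intersect = custody * peerHas       # columns the peer can serve us
  if intersect.len == 0:
    return false                          # no compatible columns at all
  (missing * intersect).len > 0           # peer covers something still missing

when isMainModule:
  let
    custody = toHashSet([1'u8, 2, 3, 4])
    peerHas = toHashSet([3'u8, 4, 5])
  doAssert peerIsUseful(custody, peerHas, missing = toHashSet([4'u8]))
  doAssert not peerIsUseful(custody, peerHas, missing = initHashSet[uint8]())
```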
+ if len(intersectMap) == 0: + peer.updateScore(PeerScoreNoValues) + debug "Peer does not have compatible columns", + custody_map = shortLog(custodyMap), + peer_map = shortLog(peerMap) + overseer.ssqueue(direction).push(request) + return true + + let (columnsNeeded, columnsHave) = + if len(blocks) > 0: + # Here we perform check if remote peer can provide columns that we + # do not have already. + var + res1 = false + res2 = false + for blck in blocks: + let + missingMap = + withBlck(blck[]): + when consensusFork == ConsensusFork.Fulu: + overseer.columnQuarantine[].getMissingColumnsMap( + forkyBlck.root, forkyBlck) + else: + raiseAssert "Should not happen!" + if not(missingMap.empty()): + # We have missing columns. + res1 = true + let newOnlyMap = missingMap and intersectMap + if not(newOnlyMap.empty()): + # Peer has something that we don't have. + res2 = true + break + (res1, res2) + else: + # This is undefined case, because its impossible to obtain + # blocks. + (false, false) + + let missingLog = overseer.getMissingColumnsLog(blocks) + + debug "Peer columns compatibility", + custody_map = shortLog(custodyMap), + peer_map = shortLog(peerMap), + intersect_map = shortLog(intersectMap), + missing_log = missingLog + + if (len(blocks) > 0) and (columnsNeeded and not(columnsHave)): + peer.updateScore(PeerScoreNoValues) + debug "Peer has compatible columns that we already have", + custody_map = shortLog(custodyMap), + peer_map = shortLog(peerMap), + intersect_map = shortLog(intersectMap), + missing_log = missingLog + overseer.ssqueue(direction).push(request) + return true + + if (len(blocks) == 0) or (columnsNeeded and columnsHave): + # We only download sidecars if we miss it and peer have it. + let + data = + (await dataColumnSidecarsByRange( + peer, request.data.slot, request.data.count, + List[ColumnIndex, NUMBER_OF_COLUMNS]( + intersectMap.items().toSeq()))).valueOr: + peer.updateScore(PeerScoreNoValues) + debug "Failed to receive data column sidecars range " & + "on request", reason = $error + overseer.ssqueue(direction).push(request) + return false + + debug "Received data columns sidecars range from peer", + columns_map = getShortMap(request, intersectMap, data.toSeq()), + peer_map = shortLog(peerMap), + intersection_map = shortLog(intersectMap), + columns = slimLog(data.asSeq()), + missing_log = missingLog + + let + grouped = + groupSidecars(request.data, intersectMap, data.asSeq()).valueOr: + peer.updateScore(PeerScoreBadResponse) + debug "Received invalid data column sidecars range", + reason = $error, columns_count = len(data), + columns = slimLog(data.asSeq()) + overseer.ssqueue(direction).push(request) + return false + + # Early detection of empty response. + sindex = validateBlocks(blocks, grouped, intersectMap).valueOr: + peer.updateScore(PeerScoreMissingValues) + debug "Received non-complete data column sidecars range", + reason = $error, columns_count = len(data), + columns = shortLog(grouped) + overseer.ssqueue(direction).push(request) + return false + + if (len(blocks) == 0) and (len(grouped) > 0): + # Case when we have no blocks, but a lot of blobs. 
+ debug "Received columns range which do not have corresponding " & + "blocks range" + overseer.ssqueue(direction).push(request) + return + + if sindex != len(grouped): + let missing = + block: + var res: seq[Eth2Digest] + for item in grouped.toOpenArray(sindex, len(grouped) - 1): + if (len(res) == 0) or (res[^1] != item.block_root): + res.add(item.block_root) + "[" & res.mapIt(shortLog(it)).join(",") & "]" + debug "Received columns range indicates that some blocks in " & + "corresponding range are missing", missing_blocks = missing + + for record in grouped: + overseer.columnQuarantine[].put(record.block_root, record.sidecar) + + else: + debug "Sidecars are already downloaded", + custody_map = shortLog(custodyMap), + peer_map = shortLog(peerMap), + intersect_map = shortLog(intersectMap), + missing_log = missingLog, + columns_needed = columnsNeeded, columns_have = columnsHave + + debug "Sending sidecars range to processor", + peer_map = shortLog(peerMap), + blocks_count = len(blocks), + blocks_map = getShortMap(request, blocks), + blocks = slimLog(blocks) + + let res = await overseer.ssqueue(direction).push( + request, blocks, maybeFinalized = true) + + debug "Sidecars queue response", + code = res.code, count = res.count, blck = shortLog(res.blck), + peer_map = shortLog(peerMap), + blocks_count = len(blocks), + blocks_map = getShortMap(request, blocks), + blocks = slimLog(blocks) + + if res.code == SyncProcessError.MissingSidecars: + let + blck = getBlock(blocks, res.blck.get().root, res.blck.get().slot) + doAssert(not(isNil(blck)), "Should not be nil") + debug "Sidecars range still missing items", + blck = slimLog(blck), + peer_map = shortLog(peerMap), + missing_sidecars = overseer.getMissingIndicesLog(blck) + + # In case we not advance - we should cleanup blob/column quarantines on + # fatal errors. 
+ if res.count <= 0: + if res.code in [SyncProcessError.Invalid, + SyncProcessError.UnviableFork]: + for signed in blocks: + overseer.columnQuarantine[].remove(signed[].root) + res + + except CancelledError as exc: + overseer.ssqueue(direction).push(request) + raise exc + + of ConsensusFork.Gloas: + raiseAssert "Unsupported fork" + + if resp.count > 0: + peer.updateScore(PeerScoreGoodValues) + case direction + of SyncQueueKind.Forward: + let advanceSlot = + min(overseer.bsqueue(direction).inpSlot, + overseer.ssqueue(direction).inpSlot) + debug "Pruning sync data structures", + advance_slot = advanceSlot, prune_epoch = advanceSlot.epoch() + overseer.fblockBuffer.advance(advanceSlot) + of SyncQueueKind.Backward: + let advanceSlot = + max(overseer.bsqueue(direction).inpSlot, + overseer.ssqueue(direction).inpSlot) + debug "Pruning sync data structures", + advance_slot = advanceSlot, prune_epoch = advanceSlot.epoch() + overseer.bblockBuffer.advance(advanceSlot) + true + elif resp.count == 0: + true + else: + let rewindPoint = overseer.ssqueue(direction).inpSlot + + logScope: + code = resp.code + count = resp.count + rewind_point = rewindPoint + blck = shortLog(resp.blck) + + let before = shortLog(overseer.sbuffer(direction)) + case direction + of SyncQueueKind.Forward: + overseer.fblockBuffer.invalidate(rewindPoint) + of SyncQueueKind.Backward: + overseer.bblockBuffer.invalidate(rewindPoint) + debug "Blocks queue rewind detected, invalidating block buffer", + block_buffer_before = before + + case direction + of SyncQueueKind.Forward: + if rewindPoint < overseer.bsqueue(direction).startSlot: + debug "Sidecars queue is not in range yet, no syncing needed" + return false + + if rewindPoint >= overseer.bsqueue(direction).inpSlot: + debug "Blocks queue is far behind, no syncing needed" + return false + + debug "Sidecars queue got rewind, syncing blocks queue" + await overseer.bsqueue(direction).resetWait(rewindPoint) + debug "Sync queues are in sync" + + of SyncQueueKind.Backward: + if rewindPoint > overseer.bsqueue(direction).startSlot: + debug "Sidecars queue is not in range yet, no syncing needed" + return false + + if rewindPoint <= overseer.bsqueue(direction).inpSlot: + debug "Blocks queue is far behind, no syncing needed" + return false + + debug "Sidecars queue got rewind, syncing blocks queue" + await overseer.bsqueue(direction).resetWait(rewindPoint) + debug "Sync queues are in sync" + + false + +func getLastSeenFinalizedEpoch( + overseer: SyncOverseerRef2, +): Epoch = + if overseer.lastSeenCheckpoint.isNone(): + return GENESIS_EPOCH + overseer.lastSeenCheckpoint.get().epoch + +func getLastSeenHeadSlot( + overseer: SyncOverseerRef2 +): Slot = + if overseer.lastSeenHead.isNone(): + return GENESIS_SLOT + overseer.lastSeenHead.get().slot + +proc startPeer( + overseer: SyncOverseerRef2, + peer: Peer +): Future[void] {.async: (raises: []).} = + let dag = overseer.consensusManager.dag + + logScope: + peer = peer + peer_agent = $peer.getRemoteAgent() + peer_score = peer.getScore() + peer_speed = peer.netKbps() + peer_head = shortLog(peer.getHeadBlockId()) + peer_checkpoint = shortLog(peer.getFinalizedCheckpoint()) + + try: + debug "Peer loop established" + + while true: + let loopTime = Moment.now() + if not(await overseer.doPeerUpdateStatus(peer)): + return + if not(overseer.pool.checkPeerScore(peer)): + return + + let peerEntry = overseer.sdag.peers.getOrDefault(peer.getKey()) + if isNil(peerEntry): + return + + if overseer.finalizedDistance().get() < RootSyncEpochsActivationCount: + debug "Peer 
current root state", + local_head = dag.head.slot, + head_distance = overseer.syncDistance(peer) + + if not(await overseer.doPeerUpdateRoots(peer)): + return + if not(overseer.pool.checkPeerScore(peer)): + return + + if not(await overseer.doPeerUpdateRootsSidecars(peer)): + return + if not(overseer.pool.checkPeerScore(peer)): + return + + let checkpoint = getStateField(dag.headState, finalized_checkpoint) + + if overseer.finalizedDistance().get() > 1'u64: + # TODO (cheatfate): we should check for WSP. + debug "Peer current forward state", + local_checkpoint = shortLog(checkpoint), + peer_finalized_distance = overseer.finalizedDistance(peer), + finalized_distance = overseer.finalizedDistance().get(), + forward_block_buffer = shortLog(overseer.fblockBuffer) + + if not(overseer.fblockBuffer.almostFull()): + if not(await overseer.doRangeSyncStep(peer, SyncQueueKind.Forward)): + return + if not(overseer.pool.checkPeerScore(peer)): + return + + if not(await overseer.doRangeSidecarsStep(peer, SyncQueueKind.Forward)): + return + if not(overseer.pool.checkPeerScore(peer)): + return + + if dag.needsBackfill(): + debug "Peer current backfill state", + needs_backfill = dag.needsBackfill(), + backfill_slot = dag.backfill.slot, + backfill_distance = overseer.backfillDistance(), + backward_block_buffer = shortLog(overseer.bblockBuffer) + if overseer.wallSyncDistance() <= SyncDeviationSlotsCount: + if not(overseer.bblockBuffer.almostFull()): + if not( + await overseer.doRangeSyncStep(peer, SyncQueueKind.Backward)): + return + if not(overseer.pool.checkPeerScore(peer)): + return + + if not( + await overseer.doRangeSidecarsStep(peer, SyncQueueKind.Backward)): + return + if not(overseer.pool.checkPeerScore(peer)): + return + + if not(await overseer.doPeerPause(peer, loopTime)): + return + + except CancelledError: + discard + finally: + # Cleanup + var entry: PeerEntryRef[Peer] + if overseer.sdag.peers.pop(peer.getKey(), entry): + overseer.pool.release(peer) + if entry.columnsMap.isSome(): + overseer.updateColumnStatistics(peer, entry.columnsMap.get(), true) + debug "Remote peer disconnected" + +proc speed( + startslot, lastslot: Slot, + starttime, lasttime: chronos.Moment +): float {.inline.} = + ## Returns number of slots per second. 
+ if (lastslot <= startslot) or (lasttime <= starttime): + 0.0 # replays for example + else: + float(lastslot - startslot) / toFloatSeconds(lasttime - starttime) + +proc toTimeLeftString(d: Duration): string = + if d == InfiniteDuration: + return "--h--m" + + var v = d + var res = "" + let ndays = chronos.days(v) + if ndays > 0: + res = res & (if ndays < 10: "0" & $ndays else: $ndays) & "d" + v = v - chronos.days(ndays) + + let nhours = chronos.hours(v) + if nhours > 0: + res = res & (if nhours < 10: "0" & $nhours else: $nhours) & "h" + v = v - chronos.hours(nhours) + else: + res = res & "00h" + + let nmins = chronos.minutes(v) + if nmins > 0: + res = res & (if nmins < 10: "0" & $nmins else: $nmins) & "m" + v = v - chronos.minutes(nmins) + else: + res = res & "00m" + res + +type + SyncPerformance = object + average: float + count: int + done: float + timeLeft: chronos.Duration + +func init(t: typedesc[SyncPerformance]): SyncPerformance = + SyncPerformance() + +func update( + performance: var SyncPerformance, + slota, slotb: Slot, + timea, timeb: chronos.Moment, + total: uint64, + remains: uint64 +) = + let + forwardSlotsPerSec = speed(slota, slotb, timea, timeb) + remainsFloat = float(remains) + totalFloat = float(total) + inc(performance.count) + performance.average = + performance.average + + (forwardSlotsPerSec - performance.average) / float(performance.count) + performance.done = + if totalFloat == 0.0: + 0.0 + else: + (totalFloat - remainsFloat) / totalFloat + performance.timeLeft = + if performance.average >= 0.001: + Duration.fromFloatSeconds(remainsFloat / performance.average) + else: + InfiniteDuration + +func formatString(performance: SyncPerformance): string = + performance.timeLeft.toTimeLeftString() & " (" & + (performance.done * 100.0).formatBiggestFloat(ffDecimal, 2) & "%) " & + performance.average.formatBiggestFloat(ffDecimal, 4) & "slots/s" + +proc maintenanceLoop( + overseer: SyncOverseerRef2 +): Future[void] {.async: (raises: []).} = + try: + debug "Overseer maintenance established" + + while true: + await sleepAsync(1.seconds) + if overseer.finalizedDistance.isSome() and + (overseer.finalizedDistance().get() == 0'u64): + # We perform reset of forward block buffer when forward syncing + # finished + overseer.fblockBuffer.reset() + if overseer.backfillDistance() == 0'u64: + # We perform reset of backfill block buffer when backfill process + # finished + overseer.bblockBuffer.reset() + + except CancelledError: + discard + +proc timeMonitoringLoop( + overseer: SyncOverseerRef2 +): Future[void] {.async: (raises: []).} = + let + dag = overseer.consensusManager.dag + bootForwardSlot = dag.head.slot + bootBackwardSlot = dag.backfill.slot + + func forwardRemains(slot: Slot): uint64 = + let + checkpoint = overseer.lastSeenCheckpoint.valueOr: + return 0'u64 + checkpointSlot = checkpoint.epoch.start_slot() + if slot >= checkpointSlot: + return 0'u64 + checkpointSlot - slot + + func backwardRemains(slot: Slot): uint64 = + if slot < dag.horizon(): + return 0'u64 + slot - dag.horizon() + + template forwardRemains(): uint64 = forwardRemains(dag.head.slot) + template forwardTotal(): uint64 = forwardRemains(bootForwardSlot) + template backwardRemains(): uint64 = backwardRemains(dag.backfill.slot) + template backwardTotal(): uint64 = backwardRemains(bootBackwardSlot) + + try: + debug "Time/performance monitoring established" + + var + forwardPerf = SyncPerformance.init() + backwardPerf = SyncPerformance.init() + + while true: + let + startTime = Moment.now() + startForwardSlot = 
dag.head.slot + startBackwardSlot = dag.backfill.slot + + await sleepAsync(5.seconds) + + let + lastTime = Moment.now() + lastForwardSlot = dag.head.slot + lastBackwardSlot = dag.backfill.slot + + if overseer.fqueue.running() or overseer.fsqueue.running(): + forwardPerf.update( + startForwardSlot, lastForwardSlot, startTime, lastTime, + forwardTotal(), forwardRemains()) + if overseer.bqueue.running() or overseer.bsqueue.running(): + # Reverse order of slots here for a reason. + backwardPerf.update( + lastBackwardSlot, startBackwardSlot, startTime, lastTime, + backwardTotal(), backwardRemains()) + + let + lastSeenHead = + if overseer.lastSeenHead.isNone(): + "[n/a]" + else: + shortLog(overseer.lastSeenHead.get()) + lastSeenFinalizedHead = + if overseer.lastSeenCheckpoint.isNone(): + "[n/a]" + else: + shortLog(overseer.lastSeenCheckpoint.get()) + finalizedDistance = + if overseer.finalizedDistance().isNone(): + "[n/a]" + else: + $overseer.finalizedDistance().get() + backfillDistance = $overseer.backfillDistance() + lastSeenSyncDagPath = + if overseer.lastSeenHead.isNone(): + "[none]" + else: + overseer.sdag.getShortRootMap(overseer.lastSeenHead.get().root) + + overseer.statusMessages[0] = + if overseer.finalizedDistance.isNone(): + "[initializing]" + else: + if overseer.finalizedDistance().get() > 0'u64: + forwardPerf.formatString() + else: + "[finished]" + overseer.statusMessages[1] = + if overseer.backfillDistance() > 0'u64: + backwardPerf.formatString() + else: + "[finished]" + + debug "Overseer debug statistics", + wall_slot = overseer.beaconClock.currentSlot(), + head = shortLog(dag.head), + finalized = shortLog( + getStateField(dag.headState, finalized_checkpoint)), + last_seen_head = lastSeenHead, + last_seen_finalized = lastSeenFinalizedHead, + finalized_distance = finalizedDistance, + backfill_distance = backfillDistance, + blob_horizon = overseer.getBlobsHorizon().start_slot(), + column_horizon = overseer.getColumnsHorizon().start_slot(), + sdag_peer_entries_count = len(overseer.sdag.peers), + sdag_roots_count = len(overseer.sdag.roots), + sdag_slots_count = len(overseer.sdag.slots), + forward_sync_status = overseer.statusMessages[0], + backward_sync_status = overseer.statusMessages[1], + forward_block_buffer = shortLog(overseer.fblockBuffer), + backward_block_buffer = shortLog(overseer.bblockBuffer), + forward_blocks_sync_queue = shortLog(overseer.fqueue), + forward_sidecars_sync_queue = shortLog(overseer.fsqueue), + backfill_blocks_sync_queue = shortLog(overseer.bqueue), + backfill_sidecars_sync_queue = shortLog(overseer.bsqueue), + root_block_buffer_length = len(overseer.rblockBuffer), + blob_quarantine = shortLog(overseer.blobQuarantine[]), + column_quarantine = shortLog(overseer.columnQuarantine[]), + useful_peers = overseer.columnsState.usefulCount, + useless_peers = overseer.columnsState.uselessCount, + distribution = overseer.getColumnsDistribution(), + columns_fill_rate = overseer.getColumnsFillRate(), + last_seen_syncdag_path = lastSeenSyncDagPath + + except CancelledError: + discard + +proc gossipMonitoringLoop( + overseer: SyncOverseerRef2 +): Future[void] {.async: (raises: []).} = + try: + let eventKey = overseer.blockGossipBus.register() + debug "Gossip block monitoring established" + while true: + let + events = await overseer.blockGossipBus.waitEvents(eventKey, 1) + event = events[0] + + consensusFork = event.blck.kind + (blockId, missingSidecars) = + withBlck(event.blck): + when consensusFork < ConsensusFork.Deneb: + ( + BlockId(slot: forkyBlck.message.slot, root: 
forkyBlck.root), + true + ) + elif consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + let res = + if forkyBlck.root in overseer.blockQuarantine[].sidecarless: + if overseer.blobQuarantine[].hasSidecars( + forkyBlck.root, forkyBlck): + false + else: + true + else: + false + (BlockId(slot: forkyBlck.message.slot, root: forkyBlck.root), res) + elif consensusFork == ConsensusFork.Fulu: + let res = + if forkyBlck.root in overseer.blockQuarantine[].sidecarless: + if overseer.columnQuarantine[].hasSidecars( + forkyBlck.root, forkyBlck): + false + else: + true + else: + false + (BlockId(slot: forkyBlck.message.slot, root: forkyBlck.root), res) + else: + raiseAssert "Unsupported fork" + + let peerLog = + if isFullZero(event.src): + # `libp2p` return an empty 'src' when this field is not filled. + "[anonymous]" + else: + shortLog(event.src) + + debug "Got block from gossip event", block_root = blockId.root, + block_slot = blockId.slot, peer = peerLog, + fork = consensusFork, missing_sidecars = missingSidecars + + discard overseer.sdag.roots.mgetOrPut( + blockId.root, SyncDagEntryRef.init(blockId)) + + overseer.updatePeer(event.src, false, event.blck, missingSidecars) + except AsyncEventQueueFullError: + raiseAssert "Unlimited AsyncEventQueue should not raise exception" + except CancelledError: + discard + + debug "Gossip block monitoring stopped" + +proc blockMonitoringLoop( + overseer: SyncOverseerRef2 +): Future[void] {.async: (raises: []).} = + try: + let + dag = overseer.consensusManager.dag + eventKey = overseer.blocksQueueBus.register() + blockQuarantine = overseer.blockQuarantine + debug "Block monitoring established" + while true: + let + events = await overseer.blocksQueueBus.waitEvents(eventKey, 1) + event = events[0] + entry = overseer.sdag.roots.getOrDefault(event.block_root) + + logScope: + block_root = shortLog(event.block_root) + block_slot = event.slot + last_seen_slot = overseer.getLastSeenHeadSlot() + last_seen_finalized_epoch = overseer.getLastSeenFinalizedEpoch() + + debug "Got block event" + + if event.slot.epoch() >= overseer.getLastSeenFinalizedEpoch(): + # We clearing `MissingSidecars` flag from all the ancestors of the + # block, because if event received, it means that block is validated and + # stored in DAG, so the block and all its ancestors has all the sidecars + # with it. 
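A minimal sketch of the ancestor-walk this comment describes, assuming a simplified stand-in for the sync DAG entry (`Entry` with an explicit `parent` link and a boolean flag); the patch's actual `cleanMissingSidecarsRoots` call follows below:

```nim
# Editorial sketch: once a descendant block has been validated and stored in
# the DAG, every ancestor necessarily had its sidecars too, so the flag can be
# cleared up the whole parent chain.
type
  Entry = ref object
    parent: Entry
    missingSidecars: bool

proc clearMissingSidecars(entry: Entry) =
  var cur = entry
  while cur != nil:
    cur.missingSidecars = false
    cur = cur.parent

when isMainModule:
  let root = Entry(missingSidecars: true)
  let child = Entry(parent: root, missingSidecars: true)
  clearMissingSidecars(child)
  assert not root.missingSidecars
```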
+ if not(isNil(entry)): + debug "Block processed, cleaning flags" + cleanMissingSidecarsRoots(entry) + + let blck = dag.getBlockRef(event.block_root).valueOr: + continue + if isNil(blck.parent): + continue + let + slot = blck.bid.slot + blockRoot = blck.bid.root + parentRoot = blck.parent.bid.root + blockId = BlockId(slot: slot, root: blockRoot) + + if isNil(entry): + debug "Got block event, which is not known", + block_root = shortLog(blockRoot), block_slot = slot, + parent_root = shortLog(parentRoot) + + discard + overseer.sdag.roots.mgetOrPut( + blockId.root, SyncDagEntryRef.init(blockId)) + + overseer.updatePeer( + overseer.localPeerId, false, slot, blockRoot, parentRoot, false) + + except AsyncEventQueueFullError: + raiseAssert "Unlimited AsyncEventQueue should not raise exception" + except CancelledError: + discard + + debug "Block monitoring stopped" + +proc finalMonitoringLoop( + overseer: SyncOverseerRef2 +): Future[void] {.async: (raises: []).} = + try: + let + dag = overseer.consensusManager.dag + eventKey = overseer.blockFinalizationBus.register() + debug "Finalization monitoring established" + + while true: + let + events = await overseer.blockFinalizationBus.waitEvents(eventKey, 1) + event = events[0] + checkpoint = getStateField(dag.headState, finalized_checkpoint) + + doAssert(dag.finalizedHead.slot > GENESIS_SLOT) + let + slot = dag.finalizedHead.slot + blockRoot = dag.finalizedHead.blck.root + parentRoot = + block: + let parentBid = dag.getBlockIdAtSlot(slot - 1) + doAssert(parentBid.isSome(), + "Parent block of recently finalized block should be available") + parentBid.get().bid.root + + debug "Got finalized head event", + block_root = shortLog(event.block_root), + state_root = shortLog(event.state_root), epoch = event.epoch, + checkpoint = shortLog(checkpoint), parent_root = shortLog(parentRoot), + block_slot = slot, + last_seen_epoch = overseer.getLastSeenFinalizedEpoch() + + if event.epoch > overseer.getLastSeenFinalizedEpoch(): + debug "Got finalized head event, which is not known", + block_root = shortLog(event.block_root), + state_root = shortLog(event.state_root), epoch = event.epoch, + checkpoint = shortLog(checkpoint), parent_root = shortLog(parentRoot), + block_slot = slot + + let fentry = + overseer.sdag.roots.mgetOrPut( + checkpoint.root, SyncDagEntryRef.init(checkpoint)) + + # In case this entry already exists in DAG we should mark it. + fentry.flags.incl(DagEntryFlag.Finalized) + + # sidecarsMissing == false in this case because this block was recently + # selected as finalized head, so it is sure has sidecars already. + overseer.updatePeer( + overseer.localPeerId, false, slot, blockRoot, parentRoot, false) + + # Pruning SyncDag. 
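A hedged sketch of the pruning step invoked just below, assuming a simplified root-to-slot table in place of the real `SyncDag` structures:

```nim
# Editorial sketch: entries from epochs older than the finalized epoch can no
# longer influence sync decisions, so they are removed from the index.
import std/tables

const SLOTS_PER_EPOCH = 32'u64

proc prune(roots: var Table[string, uint64], finalizedEpoch: uint64) =
  var stale: seq[string]
  for root, slot in roots:
    if slot div SLOTS_PER_EPOCH < finalizedEpoch:
      stale.add(root)
  for root in stale:
    roots.del(root)

when isMainModule:
  var m = {"old": 31'u64, "new": 64'u64}.toTable
  m.prune(2'u64)
  assert "old" notin m and "new" in m
```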
+ overseer.sdag.prune(event.epoch) + + except AsyncEventQueueFullError: + raiseAssert "Unlimited AsyncEventQueue should not raise exception" + except CancelledError: + discard + + debug "Finalization monitoring stopped" + +proc mainLoop*( + overseer: SyncOverseerRef2 +): Future[void] {.async: (raises: []).} = + let dag = overseer.consensusManager.dag + + logScope: + wall_slot = overseer.beaconClock.currentSlot() + head_slot = dag.head.slot + finalized_checkpoint = + shortLog(getStateField(dag.headState, finalized_checkpoint)) + horizon = dag.horizon() + fulu_fork_epoch = dag.cfg.FULU_FORK_EPOCH + backfill_slot = dag.backfill.slot + + overseer.fblockBuffer = + BlocksRangeBuffer.init(SyncQueueKind.Forward, 320) + let backSlot = + if dag.backfill.parent_root != dag.tail.root: + dag.backfill.slot + else: + dag.tail.slot + overseer.bblockBuffer = + BlocksRangeBuffer.init(SyncQueueKind.Backward, 320) + + info "Sync overseer started" + + let + gossipMonitoringLoopFut = overseer.gossipMonitoringLoop() + blockMonitoringLoopFut = overseer.blockMonitoringLoop() + finalMonitoringLoopFut = overseer.finalMonitoringLoop() + timeMonitoringLoopFut = overseer.timeMonitoringLoop() + maintenanceLoopFut = overseer.maintenanceLoop() + + while true: + let peer = + try: + await overseer.pool.acquire() + except CancelledError: + # TODO (cheatfate): Release all peers? + debug "Sync overseer interrupted" + let pending = @[ + gossipMonitoringLoopFut.cancelAndWait(), + blockMonitoringLoopFut.cancelAndWait(), + finalMonitoringLoopFut.cancelAndWait(), + timeMonitoringLoopFut.cancelAndWait(), + maintenanceLoopFut.cancelAndWait() + ] + await noCancel allFutures(pending) + return + let entry = overseer.initPeer(peer) + overseer.updatePeer(peer) + entry.peerLoopFut = overseer.startPeer(peer) + +proc start*(overseer: SyncOverseerRef2) = + overseer.loopFuture = overseer.mainLoop() + +proc stop*(overseer: SyncOverseerRef2) {.async: (raises: []).} = + doAssert(not(isNil(overseer.loopFuture)), + "SyncOverseer was not started yet") + if not(overseer.loopFuture.finished()): + await cancelAndWait(overseer.loopFuture) + +proc syncDistance*(overseer: SyncOverseerRef2): uint64 = + let + wallSlot = overseer.beaconClock.currentSlot() + dag = overseer.consensusManager.dag + syncedSlot = + if overseer.lastSeenHead.isNone(): + wallSlot + else: + overseer.lastSeenHead.get.slot + + if syncedSlot < dag.head.slot: + return 0'u64 + + if (syncedSlot - dag.head.slot) < SyncDeviationSlotsCount: + 0'u64 + else: + syncedSlot - dag.head.slot + +proc syncInProgress*(overseer: SyncOverseerRef2): bool = + overseer.syncDistance() > 0 + +proc syncStatusMessage*( + overseer: SyncOverseerRef2, +): string = + let + dag = overseer.consensusManager.dag + wallSlot = overseer.beaconClock.currentSlot() + optimistic = not(dag.head.executionValid) + optSuffix = if optimistic: " [opt]" else: "" + lcSuffix = + if overseer.consensusManager[].shouldSyncOptimistically(wallSlot): + " - lc: " & $shortLog(overseer.consensusManager[].optimisticHead) + else: + "" + if overseer.lastSeenCheckpoint.isNone(): + return "pending" + + let epoch = overseer.getLastSeenFinalizedEpoch() + if dag.head.slot.epoch() < epoch: + return overseer.statusMessages[0] & optSuffix & lcSuffix + + if dag.needsBackfill(): + return "backfill: " & overseer.statusMessages[1] + + "synced" & optSuffix & lcSuffix diff --git a/beacon_chain/sync/sync_queue.nim b/beacon_chain/sync/sync_queue.nim index 5c78ab44e6..06c1c56a37 100644 --- a/beacon_chain/sync/sync_queue.nim +++ b/beacon_chain/sync/sync_queue.nim 
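Before the `sync_queue.nim` changes below, a hedged sketch of the distance rule that `syncDistance` above implements, assuming plain `uint64` slots and an illustrative deviation constant (the value is an assumption, not taken from the patch):

```nim
# Editorial sketch: small gaps between the last seen head and the local head
# are treated as "in sync" so transient lag does not flip the node into
# syncing state.
const SyncDeviationSlotsCount = 4'u64  # assumed value for illustration only

func syncDistance(syncedSlot, headSlot: uint64): uint64 =
  if syncedSlot < headSlot:
    return 0'u64
  if syncedSlot - headSlot < SyncDeviationSlotsCount:
    0'u64
  else:
    syncedSlot - headSlot

when isMainModule:
  assert syncDistance(102, 100) == 0
  assert syncDistance(110, 100) == 10
```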
@@ -10,7 +10,7 @@ import std/[deques, heapqueue, tables, strutils, sequtils, math, typetraits] import stew/base10, chronos, chronicles, results import - ../spec/[helpers, forks], + ../spec/[helpers, forks, column_map], ../networking/[peer_pool, eth2_network], ../gossip_processing/block_processor, ../consensus_object_pools/block_pools_types @@ -22,12 +22,14 @@ type GetSlotCallback* = proc(): Slot {.gcsafe, raises: [].} GetBoolCallback* = proc(): bool {.gcsafe, raises: [].} ProcessingCallback* = proc() {.gcsafe, raises: [].} - BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], maybeFinalized: bool): + BlockVerifier* = + proc(signedBlock: ref ForkedSignedBeaconBlock, maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} ForkAtEpochCallback* = proc(epoch: Epoch): ConsensusFork {.gcsafe, raises: [].} + UniqueId* = distinct uint64 + SyncRange* = object slot*: Slot count*: uint64 @@ -44,6 +46,7 @@ type SyncRequest*[T] = object kind*: SyncQueueKind + id*: UniqueId data*: SyncRange flags*: set[SyncRequestFlag] item*: T @@ -59,22 +62,25 @@ type request: SyncRequest[T] resetFlag: bool - SyncProcessError {.pure.} = enum + SyncProcessError* {.pure.} = enum Invalid, MissingParent, GoodAndMissingParent, UnviableFork, Duplicate, Empty, + MissingSidecars, + NoRelevant, NoError - SyncBlock = object - slot: Slot - root: Eth2Digest - SyncProcessingResult = object code: SyncProcessError - blck: Opt[SyncBlock] + blck: Opt[BlockId] + + SyncPushResponse* = object + code*: SyncProcessError + count*: int64 + blck*: Opt[BlockId] GapItem[T] = object data: SyncRange @@ -101,27 +107,49 @@ type waiters: seq[SyncWaiterItem[T]] gapList: seq[GapItem[T]] lock: AsyncLock + uniqId: uint64 + skipId: uint64 ident: string -chronicles.formatIt SyncQueueKind: toLowerAscii($it) - proc `$`*(srange: SyncRange): string = - "[" & Base10.toString(uint64(srange.slot)) & ":" & - Base10.toString(uint64(srange.slot + srange.count - 1)) & "]" + if (srange.slot == FAR_FUTURE_SLOT) and (srange.count == 0): + "[empty]" + else: + "[" & Base10.toString(uint64(srange.slot)) & ":" & + Base10.toString(uint64(srange.slot + srange.count - 1)) & "]" -template shortLog[T](req: SyncRequest[T]): string = - $req.data & "@" & Base10.toString(req.data.count) +template shortLog*[T](req: SyncRequest[T]): string = + if (req.data.slot == FAR_FUTURE_SLOT) and (req.data.count == 0): + "[empty]" + else: + $req.data & "@" & Base10.toString(req.data.count) +chronicles.formatIt SyncQueueKind: toLowerAscii($it) chronicles.expandIt SyncRequest: `it` = shortLog(it) peer = shortLog(it.item) direction = toLowerAscii($it.kind) -chronicles.formatIt Opt[SyncBlock]: - if it.isSome(): - Base10.toString(uint64(it.get().slot)) & "@" & shortLog(it.get().root) +func getId[T](sq: SyncQueue[T]): UniqueId = + inc(sq.uniqId) + UniqueId(sq.uniqId) + +proc shortLog*[T](sq: SyncQueue[T]): string = + if isNil(sq): + "[empty]" else: - "" + let start = + case sq.kind + of SyncQueueKind.Forward: + "[F:" + of SyncQueueKind.Backward: + "[B:" + start & $sq.startSlot & ":" & $sq.finalSlot & "@" & $sq.inpSlot & "]" + +func slimLog*(blocks: openArray[ref ForkedSignedBeaconBlock]): string = + "[" & blocks.mapIt( + "(slot: " & $it[].slot() & ", root: " & shortLog(it[].root()) & + ", parent_root: " & shortLog(it[].parent_root()) & ")").join(",") & "]" func getShortMap*[T]( req: SyncRequest[T], @@ -148,8 +176,42 @@ func getShortMap*[T]( slider = slider + 1 res -proc getShortMap*[T](req: SyncRequest[T], - data: openArray[ref 
BlobSidecar]): string = +func getBlockBlobsMap*[T]( + req: SyncRequest[T], + data: openArray[ref ForkedSignedBeaconBlock] +): string = + var + res = newStringOfCap(req.data.count) + slider = req.data.slot + last = 0 + + for i in 0 ..< req.data.count: + if last < len(data): + for k in last ..< len(data): + let (slot, count) = + withBlck(data[k][]): + when consensusFork in [ConsensusFork.Deneb, ConsensusFork.Electra]: + (forkyBlck.message.slot, + len(forkyBlck.message.body.blob_kzg_commitments)) + else: + (forkyBlck.message.slot, 0) + if slider == slot: + res.add($count) + last = k + 1 + break + elif slider < slot: + res.add('.') + break + else: + res.add('.') + slider = slider + 1 + + res + +proc getShortMap*[T]( + req: SyncRequest[T], + data: openArray[ref BlobSidecar] +): string = var res = newStringOfCap(req.data.count) slider = req.data.slot @@ -214,6 +276,43 @@ proc getShortMap*[T]( return '.'.repeat(req.data.count) getShortMap(req, data.get()) +func getShortMap*[T]( + req: SyncRequest[T], + map: ColumnMap, + data: openArray[ref fulu.DataColumnSidecar] +): string = + let + alphabet = + "123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/#" + unknown = "…" + + var + res = newStringOfCap(req.data.count) + slider = req.data.slot + last = 0 + + for i in 0 ..< req.data.count: + if last < len(data): + var counter = 0 + for k in last ..< len(data): + if slider < data[k][].signed_block_header.message.slot: + break + elif slider == data[k][].signed_block_header.message.slot: + if data[k][].index in map: + inc(counter) + last = last + counter + if counter == 0: + res.add('.') + else: + if counter < 66: + res.add(alphabet[counter - 1]) + else: + res.add(unknown) + else: + res.add('.') + slider = slider + 1 + res + func init*(t: typedesc[SyncRange], slot: Slot, count: uint64): SyncRange = SyncRange(slot: slot, count: count) @@ -228,45 +327,58 @@ func init(t: typedesc[SyncProcessError], SyncProcessError.UnviableFork of VerifierError.Duplicate: SyncProcessError.Duplicate - -func init(t: typedesc[SyncBlock], slot: Slot, root: Eth2Digest): SyncBlock = - SyncBlock(slot: slot, root: root) + of VerifierError.MissingSidecars: + SyncProcessError.MissingSidecars func init(t: typedesc[SyncProcessError]): SyncProcessError = SyncProcessError.NoError func init(t: typedesc[SyncProcessingResult], se: SyncProcessError, slot: Slot, root: Eth2Digest): SyncProcessingResult = - SyncProcessingResult(blck: Opt.some(SyncBlock.init(slot, root)), code: se) + SyncProcessingResult(blck: Opt.some(BlockId(slot: slot, root: root)), + code: se) func init(t: typedesc[SyncProcessingResult], se: SyncProcessError): SyncProcessingResult = SyncProcessingResult(code: se) func init(t: typedesc[SyncProcessingResult], se: SyncProcessError, - sblck: SyncBlock): SyncProcessingResult = + sblck: BlockId): SyncProcessingResult = SyncProcessingResult(blck: Opt.some(sblck), code: se) func init(t: typedesc[SyncProcessingResult], ve: VerifierError, slot: Slot, root: Eth2Digest): SyncProcessingResult = - SyncProcessingResult(blck: Opt.some(SyncBlock.init(slot, root)), - code: SyncProcessError.init(ve)) + SyncProcessingResult(blck: Opt.some(BlockId(slot: slot, root: root)), + code: SyncProcessError.init(ve)) func init(t: typedesc[SyncProcessingResult], ve: VerifierError, - sblck: SyncBlock): SyncProcessingResult = + sblck: BlockId): SyncProcessingResult = SyncProcessingResult(blck: Opt.some(sblck), code: SyncProcessError.init(ve)) -func init*[T](t: typedesc[SyncRequest], kind: SyncQueueKind, - item: T): SyncRequest[T] = +func init*[T]( + 
t: typedesc[SyncRequest], + kind: SyncQueueKind, + item: T +): SyncRequest[T] = SyncRequest[T]( kind: kind, data: SyncRange(slot: FAR_FUTURE_SLOT, count: 0'u64), item: item ) -func init*[T](t: typedesc[SyncRequest], kind: SyncQueueKind, - data: SyncRange, item: T): SyncRequest[T] = - SyncRequest[T](kind: kind, data: data, item: item) +func init*[T]( + t: typedesc[SyncRequest], + sq: SyncQueue[T], + kind: SyncQueueKind, + data: SyncRange, + item: T +): SyncRequest[T] = + SyncRequest[T]( + kind: kind, + data: data, + item: item, + id: sq.getId() + ) func init[T](t: typedesc[SyncQueueItem], req: SyncRequest[T]): SyncQueueItem[T] = @@ -281,10 +393,10 @@ func last_slot*(epoch: Epoch): Slot = if epoch >= maxEpoch: FAR_FUTURE_SLOT else: Slot(epoch * SLOTS_PER_EPOCH + (SLOTS_PER_EPOCH - 1'u64)) -func start_slot*(sr: SyncRange): Slot = +template start_slot*(sr: SyncRange): Slot = sr.slot -func last_slot*(sr: SyncRange): Slot = +template last_slot*(sr: SyncRange): Slot = if sr.slot + (uint64(sr.count) - 1'u64) < sr.slot: FAR_FUTURE_SLOT else: @@ -328,7 +440,7 @@ proc epochFilter*[T](squeue: SyncQueue[T], srange: SyncRange): SyncRange = else: srange -func next[T](sq: SyncQueue[T], srange: SyncRange): SyncRange {.inline.} = +func next*[T](sq: SyncQueue[T], srange: SyncRange): SyncRange {.inline.} = let slot = srange.slot + srange.count if slot == FAR_FUTURE_SLOT: # Finish range @@ -342,7 +454,7 @@ func next[T](sq: SyncQueue[T], srange: SyncRange): SyncRange {.inline.} = else: SyncRange.init(slot, sq.chunkSize) -func prev[T](sq: SyncQueue[T], srange: SyncRange): SyncRange {.inline.} = +func prev*[T](sq: SyncQueue[T], srange: SyncRange): SyncRange {.inline.} = if srange.slot == GENESIS_SLOT: # Start range srange @@ -354,25 +466,25 @@ func prev[T](sq: SyncQueue[T], srange: SyncRange): SyncRange {.inline.} = else: SyncRange.init(slot, sq.chunkSize) -func contains(srange: SyncRange, slot: Slot): bool {.inline.} = +func contains*(srange: SyncRange, slot: Slot): bool {.inline.} = ## Returns `true` if `slot` is in range of `srange`. if (srange.slot + srange.count) < srange.slot: (slot >= srange.slot) and (slot <= FAR_FUTURE_SLOT) else: (slot >= srange.slot) and (slot < (srange.slot + srange.count)) -func `>`(a, b: SyncRange): bool {.inline.} = +func `>`*(a, b: SyncRange): bool {.inline.} = ## Returns `true` if range `a` is above of range `b`. (a.slot > b.slot) and (a.slot + a.count - 1 > b.slot) -func `<`(a, b: SyncRange): bool {.inline.} = +func `<`*(a, b: SyncRange): bool {.inline.} = ## Returns `true` if range `a` is below of range `b`. (a.slot < b.slot) and (a.slot + a.count - 1 < b.slot) -func `==`(a, b: SyncRange): bool {.inline.} = +func `==`*(a, b: SyncRange): bool {.inline.} = (a.slot == b.slot) and (a.count == b.count) -func `==`[T](a, b: SyncRequest[T]): bool {.inline.} = +func `==`*[T](a, b: SyncRequest[T]): bool {.inline.} = (a.kind == b.kind) and (a.item == b.item) and (a.data == b.data) proc hasEndGap*[T]( @@ -390,6 +502,10 @@ proc updateLastSlot*[T](sq: SyncQueue[T], last: Slot) {.inline.} = ## Update last slot stored in queue ``sq`` with value ``last``. sq.finalSlot = last +func contains*[T](sq: SyncQueue[T], slot: Slot): bool = + ## Returns ``true`` if ``slot`` is in queue's range [startSlot, finalSlot]. 
+ (slot >= sq.startSlot) and (slot <= sq.finalSlot) + proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, safeSlot: Slot): Slot = case sq.kind @@ -431,13 +547,13 @@ proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, finalized_epoch = finalizedEpoch, sync_ident = sq.ident, direction = sq.kind, - topics = "syncman" + topics = "sync" 0'u64 else: # `MissingParent` happened at different slot so we going to rewind for # 1 epoch only. if (failEpoch < 1'u64) or (failEpoch - 1'u64 < finalizedEpoch): - warn "Сould not rewind further than the last finalized epoch", + warn "Could not rewind further than the last finalized epoch", finalized_slot = safeSlot, fail_slot = failSlot, finalized_epoch = finalizedEpoch, @@ -446,14 +562,14 @@ proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, finalized_epoch = finalizedEpoch, sync_ident = sq.ident, direction = sq.kind, - topics = "syncman" + topics = "sync" 0'u64 else: 1'u64 else: # `MissingParent` happened first time. if (failEpoch < 1'u64) or (failEpoch - 1'u64 < finalizedEpoch): - warn "Сould not rewind further than the last finalized epoch", + warn "Could not rewind further than the last finalized epoch", finalized_slot = safeSlot, fail_slot = failSlot, finalized_epoch = finalizedEpoch, @@ -461,7 +577,7 @@ proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, finalized_epoch = finalizedEpoch, sync_ident = sq.ident, direction = sq.kind, - topics = "syncman" + topics = "sync" 0'u64 else: 1'u64 @@ -475,7 +591,7 @@ proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, finalized_epoch = finalizedEpoch, sync_ident = sq.ident, direction = sq.kind, - topics = "syncman" + topics = "sync" # Calculate the rewind epoch, which will be equal to last rewind point or # finalizedEpoch let rewindEpoch = @@ -501,7 +617,7 @@ proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, fail_slot = failSlot, sync_ident = sq.ident, direction = sq.kind, - topics = "syncman" + topics = "sync" safeSlot func init*[T](t1: typedesc[SyncQueue], t2: typedesc[T], @@ -531,9 +647,20 @@ func init*[T](t1: typedesc[SyncQueue], t2: typedesc[T], forkAtEpoch: forkAtEpoch, requests: initDeque[SyncQueueItem[T]](), lock: newAsyncLock(), + uniqId: 0'u64, + skipId: 0'u64, ident: ident ) +func reset*[T](sq: SyncQueue[T], start, final: Slot) = + sq.startSlot = start + sq.finalSlot = final + sq.inpSlot = start + sq.outSlot = start + sq.skipId = 0'u64 + sq.uniqId = 0'u64 + sq.requests.reset() + func searchPeer[T](requests: openArray[SyncRequest[T]], source: T): int = for index, request in requests.pairs(): if request.item == source: @@ -591,7 +718,7 @@ proc rewardForGaps[T](sq: SyncQueue[T], score: int) = penalty = newScore, sync_ident = sq.ident, direction = sq.kind, - topics = "syncman" + topics = "sync" else: gap.item.updateScore(score) @@ -610,7 +737,7 @@ proc pop*[T](sq: SyncQueue[T], peerMaxSlot: Slot, item: T): SyncRequest[T] = else: doAssert(count < sq.requestsCount, "You should not pop so many requests for single peer") - let request = SyncRequest.init(sq.kind, qitem.data, item) + let request = SyncRequest.init(sq, sq.kind, qitem.data, item) qitem.requests.add(request) request else: @@ -647,11 +774,11 @@ proc pop*[T](sq: SyncQueue[T], peerMaxSlot: Slot, item: T): SyncRequest[T] = # Peer could not satisfy our request, returning empty one. 
SyncRequest.init(sq.kind, item) else: - let request = SyncRequest.init(sq.kind, sq.epochFilter(newrange), item) + let request = SyncRequest.init(sq, sq.kind, sq.epochFilter(newrange), item) sq.requests.addLast(SyncQueueItem.init(request)) request -proc wakeupWaiters[T](sq: SyncQueue[T], resetFlag = false) = +proc wakeupWaiters*[T](sq: SyncQueue[T], resetFlag = false) = ## Wakeup one or all blocked waiters. for item in sq.waiters: item.resetFlag = resetFlag @@ -703,18 +830,37 @@ proc advanceOutput[T](sq: SyncQueue[T], number: uint64) = proc advanceInput[T](sq: SyncQueue[T], number: uint64) = advanceImpl(sq.kind, sq.inpSlot, number) -proc advanceQueue[T](sq: SyncQueue[T]) = +proc advanceQueue[T](sq: SyncQueue[T], count: var int64) = if len(sq.requests) > 0: let item = sq.requests.popFirst() sq.advanceInput(item.data.count) sq.advanceOutput(item.data.count) + # It is usually safe conversion, because value is limited by `sq.chunkSize`. + count = int64(item.data.count) else: sq.advanceInput(sq.chunkSize) sq.advanceOutput(sq.chunkSize) + # It is usually safe conversion, because value is limited by `sq.chunkSize`. + count = int64(sq.chunkSize) sq.wakeupWaiters() +proc getRetreatCount(requestSlot, rewindSlot: Slot): int64 = + let res = + if requestSlot >= rewindSlot: + # In some case this value could exceed int64 bounds, but it would be fully + # unfunctional network. + -int64(requestSlot - rewindSlot) + else: + # In some case this value could exceed int64 bounds, but it would be fully + # unfunctional network. + -int64(rewindSlot - requestSlot) + res + proc resetQueue[T](sq: SyncQueue[T]) = sq.requests.reset() + # We are making all requests that have been issued up to this moment of time - + # non-relevant. + sq.skipId = sq.uniqId proc clearAndWakeup*[T](sq: SyncQueue[T]) = # Reset queue and wakeup all the waiters. @@ -725,7 +871,7 @@ proc isEmpty*[T](sr: SyncRequest[T]): bool = # Returns `true` if request `sr` is empty. sr.data.count == 0'u64 -proc resetWait[T]( +proc resetWait*[T]( sq: SyncQueue[T], toSlot: Slot ) {.async: (raises: [CancelledError], raw: true).} = @@ -735,24 +881,17 @@ proc resetWait[T]( sq.resetQueue() sq.wakeupAndWaitWaiters() -func getOpt(blobs: Opt[seq[BlobSidecars]], i: int): Opt[BlobSidecars] = - if blobs.isSome: - Opt.some(blobs.get()[i]) - else: - Opt.none(BlobSidecars) - iterator blocks( kind: SyncQueueKind, - blcks: seq[ref ForkedSignedBeaconBlock], - blobs: Opt[seq[BlobSidecars]] -): (ref ForkedSignedBeaconBlock, Opt[BlobSidecars]) = + blcks: openArray[ref ForkedSignedBeaconBlock], +): ref ForkedSignedBeaconBlock = case kind of SyncQueueKind.Forward: for i in countup(0, len(blcks) - 1): - yield (blcks[i], blobs.getOpt(i)) + yield blcks[i] of SyncQueueKind.Backward: for i in countdown(len(blcks) - 1, 0): - yield (blcks[i], blobs.getOpt(i)) + yield blcks[i] proc push*[T](sq: SyncQueue[T], requests: openArray[SyncRequest[T]]) = ## Push multiple failed requests back to queue. @@ -770,22 +909,27 @@ proc process[T]( sq: SyncQueue[T], sr: SyncRequest[T], blcks: seq[ref ForkedSignedBeaconBlock], - blobs: Opt[seq[BlobSidecars]], maybeFinalized: bool ): Future[SyncProcessingResult] {. 
async: (raises: [CancelledError]).} = var - slot: Opt[SyncBlock] - unviableBlock: Opt[SyncBlock] - dupBlock: Opt[SyncBlock] + slot: Opt[BlockId] + unviableBlock: Opt[BlockId] + dupBlock: Opt[BlockId] if len(blcks) == 0: return SyncProcessingResult.init(SyncProcessError.Empty) - for blk, blb in blocks(sq.kind, blcks, blobs): - let res = await sq.blockVerifier(blk[], blb, maybeFinalized) + debug "Processing request", + request = sr, + sync_ident = sq.ident, + queue = shortLog(sq), + blocks = slimLog(blcks) + + for blk in blocks(sq.kind, blcks): + let res = await sq.blockVerifier(blk, maybeFinalized) if res.isOk(): - slot = Opt.some(SyncBlock.init(blk[].slot, blk[].root)) + slot = Opt.some(BlockId(slot: blk[].slot, root: blk[].root)) else: case res.error() of VerifierError.MissingParent: @@ -797,13 +941,15 @@ proc process[T]( of VerifierError.Duplicate: # Keep going, happens naturally if dupBlock.isNone(): - dupBlock = Opt.some(SyncBlock.init(blk[].slot, blk[].root)) + dupBlock = Opt.some(BlockId(slot: blk[].slot, root: blk[].root)) + of VerifierError.MissingSidecars: + return SyncProcessingResult.init(res.error(), blk[].slot, blk[].root) of VerifierError.UnviableFork: # Keep going so as to register other unviable blocks with the # quarantine if unviableBlock.isNone(): # Remember the first unviable block, so we can log it - unviableBlock = Opt.some(SyncBlock.init(blk[].slot, blk[].root)) + unviableBlock = Opt.some(BlockId(slot: blk[].slot, root: blk[].root)) of VerifierError.Invalid: return SyncProcessingResult.init(res.error(), blk[].slot, blk[].root) @@ -819,29 +965,42 @@ proc process[T]( func isError(e: SyncProcessError): bool = case e of SyncProcessError.Empty, SyncProcessError.NoError, - SyncProcessError.Duplicate, SyncProcessError.GoodAndMissingParent: + SyncProcessError.Duplicate, SyncProcessError.GoodAndMissingParent, + SyncProcessError.NoRelevant, SyncProcessError.MissingSidecars: false of SyncProcessError.Invalid, SyncProcessError.UnviableFork, SyncProcessError.MissingParent: true +func isRelevant*[T](sq: SyncQueue[T], sr: SyncRequest[T]): bool = + uint64(sr.id) > uint64(sq.skipId) + proc push*[T]( sq: SyncQueue[T], sr: SyncRequest[T], data: seq[ref ForkedSignedBeaconBlock], - blobs: Opt[seq[BlobSidecars]], maybeFinalized: bool = false, processingCb: ProcessingCallback = nil -) {.async: (raises: [CancelledError]).} = +): Future[SyncPushResponse] {.async: (raises: [CancelledError]).} = ## Push successful result to queue ``sq``. mixin updateScore, updateStats, getStats template findPosition(sq, sr: untyped): SyncPosition = sq.find(sr).valueOr: debug "Request is not relevant anymore", - request = sr, sync_ident = sq.ident, topics = "syncman" + request = sr, queue = shortLog(sq), sync_ident = sq.ident, + topics = "sync" # Request is not in queue anymore, probably reset happened. - return + return SyncPushResponse( + code: SyncProcessError.NoRelevant, count: 0'i64) + + template checkRelevance(sq, sr: untyped) = + if not(sq.isRelevant(sr)): + debug "Request is not relevant anymore", + request = sr, queue = shortLog(sq), sync_ident = sq.ident, + topics = "sync" + return SyncPushResponse( + code: SyncProcessError.NoRelevant, count: 0'i64) # This is backpressure handling algorithm, this algorithm is blocking # all pending `push` requests if `request` is not in range. 
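A minimal model of the request-relevance scheme introduced here (`uniqId`/`skipId` and `isRelevant`), using an assumed `MiniQueue` type rather than the real `SyncQueue`:

```nim
# Editorial sketch: every issued request gets a monotonically increasing id;
# a queue reset records the last issued id as `skipId`, and any response whose
# request id is not newer than `skipId` is ignored as non-relevant.
type
  MiniQueue = object
    uniqId: uint64
    skipId: uint64

func getId(q: var MiniQueue): uint64 =
  inc q.uniqId
  q.uniqId

func resetQueue(q: var MiniQueue) =
  # Everything issued so far becomes non-relevant.
  q.skipId = q.uniqId

func isRelevant(q: MiniQueue, requestId: uint64): bool =
  requestId > q.skipId

when isMainModule:
  var q = MiniQueue()
  let beforeReset = q.getId()
  q.resetQueue()
  let afterReset = q.getId()
  assert not q.isRelevant(beforeReset)
  assert q.isRelevant(afterReset)
```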
@@ -850,6 +1009,7 @@ proc push*[T]( block: var pos: SyncPosition while true: + sq.checkRelevance(sr) pos = sq.findPosition(sr) if pos.qindex == 0: @@ -861,10 +1021,11 @@ proc push*[T]( if res: # SyncQueue reset happen debug "Request is not relevant anymore, reset has happened", - request = sr, + request = sr, queue = shortLog(sq), sync_ident = sq.ident, - topics = "syncman" - return + topics = "sync" + return SyncPushResponse( + code: SyncProcessError.NoRelevant, count: 0'i64) except CancelledError as exc: # Removing request from queue. sq.del(sr) @@ -874,16 +1035,20 @@ proc push*[T]( try: await sq.lock.acquire() except CancelledError as exc: + # Removing request from queue sq.del(sr) raise exc + var res = 0'i64 try: + sq.checkRelevance(sr) + position = sq.findPosition(sr) if not(isNil(processingCb)): processingCb() - let pres = await sq.process(sr, data, blobs, maybeFinalized) + let pres = await sq.process(sr, data, maybeFinalized) # We need to update position, because while we waiting for `process()` to # complete - clearAndWakeup() could be invoked which could clean whole the @@ -895,13 +1060,13 @@ proc push*[T]( # Empty responses does not affect failures count debug "Received empty response", request = sr, + queue = shortLog(sq), voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), - blobs_map = getShortMap(sr, blobs), sync_ident = sq.ident, - topics = "syncman" + topics = "sync" sr.item.updateStats(SyncResponseKind.Empty, 1'u64) inc(sq.requests[position.qindex].voidsCount) @@ -913,89 +1078,109 @@ proc push*[T]( # With empty response - advance only when `requestsCount` of different # peers returns empty response for the same range. 
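A hedged sketch of the empty-response rule described in the comment above, with assumed counters standing in for `SyncQueueItem`; the patched check continues right below:

```nim
# Editorial sketch: the queue only advances past a range once `requestsCount`
# distinct peers all returned an empty answer for it, so a single lazy peer
# cannot create a gap in the synced chain.
type
  RangeState = object
    voidsCount: int
    requestsCount: int

func onEmptyResponse(state: var RangeState): bool =
  ## Returns true when the range may be skipped.
  inc state.voidsCount
  state.voidsCount >= state.requestsCount

when isMainModule:
  var st = RangeState(requestsCount: 3)
  assert not st.onEmptyResponse()
  assert not st.onEmptyResponse()
  assert st.onEmptyResponse()
```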
if sq.requests[position.qindex].voidsCount >= sq.requestsCount: - sq.advanceQueue() + sq.advanceQueue(res) of SyncProcessError.Duplicate: # Duplicate responses does not affect failures count debug "Received duplicate response", request = sr, + queue = shortLog(sq), voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), - blobs_map = getShortMap(sr, blobs), sync_ident = sq.ident, - topics = "syncman" + topics = "sync" + sq.gapList.reset() - sq.advanceQueue() + sq.advanceQueue(res) + + of SyncProcessError.MissingSidecars: + debug "Received blocks without sidecars", + request = sr, + queue = shortLog(sq), + voids_count = sq.requests[position.qindex].voidsCount, + failures_count = sq.requests[position.qindex].failuresCount, + blocks_count = len(data), + blocks_map = getShortMap(sr, data), + sync_ident = sq.ident, + topics = "sync" + + inc(sq.requests[position.qindex].failuresCount) + sq.del(position) + res = 0'i64 of SyncProcessError.Invalid: debug "Block pool rejected peer's response", request = sr, + queue = shortLog(sq), invalid_block = pres.blck, voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), - blobs_map = getShortMap(sr, blobs), sync_ident = sq.ident, - topics = "syncman" + topics = "sync" inc(sq.requests[position.qindex].failuresCount) sq.del(position) + res = 0'i64 of SyncProcessError.UnviableFork: notice "Received blocks from an unviable fork", request = sr, + queue = shortLog(sq), unviable_block = pres.blck, voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), - blobs_map = getShortMap(sr, blobs), sync_ident = sq.ident, - topics = "syncman" + topics = "sync" sr.item.updateScore(PeerScoreUnviableFork) inc(sq.requests[position.qindex].failuresCount) sq.del(position) + res = 0'i64 of SyncProcessError.MissingParent: debug "Unexpected missing parent", request = sr, + queue = shortLog(sq), missing_parent_block = pres.blck, voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), - blobs_map = getShortMap(sr, blobs), sync_ident = sq.ident, direction = sq.kind, - topics = "syncman" + topics = "sync" sr.item.updateScore(PeerScoreMissingValues) sq.rewardForGaps(PeerScoreMissingValues) sq.gapList.reset() inc(sq.requests[position.qindex].failuresCount) sq.del(position) + res = 0'i64 of SyncProcessError.GoodAndMissingParent: # Responses which has at least one good block and a gap does not affect # failures count debug "Unexpected missing parent, but no rewind needed", request = sr, + queue = shortLog(sq), finalized_slot = sq.getSafeSlot(), missing_parent_block = pres.blck, voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, blocks_count = len(data), blocks_map = getShortMap(sr, data), - blobs_map = getShortMap(sr, blobs), sync_ident = sq.ident, - topics = "syncman" + topics = "sync" sr.item.updateScore(PeerScoreMissingValues) sq.del(position) + res = 0'i64 of SyncProcessError.NoError: sr.item.updateScore(PeerScoreGoodValues) @@ -1006,20 +1191,25 @@ proc push*[T]( if sr.hasEndGap(data): sq.gapList.add(GapItem.init(sr)) - sq.advanceQueue() + 
sq.advanceQueue(res) + of SyncProcessError.NoRelevant: + raiseAssert "Processor should not return this error code" if pres.code.isError(): if sq.requests[position.qindex].failuresCount >= sq.failureResetThreshold: let point = sq.getRewindPoint(pres.blck.get().slot, sq.getSafeSlot()) debug "Multiple repeating errors occured, rewinding", + reason = pres.code, + request = sr, + queue = shortLog(sq), voids_count = sq.requests[position.qindex].voidsCount, failures_count = sq.requests[position.qindex].failuresCount, rewind_slot = point, sync_ident = sq.ident, - direction = sq.kind, - topics = "syncman" + topics = "sync" await sq.resetWait(point) - + res = getRetreatCount(sr.data.slot, point) + SyncPushResponse(code: pres.code, count: res, blck: pres.blck) except CancelledError as exc: sq.del(sr) raise exc @@ -1029,72 +1219,6 @@ proc push*[T]( except AsyncLockError: raiseAssert "Lock is not acquired" -proc checkResponse*[T](req: SyncRequest[T], - data: openArray[Slot]): Result[void, cstring] = - if len(data) == 0: - # Impossible to verify empty response. - return ok() - - if lenu64(data) > req.data.count: - # Number of blocks in response should be less or equal to number of - # requested blocks. - return err("Too many blocks received") - - var - slot = req.data.slot - rindex = 0'u64 - dindex = 0 - - while (rindex < req.data.count) and (dindex < len(data)): - if slot < data[dindex]: - discard - elif slot == data[dindex]: - inc(dindex) - else: - return err("Incorrect order or duplicate blocks found") - slot += 1'u64 - rindex += 1'u64 - - if dindex != len(data): - return err("Some of the blocks are outside the requested range") - - ok() - -proc checkBlobsResponse*[T]( - req: SyncRequest[T], - data: openArray[Slot], - maxBlobsPerBlockElectra: uint64): Result[void, cstring] = - if len(data) == 0: - # Impossible to verify empty response. - return ok() - - if lenu64(data) > (req.data.count * maxBlobsPerBlockElectra): - # Number of blobs in response should be less or equal to number of - # requested (blocks * MAX_BLOBS_PER_BLOCK_ELECTRA). - # NOTE: This is not strict check, proper check will be done in blobs - # validation. - return err("Too many blobs received") - - var - pslot = data[0] - counter = 0'u64 - for slot in data: - if slot notin req.data: - return err("Some of the blobs are not in requested range") - if slot < pslot: - return err("Incorrect order") - if slot == pslot: - inc(counter) - if counter > maxBlobsPerBlockElectra: - # NOTE: This is not strict check, proper check will be done in blobs - # validation. - return err("Number of blobs in the block exceeds the limit") - else: - counter = 1'u64 - pslot = slot - - ok() - proc len*[T](sq: SyncQueue[T]): uint64 {.inline.} = ## Returns number of slots left in queue ``sq``. case sq.kind @@ -1127,3 +1251,19 @@ proc progress*[T](sq: SyncQueue[T]): uint64 = ## How many useful slots we've synced so far, adjusting for how much has ## become obsolete by time movements sq.total() - len(sq) + +func running*[T](sq: SyncQueue[T]): bool = + ## Returns `true` when SyncQueue is in process. + if isNil(sq): + return false + + case sq.kind + of SyncQueueKind.Forward: + (sq.startSlot < sq.inpSlot) and (sq.finalSlot > sq.inpSlot) + of SyncQueueKind.Backward: + (sq.startSlot > sq.inpSlot) and (sq.finalSlot < sq.inpSlot) + +func started*[T](sq: SyncQueue[T]): bool = + ## Returns `true` if SyncQueue was started, e.g. internal counters changed + ## since starting state. 
+ sq.startSlot != sq.inpSlot diff --git a/beacon_chain/sync/sync_types.nim b/beacon_chain/sync/sync_types.nim index 60402fe882..05f5cfd6c5 100644 --- a/beacon_chain/sync/sync_types.nim +++ b/beacon_chain/sync/sync_types.nim @@ -11,13 +11,15 @@ import results, chronos, ".."/spec/[forks_light_client, signatures_batch], ".."/consensus_object_pools/[block_pools_types, blockchain_dag, attestation_pool, blockchain_list, + blob_quarantine, block_quarantine, consensus_manager], + ".."/gossip_processing/block_processor, ".."/validators/validator_monitor, ".."/[beacon_clock, conf], ".."/networking/eth2_network, - "."/sync_manager + "."/[sync_manager, sync_dag, block_buffer] -export results, chronos, block_pools_types, conf +export results, chronos, block_pools_types, conf, sync_dag type BlockDataChunk* = ref object @@ -52,8 +54,49 @@ type untrustedInProgress*: bool syncKind*: SyncKind + ColumnsPeerState* = object + usefulCount*: int + uselessCount*: int + distribution*: Table[ColumnIndex, int] + + SyncOverseer2* = object + network*: Eth2Node + consensusManager*: ref ConsensusManager + config*: BeaconNodeConf + getBeaconTimeFn*: GetBeaconTimeFn + beaconClock*: BeaconClock + loopFuture*: Future[void].Raising([]) + pool*: PeerPool[Peer, PeerId] + blockProcessor*: ref BlockProcessor + fblockBuffer*: BlocksRangeBuffer + bblockBuffer*: BlocksRangeBuffer + rblockBuffer*: BlocksRootBuffer + blockQuarantine*: ref Quarantine + blobQuarantine*: ref BlobQuarantine + columnQuarantine*: ref ColumnQuarantine + blockGossipBus*: AsyncEventQueue[EventBeaconBlockGossipPeerObject] + blocksQueueBus*: AsyncEventQueue[EventBeaconBlockObject] + blockFinalizationBus*: AsyncEventQueue[FinalizationInfoObject] + missingRoots*: HashSet[Eth2Digest] + avgSpeedCounter*: int + avgSpeed*: float + blocksChunkSize*: int + sidecarsChunkSize*: int + fqueue*: SyncQueue[Peer] + fsqueue*: SyncQueue[Peer] + bqueue*: SyncQueue[Peer] + bsqueue*: SyncQueue[Peer] + localPeerId*: PeerId + lastSeenCheckpoint*: Opt[Checkpoint] + lastSeenHead*: Opt[BlockId] + statusMessages*: array[2, string] + sdag*: SyncDag[Peer, PeerId] + columnsState*: ColumnsPeerState + SyncOverseerRef* = ref SyncOverseer + SyncOverseerRef2* = ref SyncOverseer2 + proc new*( t: typedesc[SyncOverseerRef], cm: ref ConsensusManager, @@ -64,7 +107,7 @@ proc new*( clock: BeaconClock, eq: AsyncEventQueue[ForkedLightClientHeader], pool: PeerPool[Peer, PeerId], - batchVerifier: ref BatchVerifier, + blockVerifier: BlockVerifier, forwardSync: SyncManager[Peer, PeerId], backwardSync: SyncManager[Peer, PeerId], untrustedSync: SyncManager[Peer, PeerId] @@ -78,7 +121,7 @@ proc new*( beaconClock: clock, eventQueue: eq, pool: pool, - batchVerifier: batchVerifier, + blockVerifier: BlockVerifier, forwardSync: forwardSync, backwardSync: backwardSync, untrustedSync: untrustedSync, @@ -90,3 +133,40 @@ proc syncInProgress*(overseer: SyncOverseerRef): bool = overseer.backwardSync.inProgress or overseer.untrustedSync.inProgress or overseer.untrustedInProgress + +proc new*( + t: typedesc[SyncOverseerRef2], + net: Eth2Node, + cm: ref ConsensusManager, + configuration: BeaconNodeConf, + bt: GetBeaconTimeFn, + clock: BeaconClock, + blockProcessor: ref BlockProcessor, + blockQuarantine: ref Quarantine, + blobQuarantine: ref BlobQuarantine, + columnQuarantine: ref ColumnQuarantine, + gossipQueue: AsyncEventQueue[EventBeaconBlockGossipPeerObject], + blocksQueue: AsyncEventQueue[EventBeaconBlockObject], + finalizationQueue: AsyncEventQueue[FinalizationInfoObject], + blocksChunkSize = int(SLOTS_PER_EPOCH), + 
sidecarsChunkSize = int(SLOTS_PER_EPOCH) +): SyncOverseerRef2 = + SyncOverseerRef2( + network: net, + consensusManager: cm, + config: configuration, + getBeaconTimeFn: bt, + beaconClock: clock, + pool: net.peerPool, + blockProcessor: blockProcessor, + blobQuarantine: blobQuarantine, + columnQuarantine: columnQuarantine, + blockQuarantine: blockQuarantine, + blockGossipBus: gossipQueue, + blocksQueueBus: blocksQueue, + blockFinalizationBus: finalizationQueue, + blocksChunkSize: blocksChunkSize, + sidecarsChunkSize: sidecarsChunkSize, + localPeerId: net.peerId(), + sdag: SyncDag.init(Peer, PeerId), + ) diff --git a/beacon_chain/trusted_node_sync.nim b/beacon_chain/trusted_node_sync.nim index 259d60b1a8..f930d84329 100644 --- a/beacon_chain/trusted_node_sync.nim +++ b/beacon_chain/trusted_node_sync.nim @@ -468,7 +468,7 @@ proc doTrustedNodeSync*( error "Got invalid block from trusted node - is it on the right network?", blck = shortLog(forkyBlck), err = res.error() quit 1 - of VerifierError.Duplicate: + of VerifierError.Duplicate, VerifierError.MissingSidecars: discard # Download blocks backwards from the backfill slot, ie the first slot for diff --git a/tests/all_tests.nim b/tests/all_tests.nim index d73ac785ad..6c331b8521 100644 --- a/tests/all_tests.nim +++ b/tests/all_tests.nim @@ -63,6 +63,7 @@ import # Unit test ./test_beacon_chain_file, ./test_mev_calls, ./test_column_map, + ./test_block_buffer, ./test_quarantine, ./test_keymanager_api # currently has to run after test_remote_keystore diff --git a/tests/test_block_buffer.nim b/tests/test_block_buffer.nim new file mode 100644 index 0000000000..ac454b5fb2 --- /dev/null +++ b/tests/test_block_buffer.nim @@ -0,0 +1,767 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
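Before the test bodies below, a hedged sketch of the contract they exercise, assuming a minimal forward-only stand-in for `BlocksRangeBuffer` (the real buffer also accepts re-insertion at earlier slots, truncating everything after the insertion point, as the insertion tests show):

```nim
# Editorial sketch: a block is accepted when its parent_root matches the
# current head of the buffered chain, otherwise MissingParent is returned.
import std/options

type
  MiniBlock = object
    slot: uint64
    root, parentRoot: string
  AddError = enum MissingParent

proc tryAdd(chain: var seq[MiniBlock], blck: MiniBlock): Option[AddError] =
  if chain.len > 0 and blck.parentRoot != chain[^1].root:
    return some(MissingParent)
  chain.add(blck)
  none(AddError)

when isMainModule:
  var chain: seq[MiniBlock]
  discard chain.tryAdd(MiniBlock(slot: 1, root: "a", parentRoot: "0"))
  assert chain.tryAdd(MiniBlock(slot: 2, root: "b", parentRoot: "a")).isNone
  assert chain.tryAdd(MiniBlock(slot: 3, root: "c", parentRoot: "x")).isSome
```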
+ +{.push raises: [].} +{.used.} + +import + unittest2, + ../beacon_chain/spec/datatypes/constants, + ../beacon_chain/spec/forks, + ../beacon_chain/sync/[block_buffer, sync_queue] + +type + SlotRange = object + slota, slotb: Slot + +func init(t: typedesc[SlotRange], a, b: Slot): SlotRange = + SlotRange(slota: a, slotb: b) + +iterator items(srange: SlotRange): Slot = + if srange.slota <= srange.slotb: + for slot in countup(uint64(srange.slota), uint64(srange.slotb)): + yield Slot(slot) + else: + for slot in countdown(uint64(srange.slota), uint64(srange.slotb)): + yield Slot(slot) + +proc createRoot(i: int): Eth2Digest = + var res = Eth2Digest() + res.data[0] = byte(i and 255) + res + +proc createBlock( + slot: Slot, + root, parent_root: Eth2Digest +): ref ForkedSignedBeaconBlock = + newClone ForkedSignedBeaconBlock.init( + deneb.SignedBeaconBlock( + message: deneb.BeaconBlock(slot: slot, parent_root: parent_root), + root: root)) + +suite "BlocksRangeBuffer test suite": + test "Add and query blocks test [forward]": + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Forward) + const TestChain = [ + (Slot(1923340), createRoot(1), createRoot(0)), + (Slot(1923341), createRoot(2), createRoot(1)), + (Slot(1923342), createRoot(3), createRoot(2)), + (Slot(1923345), createRoot(4), createRoot(3)), + (Slot(1923350), createRoot(5), createRoot(4)) + ] + for vector in TestChain: + check buffer.add(createBlock(vector[0], vector[1], vector[2])).isOk() + for slot in SlotRange.init(Slot(1923330), Slot(1923339)): + check isNil(buffer[slot]) == true + check: + isNil(buffer[GENESIS_SLOT]) == true + isNil(buffer[Slot(1923340)]) == false + buffer[Slot(1923340)][].slot == Slot(1923340) + isNil(buffer[Slot(1923341)]) == false + buffer[Slot(1923341)][].slot == Slot(1923341) + isNil(buffer[Slot(1923342)]) == false + buffer[Slot(1923342)][].slot == Slot(1923342) + isNil(buffer[Slot(1923343)]) == true + isNil(buffer[Slot(1923344)]) == true + isNil(buffer[Slot(1923345)]) == false + buffer[Slot(1923345)][].slot == Slot(1923345) + isNil(buffer[Slot(1923346)]) == true + isNil(buffer[Slot(1923347)]) == true + isNil(buffer[Slot(1923348)]) == true + isNil(buffer[Slot(1923349)]) == true + isNil(buffer[Slot(1923350)]) == false + buffer[Slot(1923350)][].slot == Slot(1923350) + isNil(buffer[Slot(1923351)]) == true + isNil(buffer[FAR_FUTURE_SLOT]) == true + + test "Add and query blocks test [backward]": + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Backward) + const TestChain = [ + (Slot(1923340), createRoot(5), createRoot(4)), + (Slot(1923339), createRoot(4), createRoot(3)), + (Slot(1923338), createRoot(3), createRoot(2)), + (Slot(1923335), createRoot(2), createRoot(1)), + (Slot(1923330), createRoot(1), createRoot(0)) + ] + for vector in TestChain: + let res = buffer.add(createBlock(vector[0], vector[1], vector[2])) + check res.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923341)): + check isNil(buffer[slot]) == true + check: + isNil(buffer[FAR_FUTURE_SLOT]) == true + isNil(buffer[Slot(1923340)]) == false + buffer[Slot(1923340)][].slot == Slot(1923340) + isNil(buffer[Slot(1923339)]) == false + buffer[Slot(1923339)][].slot == Slot(1923339) + isNil(buffer[Slot(1923338)]) == false + buffer[Slot(1923338)][].slot == Slot(1923338) + isNil(buffer[Slot(1923337)]) == true + isNil(buffer[Slot(1923336)]) == true + isNil(buffer[Slot(1923335)]) == false + buffer[Slot(1923335)][].slot == Slot(1923335) + isNil(buffer[Slot(1923334)]) == true + isNil(buffer[Slot(1923333)]) == true + isNil(buffer[Slot(1923332)]) == true + 
isNil(buffer[Slot(1923331)]) == true + isNil(buffer[Slot(1923330)]) == false + buffer[Slot(1923330)][].slot == Slot(1923330) + isNil(buffer[Slot(1923329)]) == true + isNil(buffer[GENESIS_SLOT]) == true + + test "Block insertion test [forward]": + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Forward) + const TestChain = [ + (Slot(1923340), createRoot(1), createRoot(0)), + (Slot(1923341), createRoot(2), createRoot(1)), + (Slot(1923342), createRoot(3), createRoot(2)), + (Slot(1923345), createRoot(4), createRoot(3)), + (Slot(1923350), createRoot(5), createRoot(4)) + ] + for vector in TestChain: + check buffer.add(createBlock(vector[0], vector[1], vector[2])).isOk() + + check: + len(buffer) == 11 + + let r1 = + buffer.add(createBlock(Slot(1923350), createRoot(6), createRoot(0))) + check: + r1.isErr() == true + r1.error == VerifierError.MissingParent + + let r2 = + buffer.add(createBlock(Slot(1923350), createRoot(7), createRoot(4))) + check: + r2.isOk() == true + buffer[Slot(1923350)][].root == createRoot(7) + len(buffer) == 11 + + let r3 = + buffer.add(createBlock(Slot(1923349), createRoot(8), createRoot(3))) + check: + r3.isErr() == true + r3.error == VerifierError.MissingParent + + let r4 = + buffer.add(createBlock(Slot(1923349), createRoot(8), createRoot(4))) + check: + r4.isOk() == true + isNil(buffer[Slot(1923350)]) == true + buffer[Slot(1923349)][].root == createRoot(8) + len(buffer) == 10 + + let r5 = + buffer.add(createBlock(Slot(1923346), createRoot(9), createRoot(2))) + check: + r5.isErr() == true + r5.error == VerifierError.MissingParent + + let r6 = + buffer.add(createBlock(Slot(1923346), createRoot(9), createRoot(4))) + check r6.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923347)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923346)][].root == createRoot(9) + len(buffer) == 7 + + let r7 = + buffer.add(createBlock(Slot(1923345), createRoot(10), createRoot(3))) + check r7.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923346)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923345)][].root == createRoot(10) + len(buffer) == 6 + + let r8 = + buffer.add(createBlock(Slot(1923345), createRoot(11), createRoot(3))) + check r8.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923346)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923345)][].root == createRoot(11) + len(buffer) == 6 + + let r9 = + buffer.add(createBlock(Slot(1923344), createRoot(12), createRoot(3))) + check r9.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923345)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923344)][].root == createRoot(12) + len(buffer) == 5 + + let r10 = + buffer.add(createBlock(Slot(1923343), createRoot(13), createRoot(3))) + check r10.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923344)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923343)][].root == createRoot(13) + len(buffer) == 4 + + let r11 = + buffer.add(createBlock(Slot(1923342), createRoot(14), createRoot(2))) + check r11.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923343)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923342)][].root == createRoot(14) + len(buffer) == 3 + + let r12 = + buffer.add(createBlock(Slot(1923341), createRoot(15), createRoot(1))) + check r12.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923342)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923341)][].root == createRoot(15) + 
len(buffer) == 2 + + let r13 = + buffer.add(createBlock(Slot(1923340), createRoot(16), createRoot(0))) + check r13.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923341)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923340)][].root == createRoot(16) + len(buffer) == 1 + + let r14 = + buffer.add(createBlock(Slot(1923339), createRoot(17), createRoot(0))) + check r14.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923340)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923339)][].root == createRoot(17) + len(buffer) == 1 + + let r15 = + buffer.add(createBlock(Slot(1923335), createRoot(18), createRoot(0))) + check r15.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923336)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923335)][].root == createRoot(18) + len(buffer) == 1 + + let r16 = + buffer.add(createBlock(Slot(1923330), createRoot(19), createRoot(0))) + check r16.isOk() == true + for slot in SlotRange.init(Slot(1923350), Slot(1923331)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923330)][].root == createRoot(19) + len(buffer) == 1 + + let r17 = + buffer.add(createBlock(Slot(1923329), createRoot(20), createRoot(0))) + check: + r17.isOk() + len(buffer) == 1 + buffer[Slot(1923329)][].root == createRoot(20) + + test "Block insertion test [backward]": + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Backward) + const TestChain = [ + (Slot(1923340), createRoot(5), createRoot(4)), + (Slot(1923339), createRoot(4), createRoot(3)), + (Slot(1923338), createRoot(3), createRoot(2)), + (Slot(1923335), createRoot(2), createRoot(1)), + (Slot(1923330), createRoot(1), createRoot(0)) + ] + for vector in TestChain: + check buffer.add(createBlock(vector[0], vector[1], vector[2])).isOk() + + check: + len(buffer) == 11 + + let r1 = + buffer.add(createBlock(Slot(1923330), createRoot(6), createRoot(0))) + check: + r1.isErr() == true + r1.error == VerifierError.MissingParent + + let r2 = + buffer.add(createBlock(Slot(1923330), createRoot(1), createRoot(10))) + check: + r2.isOk() == true + buffer[Slot(1923330)][].root == createRoot(1) + len(buffer) == 11 + + let r3 = + buffer.add(createBlock(Slot(1923331), createRoot(8), createRoot(3))) + check: + r3.isErr() == true + r3.error == VerifierError.MissingParent + + let r4 = + buffer.add(createBlock(Slot(1923331), createRoot(1), createRoot(11))) + check: + r4.isOk() == true + isNil(buffer[Slot(1923330)]) == true + buffer[Slot(1923331)][].root == createRoot(1) + len(buffer) == 10 + + let r5 = + buffer.add(createBlock(Slot(1923334), createRoot(9), createRoot(2))) + check: + r5.isErr() == true + r5.error == VerifierError.MissingParent + + let r6 = + buffer.add(createBlock(Slot(1923334), createRoot(1), createRoot(12))) + check r6.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923333)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923334)][].root == createRoot(1) + len(buffer) == 7 + + let r7 = + buffer.add(createBlock(Slot(1923335), createRoot(2), createRoot(13))) + check r7.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923334)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923335)][].root == createRoot(2) + len(buffer) == 6 + + let r8 = + buffer.add(createBlock(Slot(1923335), createRoot(2), createRoot(14))) + check r8.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923334)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923335)][].root == createRoot(2) + 
len(buffer) == 6 + + let r9 = + buffer.add(createBlock(Slot(1923336), createRoot(2), createRoot(15))) + check r9.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923335)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923336)][].root == createRoot(2) + len(buffer) == 5 + + let r10 = + buffer.add(createBlock(Slot(1923337), createRoot(2), createRoot(16))) + check r10.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923336)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923337)][].root == createRoot(2) + len(buffer) == 4 + + let r11 = + buffer.add(createBlock(Slot(1923338), createRoot(3), createRoot(17))) + check r11.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923337)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923338)][].root == createRoot(3) + len(buffer) == 3 + + let r12 = + buffer.add(createBlock(Slot(1923339), createRoot(4), createRoot(18))) + check r12.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923338)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923339)][].root == createRoot(4) + len(buffer) == 2 + + let r13 = + buffer.add(createBlock(Slot(1923340), createRoot(16), createRoot(0))) + check r13.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923339)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923340)][].root == createRoot(16) + len(buffer) == 1 + + let r14 = + buffer.add(createBlock(Slot(1923341), createRoot(17), createRoot(0))) + check r14.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923340)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923341)][].root == createRoot(17) + len(buffer) == 1 + + let r15 = + buffer.add(createBlock(Slot(1923345), createRoot(18), createRoot(0))) + check r15.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923344)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923345)][].root == createRoot(18) + len(buffer) == 1 + + let r16 = + buffer.add(createBlock(Slot(1923350), createRoot(19), createRoot(0))) + check r16.isOk() == true + for slot in SlotRange.init(Slot(1923330), Slot(1923349)): + check isNil(buffer[slot]) == true + check: + buffer[Slot(1923350)][].root == createRoot(19) + len(buffer) == 1 + + let r17 = + buffer.add(createBlock(Slot(1923351), createRoot(20), createRoot(0))) + check: + r17.isOk() + len(buffer) == 1 + buffer[Slot(1923351)][].root == createRoot(20) + + test "Buffer advance test [forward]": + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Forward) + const TestChain = [ + (Slot(1923340), createRoot(1), createRoot(0)), + (Slot(1923341), createRoot(2), createRoot(1)), + (Slot(1923342), createRoot(3), createRoot(2)), + (Slot(1923345), createRoot(4), createRoot(3)), + (Slot(1923350), createRoot(5), createRoot(4)) + ] + const TestVectors = [ + (Slot(1923320), Slot(1923340), Slot(1923350), 11), + (Slot(1923330), Slot(1923340), Slot(1923350), 11), + (Slot(1923335), Slot(1923340), Slot(1923350), 11), + (Slot(1923340), Slot(1923340), Slot(1923350), 11), + (Slot(1923341), Slot(1923341), Slot(1923350), 10), + (Slot(1923342), Slot(1923342), Slot(1923350), 9), + (Slot(1923343), Slot(1923345), Slot(1923350), 6), + (Slot(1923344), Slot(1923345), Slot(1923350), 6), + (Slot(1923345), Slot(1923345), Slot(1923350), 6), + (Slot(1923346), Slot(1923350), Slot(1923350), 1), + (Slot(1923347), Slot(1923350), Slot(1923350), 1), + (Slot(1923348), Slot(1923350), Slot(1923350), 1), + (Slot(1923349), Slot(1923350), Slot(1923350), 1), + 
(Slot(1923350), Slot(1923350), Slot(1923350), 1) + ] + for vector in TestChain: + check buffer.add(createBlock(vector[0], vector[1], vector[2])).isOk() + + check: + buffer.startSlot == Slot(1923340) + buffer.lastSlot == Slot(1923350) + len(buffer) == 11 + + for vector in TestVectors: + buffer.advance(vector[0]) + check: + buffer.startSlot == vector[1] + buffer.lastSlot == vector[2] + len(buffer) == vector[3] + + buffer.advance(Slot(1923351)) + check: + len(buffer) == 0 + + test "Buffer advance test [backward]": + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Backward) + const TestChain = [ + (Slot(1923340), createRoot(5), createRoot(4)), + (Slot(1923339), createRoot(4), createRoot(3)), + (Slot(1923338), createRoot(3), createRoot(2)), + (Slot(1923335), createRoot(2), createRoot(1)), + (Slot(1923330), createRoot(1), createRoot(0)) + ] + const TestVectors = [ + (Slot(1923360), Slot(1923340), Slot(1923330), 11), + (Slot(1923350), Slot(1923340), Slot(1923330), 11), + (Slot(1923345), Slot(1923340), Slot(1923330), 11), + (Slot(1923340), Slot(1923340), Slot(1923330), 11), + (Slot(1923339), Slot(1923339), Slot(1923330), 10), + (Slot(1923338), Slot(1923338), Slot(1923330), 9), + (Slot(1923337), Slot(1923335), Slot(1923330), 6), + (Slot(1923336), Slot(1923335), Slot(1923330), 6), + (Slot(1923335), Slot(1923335), Slot(1923330), 6), + (Slot(1923334), Slot(1923330), Slot(1923330), 1), + (Slot(1923333), Slot(1923330), Slot(1923330), 1), + (Slot(1923332), Slot(1923330), Slot(1923330), 1), + (Slot(1923331), Slot(1923330), Slot(1923330), 1), + (Slot(1923330), Slot(1923330), Slot(1923330), 1) + ] + + for vector in TestChain: + check buffer.add(createBlock(vector[0], vector[1], vector[2])).isOk() + + check: + buffer.startSlot == Slot(1923340) + buffer.lastSlot == Slot(1923330) + len(buffer) == 11 + + for vector in TestVectors: + buffer.advance(vector[0]) + check: + buffer.startSlot == vector[1] + buffer.lastSlot == vector[2] + len(buffer) == vector[3] + + buffer.advance(Slot(1923329)) + check: + len(buffer) == 0 + + test "Buffer invalidate test [forward]": + const TestChain = [ + (Slot(1923340), createRoot(1), createRoot(0)), + (Slot(1923341), createRoot(2), createRoot(1)), + (Slot(1923342), createRoot(3), createRoot(2)), + (Slot(1923345), createRoot(4), createRoot(3)), + (Slot(1923350), createRoot(5), createRoot(4)) + ] + const TestVectors = [ + (GENESIS_SLOT, GENESIS_SLOT, GENESIS_SLOT, 0), + (Slot(1923339), GENESIS_SLOT, GENESIS_SLOT, 0), + (Slot(1923340), GENESIS_SLOT, GENESIS_SLOT, 0), + (Slot(1923341), Slot(1923340), Slot(1923340), 1), + (Slot(1923342), Slot(1923340), Slot(1923341), 2), + (Slot(1923343), Slot(1923340), Slot(1923342), 3), + (Slot(1923344), Slot(1923340), Slot(1923342), 3), + (Slot(1923345), Slot(1923340), Slot(1923342), 3), + (Slot(1923346), Slot(1923340), Slot(1923345), 6), + (Slot(1923347), Slot(1923340), Slot(1923345), 6), + (Slot(1923348), Slot(1923340), Slot(1923345), 6), + (Slot(1923349), Slot(1923340), Slot(1923345), 6), + (Slot(1923350), Slot(1923340), Slot(1923345), 6), + (Slot(1923351), Slot(1923340), Slot(1923350), 11) + ] + for vector in TestVectors: + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Forward) + for blck in TestChain: + check buffer.add(createBlock(blck[0], blck[1], blck[2])).isOk() + + check: + buffer.startSlot == Slot(1923340) + buffer.lastSlot == Slot(1923350) + len(buffer) == 11 + + buffer.invalidate(vector[0]) + check: + len(buffer) == vector[3] + if len(buffer) > 0: + check: + buffer.startSlot == vector[1] + buffer.lastSlot == vector[2] + + test 
"Buffer invalidate test [backward]": + const TestChain = [ + (Slot(1923340), createRoot(5), createRoot(4)), + (Slot(1923339), createRoot(4), createRoot(3)), + (Slot(1923338), createRoot(3), createRoot(2)), + (Slot(1923335), createRoot(2), createRoot(1)), + (Slot(1923330), createRoot(1), createRoot(0)) + ] + const TestVectors = [ + (FAR_FUTURE_SLOT, FAR_FUTURE_SLOT, FAR_FUTURE_SLOT, 0), + (Slot(1923341), FAR_FUTURE_SLOT, FAR_FUTURE_SLOT, 0), + (Slot(1923340), FAR_FUTURE_SLOT, FAR_FUTURE_SLOT, 0), + (Slot(1923339), Slot(1923340), Slot(1923340), 1), + (Slot(1923338), Slot(1923340), Slot(1923339), 2), + (Slot(1923337), Slot(1923340), Slot(1923338), 3), + (Slot(1923336), Slot(1923340), Slot(1923338), 3), + (Slot(1923335), Slot(1923340), Slot(1923338), 3), + (Slot(1923334), Slot(1923340), Slot(1923335), 6), + (Slot(1923333), Slot(1923340), Slot(1923335), 6), + (Slot(1923332), Slot(1923340), Slot(1923335), 6), + (Slot(1923331), Slot(1923340), Slot(1923335), 6), + (Slot(1923330), Slot(1923340), Slot(1923335), 6), + (Slot(1923329), Slot(1923340), Slot(1923330), 11) + ] + for vector in TestVectors: + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Backward) + for blck in TestChain: + check buffer.add(createBlock(blck[0], blck[1], blck[2])).isOk() + + check: + buffer.startSlot == Slot(1923340) + buffer.lastSlot == Slot(1923330) + len(buffer) == 11 + + buffer.invalidate(vector[0]) + check: + len(buffer) == vector[3] + if len(buffer) > 0: + check: + buffer.startSlot == vector[1] + buffer.lastSlot == vector[2] + + test "Range peek test [forward]": + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Forward) + const TestChain = [ + (Slot(1923340), createRoot(1), createRoot(0)), + (Slot(1923341), createRoot(2), createRoot(1)), + (Slot(1923342), createRoot(3), createRoot(2)), + (Slot(1923345), createRoot(4), createRoot(3)), + (Slot(1923350), createRoot(5), createRoot(4)) + ] + const TestVectors = [ + (Slot(1923320), Slot(1923360), 5, + @[Slot(1923340), Slot(1923341), Slot(1923342), Slot(1923345), + Slot(1923350)]), + (Slot(1923330), Slot(1923360), 5, + @[Slot(1923340), Slot(1923341), Slot(1923342), Slot(1923345), + Slot(1923350)]), + (Slot(1923340), Slot(1923360), 5, + @[Slot(1923340), Slot(1923341), Slot(1923342), Slot(1923345), + Slot(1923350)]), + (Slot(1923341), Slot(1923360), 4, + @[Slot(1923341), Slot(1923342), Slot(1923345), Slot(1923350)]), + (Slot(1923342), Slot(1923360), 3, + @[Slot(1923342), Slot(1923345), Slot(1923350)]), + (Slot(1923343), Slot(1923360), 2, @[Slot(1923345), Slot(1923350)]), + (Slot(1923344), Slot(1923350), 2, @[Slot(1923345), Slot(1923350)]), + (Slot(1923345), Slot(1923345), 1, @[Slot(1923345)]), + (Slot(1923346), Slot(1923350), 1, @[Slot(1923350)]), + (Slot(1923350), Slot(1923350), 1, @[Slot(1923350)]), + (Slot(1923350), Slot(1923360), 1, @[Slot(1923350)]), + (Slot(1923351), Slot(1923360), 0, default(seq[Slot])), + (Slot(1923320), Slot(1923329), 0, default(seq[Slot])), + (Slot(1923320), Slot(1923340), 1, @[Slot(1923340)]), + (Slot(1923330), Slot(1923341), 2, @[Slot(1923340), Slot(1923341)]), + (Slot(1923341), Slot(1923342), 2, @[Slot(1923341), Slot(1923342)]), + (Slot(1923341), Slot(1923344), 2, @[Slot(1923341), Slot(1923342)]) + ] + for vector in TestChain: + check buffer.add(createBlock(vector[0], vector[1], vector[2])).isOk() + + for vector in TestVectors: + let + count = int(vector[1] - vector[0] + 1) + res = buffer.peekRange(SyncRange.init(vector[0], uint64(count))) + check len(res) == vector[2] + for i in 0 ..< len(vector[3]): + check res[i][].slot == vector[3][i] + + 
test "Range peek test [backward]": + var buffer = BlocksRangeBuffer.init(SyncQueueKind.Backward) + const TestChain = [ + (Slot(1923340), createRoot(5), createRoot(4)), + (Slot(1923339), createRoot(4), createRoot(3)), + (Slot(1923338), createRoot(3), createRoot(2)), + (Slot(1923335), createRoot(2), createRoot(1)), + (Slot(1923330), createRoot(1), createRoot(0)) + ] + const TestVectors = [ + (Slot(1923320), Slot(1923360), 5, + @[Slot(1923330), Slot(1923335), Slot(1923338), Slot(1923339), + Slot(1923340)]), + (Slot(1923330), Slot(1923360), 5, + @[Slot(1923330), Slot(1923335), Slot(1923338), Slot(1923339), + Slot(1923340)]), + (Slot(1923330), Slot(1923360), 5, + @[Slot(1923330), Slot(1923335), Slot(1923338), Slot(1923339), + Slot(1923340)]), + (Slot(1923331), Slot(1923360), 4, + @[Slot(1923335), Slot(1923338), Slot(1923339), Slot(1923340)]), + (Slot(1923332), Slot(1923360), 4, + @[Slot(1923335), Slot(1923338), Slot(1923339), Slot(1923340)]), + (Slot(1923333), Slot(1923360), 4, + @[Slot(1923335), Slot(1923338), Slot(1923339), Slot(1923340)]), + (Slot(1923334), Slot(1923360), 4, + @[Slot(1923335), Slot(1923338), Slot(1923339), Slot(1923340)]), + (Slot(1923335), Slot(1923360), 4, + @[Slot(1923335), Slot(1923338), Slot(1923339), Slot(1923340)]), + (Slot(1923336), Slot(1923360), 3, + @[Slot(1923338), Slot(1923339), Slot(1923340)]), + (Slot(1923337), Slot(1923360), 3, + @[Slot(1923338), Slot(1923339), Slot(1923340)]), + (Slot(1923338), Slot(1923360), 3, + @[Slot(1923338), Slot(1923339), Slot(1923340)]), + (Slot(1923339), Slot(1923360), 2, @[Slot(1923339), Slot(1923340)]), + (Slot(1923340), Slot(1923360), 1, @[Slot(1923340)]), + (Slot(1923341), Slot(1923341), 0, default(seq[Slot])), + (Slot(1923320), Slot(1923320), 0, default(seq[Slot])), + (Slot(1923360), Slot(1923360), 0, default(seq[Slot])), + (Slot(1923310), Slot(1923330), 1, @[Slot(1923330)]), + (Slot(1923340), Slot(1923340), 1, @[Slot(1923340)]), + (Slot(1923340), Slot(1923341), 1, @[Slot(1923340)]), + (Slot(1923336), Slot(1923338), 1, @[Slot(1923338)]), + (Slot(1923335), Slot(1923338), 2, @[Slot(1923335), Slot(1923338)]), + (Slot(1923337), Slot(1923339), 2, @[Slot(1923338), Slot(1923339)]), + (Slot(1923339), Slot(1923360), 2, @[Slot(1923339), Slot(1923340)]) + ] + + for vector in TestChain: + check buffer.add(createBlock(vector[0], vector[1], vector[2])).isOk() + + for vector in TestVectors: + let + count = int(vector[1] - vector[0] + 1) + res = buffer.peekRange(SyncRange.init(vector[0], uint64(count))) + check len(res) == vector[2] + for i in 0 ..< len(vector[3]): + check res[i][].slot == vector[3][i] + + test "Range peek real test cases [forward]": + var + buffer1 = BlocksRangeBuffer.init(SyncQueueKind.Forward) + buffer2 = BlocksRangeBuffer.init(SyncQueueKind.Forward) + + const TestChain = [ + (Slot(1254722), createRoot(1), createRoot(0)), + (Slot(1254723), createRoot(2), createRoot(1)), + (Slot(1254724), createRoot(3), createRoot(2)), + (Slot(1254725), createRoot(4), createRoot(3)), + (Slot(1254726), createRoot(5), createRoot(4)), + (Slot(1254727), createRoot(6), createRoot(5)), + (Slot(1254728), createRoot(7), createRoot(6)), + (Slot(1254729), createRoot(8), createRoot(7)), + (Slot(1254731), createRoot(9), createRoot(8)), + (Slot(1254732), createRoot(10), createRoot(9)), + (Slot(1254733), createRoot(11), createRoot(10)), + (Slot(1254734), createRoot(12), createRoot(11)), + (Slot(1254735), createRoot(13), createRoot(12)), + (Slot(1254736), createRoot(14), createRoot(13)), + (Slot(1254737), createRoot(15), createRoot(14)), + (Slot(1254738), 
createRoot(16), createRoot(15)), + (Slot(1254739), createRoot(17), createRoot(16)), + (Slot(1254740), createRoot(18), createRoot(17)), + (Slot(1254741), createRoot(19), createRoot(18)), + (Slot(1254743), createRoot(20), createRoot(19)), + (Slot(1254745), createRoot(21), createRoot(20)), + (Slot(1254746), createRoot(22), createRoot(21)), + (Slot(1254747), createRoot(23), createRoot(22)), + (Slot(1254748), createRoot(24), createRoot(23)), + (Slot(1254749), createRoot(25), createRoot(24)), + (Slot(1254750), createRoot(26), createRoot(25)), + (Slot(1254752), createRoot(27), createRoot(26)), + (Slot(1254753), createRoot(28), createRoot(27)) + ] + + const TestVectors = [ + (Slot(1254720), Slot(1254751), 26, + @[Slot(1254722), Slot(1254723), Slot(1254724), Slot(1254725), + Slot(1254726), Slot(1254727), Slot(1254728), Slot(1254729), + Slot(1254731), Slot(1254732), Slot(1254733), Slot(1254734), + Slot(1254735), Slot(1254736), Slot(1254737), Slot(1254738), + Slot(1254739), Slot(1254740), Slot(1254741), Slot(1254743), + Slot(1254745), Slot(1254746), Slot(1254747), Slot(1254748), + Slot(1254749), Slot(1254750)]) + ] + + for vector in TestChain: + check buffer1.add(createBlock(vector[0], vector[1], vector[2])).isOk() + check buffer2.add(createBlock(vector[0], vector[1], vector[2])).isOk() + + for vector in TestVectors: + let + count = int(vector[1] - vector[0] + 1) + res1 = buffer1.peekRange(SyncRange.init(vector[0], uint64(count))) + res2 = buffer2.peekRange(SyncRange.init(vector[0], uint64(count))) + check: + len(res1) == vector[2] + len(res2) == vector[2] + + for i in 0 ..< len(res1): + check res1[i][].slot == vector[3][i] + for i in 0 ..< len(res2): + check res2[i][].slot == vector[3][i] diff --git a/tests/test_keymanager_api.nim b/tests/test_keymanager_api.nim index 287a1098da..4cc4375f6d 100644 --- a/tests/test_keymanager_api.nim +++ b/tests/test_keymanager_api.nim @@ -2080,7 +2080,7 @@ let for topicName in [ "libp2p", "gossipsub", "gossip_eth2", "message_router", "batch_validation", - "syncpool", "syncman", "fork_choice", "attpool", "val_pool", "consens", + "syncpool", "sync", "fork_choice", "attpool", "val_pool", "consens", "state_transition"]: doAssert setTopicState(topicName, Disabled) diff --git a/tests/test_sync_manager.nim b/tests/test_sync_manager.nim index 23c7afc4e8..835f379584 100644 --- a/tests/test_sync_manager.nim +++ b/tests/test_sync_manager.nim @@ -8,13 +8,12 @@ {.push raises: [], gcsafe.} {.used.} -import std/[strutils, sequtils] +import std/strutils import unittest2 import chronos, stew/base10, chronos/unittest2/asynctests import ../beacon_chain/networking/peer_scores import ../beacon_chain/gossip_processing/block_processor, - ../beacon_chain/sync/sync_manager, - ../beacon_chain/sync/sync_queue, + ../beacon_chain/sync/[sync_manager, sync_queue, response_utils], ../beacon_chain/spec/forks type @@ -61,6 +60,32 @@ func createChain(slots: Slice[Slot]): seq[ref ForkedSignedBeaconBlock] = res.add(item) res +func createDigest(data: int): Eth2Digest = + var res = Eth2Digest() + let tmp = uint64(data).toBytesBE() + copyMem(addr res.data[0], addr tmp[0], 8) + res + +func createChain(slots: openArray[Slot]): seq[ref ForkedSignedBeaconBlock] = + var + res: seq[ref ForkedSignedBeaconBlock] + root = 0 + + for slot in slots: + let item = newClone ForkedSignedBeaconBlock(kind: ConsensusFork.Deneb) + item[].denebData.message.slot = slot + if root == 0: + item[].denebData.root = createDigest(1) + item[].denebData.message.parent_root = createDigest(0) + inc(root) + else: + let prev_root = root + 
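+      # Every later block gets a freshly numbered root and points its
+      # parent_root at the previous block's root, keeping the fabricated
+      # chain linked.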
inc(root) + item[].denebData.root = createDigest(root) + item[].denebData.message.parent_root = createDigest(prev_root) + res.add(item) + res + proc createChain(srange: SyncRange): seq[ref ForkedSignedBeaconBlock] = createChain(srange.slot .. (srange.slot + srange.count - 1)) @@ -100,15 +125,14 @@ func createBlobs( func collector(queue: AsyncQueue[BlockEntry]): BlockVerifier = proc verify( - signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], + signedBlock: ref ForkedSignedBeaconBlock, maybeFinalized: bool ): Future[Result[void, VerifierError]] {. async: (raises: [CancelledError], raw: true).} = let fut = Future[Result[void, VerifierError]].Raising([CancelledError]).init() try: - queue.addLastNoWait(BlockEntry(blck: signedBlock, resfut: fut)) + queue.addLastNoWait(BlockEntry(blck: signedBlock[], resfut: fut)) except CatchableError as exc: raiseAssert exc.msg fut @@ -160,8 +184,8 @@ proc setupVerifier( (collector(aq), verifier(aq)) suite "SyncManager test suite": - for kind in [SyncQueueKind.Forward, SyncQueueKind.Backward]: - asyncTest "[SyncQueue# & " & $kind & "] Smoke [single peer] test": + for kind in [SyncQueueKind.Backward]: + asyncTest "[SyncQueue#" & $kind & "] Smoke [single peer] test": # Four ranges was distributed to single peer only. let scenario = [ @@ -195,30 +219,33 @@ suite "SyncManager test suite": d3 = createChain(r3.data) let - f1 = sq.push(r1, d1, Opt.none(seq[BlobSidecars])) - f2 = sq.push(r2, d2, Opt.none(seq[BlobSidecars])) - f3 = sq.push(r3, d3, Opt.none(seq[BlobSidecars])) + f1 = sq.push(r1, d1) + f2 = sq.push(r2, d2) + f3 = sq.push(r3, d3) check: f1.finished == false f2.finished == false f3.finished == false - await noCancel f1 + check: + (await noCancel f1).count == 32 check: f1.finished == true f2.finished == false f3.finished == false - await noCancel f2 + check: + (await noCancel f2).count == 32 check: f1.finished == true f2.finished == true f3.finished == false - await noCancel f3 + check: + (await noCancel f3).count == 32 check: f1.finished == true @@ -228,9 +255,10 @@ suite "SyncManager test suite": let r4 = sq.pop(Slot(127), peer) d4 = createChain(r4.data) - f4 = sq.push(r4, d4, Opt.none(seq[BlobSidecars])) + f4 = sq.push(r4, d4) - await noCancel f4 + check: + (await noCancel f4).count == 32 check: f1.finished == true @@ -240,7 +268,7 @@ suite "SyncManager test suite": await noCancel wait(verifier.verifier, 2.seconds) - asyncTest "[SyncQueue# & " & $kind & "] Smoke [3 peers] test": + asyncTest "[SyncQueue#" & $kind & "] Smoke [3 peers] test": # Three ranges was distributed between 3 peers, every range is going to # be pushed by all peers. 
let @@ -289,19 +317,21 @@ suite "SyncManager test suite": d33 = createChain(r33.data) let - f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) - f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) - f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + f11 = sq.push(r11, d11) + f12 = sq.push(r12, d12) + f13 = sq.push(r13, d13) - f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) - f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) - f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + f22 = sq.push(r22, d22) + f21 = sq.push(r21, d21) + f23 = sq.push(r23, d23) - f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) - f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) - f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) + f33 = sq.push(r33, d33) + f32 = sq.push(r32, d32) + f31 = sq.push(r31, d31) + + check: + (await noCancel f11).count == 32 - await noCancel f11 check: f11.finished == true # We do not check f12 and f13 here because their state is undefined @@ -313,7 +343,9 @@ suite "SyncManager test suite": f32.finished == false f33.finished == false - await noCancel f22 + check: + (await noCancel f22).count == 32 + check: f11.finished == true f12.finished == true @@ -325,7 +357,9 @@ suite "SyncManager test suite": f32.finished == false f33.finished == false - await noCancel f33 + check: + (await noCancel f33).count == 32 + check: f11.finished == true f12.finished == true @@ -341,7 +375,8 @@ suite "SyncManager test suite": r41 = sq.pop(Slot(127), peer1) d41 = createChain(r41.data) - await noCancel sq.push(r41, d41, Opt.none(seq[BlobSidecars])) + check: + (await noCancel sq.push(r41, d41)).count == 32 check: f11.finished == true @@ -356,7 +391,7 @@ suite "SyncManager test suite": await noCancel wait(verifier.verifier, 2.seconds) - asyncTest "[SyncQueue# & " & $kind & "] Failure request push test": + asyncTest "[SyncQueue#" & $kind & "] Failure request push test": let scenario = case kind @@ -415,7 +450,8 @@ suite "SyncManager test suite": d12 = createChain(r12.data) sq.push(r11) - await noCancel sq.push(r12, d12, Opt.none(seq[BlobSidecars])) + check: + (await noCancel sq.push(r12, d12)).count == 32 sq.push(r13) # Next couple of calls should be detected as non relevant sq.push(r11) @@ -431,7 +467,8 @@ suite "SyncManager test suite": sq.push(r11) sq.push(r12) - await noCancel sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + check: + (await noCancel sq.push(r13, d13)).count == 32 # Next couple of calls should be detected as non relevant sq.push(r11) sq.push(r12) @@ -439,7 +476,7 @@ suite "SyncManager test suite": await noCancel wait(verifier.verifier, 2.seconds) - asyncTest "[SyncQueue# & " & $kind & "] Invalid block [3 peers] test": + asyncTest "[SyncQueue#" & $kind & "] Invalid block [3 peers] test": # This scenario performs test for 2 cases. # 1. When first error encountered it just drops the the response and # increases `failuresCounter`. 
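Across the updated suite, `push` is called without the trailing `Opt.none(seq[BlobSidecars])` argument, and the returned future now resolves to a value whose `count` field the tests assert rather than being awaited purely for its side effect. A minimal sketch of the new pattern, reusing the helpers already defined in this file (`sq`, `peer`, `createChain`) inside an `asyncTest` body and mirroring the single-peer smoke test above:

    let
      req  = sq.pop(Slot(127), peer)    # reserve the next 32-slot range
      data = createChain(req.data)      # fabricate a matching block chain
      fut  = sq.push(req, data)         # no blob-sidecar argument anymore
    check:
      (await noCancel fut).count == 32  # a fully accepted range reports 32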
@@ -508,26 +545,29 @@ suite "SyncManager test suite": d23 = createChain(r23.data) let - f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) - f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) - f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + f11 = sq.push(r11, d11) + f12 = sq.push(r12, d12) + f13 = sq.push(r13, d13) - await noCancel f11 - check f11.finished == true + check: + (await noCancel f11).count == 32 + f11.finished == true let - f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) - f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) - f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + f21 = sq.push(r21, d21) + f22 = sq.push(r22, d22) + f23 = sq.push(r23, d23) - await noCancel f21 + check: + (await noCancel f21).count == 0 check: f21.finished == true f11.finished == true f12.finished == true f13.finished == true - await noCancel f22 + check: + (await noCancel f22).count == -63 check: f21.finished == true f22.finished == true @@ -535,7 +575,8 @@ suite "SyncManager test suite": f12.finished == true f13.finished == true - await noCancel f23 + check: + (await noCancel f23).count == 0 check: f21.finished == true f22.finished == true @@ -559,25 +600,28 @@ suite "SyncManager test suite": d43 = createChain(r43.data) let - f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) - f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) - f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) - f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars])) - f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars])) - f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars])) + f31 = sq.push(r31, d31) + f32 = sq.push(r32, d32) + f33 = sq.push(r33, d33) + f42 = sq.push(r42, d42) + f41 = sq.push(r41, d41) + f43 = sq.push(r43, d43) - await noCancel f31 + check: + (await noCancel f31).count == 32 check: f31.finished == true - await noCancel f42 + check: + (await noCancel f42).count == 32 check: f31.finished == true f32.finished == true f33.finished == true f42.finished == true - await noCancel f43 + check: + (await noCancel f43).count == 0 check: f31.finished == true f32.finished == true @@ -588,7 +632,7 @@ suite "SyncManager test suite": await noCancel wait(verifier.verifier, 2.seconds) - asyncTest "[SyncQueue# & " & $kind & "] Unviable block [3 peers] test": + asyncTest "[SyncQueue#" & $kind & "] Unviable block [3 peers] test": # This scenario performs test for 2 cases. # 1. When first error encountered it just drops the the response and # increases `failuresCounter`. 
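In the error-path hunks that follow, the asserted counts separate the outcomes: 32 when a full 32-slot response is accepted, 0 when a response is dropped or does not move the queue, and a negative value when the queue rolls back; in one of the later scenarios these negatives double from -32 to -64 to -128 as failures repeat, which suggests the magnitude tracks how far the queue rewinds.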
@@ -658,26 +702,29 @@ suite "SyncManager test suite": d23 = createChain(r23.data) let - f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) - f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) - f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + f11 = sq.push(r11, d11) + f12 = sq.push(r12, d12) + f13 = sq.push(r13, d13) - await noCancel f11 + check: + (await noCancel f11).count == 32 check f11.finished == true let - f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) - f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) - f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + f21 = sq.push(r21, d21) + f22 = sq.push(r22, d22) + f23 = sq.push(r23, d23) - await noCancel f21 + check: + (await noCancel f21).count == 0 check: f21.finished == true f11.finished == true f12.finished == true f13.finished == true - await noCancel f22 + check: + (await noCancel f22).count == -63 check: f21.finished == true f22.finished == true @@ -685,7 +732,8 @@ suite "SyncManager test suite": f12.finished == true f13.finished == true - await noCancel f23 + check: + (await noCancel f23).count == 0 check: f21.finished == true f22.finished == true @@ -711,25 +759,28 @@ suite "SyncManager test suite": d43 = createChain(r43.data) let - f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) - f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) - f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) - f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars])) - f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars])) - f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars])) + f31 = sq.push(r31, d31) + f32 = sq.push(r32, d32) + f33 = sq.push(r33, d33) + f42 = sq.push(r42, d42) + f41 = sq.push(r41, d41) + f43 = sq.push(r43, d43) - await noCancel f31 + check: + (await noCancel f31).count == 32 check: f31.finished == true - await noCancel f42 + check: + (await noCancel f42).count == 32 check: f31.finished == true f32.finished == true f33.finished == true f42.finished == true - await noCancel f43 + check: + (await noCancel f43).count == 0 check: f31.finished == true f32.finished == true @@ -740,7 +791,7 @@ suite "SyncManager test suite": await noCancel wait(verifier.verifier, 2.seconds) - asyncTest "[SyncQueue# & " & $kind & "] Empty responses should not " & + asyncTest "[SyncQueue#" & $kind & "] Empty responses should not " & "advance queue until other peers will not confirm [3 peers] " & "test": var emptyResponse: seq[ref ForkedSignedBeaconBlock] @@ -811,7 +862,8 @@ suite "SyncManager test suite": let r11 = sq.pop(Slot(127), peer1) - await sq.push(r11, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r11, emptyResponse)).count == 0 check: # No movement after 1st empty response sq.inpSlot == startSlot @@ -819,7 +871,8 @@ suite "SyncManager test suite": let r12 = sq.pop(Slot(127), peer2) - await sq.push(r12, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r12, emptyResponse)).count == 0 check: # No movement after 2nd empty response sq.inpSlot == startSlot @@ -827,7 +880,8 @@ suite "SyncManager test suite": let r13 = sq.pop(Slot(127), peer3) - await sq.push(r13, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r13, emptyResponse)).count == 32 check: # After 3rd empty response we moving forward sq.inpSlot == middleSlot1 @@ -835,7 +889,8 @@ suite "SyncManager test suite": let r21 = sq.pop(Slot(127), peer1) - await sq.push(r21, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r21, emptyResponse)).count == 0 check: # No movement after 1st empty response 
sq.inpSlot == middleSlot1 @@ -843,7 +898,8 @@ suite "SyncManager test suite": let r22 = sq.pop(Slot(127), peer2) - await sq.push(r22, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r22, emptyResponse)).count == 0 check: # No movement after 2nd empty response sq.inpSlot == middleSlot1 @@ -853,7 +909,8 @@ suite "SyncManager test suite": r23 = sq.pop(Slot(127), peer3) d23 = createChain(r23.data) - await sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r23, d23)).count == 32 check: # We got non-empty response so we should advance sq.inpSlot == middleSlot2 @@ -861,7 +918,8 @@ suite "SyncManager test suite": let r31 = sq.pop(Slot(127), peer1) - await sq.push(r31, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r31, emptyResponse)).count == 0 check: # No movement after 1st empty response sq.inpSlot == middleSlot2 @@ -870,13 +928,14 @@ suite "SyncManager test suite": let r32 = sq.pop(Slot(127), peer2) d32 = createChain(r32.data) - await sq.push(r32, d32, Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r32, d32)).count == 32 check: # We got non-empty response, so we should advance sq.inpSlot == finishSlot sq.outSlot == finishSlot - asyncTest "[SyncQueue# & " & $kind & "] Empty responses should not " & + asyncTest "[SyncQueue#" & $kind & "] Empty responses should not " & "be accounted [3 peers] test": var emptyResponse: seq[ref ForkedSignedBeaconBlock] let @@ -930,7 +989,8 @@ suite "SyncManager test suite": let r11 = sq.pop(Slot(159), peer1) r21 = sq.pop(Slot(159), peer2) - await sq.push(r11, emptyResponse, Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r11, emptyResponse)).count == 0 let r12 = sq.pop(Slot(159), peer1) r13 = sq.pop(Slot(159), peer1) @@ -948,19 +1008,24 @@ suite "SyncManager test suite": r14.data.slot == slots[3] # Scenario requires some finish steps - await sq.push(r21, createChain(r21.data), Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r21, createChain(r21.data))).count == 32 let r22 = sq.pop(Slot(159), peer2) - await sq.push(r22, createChain(r22.data), Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r22, createChain(r22.data))).count == 32 let r23 = sq.pop(Slot(159), peer2) - await sq.push(r23, createChain(r23.data), Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r23, createChain(r23.data))).count == 32 let r24 = sq.pop(Slot(159), peer2) - await sq.push(r24, createChain(r24.data), Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r24, createChain(r24.data))).count == 32 let r35 = sq.pop(Slot(159), peer3) - await sq.push(r35, createChain(r35.data), Opt.none(seq[BlobSidecars])) + check: + (await sq.push(r35, createChain(r35.data))).count == 32 await noCancel wait(verifier.verifier, 2.seconds) - asyncTest "[SyncQueue# & " & $kind & "] Combination of missing parent " & + asyncTest "[SyncQueue#" & $kind & "] Combination of missing parent " & "and good blocks [3 peers] test": let scenario = @@ -1037,26 +1102,29 @@ suite "SyncManager test suite": d23 = createChain(r23.data) let - f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) - f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) - f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + f11 = sq.push(r11, d11) + f12 = sq.push(r12, d12) + f13 = sq.push(r13, d13) - await noCancel f11 + check: + (await noCancel f11).count == 32 check f11.finished == true let - f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) - f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) - f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + 
f21 = sq.push(r21, d21) + f22 = sq.push(r22, d22) + f23 = sq.push(r23, d23) - await noCancel f21 + check: + (await noCancel f21).count == 0 check: f21.finished == true f11.finished == true f12.finished == true f13.finished == true - await noCancel f22 + check: + (await noCancel f22).count == 0 check: f21.finished == true f22.finished == true @@ -1064,7 +1132,8 @@ suite "SyncManager test suite": f12.finished == true f13.finished == true - await noCancel f23 + check: + (await noCancel f23).count == 0 check: f21.finished == true f22.finished == true @@ -1080,13 +1149,14 @@ suite "SyncManager test suite": d31 = createChain(r31.data) d32 = createChain(r32.data) d33 = createChain(r33.data) - f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) - f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) - f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) + f31 = sq.push(r31, d31) + f32 = sq.push(r32, d32) + f33 = sq.push(r33, d33) - await noCancel f31 - await noCancel f32 - await noCancel f33 + check: + (await noCancel f31).count == 0 + (await noCancel f32).count == 0 + (await noCancel f33).count == 0 let r41 = sq.pop(Slot(63), peer1) @@ -1095,15 +1165,15 @@ suite "SyncManager test suite": d41 = createChain(r41.data) d42 = createChain(r42.data) d43 = createChain(r43.data) - f42 = sq.push(r32, d42, Opt.none(seq[BlobSidecars])) - f41 = sq.push(r31, d41, Opt.none(seq[BlobSidecars])) - f43 = sq.push(r33, d43, Opt.none(seq[BlobSidecars])) + f42 = sq.push(r32, d42) + f41 = sq.push(r31, d41) + f43 = sq.push(r33, d43) await noCancel allFutures(f42, f41, f43) await noCancel wait(verifier.verifier, 2.seconds) - test "[SyncQueue# & " & $kind & "] epochFilter() test": + test "[SyncQueue#" & $kind & "] epochFilter() test": let aq = newAsyncQueue[BlockEntry]() scenario = @@ -1276,13 +1346,14 @@ suite "SyncManager test suite": d11 = createChain(r11.data) d12 = createChain(r12.data) d13 = createChain(r13.data) - f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) - f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) - f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + f11 = sq.push(r11, d11) + f12 = sq.push(r12, d12) + f13 = sq.push(r13, d13) - await noCancel f11 - await noCancel f12 - await noCancel f13 + check: + (await noCancel f11).count == 32 + (await noCancel f12).count == 0 + (await noCancel f13).count == 0 for i in 0 ..< 3: let @@ -1292,13 +1363,13 @@ suite "SyncManager test suite": de1 = default(seq[ref ForkedSignedBeaconBlock]) de2 = default(seq[ref ForkedSignedBeaconBlock]) de3 = default(seq[ref ForkedSignedBeaconBlock]) - fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars])) - fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars])) - fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars])) + fe1 = sq.push(re1, de1) + fe2 = sq.push(re2, de2) + fe3 = sq.push(re3, de3) - await noCancel fe1 - await noCancel fe2 - await noCancel fe3 + discard await noCancel fe1 + discard await noCancel fe2 + discard await noCancel fe3 let r21 = sq.pop(Slot(159), peer1) @@ -1307,13 +1378,14 @@ suite "SyncManager test suite": d21 = createChain(r21.data) d22 = createChain(r22.data) d23 = createChain(r23.data) - f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) - f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) - f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + f21 = sq.push(r21, d21) + f22 = sq.push(r22, d22) + f23 = sq.push(r23, d23) - await noCancel f21 - await noCancel f22 - await noCancel f23 + check: + (await noCancel f21).count == 0 + (await noCancel f22).count == -32 + (await noCancel f23).count == 0 for i in 
0 ..< 1: let @@ -1323,13 +1395,13 @@ suite "SyncManager test suite": de1 = default(seq[ref ForkedSignedBeaconBlock]) de2 = default(seq[ref ForkedSignedBeaconBlock]) de3 = default(seq[ref ForkedSignedBeaconBlock]) - fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars])) - fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars])) - fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars])) + fe1 = sq.push(re1, de1) + fe2 = sq.push(re2, de2) + fe3 = sq.push(re3, de3) - await noCancel fe1 - await noCancel fe2 - await noCancel fe3 + discard await noCancel fe1 + discard await noCancel fe2 + discard await noCancel fe3 let r31 = sq.pop(Slot(159), peer1) @@ -1338,13 +1410,14 @@ suite "SyncManager test suite": d31 = createChain(r31.data) d32 = createChain(r32.data) d33 = createChain(r33.data) - f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) - f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) - f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) + f31 = sq.push(r31, d31) + f32 = sq.push(r32, d32) + f33 = sq.push(r33, d33) - await noCancel f31 - await noCancel f32 - await noCancel f33 + check: + (await noCancel f31).count == 0 + (await noCancel f32).count == -64 + (await noCancel f33).count == 0 for i in 0 ..< 2: let @@ -1354,13 +1427,13 @@ suite "SyncManager test suite": de1 = default(seq[ref ForkedSignedBeaconBlock]) de2 = default(seq[ref ForkedSignedBeaconBlock]) de3 = default(seq[ref ForkedSignedBeaconBlock]) - fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars])) - fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars])) - fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars])) + fe1 = sq.push(re1, de1) + fe2 = sq.push(re2, de2) + fe3 = sq.push(re3, de3) - await noCancel fe1 - await noCancel fe2 - await noCancel fe3 + discard await noCancel fe1 + discard await noCancel fe2 + discard await noCancel fe3 let r41 = sq.pop(Slot(159), peer1) @@ -1369,13 +1442,14 @@ suite "SyncManager test suite": d41 = createChain(r41.data) d42 = createChain(r42.data) d43 = createChain(r43.data) - f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars])) - f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars])) - f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars])) + f41 = sq.push(r41, d41) + f42 = sq.push(r42, d42) + f43 = sq.push(r43, d43) - await noCancel f41 - await noCancel f42 - await noCancel f43 + check: + (await noCancel f41).count == 0 + (await noCancel f42).count == -128 + (await noCancel f43).count == 0 for i in 0 ..< 5: let @@ -1385,13 +1459,14 @@ suite "SyncManager test suite": df1 = createChain(rf1.data) df2 = createChain(rf2.data) df3 = createChain(rf3.data) - ff1 = sq.push(rf1, df1, Opt.none(seq[BlobSidecars])) - ff2 = sq.push(rf2, df2, Opt.none(seq[BlobSidecars])) - ff3 = sq.push(rf3, df3, Opt.none(seq[BlobSidecars])) + ff1 = sq.push(rf1, df1) + ff2 = sq.push(rf2, df2) + ff3 = sq.push(rf3, df3) - await noCancel ff1 - await noCancel ff2 - await noCancel ff3 + check: + (await noCancel ff1).count == 32 + (await noCancel ff2).count == 0 + (await noCancel ff3).count == 0 await noCancel wait(verifier.verifier, 2.seconds) @@ -1439,13 +1514,14 @@ suite "SyncManager test suite": d11 = createChain(r11.data) d12 = createChain(r12.data) d13 = createChain(r13.data) - f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) - f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) - f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + f11 = sq.push(r11, d11) + f12 = sq.push(r12, d12) + f13 = sq.push(r13, d13) - await noCancel f11 - await noCancel f12 - await noCancel f13 + check: + (await noCancel f11).count == 32 + (await noCancel 
f12).count == 0 + (await noCancel f13).count == 0 for i in 0 ..< 3: let @@ -1455,13 +1531,13 @@ suite "SyncManager test suite": de1 = default(seq[ref ForkedSignedBeaconBlock]) de2 = default(seq[ref ForkedSignedBeaconBlock]) de3 = default(seq[ref ForkedSignedBeaconBlock]) - fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars])) - fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars])) - fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars])) + fe1 = sq.push(re1, de1) + fe2 = sq.push(re2, de2) + fe3 = sq.push(re3, de3) - await noCancel fe1 - await noCancel fe2 - await noCancel fe3 + discard await noCancel fe1 + discard await noCancel fe2 + discard await noCancel fe3 let r21 = sq.pop(Slot(159), peer1) @@ -1470,13 +1546,14 @@ suite "SyncManager test suite": d21 = createChain(r21.data) d22 = createChain(r22.data) d23 = createChain(r23.data) - f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) - f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) - f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + f21 = sq.push(r21, d21) + f22 = sq.push(r22, d22) + f23 = sq.push(r23, d23) - await noCancel f21 - await noCancel f22 - await noCancel f23 + check: + (await noCancel f21).count == 0 + (await noCancel f22).count == -159 + (await noCancel f23).count == 0 for i in 0 ..< 2: let @@ -1486,13 +1563,14 @@ suite "SyncManager test suite": d31 = createChain(r31.data) d32 = createChain(r32.data) d33 = createChain(r33.data) - f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) - f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) - f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) + f31 = sq.push(r31, d31) + f32 = sq.push(r32, d32) + f33 = sq.push(r33, d33) - await noCancel f31 - await noCancel f32 - await noCancel f33 + check: + (await noCancel f31).count == 32 + (await noCancel f32).count == 0 + (await noCancel f33).count == 0 for i in 0 ..< 2: let @@ -1502,13 +1580,13 @@ suite "SyncManager test suite": de1 = default(seq[ref ForkedSignedBeaconBlock]) de2 = default(seq[ref ForkedSignedBeaconBlock]) de3 = default(seq[ref ForkedSignedBeaconBlock]) - fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars])) - fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars])) - fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars])) + fe1 = sq.push(re1, de1) + fe2 = sq.push(re2, de2) + fe3 = sq.push(re3, de3) - await noCancel fe1 - await noCancel fe2 - await noCancel fe3 + discard await noCancel fe1 + discard await noCancel fe2 + discard await noCancel fe3 let r41 = sq.pop(Slot(159), peer1) @@ -1517,13 +1595,14 @@ suite "SyncManager test suite": d41 = createChain(r41.data) d42 = createChain(r42.data) d43 = createChain(r43.data) - f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars])) - f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars])) - f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars])) + f41 = sq.push(r41, d41) + f42 = sq.push(r42, d42) + f43 = sq.push(r43, d43) - await noCancel f41 - await noCancel f42 - await noCancel f43 + check: + (await noCancel f41).count == 0 + (await noCancel f42).count == -159 + (await noCancel f43).count == 0 for i in 0 ..< 3: let @@ -1533,13 +1612,14 @@ suite "SyncManager test suite": d51 = createChain(r51.data) d52 = createChain(r52.data) d53 = createChain(r53.data) - f51 = sq.push(r51, d51, Opt.none(seq[BlobSidecars])) - f52 = sq.push(r52, d52, Opt.none(seq[BlobSidecars])) - f53 = sq.push(r53, d53, Opt.none(seq[BlobSidecars])) + f51 = sq.push(r51, d51) + f52 = sq.push(r52, d52) + f53 = sq.push(r53, d53) - await noCancel f51 - await noCancel f52 - await noCancel f53 + check: + (await noCancel 
f51).count == 32
+        (await noCancel f52).count == 0
+        (await noCancel f53).count == 0

     for i in 0 ..< 1:
       let
@@ -1549,13 +1629,13 @@ suite "SyncManager test suite":
         de1 = default(seq[ref ForkedSignedBeaconBlock])
         de2 = default(seq[ref ForkedSignedBeaconBlock])
         de3 = default(seq[ref ForkedSignedBeaconBlock])
-        fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
-        fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
-        fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
+        fe1 = sq.push(re1, de1)
+        fe2 = sq.push(re2, de2)
+        fe3 = sq.push(re3, de3)

-      await noCancel fe1
-      await noCancel fe2
-      await noCancel fe3
+      discard await noCancel fe1
+      discard await noCancel fe2
+      discard await noCancel fe3

     let
       r61 = sq.pop(Slot(159), peer1)
@@ -1564,13 +1644,14 @@ suite "SyncManager test suite":
       d61 = createChain(r61.data)
       d62 = createChain(r62.data)
       d63 = createChain(r63.data)
-      f61 = sq.push(r61, d61, Opt.none(seq[BlobSidecars]))
-      f62 = sq.push(r62, d62, Opt.none(seq[BlobSidecars]))
-      f63 = sq.push(r63, d63, Opt.none(seq[BlobSidecars]))
+      f61 = sq.push(r61, d61)
+      f62 = sq.push(r62, d62)
+      f63 = sq.push(r63, d63)

-    await noCancel f61
-    await noCancel f62
-    await noCancel f63
+    check:
+      (await noCancel f61).count == 0
+      (await noCancel f62).count == -159
+      (await noCancel f63).count == 0

     for i in 0 ..< 5:
       let
@@ -1580,13 +1661,14 @@ suite "SyncManager test suite":
         d71 = createChain(r71.data)
         d72 = createChain(r72.data)
         d73 = createChain(r73.data)
-        f71 = sq.push(r71, d71, Opt.none(seq[BlobSidecars]))
-        f72 = sq.push(r72, d72, Opt.none(seq[BlobSidecars]))
-        f73 = sq.push(r73, d73, Opt.none(seq[BlobSidecars]))
+        f71 = sq.push(r71, d71)
+        f72 = sq.push(r72, d72)
+        f73 = sq.push(r73, d73)

-      await noCancel f71
-      await noCancel f72
-      await noCancel f73
+      check:
+        (await noCancel f71).count == 32
+        (await noCancel f72).count == 0
+        (await noCancel f73).count == 0

     await noCancel wait(verifier.verifier, 2.seconds)

@@ -1693,130 +1775,110 @@ suite "SyncManager test suite":
       r1 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 1'u64))
       r2 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 2'u64))
       r3 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 3'u64))
+      r4 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 4'u64))

     check:
-      checkResponse(r1, [Slot(11)]).isOk() == true
-      checkResponse(r1, @[]).isOk() == true
-      checkResponse(r1, @[Slot(11), Slot(11)]).isOk() == false
-      checkResponse(r1, [Slot(10)]).isOk() == false
-      checkResponse(r1, [Slot(12)]).isOk() == false
-
-      checkResponse(r2, [Slot(11)]).isOk() == true
-      checkResponse(r2, [Slot(12)]).isOk() == true
-      checkResponse(r2, @[]).isOk() == true
-      checkResponse(r2, [Slot(11), Slot(12)]).isOk() == true
-      checkResponse(r2, [Slot(12)]).isOk() == true
-      checkResponse(r2, [Slot(11), Slot(12), Slot(13)]).isOk() == false
-      checkResponse(r2, [Slot(10), Slot(11)]).isOk() == false
-      checkResponse(r2, [Slot(10)]).isOk() == false
-      checkResponse(r2, [Slot(12), Slot(11)]).isOk() == false
-      checkResponse(r2, [Slot(12), Slot(13)]).isOk() == false
-      checkResponse(r2, [Slot(13)]).isOk() == false
-
-      checkResponse(r2, [Slot(11), Slot(11)]).isOk() == false
-      checkResponse(r2, [Slot(12), Slot(12)]).isOk() == false
-
-      checkResponse(r3, @[Slot(11)]).isOk() == true
-      checkResponse(r3, @[Slot(12)]).isOk() == true
-      checkResponse(r3, @[Slot(13)]).isOk() == true
-      checkResponse(r3, @[Slot(11), Slot(12)]).isOk() == true
-      checkResponse(r3, @[Slot(11), Slot(13)]).isOk() == true
-      checkResponse(r3, @[Slot(12), Slot(13)]).isOk() == true
-      checkResponse(r3, @[Slot(11), Slot(13), Slot(12)]).isOk() == false
-      checkResponse(r3, @[Slot(12), Slot(13), Slot(11)]).isOk() == false
-      checkResponse(r3, @[Slot(13), Slot(12), Slot(11)]).isOk() == false
-      checkResponse(r3, @[Slot(13), Slot(11)]).isOk() == false
-      checkResponse(r3, @[Slot(13), Slot(12)]).isOk() == false
-      checkResponse(r3, @[Slot(12), Slot(11)]).isOk() == false
-
-      checkResponse(r3, @[Slot(11), Slot(11), Slot(11)]).isOk() == false
-      checkResponse(r3, @[Slot(11), Slot(12), Slot(12)]).isOk() == false
-      checkResponse(r3, @[Slot(11), Slot(13), Slot(13)]).isOk() == false
-      checkResponse(r3, @[Slot(12), Slot(13), Slot(13)]).isOk() == false
-      checkResponse(r3, @[Slot(12), Slot(12), Slot(12)]).isOk() == false
-      checkResponse(r3, @[Slot(13), Slot(13), Slot(13)]).isOk() == false
-      checkResponse(r3, @[Slot(11), Slot(11)]).isOk() == false
-      checkResponse(r3, @[Slot(12), Slot(12)]).isOk() == false
-      checkResponse(r3, @[Slot(13), Slot(13)]).isOk() == false
-
-  test "[SyncQueue] checkBlobsResponse() test":
-    const maxBlobsPerBlockElectra = 9
-
-    proc checkBlobsResponse[T](
-        req: SyncRequest[T],
-        data: openArray[Slot]): Result[void, cstring] =
-      checkBlobsResponse(req, data, maxBlobsPerBlockElectra)
+      checkResponse(r1.data,
+        createChain([Slot(11)])).isOk() == true
+      checkResponse(r1.data,
+        createChain(@[])).isOk() == true
+      checkResponse(r1.data,
+        createChain(@[Slot(11), Slot(11)])).isOk() == false
+      checkResponse(r1.data,
+        createChain([Slot(10)])).isOk() == false
+      checkResponse(r1.data,
+        createChain([Slot(12)])).isOk() == false
+
+      checkResponse(r2.data,
+        createChain([Slot(11)])).isOk() == true
+      checkResponse(r2.data,
+        createChain([Slot(12)])).isOk() == true
+      checkResponse(r2.data,
+        createChain(@[])).isOk() == true
+      checkResponse(r2.data,
+        createChain([Slot(11), Slot(12)])).isOk() == true
+      checkResponse(r2.data,
+        createChain([Slot(12)])).isOk() == true
+      checkResponse(r2.data,
+        createChain([Slot(11), Slot(12), Slot(13)])).isOk() == false
+      checkResponse(r2.data,
+        createChain([Slot(10), Slot(11)])).isOk() == false
+      checkResponse(r2.data,
+        createChain([Slot(10)])).isOk() == false
+      checkResponse(r2.data,
+        createChain([Slot(12), Slot(11)])).isOk() == false
+      checkResponse(r2.data,
+        createChain([Slot(12), Slot(13)])).isOk() == false
+      checkResponse(r2.data,
+        createChain([Slot(13)])).isOk() == false
+
+      checkResponse(r2.data,
+        createChain([Slot(11), Slot(11)])).isOk() == false
+      checkResponse(r2.data,
+        createChain([Slot(12), Slot(12)])).isOk() == false
+
+      checkResponse(r3.data,
+        createChain(@[Slot(11)])).isOk() == true
+      checkResponse(r3.data,
+        createChain(@[Slot(12)])).isOk() == true
+      checkResponse(r3.data,
+        createChain(@[Slot(13)])).isOk() == true
+      checkResponse(r3.data,
+        createChain(@[Slot(11), Slot(12)])).isOk() == true
+      checkResponse(r3.data,
+        createChain(@[Slot(11), Slot(13)])).isOk() == true
+      checkResponse(r3.data,
+        createChain(@[Slot(12), Slot(13)])).isOk() == true
+      checkResponse(r3.data,
+        createChain(@[Slot(11), Slot(13), Slot(12)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(12), Slot(13), Slot(11)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(13), Slot(12), Slot(11)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(13), Slot(11)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(13), Slot(12)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(12), Slot(11)])).isOk() == false
+
+      checkResponse(r3.data,
+        createChain(@[Slot(11), Slot(11), Slot(11)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(11), Slot(12), Slot(12)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(11), Slot(13), Slot(13)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(12), Slot(13), Slot(13)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(12), Slot(12), Slot(12)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(13), Slot(13), Slot(13)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(11), Slot(11)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(12), Slot(12)])).isOk() == false
+      checkResponse(r3.data,
+        createChain(@[Slot(13), Slot(13)])).isOk() == false

-    let
-      r1 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 1'u64))
-      r2 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 2'u64))
-      r3 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 3'u64))
-
-      d1 = Slot(11).repeat(maxBlobsPerBlockElectra)
-      d2 = Slot(12).repeat(maxBlobsPerBlockElectra)
-      d3 = Slot(13).repeat(maxBlobsPerBlockElectra)
+    var
+      chain1 = createChain(@[Slot(11), Slot(12), Slot(13), Slot(14)])
+      chain2 = createChain(@[Slot(11), Slot(12), Slot(13), Slot(14)])
+      chain3 = createChain(@[Slot(11), Slot(12), Slot(13), Slot(14)])
+      chain4 = createChain(@[Slot(11), Slot(12), Slot(13), Slot(14)])
+
+    withBlck(chain2[1][]):
+      forkyBlck.message.parent_root = Eth2Digest()
+    withBlck(chain3[2][]):
+      forkyBlck.message.parent_root = Eth2Digest()
+    withBlck(chain4[3][]):
+      forkyBlck.message.parent_root = Eth2Digest()

     check:
-      checkBlobsResponse(r1, [Slot(11)]).isOk() == true
-      checkBlobsResponse(r1, @[]).isOk() == true
-      checkBlobsResponse(r1, [Slot(11), Slot(11)]).isOk() == true
-      checkBlobsResponse(r1, [Slot(11), Slot(11), Slot(11)]).isOk() == true
-      checkBlobsResponse(r1, d1).isOk() == true
-      checkBlobsResponse(r1, d1 & @[Slot(11)]).isOk() == false
-      checkBlobsResponse(r1, [Slot(10)]).isOk() == false
-      checkBlobsResponse(r1, [Slot(12)]).isOk() == false
-
-      checkBlobsResponse(r2, [Slot(11)]).isOk() == true
-      checkBlobsResponse(r2, [Slot(12)]).isOk() == true
-      checkBlobsResponse(r2, @[]).isOk() == true
-      checkBlobsResponse(r2, [Slot(11), Slot(12)]).isOk() == true
-      checkBlobsResponse(r2, [Slot(11), Slot(11)]).isOk() == true
-      checkBlobsResponse(r2, [Slot(12), Slot(12)]).isOk() == true
-      checkBlobsResponse(r2, d1).isOk() == true
-      checkBlobsResponse(r2, d2).isOk() == true
-      checkBlobsResponse(r2, d1 & d2).isOk() == true
-      checkBlobsResponse(r2, [Slot(11), Slot(12), Slot(11)]).isOk() == false
-      checkBlobsResponse(r2, [Slot(12), Slot(11)]).isOk() == false
-      checkBlobsResponse(r2, d1 & @[Slot(11)]).isOk() == false
-      checkBlobsResponse(r2, d2 & @[Slot(12)]).isOk() == false
-      checkBlobsResponse(r2, @[Slot(11)] & d2 & @[Slot(12)]).isOk() == false
-      checkBlobsResponse(r2, d1 & d2 & @[Slot(12)]).isOk() == false
-      checkBlobsResponse(r2, d2 & d1).isOk() == false
-
-      checkBlobsResponse(r3, [Slot(11)]).isOk() == true
-      checkBlobsResponse(r3, [Slot(12)]).isOk() == true
-      checkBlobsResponse(r3, [Slot(13)]).isOk() == true
-      checkBlobsResponse(r3, @[]).isOk() == true
-      checkBlobsResponse(r3, [Slot(11), Slot(12)]).isOk() == true
-      checkBlobsResponse(r3, [Slot(11), Slot(11)]).isOk() == true
-      checkBlobsResponse(r3, [Slot(12), Slot(12)]).isOk() == true
-      checkBlobsResponse(r3, [Slot(11), Slot(13)]).isOk() == true
-      checkBlobsResponse(r3, [Slot(12), Slot(13)]).isOk() == true
-      checkBlobsResponse(r3, [Slot(13), Slot(13)]).isOk() == true
-      checkBlobsResponse(r3, d1).isOk() == true
-      checkBlobsResponse(r3, d2).isOk() == true
-      checkBlobsResponse(r3, d3).isOk() == true
-      checkBlobsResponse(r3, d1 & d2).isOk() == true
-      checkBlobsResponse(r3, d1 & d3).isOk() == true
-      checkBlobsResponse(r3, d2 & d3).isOk() == true
-      checkBlobsResponse(r3, [Slot(11), Slot(12), Slot(11)]).isOk() == false
-      checkBlobsResponse(r3, [Slot(11), Slot(13), Slot(12)]).isOk() == false
-      checkBlobsResponse(r3, [Slot(12), Slot(13), Slot(11)]).isOk() == false
-      checkBlobsResponse(r3, [Slot(12), Slot(11)]).isOk() == false
-      checkBlobsResponse(r3, [Slot(13), Slot(12)]).isOk() == false
-      checkBlobsResponse(r3, [Slot(13), Slot(11)]).isOk() == false
-      checkBlobsResponse(r3, d1 & @[Slot(11)]).isOk() == false
-      checkBlobsResponse(r3, d2 & @[Slot(12)]).isOk() == false
-      checkBlobsResponse(r3, d3 & @[Slot(13)]).isOk() == false
-      checkBlobsResponse(r3, @[Slot(11)] & d2 & @[Slot(12)]).isOk() == false
-      checkBlobsResponse(r3, @[Slot(12)] & d3 & @[Slot(13)]).isOk() == false
-      checkBlobsResponse(r3, @[Slot(11)] & d3 & @[Slot(13)]).isOk() == false
-      checkBlobsResponse(r2, d1 & d2 & @[Slot(12)]).isOk() == false
-      checkBlobsResponse(r2, d1 & d3 & @[Slot(13)]).isOk() == false
-      checkBlobsResponse(r2, d2 & d3 & @[Slot(13)]).isOk() == false
-      checkBlobsResponse(r2, d2 & d1).isOk() == false
-      checkBlobsResponse(r2, d3 & d2).isOk() == false
-      checkBlobsResponse(r2, d3 & d1).isOk() == false
+      checkResponse(r4.data, chain1).isOk() == true
+      checkResponse(r4.data, chain2).isOk() == false
+      checkResponse(r4.data, chain3).isOk() == false
+      checkResponse(r4.data, chain4).isOk() == false

   test "[SyncManager] groupBlobs() test":
     var