@@ -12,6 +12,7 @@ import std/sets
 import std/options
 import std/algorithm
 import std/sugar
+import std/random
 
 import pkg/chronos
 import pkg/libp2p/[cid, switch, multihash, multicodec]
@@ -199,7 +200,6 @@ proc refreshBlockKnowledge(self: BlockExcEngine) {.async: (raises: [CancelledErr
 
     # In dynamic swarms, staleness will dominate latency.
     if peer.lastRefresh < self.pendingBlocks.lastInclusion or peer.isKnowledgeStale:
-      trace "Refreshing block knowledge for peer", peer = peer.id
       peer.refreshRequested()
       # TODO: optimize this by keeping track of what was sent and sending deltas.
       # This should allow us to run much more frequent refreshes, and be way more
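Aside (not part of the diff): the predicate above re-requests a peer's block knowledge when the local pending-block set has grown since that peer was last refreshed, or when the peer's knowledge has gone stale. A minimal sketch of that decision, using hypothetical PeerSnapshot/needsRefresh names and chronos Moment timestamps in place of the engine's own types:

import pkg/chronos

type
  PeerSnapshot = object
    lastRefresh: Moment     # when this peer's block knowledge was last refreshed
    isKnowledgeStale: bool  # set once the knowledge has aged past some threshold

proc needsRefresh(peer: PeerSnapshot, lastInclusion: Moment): bool =
  # Refresh if new blocks were added to our pending set after the peer's
  # last refresh, or if the peer's knowledge is stale for any other reason.
  peer.lastRefresh < lastInclusion or peer.isKnowledgeStale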
@@ -269,8 +269,9 @@ proc downloadInternal(
 
       # We now wait for a bit and then retry. If the handle gets completed in the
       # meantime (cause the presence handler might have requested the block and
-      # received it in the meantime), we are done.
-      await handle or sleepAsync(self.pendingBlocks.retryInterval)
+      # received it in the meantime), we are done. Retry delays are randomized
+      # so we don't get all block loops spinning at the same time.
+      await handle or sleepAsync(secs(rand(self.pendingBlocks.retryInterval.secs)))
       if handle.finished:
         break
       # If we still don't have the block, we'll go for another cycle.
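Aside (not part of the diff): the new sleep is plain retry jitter. Instead of every download loop waiting exactly retryInterval, each wait is drawn uniformly from [0, retryInterval], so loops that started together drift apart rather than retrying in lockstep. A standalone sketch under the same assumptions as the hunk above (retryInterval is a chronos Duration and the secs helpers convert between Duration and whole seconds, as used there); note that std/random's module-level rand is deterministic unless randomize() is called once at startup, and the diff does not show where that happens:

import std/random
import pkg/chronos

randomize() # seed the global PRNG once; without this, rand() repeats the same sequence

proc jitteredRetrySleep(retryInterval: Duration): Future[void] =
  # rand(n) yields an int in 0..n inclusive, so each wait lasts anywhere
  # from zero up to the full retry interval, in whole seconds.
  sleepAsync(secs(rand(retryInterval.secs.int)))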
@@ -484,6 +485,9 @@ proc cancelBlocks(
       # If so, schedules a cancellation.
       scheduledCancellations[peerCtx.id] = intersection
 
+  if scheduledCancellations.len == 0:
+    return
+
   let (succeededFuts, failedFuts) = await allFinishedFailed[PeerId](
     toSeq(scheduledCancellations.pairs).map(dispatchCancellations)
   )