Skip to content

Commit ab0dda7

Browse files
committed
Only mark all mon updates complete if there are no blocked updates
In `handle_new_monitor_update!`, we correctly check that the channel doesn't have any blocked monitor updates pending before calling `handle_monitor_update_completion!` (which calls `Channel::monitor_updating_restored`, which in turn assumes that all generated `ChannelMonitorUpdate`s, including blocked ones, have completed). We, however, did not do the same check at several other places where we called `handle_monitor_update_completion!`. Specifically, after a monitor update completes during reload (processed via a `BackgroundEvent`) or when a monitor update completes async, we didn't check if there were any blocked monitor updates before completing. Here we add the missing check, as well as an assertion in `Channel::monitor_updating_restored`.
1 parent 257ebad commit ab0dda7

File tree

4 files changed

+78
-17
lines changed

4 files changed

+78
-17
lines changed

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 56 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -3410,20 +3410,22 @@ fn test_inbound_reload_without_init_mon() {
34103410
do_test_inbound_reload_without_init_mon(false, false);
34113411
}
34123412

3413-
#[test]
3414-
fn test_blocked_chan_preimage_release() {
3413+
fn do_test_blocked_chan_preimage_release(completion_mode: u8) {
34153414
// Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to
34163415
// be handled HTLC preimage `ChannelMonitorUpdate`s will still go out.
34173416
let chanmon_cfgs = create_chanmon_cfgs(3);
34183417
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3418+
let persister;
3419+
let new_chain_mon;
34193420
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3421+
let nodes_1_reload;
34203422
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
34213423

34223424
let node_a_id = nodes[0].node.get_our_node_id();
34233425
let node_b_id = nodes[1].node.get_our_node_id();
34243426
let node_c_id = nodes[2].node.get_our_node_id();
34253427

3426-
create_announced_chan_between_nodes(&nodes, 0, 1);
3428+
let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
34273429
let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
34283430

34293431
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);
@@ -3456,29 +3458,63 @@ fn test_blocked_chan_preimage_release() {
34563458
expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000);
34573459

34583460
let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], node_b_id);
3461+
if completion_mode != 0 {
3462+
// We used to incorrectly handle monitor update completion in cases where we completed a
3463+
// monitor update async or after reload. We test both based on the `completion_mode`.
3464+
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
3465+
}
34593466
nodes[1]
34603467
.node
34613468
.handle_update_fulfill_htlc(node_a_id, &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
34623469
check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
34633470
assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
34643471
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3472+
if completion_mode == 1 {
3473+
let node_ser = nodes[1].node.encode();
3474+
let chan_mon_0 = get_monitor!(nodes[1], chan_id_1).encode();
3475+
let chan_mon_1 = get_monitor!(nodes[1], chan_id_2).encode();
3476+
3477+
let mons = &[&chan_mon_0[..], &chan_mon_1[..]];
3478+
reload_node!(nodes[1], &node_ser, mons, persister, new_chain_mon, nodes_1_reload);
3479+
3480+
nodes[0].node.peer_disconnected(node_b_id);
3481+
nodes[2].node.peer_disconnected(node_b_id);
3482+
3483+
let mut a_b_reconnect = ReconnectArgs::new(&nodes[0], &nodes[1]);
3484+
a_b_reconnect.pending_htlc_claims.1 = 1;
3485+
// Note that we will expect no final RAA monitor update in
3486+
// `commitment_signed_dance_through_cp_raa` during the reconnect, matching the below case.
3487+
reconnect_nodes(a_b_reconnect);
3488+
reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[1]));
3489+
} else if completion_mode == 2 {
3490+
let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_id_2);
3491+
nodes[1]
3492+
.chain_monitor
3493+
.chain_monitor
3494+
.channel_monitor_updated(chan_id_2, latest_update)
3495+
.unwrap();
3496+
}
34653497

34663498
// Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the
34673499
// update_fulfill_htlc + CS is held, even though the preimage is already on disk for the
34683500
// channel.
3469-
nodes[1]
3470-
.node
3471-
.handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill_updates.commitment_signed);
3472-
check_added_monitors(&nodes[1], 1);
3473-
let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
3474-
assert!(a.is_none());
3501+
// Note that in completion_mode 1 we completed the CS dance in `reconnect_nodes` above.
3502+
if completion_mode != 1 {
3503+
nodes[1].node.handle_commitment_signed_batch_test(
3504+
node_a_id,
3505+
&as_htlc_fulfill_updates.commitment_signed,
3506+
);
3507+
check_added_monitors(&nodes[1], 1);
3508+
let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
3509+
assert!(a.is_none());
34753510

3476-
nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
3477-
check_added_monitors(&nodes[1], 0);
3478-
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3511+
nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
3512+
check_added_monitors(&nodes[1], 0);
3513+
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3514+
}
34793515

34803516
let events = nodes[1].node.get_and_clear_pending_events();
3481-
assert_eq!(events.len(), 3);
3517+
assert_eq!(events.len(), 3, "{events:?}");
34823518
if let Event::PaymentSent { .. } = events[0] {
34833519
} else {
34843520
panic!();
@@ -3508,6 +3544,13 @@ fn test_blocked_chan_preimage_release() {
35083544
expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
35093545
}
35103546

3547+
#[test]
3548+
fn test_blocked_chan_preimage_release() {
3549+
do_test_blocked_chan_preimage_release(0);
3550+
do_test_blocked_chan_preimage_release(1);
3551+
do_test_blocked_chan_preimage_release(2);
3552+
}
3553+
35113554
fn do_test_inverted_mon_completion_order(
35123555
with_latest_manager: bool, complete_bc_commitment_dance: bool,
35133556
) {

lightning/src/ln/channel.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7925,6 +7925,7 @@ where
79257925
{
79267926
assert!(self.context.channel_state.is_monitor_update_in_progress());
79277927
self.context.channel_state.clear_monitor_update_in_progress();
7928+
assert_eq!(self.blocked_monitor_updates_pending(), 0);
79287929

79297930
// If we're past (or at) the AwaitingChannelReady stage on an outbound (or V2-established) channel,
79307931
// try to (re-)broadcast the funding transaction as we may have declined to broadcast it when we

lightning/src/ln/channelmanager.rs

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6934,7 +6934,9 @@ where
69346934
.get_mut(&channel_id)
69356935
.and_then(Channel::as_funded_mut)
69366936
{
6937-
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
6937+
if chan.blocked_monitor_updates_pending() == 0 {
6938+
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
6939+
}
69386940
} else {
69396941
let update_actions = peer_state.monitor_update_blocked_actions
69406942
.remove(&channel_id).unwrap_or(Vec::new());
@@ -8227,7 +8229,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
82278229
{
82288230
if chan.is_awaiting_monitor_update() {
82298231
log_trace!(logger, "Channel is open and awaiting update, resuming it");
8230-
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
8232+
if chan.blocked_monitor_updates_pending() == 0 {
8233+
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
8234+
}
82318235
} else {
82328236
log_trace!(logger, "Channel is open but not awaiting update");
82338237
}

lightning/src/ln/quiescence_tests.rs

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -260,9 +260,22 @@ fn test_quiescence_waits_for_async_signer_and_monitor_update() {
260260
let chain_monitor = &nodes[0].chain_monitor.chain_monitor;
261261
// One for the latest commitment transaction update from the last `revoke_and_ack`
262262
chain_monitor.channel_monitor_updated(chan_id, latest_update).unwrap();
263-
expect_payment_sent(&nodes[0], preimage, None, true, true);
263+
expect_payment_sent(&nodes[0], preimage, None, false, true);
264+
265+
let chain_monitor = &nodes[0].chain_monitor;
266+
let (_, new_latest_update) =
267+
chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
268+
assert_eq!(new_latest_update, latest_update + 1);
269+
let chain_monitor = &nodes[0].chain_monitor.chain_monitor;
264270
// One for the commitment secret update from the last `revoke_and_ack`
265-
chain_monitor.channel_monitor_updated(chan_id, latest_update + 1).unwrap();
271+
chain_monitor.channel_monitor_updated(chan_id, new_latest_update).unwrap();
272+
// Once that update completes, we'll get the `PaymentPathSuccessful` event
273+
let events = nodes[0].node.get_and_clear_pending_events();
274+
assert_eq!(events.len(), 1);
275+
if let Event::PaymentPathSuccessful { .. } = &events[0] {
276+
} else {
277+
panic!("{events:?}");
278+
}
266279
}
267280

268281
// With the updates completed, we can now become quiescent.

0 commit comments

Comments
 (0)