
Commit 4dec660

Block the mon update removing a preimage until upstream mon writes
When we forward a payment and receive an `update_fulfill_htlc` message from the downstream channel, we immediately claim the HTLC on the upstream channel, before even doing a `commitment_signed` dance on the downstream channel. This implies that our `ChannelMonitorUpdate`s "go out" in the right order - first we ensure we'll get our money by writing the preimage down, then we write the update that resolves giving money on the downstream node. This is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are generated, but of course looking forward we want to support asynchronous updates, which may complete in any order.

Thus, here, we enforce the correct ordering by blocking the downstream `ChannelMonitorUpdate` until the upstream one completes. Like the `PaymentSent` event handling, we do so only for the `revoke_and_ack` `ChannelMonitorUpdate`, ensuring the preimage-containing upstream update has a full RTT to complete before we actually manage to slow anything down.
Parent: 87ec087
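To make the race concrete: with asynchronous persistence, nothing inherently stops the downstream (B <-> C) monitor update from completing before the upstream (A <-> B) preimage write. Below is a minimal, self-contained sketch of the gating idea - every type and name is an illustrative stand-in, not LDK's actual API:

	// Sketch only: models holding a downstream RAA monitor update until the
	// upstream, preimage-carrying update has been durably persisted.
	use std::collections::HashMap;

	#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
	struct ChanId(u8);

	#[derive(Default)]
	struct RaaUpdateGate {
		// downstream channel -> upstream channel whose preimage write it awaits
		blocked: HashMap<ChanId, ChanId>,
	}

	impl RaaUpdateGate {
		// Called when we claim upstream off an `update_fulfill_htlc`: hold the
		// downstream RAA update until the preimage-carrying update is durable.
		fn block_downstream(&mut self, downstream: ChanId, upstream: ChanId) {
			self.blocked.insert(downstream, upstream);
		}

		// An async persist completed; release downstream updates waiting on it.
		fn upstream_update_completed(&mut self, upstream: ChanId) -> Vec<ChanId> {
			let released: Vec<ChanId> = self.blocked.iter()
				.filter(|(_, up)| **up == upstream)
				.map(|(down, _)| *down)
				.collect();
			for chan in &released {
				self.blocked.remove(chan);
			}
			released
		}

		fn raa_update_held(&self, chan: ChanId) -> bool {
			self.blocked.contains_key(&chan)
		}
	}

	fn main() {
		let (upstream_ab, downstream_bc) = (ChanId(0), ChanId(1));
		let mut gate = RaaUpdateGate::default();

		// B received the preimage from C and claims on A <-> B; the B <-> C RAA
		// update must not "go out" until the A <-> B preimage update is on disk.
		gate.block_downstream(downstream_bc, upstream_ab);
		assert!(gate.raa_update_held(downstream_bc));

		// Once the upstream write completes, the downstream update is released,
		// so out-of-order async completions can never lose the preimage.
		assert_eq!(gate.upstream_update_completed(upstream_ab), vec![downstream_bc]);
		assert!(!gate.raa_update_held(downstream_bc));
	}

The actual change threads this through `RAAMonitorUpdateBlockingAction` entries and event-completion actions, as the diffs below show.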

File tree: 3 files changed, +196 −37 lines


lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 133 additions & 4 deletions
@@ -3021,18 +3021,27 @@ fn test_blocked_chan_preimage_release() {
 	check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-	// Finish the CS dance between nodes[0] and nodes[1].
-	commitment_signed_dance!(nodes[1], nodes[0], as_htlc_fulfill_updates.commitment_signed, false);
+	// Finish the CS dance between nodes[0] and nodes[1]. Note that the final RAA CS is held
+	// until the full set of `ChannelMonitorUpdate`s on the nodes[1] <-> nodes[2] channel are
+	// complete - while the preimage that we care about ensuring is on disk did make it there
+	// above, the holding logic doesn't care about the type of update, it just cares that there is one.
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
+	check_added_monitors(&nodes[1], 1);
+	let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
+	assert!(a.is_none());
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
 	check_added_monitors(&nodes[1], 0);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 3);
 	if let Event::PaymentSent { .. } = events[0] {} else { panic!(); }
 	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
 	if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
 
-	// The event processing should release the last RAA update.
-	check_added_monitors(&nodes[1], 1);
+	// The event processing should release the last RAA updates on both channels.
+	check_added_monitors(&nodes[1], 2);
 
 	// When we fetch the next update the message getter will generate the next update for nodes[2],
 	// generating a further monitor update.
@@ -3043,3 +3052,123 @@ fn test_blocked_chan_preimage_release() {
 	commitment_signed_dance!(nodes[2], nodes[1], bs_htlc_fulfill_updates.commitment_signed, false);
 	expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
 }
+
+fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
+	// When we forward a payment and receive an `update_fulfill_htlc` message from the downstream
+	// channel, we immediately claim the HTLC on the upstream channel, before even doing a
+	// `commitment_signed` dance on the downstream channel. This implies that our
+	// `ChannelMonitorUpdate`s "go out" in the right order - first we ensure we'll get our money,
+	// then we write the update that resolves giving money on the downstream node. This is safe as
+	// long as `ChannelMonitorUpdate`s complete in the order in which they are generated, but of
+	// course this may not be the case. For asynchronous update writes, we have to ensure monitor
+	// updates can block each other, preventing the inversion all together.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+	let persister;
+	let new_chain_monitor;
+	let nodes_1_deserialized;
+
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+	let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+	// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+	// `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+	// on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
+	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+
+	nodes[2].node.claim_funds(payment_preimage);
+	check_added_monitors(&nodes[2], 1);
+	expect_payment_claimed!(nodes[2], payment_hash, 100_000);
+
+	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+	// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+	// for it since the monitor update is marked in-progress.
+	check_added_monitors(&nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we
+	// won't get the preimage when the nodes reconnect, at which point we have to ensure we get it
+	// from the ChannelMonitor.
+	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+	check_added_monitors(&nodes[1], 1);
+	if complete_bc_commitment_dance {
+		let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+		nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
+		check_added_monitors(&nodes[2], 1);
+		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
+		check_added_monitors(&nodes[2], 1);
+		let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+		// At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the
+		// preimage in the A <-> B channel, which will prevent it from persisting the
+		// `ChannelMonitorUpdate` here to avoid "losing" the preimage.
+		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_raa);
+		check_added_monitors(&nodes[1], 0);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
+	// Now reload node B
+	let manager_b = nodes[1].node.encode();
+
+	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+	// If we used the latest ChannelManager to reload from, we should have both channels still
+	// live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
+	// before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
+	// When we call `timer_tick_occurred` we will get that monitor update back, which we'll
+	// complete after reconnecting to our peers.
+	persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	nodes[1].node.timer_tick_occurred();
+	check_added_monitors(&nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
+	// the end go ahead and do that, though the -2 in `reconnect_nodes` indicates that we
+	// expect to *not* receive the final RAA ChannelMonitorUpdate.
+	if complete_bc_commitment_dance {
+		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	} else {
+		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, -2), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+	}
+
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	// (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
+	// disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
+	// process.
+	let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+	nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+	// When we fetch B's HTLC update messages here (now that the ChannelMonitorUpdate has
+	// completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
+	// channel.
+	let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+	check_added_monitors(&nodes[1], 1);
+
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+	do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);
+
+	expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, false);
+
+	// Finally, check that the payment was, ultimately, seen as sent by node A.
+	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
+
+#[test]
+fn test_inverted_mon_completion_order() {
+	do_test_inverted_mon_completion_order(true);
+	do_test_inverted_mon_completion_order(false);
+}
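Assuming a standard checkout of the repository, both variants of the new test should be runnable with something like:

	cargo test -p lightning inverted_mon_completion_order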

lightning/src/ln/channelmanager.rs

Lines changed: 35 additions & 10 deletions
@@ -4406,9 +4406,12 @@ where
 		self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
 	}
 
-	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
+	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
+				if let Some(pubkey) = next_channel_counterparty_node_id {
+					debug_assert_eq!(pubkey, path[0].pubkey);
+				}
 				let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
 					channel_funding_outpoint: next_channel_outpoint,
 					counterparty_node_id: path[0].pubkey,
@@ -4435,7 +4438,17 @@ where
 							next_channel_id: Some(next_channel_outpoint.to_channel_id()),
 							outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
 						},
-						downstream_counterparty_and_funding_outpoint: None,
+						downstream_counterparty_and_funding_outpoint:
+							if let Some(node_id) = next_channel_counterparty_node_id {
+								Some((node_id, next_channel_outpoint, completed_blocker))
+							} else {
+								// We can only get `None` here if we are processing a
+								// `ChannelMonitor`-originated event, in which case we
+								// don't care about ensuring we wake the downstream
+								// channel's monitor updating - the channel is already
+								// closed.
+								None
+							},
 					})
 				} else { None }
 			});
@@ -5148,13 +5161,27 @@ where
 			match peer_state.channel_by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
 					let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan);
+					if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
+						peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
+							.or_insert_with(Vec::new)
+							.push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
+					}
+					// Note that we do not need to push an `actions_blocking_raa_monitor_updates`
+					// entry here, even though we *do* need to block the next RAA coming in from
+					// generating a monitor update which we let fly. We do this instead in the
+					// `claim_funds_internal` by attaching a `ReleaseRAAChannelMonitorUpdate`
+					// action to the event generated when we "claim" the sent payment. This is
+					// guaranteed to all complete before we process the RAA even though there is no
+					// lock held through that point as we aren't allowed to see another P2P message
+					// from the counterparty until we return, but `claim_funds_internal` runs
+					// first.
 					funding_txo = chan.get().get_funding_txo().expect("We won't accept a fulfill until funded");
 					res
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
-		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
+		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, Some(*counterparty_node_id), funding_txo);
 		Ok(())
 	}
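`RAAMonitorUpdateBlockingAction::from_prev_hop_data` is defined outside this diff's context lines. Judging by its use above, it plausibly just records which inbound-channel HTLC claim must complete before the blocked RAA monitor update may be released - a sketch, with the variant and field names assumed rather than confirmed by this diff:

	impl RAAMonitorUpdateBlockingAction {
		fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
			// Tie the blocked RAA monitor update to the upstream channel and
			// HTLC whose preimage-carrying update must land on disk first.
			// (Variant and field names here are assumptions.)
			Self::ForwardedPaymentInboundClaim {
				channel_id: prev_hop.outpoint.to_channel_id(),
				htlc_id: prev_hop.htlc_id,
			}
		}
	}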

@@ -5361,12 +5388,10 @@ where
 			match peer_state.channel_by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
 					let funding_txo = chan.get().get_funding_txo();
-					let mon_update_blocked = self.pending_events.lock().unwrap().iter().any(|(_, action)| {
-						action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-							channel_funding_outpoint: funding_txo.expect("We won't accept an RAA until funded"),
-							counterparty_node_id: *counterparty_node_id,
-						})
-					});
+					let mon_update_blocked = self.raa_monitor_updates_held(
+						&peer_state.actions_blocking_raa_monitor_updates,
+						chan.get().get_funding_txo().expect("We won't accept an RAA until funded"),
+						*counterparty_node_id);
 					let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self,
 						chan.get_mut().revoke_and_ack(&msg, &self.logger, mon_update_blocked), chan);
 					let res = if let Some(monitor_update) = monitor_update_opt {
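The `raa_monitor_updates_held` helper is likewise outside the diff's context. Given its call site, it presumably reports whether the channel has any queued blocking actions or a still-pending `ReleaseRAAChannelMonitorUpdate` event-completion action (the check the removed inline code performed). A sketch under those assumptions, including an assumed map type:

	fn raa_monitor_updates_held(&self,
		actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
		channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
	) -> bool {
		// Held if a per-channel blocking action is queued...
		actions_blocking_raa_monitor_updates.get(&channel_funding_outpoint.to_channel_id())
			.map(|actions| !actions.is_empty()).unwrap_or(false)
		// ...or if an event whose completion releases the RAA monitor update
		// is still pending, as the removed code checked directly.
		|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
			action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
				channel_funding_outpoint, counterparty_node_id
			})
		})
	}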
@@ -5547,7 +5572,7 @@ where
 				MonitorEvent::HTLCEvent(htlc_update) => {
 					if let Some(preimage) = htlc_update.payment_preimage {
 						log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-						self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
+						self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, counterparty_node_id, funding_outpoint);
 					} else {
 						log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 						let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };

lightning/src/ln/functional_test_utils.rs

Lines changed: 28 additions & 23 deletions
@@ -74,6 +74,20 @@ pub fn mine_transactions<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, txn: &[&Tra
 	let height = node.best_block_info().1 + 1;
 	confirm_transactions_at(node, txn, height);
 }
+/// Mine a single block containing the given transaction without extra checks which may impact
+/// ChannelManager state.
+pub fn mine_transaction_without_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
+	let height = node.best_block_info().1 + 1;
+	let mut block = Block {
+		header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
+		txdata: Vec::new(),
+	};
+	for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
+		block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
+	}
+	block.txdata.push((*tx).clone());
+	do_connect_block_without_checks(node, block, false);
+}
 /// Mine the given transaction at the given height, mining blocks as required to build to that
 /// height
 ///
@@ -179,19 +193,19 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) ->
 	assert!(depth >= 1);
 	for i in 1..depth {
 		let prev_blockhash = block.header.block_hash();
-		do_connect_block(node, block, skip_intermediaries);
+		do_connect_block_with_checks(node, block, skip_intermediaries);
 		block = Block {
 			header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height + i, bits: 42, nonce: 42 },
 			txdata: vec![],
 		};
 	}
 	let hash = block.header.block_hash();
-	do_connect_block(node, block, false);
+	do_connect_block_with_checks(node, block, false);
 	hash
 }
 
 pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
-	do_connect_block(node, block.clone(), false);
+	do_connect_block_with_checks(node, block.clone(), false);
 }
 
 fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
@@ -201,8 +215,14 @@ fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
 	}
 }
 
-fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
+fn do_connect_block_with_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
+	call_claimable_balances(node);
+	do_connect_block_without_checks(node, block, skip_intermediaries);
 	call_claimable_balances(node);
+	node.node.test_process_background_events();
+}
+
+fn do_connect_block_without_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
 	let height = node.best_block_info().1 + 1;
 	#[cfg(feature = "std")] {
 		eprintln!("Connecting block using Block Connection Style: {:?}", *node.connect_style.borrow());
@@ -254,8 +274,6 @@ fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, sk
 			}
 		}
 	}
-	call_claimable_balances(node);
-	node.node.test_process_background_events();
 	node.blocks.lock().unwrap().push((block, height));
 }
 
@@ -1619,20 +1637,7 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '
 	check_added_monitors!(node_a, 1);
 
 	// If this commitment signed dance was due to a claim, don't check for an RAA monitor update.
-	let got_claim = node_a.node.pending_events.lock().unwrap().iter().any(|(ev, action)| {
-		let matching_action = if let Some(channelmanager::EventCompletionAction::ReleaseRAAChannelMonitorUpdate
-			{ channel_funding_outpoint, counterparty_node_id }) = action
-		{
-			if channel_funding_outpoint.to_channel_id() == commitment_signed.channel_id {
-				assert_eq!(*counterparty_node_id, node_b.node.get_our_node_id());
-				true
-			} else { false }
-		} else { false };
-		if matching_action {
-			if let Event::PaymentSent { .. } = ev {} else { panic!(); }
-		}
-		matching_action
-	});
+	let got_claim = node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), commitment_signed.channel_id);
 	if fail_backwards { assert!(!got_claim); }
 	commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false, got_claim);
 
@@ -2936,7 +2941,7 @@ pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a,
 		}
 		if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
 			let commitment_update = chan_msgs.2.unwrap();
-			if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+			if pending_htlc_adds.1 > 0 { // We use -1/-2 to denote a response commitment_signed
 				assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
 			}
 			assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
@@ -2952,7 +2957,7 @@ pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a,
 				node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail);
 			}
 
-			if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+			if pending_htlc_adds.1 >= 0 { // We use -1/-2 to denote a response commitment_signed
 				commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
 			} else {
 				node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed);
@@ -2961,7 +2966,7 @@ pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a,
 				// No commitment_signed so get_event_msg's assert(len == 1) passes
 				node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack);
 				assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
-				check_added_monitors!(node_a, 1);
+				check_added_monitors(node_a, if pending_htlc_adds.1 == -1 { 1 } else { 0 });
 			}
 		} else {
 			assert!(chan_msgs.2.is_none());