
Commit 13c72ab

Add infra to block ChannelMonitorUpdates on forwarded claims
When we forward a payment and receive an `update_fulfill_htlc` message from the downstream channel, we immediately claim the HTLC on the upstream channel, before even doing a `commitment_signed` dance on the downstream channel. This implies that our `ChannelMonitorUpdate`s "go out" in the right order - first we ensure we'll get our money by writing the preimage down, then we write the update that resolves giving money on the downstream node.

This is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are generated, but of course looking forward we want to support asynchronous updates, which may complete in any order.

Here we add infrastructure to handle downstream `ChannelMonitorUpdate`s which are blocked on an upstream preimage-containing one. We don't yet actually do the blocking which will come in a future commit.
1 parent 1e16b23 commit 13c72ab
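
As a rough illustration of the mechanism this commit introduces, here is a minimal, self-contained sketch (not LDK code) of the per-channel blocker bookkeeping. `PeerStateModel`, `Blocker`, `add_blocker` and `release_blocker` are hypothetical stand-ins for the real `PeerState::actions_blocking_raa_monitor_updates`, `RAAMonitorUpdateBlockingAction`, `raa_monitor_updates_held` and `handle_monitor_update_release` pieces added in the diff below.

use std::collections::BTreeMap;

type ChannelId = [u8; 32];

// Stand-in for RAAMonitorUpdateBlockingAction::ForwardedPaymentOtherChannelClaim:
// identifies the upstream (inbound) claim the blocked channel is waiting on.
#[derive(Clone, PartialEq, Eq, Debug)]
struct Blocker { inbound_channel_id: ChannelId, htlc_id: u64 }

#[derive(Default)]
struct PeerStateModel {
	// Mirrors `actions_blocking_raa_monitor_updates`: blocked channel -> outstanding blockers.
	actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<Blocker>>,
}

impl PeerStateModel {
	// Note that `channel_id`'s RAA monitor update must wait for `blocker`.
	fn add_blocker(&mut self, channel_id: ChannelId, blocker: Blocker) {
		self.actions_blocking_raa_monitor_updates
			.entry(channel_id).or_insert_with(Vec::new).push(blocker);
	}

	// Mirrors the map check in `raa_monitor_updates_held`: any outstanding blocker
	// keeps the RAA monitor update queued.
	fn raa_monitor_updates_held(&self, channel_id: &ChannelId) -> bool {
		self.actions_blocking_raa_monitor_updates
			.get(channel_id).map(|v| !v.is_empty()).unwrap_or(false)
	}

	// Mirrors the `blockers.retain(..)` step in `handle_monitor_update_release`.
	fn release_blocker(&mut self, channel_id: &ChannelId, blocker: &Blocker) {
		if let Some(blockers) = self.actions_blocking_raa_monitor_updates.get_mut(channel_id) {
			blockers.retain(|b| b != blocker);
		}
	}
}

fn main() {
	// A forwarded claim: the downstream channel's RAA monitor update may not
	// proceed until the upstream, preimage-containing update is durably applied.
	let downstream_channel = [1u8; 32];
	let blocker = Blocker { inbound_channel_id: [2u8; 32], htlc_id: 7 };

	let mut peer = PeerStateModel::default();
	peer.add_blocker(downstream_channel, blocker.clone());
	assert!(peer.raa_monitor_updates_held(&downstream_channel));

	// Once the upstream preimage update completes, the blocker is dropped and
	// the downstream update can be released.
	peer.release_blocker(&downstream_channel, &blocker);
	assert!(!peer.raa_monitor_updates_held(&downstream_channel));
}

The real code keys this map by channel ID inside each per-peer `PeerState`, checks it (together with pending events) in `raa_monitor_updates_held`, and clears completed blockers in `handle_monitor_update_release`; this commit only adds the bookkeeping, with the actual blocking left to a future commit.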

File tree

1 file changed: +122 -26 lines changed


lightning/src/ln/channelmanager.rs

Lines changed: 122 additions & 26 deletions
@@ -504,12 +504,24 @@ pub(crate) enum MonitorUpdateCompletionAction {
 	/// event can be generated.
 	PaymentClaimed { payment_hash: PaymentHash },
 	/// Indicates an [`events::Event`] should be surfaced to the user.
-	EmitEvent { event: events::Event },
+	EmitEventAndFreeOtherChannel {
+		event: events::Event,
+		downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+	},
 }
 
 impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
 	(0, PaymentClaimed) => { (0, payment_hash, required) },
-	(2, EmitEvent) => { (0, event, upgradable_required) },
+	(2, EmitEventAndFreeOtherChannel) => {
+		(0, event, upgradable_required),
+		// LDK prior to 0.0.115 did not have this field as the monitor update application order was
+		// required by clients. If we downgrade to something prior to 0.0.115 this may result in
+		// monitor updates which aren't properly blocked or resumed, however that's fine - we don't
+		// support async monitor updates even in LDK 0.0.115 and once we do we'll require no
+		// downgrades to prior versions. Thus, while this would break on downgrade, we don't
+		// support it even without downgrade, so if it breaks its not on us ¯\_(ツ)_/¯.
+		(1, downstream_counterparty_and_funding_outpoint, option),
+	},
 );
 
 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -526,6 +538,29 @@ impl_writeable_tlv_based_enum!(EventCompletionAction,
 	};
 );
 
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub(crate) enum RAAMonitorUpdateBlockingAction {
+	/// The inbound channel's channel_id
+	ForwardedPaymentOtherChannelClaim {
+		channel_id: [u8; 32],
+		htlc_id: u64,
+	},
+}
+
+impl RAAMonitorUpdateBlockingAction {
+	fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
+		Self::ForwardedPaymentOtherChannelClaim {
+			channel_id: prev_hop.outpoint.to_channel_id(),
+			htlc_id: prev_hop.htlc_id,
+		}
+	}
+}
+
+impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
+	(0, ForwardedPaymentOtherChannelClaim) => { (0, channel_id, required), (2, htlc_id, required) }
+;);
+
+
 /// State we hold per-peer.
 pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// `temporary_channel_id` or `channel_id` -> `channel`.
@@ -554,6 +589,11 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
 	/// duplicates do not occur, so such channels should fail without a monitor update completing.
 	monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+	/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
+	/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
+	/// will remove a preimage that needs to be durably in an upstream channel first), we put an
+	/// entry here to note that the channel with the key's ID is blocked on a set of actions.
+	actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
 	/// The peer is currently connected (i.e. we've seen a
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
@@ -4386,23 +4426,24 @@ where
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
 				let prev_outpoint = hop_data.outpoint;
+				let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
 				let res = self.claim_funds_from_hop(hop_data, payment_preimage,
 					|htlc_claim_value_msat| {
 						if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
 							let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
 								Some(claimed_htlc_value - forwarded_htlc_value)
 							} else { None };
 
-							let prev_channel_id = Some(prev_outpoint.to_channel_id());
-							let next_channel_id = Some(next_channel_id);
-
-							Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
-								fee_earned_msat,
-								claim_from_onchain_tx: from_onchain,
-								prev_channel_id,
-								next_channel_id,
-								outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
-							}})
+							Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+								event: events::Event::PaymentForwarded {
+									fee_earned_msat,
+									claim_from_onchain_tx: from_onchain,
+									prev_channel_id: Some(prev_outpoint.to_channel_id()),
+									next_channel_id: Some(next_channel_id),
+									outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+								},
+								downstream_counterparty_and_funding_outpoint: None,
+							})
 						} else { None }
 					});
 				if let Err((pk, err)) = res {
@@ -4429,8 +4470,13 @@ where
 					}, None));
 				}
 			},
-			MonitorUpdateCompletionAction::EmitEvent { event } => {
+			MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+				event, downstream_counterparty_and_funding_outpoint
+			} => {
 				self.pending_events.lock().unwrap().push_back((event, None));
+				if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
+					self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+				}
 			},
 		}
 	}
@@ -5277,6 +5323,36 @@ where
 		}
 	}
 
+	fn raa_monitor_updates_held(&self,
+		actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+		channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+	) -> bool {
+		actions_blocking_raa_monitor_updates
+			.get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+		|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
+			action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+				channel_funding_outpoint,
+				counterparty_node_id,
+			})
+		})
+	}
+
+	pub(crate) fn test_raa_monitor_updates_held(&self, counterparty_node_id: PublicKey,
+		channel_id: [u8; 32])
+	-> bool {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+			let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+			let peer_state = &mut *peer_state_lck;
+
+			if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
+				return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+					chan.get_funding_txo().unwrap(), counterparty_node_id);
+			}
+		}
+		false
+	}
+
 	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 		let (htlcs_to_fail, res) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -5939,24 +6015,28 @@ where
 		self.pending_outbound_payments.clear_pending_payments()
 	}
 
-	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint) {
+	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
 				let mut peer_state_lck = peer_state_mtx.lock().unwrap();
 				let peer_state = &mut *peer_state_lck;
-				if self.pending_events.lock().unwrap().iter()
-					.any(|(_ev, action_opt)| action_opt == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-						channel_funding_outpoint, counterparty_node_id
-					}))
-				{
-					// Check that, while holding the peer lock, we don't have another event
-					// blocking any monitor updates for this channel. If we do, let those
-					// events be the ones that ultimately release the monitor update(s).
-					log_trace!(self.logger, "Delaying monitor unlock for channel {} as another event is pending",
+
+				if let Some(blocker) = &completed_blocker {
+					if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
+						.get_mut(&channel_funding_outpoint.to_channel_id())
+					{
+						blockers.retain(|iter| iter != blocker);
+					}
+				}
+
+				if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+					channel_funding_outpoint, counterparty_node_id) {
+					log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
 						log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
 					return;
 				}
+
 				if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
 					debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
 					if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
@@ -5993,7 +6073,7 @@ where
 			EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
 				channel_funding_outpoint, counterparty_node_id
 			} => {
-				self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint);
+				self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
 			}
 		}
 	}
@@ -6644,6 +6724,7 @@ where
 					latest_features: init_msg.features.clone(),
 					pending_msg_events: Vec::new(),
 					monitor_update_blocked_actions: BTreeMap::new(),
+					actions_blocking_raa_monitor_updates: BTreeMap::new(),
 					is_connected: true,
 				}));
 			},
@@ -7770,6 +7851,7 @@ where
 				latest_features: Readable::read(reader)?,
 				pending_msg_events: Vec::new(),
 				monitor_update_blocked_actions: BTreeMap::new(),
+				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				is_connected: false,
 			};
 			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -7852,7 +7934,7 @@ where
 	let mut probing_cookie_secret: Option<[u8; 32]> = None;
 	let mut claimable_htlc_purposes = None;
 	let mut pending_claiming_payments = Some(HashMap::new());
-	let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
+	let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
 	let mut events_override = None;
 	read_tlv_fields!(reader, {
 		(1, pending_outbound_payments_no_retry, option),
@@ -8156,7 +8238,21 @@ where
 	}
 
 	for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
-		if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+		if let Some(peer_state) = per_peer_state.get(&node_id) {
+			for (_, actions) in monitor_update_blocked_actions.iter() {
+				for action in actions.iter() {
+					if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+						downstream_counterparty_and_funding_outpoint:
+							Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+					} = action {
+						if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+							blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
+								.entry(blocked_channel_outpoint.to_channel_id())
+								.or_insert_with(Vec::new).push(blocking_action.clone());
+						}
+					}
+				}
+			}
 			peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
 		} else {
 			log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
