@@ -728,15 +728,17 @@ impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> even
 #[cfg(test)]
 mod tests {
 	use bitcoin::BlockHeader;
-	use ::{check_added_monitors, check_closed_broadcast, check_closed_event, expect_payment_sent};
-	use ::{get_local_commitment_txn, get_route_and_payment_hash, unwrap_send_err};
+	use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
+	use ::{expect_payment_sent, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
+	use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
 	use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
 	use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
 	use ln::channelmanager::PaymentSendFailure;
 	use ln::features::InitFeatures;
 	use ln::functional_test_utils::*;
+	use ln::msgs::ChannelMessageHandler;
 	use util::errors::APIError;
-	use util::events::{ClosureReason, MessageSendEventsProvider};
+	use util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
 	use util::test_utils::{OnRegisterOutput, TxOutReference};
 
 	/// Tests that in-block dependent transactions are processed by `block_connected` when not
@@ -782,6 +784,80 @@ mod tests {
 		nodes[1].node.get_and_clear_pending_events();
 	}
 
+	#[test]
+	fn test_async_ooo_offchain_updates() {
+		// Test that if we have multiple offchain updates being persisted and they complete
+		// out-of-order, the ChainMonitor waits until all have completed before informing the
+		// ChannelManager.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+		// Route two payments to be claimed at the same time.
+		let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+		let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+
+		chanmon_cfgs[1].persister.offchain_sync_monitor_persistences.lock().unwrap().clear();
+		chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
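+		// With the persister returning TemporaryFailure, each claim below produces a
+		// monitor update which is left pending until it is completed via
+		// channel_monitor_updated further down.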
+
+		nodes[1].node.claim_funds(payment_preimage_1);
+		check_added_monitors!(nodes[1], 1);
+		nodes[1].node.claim_funds(payment_preimage_2);
+		check_added_monitors!(nodes[1], 1);
+
+		chanmon_cfgs[1].persister.set_update_ret(Ok(()));
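+		// Persistence succeeds again from here on; the two updates queued above still
+		// have to be completed by hand.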
+
+		let persistences = chanmon_cfgs[1].persister.offchain_sync_monitor_persistences.lock().unwrap().clone();
+		assert_eq!(persistences.len(), 1);
+		let (funding_txo, updates) = persistences.iter().next().unwrap();
+		assert_eq!(updates.len(), 2);
+
+		// Note that updates is a HashMap so the ordering here is actually random. This shouldn't
+		// fail either way, but if it fails intermittently it is dependent on the ordering of updates.
+		let mut update_iter = updates.iter();
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
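+		// Only one of the two updates has completed, so the ChainMonitor must not release
+		// any monitor events to the ChannelManager yet.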
+		assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
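+		// Once both updates have completed, the pending monitor events are released and
+		// nodes[1] can hand its update_fulfill_htlc messages to nodes[0].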
+
+		// Now manually walk the commitment signed dance - because we claimed two payments
+		// back-to-back it doesn't fit into the neat walk that commitment_signed_dance does.
+
+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
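+		// PaymentSent is expected as soon as the preimage is learned, whereas the per-path
+		// PaymentPathSuccessful events only fire once the HTLCs are irrevocably removed
+		// later in the dance.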
+		expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+		check_added_monitors!(nodes[0], 1);
+		let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+		check_added_monitors!(nodes[1], 1);
+		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update);
+		check_added_monitors!(nodes[1], 1);
+		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
+		expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
+		check_added_monitors!(nodes[0], 1);
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
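+		// With B's first RAA processed, payment 1's HTLC is irrevocably removed on both
+		// sides, so its PaymentPathSuccessful event is generated now.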
+		expect_payment_path_successful!(nodes[0]);
+		check_added_monitors!(nodes[0], 1);
+		let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+		check_added_monitors!(nodes[1], 1);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update);
+		check_added_monitors!(nodes[1], 1);
+		let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
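+		// Likewise, payment 2's path only completes once B's second RAA has been handled.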
+		expect_payment_path_successful!(nodes[0]);
+		check_added_monitors!(nodes[0], 1);
+	}
+
 	fn do_chainsync_pauses_events(block_timeout: bool) {
 		// When a chainsync monitor update occurs, any MonitorUpdates should be held before being
 		// passed upstream. This tests that behavior, as well as some ways it might go wrong.