@@ -44,10 +44,19 @@ static struct smc_lgr_list smc_lgr_list = { /* established link groups */
44
44
static atomic_t lgr_cnt = ATOMIC_INIT (0 ); /* number of existing link groups */
45
45
static DECLARE_WAIT_QUEUE_HEAD (lgrs_deleted );
46
46
47
/* context for deferred "link up" processing of one link group;
 * allocated in smcr_port_add() and freed by smc_link_up_work()
 */
struct smc_ib_up_work {
	struct work_struct	work;		/* queued via schedule_work() */
	struct smc_link_group	*lgr;		/* link group to process */
	struct smc_ib_device	*smcibdev;	/* IB device whose port came up */
	u8			ibport;		/* port number (1-based; arrays use port - 1) */
};
47
54
static void smc_buf_free (struct smc_link_group * lgr , bool is_rmb ,
48
55
struct smc_buf_desc * buf_desc );
49
56
static void __smc_lgr_terminate (struct smc_link_group * lgr , bool soft );
50
57
58
+ static void smc_link_up_work (struct work_struct * work );
59
+
51
60
/* return head of link group list and its lock for a given link group */
52
61
static inline struct list_head * smc_lgr_list_head (struct smc_link_group * lgr ,
53
62
spinlock_t * * lgr_lock )
@@ -928,6 +937,83 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
928
937
}
929
938
}
930
939
940
/* link is up - establish alternate link if applicable */
static void smcr_link_up(struct smc_link_group *lgr,
			 struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link *link = NULL;

	/* nothing to do if the lgr is going away or already uses
	 * links on a second device pair
	 */
	if (list_empty(&lgr->list) ||
	    lgr->type == SMC_LGR_SYMMETRIC ||
	    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		return;

	if (lgr->role == SMC_SERV) {
		/* trigger local add link processing */
		link = smc_llc_usable_link(lgr);
		if (!link)
			return;
		/* tbd: call smc_llc_srv_add_link_local(link); */
	} else {
		/* invite server to start add link processing */
		u8 gid[SMC_GID_SIZE];

		/* resolve the GID for this port/vlan; bail out on failure */
		if (smc_ib_determine_gid(smcibdev, ibport, lgr->vlan_id, gid,
					 NULL))
			return;
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* some other llc task is ongoing */
			/* NOTE(review): the return value of the wait
			 * (timeout / -ERESTARTSYS) is ignored; the re-checks
			 * below are the only guard after waking up — confirm
			 * this is intended
			 */
			wait_event_interruptible_timeout(lgr->llc_waiter,
				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
				SMC_LLC_WAIT_TIME);
		}
		/* re-validate after the (possible) wait */
		if (list_empty(&lgr->list) ||
		    !smc_ib_port_active(smcibdev, ibport))
			return; /* lgr or device no longer active */
		link = smc_llc_usable_link(lgr);
		if (!link)
			return;
		/* mac[] is indexed by port - 1 */
		smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid,
				      NULL, SMC_LLC_REQ);
	}
}
980
+
981
/* an IB port became active - queue deferred "link up" processing for
 * every link group that uses this device's pnetid and could still add
 * an alternate link
 */
void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_ib_up_work *ib_work;
	struct smc_link_group *lgr, *n;

	/* NOTE(review): smc_lgr_list.list is walked here without visibly
	 * taking smc_lgr_list.lock — confirm the caller's context makes
	 * this safe (GFP_KERNEL below would forbid holding the spinlock)
	 */
	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		/* skip lgrs on a different pnetid and lgrs that already
		 * have links on a second device pair
		 */
		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN) ||
		    lgr->type == SMC_LGR_SYMMETRIC ||
		    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
			continue;
		/* best effort: on allocation failure just skip this lgr */
		ib_work = kmalloc(sizeof(*ib_work), GFP_KERNEL);
		if (!ib_work)
			continue;
		INIT_WORK(&ib_work->work, smc_link_up_work);
		/* NOTE(review): lgr is stored without a reference count;
		 * smc_link_up_work() only re-checks list_empty() — verify
		 * the lgr cannot be freed before the work item runs
		 */
		ib_work->lgr = lgr;
		ib_work->smcibdev = smcibdev;
		ib_work->ibport = ibport;
		/* ib_work ownership passes to smc_link_up_work(), which
		 * frees it
		 */
		schedule_work(&ib_work->work);
	}
}
1002
+
1003
+ static void smc_link_up_work (struct work_struct * work )
1004
+ {
1005
+ struct smc_ib_up_work * ib_work = container_of (work ,
1006
+ struct smc_ib_up_work ,
1007
+ work );
1008
+ struct smc_link_group * lgr = ib_work -> lgr ;
1009
+
1010
+ if (list_empty (& lgr -> list ))
1011
+ goto out ;
1012
+ smcr_link_up (lgr , ib_work -> smcibdev , ib_work -> ibport );
1013
+ out :
1014
+ kfree (ib_work );
1015
+ }
1016
+
931
1017
/* Determine vlan of internal TCP socket.
932
1018
* @vlan_id: address to store the determined vlan id into
933
1019
*/
0 commit comments