@@ -115,7 +115,6 @@ static int smc_release(struct socket *sock)
 		goto out;
 
 	smc = smc_sk(sk);
-	sock_hold(sk);
 	if (sk->sk_state == SMC_LISTEN)
 		/* smc_close_non_accepted() is called and acquires
 		 * sock lock for child sockets again
@@ -124,10 +123,7 @@ static int smc_release(struct socket *sock)
 	else
 		lock_sock(sk);
 
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-		sk->sk_state_change(sk);
-	} else {
+	if (!smc->use_fallback) {
 		rc = smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -136,20 +132,21 @@ static int smc_release(struct socket *sock)
 		sock_release(smc->clcsock);
 		smc->clcsock = NULL;
 	}
+	if (smc->use_fallback) {
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+		sk->sk_state_change(sk);
+	}
 
 	/* detach socket */
 	sock_orphan(sk);
 	sock->sk = NULL;
-	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
+	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
 		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
-	}
 	release_sock(sk);
 
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 out:
 	return rc;
}
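Note on the three hunks above: smc_release() no longer defers the last sock_put() to the sock_put_work delayed worker; instead, every party that can still touch the sock owns an explicit reference, and whichever sock_put() drops the count to zero frees the sock immediately. A minimal userspace sketch of that ownership rule follows; smc_obj, smc_obj_hold() and smc_obj_put() are invented names for this illustration, standing in for the kernel's sock, sock_hold() and sock_put().

/*
 * Illustration only (not part of the patch): free-on-last-put
 * refcounting, with an extra "passive closing" reference in place
 * of a delayed worker.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct smc_obj {
	atomic_int refcnt;	/* one count per owner */
};

static struct smc_obj *smc_obj_new(void)
{
	struct smc_obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcnt, 1);	/* creator's reference */
	return o;
}

static void smc_obj_hold(struct smc_obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
}

static void smc_obj_put(struct smc_obj *o)
{
	/* last put frees immediately: no delayed worker needed */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		puts("last reference dropped, freeing");
		free(o);
	}
}

int main(void)
{
	struct smc_obj *o = smc_obj_new();

	smc_obj_hold(o);	/* "passive closing" reference */
	smc_obj_put(o);		/* peer closed: return it */
	smc_obj_put(o);		/* final put, frees the object */
	return 0;
}

The "passive closing" hold mirrors the reference the patch keeps for fallback sockets: the peer's close processing, not a timer, decides when that reference is returned.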
@@ -181,7 +178,6 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
 	INIT_LIST_HEAD(&smc->accept_q);
 	spin_lock_init(&smc->accept_q_lock);
-	INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
 
@@ -399,6 +395,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	int rc = 0;
 	u8 ibport;
 
+	sock_hold(&smc->sk); /* sock put in passive closing */
+
 	if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
 		/* peer has not signalled SMC-capability */
 		smc->use_fallback = true;
@@ -542,6 +540,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	mutex_unlock(&smc_create_lgr_pending);
 	smc_conn_free(&smc->conn);
 out_err:
+	if (smc->sk.sk_state == SMC_INIT)
+		sock_put(&smc->sk); /* passive closing */
 	return rc;
 }
 
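Note: smc_connect_rdma() takes the passive-closing reference on entry (hunk at line 395 above) but can fail before the peer is ever able to close the connection; in that case sk_state is still SMC_INIT and the reference must be dropped on the error path, or the sock would leak. Reusing the types from the earlier sketch (still_in_init is a hypothetical stand-in for the SMC_INIT check):

/* Illustration only: undo the up-front hold if the connect attempt
 * failed before responsibility passed to the passive-closing path. */
static void connect_err_path(struct smc_obj *o, int still_in_init)
{
	if (still_in_init)	/* patch: smc->sk.sk_state == SMC_INIT */
		smc_obj_put(o);	/* undo the hold taken at connect entry */
}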
@@ -620,7 +620,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 		new_sk->sk_state = SMC_CLOSED;
 		sock_set_flag(new_sk, SOCK_DEAD);
 		new_sk->sk_prot->unhash(new_sk);
-		sock_put(new_sk);
+		sock_put(new_sk); /* final */
 		*new_smc = NULL;
 		goto out;
 	}
@@ -637,7 +637,7 @@ static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
 {
 	struct smc_sock *par = smc_sk(parent);
 
-	sock_hold(sk);
+	sock_hold(sk); /* sock_put in smc_accept_unlink() */
 	spin_lock(&par->accept_q_lock);
 	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
 	spin_unlock(&par->accept_q_lock);
@@ -653,7 +653,7 @@ static void smc_accept_unlink(struct sock *sk)
 	list_del_init(&smc_sk(sk)->accept_q);
 	spin_unlock(&par->accept_q_lock);
 	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
-	sock_put(sk);
+	sock_put(sk); /* sock_hold in smc_accept_enqueue */
 }
 
 /* remove a sock from the accept queue to bind it to a new socket created
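Note: the two comment-only hunks above document an invariant rather than change behavior: the parent's accept queue owns exactly one reference per queued child, taken in smc_accept_enqueue() and returned in smc_accept_unlink(). In the same illustrative style as before, a queue whose entries count as owners (all names invented for this sketch):

/* Illustration only: a linked object is kept alive by its queue. */
struct node {
	struct smc_obj *obj;
	struct node *next;
};

static void enqueue(struct node **head, struct node *n, struct smc_obj *o)
{
	smc_obj_hold(o);	/* the queue keeps the child alive */
	n->obj = o;
	n->next = *head;
	*head = n;
}

static struct smc_obj *dequeue(struct node **head)
{
	struct node *n = *head;
	struct smc_obj *o = n->obj;

	*head = n->next;
	smc_obj_put(o);	/* return the queue's reference; the caller
			 * relies on the object's own remaining count,
			 * as smc_accept_dequeue() does */
	return o;
}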
@@ -671,7 +671,7 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 		smc_accept_unlink(new_sk);
 		if (new_sk->sk_state == SMC_CLOSED) {
 			new_sk->sk_prot->unhash(new_sk);
-			sock_put(new_sk);
+			sock_put(new_sk); /* final */
 			continue;
 		}
 		if (new_sock)
@@ -686,14 +686,11 @@ void smc_close_non_accepted(struct sock *sk)
 {
 	struct smc_sock *smc = smc_sk(sk);
 
-	sock_hold(sk);
 	lock_sock(sk);
 	if (!sk->sk_lingertime)
 		/* wait for peer closing */
 		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-	} else {
+	if (!smc->use_fallback) {
 		smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -706,14 +703,15 @@ void smc_close_non_accepted(struct sock *sk)
 		sock_release(tcp);
 	}
 	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
-		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+	} else {
+		if (sk->sk_state == SMC_CLOSED)
+			smc_conn_free(&smc->conn);
 	}
 	release_sock(sk);
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 }
 
 static int smc_serv_conf_first_link(struct smc_sock *smc)
@@ -937,6 +935,8 @@ static void smc_listen_work(struct work_struct *work)
 	smc_lgr_forget(new_smc->conn.lgr);
 	mutex_unlock(&smc_create_lgr_pending);
 out_err:
+	if (newsmcsk->sk_state == SMC_INIT)
+		sock_put(&new_smc->sk); /* passive closing */
 	newsmcsk->sk_state = SMC_CLOSED;
 	smc_conn_free(&new_smc->conn);
 	goto enqueue; /* queue new sock with sk_err set */
@@ -963,12 +963,15 @@ static void smc_tcp_listen_work(struct work_struct *work)
 		sock_hold(lsk); /* sock_put in smc_listen_work */
 		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
 		smc_copy_sock_settings_to_smc(new_smc);
-		schedule_work(&new_smc->smc_listen_work);
+		sock_hold(&new_smc->sk); /* sock_put in passive closing */
+		if (!schedule_work(&new_smc->smc_listen_work))
+			sock_put(&new_smc->sk);
 	}
 
 out:
 	release_sock(lsk);
 	lsk->sk_data_ready(lsk); /* no more listening, wake accept */
+	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
 }
 
 static int smc_listen(struct socket *sock, int backlog)
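Note: handing a reference to a work item has one subtlety, visible in the hunk above: schedule_work() returns false when the work is already queued, so the worker will run (and consume a reference) only once; the hold just taken must then be dropped again or it leaks. A sketch of the pattern, with try_schedule() and worker_fn() as hypothetical stand-ins for schedule_work() and smc_listen_work():

/* Hypothetical scheduler for this sketch; like schedule_work(), it
 * returns 0 if the work item was already queued. */
extern int try_schedule(void (*fn)(void *), void *arg);

static void worker_fn(void *arg)
{
	struct smc_obj *o = arg;

	/* ... perform the listen work ... */
	smc_obj_put(o);	/* the worker consumes its reference */
}

static void queue_worker(struct smc_obj *o)
{
	smc_obj_hold(o);	/* reference owned by the worker */
	if (!try_schedule(worker_fn, o))
		smc_obj_put(o);	/* already queued: drop the extra hold */
}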
@@ -1002,7 +1005,9 @@ static int smc_listen(struct socket *sock, int backlog)
 	sk->sk_ack_backlog = 0;
 	sk->sk_state = SMC_LISTEN;
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
-	schedule_work(&smc->tcp_listen_work);
+	sock_hold(sk); /* sock_hold in tcp_listen_worker */
+	if (!schedule_work(&smc->tcp_listen_work))
+		sock_put(sk);
 
 out:
 	release_sock(sk);
@@ -1019,6 +1024,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 	int rc = 0;
 
 	lsmc = smc_sk(sk);
+	sock_hold(sk); /* sock_put below */
 	lock_sock(sk);
 
 	if (lsmc->sk.sk_state != SMC_LISTEN) {
@@ -1053,6 +1059,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 
 out:
 	release_sock(sk);
+	sock_put(sk); /* sock_hold above */
 	return rc;
 }
 