Commit 51f1de7

Ursula Braun authored and davem330 committed
net/smc: replace sock_put worker by socket refcounting
Proper socket refcounting makes the sock_put worker obsolete.

Signed-off-by: Ursula Braun <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 8dce278 commit 51f1de7
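
Roughly, the scheme this commit switches to: every path that may later have to close the socket on the peer's behalf ("passive closing") takes its own reference, every scheduled work item keeps one for as long as the worker can run, and smc_release() / smc_close_non_accepted() drop the final reference directly, so the socket is freed as soon as the last holder lets go instead of after a timed sock_put worker. A minimal, single-threaded userspace sketch of that pairing (the model_* names are made up for illustration; the kernel uses sock_hold()/sock_put() on struct sock and decides who drops the "passive closing" reference based on socket state and fallback mode):

/*
 * Sketch only: a single-threaded userspace model of the reference pairing
 * this commit introduces.  The model_* names are hypothetical; they are not
 * part of the kernel sources touched by this patch.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct model_sock {
	atomic_int refcnt;		/* like sk->sk_refcnt */
	int closed_by_peer;		/* did the peer-driven close run? */
};

static void model_hold(struct model_sock *s)
{
	atomic_fetch_add(&s->refcnt, 1);
}

static void model_put(struct model_sock *s)
{
	if (atomic_fetch_sub(&s->refcnt, 1) == 1) {
		printf("last reference dropped, freeing socket\n");
		free(s);		/* no delayed sock_put worker needed */
	}
}

static struct model_sock *model_connect(void)
{
	struct model_sock *s = calloc(1, sizeof(*s));

	atomic_init(&s->refcnt, 1);	/* the owner's reference */
	model_hold(s);			/* "sock put in passive closing" */
	return s;
}

static void model_peer_close(struct model_sock *s)
{
	s->closed_by_peer = 1;
	model_put(s);			/* "passive closing" reference */
}

static void model_release(struct model_sock *s)
{
	if (!s->closed_by_peer)
		model_put(s);		/* peer never closed: drop its reference */
	model_put(s);			/* "final sock_put", frees on zero */
}

int main(void)
{
	struct model_sock *a = model_connect();
	struct model_sock *b = model_connect();

	model_peer_close(a);		/* peer closes first ...             */
	model_release(a);		/* ... then the application releases */
	model_release(b);		/* released without a peer close     */
	return 0;
}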

6 files changed, +88 -68 lines changed

net/smc/af_smc.c

Lines changed: 36 additions & 29 deletions
@@ -115,7 +115,6 @@ static int smc_release(struct socket *sock)
 		goto out;
 
 	smc = smc_sk(sk);
-	sock_hold(sk);
 	if (sk->sk_state == SMC_LISTEN)
 		/* smc_close_non_accepted() is called and acquires
 		 * sock lock for child sockets again
@@ -124,10 +123,7 @@ static int smc_release(struct socket *sock)
 	else
 		lock_sock(sk);
 
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-		sk->sk_state_change(sk);
-	} else {
+	if (!smc->use_fallback) {
 		rc = smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -136,20 +132,21 @@ static int smc_release(struct socket *sock)
 		sock_release(smc->clcsock);
 		smc->clcsock = NULL;
 	}
+	if (smc->use_fallback) {
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+		sk->sk_state_change(sk);
+	}
 
 	/* detach socket */
 	sock_orphan(sk);
 	sock->sk = NULL;
-	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
+	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
 		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
-	}
 	release_sock(sk);
 
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 out:
 	return rc;
 }
@@ -181,7 +178,6 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
 	INIT_LIST_HEAD(&smc->accept_q);
 	spin_lock_init(&smc->accept_q_lock);
-	INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
 
@@ -399,6 +395,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	int rc = 0;
 	u8 ibport;
 
+	sock_hold(&smc->sk); /* sock put in passive closing */
+
 	if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
 		/* peer has not signalled SMC-capability */
 		smc->use_fallback = true;
@@ -542,6 +540,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	mutex_unlock(&smc_create_lgr_pending);
 	smc_conn_free(&smc->conn);
 out_err:
+	if (smc->sk.sk_state == SMC_INIT)
+		sock_put(&smc->sk); /* passive closing */
 	return rc;
 }
 
@@ -620,7 +620,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 		new_sk->sk_state = SMC_CLOSED;
 		sock_set_flag(new_sk, SOCK_DEAD);
 		new_sk->sk_prot->unhash(new_sk);
-		sock_put(new_sk);
+		sock_put(new_sk); /* final */
 		*new_smc = NULL;
 		goto out;
 	}
@@ -637,7 +637,7 @@ static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
 {
 	struct smc_sock *par = smc_sk(parent);
 
-	sock_hold(sk);
+	sock_hold(sk); /* sock_put in smc_accept_unlink () */
 	spin_lock(&par->accept_q_lock);
 	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
 	spin_unlock(&par->accept_q_lock);
@@ -653,7 +653,7 @@ static void smc_accept_unlink(struct sock *sk)
 	list_del_init(&smc_sk(sk)->accept_q);
 	spin_unlock(&par->accept_q_lock);
 	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
-	sock_put(sk);
+	sock_put(sk); /* sock_hold in smc_accept_enqueue */
 }
 
 /* remove a sock from the accept queue to bind it to a new socket created
@@ -671,7 +671,7 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 		smc_accept_unlink(new_sk);
 		if (new_sk->sk_state == SMC_CLOSED) {
 			new_sk->sk_prot->unhash(new_sk);
-			sock_put(new_sk);
+			sock_put(new_sk); /* final */
 			continue;
 		}
 		if (new_sock)
@@ -686,14 +686,11 @@ void smc_close_non_accepted(struct sock *sk)
 {
 	struct smc_sock *smc = smc_sk(sk);
 
-	sock_hold(sk);
 	lock_sock(sk);
 	if (!sk->sk_lingertime)
 		/* wait for peer closing */
 		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-	} else {
+	if (!smc->use_fallback) {
 		smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -706,14 +703,15 @@ void smc_close_non_accepted(struct sock *sk)
 		sock_release(tcp);
 	}
 	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
-		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+	} else {
+		if (sk->sk_state == SMC_CLOSED)
+			smc_conn_free(&smc->conn);
 	}
 	release_sock(sk);
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 }
 
 static int smc_serv_conf_first_link(struct smc_sock *smc)
@@ -937,6 +935,8 @@ static void smc_listen_work(struct work_struct *work)
 	smc_lgr_forget(new_smc->conn.lgr);
 	mutex_unlock(&smc_create_lgr_pending);
 out_err:
+	if (newsmcsk->sk_state == SMC_INIT)
+		sock_put(&new_smc->sk); /* passive closing */
 	newsmcsk->sk_state = SMC_CLOSED;
 	smc_conn_free(&new_smc->conn);
 	goto enqueue; /* queue new sock with sk_err set */
@@ -963,12 +963,15 @@ static void smc_tcp_listen_work(struct work_struct *work)
 		sock_hold(lsk); /* sock_put in smc_listen_work */
 		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
 		smc_copy_sock_settings_to_smc(new_smc);
-		schedule_work(&new_smc->smc_listen_work);
+		sock_hold(&new_smc->sk); /* sock_put in passive closing */
+		if (!schedule_work(&new_smc->smc_listen_work))
+			sock_put(&new_smc->sk);
 	}
 
 out:
 	release_sock(lsk);
 	lsk->sk_data_ready(lsk); /* no more listening, wake accept */
+	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
 }
 
 static int smc_listen(struct socket *sock, int backlog)
@@ -1002,7 +1005,9 @@ static int smc_listen(struct socket *sock, int backlog)
 	sk->sk_ack_backlog = 0;
 	sk->sk_state = SMC_LISTEN;
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
-	schedule_work(&smc->tcp_listen_work);
+	sock_hold(sk); /* sock_hold in tcp_listen_worker */
+	if (!schedule_work(&smc->tcp_listen_work))
+		sock_put(sk);
 
 out:
 	release_sock(sk);
@@ -1019,6 +1024,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 	int rc = 0;
 
 	lsmc = smc_sk(sk);
+	sock_hold(sk); /* sock_put below */
 	lock_sock(sk);
 
 	if (lsmc->sk.sk_state != SMC_LISTEN) {
@@ -1053,6 +1059,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 
 out:
 	release_sock(sk);
+	sock_put(sk); /* sock_hold above */
 	return rc;
 }
 

net/smc/smc.h

Lines changed: 0 additions & 1 deletion
@@ -178,7 +178,6 @@ struct smc_sock {	/* smc sock container */
 	struct work_struct	smc_listen_work;/* prepare new accept socket */
 	struct list_head	accept_q;	/* sockets to be accepted */
 	spinlock_t		accept_q_lock;	/* protects accept_q */
-	struct delayed_work	sock_put_work;	/* final socket freeing */
 	bool			use_fallback;	/* fallback to tcp */
 	u8			wait_close_tx_prepared : 1;
 						/* shutdown wr or close

net/smc/smc_cdc.c

Lines changed: 11 additions & 9 deletions
@@ -212,6 +212,14 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		smc->sk.sk_data_ready(&smc->sk);
 	}
 
+	/* piggy backed tx info */
+	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
+	if (diff_cons && smc_tx_prepared_sends(conn)) {
+		smc_tx_sndbuf_nonempty(conn);
+		/* trigger socket release if connection closed */
+		smc_close_wake_tx_prepared(smc);
+	}
+
 	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
 		smc->sk.sk_err = ECONNRESET;
 		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
@@ -221,15 +229,9 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		if (smc->clcsock && smc->clcsock->sk)
 			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
 		sock_set_flag(&smc->sk, SOCK_DONE);
-		schedule_work(&conn->close_work);
-	}
-
-	/* piggy backed tx info */
-	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
-	if (diff_cons && smc_tx_prepared_sends(conn)) {
-		smc_tx_sndbuf_nonempty(conn);
-		/* trigger socket release if connection closed */
-		smc_close_wake_tx_prepared(smc);
+		sock_hold(&smc->sk); /* sock_put in close_work */
+		if (!schedule_work(&conn->close_work))
+			sock_put(&smc->sk);
 	}
 }
 
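A side note on the guard that now precedes schedule_work() in this patch (a sketch under assumptions, not the kernel implementation): schedule_work() returns false when the work item was already pending, in which case only one worker run, and therefore only one sock_put(), will happen, so the reference taken just before scheduling has to be given back immediately. A self-contained userspace model of that guard, with hypothetical schedule_once()/run_pending() stand-ins for schedule_work() and the workqueue:

/*
 * Sketch only: models the "hold before scheduling, put if already queued"
 * guard used above.  schedule_once() and run_pending() are hypothetical
 * stand-ins for schedule_work() and the workqueue running the item once.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int refcnt;
	atomic_bool work_pending;	/* like the "work already queued" state */
};

static void conn_put(struct conn *c)
{
	if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
		printf("freeing conn\n");
		free(c);
	}
}

/* Returns false if the work was already pending, like schedule_work(). */
static bool schedule_once(struct conn *c)
{
	return !atomic_exchange(&c->work_pending, true);
}

/* The single queued execution: it drops exactly one reference, the one
 * taken by the first (successful) scheduler.
 */
static void run_pending(struct conn *c)
{
	if (atomic_exchange(&c->work_pending, false))
		conn_put(c);
}

static void close_event(struct conn *c)
{
	atomic_fetch_add(&c->refcnt, 1);	/* sock_hold() for the worker */
	if (!schedule_once(c))
		conn_put(c);			/* already queued: give it back */
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	atomic_init(&c->refcnt, 1);		/* owner's reference */
	atomic_init(&c->work_pending, false);
	close_event(c);		/* queues the work, worker now owns one ref */
	close_event(c);		/* already queued: the extra ref is dropped */
	run_pending(c);		/* the one worker run drops its reference */
	conn_put(c);		/* owner drops the last reference */
	return 0;
}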
