
Commit 8284066

Ming Lei authored and Jens Axboe (axboe) committed
ublk: grab request reference when the request is handled by userspace
Add one reference counter into the request pdu data, and hold this reference for the request's lifetime.

This prepares for moving the request data copy into userspace, which needs to copy request data via read()/write() on /dev/ublkcN. We therefore have to guarantee that read()/write() is only done on a valid/active request, and that will be enforced by holding the io request reference in read()/write().

Signed-off-by: Ming Lei <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
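The key primitive is kref_get_unless_zero(): once the final reference has been dropped and the count has reached zero, any racing lookup fails instead of resurrecting a dying request. A minimal sketch of that lookup side, for illustration only: try_grab_req() is a hypothetical wrapper, while ublk_get_req_ref() is the helper added by this patch (see the diff below).

/*
 * Illustrative sketch, not part of the patch: a racing lookup backs
 * off once the final reference is gone, so it can never touch a
 * request that is already completing.
 */
static struct request *try_grab_req(struct ublk_queue *ubq,
				    struct request *req)
{
	/* ublk_get_req_ref() wraps kref_get_unless_zero() */
	if (!ublk_get_req_ref(ubq, req))
		return NULL;	/* count already zero: request is dying */
	return req;		/* caller must ublk_put_req_ref() later */
}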
1 parent 981f95a · commit 8284066

File tree

1 file changed: +64 lines, −3 lines


drivers/block/ublk_drv.c

Lines changed: 64 additions & 3 deletions
@@ -43,6 +43,7 @@
 #include <asm/page.h>
 #include <linux/task_work.h>
 #include <linux/namei.h>
+#include <linux/kref.h>
 #include <uapi/linux/ublk_cmd.h>
 
 #define UBLK_MINORS		(1U << MINORBITS)
@@ -62,6 +63,8 @@
 
 struct ublk_rq_data {
 	struct llist_node node;
+
+	struct kref ref;
 };
 
 struct ublk_uring_cmd_pdu {
@@ -181,6 +184,9 @@ struct ublk_params_header {
 	__u32	types;
 };
 
+static inline void __ublk_complete_rq(struct request *req);
+static void ublk_complete_rq(struct kref *ref);
+
 static dev_t ublk_chr_devt;
 static struct class *ublk_chr_class;
 
@@ -289,6 +295,45 @@ static int ublk_apply_params(struct ublk_device *ub)
 	return 0;
 }
 
+static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
+{
+	return false;
+}
+
+static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
+		struct request *req)
+{
+	if (ublk_need_req_ref(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		kref_init(&data->ref);
+	}
+}
+
+static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
+		struct request *req)
+{
+	if (ublk_need_req_ref(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		return kref_get_unless_zero(&data->ref);
+	}
+
+	return true;
+}
+
+static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
+		struct request *req)
+{
+	if (ublk_need_req_ref(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		kref_put(&data->ref, ublk_complete_rq);
+	} else {
+		__ublk_complete_rq(req);
+	}
+}
+
 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
 {
 	return ubq->flags & UBLK_F_NEED_GET_DATA;
@@ -625,13 +670,19 @@ static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
 }
 
 /* todo: handle partial completion */
-static void ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req)
 {
 	struct ublk_queue *ubq = req->mq_hctx->driver_data;
 	struct ublk_io *io = &ubq->ios[req->tag];
 	unsigned int unmapped_bytes;
 	blk_status_t res = BLK_STS_OK;
 
+	/* called from ublk_abort_queue() code path */
+	if (io->flags & UBLK_IO_FLAG_ABORTED) {
+		res = BLK_STS_IOERR;
+		goto exit;
+	}
+
 	/* failed read IO if nothing is read */
 	if (!io->res && req_op(req) == REQ_OP_READ)
 		io->res = -EIO;
@@ -671,6 +722,15 @@ static void ublk_complete_rq(struct request *req)
 	blk_mq_end_request(req, res);
 }
 
+static void ublk_complete_rq(struct kref *ref)
+{
+	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
+			ref);
+	struct request *req = blk_mq_rq_from_pdu(data);
+
+	__ublk_complete_rq(req);
+}
+
 /*
  * Since __ublk_rq_task_work always fails requests immediately during
  * exiting, __ublk_fail_req() is only called from abort context during
@@ -689,7 +749,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
 		if (ublk_queue_can_use_recovery_reissue(ubq))
 			blk_mq_requeue_request(req, false);
 		else
-			blk_mq_end_request(req, BLK_STS_IOERR);
+			ublk_put_req_ref(ubq, req);
 	}
 }
 
@@ -798,6 +858,7 @@ static inline void __ublk_rq_task_work(struct request *req,
 			mapped_bytes >> 9;
 	}
 
+	ublk_init_req_ref(ubq, req);
 	ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
 }
 
@@ -1002,7 +1063,7 @@ static void ublk_commit_completion(struct ublk_device *ub,
 	req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
 
 	if (req && likely(!blk_should_fake_timeout(req->q)))
-		ublk_complete_rq(req);
+		ublk_put_req_ref(ubq, req);
 }
 
 /*
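Per the commit message, a later series will hold this reference across read()/write() on /dev/ublkcN. A hedged sketch of how such a path could use the helpers introduced here, assuming a queue for which ublk_need_req_ref() returns true (in this patch it still hard-codes false); the function name and the copy helper are assumptions, not from this patch:

/*
 * Hypothetical future read()-side path on /dev/ublkcN. Only
 * ublk_get_req_ref()/ublk_put_req_ref() come from this patch;
 * everything else is assumed for illustration.
 */
static ssize_t ublk_copy_req_sketch(struct ublk_queue *ubq,
				    struct request *req,
				    char __user *buf, size_t len)
{
	ssize_t ret;

	/* refuse to touch a request that is no longer valid/active */
	if (!ublk_get_req_ref(ubq, req))
		return -EINVAL;

	ret = copy_req_data_to_user(req, buf, len);	/* assumed helper */

	/* dropping the last reference runs __ublk_complete_rq() */
	ublk_put_req_ref(ubq, req);
	return ret;
}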
