43
43
#include <asm/page.h>
44
44
#include <linux/task_work.h>
45
45
#include <linux/namei.h>
46
+ #include <linux/kref.h>
46
47
#include <uapi/linux/ublk_cmd.h>
47
48
48
49
#define UBLK_MINORS (1U << MINORBITS)
62
63
63
64
/*
 * Per-request driver-private data, stored in the blk-mq request PDU
 * (accessed via blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu()).
 */
struct ublk_rq_data {
	/* lock-less list linkage; presumably for batching io commands —
	 * TODO confirm against the enqueue path, which is not visible here */
	struct llist_node node;

	/*
	 * Reference count pinning the request: initialised by
	 * ublk_init_req_ref(), taken/dropped by ublk_get_req_ref()/
	 * ublk_put_req_ref(); the final put completes the request through
	 * ublk_complete_rq().
	 */
	struct kref ref;
};
66
69
67
70
struct ublk_uring_cmd_pdu {
@@ -181,6 +184,9 @@ struct ublk_params_header {
181
184
__u32 types ;
182
185
};
183
186
187
/*
 * Forward declarations: ublk_put_req_ref() needs both the direct
 * completion path and the kref release callback, which are defined
 * further down in this file.
 */
static inline void __ublk_complete_rq(struct request *req);
static void ublk_complete_rq(struct kref *ref);
184
190
static dev_t ublk_chr_devt ;
185
191
static struct class * ublk_chr_class ;
186
192
@@ -289,6 +295,45 @@ static int ublk_apply_params(struct ublk_device *ub)
289
295
return 0 ;
290
296
}
291
297
298
/*
 * Whether per-request reference counting is needed for @ubq.
 *
 * Currently hard-wired to false, so ublk_init/get/put_req_ref() collapse
 * to no-ops / direct completion; presumably a hook for features that must
 * pin a request while user space still touches it — TODO confirm against
 * later revisions of this driver.
 */
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
	return false;
}
302
+
303
+ static inline void ublk_init_req_ref (const struct ublk_queue * ubq ,
304
+ struct request * req )
305
+ {
306
+ if (ublk_need_req_ref (ubq )) {
307
+ struct ublk_rq_data * data = blk_mq_rq_to_pdu (req );
308
+
309
+ kref_init (& data -> ref );
310
+ }
311
+ }
312
+
313
+ static inline bool ublk_get_req_ref (const struct ublk_queue * ubq ,
314
+ struct request * req )
315
+ {
316
+ if (ublk_need_req_ref (ubq )) {
317
+ struct ublk_rq_data * data = blk_mq_rq_to_pdu (req );
318
+
319
+ return kref_get_unless_zero (& data -> ref );
320
+ }
321
+
322
+ return true;
323
+ }
324
+
325
+ static inline void ublk_put_req_ref (const struct ublk_queue * ubq ,
326
+ struct request * req )
327
+ {
328
+ if (ublk_need_req_ref (ubq )) {
329
+ struct ublk_rq_data * data = blk_mq_rq_to_pdu (req );
330
+
331
+ kref_put (& data -> ref , ublk_complete_rq );
332
+ } else {
333
+ __ublk_complete_rq (req );
334
+ }
335
+ }
336
+
292
337
static inline bool ublk_need_get_data (const struct ublk_queue * ubq )
293
338
{
294
339
return ubq -> flags & UBLK_F_NEED_GET_DATA ;
@@ -625,13 +670,19 @@ static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
625
670
}
626
671
627
672
/* todo: handle partial completion */
628
- static void ublk_complete_rq (struct request * req )
673
+ static inline void __ublk_complete_rq (struct request * req )
629
674
{
630
675
struct ublk_queue * ubq = req -> mq_hctx -> driver_data ;
631
676
struct ublk_io * io = & ubq -> ios [req -> tag ];
632
677
unsigned int unmapped_bytes ;
633
678
blk_status_t res = BLK_STS_OK ;
634
679
680
+ /* called from ublk_abort_queue() code path */
681
+ if (io -> flags & UBLK_IO_FLAG_ABORTED ) {
682
+ res = BLK_STS_IOERR ;
683
+ goto exit ;
684
+ }
685
+
635
686
/* failed read IO if nothing is read */
636
687
if (!io -> res && req_op (req ) == REQ_OP_READ )
637
688
io -> res = - EIO ;
@@ -671,6 +722,15 @@ static void ublk_complete_rq(struct request *req)
671
722
blk_mq_end_request (req , res );
672
723
}
673
724
725
+ static void ublk_complete_rq (struct kref * ref )
726
+ {
727
+ struct ublk_rq_data * data = container_of (ref , struct ublk_rq_data ,
728
+ ref );
729
+ struct request * req = blk_mq_rq_from_pdu (data );
730
+
731
+ __ublk_complete_rq (req );
732
+ }
733
+
674
734
/*
675
735
* Since __ublk_rq_task_work always fails requests immediately during
676
736
* exiting, __ublk_fail_req() is only called from abort context during
@@ -689,7 +749,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
689
749
if (ublk_queue_can_use_recovery_reissue (ubq ))
690
750
blk_mq_requeue_request (req , false);
691
751
else
692
- blk_mq_end_request ( req , BLK_STS_IOERR );
752
+ ublk_put_req_ref ( ubq , req );
693
753
}
694
754
}
695
755
@@ -798,6 +858,7 @@ static inline void __ublk_rq_task_work(struct request *req,
798
858
mapped_bytes >> 9 ;
799
859
}
800
860
861
+ ublk_init_req_ref (ubq , req );
801
862
ubq_complete_io_cmd (io , UBLK_IO_RES_OK , issue_flags );
802
863
}
803
864
@@ -1002,7 +1063,7 @@ static void ublk_commit_completion(struct ublk_device *ub,
1002
1063
req = blk_mq_tag_to_rq (ub -> tag_set .tags [qid ], tag );
1003
1064
1004
1065
if (req && likely (!blk_should_fake_timeout (req -> q )))
1005
- ublk_complete_rq ( req );
1066
+ ublk_put_req_ref ( ubq , req );
1006
1067
}
1007
1068
1008
1069
/*
0 commit comments