
Commit dc3ecfc

Merge branch 'nvme/for-5.5' of git://git.infradead.org/nvme into for-linus
Pull NVMe fixes from Keith:

* 'nvme/for-5.5' of git://git.infradead.org/nvme:
  nvme/pci: Fix read queue count
  nvme/pci Limit write queue sizes to possible cpus
  nvme/pci: Fix write and poll queue types
  nvme/pci: Remove last_cq_head
  nvme: Namepace identification descriptor list is optional
  nvme-fc: fix double-free scenarios on hw queues
  nvme: else following return is not needed
  nvme: add error message on mismatching controller ids
  nvme_fc: add module to ops template to allow module references
  nvmet-loop: Avoid preallocating big SGL for data
  nvme-fc: Avoid preallocating big SGL for data
  nvme-rdma: Avoid preallocating big SGL for data
2 parents 8539429 + 7e4c6b9 commit dc3ecfc

File tree: 10 files changed (+69 lines, -32 lines)

drivers/nvme/host/core.c

Lines changed: 6 additions & 0 deletions
@@ -1735,6 +1735,8 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
 		if (ret)
 			dev_warn(ctrl->device,
				 "Identify Descriptors failed (%d)\n", ret);
+		if (ret > 0)
+			ret = 0;
 	}
 	return ret;
 }
@@ -2862,6 +2864,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
	 * admin connect
	 */
	if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+		dev_err(ctrl->device,
+			"Mismatching cntlid: Connect %u vs Identify "
+			"%u, rejecting\n",
+			ctrl->cntlid, le16_to_cpu(id->cntlid));
		ret = -EINVAL;
		goto out_free;
	}
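
For readers outside the nvme tree, the first hunk relies on the host core's return convention: a negative value is a kernel/transport errno and stays fatal, while a positive value is an NVMe completion status from the device. Because the Namespace Identification Descriptor list is optional, a positive status is now logged and cleared so namespace scanning continues. A minimal sketch of that convention with a hypothetical wrapper (nvme_probe_ns_descs is illustrative, not code from the commit):

static int nvme_probe_ns_descs(struct nvme_ctrl *ctrl, unsigned int nsid,
			       struct nvme_ns_ids *ids)
{
	int ret = nvme_identify_ns_descs(ctrl, nsid, ids);

	if (ret < 0)		/* kernel/transport errno: still fatal */
		return ret;
	if (ret > 0)		/* NVMe status: the optional descriptor list
				 * is absent; clear and keep scanning */
		ret = 0;
	return ret;
}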

drivers/nvme/host/fc.c

Lines changed: 31 additions & 9 deletions
@@ -95,7 +95,7 @@ struct nvme_fc_fcp_op {
 
 struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
-	struct scatterlist	sgl[SG_CHUNK_SIZE];
+	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[0];
 };
 
@@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
-	    !template->max_dif_sgl_segments || !template->dma_boundary) {
+	    !template->max_dif_sgl_segments || !template->dma_boundary ||
+	    !template->module) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}
@@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 {
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
+	struct nvme_fc_lport *lport = ctrl->lport;
	unsigned long flags;
 
	if (ctrl->ctrl.tagset) {
@@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref)
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
+	module_put(lport->ops->module);
 }
 
 static void
@@ -2141,7 +2144,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
-			SG_CHUNK_SIZE);
+			NVME_INLINE_SG_CNT);
	if (ret)
		return -ENOMEM;
 
@@ -2150,7 +2153,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, rq_dma_dir(rq));
	if (unlikely(freq->sg_cnt <= 0)) {
-		sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+		sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
		freq->sg_cnt = 0;
		return -EFAULT;
	}
@@ -2173,7 +2176,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
			rq_dma_dir(rq));
 
-	sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
 
	freq->sg_cnt = 0;
 }
@@ -2910,10 +2913,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 static void
 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
 {
-	nvme_stop_keep_alive(&ctrl->ctrl);
+	/*
+	 * if state is connecting - the error occurred as part of a
+	 * reconnect attempt. The create_association error paths will
+	 * clean up any outstanding io.
+	 *
+	 * if it's a different state - ensure all pending io is
+	 * terminated. Given this can delay while waiting for the
+	 * aborted io to return, we recheck adapter state below
+	 * before changing state.
+	 */
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
+		nvme_stop_keep_alive(&ctrl->ctrl);
 
-	/* will block will waiting for io to terminate */
-	nvme_fc_delete_association(ctrl);
+		/* will block will waiting for io to terminate */
+		nvme_fc_delete_association(ctrl);
+	}
 
	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
@@ -3059,10 +3074,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
		goto out_fail;
	}
 
+	if (!try_module_get(lport->ops->module)) {
+		ret = -EUNATCH;
+		goto out_free_ctrl;
+	}
+
	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
-		goto out_free_ctrl;
+		goto out_mod_put;
	}
 
	ctrl->ctrl.opts = opts;
@@ -3215,6 +3235,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_mod_put:
+	module_put(lport->ops->module);
 out_free_ctrl:
	kfree(ctrl);
 out_fail:
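
The module-reference hunks above follow the standard try_module_get()/module_put() pairing, so the FC low-level driver cannot be unloaded while a controller that may still call into its ops exists. A self-contained sketch of that pattern, with illustrative names (my_ops/my_ctrl are not from the driver):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_ops {
	struct module *module;		/* the LLDD sets this to THIS_MODULE */
	/* ... callbacks into the LLDD ... */
};

struct my_ctrl {
	const struct my_ops *ops;
};

static int my_ctrl_create(struct my_ctrl *ctrl)
{
	/* Pin the LLDD before the controller can call back into it. */
	if (!try_module_get(ctrl->ops->module))
		return -EUNATCH;
	/* ... on any later failure: module_put(ctrl->ops->module); ... */
	return 0;
}

static void my_ctrl_free(struct my_ctrl *ctrl)
{
	struct module *mod = ctrl->ops->module;

	kfree(ctrl);		/* release the controller first ... */
	module_put(mod);	/* ... then drop the LLDD reference */
}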

drivers/nvme/host/nvme.h

Lines changed: 6 additions & 0 deletions
@@ -28,6 +28,12 @@ extern unsigned int admin_timeout;
 #define NVME_DEFAULT_KATO	5
 #define NVME_KATO_GRACE		10
 
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define  NVME_INLINE_SG_CNT  0
+#else
+#define  NVME_INLINE_SG_CNT  2
+#endif
+
 extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
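
NVME_INLINE_SG_CNT is the number of scatterlist entries embedded directly in each request PDU: sg_alloc_table_chained() uses that inline chunk when it is large enough and chains extra pages otherwise, which is why the count drops to 0 when CONFIG_ARCH_NO_SG_CHAIN forbids chaining. A minimal sketch of the alloc/free pairing the fabrics drivers use in the hunks below (struct my_request and the helpers are illustrative only):

#include <linux/scatterlist.h>

#define MY_INLINE_SG_CNT	2	/* stands in for NVME_INLINE_SG_CNT */

struct my_request {
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[MY_INLINE_SG_CNT];	/* inline chunk */
};

static int my_map_data(struct my_request *req, unsigned int nr_phys_segs)
{
	req->sg_table.sgl = req->first_sgl;
	/* Small I/O fits in first_sgl; larger I/O chains additional pages. */
	return sg_alloc_table_chained(&req->sg_table, nr_phys_segs,
				      req->sg_table.sgl, MY_INLINE_SG_CNT);
}

static void my_unmap_data(struct my_request *req)
{
	/* Must pass the same inline count used at allocation time. */
	sg_free_table_chained(&req->sg_table, MY_INLINE_SG_CNT);
}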

drivers/nvme/host/pci.c

Lines changed: 9 additions & 14 deletions
@@ -68,14 +68,14 @@ static int io_queue_depth = 1024;
 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
 
-static int write_queues;
-module_param(write_queues, int, 0644);
+static unsigned int write_queues;
+module_param(write_queues, uint, 0644);
 MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");
 
-static int poll_queues;
-module_param(poll_queues, int, 0644);
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
 
 struct nvme_dev;
@@ -176,7 +176,6 @@ struct nvme_queue {
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
-	u16 last_cq_head;
	u16 qid;
	u8 cq_phase;
	u8 sqes;
@@ -1026,10 +1025,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
	 * the irq handler, even if that was on another CPU.
	 */
	rmb();
-	if (nvmeq->cq_head != nvmeq->last_cq_head)
-		ret = IRQ_HANDLED;
	nvme_process_cq(nvmeq, &start, &end, -1);
-	nvmeq->last_cq_head = nvmeq->cq_head;
	wmb();
 
	if (start != end) {
@@ -1549,7 +1545,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		return result;
-	else if (result)
+	if (result)
		goto release_cq;
 
	nvmeq->cq_vector = vector;
@@ -2058,7 +2054,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
		.priv		= dev,
	};
	unsigned int irq_queues, this_p_queues;
-	unsigned int nr_cpus = num_possible_cpus();
 
	/*
	 * Poll queues don't need interrupts, but we need at least one IO
@@ -2069,10 +2064,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
		this_p_queues = nr_io_queues - 1;
		irq_queues = 1;
	} else {
-		if (nr_cpus < nr_io_queues - this_p_queues)
-			irq_queues = nr_cpus + 1;
-		else
-			irq_queues = nr_io_queues - this_p_queues + 1;
+		irq_queues = nr_io_queues - this_p_queues + 1;
	}
	dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
 
@@ -3142,6 +3134,9 @@ static int __init nvme_init(void)
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+
+	write_queues = min(write_queues, num_possible_cpus());
+	poll_queues = min(poll_queues, num_possible_cpus());
	return pci_register_driver(&nvme_driver);
 }
 
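
A hedged worked example of how the pci.c hunks fit together (all numbers below are made up): the module parameters become unsigned and are clamped once at init, so nvme_setup_irqs() no longer rechecks num_possible_cpus() and simply reserves one vector per non-poll queue plus the admin queue.

#include <linux/kernel.h>

/* Illustrative arithmetic only; not values from a real system. */
static unsigned int example_irq_queues(void)
{
	unsigned int possible_cpus = 8;
	unsigned int write_queues = 16, poll_queues = 4;
	unsigned int nr_io_queues, this_p_queues;

	/* Clamp once at module init, as in the last hunk above. */
	write_queues = min(write_queues, possible_cpus);	/* -> 8 */
	poll_queues  = min(poll_queues,  possible_cpus);	/* -> 4 */

	/* Later, nvme_setup_irqs() relies on the earlier clamping. */
	nr_io_queues  = write_queues + poll_queues;		/* say 12 granted */
	this_p_queues = poll_queues;				/* polled, no IRQs */
	return nr_io_queues - this_p_queues + 1;		/* 9 = 8 I/O + 1 admin */
}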

drivers/nvme/host/rdma.c

Lines changed: 5 additions & 5 deletions
@@ -731,7 +731,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
		set->reserved_tags = 2; /* connect + keep-alive */
		set->numa_node = nctrl->numa_node;
		set->cmd_size = sizeof(struct nvme_rdma_request) +
-			SG_CHUNK_SIZE * sizeof(struct scatterlist);
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
		set->driver_data = ctrl;
		set->nr_hw_queues = 1;
		set->timeout = ADMIN_TIMEOUT;
@@ -745,7 +745,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_SHOULD_MERGE;
		set->cmd_size = sizeof(struct nvme_rdma_request) +
-			SG_CHUNK_SIZE * sizeof(struct scatterlist);
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
		set->driver_data = ctrl;
		set->nr_hw_queues = nctrl->queue_count - 1;
		set->timeout = NVME_IO_TIMEOUT;
@@ -1160,7 +1160,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
	}
 
	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1276,7 +1276,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
	req->sg_table.sgl = req->first_sgl;
	ret = sg_alloc_table_chained(&req->sg_table,
			blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
-			SG_CHUNK_SIZE);
+			NVME_INLINE_SG_CNT);
	if (ret)
		return -ENOMEM;
 
@@ -1314,7 +1314,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 out_unmap_sg:
	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
 out_free_table:
-	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
	return ret;
 }
 
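
The cmd_size hunks are where the saving in the three "Avoid preallocating big SGL for data" patches comes from: cmd_size is allocated for every tag, and the inline SGL sits at the tail of the driver PDU. A rough sketch of the estimate, assuming SG_CHUNK_SIZE is 128 and struct scatterlist is 32 bytes on 64-bit without CONFIG_DEBUG_SG (both assumptions, not stated by the commit):

#include <linux/scatterlist.h>

/* Placeholder for a fabrics driver PDU such as struct nvme_rdma_request. */
struct my_fabrics_request {
	unsigned long placeholder[8];
};

static unsigned int my_cmd_size(unsigned int inline_sg_cnt)
{
	/* Same shape as the set->cmd_size computation in the hunks above. */
	return sizeof(struct my_fabrics_request) +
	       inline_sg_cnt * sizeof(struct scatterlist);
}

/*
 * my_cmd_size(128) - my_cmd_size(2) == 126 * sizeof(struct scatterlist),
 * roughly 4 KB per request under the assumptions above; multiplied across
 * every tag on every hardware queue, the up-front allocation shrinks by
 * megabytes.
 */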

drivers/nvme/target/fcloop.c

Lines changed: 1 addition & 0 deletions
@@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
 #define FCLOOP_DMABOUND_4G		0xFFFFFFFF
 
 static struct nvme_fc_port_template fctemplate = {
+	.module			= THIS_MODULE,
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,

drivers/nvme/target/loop.c

Lines changed: 4 additions & 4 deletions
@@ -76,7 +76,7 @@ static void nvme_loop_complete_rq(struct request *req)
 {
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
-	sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
 }
 
@@ -156,7 +156,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
-				iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}
@@ -342,7 +342,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-		SG_CHUNK_SIZE * sizeof(struct scatterlist);
+		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
@@ -516,7 +516,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-		SG_CHUNK_SIZE * sizeof(struct scatterlist);
+		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

drivers/scsi/lpfc/lpfc_nvme.c

Lines changed: 2 additions & 0 deletions
@@ -1976,6 +1976,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 
 /* Declare and initialization an instance of the FC NVME template. */
 static struct nvme_fc_port_template lpfc_nvme_template = {
+	.module	= THIS_MODULE,
+
	/* initiator-based functions */
	.localport_delete  = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,

drivers/scsi/qla2xxx/qla_nvme.c

Lines changed: 1 addition & 0 deletions
@@ -610,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 }
 
 static struct nvme_fc_port_template qla_nvme_fc_transport = {
+	.module	= THIS_MODULE,
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,

include/linux/nvme-fc-driver.h

Lines changed: 4 additions & 0 deletions
@@ -270,6 +270,8 @@ struct nvme_fc_remote_port {
 *
 * Host/Initiator Transport Entrypoints/Parameters:
 *
+ * @module:  The LLDD module using the interface
+ *
 * @localport_delete:  The LLDD initiates deletion of a localport via
 *       nvme_fc_deregister_localport(). However, the teardown is
 *       asynchronous. This routine is called upon the completion of the
@@ -383,6 +385,8 @@ struct nvme_fc_remote_port {
 *       Value is Mandatory. Allowed to be zero.
 */
 struct nvme_fc_port_template {
+	struct module	*module;
+
	/* initiator-based functions */
	void	(*localport_delete)(struct nvme_fc_local_port *);
	void	(*remoteport_delete)(struct nvme_fc_remote_port *);
