Skip to content

Commit 38dabe2

Browse files
Keith Busch authored and axboe committed
nvme: centralize AEN defines
All the transports were unnecessarily duplicating the AEN request accounting. This patch defines everything in one place. Signed-off-by: Keith Busch <keith.busch@intel.com> Reviewed-by: Guan Junxiong <guanjunxiong@huawei.com> Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 4185f25 commit 38dabe2

File tree

7 files changed

+30
-58
lines changed

7 files changed

+30
-58
lines changed

drivers/nvme/host/core.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2779,7 +2779,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
27792779

27802780
void nvme_queue_async_events(struct nvme_ctrl *ctrl)
27812781
{
2782-
ctrl->event_limit = NVME_NR_AERS;
2782+
ctrl->event_limit = NVME_NR_AEN_COMMANDS;
27832783
queue_work(nvme_wq, &ctrl->async_event_work);
27842784
}
27852785
EXPORT_SYMBOL_GPL(nvme_queue_async_events);

drivers/nvme/host/fc.c

Lines changed: 12 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -30,15 +30,6 @@
3030
/* *************************** Data Structures/Defines ****************** */
3131

3232

33-
/*
34-
* We handle AEN commands ourselves and don't even let the
35-
* block layer know about them.
36-
*/
37-
#define NVME_FC_NR_AEN_COMMANDS 1
38-
#define NVME_FC_AQ_BLKMQ_DEPTH \
39-
(NVME_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
40-
#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
41-
4233
enum nvme_fc_queue_flags {
4334
NVME_FC_Q_CONNECTED = (1 << 0),
4435
};
@@ -170,7 +161,7 @@ struct nvme_fc_ctrl {
170161
u32 iocnt;
171162
wait_queue_head_t ioabort_wait;
172163

173-
struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
164+
struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
174165

175166
struct nvme_ctrl ctrl;
176167
};
@@ -1546,7 +1537,7 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
15461537
unsigned long flags;
15471538
int i, ret;
15481539

1549-
for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1540+
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
15501541
if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
15511542
continue;
15521543

@@ -1816,7 +1807,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
18161807
int i, ret;
18171808

18181809
aen_op = ctrl->aen_ops;
1819-
for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1810+
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
18201811
private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
18211812
GFP_KERNEL);
18221813
if (!private)
@@ -1826,7 +1817,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
18261817
sqe = &cmdiu->sqe;
18271818
ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
18281819
aen_op, (struct request *)NULL,
1829-
(AEN_CMDID_BASE + i));
1820+
(NVME_AQ_BLK_MQ_DEPTH + i));
18301821
if (ret) {
18311822
kfree(private);
18321823
return ret;
@@ -1839,7 +1830,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
18391830
memset(sqe, 0, sizeof(*sqe));
18401831
sqe->common.opcode = nvme_admin_async_event;
18411832
/* Note: core layer may overwrite the sqe.command_id value */
1842-
sqe->common.command_id = AEN_CMDID_BASE + i;
1833+
sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
18431834
}
18441835
return 0;
18451836
}
@@ -1851,7 +1842,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
18511842
int i;
18521843

18531844
aen_op = ctrl->aen_ops;
1854-
for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1845+
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
18551846
if (!aen_op->fcp_req.private)
18561847
continue;
18571848

@@ -2402,7 +2393,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
24022393
bool terminating = false;
24032394
blk_status_t ret;
24042395

2405-
if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
2396+
if (aer_idx > NVME_NR_AEN_COMMANDS)
24062397
return;
24072398

24082399
spin_lock_irqsave(&ctrl->lock, flags);
@@ -2722,16 +2713,16 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
27222713
* Create the admin queue
27232714
*/
27242715

2725-
nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
2716+
nvme_fc_init_queue(ctrl, 0, NVME_AQ_BLK_MQ_DEPTH);
27262717

27272718
ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2728-
NVME_FC_AQ_BLKMQ_DEPTH);
2719+
NVME_AQ_BLK_MQ_DEPTH);
27292720
if (ret)
27302721
goto out_free_queue;
27312722

27322723
ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2733-
NVME_FC_AQ_BLKMQ_DEPTH,
2734-
(NVME_FC_AQ_BLKMQ_DEPTH / 4));
2724+
NVME_AQ_BLK_MQ_DEPTH,
2725+
(NVME_AQ_BLK_MQ_DEPTH / 4));
27352726
if (ret)
27362727
goto out_delete_hw_queue;
27372728

@@ -3145,7 +3136,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
31453136

31463137
memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
31473138
ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3148-
ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
3139+
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
31493140
ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
31503141
ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
31513142
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +

drivers/nvme/host/nvme.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -313,7 +313,6 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
313313
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
314314
bool send);
315315

316-
#define NVME_NR_AERS 1
317316
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
318317
union nvme_result *res);
319318
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

drivers/nvme/host/pci.c

Lines changed: 3 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -35,12 +35,6 @@
3535
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
3636
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
3737

38-
/*
39-
* We handle AEN commands ourselves and don't even let the
40-
* block layer know about them.
41-
*/
42-
#define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AERS)
43-
4438
#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
4539

4640
static int use_threaded_interrupts;
@@ -956,7 +950,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
956950
* for them but rather special case them here.
957951
*/
958952
if (unlikely(nvmeq->qid == 0 &&
959-
cqe->command_id >= NVME_AQ_BLKMQ_DEPTH)) {
953+
cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
960954
nvme_complete_async_event(&nvmeq->dev->ctrl,
961955
cqe->status, &cqe->result);
962956
return;
@@ -1057,7 +1051,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
10571051

10581052
memset(&c, 0, sizeof(c));
10591053
c.common.opcode = nvme_admin_async_event;
1060-
c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;
1054+
c.common.command_id = NVME_AQ_BLK_MQ_DEPTH + aer_idx;
10611055

10621056
spin_lock_irq(&nvmeq->q_lock);
10631057
__nvme_submit_cmd(nvmeq, &c);
@@ -1524,11 +1518,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
15241518
dev->admin_tagset.ops = &nvme_mq_admin_ops;
15251519
dev->admin_tagset.nr_hw_queues = 1;
15261520

1527-
/*
1528-
* Subtract one to leave an empty queue entry for 'Full Queue'
1529-
* condition. See NVM-Express 1.2 specification, section 4.1.2.
1530-
*/
1531-
dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
1521+
dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
15321522
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
15331523
dev->admin_tagset.numa_node = dev_to_node(dev->dev);
15341524
dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);

drivers/nvme/host/rdma.c

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -41,14 +41,6 @@
4141

4242
#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
4343

44-
/*
45-
* We handle AEN commands ourselves and don't even let the
46-
* block layer know about them.
47-
*/
48-
#define NVME_RDMA_NR_AEN_COMMANDS 1
49-
#define NVME_RDMA_AQ_BLKMQ_DEPTH \
50-
(NVME_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
51-
5244
struct nvme_rdma_device {
5345
struct ib_device *dev;
5446
struct ib_pd *pd;
@@ -690,7 +682,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
690682
set = &ctrl->admin_tag_set;
691683
memset(set, 0, sizeof(*set));
692684
set->ops = &nvme_rdma_admin_mq_ops;
693-
set->queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
685+
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
694686
set->reserved_tags = 2; /* connect + keep-alive */
695687
set->numa_node = NUMA_NO_NODE;
696688
set->cmd_size = sizeof(struct nvme_rdma_request) +
@@ -1318,7 +1310,7 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
13181310

13191311
memset(cmd, 0, sizeof(*cmd));
13201312
cmd->common.opcode = nvme_admin_async_event;
1321-
cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
1313+
cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
13221314
cmd->common.flags |= NVME_CMD_SGL_METABUF;
13231315
nvme_rdma_set_sg_null(cmd);
13241316

@@ -1380,7 +1372,7 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
13801372
* for them but rather special case them here.
13811373
*/
13821374
if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
1383-
cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
1375+
cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
13841376
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
13851377
&cqe->result);
13861378
else

drivers/nvme/target/loop.c

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -23,14 +23,6 @@
2323

2424
#define NVME_LOOP_MAX_SEGMENTS 256
2525

26-
/*
27-
* We handle AEN commands ourselves and don't even let the
28-
* block layer know about them.
29-
*/
30-
#define NVME_LOOP_NR_AEN_COMMANDS 1
31-
#define NVME_LOOP_AQ_BLKMQ_DEPTH \
32-
(NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
33-
3426
struct nvme_loop_iod {
3527
struct nvme_request nvme_req;
3628
struct nvme_command cmd;
@@ -112,7 +104,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
112104
* for them but rather special case them here.
113105
*/
114106
if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
115-
cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
107+
cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
116108
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
117109
&cqe->result);
118110
} else {
@@ -200,7 +192,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
200192

201193
memset(&iod->cmd, 0, sizeof(iod->cmd));
202194
iod->cmd.common.opcode = nvme_admin_async_event;
203-
iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
195+
iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
204196
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
205197

206198
if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
@@ -356,7 +348,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
356348

357349
memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
358350
ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
359-
ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
351+
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
360352
ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
361353
ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
362354
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +

include/linux/nvme.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,14 @@ enum {
9090
};
9191

9292
#define NVME_AQ_DEPTH 32
93+
#define NVME_NR_AEN_COMMANDS 1
94+
#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
95+
96+
/*
97+
* Subtract one to leave an empty queue entry for 'Full Queue' condition. See
98+
* NVM-Express 1.2 specification, section 4.1.2.
99+
*/
100+
#define NVME_AQ_MQ_TAG_DEPTH (NVME_AQ_BLK_MQ_DEPTH - 1)
93101

94102
enum {
95103
NVME_REG_CAP = 0x0000, /* Controller Capabilities */

0 commit comments

Comments (0)