Skip to content

Commit a1e2103

Browse files
committed
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - an NVMe fix from Gabriel, fixing a suspend/resume issue on some setups

 - addition of a few missing entries in the block queue sysfs documentation, from Joe

 - a fix for a sparse shadow warning for the bvec iterator, from Johannes

 - a writeback deadlock involving raid issuing barriers, and not flushing the plug when we wake up the flusher threads. From Konstantin

 - a set of patches for the NVMe target/loop/rdma code, from Roland and Sagi

* 'for-linus' of git://git.kernel.dk/linux-block:
  bvec: avoid variable shadowing warning
  doc: update block/queue-sysfs.txt entries
  nvme: Suspend all queues before deletion
  mm, writeback: flush plugged IO in wakeup_flusher_threads()
  nvme-rdma: Remove unused includes
  nvme-rdma: start async event handler after reconnecting to a controller
  nvmet: Fix controller serial number inconsistency
  nvmet-rdma: Don't use the inline buffer in order to avoid allocation for small reads
  nvmet-rdma: Correctly handle RDMA device hot removal
  nvme-rdma: Make sure to shutdown the controller if we can
  nvme-loop: Remove duplicate call to nvme_remove_namespaces
  nvme-rdma: Free the I/O tags when we delete the controller
  nvme-rdma: Remove duplicate call to nvme_remove_namespaces
  nvme-rdma: Fix device removal handling
  nvme-rdma: Queue ns scanning after a successful reconnection
  nvme-rdma: Don't leak uninitialized memory in connect request private data
2 parents f31494b + 1ea049b commit a1e2103

File tree

10 files changed

+160
-85
lines changed

10 files changed

+160
-85
lines changed

Documentation/block/queue-sysfs.txt

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,12 @@ add_random (RW)
1414
This file allows to turn off the disk entropy contribution. Default
1515
value of this file is '1'(on).
1616

17+
dax (RO)
18+
--------
19+
This file indicates whether the device supports Direct Access (DAX),
20+
used by CPU-addressable storage to bypass the pagecache. It shows '1'
21+
if true, '0' if not.
22+
1723
discard_granularity (RO)
1824
-----------------------
1925
This shows the size of internal allocation of the device in bytes, if
@@ -46,6 +52,12 @@ hw_sector_size (RO)
4652
-------------------
4753
This is the hardware sector size of the device, in bytes.
4854

55+
io_poll (RW)
56+
------------
57+
When read, this file shows the total number of block IO polls and how
58+
many returned success. Writing '0' to this file will disable polling
59+
for this device. Writing any non-zero value will enable this feature.
60+
4961
iostats (RW)
5062
-------------
5163
This file is used to control (on/off) the iostats accounting of the
@@ -151,5 +163,11 @@ device state. This means that it might not be safe to toggle the
151163
setting from "write back" to "write through", since that will also
152164
eliminate cache flushes issued by the kernel.
153165

166+
write_same_max_bytes (RO)
167+
-------------------------
168+
This is the number of bytes the device can write in a single write-same
169+
command. A value of '0' means write-same is not supported by this
170+
device.
171+
154172

155173
Jens Axboe <jens.axboe@oracle.com>, February 2009

drivers/nvme/host/pci.c

Lines changed: 8 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1543,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
15431543
reinit_completion(&dev->ioq_wait);
15441544
retry:
15451545
timeout = ADMIN_TIMEOUT;
1546-
for (; i > 0; i--) {
1547-
struct nvme_queue *nvmeq = dev->queues[i];
1548-
1549-
if (!pass)
1550-
nvme_suspend_queue(nvmeq);
1551-
if (nvme_delete_queue(nvmeq, opcode))
1546+
for (; i > 0; i--, sent++)
1547+
if (nvme_delete_queue(dev->queues[i], opcode))
15521548
break;
1553-
++sent;
1554-
}
1549+
15551550
while (sent--) {
15561551
timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
15571552
if (timeout == 0)
@@ -1693,11 +1688,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
16931688
nvme_stop_queues(&dev->ctrl);
16941689
csts = readl(dev->bar + NVME_REG_CSTS);
16951690
}
1691+
1692+
for (i = dev->queue_count - 1; i > 0; i--)
1693+
nvme_suspend_queue(dev->queues[i]);
1694+
16961695
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
1697-
for (i = dev->queue_count - 1; i >= 0; i--) {
1698-
struct nvme_queue *nvmeq = dev->queues[i];
1699-
nvme_suspend_queue(nvmeq);
1700-
}
1696+
nvme_suspend_queue(dev->queues[0]);
17011697
} else {
17021698
nvme_disable_io_queues(dev);
17031699
nvme_disable_admin_queue(dev, shutdown);

drivers/nvme/host/rdma.c

Lines changed: 45 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -12,21 +12,18 @@
1212
* more details.
1313
*/
1414
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15-
#include <linux/delay.h>
1615
#include <linux/module.h>
1716
#include <linux/init.h>
1817
#include <linux/slab.h>
1918
#include <linux/err.h>
2019
#include <linux/string.h>
21-
#include <linux/jiffies.h>
2220
#include <linux/atomic.h>
2321
#include <linux/blk-mq.h>
2422
#include <linux/types.h>
2523
#include <linux/list.h>
2624
#include <linux/mutex.h>
2725
#include <linux/scatterlist.h>
2826
#include <linux/nvme.h>
29-
#include <linux/t10-pi.h>
3027
#include <asm/unaligned.h>
3128

3229
#include <rdma/ib_verbs.h>
@@ -169,7 +166,6 @@ MODULE_PARM_DESC(register_always,
169166
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
170167
struct rdma_cm_event *event);
171168
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
172-
static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
173169

174170
/* XXX: really should move to a generic header sooner or later.. */
175171
static inline void put_unaligned_le24(u32 val, u8 *p)
@@ -687,11 +683,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
687683
list_del(&ctrl->list);
688684
mutex_unlock(&nvme_rdma_ctrl_mutex);
689685

690-
if (ctrl->ctrl.tagset) {
691-
blk_cleanup_queue(ctrl->ctrl.connect_q);
692-
blk_mq_free_tag_set(&ctrl->tag_set);
693-
nvme_rdma_dev_put(ctrl->device);
694-
}
695686
kfree(ctrl->queues);
696687
nvmf_free_options(nctrl->opts);
697688
free_ctrl:
@@ -748,8 +739,11 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
748739
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
749740
WARN_ON_ONCE(!changed);
750741

751-
if (ctrl->queue_count > 1)
742+
if (ctrl->queue_count > 1) {
752743
nvme_start_queues(&ctrl->ctrl);
744+
nvme_queue_scan(&ctrl->ctrl);
745+
nvme_queue_async_events(&ctrl->ctrl);
746+
}
753747

754748
dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
755749

@@ -1269,7 +1263,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
12691263
{
12701264
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
12711265
struct rdma_conn_param param = { };
1272-
struct nvme_rdma_cm_req priv;
1266+
struct nvme_rdma_cm_req priv = { };
12731267
int ret;
12741268

12751269
param.qp_num = queue->qp->qp_num;
@@ -1318,37 +1312,39 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
13181312
* that caught the event. Since we hold the callout until the controller
13191313
* deletion is completed, we'll deadlock if the controller deletion will
13201314
* call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
1321-
* of destroying this queue before-hand, destroy the queue resources
1322-
* after the controller deletion completed with the exception of destroying
1323-
* the cm_id implicitely by returning a non-zero rc to the callout.
1315+
* of destroying this queue before-hand, destroy the queue resources,
1316+
* then queue the controller deletion which won't destroy this queue and
1317+
* we destroy the cm_id implicitely by returning a non-zero rc to the callout.
13241318
*/
13251319
static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
13261320
{
13271321
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1328-
int ret, ctrl_deleted = 0;
1322+
int ret;
13291323

1330-
/* First disable the queue so ctrl delete won't free it */
1331-
if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
1332-
goto out;
1324+
/* Own the controller deletion */
1325+
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
1326+
return 0;
13331327

1334-
/* delete the controller */
1335-
ret = __nvme_rdma_del_ctrl(ctrl);
1336-
if (!ret) {
1337-
dev_warn(ctrl->ctrl.device,
1338-
"Got rdma device removal event, deleting ctrl\n");
1339-
flush_work(&ctrl->delete_work);
1328+
dev_warn(ctrl->ctrl.device,
1329+
"Got rdma device removal event, deleting ctrl\n");
13401330

1341-
/* Return non-zero so the cm_id will destroy implicitly */
1342-
ctrl_deleted = 1;
1331+
/* Get rid of reconnect work if its running */
1332+
cancel_delayed_work_sync(&ctrl->reconnect_work);
13431333

1334+
/* Disable the queue so ctrl delete won't free it */
1335+
if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
13441336
/* Free this queue ourselves */
1345-
rdma_disconnect(queue->cm_id);
1346-
ib_drain_qp(queue->qp);
1337+
nvme_rdma_stop_queue(queue);
13471338
nvme_rdma_destroy_queue_ib(queue);
1339+
1340+
/* Return non-zero so the cm_id will destroy implicitly */
1341+
ret = 1;
13481342
}
13491343

1350-
out:
1351-
return ctrl_deleted;
1344+
/* Queue controller deletion */
1345+
queue_work(nvme_rdma_wq, &ctrl->delete_work);
1346+
flush_work(&ctrl->delete_work);
1347+
return ret;
13521348
}
13531349

13541350
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1648,7 +1644,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
16481644
nvme_rdma_free_io_queues(ctrl);
16491645
}
16501646

1651-
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
1647+
if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
16521648
nvme_shutdown_ctrl(&ctrl->ctrl);
16531649

16541650
blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1657,15 +1653,27 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
16571653
nvme_rdma_destroy_admin_queue(ctrl);
16581654
}
16591655

1656+
static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
1657+
{
1658+
nvme_uninit_ctrl(&ctrl->ctrl);
1659+
if (shutdown)
1660+
nvme_rdma_shutdown_ctrl(ctrl);
1661+
1662+
if (ctrl->ctrl.tagset) {
1663+
blk_cleanup_queue(ctrl->ctrl.connect_q);
1664+
blk_mq_free_tag_set(&ctrl->tag_set);
1665+
nvme_rdma_dev_put(ctrl->device);
1666+
}
1667+
1668+
nvme_put_ctrl(&ctrl->ctrl);
1669+
}
1670+
16601671
static void nvme_rdma_del_ctrl_work(struct work_struct *work)
16611672
{
16621673
struct nvme_rdma_ctrl *ctrl = container_of(work,
16631674
struct nvme_rdma_ctrl, delete_work);
16641675

1665-
nvme_remove_namespaces(&ctrl->ctrl);
1666-
nvme_rdma_shutdown_ctrl(ctrl);
1667-
nvme_uninit_ctrl(&ctrl->ctrl);
1668-
nvme_put_ctrl(&ctrl->ctrl);
1676+
__nvme_rdma_remove_ctrl(ctrl, true);
16691677
}
16701678

16711679
static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
@@ -1698,9 +1706,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
16981706
struct nvme_rdma_ctrl *ctrl = container_of(work,
16991707
struct nvme_rdma_ctrl, delete_work);
17001708

1701-
nvme_remove_namespaces(&ctrl->ctrl);
1702-
nvme_uninit_ctrl(&ctrl->ctrl);
1703-
nvme_put_ctrl(&ctrl->ctrl);
1709+
__nvme_rdma_remove_ctrl(ctrl, false);
17041710
}
17051711

17061712
static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
@@ -1739,6 +1745,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
17391745
if (ctrl->queue_count > 1) {
17401746
nvme_start_queues(&ctrl->ctrl);
17411747
nvme_queue_scan(&ctrl->ctrl);
1748+
nvme_queue_async_events(&ctrl->ctrl);
17421749
}
17431750

17441751
return;

drivers/nvme/target/admin-cmd.c

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
*/
1414
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1515
#include <linux/module.h>
16-
#include <linux/random.h>
1716
#include <generated/utsrelease.h>
1817
#include "nvmet.h"
1918

@@ -83,7 +82,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
8382
{
8483
struct nvmet_ctrl *ctrl = req->sq->ctrl;
8584
struct nvme_id_ctrl *id;
86-
u64 serial;
8785
u16 status = 0;
8886

8987
id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -96,10 +94,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
9694
id->vid = 0;
9795
id->ssvid = 0;
9896

99-
/* generate a random serial number as our controllers are ephemeral: */
100-
get_random_bytes(&serial, sizeof(serial));
10197
memset(id->sn, ' ', sizeof(id->sn));
102-
snprintf(id->sn, sizeof(id->sn), "%llx", serial);
98+
snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
10399

104100
memset(id->mn, ' ', sizeof(id->mn));
105101
strncpy((char *)id->mn, "Linux", sizeof(id->mn));

drivers/nvme/target/core.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
*/
1414
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1515
#include <linux/module.h>
16+
#include <linux/random.h>
1617
#include "nvmet.h"
1718

1819
static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
@@ -728,6 +729,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
728729
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
729730
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
730731

732+
/* generate a random serial number as our controllers are ephemeral: */
733+
get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
734+
731735
kref_init(&ctrl->ref);
732736
ctrl->subsys = subsys;
733737

drivers/nvme/target/loop.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -414,9 +414,8 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work)
414414
struct nvme_loop_ctrl *ctrl = container_of(work,
415415
struct nvme_loop_ctrl, delete_work);
416416

417-
nvme_remove_namespaces(&ctrl->ctrl);
418-
nvme_loop_shutdown_ctrl(ctrl);
419417
nvme_uninit_ctrl(&ctrl->ctrl);
418+
nvme_loop_shutdown_ctrl(ctrl);
420419
nvme_put_ctrl(&ctrl->ctrl);
421420
}
422421

@@ -501,7 +500,6 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
501500
nvme_loop_destroy_admin_queue(ctrl);
502501
out_disable:
503502
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
504-
nvme_remove_namespaces(&ctrl->ctrl);
505503
nvme_uninit_ctrl(&ctrl->ctrl);
506504
nvme_put_ctrl(&ctrl->ctrl);
507505
}

drivers/nvme/target/nvmet.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,7 @@ struct nvmet_ctrl {
113113

114114
struct mutex lock;
115115
u64 cap;
116+
u64 serial;
116117
u32 cc;
117118
u32 csts;
118119

0 commit comments

Comments (0)