Commit dc5db21

Merge tag 'for-linus-20181109' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:

 - Two fixes for an ubd regression, one for missing locking, and one for a missing initialization of a field. The latter was an old latent bug, but it's now visible and triggers (Me, Anton Ivanov)

 - Set of NVMe fixes via Christoph, but applied manually due to a git tree mixup (Christoph, Sagi)

 - Fix for a discard split regression, in three patches (Ming)

 - Update libata git trees (Geert)

 - SPDX identifier for sata_rcar (Kuninori Morimoto)

 - Virtual boundary merge fix (Johannes)

 - Preemptively clear memory we are going to pass to userspace, in case the driver does a short read (Keith)

* tag 'for-linus-20181109' of git://git.kernel.dk/linux-block:
  block: make sure writesame bio is aligned with logical block size
  block: cleanup __blkdev_issue_discard()
  block: make sure discard bio is aligned with logical block size
  Revert "nvmet-rdma: use a private workqueue for delete"
  nvme: make sure ns head inherits underlying device limits
  nvmet: don't try to add ns to p2p map unless it actually uses it
  sata_rcar: convert to SPDX identifiers
  ubd: fix missing initialization of io_req
  block: Clear kernel memory before copying to user
  MAINTAINERS: Fix remaining pointers to obsolete libata.git
  ubd: fix missing lock around request issue
  block: respect virtual boundary mask in bvecs

2 parents d757a3b + 34ffec6 commit dc5db21

File tree: 11 files changed, +43 −51 lines

MAINTAINERS

Lines changed: 3 additions & 3 deletions

@@ -8367,7 +8367,7 @@ F: drivers/media/dvb-frontends/lgdt3305.*
 LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
 M: Viresh Kumar <vireshk@kernel.org>
 L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S: Maintained
 F: include/linux/pata_arasan_cf_data.h
 F: drivers/ata/pata_arasan_cf.c
@@ -8384,7 +8384,7 @@ F: drivers/ata/ata_generic.c
 LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
 M: Linus Walleij <linus.walleij@linaro.org>
 L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S: Maintained
 F: drivers/ata/pata_ftide010.c
 F: drivers/ata/sata_gemini.c
@@ -8403,7 +8403,7 @@ F: include/linux/ahci_platform.h
 LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
 M: Mikael Pettersson <mikpelinux@gmail.com>
 L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S: Maintained
 F: drivers/ata/sata_promise.*

arch/um/drivers/ubd_kern.c

Lines changed: 8 additions & 4 deletions

@@ -1305,6 +1305,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 		io_req->fds[0] = dev->cow.fd;
 	else
 		io_req->fds[0] = dev->fd;
+	io_req->error = 0;
 
 	if (req_op(req) == REQ_OP_FLUSH) {
 		io_req->op = UBD_FLUSH;
@@ -1313,9 +1314,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 	io_req->cow_offset = -1;
 	io_req->offset = off;
 	io_req->length = bvec->bv_len;
-	io_req->error = 0;
 	io_req->sector_mask = 0;
-
 	io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
 	io_req->offsets[0] = 0;
 	io_req->offsets[1] = dev->cow.data_offset;
@@ -1341,11 +1340,14 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 				 const struct blk_mq_queue_data *bd)
 {
+	struct ubd *ubd_dev = hctx->queue->queuedata;
 	struct request *req = bd->rq;
 	int ret = 0;
 
 	blk_mq_start_request(req);
 
+	spin_lock_irq(&ubd_dev->lock);
+
 	if (req_op(req) == REQ_OP_FLUSH) {
 		ret = ubd_queue_one_vec(hctx, req, 0, NULL);
 	} else {
@@ -1361,9 +1363,11 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 	}
 out:
-	if (ret < 0) {
+	spin_unlock_irq(&ubd_dev->lock);
+
+	if (ret < 0)
 		blk_mq_requeue_request(req, true);
-	}
+
 	return BLK_STS_OK;
 }

block/bio.c

Lines changed: 1 addition & 0 deletions

@@ -1260,6 +1260,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 		if (ret)
 			goto cleanup;
 	} else {
+		zero_fill_bio(bio);
 		iov_iter_advance(iter, bio->bi_iter.bi_size);
 	}
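
The zero_fill_bio() call above pre-clears the bio's pages so that a driver completing a short read cannot leak stale kernel memory back to userspace. The following is a minimal userspace sketch of the same idea, not kernel code: read_block() and BLOCK_SIZE are made-up stand-ins for a driver path that may return fewer bytes than requested.

#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define BLOCK_SIZE 4096

/* Stand-in for a driver read that may complete only part of the request. */
static ssize_t read_block(int fd, void *buf, size_t len)
{
	return read(fd, buf, len);	/* may return fewer than len bytes */
}

int main(void)
{
	char buf[BLOCK_SIZE];

	/* Counterpart of zero_fill_bio(): clear the buffer up front so any
	 * bytes the "device" never fills are zeroes, not stale memory. */
	memset(buf, 0, sizeof(buf));

	if (read_block(STDIN_FILENO, buf, sizeof(buf)) < 0)
		return 1;

	/* The consumer always receives BLOCK_SIZE bytes; the unfilled tail
	 * is guaranteed to be zero rather than leaked data. */
	fwrite(buf, 1, sizeof(buf), stdout);
	return 0;
}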

block/blk-lib.c

Lines changed: 7 additions & 19 deletions

@@ -51,25 +51,21 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if ((sector | nr_sects) & bs_mask)
 		return -EINVAL;
 
-	while (nr_sects) {
-		unsigned int req_sects = nr_sects;
-		sector_t end_sect;
-
-		if (!req_sects)
-			goto fail;
-		if (req_sects > UINT_MAX >> 9)
-			req_sects = UINT_MAX >> 9;
+	if (!nr_sects)
+		return -EINVAL;
 
-		end_sect = sector + req_sects;
+	while (nr_sects) {
+		unsigned int req_sects = min_t(unsigned int, nr_sects,
+				bio_allowed_max_sectors(q));
 
 		bio = blk_next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio_set_dev(bio, bdev);
 		bio_set_op_attrs(bio, op, 0);
 
 		bio->bi_iter.bi_size = req_sects << 9;
+		sector += req_sects;
 		nr_sects -= req_sects;
-		sector = end_sect;
 
 		/*
 		 * We can loop for a long time in here, if someone does
@@ -82,14 +78,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	*biop = bio;
 	return 0;
-
-fail:
-	if (bio) {
-		submit_bio_wait(bio);
-		bio_put(bio);
-	}
-	*biop = NULL;
-	return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(__blkdev_issue_discard);
 
@@ -161,7 +149,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	return -EOPNOTSUPP;
 
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
-	max_write_same_sectors = UINT_MAX >> 9;
+	max_write_same_sectors = bio_allowed_max_sectors(q);
 
 	while (nr_sects) {
 		bio = blk_next_bio(bio, 1, gfp_mask);
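
With the cleanup, each discard (and write-same) bio is capped at bio_allowed_max_sectors(q), so every chunk fits in the 32-bit bi_size field and stays a multiple of the logical block size. Below is a userspace sketch of the splitting arithmetic only; the cap value is what the helper yields for a 4096-byte logical block, and the request size is a made-up example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* round_down(UINT_MAX, 4096) >> 9, i.e. bio_allowed_max_sectors()
	 * for a 4096-byte logical block */
	const unsigned int max_sectors = 8388600;
	uint64_t sector = 0;
	uint64_t nr_sects = 20000000;	/* example: ~9.5 GiB of 512-byte sectors */
	unsigned int nbios = 0;

	while (nr_sects) {
		unsigned int req_sects =
			nr_sects < max_sectors ? nr_sects : max_sectors;

		printf("bio %u: sector %llu, %u sectors\n", ++nbios,
		       (unsigned long long)sector, req_sects);

		sector += req_sects;
		nr_sects -= req_sects;
	}
	return 0;
}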

block/blk-merge.c

Lines changed: 3 additions & 2 deletions

@@ -46,7 +46,7 @@ static inline bool bio_will_gap(struct request_queue *q,
 		bio_get_first_bvec(prev_rq->bio, &pb);
 	else
 		bio_get_first_bvec(prev, &pb);
-	if (pb.bv_offset)
+	if (pb.bv_offset & queue_virt_boundary(q))
 		return true;
 
 	/*
@@ -90,7 +90,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors = min(q->limits.max_discard_sectors,
+			bio_allowed_max_sectors(q));
 	max_discard_sectors -= max_discard_sectors % granularity;
 
 	if (unlikely(!max_discard_sectors)) {
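
The boundary checks in bio_will_gap() here and in __bvec_gap_to_prev() (block/blk.h below) now test the offset against queue_virt_boundary(q) instead of rejecting any non-zero offset. A standalone sketch of that gap test follows; the 4 KiB mask and the bvec values are made-up examples, not taken from a real device.

#include <stdbool.h>
#include <stdio.h>

struct bvec {
	unsigned int bv_offset;
	unsigned int bv_len;
};

/* Standalone version of the boundary gap test: two segments may share a
 * request only if the previous one ends on the virtual boundary and the
 * next one starts on it. */
static bool gap_to_prev(unsigned long virt_boundary_mask,
			const struct bvec *prev, unsigned int next_offset)
{
	return (next_offset & virt_boundary_mask) ||
	       ((prev->bv_offset + prev->bv_len) & virt_boundary_mask);
}

int main(void)
{
	unsigned long mask = 0xfff;	/* 4 KiB virtual boundary */
	struct bvec prev = { .bv_offset = 0, .bv_len = 4096 };

	/* Offset 8192 is non-zero but 4 KiB aligned: no gap after the fix
	 * (the old "if (pb.bv_offset)" style test would have seen one). */
	printf("offset 8192 -> gap=%d\n", gap_to_prev(mask, &prev, 8192));

	/* Offset 512 is misaligned with the boundary: a gap either way. */
	printf("offset 512  -> gap=%d\n", gap_to_prev(mask, &prev, 512));
	return 0;
}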

block/blk.h

Lines changed: 11 additions & 1 deletion

@@ -169,7 +169,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
 		struct bio_vec *bprv, unsigned int offset)
 {
-	return offset ||
+	return (offset & queue_virt_boundary(q)) ||
 		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
@@ -395,6 +395,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
 	return rq->__deadline & ~0x1UL;
 }
 
+/*
+ * The max size one bio can handle is UINT_MAX becasue bvec_iter.bi_size
+ * is defined as 'unsigned int', meantime it has to aligned to with logical
+ * block size which is the minimum accepted unit by hardware.
+ */
+static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+{
+	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+}
+
 /*
  * Internal io_context interface
  */
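
bio_allowed_max_sectors() returns the largest sector count whose byte size is both a multiple of the logical block size and representable in the 32-bit bi_size field. The standalone worked example below checks that arithmetic for 512-byte and 4096-byte blocks; round_down() is re-implemented locally here rather than taken from the kernel.

#include <stdint.h>
#include <stdio.h>

static uint64_t round_down_u64(uint64_t x, uint64_t align)
{
	return x - (x % align);
}

/* Same arithmetic as bio_allowed_max_sectors(): bi_size is an unsigned int,
 * so a bio carries at most UINT_MAX bytes, rounded down to a whole number
 * of logical blocks, then converted to 512-byte sectors. */
static unsigned int allowed_max_sectors(unsigned int logical_block_size)
{
	return round_down_u64(UINT32_MAX, logical_block_size) >> 9;
}

int main(void)
{
	unsigned int sizes[] = { 512, 4096 };

	for (int i = 0; i < 2; i++)
		printf("lbs=%u: %u sectors max (%llu bytes)\n", sizes[i],
		       allowed_max_sectors(sizes[i]),
		       (unsigned long long)allowed_max_sectors(sizes[i]) << 9);
	return 0;
}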

drivers/ata/sata_rcar.c

Lines changed: 1 addition & 5 deletions

@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Renesas R-Car SATA driver
  *
  * Author: Vladimir Barinov <source@cogentembedded.com>
  * Copyright (C) 2013-2015 Cogent Embedded, Inc.
  * Copyright (C) 2013-2015 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
 */
 
 #include <linux/kernel.h>

drivers/nvme/host/core.c

Lines changed: 3 additions & 1 deletion

@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	if (ns->ndev)
 		nvme_nvm_update_nvm_info(ns);
 #ifdef CONFIG_NVME_MULTIPATH
-	if (ns->head->disk)
+	if (ns->head->disk) {
 		nvme_update_disk_info(ns->head->disk, ns, id);
+		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+	}
 #endif
 }

drivers/nvme/host/multipath.c

Lines changed: 1 addition & 0 deletions

@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* set to a default value for 512 until disk is validated */
 	blk_queue_logical_block_size(q, 512);
+	blk_set_stacking_limits(&q->limits);
 
 	/* we need to propagate up the VMC settings */
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
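
Together with the core.c hunk above, the multipath head disk now starts from permissive stacking defaults and then folds in each underlying namespace queue's limits, so it never advertises more than a path can handle. The sketch below is a heavily simplified userspace model of that combine-to-the-most-restrictive behaviour; the struct and the numbers are illustrative and much smaller than the kernel's struct queue_limits.

#include <stdio.h>

struct limits {
	unsigned int max_sectors;
	unsigned int max_segments;
};

/* Analogue of blk_set_stacking_limits(): start wide open so stacked
 * devices can only narrow the limits. */
static void set_stacking_limits(struct limits *l)
{
	l->max_sectors = ~0u;
	l->max_segments = ~0u;
}

/* Analogue of stacking a bottom device's limits into the top device:
 * keep the more restrictive value of each field. */
static void stack_limits(struct limits *top, const struct limits *bottom)
{
	if (bottom->max_sectors < top->max_sectors)
		top->max_sectors = bottom->max_sectors;
	if (bottom->max_segments < top->max_segments)
		top->max_segments = bottom->max_segments;
}

int main(void)
{
	struct limits head, path_a = { 2048, 128 }, path_b = { 1024, 64 };

	set_stacking_limits(&head);
	stack_limits(&head, &path_a);
	stack_limits(&head, &path_b);

	/* The head advertises only what every path can handle: 1024, 64. */
	printf("head: max_sectors=%u max_segments=%u\n",
	       head.max_sectors, head.max_segments);
	return 0;
}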

drivers/nvme/target/core.c

Lines changed: 1 addition & 1 deletion

@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
 	struct pci_dev *p2p_dev;
 	int ret;
 
-	if (!ctrl->p2p_client)
+	if (!ctrl->p2p_client || !ns->use_p2pmem)
 		return;
 
 	if (ns->p2p_dev) {

drivers/nvme/target/rdma.c

Lines changed: 4 additions & 15 deletions

@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
 	int			inline_page_count;
 };
 
-static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_rdma_delete_wq);
+		flush_scheduled_work();
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
 	if (ret) {
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 		/* Destroying rdma_cm id is not needed here */
 		return 0;
 	}
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 	}
 }
 
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+	schedule_work(&queue->release_work);
 }
 
 /**
@@ -1656,25 +1655,15 @@ static int __init nvmet_rdma_init(void)
 	if (ret)
 		goto err_ib_client;
 
-	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-	if (!nvmet_rdma_delete_wq) {
-		ret = -ENOMEM;
-		goto err_unreg_transport;
-	}
-
 	return 0;
 
-err_unreg_transport:
-	nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	return ret;
 }
 
 static void __exit nvmet_rdma_exit(void)
 {
-	destroy_workqueue(nvmet_rdma_delete_wq);
 	nvmet_unregister_transport(&nvmet_rdma_ops);
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
