Skip to content

Commit 44148a6

Browse files
committed
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block:
  ide: always ensure that blk_delay_queue() is called if we have pending IO
  block: fix request sorting at unplug
  dm: improve block integrity support
  fs: export empty_aops
  ide: ide_requeue_and_plug() reinstate "always plug" behaviour
  blk-throttle: don't call xchg on bool
  ufs: remove unessecary blk_flush_plug
  block: make the flush insertion use the tail of the dispatch list
  block: get rid of elv_insert() interface
  block: dump request state on seeing a corrupted request completion
2 parents d0de4dc + 782b86e commit 44148a6

File tree

14 files changed

+148
-91
lines changed

14 files changed

+148
-91
lines changed

block/blk-core.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2163,7 +2163,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
21632163
* size, something has gone terribly wrong.
21642164
*/
21652165
if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2166-
printk(KERN_ERR "blk: request botched\n");
2166+
blk_dump_rq_flags(req, "request botched");
21672167
req->__data_len = blk_rq_cur_bytes(req);
21682168
}
21692169

@@ -2665,7 +2665,7 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
26652665
struct request *rqa = container_of(a, struct request, queuelist);
26662666
struct request *rqb = container_of(b, struct request, queuelist);
26672667

2668-
return !(rqa->q == rqb->q);
2668+
return !(rqa->q <= rqb->q);
26692669
}
26702670

26712671
static void flush_plug_list(struct blk_plug *plug)

block/blk-flush.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -261,7 +261,7 @@ static bool blk_kick_flush(struct request_queue *q)
261261
q->flush_rq.end_io = flush_end_io;
262262

263263
q->flush_pending_idx ^= 1;
264-
elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
264+
list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
265265
return true;
266266
}
267267

@@ -281,7 +281,7 @@ static void flush_data_end_io(struct request *rq, int error)
281281
* blk_insert_flush - insert a new FLUSH/FUA request
282282
* @rq: request to insert
283283
*
284-
* To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions.
284+
* To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
285285
* @rq is being submitted. Analyze what needs to be done and put it on the
286286
* right queue.
287287
*
@@ -312,7 +312,7 @@ void blk_insert_flush(struct request *rq)
312312
*/
313313
if ((policy & REQ_FSEQ_DATA) &&
314314
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
315-
list_add(&rq->queuelist, &q->queue_head);
315+
list_add_tail(&rq->queuelist, &q->queue_head);
316316
return;
317317
}
318318

block/blk-integrity.c

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@
3030

3131
static struct kmem_cache *integrity_cachep;
3232

33+
static const char *bi_unsupported_name = "unsupported";
34+
3335
/**
3436
* blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
3537
* @q: request queue
@@ -358,6 +360,14 @@ static struct kobj_type integrity_ktype = {
358360
.release = blk_integrity_release,
359361
};
360362

363+
bool blk_integrity_is_initialized(struct gendisk *disk)
364+
{
365+
struct blk_integrity *bi = blk_get_integrity(disk);
366+
367+
return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
368+
}
369+
EXPORT_SYMBOL(blk_integrity_is_initialized);
370+
361371
/**
362372
* blk_integrity_register - Register a gendisk as being integrity-capable
363373
* @disk: struct gendisk pointer to make integrity-aware
@@ -407,7 +417,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
407417
bi->get_tag_fn = template->get_tag_fn;
408418
bi->tag_size = template->tag_size;
409419
} else
410-
bi->name = "unsupported";
420+
bi->name = bi_unsupported_name;
411421

412422
return 0;
413423
}

block/blk-throttle.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@ struct throtl_grp {
7777
unsigned long slice_end[2];
7878

7979
/* Some throttle limits got updated for the group */
80-
bool limits_changed;
80+
int limits_changed;
8181
};
8282

8383
struct throtl_data
@@ -102,7 +102,7 @@ struct throtl_data
102102
/* Work for dispatching throttled bios */
103103
struct delayed_work throtl_work;
104104

105-
bool limits_changed;
105+
int limits_changed;
106106
};
107107

108108
enum tg_state_flags {

block/elevator.c

Lines changed: 15 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -610,7 +610,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
610610

611611
rq->cmd_flags &= ~REQ_STARTED;
612612

613-
elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
613+
__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
614614
}
615615

616616
void elv_drain_elevator(struct request_queue *q)
@@ -655,12 +655,25 @@ void elv_quiesce_end(struct request_queue *q)
655655
queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
656656
}
657657

658-
void elv_insert(struct request_queue *q, struct request *rq, int where)
658+
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
659659
{
660660
trace_block_rq_insert(q, rq);
661661

662662
rq->q = q;
663663

664+
BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
665+
666+
if (rq->cmd_flags & REQ_SOFTBARRIER) {
667+
/* barriers are scheduling boundary, update end_sector */
668+
if (rq->cmd_type == REQ_TYPE_FS ||
669+
(rq->cmd_flags & REQ_DISCARD)) {
670+
q->end_sector = rq_end_sector(rq);
671+
q->boundary_rq = rq;
672+
}
673+
} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
674+
where == ELEVATOR_INSERT_SORT)
675+
where = ELEVATOR_INSERT_BACK;
676+
664677
switch (where) {
665678
case ELEVATOR_INSERT_REQUEUE:
666679
case ELEVATOR_INSERT_FRONT:
@@ -722,24 +735,6 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
722735
BUG();
723736
}
724737
}
725-
726-
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
727-
{
728-
BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
729-
730-
if (rq->cmd_flags & REQ_SOFTBARRIER) {
731-
/* barriers are scheduling boundary, update end_sector */
732-
if (rq->cmd_type == REQ_TYPE_FS ||
733-
(rq->cmd_flags & REQ_DISCARD)) {
734-
q->end_sector = rq_end_sector(rq);
735-
q->boundary_rq = rq;
736-
}
737-
} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
738-
where == ELEVATOR_INSERT_SORT)
739-
where = ELEVATOR_INSERT_BACK;
740-
741-
elv_insert(q, rq, where);
742-
}
743738
EXPORT_SYMBOL(__elv_add_request);
744739

745740
void elv_add_request(struct request_queue *q, struct request *rq, int where)

drivers/ide/ide-io.c

Lines changed: 21 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -430,6 +430,26 @@ static inline void ide_unlock_host(struct ide_host *host)
430430
}
431431
}
432432

433+
static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
434+
{
435+
if (rq)
436+
blk_requeue_request(q, rq);
437+
if (rq || blk_peek_request(q)) {
438+
/* Use 3ms as that was the old plug delay */
439+
blk_delay_queue(q, 3);
440+
}
441+
}
442+
443+
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
444+
{
445+
struct request_queue *q = drive->queue;
446+
unsigned long flags;
447+
448+
spin_lock_irqsave(q->queue_lock, flags);
449+
__ide_requeue_and_plug(q, rq);
450+
spin_unlock_irqrestore(q->queue_lock, flags);
451+
}
452+
433453
/*
434454
* Issue a new request to a device.
435455
*/
@@ -550,28 +570,7 @@ void do_ide_request(struct request_queue *q)
550570
ide_unlock_host(host);
551571
plug_device_2:
552572
spin_lock_irq(q->queue_lock);
553-
554-
if (rq) {
555-
blk_requeue_request(q, rq);
556-
blk_delay_queue(q, queue_run_ms);
557-
}
558-
}
559-
560-
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
561-
{
562-
struct request_queue *q = drive->queue;
563-
unsigned long flags;
564-
565-
spin_lock_irqsave(q->queue_lock, flags);
566-
567-
if (rq)
568-
blk_requeue_request(q, rq);
569-
570-
spin_unlock_irqrestore(q->queue_lock, flags);
571-
572-
/* Use 3ms as that was the old plug delay */
573-
if (rq)
574-
blk_delay_queue(q, 3);
573+
__ide_requeue_and_plug(q, rq);
575574
}
576575

577576
static int drive_is_ready(ide_drive_t *drive)

drivers/md/dm-table.c

Lines changed: 80 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -926,21 +926,81 @@ static int dm_table_build_index(struct dm_table *t)
926926
return r;
927927
}
928928

929+
/*
930+
* Get a disk whose integrity profile reflects the table's profile.
931+
* If %match_all is true, all devices' profiles must match.
932+
* If %match_all is false, all devices must at least have an
933+
* allocated integrity profile; but uninitialized is ok.
934+
* Returns NULL if integrity support was inconsistent or unavailable.
935+
*/
936+
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
937+
bool match_all)
938+
{
939+
struct list_head *devices = dm_table_get_devices(t);
940+
struct dm_dev_internal *dd = NULL;
941+
struct gendisk *prev_disk = NULL, *template_disk = NULL;
942+
943+
list_for_each_entry(dd, devices, list) {
944+
template_disk = dd->dm_dev.bdev->bd_disk;
945+
if (!blk_get_integrity(template_disk))
946+
goto no_integrity;
947+
if (!match_all && !blk_integrity_is_initialized(template_disk))
948+
continue; /* skip uninitialized profiles */
949+
else if (prev_disk &&
950+
blk_integrity_compare(prev_disk, template_disk) < 0)
951+
goto no_integrity;
952+
prev_disk = template_disk;
953+
}
954+
955+
return template_disk;
956+
957+
no_integrity:
958+
if (prev_disk)
959+
DMWARN("%s: integrity not set: %s and %s profile mismatch",
960+
dm_device_name(t->md),
961+
prev_disk->disk_name,
962+
template_disk->disk_name);
963+
return NULL;
964+
}
965+
929966
/*
930967
* Register the mapped device for blk_integrity support if
931-
* the underlying devices support it.
968+
* the underlying devices have an integrity profile. But all devices
969+
* may not have matching profiles (checking all devices isn't reliable
970+
* during table load because this table may use other DM device(s) which
971+
* must be resumed before they will have an initialized integity profile).
972+
* Stacked DM devices force a 2 stage integrity profile validation:
973+
* 1 - during load, validate all initialized integrity profiles match
974+
* 2 - during resume, validate all integrity profiles match
932975
*/
933976
static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
934977
{
935-
struct list_head *devices = dm_table_get_devices(t);
936-
struct dm_dev_internal *dd;
978+
struct gendisk *template_disk = NULL;
937979

938-
list_for_each_entry(dd, devices, list)
939-
if (bdev_get_integrity(dd->dm_dev.bdev)) {
940-
t->integrity_supported = 1;
941-
return blk_integrity_register(dm_disk(md), NULL);
942-
}
980+
template_disk = dm_table_get_integrity_disk(t, false);
981+
if (!template_disk)
982+
return 0;
943983

984+
if (!blk_integrity_is_initialized(dm_disk(md))) {
985+
t->integrity_supported = 1;
986+
return blk_integrity_register(dm_disk(md), NULL);
987+
}
988+
989+
/*
990+
* If DM device already has an initalized integrity
991+
* profile the new profile should not conflict.
992+
*/
993+
if (blk_integrity_is_initialized(template_disk) &&
994+
blk_integrity_compare(dm_disk(md), template_disk) < 0) {
995+
DMWARN("%s: conflict with existing integrity profile: "
996+
"%s profile mismatch",
997+
dm_device_name(t->md),
998+
template_disk->disk_name);
999+
return 1;
1000+
}
1001+
1002+
/* Preserve existing initialized integrity profile */
1003+
t->integrity_supported = 1;
9441004
return 0;
9451005
}
9461006

@@ -1094,41 +1154,27 @@ int dm_calculate_queue_limits(struct dm_table *table,
10941154

10951155
/*
10961156
* Set the integrity profile for this device if all devices used have
1097-
* matching profiles.
1157+
* matching profiles. We're quite deep in the resume path but still
1158+
* don't know if all devices (particularly DM devices this device
1159+
* may be stacked on) have matching profiles. Even if the profiles
1160+
* don't match we have no way to fail (to resume) at this point.
10981161
*/
10991162
static void dm_table_set_integrity(struct dm_table *t)
11001163
{
1101-
struct list_head *devices = dm_table_get_devices(t);
1102-
struct dm_dev_internal *prev = NULL, *dd = NULL;
1164+
struct gendisk *template_disk = NULL;
11031165

11041166
if (!blk_get_integrity(dm_disk(t->md)))
11051167
return;
11061168

1107-
list_for_each_entry(dd, devices, list) {
1108-
if (prev &&
1109-
blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
1110-
dd->dm_dev.bdev->bd_disk) < 0) {
1111-
DMWARN("%s: integrity not set: %s and %s mismatch",
1112-
dm_device_name(t->md),
1113-
prev->dm_dev.bdev->bd_disk->disk_name,
1114-
dd->dm_dev.bdev->bd_disk->disk_name);
1115-
goto no_integrity;
1116-
}
1117-
prev = dd;
1169+
template_disk = dm_table_get_integrity_disk(t, true);
1170+
if (!template_disk &&
1171+
blk_integrity_is_initialized(dm_disk(t->md))) {
1172+
DMWARN("%s: device no longer has a valid integrity profile",
1173+
dm_device_name(t->md));
1174+
return;
11181175
}
1119-
1120-
if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
1121-
goto no_integrity;
1122-
11231176
blk_integrity_register(dm_disk(t->md),
1124-
bdev_get_integrity(prev->dm_dev.bdev));
1125-
1126-
return;
1127-
1128-
no_integrity:
1129-
blk_integrity_register(dm_disk(t->md), NULL);
1130-
1131-
return;
1177+
blk_get_integrity(template_disk));
11321178
}
11331179

11341180
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,

fs/inode.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,6 +124,14 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
124124
*/
125125
static DECLARE_RWSEM(iprune_sem);
126126

127+
/*
128+
* Empty aops. Can be used for the cases where the user does not
129+
* define any of the address_space operations.
130+
*/
131+
const struct address_space_operations empty_aops = {
132+
};
133+
EXPORT_SYMBOL(empty_aops);
134+
127135
/*
128136
* Statistics gathering..
129137
*/
@@ -176,7 +184,6 @@ int proc_nr_inodes(ctl_table *table, int write,
176184
*/
177185
int inode_init_always(struct super_block *sb, struct inode *inode)
178186
{
179-
static const struct address_space_operations empty_aops;
180187
static const struct inode_operations empty_iops;
181188
static const struct file_operations empty_fops;
182189
struct address_space *const mapping = &inode->i_data;

fs/nilfs2/page.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -495,8 +495,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
495495
void nilfs_mapping_init(struct address_space *mapping,
496496
struct backing_dev_info *bdi)
497497
{
498-
static const struct address_space_operations empty_aops;
499-
500498
mapping->host = NULL;
501499
mapping->flags = 0;
502500
mapping_set_gfp_mask(mapping, GFP_NOFS);

0 commit comments

Comments (0)