Commit 8dc8146

damien-lemoal authored and axboe committed
deadline-iosched: Introduce zone locking support
Introduce zone write locking to avoid write request reordering with zoned block devices. This is achieved using a finer selection of the next request to dispatch:

1) Any non-write request is always allowed to proceed.
2) Any write to a conventional zone is always allowed to proceed.
3) For a write to a sequential zone, the zone lock is first checked.
   a) If the zone is not locked, the write is allowed to proceed after its target zone is locked.
   b) If the zone is locked, the write request is skipped and the next request in the dispatch queue is tested (back to step 1).

For a write request that has locked its target zone, the zone is unlocked either when the request completes and the method deadline_completed_request() is called, or when the request is requeued using the method deadline_add_request().

Requests targeting a locked zone are always left in the scheduler queue to preserve the initial write order. If no write request can be dispatched, reads are allowed to be dispatched even if the write batch is not done.

If the device used is not a zoned block device, or if zoned block device support is disabled, this patch does not modify the deadline behavior.

Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
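Expressed as code, the selection rules above reduce to a single predicate. The sketch below is illustrative only: the patch itself calls blk_req_can_dispatch_to_zone(), and write_can_dispatch() is a hypothetical expansion written in terms of the zone write locking helpers (blk_rq_zone_is_seq(), blk_req_zone_is_write_locked()) added earlier in this series:

/*
 * Illustrative expansion of the dispatch test described above. The
 * patch uses blk_req_can_dispatch_to_zone(), which wraps an
 * equivalent check; this exact function does not exist in the tree.
 */
static bool write_can_dispatch(struct request *rq)
{
	/* 1) Any non-write request is always allowed to proceed. */
	if (rq_data_dir(rq) != WRITE)
		return true;

	/* 2) Any write to a conventional zone is always allowed to proceed. */
	if (!blk_rq_zone_is_seq(rq))
		return true;

	/* 3) A write to a sequential zone may proceed only while its zone is unlocked. */
	return !blk_req_zone_is_write_locked(rq);
}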
1 parent c117bac commit 8dc8146

File tree

1 file changed: +68 -3 lines changed

block/deadline-iosched.c

Lines changed: 68 additions & 3 deletions
@@ -98,6 +98,12 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 	struct deadline_data *dd = q->elevator->elevator_data;
 	const int data_dir = rq_data_dir(rq);
 
+	/*
+	 * This may be a requeue of a write request that has locked its
+	 * target zone. If it is the case, this releases the zone lock.
+	 */
+	blk_req_zone_write_unlock(rq);
+
 	deadline_add_rq_rb(dd, rq);
 
 	/*
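Note that blk_req_zone_write_unlock() may be called unconditionally here: for a request that never locked a zone (a read, a conventional-zone write, or a first-time insertion rather than a requeue) the unlock is a no-op. A paraphrased sketch of that behavior, with the exact body being an assumption about the helper added earlier in this series:

/*
 * Paraphrased sketch: unlocking is a no-op unless this request
 * actually holds its target zone's write lock (tracked with a
 * request flag), so callers may invoke it unconditionally.
 */
void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}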
@@ -188,6 +194,12 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
+	/*
+	 * For a zoned block device, write requests must write lock their
+	 * target zone.
+	 */
+	blk_req_zone_write_lock(rq);
+
 	deadline_remove_request(q, rq);
 	elv_dispatch_add_tail(q, rq);
 }
@@ -235,13 +247,28 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
 static struct request *
 deadline_fifo_request(struct deadline_data *dd, int data_dir)
 {
+	struct request *rq;
+
 	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
 		return NULL;
 
 	if (list_empty(&dd->fifo_list[data_dir]))
 		return NULL;
 
-	return rq_entry_fifo(dd->fifo_list[data_dir].next);
+	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+		return rq;
+
+	/*
+	 * Look for a write request that can be dispatched, that is one with
+	 * an unlocked target zone.
+	 */
+	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
+		if (blk_req_can_dispatch_to_zone(rq))
+			return rq;
+	}
+
+	return NULL;
 }
 
 /*
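For context, blk_req_can_dispatch_to_zone() combines rules 1)-3) from the commit message into one test. Paraphrased from the zone write locking helpers introduced earlier in this series (the body shown here is an approximation, not a verbatim copy):

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	/* Reads, and writes to conventional zones, never need the zone lock. */
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	/* A write to a sequential zone may dispatch only while the zone is unlocked. */
	return !blk_req_zone_is_write_locked(rq);
}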
@@ -251,10 +278,29 @@ deadline_fifo_request(struct deadline_data *dd, int data_dir)
 static struct request *
 deadline_next_request(struct deadline_data *dd, int data_dir)
 {
+	struct request *rq;
+
 	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
 		return NULL;
 
-	return dd->next_rq[data_dir];
+	rq = dd->next_rq[data_dir];
+	if (!rq)
+		return NULL;
+
+	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+		return rq;
+
+	/*
+	 * Look for a write request that can be dispatched, that is one with
+	 * an unlocked target zone.
+	 */
+	while (rq) {
+		if (blk_req_can_dispatch_to_zone(rq))
+			return rq;
+		rq = deadline_latter_request(rq);
+	}
+
+	return NULL;
 }
 
 /*
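The while loop above walks the requests in sector order rather than giving up at the first locked zone. deadline_latter_request() is the file's existing rb-tree successor helper; it looks roughly like this (paraphrased, so treat the exact body as an assumption):

/*
 * Successor of rq in the per-direction red-black (sector) tree,
 * or NULL when rq is the last request in that direction.
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}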
@@ -288,7 +334,8 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
 	if (reads) {
 		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
 
-		if (writes && (dd->starved++ >= dd->writes_starved))
+		if (deadline_fifo_request(dd, WRITE) &&
+		    (dd->starved++ >= dd->writes_starved))
 			goto dispatch_writes;
 
 		data_dir = READ;
@@ -333,6 +380,13 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
 		rq = next_rq;
 	}
 
+	/*
+	 * For a zoned block device, if we only have writes queued and none of
+	 * them can be dispatched, rq will be NULL.
+	 */
+	if (!rq)
+		return 0;
+
 	dd->batching = 0;
 
 dispatch_request:
@@ -345,6 +399,16 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
 	return 1;
 }
 
+/*
+ * For zoned block devices, write unlock the target zone of completed
+ * write requests.
+ */
+static void
+deadline_completed_request(struct request_queue *q, struct request *rq)
+{
+	blk_req_zone_write_unlock(rq);
+}
+
 static void deadline_exit_queue(struct elevator_queue *e)
 {
 	struct deadline_data *dd = e->elevator_data;
@@ -466,6 +530,7 @@ static struct elevator_type iosched_deadline = {
 		.elevator_merged_fn =		deadline_merged_request,
 		.elevator_merge_req_fn =	deadline_merged_requests,
 		.elevator_dispatch_fn =		deadline_dispatch_requests,
+		.elevator_completed_req_fn =	deadline_completed_request,
 		.elevator_add_req_fn =		deadline_add_request,
 		.elevator_former_req_fn =	elv_rb_former_request,
 		.elevator_latter_req_fn =	elv_rb_latter_request,
