Skip to content

Commit db6d995

Browse files
committed
block: remove request_list code
It's now dead code, nobody uses it.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 1028e4b commit db6d995

File tree

6 files changed

+0
-260
lines changed

6 files changed

+0
-260
lines changed

block/blk-cgroup.c

Lines changed: 0 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -76,9 +76,6 @@ static void blkg_free(struct blkcg_gq *blkg)
7676
if (blkg->pd[i])
7777
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
7878

79-
if (blkg->blkcg != &blkcg_root)
80-
blk_exit_rl(blkg->q, &blkg->rl);
81-
8279
blkg_rwstat_exit(&blkg->stat_ios);
8380
blkg_rwstat_exit(&blkg->stat_bytes);
8481
kfree(blkg);
@@ -112,13 +109,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
112109
blkg->blkcg = blkcg;
113110
atomic_set(&blkg->refcnt, 1);
114111

115-
/* root blkg uses @q->root_rl, init rl only for !root blkgs */
116-
if (blkcg != &blkcg_root) {
117-
if (blk_init_rl(&blkg->rl, q, gfp_mask))
118-
goto err_free;
119-
blkg->rl.blkg = blkg;
120-
}
121-
122112
for (i = 0; i < BLKCG_MAX_POLS; i++) {
123113
struct blkcg_policy *pol = blkcg_policy[i];
124114
struct blkg_policy_data *pd;
@@ -377,7 +367,6 @@ static void blkg_destroy_all(struct request_queue *q)
377367
}
378368

379369
q->root_blkg = NULL;
380-
q->root_rl.blkg = NULL;
381370
}
382371

383372
/*
@@ -403,41 +392,6 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
403392
}
404393
EXPORT_SYMBOL_GPL(__blkg_release_rcu);
405394

406-
/*
407-
* The next function used by blk_queue_for_each_rl(). It's a bit tricky
408-
* because the root blkg uses @q->root_rl instead of its own rl.
409-
*/
410-
struct request_list *__blk_queue_next_rl(struct request_list *rl,
411-
struct request_queue *q)
412-
{
413-
struct list_head *ent;
414-
struct blkcg_gq *blkg;
415-
416-
/*
417-
* Determine the current blkg list_head. The first entry is
418-
* root_rl which is off @q->blkg_list and mapped to the head.
419-
*/
420-
if (rl == &q->root_rl) {
421-
ent = &q->blkg_list;
422-
/* There are no more block groups, hence no request lists */
423-
if (list_empty(ent))
424-
return NULL;
425-
} else {
426-
blkg = container_of(rl, struct blkcg_gq, rl);
427-
ent = &blkg->q_node;
428-
}
429-
430-
/* walk to the next list_head, skip root blkcg */
431-
ent = ent->next;
432-
if (ent == &q->root_blkg->q_node)
433-
ent = ent->next;
434-
if (ent == &q->blkg_list)
435-
return NULL;
436-
437-
blkg = container_of(ent, struct blkcg_gq, q_node);
438-
return &blkg->rl;
439-
}
440-
441395
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
442396
struct cftype *cftype, u64 val)
443397
{
@@ -1230,7 +1184,6 @@ int blkcg_init_queue(struct request_queue *q)
12301184
if (IS_ERR(blkg))
12311185
goto err_unlock;
12321186
q->root_blkg = blkg;
1233-
q->root_rl.blkg = blkg;
12341187
spin_unlock_irq(q->queue_lock);
12351188
rcu_read_unlock();
12361189

block/blk-core.c

Lines changed: 0 additions & 75 deletions
Original file line numberDiff line numberDiff line change
@@ -450,81 +450,6 @@ void blk_cleanup_queue(struct request_queue *q)
450450
}
451451
EXPORT_SYMBOL(blk_cleanup_queue);
452452

453-
/* Allocate memory local to the request queue */
454-
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
455-
{
456-
struct request_queue *q = data;
457-
458-
return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
459-
}
460-
461-
static void free_request_simple(void *element, void *data)
462-
{
463-
kmem_cache_free(request_cachep, element);
464-
}
465-
466-
static void *alloc_request_size(gfp_t gfp_mask, void *data)
467-
{
468-
struct request_queue *q = data;
469-
struct request *rq;
470-
471-
rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
472-
q->node);
473-
if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
474-
kfree(rq);
475-
rq = NULL;
476-
}
477-
return rq;
478-
}
479-
480-
static void free_request_size(void *element, void *data)
481-
{
482-
struct request_queue *q = data;
483-
484-
if (q->exit_rq_fn)
485-
q->exit_rq_fn(q, element);
486-
kfree(element);
487-
}
488-
489-
int blk_init_rl(struct request_list *rl, struct request_queue *q,
490-
gfp_t gfp_mask)
491-
{
492-
if (unlikely(rl->rq_pool) || q->mq_ops)
493-
return 0;
494-
495-
rl->q = q;
496-
rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
497-
rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
498-
init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
499-
init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
500-
501-
if (q->cmd_size) {
502-
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
503-
alloc_request_size, free_request_size,
504-
q, gfp_mask, q->node);
505-
} else {
506-
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
507-
alloc_request_simple, free_request_simple,
508-
q, gfp_mask, q->node);
509-
}
510-
if (!rl->rq_pool)
511-
return -ENOMEM;
512-
513-
if (rl != &q->root_rl)
514-
WARN_ON_ONCE(!blk_get_queue(q));
515-
516-
return 0;
517-
}
518-
519-
void blk_exit_rl(struct request_queue *q, struct request_list *rl)
520-
{
521-
if (rl->rq_pool) {
522-
mempool_destroy(rl->rq_pool);
523-
if (rl != &q->root_rl)
524-
blk_put_queue(q);
525-
}
526-
}
527-
528453
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
529454
{
530455
return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);

block/blk-mq.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -326,10 +326,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
326326
rq->end_io_data = NULL;
327327
rq->next_rq = NULL;
328328

329-
#ifdef CONFIG_BLK_CGROUP
330-
rq->rl = NULL;
331-
#endif
332-
333329
data->ctx->rq_dispatched[op_is_sync(op)]++;
334330
refcount_set(&rq->ref, 1);
335331
return rq;

block/blk.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -120,9 +120,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
120120
int node, int cmd_size, gfp_t flags);
121121
void blk_free_flush_queue(struct blk_flush_queue *q);
122122

123-
int blk_init_rl(struct request_list *rl, struct request_queue *q,
124-
gfp_t gfp_mask);
125-
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
126123
void blk_exit_queue(struct request_queue *q);
127124
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
128125
struct bio *bio);

include/linux/blk-cgroup.h

Lines changed: 0 additions & 97 deletions
Original file line numberDiff line numberDiff line change
@@ -122,9 +122,6 @@ struct blkcg_gq {
122122
/* all non-root blkcg_gq's are guaranteed to have access to parent */
123123
struct blkcg_gq *parent;
124124

125-
/* request allocation list for this blkcg-q pair */
126-
struct request_list rl;
127-
128125
/* reference count */
129126
atomic_t refcnt;
130127

@@ -515,94 +512,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
515512
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
516513
(p_blkg)->q, false)))
517514

518-
/**
519-
* blk_get_rl - get request_list to use
520-
* @q: request_queue of interest
521-
* @bio: bio which will be attached to the allocated request (may be %NULL)
522-
*
523-
* The caller wants to allocate a request from @q to use for @bio. Find
524-
* the request_list to use and obtain a reference on it. Should be called
525-
* under queue_lock. This function is guaranteed to return non-%NULL
526-
* request_list.
527-
*/
528-
static inline struct request_list *blk_get_rl(struct request_queue *q,
529-
struct bio *bio)
530-
{
531-
struct blkcg *blkcg;
532-
struct blkcg_gq *blkg;
533-
534-
rcu_read_lock();
535-
536-
blkcg = bio_blkcg(bio);
537-
538-
/* bypass blkg lookup and use @q->root_rl directly for root */
539-
if (blkcg == &blkcg_root)
540-
goto root_rl;
541-
542-
/*
543-
* Try to use blkg->rl. blkg lookup may fail under memory pressure
544-
* or if either the blkcg or queue is going away. Fall back to
545-
* root_rl in such cases.
546-
*/
547-
blkg = blkg_lookup(blkcg, q);
548-
if (unlikely(!blkg))
549-
goto root_rl;
550-
551-
blkg_get(blkg);
552-
rcu_read_unlock();
553-
return &blkg->rl;
554-
root_rl:
555-
rcu_read_unlock();
556-
return &q->root_rl;
557-
}
558-
559-
/**
560-
* blk_put_rl - put request_list
561-
* @rl: request_list to put
562-
*
563-
* Put the reference acquired by blk_get_rl(). Should be called under
564-
* queue_lock.
565-
*/
566-
static inline void blk_put_rl(struct request_list *rl)
567-
{
568-
if (rl->blkg->blkcg != &blkcg_root)
569-
blkg_put(rl->blkg);
570-
}
571-
572-
/**
573-
* blk_rq_set_rl - associate a request with a request_list
574-
* @rq: request of interest
575-
* @rl: target request_list
576-
*
577-
* Associate @rq with @rl so that accounting and freeing can know the
578-
* request_list @rq came from.
579-
*/
580-
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
581-
{
582-
rq->rl = rl;
583-
}
584-
585-
/**
586-
* blk_rq_rl - return the request_list a request came from
587-
* @rq: request of interest
588-
*
589-
* Return the request_list @rq is allocated from.
590-
*/
591-
static inline struct request_list *blk_rq_rl(struct request *rq)
592-
{
593-
return rq->rl;
594-
}
595-
596-
struct request_list *__blk_queue_next_rl(struct request_list *rl,
597-
struct request_queue *q);
598-
/**
599-
* blk_queue_for_each_rl - iterate through all request_lists of a request_queue
600-
*
601-
* Should be used under queue_lock.
602-
*/
603-
#define blk_queue_for_each_rl(rl, q) \
604-
for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
605-
606515
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
607516
{
608517
int ret;
@@ -939,12 +848,6 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
939848
static inline void blkg_get(struct blkcg_gq *blkg) { }
940849
static inline void blkg_put(struct blkcg_gq *blkg) { }
941850

942-
static inline struct request_list *blk_get_rl(struct request_queue *q,
943-
struct bio *bio) { return &q->root_rl; }
944-
static inline void blk_put_rl(struct request_list *rl) { }
945-
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
946-
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
947-
948851
static inline bool blkcg_bio_issue_check(struct request_queue *q,
949852
struct bio *bio) { return true; }
950853

include/linux/blkdev.h

Lines changed: 0 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -58,22 +58,6 @@ struct blk_stat_callback;
5858

5959
typedef void (rq_end_io_fn)(struct request *, blk_status_t);
6060

61-
struct request_list {
62-
struct request_queue *q; /* the queue this rl belongs to */
63-
#ifdef CONFIG_BLK_CGROUP
64-
struct blkcg_gq *blkg; /* blkg this request pool belongs to */
65-
#endif
66-
/*
67-
* count[], starved[], and wait[] are indexed by
68-
* BLK_RW_SYNC/BLK_RW_ASYNC
69-
*/
70-
int count[2];
71-
int starved[2];
72-
mempool_t *rq_pool;
73-
wait_queue_head_t wait[2];
74-
unsigned int flags;
75-
};
76-
7761
/*
7862
* request flags */
7963
typedef __u32 __bitwise req_flags_t;
@@ -259,10 +243,6 @@ struct request {
259243

260244
/* for bidi */
261245
struct request *next_rq;
262-
263-
#ifdef CONFIG_BLK_CGROUP
264-
struct request_list *rl; /* rl this rq is alloced from */
265-
#endif
266246
};
267247

268248
static inline bool blk_op_is_scsi(unsigned int op)
@@ -312,8 +292,6 @@ typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
312292
struct bio_vec;
313293
typedef void (softirq_done_fn)(struct request *);
314294
typedef int (dma_drain_needed_fn)(struct request *);
315-
typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
316-
typedef void (exit_rq_fn)(struct request_queue *, struct request *);
317295

318296
enum blk_eh_timer_return {
319297
BLK_EH_DONE, /* drivers has completed the command */
@@ -427,22 +405,10 @@ struct request_queue {
427405
struct blk_queue_stats *stats;
428406
struct rq_qos *rq_qos;
429407

430-
/*
431-
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
432-
* is used, root blkg allocates from @q->root_rl and all other
433-
* blkgs from their own blkg->rl. Which one to use should be
434-
* determined using bio_request_list().
435-
*/
436-
struct request_list root_rl;
437-
438408
make_request_fn *make_request_fn;
439409
poll_q_fn *poll_fn;
440410
softirq_done_fn *softirq_done_fn;
441411
dma_drain_needed_fn *dma_drain_needed;
442-
/* Called just after a request is allocated */
443-
init_rq_fn *init_rq_fn;
444-
/* Called just before a request is freed */
445-
exit_rq_fn *exit_rq_fn;
446412

447413
const struct blk_mq_ops *mq_ops;
448414

0 commit comments

Comments (0)