@@ -140,7 +140,8 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
140
140
return atomic_read (& hctx -> nr_active ) < depth ;
141
141
}
142
142
143
- static int __bt_get_word (struct blk_align_bitmap * bm , unsigned int last_tag )
143
+ static int __bt_get_word (struct blk_align_bitmap * bm , unsigned int last_tag ,
144
+ bool nowrap )
144
145
{
145
146
int tag , org_last_tag = last_tag ;
146
147
@@ -152,7 +153,7 @@ static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
152
153
* offset to 0 in a failure case, so start from 0 to
153
154
* exhaust the map.
154
155
*/
155
- if (org_last_tag && last_tag ) {
156
+ if (org_last_tag && last_tag && ! nowrap ) {
156
157
last_tag = org_last_tag = 0 ;
157
158
continue ;
158
159
}
@@ -170,6 +171,8 @@ static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
170
171
return tag ;
171
172
}
172
173
174
+ /* True when this tag set uses round-robin (strict cyclic) tag allocation. */
+ #define BT_ALLOC_RR(tags) ((tags)->alloc_policy == BLK_TAG_ALLOC_RR)
175
+
173
176
/*
174
177
* Straight forward bitmap tag implementation, where each bit is a tag
175
178
* (cleared == free, and set == busy). The small twist is using per-cpu
@@ -182,7 +185,7 @@ static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
182
185
* until the map is exhausted.
183
186
*/
184
187
static int __bt_get (struct blk_mq_hw_ctx * hctx , struct blk_mq_bitmap_tags * bt ,
185
- unsigned int * tag_cache )
188
+ unsigned int * tag_cache , struct blk_mq_tags * tags )
186
189
{
187
190
unsigned int last_tag , org_last_tag ;
188
191
int index , i , tag ;
@@ -194,7 +197,8 @@ static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
194
197
index = TAG_TO_INDEX (bt , last_tag );
195
198
196
199
for (i = 0 ; i < bt -> map_nr ; i ++ ) {
197
- tag = __bt_get_word (& bt -> map [index ], TAG_TO_BIT (bt , last_tag ));
200
+ tag = __bt_get_word (& bt -> map [index ], TAG_TO_BIT (bt , last_tag ),
201
+ BT_ALLOC_RR (tags ));
198
202
if (tag != -1 ) {
199
203
tag += (index << bt -> bits_per_word );
200
204
goto done ;
@@ -221,7 +225,7 @@ static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
221
225
* up using the specific cached tag.
222
226
*/
223
227
done :
224
- if (tag == org_last_tag ) {
228
+ if (tag == org_last_tag || unlikely ( BT_ALLOC_RR ( tags )) ) {
225
229
last_tag = tag + 1 ;
226
230
if (last_tag >= bt -> depth - 1 )
227
231
last_tag = 0 ;
@@ -250,13 +254,13 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
250
254
static int bt_get (struct blk_mq_alloc_data * data ,
251
255
struct blk_mq_bitmap_tags * bt ,
252
256
struct blk_mq_hw_ctx * hctx ,
253
- unsigned int * last_tag )
257
+ unsigned int * last_tag , struct blk_mq_tags * tags )
254
258
{
255
259
struct bt_wait_state * bs ;
256
260
DEFINE_WAIT (wait );
257
261
int tag ;
258
262
259
- tag = __bt_get (hctx , bt , last_tag );
263
+ tag = __bt_get (hctx , bt , last_tag , tags );
260
264
if (tag != -1 )
261
265
return tag ;
262
266
@@ -267,7 +271,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
267
271
do {
268
272
prepare_to_wait (& bs -> wait , & wait , TASK_UNINTERRUPTIBLE );
269
273
270
- tag = __bt_get (hctx , bt , last_tag );
274
+ tag = __bt_get (hctx , bt , last_tag , tags );
271
275
if (tag != -1 )
272
276
break ;
273
277
@@ -282,7 +286,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
282
286
* Retry tag allocation after running the hardware queue,
283
287
* as running the queue may also have found completions.
284
288
*/
285
- tag = __bt_get (hctx , bt , last_tag );
289
+ tag = __bt_get (hctx , bt , last_tag , tags );
286
290
if (tag != -1 )
287
291
break ;
288
292
@@ -313,7 +317,7 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
313
317
int tag ;
314
318
315
319
tag = bt_get (data , & data -> hctx -> tags -> bitmap_tags , data -> hctx ,
316
- & data -> ctx -> last_tag );
320
+ & data -> ctx -> last_tag , data -> hctx -> tags );
317
321
if (tag >= 0 )
318
322
return tag + data -> hctx -> tags -> nr_reserved_tags ;
319
323
@@ -329,7 +333,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
329
333
return BLK_MQ_TAG_FAIL ;
330
334
}
331
335
332
- tag = bt_get (data , & data -> hctx -> tags -> breserved_tags , NULL , & zero );
336
+ tag = bt_get (data , & data -> hctx -> tags -> breserved_tags , NULL , & zero ,
337
+ data -> hctx -> tags );
333
338
if (tag < 0 )
334
339
return BLK_MQ_TAG_FAIL ;
335
340
@@ -401,7 +406,8 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
401
406
402
407
BUG_ON (real_tag >= tags -> nr_tags );
403
408
bt_clear_tag (& tags -> bitmap_tags , real_tag );
404
- * last_tag = real_tag ;
409
+ if (likely (tags -> alloc_policy == BLK_TAG_ALLOC_FIFO ))
410
+ * last_tag = real_tag ;
405
411
} else {
406
412
BUG_ON (tag >= tags -> nr_reserved_tags );
407
413
bt_clear_tag (& tags -> breserved_tags , tag );
@@ -538,10 +544,12 @@ static void bt_free(struct blk_mq_bitmap_tags *bt)
538
544
}
539
545
540
546
static struct blk_mq_tags * blk_mq_init_bitmap_tags (struct blk_mq_tags * tags ,
541
- int node )
547
+ int node , int alloc_policy )
542
548
{
543
549
unsigned int depth = tags -> nr_tags - tags -> nr_reserved_tags ;
544
550
551
+ tags -> alloc_policy = alloc_policy ;
552
+
545
553
if (bt_alloc (& tags -> bitmap_tags , depth , node , false))
546
554
goto enomem ;
547
555
if (bt_alloc (& tags -> breserved_tags , tags -> nr_reserved_tags , node , true))
@@ -555,7 +563,8 @@ static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
555
563
}
556
564
557
565
struct blk_mq_tags * blk_mq_init_tags (unsigned int total_tags ,
558
- unsigned int reserved_tags , int node )
566
+ unsigned int reserved_tags ,
567
+ int node , int alloc_policy )
559
568
{
560
569
struct blk_mq_tags * tags ;
561
570
@@ -571,7 +580,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
571
580
tags -> nr_tags = total_tags ;
572
581
tags -> nr_reserved_tags = reserved_tags ;
573
582
574
- return blk_mq_init_bitmap_tags (tags , node );
583
+ return blk_mq_init_bitmap_tags (tags , node , alloc_policy );
575
584
}
576
585
577
586
void blk_mq_free_tags (struct blk_mq_tags * tags )
0 commit comments