@@ -90,32 +90,46 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
         return atomic_read(&hctx->nr_active) < depth;
 }
 
-static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
+static int __blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
 {
         if (!hctx_may_queue(hctx, bt))
                 return -1;
         return __sbitmap_queue_get(bt);
 }
 
-static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
-                  struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
+unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 {
+        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+        struct sbitmap_queue *bt;
         struct sbq_wait_state *ws;
         DEFINE_WAIT(wait);
+        unsigned int tag_offset;
         int tag;
 
-        tag = __bt_get(hctx, bt);
+        if (data->flags & BLK_MQ_REQ_RESERVED) {
+                if (unlikely(!tags->nr_reserved_tags)) {
+                        WARN_ON_ONCE(1);
+                        return BLK_MQ_TAG_FAIL;
+                }
+                bt = &tags->breserved_tags;
+                tag_offset = 0;
+        } else {
+                bt = &tags->bitmap_tags;
+                tag_offset = tags->nr_reserved_tags;
+        }
+
+        tag = __blk_mq_get_tag(data->hctx, bt);
         if (tag != -1)
-                return tag;
+                goto found_tag;
 
         if (data->flags & BLK_MQ_REQ_NOWAIT)
-                return -1;
+                return BLK_MQ_TAG_FAIL;
 
-        ws = bt_wait_ptr(bt, hctx);
+        ws = bt_wait_ptr(bt, data->hctx);
         do {
                 prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-                tag = __bt_get(hctx, bt);
+                tag = __blk_mq_get_tag(data->hctx, bt);
                 if (tag != -1)
                         break;
 
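[Note] The new code calls blk_mq_tags_from_data(), which this commit adds outside the hunks shown here. A minimal sketch of that helper, assuming it simply resolves the hardware context's tag set the same way the removed code reached it via data->hctx->tags:

    static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
    {
            /* assumed behavior: the tag set hangs off the mapped hctx */
            return data->hctx->tags;
    }

Picking bt and tag_offset once at the top lets the reserved and normal cases share one slow path below, instead of the two near-identical wrappers removed further down.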
@@ -125,14 +139,14 @@ static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
                  * some to complete. Note that hctx can be NULL here for
                  * reserved tag allocation.
                  */
-                if (hctx)
-                        blk_mq_run_hw_queue(hctx, false);
+                if (data->hctx)
+                        blk_mq_run_hw_queue(data->hctx, false);
 
                 /*
                  * Retry tag allocation after running the hardware queue,
                  * as running the queue may also have found completions.
                  */
-                tag = __bt_get(hctx, bt);
+                tag = __blk_mq_get_tag(data->hctx, bt);
                 if (tag != -1)
                         break;
 
@@ -142,61 +156,25 @@ static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
 
                 data->ctx = blk_mq_get_ctx(data->q);
                 data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
-                if (data->flags & BLK_MQ_REQ_RESERVED) {
-                        bt = &data->hctx->tags->breserved_tags;
-                } else {
-                        hctx = data->hctx;
-                        bt = &hctx->tags->bitmap_tags;
-                }
+                tags = blk_mq_tags_from_data(data);
+                if (data->flags & BLK_MQ_REQ_RESERVED)
+                        bt = &tags->breserved_tags;
+                else
+                        bt = &tags->bitmap_tags;
+
                 finish_wait(&ws->wait, &wait);
-                ws = bt_wait_ptr(bt, hctx);
+                ws = bt_wait_ptr(bt, data->hctx);
         } while (1);
 
         finish_wait(&ws->wait, &wait);
-        return tag;
-}
-
-static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
-{
-        int tag;
-
-        tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
-                     data->hctx->tags);
-        if (tag >= 0)
-                return tag + data->hctx->tags->nr_reserved_tags;
-
-        return BLK_MQ_TAG_FAIL;
-}
-
-static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
-{
-        int tag;
-
-        if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
-                WARN_ON_ONCE(1);
-                return BLK_MQ_TAG_FAIL;
-        }
-
-        tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
-                     data->hctx->tags);
-        if (tag < 0)
-                return BLK_MQ_TAG_FAIL;
-
-        return tag;
+
+found_tag:
+        return tag + tag_offset;
 }
 
-unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
+                    struct blk_mq_ctx *ctx, unsigned int tag)
 {
-        if (data->flags & BLK_MQ_REQ_RESERVED)
-                return __blk_mq_get_reserved_tag(data);
-        return __blk_mq_get_tag(data);
-}
-
-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                    unsigned int tag)
-{
-        struct blk_mq_tags *tags = hctx->tags;
-
         if (tag >= tags->nr_reserved_tags) {
                 const int real_tag = tag - tags->nr_reserved_tags;
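[Note] The tag_offset arithmetic keeps the externally visible tag space stable: both pools allocate bitmap indices starting at 0, reserved tags are returned unshifted in [0, nr_reserved_tags), normal tags are shifted up past the reserved range, and blk_mq_put_tag() undoes the shift via real_tag. A standalone toy model of that round trip (hypothetical names, not kernel code):

    #include <assert.h>
    #include <stdbool.h>

    struct toy_tags {
            unsigned int nr_reserved_tags;
    };

    /* Mirrors the tag_offset logic in blk_mq_get_tag(): reserved tags
     * keep their bitmap index, normal tags are shifted past the
     * reserved range.
     */
    static unsigned int toy_get_tag(struct toy_tags *tags, bool reserved,
                                    unsigned int bitmap_index)
    {
            unsigned int tag_offset = reserved ? 0 : tags->nr_reserved_tags;

            return bitmap_index + tag_offset;
    }

    /* Mirrors blk_mq_put_tag(): recover the tag's index within its own
     * pool ("real_tag" in the kernel code) from the visible tag value.
     */
    static unsigned int toy_put_tag(struct toy_tags *tags, unsigned int tag)
    {
            if (tag >= tags->nr_reserved_tags)
                    return tag - tags->nr_reserved_tags;
            return tag;
    }

    int main(void)
    {
            struct toy_tags tags = { .nr_reserved_tags = 2 };

            assert(toy_get_tag(&tags, true, 1) == 1);   /* reserved: unshifted */
            assert(toy_get_tag(&tags, false, 0) == 2);  /* normal: starts after reserved */
            assert(toy_put_tag(&tags, 2) == 0);         /* back to bitmap index 0 */
            return 0;
    }

This is also why the new blk_mq_put_tag() takes the tags pointer explicitly: the caller already knows which tag set the tag belongs to, so the function no longer derives it from hctx->tags itself.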