// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when tags are shared by multiple hctxs.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
				     unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * still reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;

	/*
	 * Calling test_bit() prior to test_and_set_bit() is intentional:
	 * it avoids dirtying the cacheline if the queue is already active.
	 */
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return;
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	users = atomic_inc_return(&hctx->tags->active_queues);

	blk_mq_update_wake_batch(hctx->tags, users);
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	users = atomic_dec_return(&tags->active_queues);

	blk_mq_update_wake_batch(tags, users);

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}
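
/*
 * Editorial sketch (not part of the original file): how a caller consumes
 * the tag mask returned by blk_mq_get_tags(). Loosely modelled on
 * __blk_mq_alloc_requests_batch() in blk-mq.c; each set bit in the mask
 * is an allocated tag, and the driver-visible tag number is the returned
 * offset plus the bit index:
 *
 *	unsigned int tag_offset;
 *	unsigned long tag_mask = blk_mq_get_tags(data, nr_tags, &tag_offset);
 *
 *	if (!tag_mask)
 *		return NULL;	// fall back to single-tag blk_mq_get_tag()
 *	while (tag_mask) {
 *		unsigned int bit = __ffs(tag_mask);
 *
 *		tag_mask &= ~(1UL << bit);
 *		// tag_offset + bit is the tag for one request
 *	}
 */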

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
					      data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wakeup on the previous queue to compensate for the missed
		 * wakeup, so other allocations on the previous queue won't
		 * be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev, 1);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
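
/*
 * Editorial note: the loop above is the standard sbitmap wait protocol in
 * expanded form. The essential shape, with the retry that closes the
 * lost-wakeup race, is:
 *
 *	sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
 *	tag = __sbitmap_queue_get(bt);	// retry: a tag may have been freed
 *					// before we were on the waitqueue
 *	if (tag < 0)
 *		io_schedule();		// sleep until a freed tag wakes us
 *	sbitmap_finish_wait(bt, ws, &wait);
 *
 * blk_mq_get_tag() additionally re-resolves ctx/hctx after sleeping, since
 * the task may have migrated to another CPU while blocked.
 */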

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
				  tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!iter_data->reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx: Hardware queue to examine.
 * @q: Request queue to examine.
 * @bt: sbitmap to examine. This is either the breserved_tags member
 *	or the bitmap_tags member of struct blk_mq_tags.
 * @fn: Pointer to the function that will be called for each request
 *	associated with @hctx that has been assigned a driver tag.
 *	@fn will be called as follows: @fn(rq, @data) where rq is a
 *	pointer to a request. Return true to continue iterating
 *	tags, false to stop.
 * @data: Will be passed as second argument to @fn.
 * @reserved: Indicates whether @bt is the breserved_tags member or the
 *	bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED	(1 << 0)
#define BT_TAG_ITER_STARTED	(1 << 1)
#define BT_TAG_ITER_STATIC_RQS	(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags: Tag map to iterate over.
 * @bt: sbitmap to examine. This is either the breserved_tags member
 *	or the bitmap_tags member of struct blk_mq_tags.
 * @fn: Pointer to the function that will be called for each started
 *	request. @fn will be called as follows: @fn(rq, @data) where rq
 *	is a pointer to a request. Return true to continue iterating
 *	tags, false to stop.
 * @data: Will be passed as second argument to @fn.
 * @flags: BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags: Tag map to iterate over.
 * @fn: Pointer to the function that will be called for each request.
 *	@fn will be called as follows: @fn(rq, @priv) where rq is a
 *	pointer to a request. Return true to continue iterating
 *	tags, false to stop.
 * @priv: Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset: Tag set to iterate over.
 * @fn: Pointer to the function that will be called for each started
 *	request. @fn will be called as follows: @fn(rq, @priv) where rq
 *	is a pointer to a request. Return true to continue iterating
 *	tags, false to stop.
 * @priv: Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
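
/*
 * Editorial usage sketch: drivers typically use blk_mq_tagset_busy_iter()
 * to act on every in-flight request during error recovery or teardown.
 * The names below are hypothetical; the pattern mirrors NVMe's
 * nvme_cancel_request():
 *
 *	static bool my_cancel_request(struct request *rq, void *data)
 *	{
 *		// a real driver records an error status on rq first
 *		blk_mq_complete_request(rq);
 *		return true;	// keep iterating
 *	}
 *
 *	blk_mq_tagset_busy_iter(&dev->tagset, my_cancel_request, NULL);
 */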

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset: Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
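
/*
 * Editorial usage sketch (hypothetical names): the usual pairing during
 * controller shutdown, once the hardware can no longer post completions,
 * is to cancel everything and then drain:
 *
 *	blk_mq_tagset_busy_iter(&dev->tagset, my_cancel_request, NULL);
 *	blk_mq_tagset_wait_completed_request(&dev->tagset);
 *
 * Only after the wait returns is it safe to free resources that a
 * completion handler might still touch.
 */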

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q: Request queue to examine.
 * @fn: Pointer to the function that will be called for each request
 *	on @q. @fn will be called as follows: @fn(rq, @priv) where rq
 *	is a pointer to a request. Return true to continue iterating
 *	tags, false to stop.
 * @priv: Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check.
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}
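
/*
 * Editorial usage sketch: blk-mq itself drives this iterator for per-queue
 * accounting (see blk_mq_in_flight() in blk-mq.c). A minimal hypothetical
 * counter looks like:
 *
 *	static bool my_count_started(struct request *rq, void *priv)
 *	{
 *		unsigned int *count = priv;
 *
 *		if (blk_mq_request_started(rq))
 *			(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *	blk_mq_queue_tag_busy_iter(q, my_count_started, &count);
 */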

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit; set it high enough
		 * that no valid use case should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				     tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per-hardware-queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
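
/*
 * Editorial example: the matching decode helpers live in
 * include/linux/blk-mq.h, so e.g. a SCSI LLD can split the value back up:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u32 hwq = blk_mq_unique_tag_to_hwq(unique);	// upper bits
 *	u32 tag = blk_mq_unique_tag_to_tag(unique);	// lower bits
 */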
|