LCOV - code coverage report
Current view: top level - block - blk-mq-tag.c (source / functions)
Test: coverage.info          Date: 2023-03-27 20:00:47

                 Hit   Total   Coverage
Lines:             0     224      0.0 %
Functions:         0      25      0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Tag allocation using scalable bitmaps. Uses active queue tracking to support
       4             :  * fairer distribution of tags between multiple submitters when a shared tag map
       5             :  * is used.
       6             :  *
       7             :  * Copyright (C) 2013-2014 Jens Axboe
       8             :  */
       9             : #include <linux/kernel.h>
      10             : #include <linux/module.h>
      11             : 
      12             : #include <linux/blk-mq.h>
      13             : #include <linux/delay.h>
      14             : #include "blk.h"
      15             : #include "blk-mq.h"
      16             : #include "blk-mq-sched.h"
      17             : #include "blk-mq-tag.h"
      18             : 
      19             : /*
      20             :  * Recalculate the wakeup batch when the tag set is shared between hctxs.
      21             :  */
      22           0 : static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
      23             :                 unsigned int users)
      24             : {
      25           0 :         if (!users)
      26             :                 return;
      27             : 
      28           0 :         sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
      29             :                         users);
      30           0 :         sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
      31             :                         users);
      32             : }
      33             : 
      34             : /*
      35             :  * If a previously inactive queue goes active, bump the active user count.
      36             :  * We need to do this before trying to allocate a driver tag, so that even
      37             :  * if the first allocation attempt fails, the other shared-tag users can
      38             :  * reserve budget for it.
      39             :  */
      40           0 : void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
      41             : {
      42             :         unsigned int users;
      43             : 
      44           0 :         if (blk_mq_is_shared_tags(hctx->flags)) {
      45           0 :                 struct request_queue *q = hctx->queue;
      46             : 
      47           0 :                 if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
      48             :                         return;
      49           0 :                 set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
      50             :         } else {
      51           0 :                 if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
      52             :                         return;
      53           0 :                 set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
      54             :         }
      55             : 
      56           0 :         users = atomic_inc_return(&hctx->tags->active_queues);
      57             : 
      58           0 :         blk_mq_update_wake_batch(hctx->tags, users);
      59             : }
      60             : 
      61             : /*
      62             :  * Wake up everyone potentially sleeping on tags.
      63             :  */
      64           0 : void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
      65             : {
      66           0 :         sbitmap_queue_wake_all(&tags->bitmap_tags);
      67           0 :         if (include_reserve)
      68           0 :                 sbitmap_queue_wake_all(&tags->breserved_tags);
      69           0 : }
      70             : 
      71             : /*
      72             :  * If a previously busy queue goes inactive, potential waiters could now
      73             :  * be allowed to queue. Wake them up and check.
      74             :  */
      75           0 : void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
      76             : {
      77           0 :         struct blk_mq_tags *tags = hctx->tags;
      78             :         unsigned int users;
      79             : 
      80           0 :         if (blk_mq_is_shared_tags(hctx->flags)) {
      81           0 :                 struct request_queue *q = hctx->queue;
      82             : 
      83           0 :                 if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
      84           0 :                                         &q->queue_flags))
      85             :                         return;
      86             :         } else {
      87           0 :                 if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
      88             :                         return;
      89             :         }
      90             : 
      91           0 :         users = atomic_dec_return(&tags->active_queues);
      92             : 
      93           0 :         blk_mq_update_wake_batch(tags, users);
      94             : 
      95             :         blk_mq_tag_wakeup_all(tags, false);
      96             : }
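
/*
 * Illustrative sketch (not part of this file): callers normally reach the
 * two helpers above through inline wrappers in the blk-mq private headers,
 * which skip the accounting entirely for unshared tag sets. Roughly:
 */
static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}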
      97             : 
      98           0 : static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
      99             :                             struct sbitmap_queue *bt)
     100             : {
     101           0 :         if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
     102           0 :                         !hctx_may_queue(data->hctx, bt))
     103             :                 return BLK_MQ_NO_TAG;
     104             : 
     105           0 :         if (data->shallow_depth)
     106           0 :                 return sbitmap_queue_get_shallow(bt, data->shallow_depth);
     107             :         else
     108           0 :                 return __sbitmap_queue_get(bt);
     109             : }
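
/*
 * Illustrative sketch (not part of this file): hctx_may_queue(), defined
 * in the blk-mq private headers, implements the fair-share check used
 * above. A simplified version, assuming the private helper
 * __blk_mq_active_requests(); exact details vary by kernel version:
 */
static inline bool hctx_may_queue_sketch(struct blk_mq_hw_ctx *hctx,
					 struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/* Give each active queue roughly an equal share of the depth. */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}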
     110             : 
     111           0 : unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
     112             :                               unsigned int *offset)
     113             : {
     114           0 :         struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
     115           0 :         struct sbitmap_queue *bt = &tags->bitmap_tags;
     116             :         unsigned long ret;
     117             : 
     118           0 :         if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
     119           0 :             data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
     120             :                 return 0;
     121           0 :         ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
     122           0 :         *offset += tags->nr_reserved_tags;
     123           0 :         return ret;
     124             : }
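
/*
 * Illustrative sketch (not part of this file): blk_mq_get_tags() returns a
 * bitmask of allocated tags, with *offset naming the tag that bit 0 stands
 * for. A hypothetical caller would consume the batch roughly like this:
 */
static void consume_tag_batch_sketch(struct blk_mq_alloc_data *data)
{
	unsigned int tag_offset;
	unsigned long tag_mask;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	while (tag_mask) {
		unsigned int i = __ffs(tag_mask);	/* lowest set bit */
		unsigned int tag = tag_offset + i;	/* full tag value */

		tag_mask &= ~(1UL << i);
		/* ... set up a request that carries 'tag' here ... */
		(void)tag;
	}
}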
     125             : 
     126           0 : unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
     127             : {
     128           0 :         struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
     129             :         struct sbitmap_queue *bt;
     130             :         struct sbq_wait_state *ws;
     131           0 :         DEFINE_SBQ_WAIT(wait);
     132             :         unsigned int tag_offset;
     133             :         int tag;
     134             : 
     135           0 :         if (data->flags & BLK_MQ_REQ_RESERVED) {
     136           0 :                 if (unlikely(!tags->nr_reserved_tags)) {
     137           0 :                         WARN_ON_ONCE(1);
     138             :                         return BLK_MQ_NO_TAG;
     139             :                 }
     140           0 :                 bt = &tags->breserved_tags;
     141           0 :                 tag_offset = 0;
     142             :         } else {
     143           0 :                 bt = &tags->bitmap_tags;
     144           0 :                 tag_offset = tags->nr_reserved_tags;
     145             :         }
     146             : 
     147           0 :         tag = __blk_mq_get_tag(data, bt);
     148           0 :         if (tag != BLK_MQ_NO_TAG)
     149             :                 goto found_tag;
     150             : 
     151           0 :         if (data->flags & BLK_MQ_REQ_NOWAIT)
     152             :                 return BLK_MQ_NO_TAG;
     153             : 
     154           0 :         ws = bt_wait_ptr(bt, data->hctx);
     155             :         do {
     156             :                 struct sbitmap_queue *bt_prev;
     157             : 
     158             :                 /*
     159             :                  * We're out of tags on this hardware queue, kick any
     160             :                  * pending IO submits before going to sleep waiting for
     161             :                  * some to complete.
     162             :                  */
     163           0 :                 blk_mq_run_hw_queue(data->hctx, false);
     164             : 
     165             :                 /*
     166             :                  * Retry tag allocation after running the hardware queue,
     167             :                  * as running the queue may also have found completions.
     168             :                  */
     169           0 :                 tag = __blk_mq_get_tag(data, bt);
     170           0 :                 if (tag != BLK_MQ_NO_TAG)
     171             :                         break;
     172             : 
     173           0 :                 sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
     174             : 
     175           0 :                 tag = __blk_mq_get_tag(data, bt);
     176           0 :                 if (tag != BLK_MQ_NO_TAG)
     177             :                         break;
     178             : 
     179           0 :                 bt_prev = bt;
     180           0 :                 io_schedule();
     181             : 
     182           0 :                 sbitmap_finish_wait(bt, ws, &wait);
     183             : 
     184           0 :                 data->ctx = blk_mq_get_ctx(data->q);
     185           0 :                 data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
     186             :                                                 data->ctx);
     187           0 :                 tags = blk_mq_tags_from_data(data);
     188           0 :                 if (data->flags & BLK_MQ_REQ_RESERVED)
     189           0 :                         bt = &tags->breserved_tags;
     190             :                 else
     191           0 :                         bt = &tags->bitmap_tags;
     192             : 
     193             :                 /*
     194             :                  * If the destination hw queue has changed, fake a wakeup
     195             :                  * on the previous queue to compensate for the missed wakeup,
     196             :                  * so other allocations on the previous queue won't be starved.
     197             :                  */
     198           0 :                 if (bt != bt_prev)
     199           0 :                         sbitmap_queue_wake_up(bt_prev, 1);
     200             : 
     201           0 :                 ws = bt_wait_ptr(bt, data->hctx);
     202             :         } while (1);
     203             : 
     204           0 :         sbitmap_finish_wait(bt, ws, &wait);
     205             : 
     206             : found_tag:
     207             :         /*
     208             :          * Give up this allocation if the hctx is inactive.  The caller will
     209             :          * retry on an active hctx.
     210             :          */
     211           0 :         if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
     212           0 :                 blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
     213           0 :                 return BLK_MQ_NO_TAG;
     214             :         }
     215           0 :         return tag + tag_offset;
     216             : }
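
/*
 * Illustrative fragment (not part of this file, not compilable on its
 * own): the request-allocation path in blk-mq.c reacts to BLK_MQ_NO_TAG
 * roughly like this, retrying unless the caller asked not to block:
 *
 *	tag = blk_mq_get_tag(data);
 *	if (tag == BLK_MQ_NO_TAG) {
 *		if (data->flags & BLK_MQ_REQ_NOWAIT)
 *			return NULL;
 *		msleep(3);
 *		goto retry;
 *	}
 */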
     217             : 
     218           0 : void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
     219             :                     unsigned int tag)
     220             : {
     221           0 :         if (!blk_mq_tag_is_reserved(tags, tag)) {
     222           0 :                 const int real_tag = tag - tags->nr_reserved_tags;
     223             : 
     224           0 :                 BUG_ON(real_tag >= tags->nr_tags);
     225           0 :                 sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
     226             :         } else {
     227           0 :                 sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
     228             :         }
     229           0 : }
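
/*
 * Worked example (illustrative): with nr_tags = 8 and nr_reserved_tags = 2,
 * external tags 0..1 are the reserved ones and map 1:1 onto breserved_tags,
 * while external tags 2..7 map onto bits 0..5 of bitmap_tags via
 * real_tag = tag - nr_reserved_tags. blk_mq_tag_is_reserved() is assumed
 * to be the simple range check below (it lives in the blk-mq private
 * headers):
 */
static inline bool blk_mq_tag_is_reserved_sketch(struct blk_mq_tags *tags,
						 unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}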
     230             : 
     231           0 : void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
     232             : {
     233           0 :         sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
     234             :                                         tag_array, nr_tags);
     235           0 : }
     236             : 
     237             : struct bt_iter_data {
     238             :         struct blk_mq_hw_ctx *hctx;
     239             :         struct request_queue *q;
     240             :         busy_tag_iter_fn *fn;
     241             :         void *data;
     242             :         bool reserved;
     243             : };
     244             : 
     245           0 : static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
     246             :                 unsigned int bitnr)
     247             : {
     248             :         struct request *rq;
     249             :         unsigned long flags;
     250             : 
     251           0 :         spin_lock_irqsave(&tags->lock, flags);
     252           0 :         rq = tags->rqs[bitnr];
     253           0 :         if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
     254             :                 rq = NULL;
     255           0 :         spin_unlock_irqrestore(&tags->lock, flags);
     256           0 :         return rq;
     257             : }
     258             : 
     259           0 : static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
     260             : {
     261           0 :         struct bt_iter_data *iter_data = data;
     262           0 :         struct blk_mq_hw_ctx *hctx = iter_data->hctx;
     263           0 :         struct request_queue *q = iter_data->q;
     264           0 :         struct blk_mq_tag_set *set = q->tag_set;
     265             :         struct blk_mq_tags *tags;
     266             :         struct request *rq;
     267           0 :         bool ret = true;
     268             : 
     269           0 :         if (blk_mq_is_shared_tags(set->flags))
     270           0 :                 tags = set->shared_tags;
     271             :         else
     272           0 :                 tags = hctx->tags;
     273             : 
     274           0 :         if (!iter_data->reserved)
     275           0 :                 bitnr += tags->nr_reserved_tags;
     276             :         /*
     277             :          * We can hit rq == NULL here, because the tagging functions
     278             :          * test and set the bit before assigning ->rqs[].
     279             :          */
     280           0 :         rq = blk_mq_find_and_get_req(tags, bitnr);
     281           0 :         if (!rq)
     282             :                 return true;
     283             : 
     284           0 :         if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
     285           0 :                 ret = iter_data->fn(rq, iter_data->data);
     286           0 :         blk_mq_put_rq_ref(rq);
     287           0 :         return ret;
     288             : }
     289             : 
     290             : /**
     291             :  * bt_for_each - iterate over the requests associated with a hardware queue
     292             :  * @hctx:       Hardware queue to examine.
     293             :  * @q:          Request queue to examine.
     294             :  * @bt:         sbitmap to examine. This is either the breserved_tags member
     295             :  *              or the bitmap_tags member of struct blk_mq_tags.
     296             :  * @fn:         Pointer to the function that will be called for each request
     297             :  *              associated with @hctx that has been assigned a driver tag.
     298             :  *              @fn will be called as follows: @fn(rq, @data) where rq
     299             :  *              is a pointer to a request. Return true to continue
     300             :  *              iterating tags, false to stop.
     301             :  * @data:       Will be passed as second argument to @fn.
     302             :  * @reserved:   Indicates whether @bt is the breserved_tags member or the
     303             :  *              bitmap_tags member of struct blk_mq_tags.
     304             :  */
     305             : static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
     306             :                         struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
     307             :                         void *data, bool reserved)
     308             : {
     309           0 :         struct bt_iter_data iter_data = {
     310             :                 .hctx = hctx,
     311             :                 .fn = fn,
     312             :                 .data = data,
     313             :                 .reserved = reserved,
     314             :                 .q = q,
     315             :         };
     316             : 
     317           0 :         sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
     318             : }
     319             : 
     320             : struct bt_tags_iter_data {
     321             :         struct blk_mq_tags *tags;
     322             :         busy_tag_iter_fn *fn;
     323             :         void *data;
     324             :         unsigned int flags;
     325             : };
     326             : 
     327             : #define BT_TAG_ITER_RESERVED            (1 << 0)
     328             : #define BT_TAG_ITER_STARTED             (1 << 1)
     329             : #define BT_TAG_ITER_STATIC_RQS          (1 << 2)
     330             : 
     331           0 : static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
     332             : {
     333           0 :         struct bt_tags_iter_data *iter_data = data;
     334           0 :         struct blk_mq_tags *tags = iter_data->tags;
     335             :         struct request *rq;
     336           0 :         bool ret = true;
     337           0 :         bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
     338             : 
     339           0 :         if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
     340           0 :                 bitnr += tags->nr_reserved_tags;
     341             : 
     342             :         /*
     343             :          * We can hit rq == NULL here, because the tagging functions
     344             :          * test and set the bit before assigning ->rqs[].
     345             :          */
     346           0 :         if (iter_static_rqs)
     347           0 :                 rq = tags->static_rqs[bitnr];
     348             :         else
     349           0 :                 rq = blk_mq_find_and_get_req(tags, bitnr);
     350           0 :         if (!rq)
     351             :                 return true;
     352             : 
     353           0 :         if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
     354           0 :             blk_mq_request_started(rq))
     355           0 :                 ret = iter_data->fn(rq, iter_data->data);
     356           0 :         if (!iter_static_rqs)
     357           0 :                 blk_mq_put_rq_ref(rq);
     358             :         return ret;
     359             : }
     360             : 
     361             : /**
     362             :  * bt_tags_for_each - iterate over the requests in a tag map
     363             :  * @tags:       Tag map to iterate over.
     364             :  * @bt:         sbitmap to examine. This is either the breserved_tags member
     365             :  *              or the bitmap_tags member of struct blk_mq_tags.
     366             :  * @fn:         Pointer to the function that will be called for each started
     367             :  *              request. @fn will be called as follows: @fn(rq, @data)
     368             :  *              where rq is a pointer to a request. Return true to
     369             :  *              continue iterating tags, false to stop.
     370             :  * @data:       Will be passed as second argument to @fn.
     371             :  * @flags:      BT_TAG_ITER_*
     372             :  */
     373             : static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
     374             :                              busy_tag_iter_fn *fn, void *data, unsigned int flags)
     375             : {
     376           0 :         struct bt_tags_iter_data iter_data = {
     377             :                 .tags = tags,
     378             :                 .fn = fn,
     379             :                 .data = data,
     380             :                 .flags = flags,
     381             :         };
     382             : 
     383           0 :         if (tags->rqs)
     384           0 :                 sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
     385             : }
     386             : 
     387           0 : static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
     388             :                 busy_tag_iter_fn *fn, void *priv, unsigned int flags)
     389             : {
     390           0 :         WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);
     391             : 
     392           0 :         if (tags->nr_reserved_tags)
     393           0 :                 bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
     394             :                                  flags | BT_TAG_ITER_RESERVED);
     395           0 :         bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
     396           0 : }
     397             : 
     398             : /**
     399             :  * blk_mq_all_tag_iter - iterate over all requests in a tag map
     400             :  * @tags:       Tag map to iterate over.
     401             :  * @fn:         Pointer to the function that will be called for each
     402             :  *              request. @fn will be called as follows: @fn(rq, @priv)
     403             :  *              where rq is a pointer to a request. Return true to
     404             :  *              continue iterating tags, false to stop.
     406             :  * @priv:       Will be passed as second argument to @fn.
     407             :  *
     408             :  * Caller has to pass the tag map from which requests are allocated.
     409             :  */
     410           0 : void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
     411             :                 void *priv)
     412             : {
     413           0 :         __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
     414           0 : }
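
/*
 * Illustrative sketch (not part of this file), in the spirit of the
 * "does this hctx still hold requests?" check in blk-mq.c; the struct and
 * function names here are hypothetical:
 */
struct has_rq_data {
	struct blk_mq_hw_ctx *hctx;
	bool has_rq;
};

static bool check_has_rq(struct request *rq, void *data)
{
	struct has_rq_data *d = data;

	if (rq->mq_hctx != d->hctx)
		return true;	/* not ours, keep scanning */
	d->has_rq = true;
	return false;		/* found one, stop iterating */
}

static bool hctx_has_requests_sketch(struct blk_mq_hw_ctx *hctx)
{
	struct has_rq_data data = { .hctx = hctx };

	blk_mq_all_tag_iter(hctx->sched_tags ? hctx->sched_tags : hctx->tags,
			    check_has_rq, &data);
	return data.has_rq;
}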
     415             : 
     416             : /**
     417             :  * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
     418             :  * @tagset:     Tag set to iterate over.
     419             :  * @fn:         Pointer to the function that will be called for each started
     420             :  *              request. @fn will be called as follows: @fn(rq, @priv)
     421             :  *              where rq is a pointer to a request. Return true to
     422             :  *              continue iterating tags, false to stop.
     424             :  * @priv:       Will be passed as second argument to @fn.
     425             :  *
     426             :  * We grab one request reference before calling @fn and release it after
     427             :  * @fn returns.
     428             :  */
     429           0 : void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
     430             :                 busy_tag_iter_fn *fn, void *priv)
     431             : {
     432           0 :         unsigned int flags = tagset->flags;
     433             :         int i, nr_tags;
     434             : 
     435           0 :         nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;
     436             : 
     437           0 :         for (i = 0; i < nr_tags; i++) {
     438           0 :                 if (tagset->tags && tagset->tags[i])
     439           0 :                         __blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
     440             :                                               BT_TAG_ITER_STARTED);
     441             :         }
     442           0 : }
     443             : EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
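
/*
 * Usage sketch (illustrative): the callback signature is
 * bool (*)(struct request *, void *). Mirroring
 * blk_mq_tagset_count_completed_rqs() below, a hypothetical driver could
 * count the requests the device still owns:
 */
static bool mydrv_count_busy(struct request *rq, void *priv)
{
	unsigned int *busy = priv;

	(*busy)++;
	return true;	/* keep iterating */
}

static unsigned int mydrv_busy_requests(struct blk_mq_tag_set *set)
{
	unsigned int busy = 0;

	blk_mq_tagset_busy_iter(set, mydrv_count_busy, &busy);
	return busy;
}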
     444             : 
     445           0 : static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
     446             : {
     447           0 :         unsigned *count = data;
     448             : 
     449           0 :         if (blk_mq_request_completed(rq))
     450           0 :                 (*count)++;
     451           0 :         return true;
     452             : }
     453             : 
     454             : /**
     455             :  * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
     456             :  * completions have finished.
     457             :  * @tagset:     Tag set to drain completed requests from
     458             :  *
     459             :  * Note: This function has to be run after all IO queues are shut down.
     460             :  */
     461           0 : void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
     462             : {
     463           0 :         while (true) {
     464           0 :                 unsigned count = 0;
     465             : 
     466           0 :                 blk_mq_tagset_busy_iter(tagset,
     467             :                                 blk_mq_tagset_count_completed_rqs, &count);
     468           0 :                 if (!count)
     469             :                         break;
     470           0 :                 msleep(5);
     471             :         }
     472           0 : }
     473             : EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
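
/*
 * Usage sketch (illustrative, modeled on the cancel paths of drivers such
 * as NVMe; mydrv_cancel_rq is hypothetical): fail every outstanding
 * request, then wait for the completions to finish running.
 */
static bool mydrv_cancel_rq(struct request *rq, void *priv)
{
	/* A real driver records an error status on rq before completing. */
	blk_mq_complete_request(rq);
	return true;
}

static void mydrv_cancel_all(struct blk_mq_tag_set *set)
{
	blk_mq_tagset_busy_iter(set, mydrv_cancel_rq, NULL);
	blk_mq_tagset_wait_completed_request(set);
}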
     474             : 
     475             : /**
     476             :  * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
     477             :  * @q:          Request queue to examine.
     478             :  * @fn:         Pointer to the function that will be called for each request
     479             :  *              on @q. @fn will be called as follows: @fn(rq, @priv)
     480             :  *              where rq is a pointer to a request. Return true to
     481             :  *              continue iterating tags, false to stop.
     482             :  * @priv:       Will be passed as second argument to @fn.
     484             :  *
     485             :  * Note: if @q->tag_set is shared with other request queues then @fn will be
     486             :  * called for all requests on all queues that share that tag set and not only
     487             :  * for requests associated with @q.
     488             :  */
     489           0 : void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
     490             :                 void *priv)
     491             : {
     492             :         /*
     493             :          * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
     494             :          * while the queue is frozen. So we can use q_usage_counter to avoid
     495             :          * racing with it.
     496             :          */
     497           0 :         if (!percpu_ref_tryget(&q->q_usage_counter))
     498             :                 return;
     499             : 
     500           0 :         if (blk_mq_is_shared_tags(q->tag_set->flags)) {
     501           0 :                 struct blk_mq_tags *tags = q->tag_set->shared_tags;
     502           0 :                 struct sbitmap_queue *bresv = &tags->breserved_tags;
     503           0 :                 struct sbitmap_queue *btags = &tags->bitmap_tags;
     504             : 
     505           0 :                 if (tags->nr_reserved_tags)
     506             :                         bt_for_each(NULL, q, bresv, fn, priv, true);
     507             :                 bt_for_each(NULL, q, btags, fn, priv, false);
     508             :         } else {
     509             :                 struct blk_mq_hw_ctx *hctx;
     510             :                 unsigned long i;
     511             : 
     512           0 :                 queue_for_each_hw_ctx(q, hctx, i) {
     513           0 :                         struct blk_mq_tags *tags = hctx->tags;
     514           0 :                         struct sbitmap_queue *bresv = &tags->breserved_tags;
     515           0 :                         struct sbitmap_queue *btags = &tags->bitmap_tags;
     516             : 
     517             :                         /*
     518             :                          * If no software queues are currently mapped to this
     519             :                          * hardware queue, there's nothing to check
     520             :                          */
     521           0 :                         if (!blk_mq_hw_queue_mapped(hctx))
     522           0 :                                 continue;
     523             : 
     524           0 :                         if (tags->nr_reserved_tags)
     525             :                                 bt_for_each(hctx, q, bresv, fn, priv, true);
     526             :                         bt_for_each(hctx, q, btags, fn, priv, false);
     527             :                 }
     528             :         }
     529           0 :         blk_queue_exit(q);
     530             : }
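
/*
 * Usage sketch (illustrative, in the spirit of the in-flight accounting
 * in blk-mq.c; the names below are hypothetical):
 */
static bool count_inflight(struct request *rq, void *priv)
{
	unsigned int *inflight = priv;

	if (blk_mq_request_started(rq))
		(*inflight)++;
	return true;
}

static unsigned int queue_inflight_sketch(struct request_queue *q)
{
	unsigned int inflight = 0;

	blk_mq_queue_tag_busy_iter(q, count_inflight, &inflight);
	return inflight;
}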
     531             : 
     532             : static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
     533             :                     bool round_robin, int node)
     534             : {
     535           0 :         return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
     536             :                                        node);
     537             : }
     538             : 
     539           0 : int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
     540             :                         struct sbitmap_queue *breserved_tags,
     541             :                         unsigned int queue_depth, unsigned int reserved,
     542             :                         int node, int alloc_policy)
     543             : {
     544           0 :         unsigned int depth = queue_depth - reserved;
     545           0 :         bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
     546             : 
     547           0 :         if (bt_alloc(bitmap_tags, depth, round_robin, node))
     548             :                 return -ENOMEM;
     549           0 :         if (bt_alloc(breserved_tags, reserved, round_robin, node))
     550             :                 goto free_bitmap_tags;
     551             : 
     552             :         return 0;
     553             : 
     554             : free_bitmap_tags:
     555           0 :         sbitmap_queue_free(bitmap_tags);
     556           0 :         return -ENOMEM;
     557             : }
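
/*
 * Worked example (illustrative): with queue_depth = 64 and reserved = 2,
 * bitmap_tags is sized for 62 regular tags and breserved_tags for the 2
 * reserved ones, so the two bitmaps together never hand out more than
 * queue_depth tags.
 */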
     558             : 
     559           0 : struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
     560             :                                      unsigned int reserved_tags,
     561             :                                      int node, int alloc_policy)
     562             : {
     563             :         struct blk_mq_tags *tags;
     564             : 
     565           0 :         if (total_tags > BLK_MQ_TAG_MAX) {
     566           0 :                 pr_err("blk-mq: tag depth too large\n");
     567           0 :                 return NULL;
     568             :         }
     569             : 
     570           0 :         tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
     571           0 :         if (!tags)
     572             :                 return NULL;
     573             : 
     574           0 :         tags->nr_tags = total_tags;
     575           0 :         tags->nr_reserved_tags = reserved_tags;
     576           0 :         spin_lock_init(&tags->lock);
     577             : 
     578           0 :         if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
     579             :                                 total_tags, reserved_tags, node,
     580             :                                 alloc_policy) < 0) {
     581           0 :                 kfree(tags);
     582           0 :                 return NULL;
     583             :         }
     584             :         return tags;
     585             : }
     586             : 
     587           0 : void blk_mq_free_tags(struct blk_mq_tags *tags)
     588             : {
     589           0 :         sbitmap_queue_free(&tags->bitmap_tags);
     590           0 :         sbitmap_queue_free(&tags->breserved_tags);
     591           0 :         kfree(tags);
     592           0 : }
     593             : 
     594           0 : int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
     595             :                             struct blk_mq_tags **tagsptr, unsigned int tdepth,
     596             :                             bool can_grow)
     597             : {
     598           0 :         struct blk_mq_tags *tags = *tagsptr;
     599             : 
     600           0 :         if (tdepth <= tags->nr_reserved_tags)
     601             :                 return -EINVAL;
     602             : 
     603             :         /*
     604             :          * If we are allowed to grow beyond the original size, allocate
     605             :          * a new set of tags before freeing the old one.
     606             :          */
     607           0 :         if (tdepth > tags->nr_tags) {
     608           0 :                 struct blk_mq_tag_set *set = hctx->queue->tag_set;
     609             :                 struct blk_mq_tags *new;
     610             : 
     611           0 :                 if (!can_grow)
     612             :                         return -EINVAL;
     613             : 
     614             :                 /*
     615             :                  * We need some sort of upper limit, set it high enough that
     616             :                  * no valid use cases should require more.
     617             :                  */
     618           0 :                 if (tdepth > MAX_SCHED_RQ)
     619             :                         return -EINVAL;
     620             : 
     621             :                 /*
     622             :                  * Only the sbitmap needs resizing since we allocated the max
     623             :                  * initially.
     624             :                  */
     625           0 :                 if (blk_mq_is_shared_tags(set->flags))
     626             :                         return 0;
     627             : 
     628           0 :                 new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
     629           0 :                 if (!new)
     630             :                         return -ENOMEM;
     631             : 
     632           0 :                 blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
     633           0 :                 *tagsptr = new;
     634             :         } else {
     635             :                 /*
     636             :                  * Don't need (or can't) update reserved tags here, they
     637             :                  * remain static and should never need resizing.
     638             :                  */
     639           0 :                 sbitmap_queue_resize(&tags->bitmap_tags,
     640             :                                 tdepth - tags->nr_reserved_tags);
     641             :         }
     642             : 
     643             :         return 0;
     644             : }
     645             : 
     646           0 : void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
     647             : {
     648           0 :         struct blk_mq_tags *tags = set->shared_tags;
     649             : 
     650           0 :         sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
     651           0 : }
     652             : 
     653           0 : void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
     654             : {
     655           0 :         sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
     656           0 :                              q->nr_requests - q->tag_set->reserved_tags);
     657           0 : }
     658             : 
     659             : /**
     660             :  * blk_mq_unique_tag() - return a tag that is unique queue-wide
     661             :  * @rq: request for which to compute a unique tag
     662             :  *
     663             :  * The tag field in struct request is unique per hardware queue but not over
     664             :  * all hardware queues. Hence this function, which returns a tag with the
     665             :  * hardware context index in the upper bits and the per hardware queue tag in
     666             :  * the lower bits.
     667             :  *
     668             :  * Note: When called for a request that is queued on a non-multiqueue request
     669             :  * queue, the hardware context index is set to zero.
     670             :  */
     671           0 : u32 blk_mq_unique_tag(struct request *rq)
     672             : {
     673           0 :         return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
     674           0 :                 (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
     675             : }
     676             : EXPORT_SYMBOL(blk_mq_unique_tag);
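
/*
 * The inverse helpers live in include/linux/blk-mq.h and look essentially
 * like this (reproduced for context; verify against your kernel version):
 */
static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}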

Generated by: LCOV version 1.14