block/blk-mq.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
/*
 * Internal helpers for request insertion into sw queues
 */
void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_POLLED is set, polling is enabled.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}
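
/*
 * Illustrative note (not part of blk-mq.h): given the logic above,
 * blk_mq_get_hctx_type() resolves, for example:
 *
 *	REQ_OP_READ  | REQ_POLLED	-> HCTX_TYPE_POLL
 *	REQ_OP_WRITE | REQ_POLLED	-> HCTX_TYPE_POLL
 *	REQ_OP_READ			-> HCTX_TYPE_READ
 *	REQ_OP_WRITE, REQ_OP_FLUSH, ...	-> HCTX_TYPE_DEFAULT
 *
 * i.e. REQ_POLLED wins over the op-based split, and anything that is neither
 * polled nor a read falls back to the default hardware queue type.
 */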

/*
 * blk_mq_map_queue() - map (opf, ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED)
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-CPU software queues. They could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctxs are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
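
/*
 * Illustrative sketch, not part of blk-mq.h: on the submission path the two
 * helpers above are typically paired, first picking the per-CPU software
 * queue and then the hardware queue of the right type for the given
 * operation flags. example_pick_queues() is a hypothetical helper written
 * only to show the composition.
 */
static inline struct blk_mq_hw_ctx *example_pick_queues(struct request_queue *q,
							blk_opf_t opf)
{
	/* software queue for the CPU we are currently running on */
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

	/* hardware queue (default/read/poll) serving that CPU */
	return blk_mq_map_queue(q, opf, ctx);
}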

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
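
/*
 * Illustrative sketch, not part of blk-mq.h: a caller allocating a single
 * request fills in the input fields and leaves ctx/hctx for the tag
 * allocation path to resolve. example_init_alloc_data() is a hypothetical
 * helper showing how the structure is meant to be populated.
 */
static inline void example_init_alloc_data(struct blk_mq_alloc_data *data,
					   struct request_queue *q,
					   blk_opf_t opf,
					   blk_mq_req_flags_t flags)
{
	*data = (struct blk_mq_alloc_data) {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= 1,	/* a single request, no batching */
	};
}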

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
		unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
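
/*
 * Illustrative sketch, not part of blk-mq.h: the budget helpers above wrap
 * the optional ->get_budget()/->put_budget() callbacks of drivers that limit
 * the number of in-flight commands. A dispatch loop would, roughly:
 *
 *	int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *	if (budget_token < 0)
 *		return;		// no budget available, retry later
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	// ... hand the request to the driver; if that fails before the
 *	// driver consumes it, return the budget:
 *	blk_mq_put_dispatch_budget(q, budget_token);
 *
 * For drivers without budget callbacks the token is simply 0 and the
 * set/put calls are no-ops.
 */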

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}
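
/*
 * Illustrative note (not part of blk-mq.h): the dispatch path acquires a
 * driver tag with blk_mq_get_driver_tag() before a request is handed to the
 * driver, and releases it with blk_mq_put_driver_tag() when the request
 * completes or has to be requeued. As the helpers above show, releasing the
 * tag also clears RQF_MQ_INFLIGHT and drops the active-request count used
 * for fair tag sharing.
 */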

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}
/*
 * blk_mq_plug() - Get caller context plug
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. However, this can change the
 * BIO insertion order from the order in which submit_bio() is executed when
 * multiple contexts issue BIOs to a device concurrently, even if these
 * contexts are synchronized to tightly control BIO issuing order. While this
 * is not a problem with regular block devices, this ordering change can cause
 * write BIO failures with zoned block devices, as these require sequential
 * write patterns to zones. Prevent this from happening by ignoring the plug
 * state of a BIO issuing context if it is for a zoned block device and the
 * BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged, NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
	/* Zoned block device write operation case: do not plug the BIO */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
		return NULL;

	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	return current->plug;
}
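
/*
 * Illustrative note (not part of blk-mq.h): submitters call blk_mq_plug()
 * instead of reading current->plug directly, e.g.
 *
 *	struct blk_plug *plug = blk_mq_plug(bio);
 *
 *	if (plug)
 *		// batch the request on the plug list for later flushing
 *	else
 *		// insert/dispatch immediately
 *
 * so that writes to zoned block devices always take the unplugged path and
 * keep their sequential submission order.
 */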

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
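
/*
 * Illustrative note (not part of blk-mq.h): the fair-share depth above is
 * ceil(bt->sb.depth / users), clamped to at least 4. For example, with a
 * 256-tag set shared by 4 active queues each queue may have up to
 * (256 + 3) / 4 = 64 requests in flight; with a 10-tag set and 3 active
 * queues the raw share would be 4, which the max() keeps at 4 so every
 * user can still make progress.
 */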

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {									\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {			\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set;	\
		int srcu_idx;						\
									\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock(__tag_set->srcu);		\
		(dispatch_ops);						\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);		\
	} else {							\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	}								\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)			\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)

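/*
 * Illustrative sketch, not part of blk-mq.h: dispatch work is passed to
 * blk_mq_run_dispatch_ops() as a statement, which then runs under
 * rcu_read_lock() for non-blocking tag sets or under SRCU when
 * BLK_MQ_F_BLOCKING is set (so the driver is allowed to sleep).
 * example_dispatch() below is a hypothetical callee standing in for the
 * real dispatch code.
 */
static inline void example_dispatch(struct blk_mq_hw_ctx *hctx)
{
	/* hypothetical: whatever work must not race with queue teardown */
}

static inline void example_run_dispatch(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	blk_mq_run_dispatch_ops(q, example_dispatch(hctx));
}
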
#endif
