/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu      *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];

        struct request_queue    *queue;
        struct blk_mq_ctxs      *ctxs;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

enum {
        BLK_MQ_NO_TAG           = -1U,
        BLK_MQ_TAG_MIN          = 1,
        BLK_MQ_TAG_MAX          = BLK_MQ_NO_TAG - 1,
};
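
/*
 * Numerically, -1U is UINT_MAX (0xffffffff), so BLK_MQ_NO_TAG sits above
 * every valid tag value and BLK_MQ_TAG_MAX works out to UINT_MAX - 1; a real
 * tag can therefore never be confused with "no tag".
 */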

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD           ((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
                unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
                             unsigned int);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                                unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
                             struct blk_mq_tags *tags,
                             unsigned int hctx_idx);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller ensures that if REQ_POLLED is set, polling is enabled.
         */
        if (opf & REQ_POLLED)
                type = HCTX_TYPE_POLL;
        else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;
        return type;
}
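
/*
 * Illustrative sketch: how a few opf values classify under
 * blk_mq_get_hctx_type(), assuming the caller obeys the REQ_POLLED rule
 * above:
 *
 *      blk_mq_get_hctx_type(REQ_OP_READ)               -> HCTX_TYPE_READ
 *      blk_mq_get_hctx_type(REQ_OP_WRITE)              -> HCTX_TYPE_DEFAULT
 *      blk_mq_get_hctx_type(REQ_OP_READ | REQ_POLLED)  -> HCTX_TYPE_POLL
 *
 * Whether HCTX_TYPE_READ/HCTX_TYPE_POLL end up on dedicated hardware queues
 * depends on the driver's queue maps; ctx->hctxs[] falls back to the default
 * mapping for types the driver did not configure.
 */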

/*
 * blk_mq_map_queue() - map (opf, ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     blk_opf_t opf,
                                                     struct blk_mq_ctx *ctx)
{
        return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctxs are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
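
/*
 * Illustrative sketch: roughly how the submission path resolves a bio to a
 * software and then a hardware queue, assuming a kernel context that already
 * has a live request_queue *q and struct bio *bio:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * ctx is the per-CPU software queue of the submitting CPU; hctx is the
 * hardware queue that requests staged on that ctx are dispatched to for the
 * bio's operation type.
 */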

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        blk_opf_t cmd_flags;
        req_flags_t rq_flags;

        /* allocate multiple requests/tags in one go */
        unsigned int nr_tags;
        struct request **cached_rq;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
                unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
                struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
                unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
                unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
                unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
                void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
                                                 struct blk_mq_hw_ctx *hctx)
{
        if (!hctx)
                return &bt->ws[0];
        return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
                                          unsigned int tag)
{
        return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->rq_flags & RQF_SCHED_TAGS)
                return data->hctx->sched_tags;
        return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
                struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
                                              int budget_token)
{
        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);
        return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
        if (token < 0)
                return;

        if (rq->q->mq_ops->set_rq_budget_token)
                rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
        if (rq->q->mq_ops->get_rq_budget_token)
                return rq->q->mq_ops->get_rq_budget_token(rq);
        return -1;
}
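
/*
 * Illustrative sketch of the dispatch-budget protocol built on the helpers
 * above; issue_to_driver() is a hypothetical stand-in for handing the
 * request to ->queue_rq():
 *
 *      int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *      if (budget_token < 0)
 *              return false;
 *
 * A negative token means no budget is available and dispatch should back
 * off. Otherwise the token is attached to the request, and must be returned
 * if the request is not actually issued:
 *
 *      blk_mq_set_rq_budget_token(rq, budget_token);
 *      if (issue_to_driver(rq) != BLK_STS_OK)
 *              blk_mq_put_dispatch_budget(q, budget_token);
 *
 * Queues whose driver provides no ->get_budget() always receive token 0.
 */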

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
                int val)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        __blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
        return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                __blk_mq_dec_active_requests(hctx);
        }
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        if (rq->tag != BLK_MQ_NO_TAG &&
            !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
                hctx->tags->rqs[rq->tag] = rq;
                return true;
        }

        return __blk_mq_get_driver_tag(hctx, rq);
}
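
/*
 * Illustrative sketch of the driver-tag life cycle on the dispatch side;
 * issue_to_driver() is a hypothetical stand-in for the ->queue_rq() call:
 *
 *      if (!blk_mq_get_driver_tag(rq))
 *              return false;
 *
 * When no driver tag is available the request stays queued and dispatch
 * backs off until tags are freed. Once issued, the tag is dropped again on
 * completion or requeue:
 *
 *      issue_to_driver(rq);
 *      ...
 *      blk_mq_put_driver_tag(rq);
 */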

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. However, this can cause BIO
 * insertion order to differ from the order in which submit_bio() is executed
 * when multiple contexts concurrently issue BIOs to a device, even if these
 * contexts are synchronized to tightly control BIO issuing order. While this
 * is not a problem with regular block devices, this ordering change can cause
 * write BIO failures with zoned block devices as these require sequential
 * write patterns to zones. Prevent this from happening by ignoring the plug
 * state of a BIO issuing context if it is for a zoned block device and the
 * BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
        /* Zoned block device write operation case: do not plug the BIO */
        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
            bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
                return NULL;

        /*
         * For regular block devices or read operations, use the context plug
         * which may be NULL if blk_start_plug() was not executed.
         */
        return current->plug;
}
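
/*
 * Illustrative sketch: how a submitter typically sets up the plug that
 * blk_mq_plug() later picks up; bio1 and bio2 are hypothetical, fully built
 * bios aimed at the same disk:
 *
 *      struct blk_plug plug;
 *
 *      blk_start_plug(&plug);
 *      submit_bio(bio1);
 *      submit_bio(bio2);
 *      blk_finish_plug(&plug);
 *
 * While the plug is active, blk_mq_plug() returns &plug so the two bios can
 * be batched and merged. For a write to a zoned block device it returns NULL
 * instead, so the bio bypasses the plug and keeps its submission order.
 */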

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
        while (!list_empty(list)) {
                struct request *rq = list_entry_rq(list->next);

                list_del_init(&rq->queuelist);
                blk_mq_free_request(rq);
        }
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return true;
        }

        users = READ_ONCE(hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return __blk_mq_active_requests(hctx) < depth;
}
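
/*
 * Worked example for the fair-share limit above, assuming a shared tag set
 * with bt->sb.depth == 256:
 *
 *      8 active queues:   depth = max(DIV_ROUND_UP(256, 8), 4U)   = 32
 *      128 active queues: depth = max(DIV_ROUND_UP(256, 128), 4U) = 4
 *
 * Each active queue may therefore have at most 32 (respectively 4) requests
 * in flight before hctx_may_queue() makes it back off; the floor of 4 keeps
 * every active queue able to make progress.
 */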

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
do {                                                            \
        if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {          \
                struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
                int srcu_idx;                                   \
                                                                \
                might_sleep_if(check_sleep);                    \
                srcu_idx = srcu_read_lock(__tag_set->srcu);     \
                (dispatch_ops);                                 \
                srcu_read_unlock(__tag_set->srcu, srcu_idx);    \
        } else {                                                \
                rcu_read_lock();                                \
                (dispatch_ops);                                 \
                rcu_read_unlock();                              \
        }                                                       \
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)                \
        __blk_mq_run_dispatch_ops(q, true, dispatch_ops)

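/*
 * Illustrative sketch: the macros above wrap an arbitrary dispatch statement
 * so it runs under rcu_read_lock(), or under the tag set's SRCU when
 * BLK_MQ_F_BLOCKING is set. Usage is roughly:
 *
 *      blk_mq_run_dispatch_ops(hctx->queue,
 *              blk_mq_sched_dispatch_requests(hctx));
 *
 * where blk_mq_sched_dispatch_requests() stands in for whatever statement
 * actually drains the hardware queue.
 */
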
#endif
