/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu      *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];

        struct request_queue    *queue;
        struct blk_mq_ctxs      *ctxs;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
                unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
                             unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                                unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
                             struct blk_mq_tags *tags,
                             unsigned int hctx_idx);
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                                bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller must ensure that polling is enabled if REQ_POLLED is set.
         */
        if (opf & REQ_POLLED)
                type = HCTX_TYPE_POLL;
        else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;
        return type;
}

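/*
 * Example (editor's sketch, not kernel code): how an opf value selects a
 * hardware queue type.
 *
 *      blk_mq_get_hctx_type(REQ_OP_READ | REQ_POLLED); // HCTX_TYPE_POLL
 *      blk_mq_get_hctx_type(REQ_OP_READ);              // HCTX_TYPE_READ
 *      blk_mq_get_hctx_type(REQ_OP_WRITE);             // HCTX_TYPE_DEFAULT
 *
 * Queue map setup is expected to alias any type the driver does not provide
 * (e.g. no poll or read map) back to the default hctx, so the returned index
 * is always usable with ctx->hctxs[].
 */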
/*
 * blk_mq_map_queue() - map (opf,ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     blk_opf_t opf,
                                                     struct blk_mq_ctx *ctx)
{
        return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

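/*
 * Example (editor's sketch): the usual lookup chain on the submission side,
 * assuming the caller has already resolved a software queue context.
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * Since ctx->hctxs[] is populated when the queue maps are built, the mapping
 * itself is just an array index.
 */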
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

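/*
 * Example (editor's sketch): because the returned ctx is only a placement
 * hint, callers serialize on ctx->lock rather than disabling preemption when
 * they touch its request lists. Roughly:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      enum hctx_type type = blk_mq_get_hctx_type(rq->cmd_flags);
 *
 *      spin_lock(&ctx->lock);
 *      list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
 *      spin_unlock(&ctx->lock);
 */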
struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        blk_opf_t cmd_flags;
        req_flags_t rq_flags;

        /* allocate multiple requests/tags in one go */
        unsigned int nr_tags;
        struct request **cached_rq;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (!(data->rq_flags & RQF_ELV))
                return data->hctx->tags;
        return data->hctx->sched_tags;
}

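/*
 * Example (editor's sketch): how an allocation path might fill this struct
 * before deciding which tag set to draw from. Field values are illustrative.
 *
 *      struct blk_mq_alloc_data data = {
 *              .q              = q,
 *              .cmd_flags      = bio->bi_opf,
 *              .nr_tags        = 1,
 *      };
 *
 *      data.ctx = blk_mq_get_ctx(q);
 *      data.hctx = blk_mq_map_queue(q, data.cmd_flags, data.ctx);
 *      // sched_tags when an elevator owns the request (RQF_ELV), else tags
 *      struct blk_mq_tags *tags = blk_mq_tags_from_data(&data);
 */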
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
                struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
                                              int budget_token)
{
        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);
        return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
        if (token < 0)
                return;

        if (rq->q->mq_ops->set_rq_budget_token)
                rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
        if (rq->q->mq_ops->get_rq_budget_token)
                return rq->q->mq_ops->get_rq_budget_token(rq);
        return -1;
}

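/*
 * Example (editor's sketch): the budget handshake on a dispatch attempt,
 * simplified from the issue paths.
 *
 *      int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *      if (budget_token < 0)
 *              return false;                   // driver is out of budget
 *      blk_mq_set_rq_budget_token(rq, budget_token);
 *
 *      if (!blk_mq_get_driver_tag(rq)) {
 *              blk_mq_put_dispatch_budget(q, budget_token);
 *              return false;                   // hand the budget back
 *      }
 *      // ... ->queue_rq(); the budget travels with the request from here
 */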
static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
                int val)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        __blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
        return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                __blk_mq_dec_active_requests(hctx);
        }
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        if (rq->tag != BLK_MQ_NO_TAG &&
            !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
                hctx->tags->rqs[rq->tag] = rq;
                return true;
        }

        return __blk_mq_get_driver_tag(hctx, rq);
}

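/*
 * Example (editor's sketch): the driver tag dance around ->queue_rq(),
 * simplified from the direct-issue path. @bd is assumed to be the
 * struct blk_mq_queue_data wrapping @rq.
 *
 *      blk_status_t ret;
 *
 *      if (!blk_mq_get_driver_tag(rq))
 *              return BLK_STS_RESOURCE;        // leave rq on its queue
 *
 *      ret = q->mq_ops->queue_rq(hctx, &bd);
 *      if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
 *              blk_mq_put_driver_tag(rq);      // retry later with a new tag
 *      return ret;
 */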
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @bio : the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices, this
 * ordering change can cause write BIO failures with zoned block devices as
 * these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if it is for
 * a zoned block device and the BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
        /* Zoned block device write operation case: do not plug the BIO */
        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
            bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
                return NULL;

        /*
         * For regular block devices or read operations, use the context plug
         * which may be NULL if blk_start_plug() was not executed.
         */
        return current->plug;
}

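/*
 * Example (editor's sketch): submitters only batch when a plug is available;
 * a NULL return (no plug, or a zoned write) means the bio is issued right
 * away. The two helpers below are hypothetical placeholders.
 *
 *      struct blk_plug *plug = blk_mq_plug(bio);
 *
 *      if (plug)
 *              add_rq_to_plug(plug, rq);       // hypothetical batching step
 *      else
 *              issue_rq_directly(q, rq);       // hypothetical direct issue
 */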
/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
        while (!list_empty(list)) {
                struct request *rq = list_entry_rq(list->next);

                list_del_init(&rq->queuelist);
                blk_mq_free_request(rq);
        }
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return true;
        }

        users = atomic_read(&hctx->tags->active_queues);

        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return __blk_mq_active_requests(hctx) < depth;
}

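/*
 * Worked example (editor's illustration): with bt->sb.depth == 64 and
 * users == 3 active queues, the fair share is (64 + 3 - 1) / 3 = 22, so a
 * queue may keep allocating while it has fewer than 22 requests in flight.
 * With depth == 8 and users == 4 the share would round to 2, but the
 * max(..., 4U) clamp still allows up to 4 so no queue is starved of tags.
 */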
/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
do {                                                            \
        if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {          \
                int srcu_idx;                                   \
                                                                \
                might_sleep_if(check_sleep);                    \
                srcu_idx = srcu_read_lock((q)->tag_set->srcu);  \
                (dispatch_ops);                                 \
                srcu_read_unlock((q)->tag_set->srcu, srcu_idx); \
        } else {                                                \
                rcu_read_lock();                                \
                (dispatch_ops);                                 \
                rcu_read_unlock();                              \
        }                                                       \
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)                \
        __blk_mq_run_dispatch_ops(q, true, dispatch_ops)        \

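/*
 * Example (editor's sketch): the macro takes a statement, not a callback, so
 * callers wrap the dispatch expression directly, e.g.:
 *
 *      blk_mq_run_dispatch_ops(q,
 *              blk_mq_try_issue_list_directly(hctx, &rq_list));
 *
 * For a BLK_MQ_F_BLOCKING tag set the statement runs under SRCU (and may
 * sleep if check_sleep allows); otherwise it runs inside a plain RCU
 * read-side critical section.
 */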
#endif
