LCOV - code coverage report
Current view: top level - block - blk-mq.c (source / functions)
Test: coverage.info
Date: 2023-03-27 20:00:47

                  Hit     Total   Coverage
Lines:              8      1918      0.4 %
Functions:          1       175      0.6 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Block multiqueue core code
       4             :  *
       5             :  * Copyright (C) 2013-2014 Jens Axboe
       6             :  * Copyright (C) 2013-2014 Christoph Hellwig
       7             :  */
       8             : #include <linux/kernel.h>
       9             : #include <linux/module.h>
      10             : #include <linux/backing-dev.h>
      11             : #include <linux/bio.h>
      12             : #include <linux/blkdev.h>
      13             : #include <linux/blk-integrity.h>
      14             : #include <linux/kmemleak.h>
      15             : #include <linux/mm.h>
      16             : #include <linux/init.h>
      17             : #include <linux/slab.h>
      18             : #include <linux/workqueue.h>
      19             : #include <linux/smp.h>
      20             : #include <linux/interrupt.h>
      21             : #include <linux/llist.h>
      22             : #include <linux/cpu.h>
      23             : #include <linux/cache.h>
      24             : #include <linux/sched/sysctl.h>
      25             : #include <linux/sched/topology.h>
      26             : #include <linux/sched/signal.h>
      27             : #include <linux/delay.h>
      28             : #include <linux/crash_dump.h>
      29             : #include <linux/prefetch.h>
      30             : #include <linux/blk-crypto.h>
      31             : #include <linux/part_stat.h>
      32             : 
      33             : #include <trace/events/block.h>
      34             : 
      35             : #include <linux/blk-mq.h>
      36             : #include <linux/t10-pi.h>
      37             : #include "blk.h"
      38             : #include "blk-mq.h"
      39             : #include "blk-mq-debugfs.h"
      40             : #include "blk-mq-tag.h"
      41             : #include "blk-pm.h"
      42             : #include "blk-stat.h"
      43             : #include "blk-mq-sched.h"
      44             : #include "blk-rq-qos.h"
      45             : #include "blk-ioprio.h"
      46             : 
      47             : static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
      48             : 
      49             : static void blk_mq_poll_stats_start(struct request_queue *q);
      50             : static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
      51             : 
      52           0 : static int blk_mq_poll_stats_bkt(const struct request *rq)
      53             : {
      54             :         int ddir, sectors, bucket;
      55             : 
      56           0 :         ddir = rq_data_dir(rq);
      57           0 :         sectors = blk_rq_stats_sectors(rq);
      58             : 
      59           0 :         bucket = ddir + 2 * ilog2(sectors);
      60             : 
      61           0 :         if (bucket < 0)
      62             :                 return -1;
      63           0 :         else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
      64           0 :                 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
      65             : 
      66             :         return bucket;
      67             : }
      68             : 
      69             : #define BLK_QC_T_SHIFT          16
      70             : #define BLK_QC_T_INTERNAL       (1U << 31)
      71             : 
      72             : static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
      73             :                 blk_qc_t qc)
      74             : {
      75           0 :         return xa_load(&q->hctx_table,
      76           0 :                         (qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
      77             : }
      78             : 
      79             : static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
      80             :                 blk_qc_t qc)
      81             : {
      82           0 :         unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);
      83             : 
      84           0 :         if (qc & BLK_QC_T_INTERNAL)
      85           0 :                 return blk_mq_tag_to_rq(hctx->sched_tags, tag);
      86           0 :         return blk_mq_tag_to_rq(hctx->tags, tag);
      87             : }
      88             : 
      89             : static inline blk_qc_t blk_rq_to_qc(struct request *rq)
      90             : {
      91           0 :         return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
      92           0 :                 (rq->tag != -1 ?
      93           0 :                  rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
      94             : }
      95             : 
      96             : /*
      97             :  * Check if any of the ctx, dispatch list or elevator
       98             :  * has pending work in this hardware queue.
      99             :  */
     100           0 : static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
     101             : {
     102           0 :         return !list_empty_careful(&hctx->dispatch) ||
     103           0 :                 sbitmap_any_bit_set(&hctx->ctx_map) ||
     104           0 :                         blk_mq_sched_has_work(hctx);
     105             : }
     106             : 
     107             : /*
     108             :  * Mark this ctx as having pending work in this hardware queue
     109             :  */
     110           0 : static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
     111             :                                      struct blk_mq_ctx *ctx)
     112             : {
     113           0 :         const int bit = ctx->index_hw[hctx->type];
     114             : 
     115           0 :         if (!sbitmap_test_bit(&hctx->ctx_map, bit))
     116           0 :                 sbitmap_set_bit(&hctx->ctx_map, bit);
     117           0 : }
     118             : 
     119             : static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
     120             :                                       struct blk_mq_ctx *ctx)
     121             : {
     122           0 :         const int bit = ctx->index_hw[hctx->type];
     123             : 
     124           0 :         sbitmap_clear_bit(&hctx->ctx_map, bit);
     125             : }
     126             : 
     127             : struct mq_inflight {
     128             :         struct block_device *part;
     129             :         unsigned int inflight[2];
     130             : };
     131             : 
     132           0 : static bool blk_mq_check_inflight(struct request *rq, void *priv)
     133             : {
     134           0 :         struct mq_inflight *mi = priv;
     135             : 
     136           0 :         if (rq->part && blk_do_io_stat(rq) &&
     137           0 :             (!mi->part->bd_partno || rq->part == mi->part) &&
     138           0 :             blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
     139           0 :                 mi->inflight[rq_data_dir(rq)]++;
     140             : 
     141           0 :         return true;
     142             : }
     143             : 
     144           0 : unsigned int blk_mq_in_flight(struct request_queue *q,
     145             :                 struct block_device *part)
     146             : {
     147           0 :         struct mq_inflight mi = { .part = part };
     148             : 
     149           0 :         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
     150             : 
     151           0 :         return mi.inflight[0] + mi.inflight[1];
     152             : }
     153             : 
     154           0 : void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
     155             :                 unsigned int inflight[2])
     156             : {
     157           0 :         struct mq_inflight mi = { .part = part };
     158             : 
     159           0 :         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
     160           0 :         inflight[0] = mi.inflight[0];
     161           0 :         inflight[1] = mi.inflight[1];
     162           0 : }
     163             : 
     164           0 : void blk_freeze_queue_start(struct request_queue *q)
     165             : {
     166           0 :         mutex_lock(&q->mq_freeze_lock);
     167           0 :         if (++q->mq_freeze_depth == 1) {
     168           0 :                 percpu_ref_kill(&q->q_usage_counter);
     169           0 :                 mutex_unlock(&q->mq_freeze_lock);
     170           0 :                 if (queue_is_mq(q))
     171           0 :                         blk_mq_run_hw_queues(q, false);
     172             :         } else {
     173           0 :                 mutex_unlock(&q->mq_freeze_lock);
     174             :         }
     175           0 : }
     176             : EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
     177             : 
     178           0 : void blk_mq_freeze_queue_wait(struct request_queue *q)
     179             : {
     180           0 :         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
     181           0 : }
     182             : EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
     183             : 
     184           0 : int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
     185             :                                      unsigned long timeout)
     186             : {
     187           0 :         return wait_event_timeout(q->mq_freeze_wq,
     188             :                                         percpu_ref_is_zero(&q->q_usage_counter),
     189             :                                         timeout);
     190             : }
     191             : EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
     192             : 
     193             : /*
     194             :  * Guarantee no request is in use, so we can change any data structure of
     195             :  * the queue afterward.
     196             :  */
     197           0 : void blk_freeze_queue(struct request_queue *q)
     198             : {
     199             :         /*
     200             :          * In the !blk_mq case we are only calling this to kill the
     201             :          * q_usage_counter, otherwise this increases the freeze depth
     202             :          * and waits for it to return to zero.  For this reason there is
     203             :          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
      204             :          * exported to drivers, as the only user of unfreeze is blk_mq.
     205             :          */
     206           0 :         blk_freeze_queue_start(q);
     207           0 :         blk_mq_freeze_queue_wait(q);
     208           0 : }
     209             : 
     210           0 : void blk_mq_freeze_queue(struct request_queue *q)
     211             : {
     212             :         /*
     213             :          * ...just an alias to keep freeze and unfreeze actions balanced
     214             :          * in the blk_mq_* namespace
     215             :          */
     216           0 :         blk_freeze_queue(q);
     217           0 : }
     218             : EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
     219             : 
     220           0 : void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
     221             : {
     222           0 :         mutex_lock(&q->mq_freeze_lock);
     223           0 :         if (force_atomic)
     224           0 :                 q->q_usage_counter.data->force_atomic = true;
     225           0 :         q->mq_freeze_depth--;
     226           0 :         WARN_ON_ONCE(q->mq_freeze_depth < 0);
     227           0 :         if (!q->mq_freeze_depth) {
     228           0 :                 percpu_ref_resurrect(&q->q_usage_counter);
     229           0 :                 wake_up_all(&q->mq_freeze_wq);
     230             :         }
     231           0 :         mutex_unlock(&q->mq_freeze_lock);
     232           0 : }
     233             : 
     234           0 : void blk_mq_unfreeze_queue(struct request_queue *q)
     235             : {
     236           0 :         __blk_mq_unfreeze_queue(q, false);
     237           0 : }
     238             : EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
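
Freezing is reference counted through mq_freeze_depth, so the calls above may nest; only the outermost unfreeze resurrects q_usage_counter and wakes waiters. A minimal caller-side sketch of the pairing, assuming a hypothetical reconfiguration helper (the function name and the work done while frozen are illustrative, not taken from this file):

        static void example_reconfigure_queue(struct request_queue *q)
        {
                /* Kill q_usage_counter and wait until all in-flight requests have drained. */
                blk_mq_freeze_queue(q);

                /* ... queue data structures can be changed safely here ... */

                /* Drop the freeze depth; the final unfreeze resurrects the counter and wakes waiters. */
                blk_mq_unfreeze_queue(q);
        }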
     239             : 
     240             : /*
     241             :  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
     242             :  * mpt3sas driver such that this function can be removed.
     243             :  */
     244           0 : void blk_mq_quiesce_queue_nowait(struct request_queue *q)
     245             : {
     246             :         unsigned long flags;
     247             : 
     248           0 :         spin_lock_irqsave(&q->queue_lock, flags);
     249           0 :         if (!q->quiesce_depth++)
     250           0 :                 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
     251           0 :         spin_unlock_irqrestore(&q->queue_lock, flags);
     252           0 : }
     253             : EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
     254             : 
     255             : /**
     256             :  * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
     257             :  * @set: tag_set to wait on
     258             :  *
      259             :  * Note: it is the driver's responsibility to make sure that quiesce has
      260             :  * been started on one or more of the request_queues of the tag_set.  This
     261             :  * function only waits for the quiesce on those request_queues that had
     262             :  * the quiesce flag set using blk_mq_quiesce_queue_nowait.
     263             :  */
     264           0 : void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
     265             : {
     266           0 :         if (set->flags & BLK_MQ_F_BLOCKING)
     267           0 :                 synchronize_srcu(set->srcu);
     268             :         else
     269           0 :                 synchronize_rcu();
     270           0 : }
     271             : EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);
     272             : 
     273             : /**
     274             :  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
     275             :  * @q: request queue.
     276             :  *
      277             :  * Note: this function does not prevent the struct request end_io()
      278             :  * callback from being invoked. Once this function returns, we make
     279             :  * sure no dispatch can happen until the queue is unquiesced via
     280             :  * blk_mq_unquiesce_queue().
     281             :  */
     282           0 : void blk_mq_quiesce_queue(struct request_queue *q)
     283             : {
     284           0 :         blk_mq_quiesce_queue_nowait(q);
     285             :         /* nothing to wait for non-mq queues */
     286           0 :         if (queue_is_mq(q))
     287           0 :                 blk_mq_wait_quiesce_done(q->tag_set);
     288           0 : }
     289             : EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
     290             : 
     291             : /*
     292             :  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
     293             :  * @q: request queue.
     294             :  *
      295             :  * This function restores the queue to the state it was in before it
      296             :  * was quiesced by blk_mq_quiesce_queue().
     297             :  */
     298           0 : void blk_mq_unquiesce_queue(struct request_queue *q)
     299             : {
     300             :         unsigned long flags;
     301           0 :         bool run_queue = false;
     302             : 
     303           0 :         spin_lock_irqsave(&q->queue_lock, flags);
     304           0 :         if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
     305             :                 ;
     306           0 :         } else if (!--q->quiesce_depth) {
     307           0 :                 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
     308           0 :                 run_queue = true;
     309             :         }
     310           0 :         spin_unlock_irqrestore(&q->queue_lock, flags);
     311             : 
     312             :         /* dispatch requests which are inserted during quiescing */
     313           0 :         if (run_queue)
     314           0 :                 blk_mq_run_hw_queues(q, true);
     315           0 : }
     316             : EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
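
As the comments above describe, quiescing only pauses dispatch (waiting out an RCU or SRCU grace period) while freezing also drains the queue; requests can still be allocated and inserted while a queue is quiesced. A hedged caller-side sketch of the quiesce/unquiesce pairing (the surrounding driver function is hypothetical):

        static void example_pause_dispatch(struct request_queue *q)
        {
                /* Set QUEUE_FLAG_QUIESCED and wait for in-progress dispatches to finish. */
                blk_mq_quiesce_queue(q);

                /* ... driver state can be updated here without racing against ->queue_rq() ... */

                /* Clear the flag and re-run the hardware queues to dispatch deferred requests. */
                blk_mq_unquiesce_queue(q);
        }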
     317             : 
     318           0 : void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
     319             : {
     320             :         struct request_queue *q;
     321             : 
     322           0 :         mutex_lock(&set->tag_list_lock);
     323           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
     324           0 :                 if (!blk_queue_skip_tagset_quiesce(q))
     325           0 :                         blk_mq_quiesce_queue_nowait(q);
     326             :         }
     327           0 :         blk_mq_wait_quiesce_done(set);
     328           0 :         mutex_unlock(&set->tag_list_lock);
     329           0 : }
     330             : EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
     331             : 
     332           0 : void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
     333             : {
     334             :         struct request_queue *q;
     335             : 
     336           0 :         mutex_lock(&set->tag_list_lock);
     337           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
     338           0 :                 if (!blk_queue_skip_tagset_quiesce(q))
     339           0 :                         blk_mq_unquiesce_queue(q);
     340             :         }
     341           0 :         mutex_unlock(&set->tag_list_lock);
     342           0 : }
     343             : EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
     344             : 
     345           0 : void blk_mq_wake_waiters(struct request_queue *q)
     346             : {
     347             :         struct blk_mq_hw_ctx *hctx;
     348             :         unsigned long i;
     349             : 
     350           0 :         queue_for_each_hw_ctx(q, hctx, i)
     351           0 :                 if (blk_mq_hw_queue_mapped(hctx))
     352           0 :                         blk_mq_tag_wakeup_all(hctx->tags, true);
     353           0 : }
     354             : 
     355           0 : void blk_rq_init(struct request_queue *q, struct request *rq)
     356             : {
     357           0 :         memset(rq, 0, sizeof(*rq));
     358             : 
     359           0 :         INIT_LIST_HEAD(&rq->queuelist);
     360           0 :         rq->q = q;
     361           0 :         rq->__sector = (sector_t) -1;
     362           0 :         INIT_HLIST_NODE(&rq->hash);
     363           0 :         RB_CLEAR_NODE(&rq->rb_node);
     364           0 :         rq->tag = BLK_MQ_NO_TAG;
     365           0 :         rq->internal_tag = BLK_MQ_NO_TAG;
     366           0 :         rq->start_time_ns = ktime_get_ns();
     367           0 :         rq->part = NULL;
     368           0 :         blk_crypto_rq_set_defaults(rq);
     369           0 : }
     370             : EXPORT_SYMBOL(blk_rq_init);
     371             : 
     372           0 : static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
     373             :                 struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
     374             : {
     375           0 :         struct blk_mq_ctx *ctx = data->ctx;
     376           0 :         struct blk_mq_hw_ctx *hctx = data->hctx;
     377           0 :         struct request_queue *q = data->q;
     378           0 :         struct request *rq = tags->static_rqs[tag];
     379             : 
     380           0 :         rq->q = q;
     381           0 :         rq->mq_ctx = ctx;
     382           0 :         rq->mq_hctx = hctx;
     383           0 :         rq->cmd_flags = data->cmd_flags;
     384             : 
     385           0 :         if (data->flags & BLK_MQ_REQ_PM)
     386           0 :                 data->rq_flags |= RQF_PM;
     387           0 :         if (blk_queue_io_stat(q))
     388           0 :                 data->rq_flags |= RQF_IO_STAT;
     389           0 :         rq->rq_flags = data->rq_flags;
     390             : 
     391           0 :         if (!(data->rq_flags & RQF_ELV)) {
     392           0 :                 rq->tag = tag;
     393           0 :                 rq->internal_tag = BLK_MQ_NO_TAG;
     394             :         } else {
     395           0 :                 rq->tag = BLK_MQ_NO_TAG;
     396           0 :                 rq->internal_tag = tag;
     397             :         }
     398           0 :         rq->timeout = 0;
     399             : 
     400           0 :         if (blk_mq_need_time_stamp(rq))
     401           0 :                 rq->start_time_ns = ktime_get_ns();
     402             :         else
     403           0 :                 rq->start_time_ns = 0;
     404           0 :         rq->part = NULL;
     405             : #ifdef CONFIG_BLK_RQ_ALLOC_TIME
     406             :         rq->alloc_time_ns = alloc_time_ns;
     407             : #endif
     408           0 :         rq->io_start_time_ns = 0;
     409           0 :         rq->stats_sectors = 0;
     410           0 :         rq->nr_phys_segments = 0;
     411             : #if defined(CONFIG_BLK_DEV_INTEGRITY)
     412             :         rq->nr_integrity_segments = 0;
     413             : #endif
     414           0 :         rq->end_io = NULL;
     415           0 :         rq->end_io_data = NULL;
     416             : 
     417           0 :         blk_crypto_rq_set_defaults(rq);
     418           0 :         INIT_LIST_HEAD(&rq->queuelist);
     419             :         /* tag was already set */
     420           0 :         WRITE_ONCE(rq->deadline, 0);
     421           0 :         req_ref_set(rq, 1);
     422             : 
     423           0 :         if (rq->rq_flags & RQF_ELV) {
     424           0 :                 struct elevator_queue *e = data->q->elevator;
     425             : 
     426           0 :                 INIT_HLIST_NODE(&rq->hash);
     427           0 :                 RB_CLEAR_NODE(&rq->rb_node);
     428             : 
     429           0 :                 if (!op_is_flush(data->cmd_flags) &&
     430           0 :                     e->type->ops.prepare_request) {
     431           0 :                         e->type->ops.prepare_request(rq);
     432           0 :                         rq->rq_flags |= RQF_ELVPRIV;
     433             :                 }
     434             :         }
     435             : 
     436           0 :         return rq;
     437             : }
     438             : 
     439             : static inline struct request *
     440           0 : __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
     441             :                 u64 alloc_time_ns)
     442             : {
     443             :         unsigned int tag, tag_offset;
     444             :         struct blk_mq_tags *tags;
     445             :         struct request *rq;
     446             :         unsigned long tag_mask;
     447           0 :         int i, nr = 0;
     448             : 
     449           0 :         tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
     450           0 :         if (unlikely(!tag_mask))
     451             :                 return NULL;
     452             : 
     453           0 :         tags = blk_mq_tags_from_data(data);
     454           0 :         for (i = 0; tag_mask; i++) {
     455           0 :                 if (!(tag_mask & (1UL << i)))
     456           0 :                         continue;
     457           0 :                 tag = tag_offset + i;
     458           0 :                 prefetch(tags->static_rqs[tag]);
     459           0 :                 tag_mask &= ~(1UL << i);
     460           0 :                 rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
     461           0 :                 rq_list_add(data->cached_rq, rq);
     462           0 :                 nr++;
     463             :         }
     464             :         /* caller already holds a reference, add for remainder */
     465           0 :         percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
     466           0 :         data->nr_tags -= nr;
     467             : 
     468           0 :         return rq_list_pop(data->cached_rq);
     469             : }
     470             : 
     471           0 : static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
     472             : {
     473           0 :         struct request_queue *q = data->q;
     474           0 :         u64 alloc_time_ns = 0;
     475             :         struct request *rq;
     476             :         unsigned int tag;
     477             : 
     478             :         /* alloc_time includes depth and tag waits */
     479             :         if (blk_queue_rq_alloc_time(q))
     480             :                 alloc_time_ns = ktime_get_ns();
     481             : 
     482           0 :         if (data->cmd_flags & REQ_NOWAIT)
     483           0 :                 data->flags |= BLK_MQ_REQ_NOWAIT;
     484             : 
     485           0 :         if (q->elevator) {
     486           0 :                 struct elevator_queue *e = q->elevator;
     487             : 
     488           0 :                 data->rq_flags |= RQF_ELV;
     489             : 
     490             :                 /*
     491             :                  * Flush/passthrough requests are special and go directly to the
     492             :                  * dispatch list. Don't include reserved tags in the
     493             :                  * limiting, as it isn't useful.
     494             :                  */
     495           0 :                 if (!op_is_flush(data->cmd_flags) &&
     496           0 :                     !blk_op_is_passthrough(data->cmd_flags) &&
     497           0 :                     e->type->ops.limit_depth &&
     498           0 :                     !(data->flags & BLK_MQ_REQ_RESERVED))
     499           0 :                         e->type->ops.limit_depth(data->cmd_flags, data);
     500             :         }
     501             : 
     502             : retry:
     503           0 :         data->ctx = blk_mq_get_ctx(q);
     504           0 :         data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
     505           0 :         if (!(data->rq_flags & RQF_ELV))
     506           0 :                 blk_mq_tag_busy(data->hctx);
     507             : 
     508           0 :         if (data->flags & BLK_MQ_REQ_RESERVED)
     509           0 :                 data->rq_flags |= RQF_RESV;
     510             : 
     511             :         /*
     512             :          * Try batched alloc if we want more than 1 tag.
     513             :          */
     514           0 :         if (data->nr_tags > 1) {
     515           0 :                 rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
     516           0 :                 if (rq)
     517             :                         return rq;
     518           0 :                 data->nr_tags = 1;
     519             :         }
     520             : 
     521             :         /*
     522             :          * Waiting allocations only fail because of an inactive hctx.  In that
     523             :          * case just retry the hctx assignment and tag allocation as CPU hotplug
     524             :          * should have migrated us to an online CPU by now.
     525             :          */
     526           0 :         tag = blk_mq_get_tag(data);
     527           0 :         if (tag == BLK_MQ_NO_TAG) {
     528           0 :                 if (data->flags & BLK_MQ_REQ_NOWAIT)
     529             :                         return NULL;
     530             :                 /*
     531             :                  * Give up the CPU and sleep for a random short time to
      532             :                  * ensure that threads using a realtime scheduling class
     533             :                  * are migrated off the CPU, and thus off the hctx that
     534             :                  * is going away.
     535             :                  */
     536           0 :                 msleep(3);
     537           0 :                 goto retry;
     538             :         }
     539             : 
     540           0 :         return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
     541             :                                         alloc_time_ns);
     542             : }
     543             : 
     544           0 : static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
     545             :                                             struct blk_plug *plug,
     546             :                                             blk_opf_t opf,
     547             :                                             blk_mq_req_flags_t flags)
     548             : {
     549           0 :         struct blk_mq_alloc_data data = {
     550             :                 .q              = q,
     551             :                 .flags          = flags,
     552             :                 .cmd_flags      = opf,
     553           0 :                 .nr_tags        = plug->nr_ios,
     554           0 :                 .cached_rq      = &plug->cached_rq,
     555             :         };
     556             :         struct request *rq;
     557             : 
     558           0 :         if (blk_queue_enter(q, flags))
     559             :                 return NULL;
     560             : 
     561           0 :         plug->nr_ios = 1;
     562             : 
     563           0 :         rq = __blk_mq_alloc_requests(&data);
     564           0 :         if (unlikely(!rq))
     565           0 :                 blk_queue_exit(q);
     566             :         return rq;
     567             : }
     568             : 
     569           0 : static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
     570             :                                                    blk_opf_t opf,
     571             :                                                    blk_mq_req_flags_t flags)
     572             : {
     573           0 :         struct blk_plug *plug = current->plug;
     574             :         struct request *rq;
     575             : 
     576           0 :         if (!plug)
     577             :                 return NULL;
     578             : 
     579           0 :         if (rq_list_empty(plug->cached_rq)) {
     580           0 :                 if (plug->nr_ios == 1)
     581             :                         return NULL;
     582           0 :                 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
     583           0 :                 if (!rq)
     584             :                         return NULL;
     585             :         } else {
     586           0 :                 rq = rq_list_peek(&plug->cached_rq);
     587           0 :                 if (!rq || rq->q != q)
     588             :                         return NULL;
     589             : 
     590           0 :                 if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
     591             :                         return NULL;
     592           0 :                 if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
     593             :                         return NULL;
     594             : 
     595           0 :                 plug->cached_rq = rq_list_next(rq);
     596             :         }
     597             : 
     598           0 :         rq->cmd_flags = opf;
     599           0 :         INIT_LIST_HEAD(&rq->queuelist);
     600           0 :         return rq;
     601             : }
     602             : 
     603           0 : struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
     604             :                 blk_mq_req_flags_t flags)
     605             : {
     606             :         struct request *rq;
     607             : 
     608           0 :         rq = blk_mq_alloc_cached_request(q, opf, flags);
     609           0 :         if (!rq) {
     610           0 :                 struct blk_mq_alloc_data data = {
     611             :                         .q              = q,
     612             :                         .flags          = flags,
     613             :                         .cmd_flags      = opf,
     614             :                         .nr_tags        = 1,
     615             :                 };
     616             :                 int ret;
     617             : 
     618           0 :                 ret = blk_queue_enter(q, flags);
     619           0 :                 if (ret)
     620           0 :                         return ERR_PTR(ret);
     621             : 
     622           0 :                 rq = __blk_mq_alloc_requests(&data);
     623           0 :                 if (!rq)
     624             :                         goto out_queue_exit;
     625             :         }
     626           0 :         rq->__data_len = 0;
     627           0 :         rq->__sector = (sector_t) -1;
     628           0 :         rq->bio = rq->biotail = NULL;
     629           0 :         return rq;
     630             : out_queue_exit:
     631           0 :         blk_queue_exit(q);
     632           0 :         return ERR_PTR(-EWOULDBLOCK);
     633             : }
     634             : EXPORT_SYMBOL(blk_mq_alloc_request);
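
blk_mq_alloc_request() reports failure with ERR_PTR() rather than NULL, so callers must check the return value with IS_ERR(). A minimal sketch of that contract, assuming a passthrough-style command; the opcode choice and the surrounding function are illustrative only:

        static int example_alloc_and_free(struct request_queue *q)
        {
                struct request *rq;

                /* Without BLK_MQ_REQ_NOWAIT this may sleep while waiting for a free tag. */
                rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                /* ... set up and execute the passthrough request here ... */

                blk_mq_free_request(rq);
                return 0;
        }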
     635             : 
     636           0 : struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
     637             :         blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
     638             : {
     639           0 :         struct blk_mq_alloc_data data = {
     640             :                 .q              = q,
     641             :                 .flags          = flags,
     642             :                 .cmd_flags      = opf,
     643             :                 .nr_tags        = 1,
     644             :         };
     645           0 :         u64 alloc_time_ns = 0;
     646             :         struct request *rq;
     647             :         unsigned int cpu;
     648             :         unsigned int tag;
     649             :         int ret;
     650             : 
     651             :         /* alloc_time includes depth and tag waits */
     652             :         if (blk_queue_rq_alloc_time(q))
     653             :                 alloc_time_ns = ktime_get_ns();
     654             : 
     655             :         /*
     656             :          * If the tag allocator sleeps we could get an allocation for a
     657             :          * different hardware context.  No need to complicate the low level
     658             :          * allocator for this for the rare use case of a command tied to
     659             :          * a specific queue.
     660             :          */
     661           0 :         if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
     662           0 :             WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
     663             :                 return ERR_PTR(-EINVAL);
     664             : 
     665           0 :         if (hctx_idx >= q->nr_hw_queues)
     666             :                 return ERR_PTR(-EIO);
     667             : 
     668           0 :         ret = blk_queue_enter(q, flags);
     669           0 :         if (ret)
     670           0 :                 return ERR_PTR(ret);
     671             : 
     672             :         /*
     673             :          * Check if the hardware context is actually mapped to anything.
     674             :          * If not tell the caller that it should skip this queue.
     675             :          */
     676           0 :         ret = -EXDEV;
     677           0 :         data.hctx = xa_load(&q->hctx_table, hctx_idx);
     678           0 :         if (!blk_mq_hw_queue_mapped(data.hctx))
     679             :                 goto out_queue_exit;
     680           0 :         cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
     681           0 :         if (cpu >= nr_cpu_ids)
     682             :                 goto out_queue_exit;
     683           0 :         data.ctx = __blk_mq_get_ctx(q, cpu);
     684             : 
     685           0 :         if (!q->elevator)
     686           0 :                 blk_mq_tag_busy(data.hctx);
     687             :         else
     688           0 :                 data.rq_flags |= RQF_ELV;
     689             : 
     690           0 :         if (flags & BLK_MQ_REQ_RESERVED)
     691           0 :                 data.rq_flags |= RQF_RESV;
     692             : 
     693           0 :         ret = -EWOULDBLOCK;
     694           0 :         tag = blk_mq_get_tag(&data);
     695           0 :         if (tag == BLK_MQ_NO_TAG)
     696             :                 goto out_queue_exit;
     697           0 :         rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
     698             :                                         alloc_time_ns);
     699           0 :         rq->__data_len = 0;
     700           0 :         rq->__sector = (sector_t) -1;
     701           0 :         rq->bio = rq->biotail = NULL;
     702           0 :         return rq;
     703             : 
     704             : out_queue_exit:
     705           0 :         blk_queue_exit(q);
     706           0 :         return ERR_PTR(ret);
     707             : }
     708             : EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
     709             : 
     710           0 : static void __blk_mq_free_request(struct request *rq)
     711             : {
     712           0 :         struct request_queue *q = rq->q;
     713           0 :         struct blk_mq_ctx *ctx = rq->mq_ctx;
     714           0 :         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
     715           0 :         const int sched_tag = rq->internal_tag;
     716             : 
     717           0 :         blk_crypto_free_request(rq);
     718           0 :         blk_pm_mark_last_busy(rq);
     719           0 :         rq->mq_hctx = NULL;
     720           0 :         if (rq->tag != BLK_MQ_NO_TAG)
     721           0 :                 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
     722           0 :         if (sched_tag != BLK_MQ_NO_TAG)
     723           0 :                 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
     724           0 :         blk_mq_sched_restart(hctx);
     725           0 :         blk_queue_exit(q);
     726           0 : }
     727             : 
     728           0 : void blk_mq_free_request(struct request *rq)
     729             : {
     730           0 :         struct request_queue *q = rq->q;
     731           0 :         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
     732             : 
     733           0 :         if ((rq->rq_flags & RQF_ELVPRIV) &&
     734           0 :             q->elevator->type->ops.finish_request)
     735           0 :                 q->elevator->type->ops.finish_request(rq);
     736             : 
     737           0 :         if (rq->rq_flags & RQF_MQ_INFLIGHT)
     738             :                 __blk_mq_dec_active_requests(hctx);
     739             : 
     740           0 :         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
     741           0 :                 laptop_io_completion(q->disk->bdi);
     742             : 
     743           0 :         rq_qos_done(q, rq);
     744             : 
     745           0 :         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
     746           0 :         if (req_ref_put_and_test(rq))
     747           0 :                 __blk_mq_free_request(rq);
     748           0 : }
     749             : EXPORT_SYMBOL_GPL(blk_mq_free_request);
     750             : 
     751           0 : void blk_mq_free_plug_rqs(struct blk_plug *plug)
     752             : {
     753             :         struct request *rq;
     754             : 
     755           0 :         while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
     756           0 :                 blk_mq_free_request(rq);
     757           0 : }
     758             : 
     759           0 : void blk_dump_rq_flags(struct request *rq, char *msg)
     760             : {
     761           0 :         printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
     762             :                 rq->q->disk ? rq->q->disk->disk_name : "?",
     763             :                 (__force unsigned long long) rq->cmd_flags);
     764             : 
     765           0 :         printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
     766             :                (unsigned long long)blk_rq_pos(rq),
     767             :                blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
     768           0 :         printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
     769             :                rq->bio, rq->biotail, blk_rq_bytes(rq));
     770           0 : }
     771             : EXPORT_SYMBOL(blk_dump_rq_flags);
     772             : 
     773           0 : static void req_bio_endio(struct request *rq, struct bio *bio,
     774             :                           unsigned int nbytes, blk_status_t error)
     775             : {
     776           0 :         if (unlikely(error)) {
     777           0 :                 bio->bi_status = error;
     778           0 :         } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
     779             :                 /*
     780             :                  * Partial zone append completions cannot be supported as the
     781             :                  * BIO fragments may end up not being written sequentially.
     782             :                  */
     783           0 :                 if (bio->bi_iter.bi_size != nbytes)
     784           0 :                         bio->bi_status = BLK_STS_IOERR;
     785             :                 else
     786           0 :                         bio->bi_iter.bi_sector = rq->__sector;
     787             :         }
     788             : 
     789           0 :         bio_advance(bio, nbytes);
     790             : 
     791           0 :         if (unlikely(rq->rq_flags & RQF_QUIET))
     792             :                 bio_set_flag(bio, BIO_QUIET);
     793             :         /* don't actually finish bio if it's part of flush sequence */
     794           0 :         if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
     795           0 :                 bio_endio(bio);
     796           0 : }
     797             : 
     798           0 : static void blk_account_io_completion(struct request *req, unsigned int bytes)
     799             : {
     800           0 :         if (req->part && blk_do_io_stat(req)) {
     801           0 :                 const int sgrp = op_stat_group(req_op(req));
     802             : 
     803           0 :                 part_stat_lock();
     804           0 :                 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
     805           0 :                 part_stat_unlock();
     806             :         }
     807           0 : }
     808             : 
     809           0 : static void blk_print_req_error(struct request *req, blk_status_t status)
     810             : {
     811           0 :         printk_ratelimited(KERN_ERR
     812             :                 "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
     813             :                 "phys_seg %u prio class %u\n",
     814             :                 blk_status_to_str(status),
     815             :                 req->q->disk ? req->q->disk->disk_name : "?",
     816             :                 blk_rq_pos(req), (__force u32)req_op(req),
     817             :                 blk_op_str(req_op(req)),
     818             :                 (__force u32)(req->cmd_flags & ~REQ_OP_MASK),
     819             :                 req->nr_phys_segments,
     820             :                 IOPRIO_PRIO_CLASS(req->ioprio));
     821           0 : }
     822             : 
     823             : /*
     824             :  * Fully end IO on a request. Does not support partial completions, or
     825             :  * errors.
     826             :  */
     827           0 : static void blk_complete_request(struct request *req)
     828             : {
     829           0 :         const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
     830           0 :         int total_bytes = blk_rq_bytes(req);
     831           0 :         struct bio *bio = req->bio;
     832             : 
     833           0 :         trace_block_rq_complete(req, BLK_STS_OK, total_bytes);
     834             : 
     835           0 :         if (!bio)
     836             :                 return;
     837             : 
     838             : #ifdef CONFIG_BLK_DEV_INTEGRITY
     839             :         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
     840             :                 req->q->integrity.profile->complete_fn(req, total_bytes);
     841             : #endif
     842             : 
     843           0 :         blk_account_io_completion(req, total_bytes);
     844             : 
     845             :         do {
     846           0 :                 struct bio *next = bio->bi_next;
     847             : 
     848             :                 /* Completion has already been traced */
     849           0 :                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
     850             : 
     851           0 :                 if (req_op(req) == REQ_OP_ZONE_APPEND)
     852           0 :                         bio->bi_iter.bi_sector = req->__sector;
     853             : 
     854           0 :                 if (!is_flush)
     855           0 :                         bio_endio(bio);
     856           0 :                 bio = next;
     857           0 :         } while (bio);
     858             : 
     859             :         /*
     860             :          * Reset counters so that the request stacking driver
     861             :          * can find how many bytes remain in the request
     862             :          * later.
     863             :          */
     864           0 :         if (!req->end_io) {
     865           0 :                 req->bio = NULL;
     866           0 :                 req->__data_len = 0;
     867             :         }
     868             : }
     869             : 
     870             : /**
     871             :  * blk_update_request - Complete multiple bytes without completing the request
     872             :  * @req:      the request being processed
     873             :  * @error:    block status code
     874             :  * @nr_bytes: number of bytes to complete for @req
     875             :  *
     876             :  * Description:
     877             :  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
     878             :  *     the request structure even if @req doesn't have leftover.
      879             :  *     the request structure even if @req has no bytes left over.
      880             :  *     If @req has bytes left over, it is set up for the next range of segments.
     881             :  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
     882             :  *     %false return from this function.
     883             :  *
     884             :  * Note:
     885             :  *      The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
     886             :  *      except in the consistency check at the end of this function.
     887             :  *
     888             :  * Return:
     889             :  *     %false - this request doesn't have any more data
     890             :  *     %true  - this request has more data
     891             :  **/
     892           0 : bool blk_update_request(struct request *req, blk_status_t error,
     893             :                 unsigned int nr_bytes)
     894             : {
     895             :         int total_bytes;
     896             : 
     897           0 :         trace_block_rq_complete(req, error, nr_bytes);
     898             : 
     899           0 :         if (!req->bio)
     900             :                 return false;
     901             : 
     902             : #ifdef CONFIG_BLK_DEV_INTEGRITY
     903             :         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
     904             :             error == BLK_STS_OK)
     905             :                 req->q->integrity.profile->complete_fn(req, nr_bytes);
     906             : #endif
     907             : 
     908           0 :         if (unlikely(error && !blk_rq_is_passthrough(req) &&
     909           0 :                      !(req->rq_flags & RQF_QUIET)) &&
     910           0 :                      !test_bit(GD_DEAD, &req->q->disk->state)) {
     911           0 :                 blk_print_req_error(req, error);
     912           0 :                 trace_block_rq_error(req, error, nr_bytes);
     913             :         }
     914             : 
     915           0 :         blk_account_io_completion(req, nr_bytes);
     916             : 
     917           0 :         total_bytes = 0;
     918           0 :         while (req->bio) {
     919           0 :                 struct bio *bio = req->bio;
     920           0 :                 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
     921             : 
     922           0 :                 if (bio_bytes == bio->bi_iter.bi_size)
     923           0 :                         req->bio = bio->bi_next;
     924             : 
     925             :                 /* Completion has already been traced */
     926           0 :                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
     927           0 :                 req_bio_endio(req, bio, bio_bytes, error);
     928             : 
     929           0 :                 total_bytes += bio_bytes;
     930           0 :                 nr_bytes -= bio_bytes;
     931             : 
     932           0 :                 if (!nr_bytes)
     933             :                         break;
     934             :         }
     935             : 
     936             :         /*
     937             :          * completely done
     938             :          */
     939           0 :         if (!req->bio) {
     940             :                 /*
     941             :                  * Reset counters so that the request stacking driver
     942             :                  * can find how many bytes remain in the request
     943             :                  * later.
     944             :                  */
     945           0 :                 req->__data_len = 0;
     946           0 :                 return false;
     947             :         }
     948             : 
     949           0 :         req->__data_len -= total_bytes;
     950             : 
     951             :         /* update sector only for requests with clear definition of sector */
     952           0 :         if (!blk_rq_is_passthrough(req))
     953           0 :                 req->__sector += total_bytes >> 9;
     954             : 
     955             :         /* mixed attributes always follow the first bio */
     956           0 :         if (req->rq_flags & RQF_MIXED_MERGE) {
     957           0 :                 req->cmd_flags &= ~REQ_FAILFAST_MASK;
     958           0 :                 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
     959             :         }
     960             : 
     961           0 :         if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
     962             :                 /*
     963             :                  * If total number of sectors is less than the first segment
     964             :                  * size, something has gone terribly wrong.
     965             :                  */
     966           0 :                 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
     967           0 :                         blk_dump_rq_flags(req, "request botched");
     968           0 :                         req->__data_len = blk_rq_cur_bytes(req);
     969             :                 }
     970             : 
     971             :                 /* recalculate the number of segments */
     972           0 :                 req->nr_phys_segments = blk_recalc_rq_segments(req);
     973             :         }
     974             : 
     975             :         return true;
     976             : }
     977             : EXPORT_SYMBOL_GPL(blk_update_request);
     978             : 
     979           0 : static void __blk_account_io_done(struct request *req, u64 now)
     980             : {
     981           0 :         const int sgrp = op_stat_group(req_op(req));
     982             : 
     983           0 :         part_stat_lock();
     984           0 :         update_io_ticks(req->part, jiffies, true);
     985           0 :         part_stat_inc(req->part, ios[sgrp]);
     986           0 :         part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
     987           0 :         part_stat_unlock();
     988           0 : }
     989             : 
     990           0 : static inline void blk_account_io_done(struct request *req, u64 now)
     991             : {
     992             :         /*
     993             :          * Account IO completion.  flush_rq isn't accounted as a
     994             :          * normal IO on queueing nor completion.  Accounting the
     995             :          * containing request is enough.
     996             :          */
     997           0 :         if (blk_do_io_stat(req) && req->part &&
     998           0 :             !(req->rq_flags & RQF_FLUSH_SEQ))
     999           0 :                 __blk_account_io_done(req, now);
    1000           0 : }
    1001             : 
    1002           0 : static void __blk_account_io_start(struct request *rq)
    1003             : {
    1004             :         /*
    1005             :          * All non-passthrough requests are created from a bio with one
    1006             :          * exception: when a flush command that is part of a flush sequence
    1007             :          * generated by the state machine in blk-flush.c is cloned onto the
    1008             :          * lower device by dm-multipath we can get here without a bio.
    1009             :          */
    1010           0 :         if (rq->bio)
    1011           0 :                 rq->part = rq->bio->bi_bdev;
    1012             :         else
    1013           0 :                 rq->part = rq->q->disk->part0;
    1014             : 
    1015           0 :         part_stat_lock();
    1016           0 :         update_io_ticks(rq->part, jiffies, false);
    1017           0 :         part_stat_unlock();
    1018           0 : }
    1019             : 
    1020           0 : static inline void blk_account_io_start(struct request *req)
    1021             : {
    1022           0 :         if (blk_do_io_stat(req))
    1023           0 :                 __blk_account_io_start(req);
    1024           0 : }
    1025             : 
    1026           0 : static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
    1027             : {
    1028           0 :         if (rq->rq_flags & RQF_STATS) {
    1029           0 :                 blk_mq_poll_stats_start(rq->q);
    1030           0 :                 blk_stat_add(rq, now);
    1031             :         }
    1032             : 
    1033           0 :         blk_mq_sched_completed_request(rq, now);
    1034           0 :         blk_account_io_done(rq, now);
    1035           0 : }
    1036             : 
    1037           0 : inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
    1038             : {
    1039           0 :         if (blk_mq_need_time_stamp(rq))
    1040           0 :                 __blk_mq_end_request_acct(rq, ktime_get_ns());
    1041             : 
    1042           0 :         if (rq->end_io) {
    1043           0 :                 rq_qos_done(rq->q, rq);
    1044           0 :                 if (rq->end_io(rq, error) == RQ_END_IO_FREE)
    1045           0 :                         blk_mq_free_request(rq);
    1046             :         } else {
    1047           0 :                 blk_mq_free_request(rq);
    1048             :         }
    1049           0 : }
    1050             : EXPORT_SYMBOL(__blk_mq_end_request);
    1051             : 
    1052           0 : void blk_mq_end_request(struct request *rq, blk_status_t error)
    1053             : {
    1054           0 :         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
    1055           0 :                 BUG();
    1056           0 :         __blk_mq_end_request(rq, error);
    1057           0 : }
    1058             : EXPORT_SYMBOL(blk_mq_end_request);
    1059             : 
    1060             : #define TAG_COMP_BATCH          32
    1061             : 
    1062           0 : static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
    1063             :                                           int *tag_array, int nr_tags)
    1064             : {
    1065           0 :         struct request_queue *q = hctx->queue;
    1066             : 
    1067             :         /*
    1068             :          * All requests should have been marked as RQF_MQ_INFLIGHT, so
    1069             :          * update hctx->nr_active in batch
    1070             :          */
    1071           0 :         if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
    1072             :                 __blk_mq_sub_active_requests(hctx, nr_tags);
    1073             : 
    1074           0 :         blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
    1075           0 :         percpu_ref_put_many(&q->q_usage_counter, nr_tags);
    1076           0 : }
    1077             : 
    1078           0 : void blk_mq_end_request_batch(struct io_comp_batch *iob)
    1079             : {
    1080           0 :         int tags[TAG_COMP_BATCH], nr_tags = 0;
    1081           0 :         struct blk_mq_hw_ctx *cur_hctx = NULL;
    1082             :         struct request *rq;
    1083           0 :         u64 now = 0;
    1084             : 
    1085           0 :         if (iob->need_ts)
    1086           0 :                 now = ktime_get_ns();
    1087             : 
    1088           0 :         while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
    1089           0 :                 prefetch(rq->bio);
    1090           0 :                 prefetch(rq->rq_next);
    1091             : 
    1092           0 :                 blk_complete_request(rq);
    1093           0 :                 if (iob->need_ts)
    1094           0 :                         __blk_mq_end_request_acct(rq, now);
    1095             : 
    1096           0 :                 rq_qos_done(rq->q, rq);
    1097             : 
    1098             :                 /*
    1099             :                  * If end_io handler returns NONE, then it still has
    1100             :                  * ownership of the request.
    1101             :                  */
    1102           0 :                 if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
    1103           0 :                         continue;
    1104             : 
    1105           0 :                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
    1106           0 :                 if (!req_ref_put_and_test(rq))
    1107           0 :                         continue;
    1108             : 
    1109           0 :                 blk_crypto_free_request(rq);
    1110           0 :                 blk_pm_mark_last_busy(rq);
    1111             : 
    1112           0 :                 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
    1113           0 :                         if (cur_hctx)
    1114           0 :                                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
    1115           0 :                         nr_tags = 0;
    1116           0 :                         cur_hctx = rq->mq_hctx;
    1117             :                 }
    1118           0 :                 tags[nr_tags++] = rq->tag;
    1119             :         }
    1120             : 
    1121           0 :         if (nr_tags)
    1122           0 :                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
    1123           0 : }
    1124             : EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
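                      : 
                      : /*
                      :  * Illustrative sketch, not part of blk-mq.c: a polled driver might feed
                      :  * blk_mq_end_request_batch() roughly like this.  The my_drv_* helpers are
                      :  * hypothetical, and blk_mq_add_to_batch() is assumed to keep its usual
                      :  * <linux/blk-mq.h> signature.
                      :  */
                      : #if 0	/* example only */
                      : static void my_drv_complete_batch(struct io_comp_batch *iob)
                      : {
                      : 	/* driver-side teardown would go here, then free the batched requests */
                      : 	blk_mq_end_request_batch(iob);
                      : }
                      : 
                      : static int my_drv_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
                      : {
                      : 	struct request *rq;
                      : 	int found = 0;
                      : 
                      : 	while ((rq = my_drv_reap_completion(hctx)) != NULL) {
                      : 		found++;
                      : 		/* batch if possible, otherwise complete this request directly */
                      : 		if (!blk_mq_add_to_batch(rq, iob, my_drv_rq_error(rq),
                      : 					 my_drv_complete_batch))
                      : 			blk_mq_complete_request(rq);
                      : 	}
                      : 	return found;
                      : }
                      : #endif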
    1125             : 
    1126           0 : static void blk_complete_reqs(struct llist_head *list)
    1127             : {
    1128           0 :         struct llist_node *entry = llist_reverse_order(llist_del_all(list));
    1129             :         struct request *rq, *next;
    1130             : 
    1131           0 :         llist_for_each_entry_safe(rq, next, entry, ipi_list)
    1132           0 :                 rq->q->mq_ops->complete(rq);
    1133           0 : }
    1134             : 
    1135           0 : static __latent_entropy void blk_done_softirq(struct softirq_action *h)
    1136             : {
    1137           0 :         blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
    1138           0 : }
    1139             : 
    1140           0 : static int blk_softirq_cpu_dead(unsigned int cpu)
    1141             : {
    1142           0 :         blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
    1143           0 :         return 0;
    1144             : }
    1145             : 
    1146             : static void __blk_mq_complete_request_remote(void *data)
    1147             : {
    1148             :         __raise_softirq_irqoff(BLOCK_SOFTIRQ);
    1149             : }
    1150             : 
    1151             : static inline bool blk_mq_complete_need_ipi(struct request *rq)
    1152             : {
    1153           0 :         int cpu = raw_smp_processor_id();
    1154             : 
    1155             :         if (!IS_ENABLED(CONFIG_SMP) ||
    1156             :             !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
    1157             :                 return false;
    1158             :         /*
    1159             :          * With force threaded interrupts enabled, raising softirq from an SMP
    1160             :          * function call will always result in waking the ksoftirqd thread.
    1161             :          * This is probably worse than completing the request on a different
    1162             :          * cache domain.
    1163             :          */
    1164             :         if (force_irqthreads())
    1165             :                 return false;
    1166             : 
    1167             :         /* same CPU or cache domain?  Complete locally */
    1168             :         if (cpu == rq->mq_ctx->cpu ||
    1169             :             (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
    1170             :              cpus_share_cache(cpu, rq->mq_ctx->cpu)))
    1171             :                 return false;
    1172             : 
    1173             :         /* don't try to IPI to an offline CPU */
    1174             :         return cpu_online(rq->mq_ctx->cpu);
    1175             : }
    1176             : 
    1177             : static void blk_mq_complete_send_ipi(struct request *rq)
    1178             : {
    1179             :         struct llist_head *list;
    1180             :         unsigned int cpu;
    1181             : 
    1182             :         cpu = rq->mq_ctx->cpu;
    1183             :         list = &per_cpu(blk_cpu_done, cpu);
    1184             :         if (llist_add(&rq->ipi_list, list)) {
    1185             :                 INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
    1186             :                 smp_call_function_single_async(cpu, &rq->csd);
    1187             :         }
    1188             : }
    1189             : 
    1190           0 : static void blk_mq_raise_softirq(struct request *rq)
    1191             : {
    1192             :         struct llist_head *list;
    1193             : 
    1194           0 :         preempt_disable();
    1195           0 :         list = this_cpu_ptr(&blk_cpu_done);
    1196           0 :         if (llist_add(&rq->ipi_list, list))
    1197           0 :                 raise_softirq(BLOCK_SOFTIRQ);
    1198           0 :         preempt_enable();
    1199           0 : }
    1200             : 
    1201           0 : bool blk_mq_complete_request_remote(struct request *rq)
    1202             : {
    1203           0 :         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
    1204             : 
    1205             :         /*
     1206             :          * For a request whose hctx has only one ctx mapping,
     1207             :          * or for a polled request, always complete locally;
     1208             :          * it's pointless to redirect the completion.
    1209             :          */
    1210           0 :         if (rq->mq_hctx->nr_ctx == 1 ||
    1211           0 :                 rq->cmd_flags & REQ_POLLED)
    1212             :                 return false;
    1213             : 
    1214           0 :         if (blk_mq_complete_need_ipi(rq)) {
    1215             :                 blk_mq_complete_send_ipi(rq);
    1216             :                 return true;
    1217             :         }
    1218             : 
    1219           0 :         if (rq->q->nr_hw_queues == 1) {
    1220           0 :                 blk_mq_raise_softirq(rq);
    1221           0 :                 return true;
    1222             :         }
    1223             :         return false;
    1224             : }
    1225             : EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
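                      : 
                      : /*
                      :  * Illustrative sketch, not part of blk-mq.c: a driver that wants to skip the
                      :  * indirect ->complete() call on the local fast path may use the remote helper
                      :  * directly; my_drv_complete_rq() is a hypothetical completion handler.
                      :  */
                      : #if 0	/* example only */
                      : static void my_drv_handle_completion(struct request *rq)
                      : {
                      : 	if (!blk_mq_complete_request_remote(rq))
                      : 		my_drv_complete_rq(rq);	/* complete in the caller's context */
                      : }
                      : #endif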
    1226             : 
    1227             : /**
    1228             :  * blk_mq_complete_request - end I/O on a request
    1229             :  * @rq:         the request being processed
    1230             :  *
    1231             :  * Description:
     1232             :  *      Complete a request by scheduling the ->complete operation.
    1233             :  **/
    1234           0 : void blk_mq_complete_request(struct request *rq)
    1235             : {
    1236           0 :         if (!blk_mq_complete_request_remote(rq))
    1237           0 :                 rq->q->mq_ops->complete(rq);
    1238           0 : }
    1239             : EXPORT_SYMBOL(blk_mq_complete_request);
    1240             : 
    1241             : /**
    1242             :  * blk_mq_start_request - Start processing a request
    1243             :  * @rq: Pointer to request to be started
    1244             :  *
    1245             :  * Function used by device drivers to notify the block layer that a request
     1246             :  * is going to be processed now, so the block layer can do proper initializations
    1247             :  * such as starting the timeout timer.
    1248             :  */
    1249           0 : void blk_mq_start_request(struct request *rq)
    1250             : {
    1251           0 :         struct request_queue *q = rq->q;
    1252             : 
    1253           0 :         trace_block_rq_issue(rq);
    1254             : 
    1255           0 :         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
    1256           0 :                 rq->io_start_time_ns = ktime_get_ns();
    1257           0 :                 rq->stats_sectors = blk_rq_sectors(rq);
    1258           0 :                 rq->rq_flags |= RQF_STATS;
    1259           0 :                 rq_qos_issue(q, rq);
    1260             :         }
    1261             : 
    1262           0 :         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
    1263             : 
    1264           0 :         blk_add_timer(rq);
    1265           0 :         WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
    1266             : 
    1267             : #ifdef CONFIG_BLK_DEV_INTEGRITY
    1268             :         if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
    1269             :                 q->integrity.profile->prepare_fn(rq);
    1270             : #endif
    1271           0 :         if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
    1272           0 :                 WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
    1273           0 : }
    1274             : EXPORT_SYMBOL(blk_mq_start_request);
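                      : 
                      : /*
                      :  * Illustrative sketch, not part of blk-mq.c: a minimal ->queue_rq()
                      :  * implementation calls blk_mq_start_request() before handing the request to
                      :  * hardware; my_drv_submit() is a hypothetical helper.
                      :  */
                      : #if 0	/* example only */
                      : static blk_status_t my_drv_queue_rq(struct blk_mq_hw_ctx *hctx,
                      : 				    const struct blk_mq_queue_data *bd)
                      : {
                      : 	struct request *rq = bd->rq;
                      : 
                      : 	blk_mq_start_request(rq);
                      : 	if (my_drv_submit(rq))
                      : 		return BLK_STS_RESOURCE;	/* ask the core to retry later */
                      : 	return BLK_STS_OK;
                      : }
                      : #endif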
    1275             : 
    1276             : /*
    1277             :  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
    1278             :  * queues. This is important for md arrays to benefit from merging
    1279             :  * requests.
    1280             :  */
    1281             : static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
    1282             : {
    1283           0 :         if (plug->multiple_queues)
    1284             :                 return BLK_MAX_REQUEST_COUNT * 2;
    1285             :         return BLK_MAX_REQUEST_COUNT;
    1286             : }
    1287             : 
    1288           0 : static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
    1289             : {
    1290           0 :         struct request *last = rq_list_peek(&plug->mq_list);
    1291             : 
    1292           0 :         if (!plug->rq_count) {
    1293             :                 trace_block_plug(rq->q);
    1294           0 :         } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
    1295           0 :                    (!blk_queue_nomerges(rq->q) &&
    1296           0 :                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
    1297           0 :                 blk_mq_flush_plug_list(plug, false);
    1298           0 :                 last = NULL;
    1299           0 :                 trace_block_plug(rq->q);
    1300             :         }
    1301             : 
    1302           0 :         if (!plug->multiple_queues && last && last->q != rq->q)
    1303           0 :                 plug->multiple_queues = true;
    1304           0 :         if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
    1305           0 :                 plug->has_elevator = true;
    1306           0 :         rq->rq_next = NULL;
    1307           0 :         rq_list_add(&plug->mq_list, rq);
    1308           0 :         plug->rq_count++;
    1309           0 : }
    1310             : 
    1311             : /**
     1312             :  * blk_execute_rq_nowait - insert a request into the I/O scheduler for execution
    1313             :  * @rq:         request to insert
    1314             :  * @at_head:    insert request at head or tail of queue
    1315             :  *
    1316             :  * Description:
    1317             :  *    Insert a fully prepared request at the back of the I/O scheduler queue
    1318             :  *    for execution.  Don't wait for completion.
    1319             :  *
    1320             :  * Note:
     1321             :  *    This function will invoke rq->end_io directly if the queue is dead.
    1322             :  */
    1323           0 : void blk_execute_rq_nowait(struct request *rq, bool at_head)
    1324             : {
    1325           0 :         WARN_ON(irqs_disabled());
    1326           0 :         WARN_ON(!blk_rq_is_passthrough(rq));
    1327             : 
    1328           0 :         blk_account_io_start(rq);
    1329             : 
    1330             :         /*
    1331             :          * As plugging can be enabled for passthrough requests on a zoned
    1332             :          * device, directly accessing the plug instead of using blk_mq_plug()
    1333             :          * should not have any consequences.
    1334             :          */
    1335           0 :         if (current->plug)
    1336           0 :                 blk_add_rq_to_plug(current->plug, rq);
    1337             :         else
    1338           0 :                 blk_mq_sched_insert_request(rq, at_head, true, false);
    1339           0 : }
    1340             : EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
    1341             : 
    1342             : struct blk_rq_wait {
    1343             :         struct completion done;
    1344             :         blk_status_t ret;
    1345             : };
    1346             : 
    1347           0 : static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
    1348             : {
    1349           0 :         struct blk_rq_wait *wait = rq->end_io_data;
    1350             : 
    1351           0 :         wait->ret = ret;
    1352           0 :         complete(&wait->done);
    1353           0 :         return RQ_END_IO_NONE;
    1354             : }
    1355             : 
    1356           0 : bool blk_rq_is_poll(struct request *rq)
    1357             : {
    1358           0 :         if (!rq->mq_hctx)
    1359             :                 return false;
    1360           0 :         if (rq->mq_hctx->type != HCTX_TYPE_POLL)
    1361             :                 return false;
    1362           0 :         if (WARN_ON_ONCE(!rq->bio))
    1363             :                 return false;
    1364           0 :         return true;
    1365             : }
    1366             : EXPORT_SYMBOL_GPL(blk_rq_is_poll);
    1367             : 
    1368           0 : static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
    1369             : {
    1370             :         do {
    1371           0 :                 bio_poll(rq->bio, NULL, 0);
    1372           0 :                 cond_resched();
    1373           0 :         } while (!completion_done(wait));
    1374           0 : }
    1375             : 
    1376             : /**
    1377             :  * blk_execute_rq - insert a request into queue for execution
    1378             :  * @rq:         request to insert
    1379             :  * @at_head:    insert request at head or tail of queue
    1380             :  *
    1381             :  * Description:
    1382             :  *    Insert a fully prepared request at the back of the I/O scheduler queue
    1383             :  *    for execution and wait for completion.
    1384             :  * Return: The blk_status_t result provided to blk_mq_end_request().
    1385             :  */
    1386           0 : blk_status_t blk_execute_rq(struct request *rq, bool at_head)
    1387             : {
    1388           0 :         struct blk_rq_wait wait = {
    1389           0 :                 .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
    1390             :         };
    1391             : 
    1392           0 :         WARN_ON(irqs_disabled());
    1393           0 :         WARN_ON(!blk_rq_is_passthrough(rq));
    1394             : 
    1395           0 :         rq->end_io_data = &wait;
    1396           0 :         rq->end_io = blk_end_sync_rq;
    1397             : 
    1398           0 :         blk_account_io_start(rq);
    1399           0 :         blk_mq_sched_insert_request(rq, at_head, true, false);
    1400             : 
    1401           0 :         if (blk_rq_is_poll(rq)) {
    1402           0 :                 blk_rq_poll_completion(rq, &wait.done);
    1403             :         } else {
    1404             :                 /*
    1405             :                  * Prevent hang_check timer from firing at us during very long
    1406             :                  * I/O
    1407             :                  */
    1408           0 :                 unsigned long hang_check = sysctl_hung_task_timeout_secs;
    1409             : 
    1410             :                 if (hang_check)
    1411             :                         while (!wait_for_completion_io_timeout(&wait.done,
    1412             :                                         hang_check * (HZ/2)))
    1413             :                                 ;
    1414             :                 else
    1415           0 :                         wait_for_completion_io(&wait.done);
    1416             :         }
    1417             : 
    1418           0 :         return wait.ret;
    1419             : }
    1420             : EXPORT_SYMBOL(blk_execute_rq);
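                      : 
                      : /*
                      :  * Illustrative sketch, not part of blk-mq.c: synchronous execution of a
                      :  * passthrough request, roughly as ioctl-style callers do it; the
                      :  * my_drv_setup_cmd() helper is hypothetical.
                      :  */
                      : #if 0	/* example only */
                      : static int my_drv_send_sync_cmd(struct request_queue *q)
                      : {
                      : 	struct request *rq;
                      : 	blk_status_t status;
                      : 
                      : 	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
                      : 	if (IS_ERR(rq))
                      : 		return PTR_ERR(rq);
                      : 
                      : 	my_drv_setup_cmd(rq);			/* fill in the driver payload */
                      : 	status = blk_execute_rq(rq, false);	/* insert at tail and wait */
                      : 	blk_mq_free_request(rq);
                      : 
                      : 	return blk_status_to_errno(status);
                      : }
                      : #endif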
    1421             : 
    1422           0 : static void __blk_mq_requeue_request(struct request *rq)
    1423             : {
    1424           0 :         struct request_queue *q = rq->q;
    1425             : 
    1426           0 :         blk_mq_put_driver_tag(rq);
    1427             : 
    1428           0 :         trace_block_rq_requeue(rq);
    1429           0 :         rq_qos_requeue(q, rq);
    1430             : 
    1431           0 :         if (blk_mq_request_started(rq)) {
    1432           0 :                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
    1433           0 :                 rq->rq_flags &= ~RQF_TIMED_OUT;
    1434             :         }
    1435           0 : }
    1436             : 
    1437           0 : void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
    1438             : {
    1439           0 :         __blk_mq_requeue_request(rq);
    1440             : 
    1441             :         /* this request will be re-inserted to io scheduler queue */
    1442           0 :         blk_mq_sched_requeue_request(rq);
    1443             : 
    1444           0 :         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
    1445           0 : }
    1446             : EXPORT_SYMBOL(blk_mq_requeue_request);
    1447             : 
    1448           0 : static void blk_mq_requeue_work(struct work_struct *work)
    1449             : {
    1450           0 :         struct request_queue *q =
    1451           0 :                 container_of(work, struct request_queue, requeue_work.work);
    1452           0 :         LIST_HEAD(rq_list);
    1453             :         struct request *rq, *next;
    1454             : 
    1455           0 :         spin_lock_irq(&q->requeue_lock);
    1456           0 :         list_splice_init(&q->requeue_list, &rq_list);
    1457           0 :         spin_unlock_irq(&q->requeue_lock);
    1458             : 
    1459           0 :         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
    1460           0 :                 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
    1461           0 :                         continue;
    1462             : 
    1463           0 :                 rq->rq_flags &= ~RQF_SOFTBARRIER;
    1464           0 :                 list_del_init(&rq->queuelist);
    1465             :                 /*
     1466             :                  * If RQF_DONTPREP is set, rq already contains driver-specific
     1467             :                  * data, so insert it into the hctx dispatch list to avoid any
     1468             :                  * merging.
    1469             :                  */
    1470           0 :                 if (rq->rq_flags & RQF_DONTPREP)
    1471           0 :                         blk_mq_request_bypass_insert(rq, false, false);
    1472             :                 else
    1473           0 :                         blk_mq_sched_insert_request(rq, true, false, false);
    1474             :         }
    1475             : 
    1476           0 :         while (!list_empty(&rq_list)) {
    1477           0 :                 rq = list_entry(rq_list.next, struct request, queuelist);
    1478           0 :                 list_del_init(&rq->queuelist);
    1479           0 :                 blk_mq_sched_insert_request(rq, false, false, false);
    1480             :         }
    1481             : 
    1482           0 :         blk_mq_run_hw_queues(q, false);
    1483           0 : }
    1484             : 
    1485           0 : void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
    1486             :                                 bool kick_requeue_list)
    1487             : {
    1488           0 :         struct request_queue *q = rq->q;
    1489             :         unsigned long flags;
    1490             : 
    1491             :         /*
    1492             :          * We abuse this flag that is otherwise used by the I/O scheduler to
    1493             :          * request head insertion from the workqueue.
    1494             :          */
    1495           0 :         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
    1496             : 
    1497           0 :         spin_lock_irqsave(&q->requeue_lock, flags);
    1498           0 :         if (at_head) {
    1499           0 :                 rq->rq_flags |= RQF_SOFTBARRIER;
    1500           0 :                 list_add(&rq->queuelist, &q->requeue_list);
    1501             :         } else {
    1502           0 :                 list_add_tail(&rq->queuelist, &q->requeue_list);
    1503             :         }
    1504           0 :         spin_unlock_irqrestore(&q->requeue_lock, flags);
    1505             : 
    1506           0 :         if (kick_requeue_list)
    1507             :                 blk_mq_kick_requeue_list(q);
    1508           0 : }
    1509             : 
    1510           0 : void blk_mq_kick_requeue_list(struct request_queue *q)
    1511             : {
    1512           0 :         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
    1513           0 : }
    1514             : EXPORT_SYMBOL(blk_mq_kick_requeue_list);
    1515             : 
    1516           0 : void blk_mq_delay_kick_requeue_list(struct request_queue *q,
    1517             :                                     unsigned long msecs)
    1518             : {
    1519           0 :         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
    1520             :                                     msecs_to_jiffies(msecs));
    1521           0 : }
    1522             : EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
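                      : 
                      : /*
                      :  * Illustrative sketch, not part of blk-mq.c: a driver that hits a transient
                      :  * condition after starting a request can park it on the requeue list and have
                      :  * it retried shortly afterwards.
                      :  */
                      : #if 0	/* example only */
                      : static void my_drv_retry_later(struct request *rq)
                      : {
                      : 	blk_mq_requeue_request(rq, false);		/* park on the requeue list */
                      : 	blk_mq_delay_kick_requeue_list(rq->q, 100);	/* re-run in ~100 ms */
                      : }
                      : #endif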
    1523             : 
    1524           0 : static bool blk_mq_rq_inflight(struct request *rq, void *priv)
    1525             : {
    1526             :         /*
     1527             :          * If we find a request that isn't idle we know the queue is busy;
     1528             :          * the state is checked while iterating over the tags.
    1529             :          * Return false to stop the iteration.
    1530             :          */
    1531           0 :         if (blk_mq_request_started(rq)) {
    1532           0 :                 bool *busy = priv;
    1533             : 
    1534           0 :                 *busy = true;
    1535           0 :                 return false;
    1536             :         }
    1537             : 
    1538             :         return true;
    1539             : }
    1540             : 
    1541           0 : bool blk_mq_queue_inflight(struct request_queue *q)
    1542             : {
    1543           0 :         bool busy = false;
    1544             : 
    1545           0 :         blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
    1546           0 :         return busy;
    1547             : }
    1548             : EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
    1549             : 
    1550           0 : static void blk_mq_rq_timed_out(struct request *req)
    1551             : {
    1552           0 :         req->rq_flags |= RQF_TIMED_OUT;
    1553           0 :         if (req->q->mq_ops->timeout) {
    1554             :                 enum blk_eh_timer_return ret;
    1555             : 
    1556           0 :                 ret = req->q->mq_ops->timeout(req);
    1557           0 :                 if (ret == BLK_EH_DONE)
    1558             :                         return;
    1559           0 :                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
    1560             :         }
    1561             : 
    1562           0 :         blk_add_timer(req);
    1563             : }
    1564             : 
    1565             : struct blk_expired_data {
    1566             :         bool has_timedout_rq;
    1567             :         unsigned long next;
    1568             :         unsigned long timeout_start;
    1569             : };
    1570             : 
    1571             : static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
    1572             : {
    1573             :         unsigned long deadline;
    1574             : 
    1575           0 :         if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
    1576             :                 return false;
    1577           0 :         if (rq->rq_flags & RQF_TIMED_OUT)
    1578             :                 return false;
    1579             : 
    1580           0 :         deadline = READ_ONCE(rq->deadline);
    1581           0 :         if (time_after_eq(expired->timeout_start, deadline))
    1582             :                 return true;
    1583             : 
    1584           0 :         if (expired->next == 0)
    1585           0 :                 expired->next = deadline;
    1586           0 :         else if (time_after(expired->next, deadline))
    1587           0 :                 expired->next = deadline;
    1588             :         return false;
    1589             : }
    1590             : 
    1591           0 : void blk_mq_put_rq_ref(struct request *rq)
    1592             : {
    1593           0 :         if (is_flush_rq(rq)) {
    1594           0 :                 if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
    1595           0 :                         blk_mq_free_request(rq);
    1596           0 :         } else if (req_ref_put_and_test(rq)) {
    1597           0 :                 __blk_mq_free_request(rq);
    1598             :         }
    1599           0 : }
    1600             : 
    1601           0 : static bool blk_mq_check_expired(struct request *rq, void *priv)
    1602             : {
    1603           0 :         struct blk_expired_data *expired = priv;
    1604             : 
    1605             :         /*
     1606             :          * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
     1607             :          * be reallocated underneath the timeout handler's processing, which
     1608             :          * makes the expiry check reliable. If the request is not expired, it
     1609             :          * was completed and reallocated as a new request after returning
     1610             :          * from blk_mq_check_expired().
    1611             :          */
    1612           0 :         if (blk_mq_req_expired(rq, expired)) {
    1613           0 :                 expired->has_timedout_rq = true;
    1614           0 :                 return false;
    1615             :         }
    1616             :         return true;
    1617             : }
    1618             : 
    1619           0 : static bool blk_mq_handle_expired(struct request *rq, void *priv)
    1620             : {
    1621           0 :         struct blk_expired_data *expired = priv;
    1622             : 
    1623           0 :         if (blk_mq_req_expired(rq, expired))
    1624           0 :                 blk_mq_rq_timed_out(rq);
    1625           0 :         return true;
    1626             : }
    1627             : 
    1628           0 : static void blk_mq_timeout_work(struct work_struct *work)
    1629             : {
    1630           0 :         struct request_queue *q =
    1631           0 :                 container_of(work, struct request_queue, timeout_work);
    1632           0 :         struct blk_expired_data expired = {
    1633             :                 .timeout_start = jiffies,
    1634             :         };
    1635             :         struct blk_mq_hw_ctx *hctx;
    1636             :         unsigned long i;
    1637             : 
    1638             :         /* A deadlock might occur if a request is stuck requiring a
     1639             :          * timeout at the same time a queue freeze is waiting for
     1640             :          * completion, since the timeout code would not be able to
    1641             :          * acquire the queue reference here.
    1642             :          *
    1643             :          * That's why we don't use blk_queue_enter here; instead, we use
    1644             :          * percpu_ref_tryget directly, because we need to be able to
    1645             :          * obtain a reference even in the short window between the queue
    1646             :          * starting to freeze, by dropping the first reference in
    1647             :          * blk_freeze_queue_start, and the moment the last request is
    1648             :          * consumed, marked by the instant q_usage_counter reaches
    1649             :          * zero.
    1650             :          */
    1651           0 :         if (!percpu_ref_tryget(&q->q_usage_counter))
    1652           0 :                 return;
    1653             : 
    1654             :         /* check if there is any timed-out request */
    1655           0 :         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
    1656           0 :         if (expired.has_timedout_rq) {
    1657             :                 /*
    1658             :                  * Before walking tags, we must ensure any submit started
    1659             :                  * before the current time has finished. Since the submit
    1660             :                  * uses srcu or rcu, wait for a synchronization point to
     1661             :                  * ensure all running submits have finished.
    1662             :                  */
    1663           0 :                 blk_mq_wait_quiesce_done(q->tag_set);
    1664             : 
    1665           0 :                 expired.next = 0;
    1666           0 :                 blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
    1667             :         }
    1668             : 
    1669           0 :         if (expired.next != 0) {
    1670           0 :                 mod_timer(&q->timeout, expired.next);
    1671             :         } else {
    1672             :                 /*
    1673             :                  * Request timeouts are handled as a forward rolling timer. If
    1674             :                  * we end up here it means that no requests are pending and
    1675             :                  * also that no request has been pending for a while. Mark
    1676             :                  * each hctx as idle.
    1677             :                  */
    1678           0 :                 queue_for_each_hw_ctx(q, hctx, i) {
    1679             :                         /* the hctx may be unmapped, so check it here */
    1680           0 :                         if (blk_mq_hw_queue_mapped(hctx))
    1681             :                                 blk_mq_tag_idle(hctx);
    1682             :                 }
    1683             :         }
    1684           0 :         blk_queue_exit(q);
    1685             : }
    1686             : 
    1687             : struct flush_busy_ctx_data {
    1688             :         struct blk_mq_hw_ctx *hctx;
    1689             :         struct list_head *list;
    1690             : };
    1691             : 
    1692           0 : static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
    1693             : {
    1694           0 :         struct flush_busy_ctx_data *flush_data = data;
    1695           0 :         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
    1696           0 :         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
    1697           0 :         enum hctx_type type = hctx->type;
    1698             : 
    1699           0 :         spin_lock(&ctx->lock);
    1700           0 :         list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
    1701           0 :         sbitmap_clear_bit(sb, bitnr);
    1702           0 :         spin_unlock(&ctx->lock);
    1703           0 :         return true;
    1704             : }
    1705             : 
    1706             : /*
    1707             :  * Process software queues that have been marked busy, splicing them
     1708             :  * to the for-dispatch list.
    1709             :  */
    1710           0 : void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
    1711             : {
    1712           0 :         struct flush_busy_ctx_data data = {
    1713             :                 .hctx = hctx,
    1714             :                 .list = list,
    1715             :         };
    1716             : 
    1717           0 :         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
    1718           0 : }
    1719             : EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
    1720             : 
    1721             : struct dispatch_rq_data {
    1722             :         struct blk_mq_hw_ctx *hctx;
    1723             :         struct request *rq;
    1724             : };
    1725             : 
    1726           0 : static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
    1727             :                 void *data)
    1728             : {
    1729           0 :         struct dispatch_rq_data *dispatch_data = data;
    1730           0 :         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
    1731           0 :         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
    1732           0 :         enum hctx_type type = hctx->type;
    1733             : 
    1734           0 :         spin_lock(&ctx->lock);
    1735           0 :         if (!list_empty(&ctx->rq_lists[type])) {
    1736           0 :                 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
    1737           0 :                 list_del_init(&dispatch_data->rq->queuelist);
    1738           0 :                 if (list_empty(&ctx->rq_lists[type]))
    1739             :                         sbitmap_clear_bit(sb, bitnr);
    1740             :         }
    1741           0 :         spin_unlock(&ctx->lock);
    1742             : 
    1743           0 :         return !dispatch_data->rq;
    1744             : }
    1745             : 
    1746           0 : struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
    1747             :                                         struct blk_mq_ctx *start)
    1748             : {
    1749           0 :         unsigned off = start ? start->index_hw[hctx->type] : 0;
    1750           0 :         struct dispatch_rq_data data = {
    1751             :                 .hctx = hctx,
    1752             :                 .rq   = NULL,
    1753             :         };
    1754             : 
    1755           0 :         __sbitmap_for_each_set(&hctx->ctx_map, off,
    1756             :                                dispatch_rq_from_ctx, &data);
    1757             : 
    1758           0 :         return data.rq;
    1759             : }
    1760             : 
    1761           0 : static bool __blk_mq_alloc_driver_tag(struct request *rq)
    1762             : {
    1763           0 :         struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
    1764           0 :         unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
    1765             :         int tag;
    1766             : 
    1767           0 :         blk_mq_tag_busy(rq->mq_hctx);
    1768             : 
    1769           0 :         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
    1770           0 :                 bt = &rq->mq_hctx->tags->breserved_tags;
    1771           0 :                 tag_offset = 0;
    1772             :         } else {
    1773           0 :                 if (!hctx_may_queue(rq->mq_hctx, bt))
    1774             :                         return false;
    1775             :         }
    1776             : 
    1777           0 :         tag = __sbitmap_queue_get(bt);
    1778           0 :         if (tag == BLK_MQ_NO_TAG)
    1779             :                 return false;
    1780             : 
    1781           0 :         rq->tag = tag + tag_offset;
    1782           0 :         return true;
    1783             : }
    1784             : 
    1785           0 : bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
    1786             : {
    1787           0 :         if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
    1788             :                 return false;
    1789             : 
    1790           0 :         if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
    1791           0 :                         !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
    1792           0 :                 rq->rq_flags |= RQF_MQ_INFLIGHT;
    1793             :                 __blk_mq_inc_active_requests(hctx);
    1794             :         }
    1795           0 :         hctx->tags->rqs[rq->tag] = rq;
    1796           0 :         return true;
    1797             : }
    1798             : 
    1799           0 : static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
    1800             :                                 int flags, void *key)
    1801             : {
    1802             :         struct blk_mq_hw_ctx *hctx;
    1803             : 
    1804           0 :         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
    1805             : 
    1806           0 :         spin_lock(&hctx->dispatch_wait_lock);
    1807           0 :         if (!list_empty(&wait->entry)) {
    1808             :                 struct sbitmap_queue *sbq;
    1809             : 
    1810           0 :                 list_del_init(&wait->entry);
    1811           0 :                 sbq = &hctx->tags->bitmap_tags;
    1812           0 :                 atomic_dec(&sbq->ws_active);
    1813             :         }
    1814           0 :         spin_unlock(&hctx->dispatch_wait_lock);
    1815             : 
    1816           0 :         blk_mq_run_hw_queue(hctx, true);
    1817           0 :         return 1;
    1818             : }
    1819             : 
    1820             : /*
    1821             :  * Mark us waiting for a tag. For shared tags, this involves hooking us into
     1822             :  * the tag wakeups. For non-shared tags, we can simply mark ourselves as
     1823             :  * needing a restart. In both cases, take care to check the condition again after
    1824             :  * marking us as waiting.
    1825             :  */
    1826           0 : static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
    1827             :                                  struct request *rq)
    1828             : {
    1829             :         struct sbitmap_queue *sbq;
    1830             :         struct wait_queue_head *wq;
    1831             :         wait_queue_entry_t *wait;
    1832             :         bool ret;
    1833             : 
    1834           0 :         if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
    1835           0 :             !(blk_mq_is_shared_tags(hctx->flags))) {
    1836           0 :                 blk_mq_sched_mark_restart_hctx(hctx);
    1837             : 
    1838             :                 /*
    1839             :                  * It's possible that a tag was freed in the window between the
    1840             :                  * allocation failure and adding the hardware queue to the wait
    1841             :                  * queue.
    1842             :                  *
    1843             :                  * Don't clear RESTART here, someone else could have set it.
    1844             :                  * At most this will cost an extra queue run.
    1845             :                  */
    1846           0 :                 return blk_mq_get_driver_tag(rq);
    1847             :         }
    1848             : 
    1849           0 :         wait = &hctx->dispatch_wait;
    1850           0 :         if (!list_empty_careful(&wait->entry))
    1851             :                 return false;
    1852             : 
    1853           0 :         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
    1854           0 :                 sbq = &hctx->tags->breserved_tags;
    1855             :         else
    1856           0 :                 sbq = &hctx->tags->bitmap_tags;
    1857           0 :         wq = &bt_wait_ptr(sbq, hctx)->wait;
    1858             : 
    1859           0 :         spin_lock_irq(&wq->lock);
    1860           0 :         spin_lock(&hctx->dispatch_wait_lock);
    1861           0 :         if (!list_empty(&wait->entry)) {
    1862           0 :                 spin_unlock(&hctx->dispatch_wait_lock);
    1863           0 :                 spin_unlock_irq(&wq->lock);
    1864           0 :                 return false;
    1865             :         }
    1866             : 
    1867           0 :         atomic_inc(&sbq->ws_active);
    1868           0 :         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
    1869           0 :         __add_wait_queue(wq, wait);
    1870             : 
    1871             :         /*
    1872             :          * It's possible that a tag was freed in the window between the
    1873             :          * allocation failure and adding the hardware queue to the wait
    1874             :          * queue.
    1875             :          */
    1876           0 :         ret = blk_mq_get_driver_tag(rq);
    1877           0 :         if (!ret) {
    1878           0 :                 spin_unlock(&hctx->dispatch_wait_lock);
    1879           0 :                 spin_unlock_irq(&wq->lock);
    1880           0 :                 return false;
    1881             :         }
    1882             : 
    1883             :         /*
    1884             :          * We got a tag, remove ourselves from the wait queue to ensure
    1885             :          * someone else gets the wakeup.
    1886             :          */
    1887           0 :         list_del_init(&wait->entry);
    1888           0 :         atomic_dec(&sbq->ws_active);
    1889           0 :         spin_unlock(&hctx->dispatch_wait_lock);
    1890           0 :         spin_unlock_irq(&wq->lock);
    1891             : 
    1892           0 :         return true;
    1893             : }
    1894             : 
    1895             : #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
    1896             : #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
    1897             : /*
     1898             :  * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
     1899             :  * - EWMA is a simple way to compute a running average value
     1900             :  * - weights of 7/8 and 1/8 are applied so that the value decays exponentially
     1901             :  * - a factor of 4 is used to avoid the result becoming too small (0); the
     1902             :  *   exact factor doesn't matter much because the EWMA decays exponentially
    1903             :  */
    1904             : static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
    1905             : {
    1906             :         unsigned int ewma;
    1907             : 
    1908           0 :         ewma = hctx->dispatch_busy;
    1909             : 
    1910           0 :         if (!ewma && !busy)
    1911             :                 return;
    1912             : 
    1913           0 :         ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
    1914             :         if (busy)
    1915           0 :                 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
    1916           0 :         ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
    1917             : 
    1918           0 :         hctx->dispatch_busy = ewma;
    1919             : }
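                      : 
                      : /*
                      :  * Worked example (illustrative only, with the weight of 8 and factor of 4
                      :  * above): each step computes ewma = (ewma * 7 + (busy ? 16 : 0)) / 8, so
                      :  * consecutive busy updates move dispatch_busy through
                      :  * 0 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 and saturate at 9
                      :  * ((9 * 7 + 16) / 8 == 9 in integer math), while idle updates decay it
                      :  * 9 -> 7 -> 6 -> 5 -> 4 -> 3 -> 2 -> 1 -> 0.
                      :  */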
    1920             : 
    1921             : #define BLK_MQ_RESOURCE_DELAY   3               /* ms units */
    1922             : 
    1923             : static void blk_mq_handle_dev_resource(struct request *rq,
    1924             :                                        struct list_head *list)
    1925             : {
    1926           0 :         list_add(&rq->queuelist, list);
    1927           0 :         __blk_mq_requeue_request(rq);
    1928             : }
    1929             : 
    1930             : static void blk_mq_handle_zone_resource(struct request *rq,
    1931             :                                         struct list_head *zone_list)
    1932             : {
    1933             :         /*
    1934             :          * If we end up here it is because we cannot dispatch a request to a
     1935             :          * specific zone due to LLD-level zone-write locking or another
     1936             :          * zone-related resource not being available. In this case, set the request
    1937             :          * aside in zone_list for retrying it later.
    1938             :          */
    1939           0 :         list_add(&rq->queuelist, zone_list);
    1940           0 :         __blk_mq_requeue_request(rq);
    1941             : }
    1942             : 
    1943             : enum prep_dispatch {
    1944             :         PREP_DISPATCH_OK,
    1945             :         PREP_DISPATCH_NO_TAG,
    1946             :         PREP_DISPATCH_NO_BUDGET,
    1947             : };
    1948             : 
    1949           0 : static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
    1950             :                                                   bool need_budget)
    1951             : {
    1952           0 :         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
    1953           0 :         int budget_token = -1;
    1954             : 
    1955           0 :         if (need_budget) {
    1956           0 :                 budget_token = blk_mq_get_dispatch_budget(rq->q);
    1957           0 :                 if (budget_token < 0) {
    1958             :                         blk_mq_put_driver_tag(rq);
    1959             :                         return PREP_DISPATCH_NO_BUDGET;
    1960             :                 }
    1961             :                 blk_mq_set_rq_budget_token(rq, budget_token);
    1962             :         }
    1963             : 
    1964           0 :         if (!blk_mq_get_driver_tag(rq)) {
    1965             :                 /*
    1966             :                  * The initial allocation attempt failed, so we need to
    1967             :                  * rerun the hardware queue when a tag is freed. The
    1968             :                  * waitqueue takes care of that. If the queue is run
    1969             :                  * before we add this entry back on the dispatch list,
    1970             :                  * we'll re-run it below.
    1971             :                  */
    1972           0 :                 if (!blk_mq_mark_tag_wait(hctx, rq)) {
    1973             :                         /*
    1974             :                          * All budgets not got from this function will be put
    1975             :                          * together during handling partial dispatch
    1976             :                          */
    1977           0 :                         if (need_budget)
    1978           0 :                                 blk_mq_put_dispatch_budget(rq->q, budget_token);
    1979             :                         return PREP_DISPATCH_NO_TAG;
    1980             :                 }
    1981             :         }
    1982             : 
    1983             :         return PREP_DISPATCH_OK;
    1984             : }
    1985             : 
     1986             : /* Release all allocated budgets before calling blk_mq_dispatch_rq_list(). */
    1987           0 : static void blk_mq_release_budgets(struct request_queue *q,
    1988             :                 struct list_head *list)
    1989             : {
    1990             :         struct request *rq;
    1991             : 
    1992           0 :         list_for_each_entry(rq, list, queuelist) {
    1993           0 :                 int budget_token = blk_mq_get_rq_budget_token(rq);
    1994             : 
    1995           0 :                 if (budget_token >= 0)
    1996             :                         blk_mq_put_dispatch_budget(q, budget_token);
    1997             :         }
    1998           0 : }
    1999             : 
    2000             : /*
     2001             :  * blk_mq_commit_rqs() notifies the driver, via bd->last, that there are no
     2002             :  * more requests. (See the comment on commit_rqs in struct blk_mq_ops for
     2003             :  * details.)
     2004             :  * Note that this must be called explicitly in the unusual cases where we:
     2005             :  *  1) did not queue everything initially scheduled to queue
     2006             :  *  2) failed the last attempt to queue a request
    2007             :  */
    2008             : static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
    2009             :                               bool from_schedule)
    2010             : {
    2011           0 :         if (hctx->queue->mq_ops->commit_rqs && queued) {
    2012           0 :                 trace_block_unplug(hctx->queue, queued, !from_schedule);
    2013           0 :                 hctx->queue->mq_ops->commit_rqs(hctx);
    2014             :         }
    2015             : }
    2016             : 
    2017             : /*
    2018             :  * Returns true if we did some work AND can potentially do more.
    2019             :  */
    2020           0 : bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
    2021             :                              unsigned int nr_budgets)
    2022             : {
    2023             :         enum prep_dispatch prep;
    2024           0 :         struct request_queue *q = hctx->queue;
    2025             :         struct request *rq;
    2026             :         int queued;
    2027           0 :         blk_status_t ret = BLK_STS_OK;
    2028           0 :         LIST_HEAD(zone_list);
    2029           0 :         bool needs_resource = false;
    2030             : 
    2031           0 :         if (list_empty(list))
    2032             :                 return false;
    2033             : 
    2034             :         /*
    2035             :          * Now process all the entries, sending them to the driver.
    2036             :          */
    2037             :         queued = 0;
    2038             :         do {
    2039             :                 struct blk_mq_queue_data bd;
    2040             : 
    2041           0 :                 rq = list_first_entry(list, struct request, queuelist);
    2042             : 
    2043           0 :                 WARN_ON_ONCE(hctx != rq->mq_hctx);
    2044           0 :                 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
    2045           0 :                 if (prep != PREP_DISPATCH_OK)
    2046             :                         break;
    2047             : 
    2048           0 :                 list_del_init(&rq->queuelist);
    2049             : 
    2050           0 :                 bd.rq = rq;
    2051           0 :                 bd.last = list_empty(list);
    2052             : 
    2053             :                 /*
     2054             :                  * Once the request is queued to the LLD, there is no need to
     2055             :                  * cover the budget any more.
    2056             :                  */
    2057           0 :                 if (nr_budgets)
    2058           0 :                         nr_budgets--;
    2059           0 :                 ret = q->mq_ops->queue_rq(hctx, &bd);
    2060           0 :                 switch (ret) {
    2061             :                 case BLK_STS_OK:
    2062           0 :                         queued++;
    2063           0 :                         break;
    2064             :                 case BLK_STS_RESOURCE:
    2065           0 :                         needs_resource = true;
    2066             :                         fallthrough;
    2067             :                 case BLK_STS_DEV_RESOURCE:
    2068           0 :                         blk_mq_handle_dev_resource(rq, list);
    2069           0 :                         goto out;
    2070             :                 case BLK_STS_ZONE_RESOURCE:
    2071             :                         /*
    2072             :                          * Move the request to zone_list and keep going through
    2073             :                          * the dispatch list to find more requests the drive can
    2074             :                          * accept.
    2075             :                          */
    2076           0 :                         blk_mq_handle_zone_resource(rq, &zone_list);
    2077           0 :                         needs_resource = true;
    2078           0 :                         break;
    2079             :                 default:
    2080           0 :                         blk_mq_end_request(rq, ret);
    2081             :                 }
    2082           0 :         } while (!list_empty(list));
    2083             : out:
    2084           0 :         if (!list_empty(&zone_list))
    2085             :                 list_splice_tail_init(&zone_list, list);
    2086             : 
    2087             :         /* If we didn't flush the entire list, we could have told the driver
    2088             :          * there was more coming, but that turned out to be a lie.
    2089             :          */
    2090           0 :         if (!list_empty(list) || ret != BLK_STS_OK)
    2091             :                 blk_mq_commit_rqs(hctx, queued, false);
    2092             : 
    2093             :         /*
    2094             :          * Any items that need requeuing? Stuff them into hctx->dispatch,
    2095             :          * that is where we will continue on next queue run.
    2096             :          */
    2097           0 :         if (!list_empty(list)) {
    2098             :                 bool needs_restart;
    2099             :                 /* For non-shared tags, the RESTART check will suffice */
    2100           0 :                 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
    2101           0 :                         ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
    2102           0 :                         blk_mq_is_shared_tags(hctx->flags));
    2103             : 
    2104           0 :                 if (nr_budgets)
    2105           0 :                         blk_mq_release_budgets(q, list);
    2106             : 
    2107           0 :                 spin_lock(&hctx->lock);
    2108           0 :                 list_splice_tail_init(list, &hctx->dispatch);
    2109           0 :                 spin_unlock(&hctx->lock);
    2110             : 
    2111             :                 /*
    2112             :                  * Order adding requests to hctx->dispatch against checking
    2113             :                  * the SCHED_RESTART flag. This smp_mb() pairs with the one
    2114             :                  * in blk_mq_sched_restart(). It prevents the restart code
    2115             :                  * path from missing the newly added requests on
    2116             :                  * hctx->dispatch while SCHED_RESTART is observed here.
    2117             :                  */
    2118           0 :                 smp_mb();
    2119             : 
    2120             :                 /*
    2121             :                  * If SCHED_RESTART was set by the caller of this function and
    2122             :                  * it is no longer set that means that it was cleared by another
    2123             :                  * thread and hence that a queue rerun is needed.
    2124             :                  *
    2125             :                  * If 'no_tag' is set, that means that we failed getting
    2126             :                  * a driver tag with an I/O scheduler attached. If our dispatch
    2127             :                  * waitqueue is no longer active, ensure that we run the queue
    2128             :                  * AFTER adding our entries back to the list.
    2129             :                  *
    2130             :                  * If no I/O scheduler has been configured it is possible that
    2131             :                  * the hardware queue got stopped and restarted before requests
    2132             :                  * were pushed back onto the dispatch list. Rerun the queue to
    2133             :                  * avoid starvation. Notes:
    2134             :                  * - blk_mq_run_hw_queue() checks whether or not a queue has
    2135             :                  *   been stopped before rerunning a queue.
    2136             :                  * - Some but not all block drivers stop a queue before
    2137             :                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
    2138             :                  *   and dm-rq.
    2139             :                  *
    2140             :                  * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
    2141             :                  * bit is set, run queue after a delay to avoid IO stalls
    2142             :                  * that could otherwise occur if the queue is idle.  We'll do
    2143             :                  * similar if we couldn't get budget or couldn't lock a zone
    2144             :                  * and SCHED_RESTART is set.
    2145             :                  */
    2146           0 :                 needs_restart = blk_mq_sched_needs_restart(hctx);
    2147           0 :                 if (prep == PREP_DISPATCH_NO_BUDGET)
    2148           0 :                         needs_resource = true;
    2149           0 :                 if (!needs_restart ||
    2150           0 :                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
    2151           0 :                         blk_mq_run_hw_queue(hctx, true);
    2152           0 :                 else if (needs_resource)
    2153             :                         blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
    2154             : 
    2155           0 :                 blk_mq_update_dispatch_busy(hctx, true);
    2156           0 :                 return false;
    2157             :         }
    2158             : 
    2159             :         blk_mq_update_dispatch_busy(hctx, false);
    2160             :         return true;
    2161             : }
    2162             : 
    2163             : /**
    2164             :  * __blk_mq_run_hw_queue - Run a hardware queue.
    2165             :  * @hctx: Pointer to the hardware queue to run.
    2166             :  *
    2167             :  * Send pending requests to the hardware.
    2168             :  */
    2169           0 : static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
    2170             : {
    2171             :         /*
    2172             :          * We can't run the queue inline with ints disabled. Ensure that
    2173             :          * we catch bad users of this early.
    2174             :          */
    2175           0 :         WARN_ON_ONCE(in_interrupt());
    2176             : 
    2177           0 :         blk_mq_run_dispatch_ops(hctx->queue,
    2178             :                         blk_mq_sched_dispatch_requests(hctx));
    2179           0 : }
    2180             : 
    2181           0 : static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
    2182             : {
    2183           0 :         int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
    2184             : 
    2185           0 :         if (cpu >= nr_cpu_ids)
    2186           0 :                 cpu = cpumask_first(hctx->cpumask);
    2187           0 :         return cpu;
    2188             : }
    2189             : 
    2190             : /*
    2191             :  * It'd be great if the workqueue API had a way to pass
    2192             :  * in a mask and had some smarts for more clever placement.
    2193             :  * For now we just round-robin here, switching for every
    2194             :  * BLK_MQ_CPU_WORK_BATCH queued items.
    2195             :  */
    2196           0 : static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
    2197             : {
    2198           0 :         bool tried = false;
    2199           0 :         int next_cpu = hctx->next_cpu;
    2200             : 
    2201           0 :         if (hctx->queue->nr_hw_queues == 1)
    2202             :                 return WORK_CPU_UNBOUND;
    2203             : 
    2204           0 :         if (--hctx->next_cpu_batch <= 0) {
    2205             : select_cpu:
    2206           0 :                 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
    2207             :                                 cpu_online_mask);
    2208           0 :                 if (next_cpu >= nr_cpu_ids)
    2209           0 :                         next_cpu = blk_mq_first_mapped_cpu(hctx);
    2210           0 :                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
    2211             :         }
    2212             : 
    2213             :         /*
    2214             :          * Do an unbound schedule if we can't find an online CPU for this
    2215             :          * hctx; this should only happen while handling the CPU DEAD path.
    2216             :          */
    2217           0 :         if (!cpu_online(next_cpu)) {
    2218           0 :                 if (!tried) {
    2219             :                         tried = true;
    2220             :                         goto select_cpu;
    2221             :                 }
    2222             : 
    2223             :                 /*
    2224             :                  * Make sure to re-select the CPU next time once the CPUs
    2225             :                  * in hctx->cpumask come online again.
    2226             :                  */
    2227           0 :                 hctx->next_cpu = next_cpu;
    2228           0 :                 hctx->next_cpu_batch = 1;
    2229           0 :                 return WORK_CPU_UNBOUND;
    2230             :         }
    2231             : 
    2232           0 :         hctx->next_cpu = next_cpu;
    2233           0 :         return next_cpu;
    2234             : }
    2235             : 
    2236             : /**
    2237             :  * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
    2238             :  * @hctx: Pointer to the hardware queue to run.
    2239             :  * @async: If we want to run the queue asynchronously.
    2240             :  * @msecs: Milliseconds of delay to wait before running the queue.
    2241             :  *
    2242             :  * If !@async, try to run the queue now. Else, run the queue asynchronously and
    2243             :  * with a delay of @msecs.
    2244             :  */
    2245           0 : static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
    2246             :                                         unsigned long msecs)
    2247             : {
    2248           0 :         if (unlikely(blk_mq_hctx_stopped(hctx)))
    2249             :                 return;
    2250             : 
    2251           0 :         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
    2252           0 :                 if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
    2253           0 :                         __blk_mq_run_hw_queue(hctx);
    2254           0 :                         return;
    2255             :                 }
    2256             :         }
    2257             : 
    2258           0 :         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
    2259             :                                     msecs_to_jiffies(msecs));
    2260             : }
    2261             : 
    2262             : /**
    2263             :  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
    2264             :  * @hctx: Pointer to the hardware queue to run.
    2265             :  * @msecs: Milliseconds of delay to wait before running the queue.
    2266             :  *
    2267             :  * Run a hardware queue asynchronously with a delay of @msecs.
    2268             :  */
    2269           0 : void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
    2270             : {
    2271           0 :         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
    2272           0 : }
    2273             : EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
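
[Editorial sketch, not part of blk-mq.c or its coverage data] A typical caller-side
pattern for the helper above, with hypothetical mydrv_* names: when ->queue_rq()
hits a transient shortage and no completion event would naturally restart dispatch,
the driver schedules a delayed rerun itself and returns BLK_STS_DEV_RESOURCE
(which tells blk-mq that the driver guarantees a future rerun):

        static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                           const struct blk_mq_queue_data *bd)
        {
                struct mydrv_queue *mq = hctx->driver_data;

                if (!mydrv_reserve_slot(mq)) {
                        /* Nothing will wake us up; poll again in a few msecs. */
                        blk_mq_delay_run_hw_queue(hctx, 3);
                        return BLK_STS_DEV_RESOURCE;
                }
                return mydrv_issue(mq, bd->rq);
        }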
    2274             : 
    2275             : /**
    2276             :  * blk_mq_run_hw_queue - Start to run a hardware queue.
    2277             :  * @hctx: Pointer to the hardware queue to run.
    2278             :  * @async: If we want to run the queue asynchronously.
    2279             :  *
    2280             :  * Check if the request queue is not in a quiesced state and if there are
    2281             :  * pending requests to be sent. If this is true, run the queue to send requests
    2282             :  * to hardware.
    2283             :  */
    2284           0 : void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
    2285             : {
    2286             :         bool need_run;
    2287             : 
    2288             :         /*
    2289             :          * When the queue is quiesced, we may be switching the io scheduler,
    2290             :          * updating nr_hw_queues, or doing other work; the queue can't be run
    2291             :          * any more, and even __blk_mq_hctx_has_pending() can't be called safely.
    2292             :          *
    2293             :          * The queue will be rerun in blk_mq_unquiesce_queue() if it is
    2294             :          * quiesced.
    2295             :          */
    2296           0 :         __blk_mq_run_dispatch_ops(hctx->queue, false,
    2297             :                 need_run = !blk_queue_quiesced(hctx->queue) &&
    2298             :                 blk_mq_hctx_has_pending(hctx));
    2299             : 
    2300           0 :         if (need_run)
    2301           0 :                 __blk_mq_delay_run_hw_queue(hctx, async, 0);
    2302           0 : }
    2303             : EXPORT_SYMBOL(blk_mq_run_hw_queue);
    2304             : 
    2305             : /*
    2306             :  * Return the preferred queue to dispatch from (if any) for a non-mq aware IO
    2307             :  * scheduler.
    2308             :  */
    2309             : static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
    2310             : {
    2311           0 :         struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
    2312             :         /*
    2313             :          * If the IO scheduler does not respect hardware queues when
    2314             :          * dispatching, we just don't bother with multiple HW queues and
    2315             :          * dispatch from hctx for the current CPU since running multiple queues
    2316             :          * just causes lock contention inside the scheduler and pointless cache
    2317             :          * bouncing.
    2318             :          */
    2319           0 :         struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
    2320             : 
    2321           0 :         if (!blk_mq_hctx_stopped(hctx))
    2322             :                 return hctx;
    2323             :         return NULL;
    2324             : }
    2325             : 
    2326             : /**
    2327             :  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
    2328             :  * @q: Pointer to the request queue to run.
    2329             :  * @async: If we want to run the queue asynchronously.
    2330             :  */
    2331           0 : void blk_mq_run_hw_queues(struct request_queue *q, bool async)
    2332             : {
    2333             :         struct blk_mq_hw_ctx *hctx, *sq_hctx;
    2334             :         unsigned long i;
    2335             : 
    2336           0 :         sq_hctx = NULL;
    2337           0 :         if (blk_queue_sq_sched(q))
    2338             :                 sq_hctx = blk_mq_get_sq_hctx(q);
    2339           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    2340           0 :                 if (blk_mq_hctx_stopped(hctx))
    2341           0 :                         continue;
    2342             :                 /*
    2343             :                  * Dispatch from this hctx either if there's no hctx preferred
    2344             :                  * by IO scheduler or if it has requests that bypass the
    2345             :                  * scheduler.
    2346             :                  */
    2347           0 :                 if (!sq_hctx || sq_hctx == hctx ||
    2348           0 :                     !list_empty_careful(&hctx->dispatch))
    2349           0 :                         blk_mq_run_hw_queue(hctx, async);
    2350             :         }
    2351           0 : }
    2352             : EXPORT_SYMBOL(blk_mq_run_hw_queues);
    2353             : 
    2354             : /**
    2355             :  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
    2356             :  * @q: Pointer to the request queue to run.
    2357             :  * @msecs: Milliseconds of delay to wait before running the queues.
    2358             :  */
    2359           0 : void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
    2360             : {
    2361             :         struct blk_mq_hw_ctx *hctx, *sq_hctx;
    2362             :         unsigned long i;
    2363             : 
    2364           0 :         sq_hctx = NULL;
    2365           0 :         if (blk_queue_sq_sched(q))
    2366             :                 sq_hctx = blk_mq_get_sq_hctx(q);
    2367           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    2368           0 :                 if (blk_mq_hctx_stopped(hctx))
    2369           0 :                         continue;
    2370             :                 /*
    2371             :                  * If there is already a run_work pending, leave the
    2372             :                  * pending delay untouched. Otherwise, a hctx can stall
    2373             :                  * if another hctx is re-delaying the other's work
    2374             :                  * before the work executes.
    2375             :                  */
    2376           0 :                 if (delayed_work_pending(&hctx->run_work))
    2377           0 :                         continue;
    2378             :                 /*
    2379             :                  * Dispatch from this hctx either if there's no hctx preferred
    2380             :                  * by IO scheduler or if it has requests that bypass the
    2381             :                  * scheduler.
    2382             :                  */
    2383           0 :                 if (!sq_hctx || sq_hctx == hctx ||
    2384           0 :                     !list_empty_careful(&hctx->dispatch))
    2385             :                         blk_mq_delay_run_hw_queue(hctx, msecs);
    2386             :         }
    2387           0 : }
    2388             : EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
    2389             : 
    2390             : /*
    2391             :  * This function is often used by a driver to pause .queue_rq() when there
    2392             :  * aren't enough resources or some other condition isn't satisfied, in which
    2393             :  * case BLK_STS_RESOURCE is usually returned.
    2394             :  *
    2395             :  * We do not guarantee that dispatch can be drained or blocked
    2396             :  * after blk_mq_stop_hw_queue() returns. Please use
    2397             :  * blk_mq_quiesce_queue() for that requirement.
    2398             :  */
    2399           0 : void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
    2400             : {
    2401           0 :         cancel_delayed_work(&hctx->run_work);
    2402             : 
    2403           0 :         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
    2404           0 : }
    2405             : EXPORT_SYMBOL(blk_mq_stop_hw_queue);
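
[Editorial sketch, not part of blk-mq.c or its coverage data] The usage pattern
described in the comment above, with hypothetical mydrv_* names: ->queue_rq()
stops the hardware queue when it runs out of resources, and the driver's
completion path restarts the stopped queues once resources have been freed:

        static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                           const struct blk_mq_queue_data *bd)
        {
                struct mydrv_queue *mq = hctx->driver_data;

                if (!mydrv_can_queue(mq)) {
                        /* Pause dispatch; the completion handler restarts us. */
                        blk_mq_stop_hw_queue(hctx);
                        return BLK_STS_RESOURCE;
                }
                return mydrv_issue(mq, bd->rq);
        }

        static void mydrv_complete(struct mydrv_queue *mq)
        {
                mydrv_free_slot(mq);
                /* Resources are available again; restart any stopped queues. */
                blk_mq_start_stopped_hw_queues(mq->request_queue, true);
        }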
    2406             : 
    2407             : /*
    2408             :  * This function is often used by a driver to pause .queue_rq() when there
    2409             :  * aren't enough resources or some other condition isn't satisfied, in which
    2410             :  * case BLK_STS_RESOURCE is usually returned.
    2411             :  *
    2412             :  * We do not guarantee that dispatch can be drained or blocked
    2413             :  * after blk_mq_stop_hw_queues() returns. Please use
    2414             :  * blk_mq_quiesce_queue() for that requirement.
    2415             :  */
    2416           0 : void blk_mq_stop_hw_queues(struct request_queue *q)
    2417             : {
    2418             :         struct blk_mq_hw_ctx *hctx;
    2419             :         unsigned long i;
    2420             : 
    2421           0 :         queue_for_each_hw_ctx(q, hctx, i)
    2422           0 :                 blk_mq_stop_hw_queue(hctx);
    2423           0 : }
    2424             : EXPORT_SYMBOL(blk_mq_stop_hw_queues);
    2425             : 
    2426           0 : void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
    2427             : {
    2428           0 :         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
    2429             : 
    2430           0 :         blk_mq_run_hw_queue(hctx, false);
    2431           0 : }
    2432             : EXPORT_SYMBOL(blk_mq_start_hw_queue);
    2433             : 
    2434           0 : void blk_mq_start_hw_queues(struct request_queue *q)
    2435             : {
    2436             :         struct blk_mq_hw_ctx *hctx;
    2437             :         unsigned long i;
    2438             : 
    2439           0 :         queue_for_each_hw_ctx(q, hctx, i)
    2440           0 :                 blk_mq_start_hw_queue(hctx);
    2441           0 : }
    2442             : EXPORT_SYMBOL(blk_mq_start_hw_queues);
    2443             : 
    2444           0 : void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
    2445             : {
    2446           0 :         if (!blk_mq_hctx_stopped(hctx))
    2447             :                 return;
    2448             : 
    2449           0 :         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
    2450           0 :         blk_mq_run_hw_queue(hctx, async);
    2451             : }
    2452             : EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
    2453             : 
    2454           0 : void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
    2455             : {
    2456             :         struct blk_mq_hw_ctx *hctx;
    2457             :         unsigned long i;
    2458             : 
    2459           0 :         queue_for_each_hw_ctx(q, hctx, i)
    2460           0 :                 blk_mq_start_stopped_hw_queue(hctx, async);
    2461           0 : }
    2462             : EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
    2463             : 
    2464           0 : static void blk_mq_run_work_fn(struct work_struct *work)
    2465             : {
    2466             :         struct blk_mq_hw_ctx *hctx;
    2467             : 
    2468           0 :         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
    2469             : 
    2470             :         /*
    2471             :          * If we are stopped, don't run the queue.
    2472             :          */
    2473           0 :         if (blk_mq_hctx_stopped(hctx))
    2474             :                 return;
    2475             : 
    2476           0 :         __blk_mq_run_hw_queue(hctx);
    2477             : }
    2478             : 
    2479             : static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
    2480             :                                             struct request *rq,
    2481             :                                             bool at_head)
    2482             : {
    2483           0 :         struct blk_mq_ctx *ctx = rq->mq_ctx;
    2484           0 :         enum hctx_type type = hctx->type;
    2485             : 
    2486             :         lockdep_assert_held(&ctx->lock);
    2487             : 
    2488           0 :         trace_block_rq_insert(rq);
    2489             : 
    2490           0 :         if (at_head)
    2491           0 :                 list_add(&rq->queuelist, &ctx->rq_lists[type]);
    2492             :         else
    2493           0 :                 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
    2494             : }
    2495             : 
    2496           0 : void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
    2497             :                              bool at_head)
    2498             : {
    2499           0 :         struct blk_mq_ctx *ctx = rq->mq_ctx;
    2500             : 
    2501             :         lockdep_assert_held(&ctx->lock);
    2502             : 
    2503           0 :         __blk_mq_insert_req_list(hctx, rq, at_head);
    2504           0 :         blk_mq_hctx_mark_pending(hctx, ctx);
    2505           0 : }
    2506             : 
    2507             : /**
    2508             :  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
    2509             :  * @rq: Pointer to request to be inserted.
    2510             :  * @at_head: true if the request should be inserted at the head of the list.
    2511             :  * @run_queue: If we should run the hardware queue after inserting the request.
    2512             :  *
    2513             :  * Should only be used carefully, when the caller knows we want to
    2514             :  * bypass a potential IO scheduler on the target device.
    2515             :  */
    2516           0 : void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
    2517             :                                   bool run_queue)
    2518             : {
    2519           0 :         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
    2520             : 
    2521           0 :         spin_lock(&hctx->lock);
    2522           0 :         if (at_head)
    2523           0 :                 list_add(&rq->queuelist, &hctx->dispatch);
    2524             :         else
    2525           0 :                 list_add_tail(&rq->queuelist, &hctx->dispatch);
    2526           0 :         spin_unlock(&hctx->lock);
    2527             : 
    2528           0 :         if (run_queue)
    2529           0 :                 blk_mq_run_hw_queue(hctx, false);
    2530           0 : }
    2531             : 
    2532           0 : void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
    2533             :                             struct list_head *list)
    2534             : 
    2535             : {
    2536             :         struct request *rq;
    2537           0 :         enum hctx_type type = hctx->type;
    2538             : 
    2539             :         /*
    2540             :          * Preemption doesn't flush the plug list, so it's possible that
    2541             :          * ctx->cpu is offline now.
    2542             :          */
    2543           0 :         list_for_each_entry(rq, list, queuelist) {
    2544           0 :                 BUG_ON(rq->mq_ctx != ctx);
    2545           0 :                 trace_block_rq_insert(rq);
    2546             :         }
    2547             : 
    2548           0 :         spin_lock(&ctx->lock);
    2549           0 :         list_splice_tail_init(list, &ctx->rq_lists[type]);
    2550           0 :         blk_mq_hctx_mark_pending(hctx, ctx);
    2551           0 :         spin_unlock(&ctx->lock);
    2552           0 : }
    2553             : 
    2554           0 : static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
    2555             :                 unsigned int nr_segs)
    2556             : {
    2557             :         int err;
    2558             : 
    2559           0 :         if (bio->bi_opf & REQ_RAHEAD)
    2560           0 :                 rq->cmd_flags |= REQ_FAILFAST_MASK;
    2561             : 
    2562           0 :         rq->__sector = bio->bi_iter.bi_sector;
    2563           0 :         blk_rq_bio_prep(rq, bio, nr_segs);
    2564             : 
    2565             :         /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
    2566           0 :         err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
    2567           0 :         WARN_ON_ONCE(err);
    2568             : 
    2569           0 :         blk_account_io_start(rq);
    2570           0 : }
    2571             : 
    2572           0 : static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
    2573             :                                             struct request *rq, bool last)
    2574             : {
    2575           0 :         struct request_queue *q = rq->q;
    2576           0 :         struct blk_mq_queue_data bd = {
    2577             :                 .rq = rq,
    2578             :                 .last = last,
    2579             :         };
    2580             :         blk_status_t ret;
    2581             : 
    2582             :         /*
    2583             :          * If queueing was OK, we are done. On a hard error, the caller may
    2584             :          * kill the request. For any other (busy) error, just add it back to
    2585             :          * our list as we previously would have done.
    2586             :          */
    2587           0 :         ret = q->mq_ops->queue_rq(hctx, &bd);
    2588           0 :         switch (ret) {
    2589             :         case BLK_STS_OK:
    2590             :                 blk_mq_update_dispatch_busy(hctx, false);
    2591             :                 break;
    2592             :         case BLK_STS_RESOURCE:
    2593             :         case BLK_STS_DEV_RESOURCE:
    2594           0 :                 blk_mq_update_dispatch_busy(hctx, true);
    2595           0 :                 __blk_mq_requeue_request(rq);
    2596           0 :                 break;
    2597             :         default:
    2598             :                 blk_mq_update_dispatch_busy(hctx, false);
    2599             :                 break;
    2600             :         }
    2601             : 
    2602           0 :         return ret;
    2603             : }
    2604             : 
    2605           0 : static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
    2606             :                                                 struct request *rq,
    2607             :                                                 bool bypass_insert, bool last)
    2608             : {
    2609           0 :         struct request_queue *q = rq->q;
    2610           0 :         bool run_queue = true;
    2611             :         int budget_token;
    2612             : 
    2613             :         /*
    2614             :          * An RCU or SRCU read lock is needed before checking the quiesced flag.
    2615             :          *
    2616             :          * When the queue is stopped or quiesced, ignore 'bypass_insert' from
    2617             :          * blk_mq_request_issue_directly(), and return BLK_STS_OK to the caller
    2618             :          * so that the caller does not try to dispatch it again.
    2619             :          */
    2620           0 :         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
    2621             :                 run_queue = false;
    2622             :                 bypass_insert = false;
    2623             :                 goto insert;
    2624             :         }
    2625             : 
    2626           0 :         if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
    2627             :                 goto insert;
    2628             : 
    2629           0 :         budget_token = blk_mq_get_dispatch_budget(q);
    2630           0 :         if (budget_token < 0)
    2631             :                 goto insert;
    2632             : 
    2633           0 :         blk_mq_set_rq_budget_token(rq, budget_token);
    2634             : 
    2635           0 :         if (!blk_mq_get_driver_tag(rq)) {
    2636             :                 blk_mq_put_dispatch_budget(q, budget_token);
    2637             :                 goto insert;
    2638             :         }
    2639             : 
    2640           0 :         return __blk_mq_issue_directly(hctx, rq, last);
    2641             : insert:
    2642           0 :         if (bypass_insert)
    2643             :                 return BLK_STS_RESOURCE;
    2644             : 
    2645           0 :         blk_mq_sched_insert_request(rq, false, run_queue, false);
    2646             : 
    2647           0 :         return BLK_STS_OK;
    2648             : }
    2649             : 
    2650             : /**
    2651             :  * blk_mq_try_issue_directly - Try to send a request directly to device driver.
    2652             :  * @hctx: Pointer of the associated hardware queue.
    2653             :  * @rq: Pointer to request to be sent.
    2654             :  *
    2655             :  * If the device has enough resources to accept a new request now, send the
    2656             :  * request directly to the device driver. Else, insert it into the hctx->dispatch
    2657             :  * queue, so we can try to send it again in the future. Requests inserted at this
    2658             :  * queue have higher priority.
    2659             :  */
    2660           0 : static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
    2661             :                 struct request *rq)
    2662             : {
    2663           0 :         blk_status_t ret =
    2664             :                 __blk_mq_try_issue_directly(hctx, rq, false, true);
    2665             : 
    2666           0 :         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
    2667           0 :                 blk_mq_request_bypass_insert(rq, false, true);
    2668           0 :         else if (ret != BLK_STS_OK)
    2669           0 :                 blk_mq_end_request(rq, ret);
    2670           0 : }
    2671             : 
    2672             : static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
    2673             : {
    2674           0 :         return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
    2675             : }
    2676             : 
    2677           0 : static void blk_mq_plug_issue_direct(struct blk_plug *plug)
    2678             : {
    2679           0 :         struct blk_mq_hw_ctx *hctx = NULL;
    2680             :         struct request *rq;
    2681           0 :         int queued = 0;
    2682           0 :         blk_status_t ret = BLK_STS_OK;
    2683             : 
    2684           0 :         while ((rq = rq_list_pop(&plug->mq_list))) {
    2685           0 :                 bool last = rq_list_empty(plug->mq_list);
    2686             : 
    2687           0 :                 if (hctx != rq->mq_hctx) {
    2688           0 :                         if (hctx) {
    2689             :                                 blk_mq_commit_rqs(hctx, queued, false);
    2690             :                                 queued = 0;
    2691             :                         }
    2692           0 :                         hctx = rq->mq_hctx;
    2693             :                 }
    2694             : 
    2695           0 :                 ret = blk_mq_request_issue_directly(rq, last);
    2696           0 :                 switch (ret) {
    2697             :                 case BLK_STS_OK:
    2698           0 :                         queued++;
    2699           0 :                         break;
    2700             :                 case BLK_STS_RESOURCE:
    2701             :                 case BLK_STS_DEV_RESOURCE:
    2702           0 :                         blk_mq_request_bypass_insert(rq, false, true);
    2703           0 :                         goto out;
    2704             :                 default:
    2705           0 :                         blk_mq_end_request(rq, ret);
    2706           0 :                         break;
    2707             :                 }
    2708             :         }
    2709             : 
    2710             : out:
    2711           0 :         if (ret != BLK_STS_OK)
    2712             :                 blk_mq_commit_rqs(hctx, queued, false);
    2713           0 : }
    2714             : 
    2715           0 : static void __blk_mq_flush_plug_list(struct request_queue *q,
    2716             :                                      struct blk_plug *plug)
    2717             : {
    2718           0 :         if (blk_queue_quiesced(q))
    2719             :                 return;
    2720           0 :         q->mq_ops->queue_rqs(&plug->mq_list);
    2721             : }
    2722             : 
    2723           0 : static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
    2724             : {
    2725           0 :         struct blk_mq_hw_ctx *this_hctx = NULL;
    2726           0 :         struct blk_mq_ctx *this_ctx = NULL;
    2727           0 :         struct request *requeue_list = NULL;
    2728           0 :         unsigned int depth = 0;
    2729           0 :         LIST_HEAD(list);
    2730             : 
    2731             :         do {
    2732           0 :                 struct request *rq = rq_list_pop(&plug->mq_list);
    2733             : 
    2734           0 :                 if (!this_hctx) {
    2735           0 :                         this_hctx = rq->mq_hctx;
    2736           0 :                         this_ctx = rq->mq_ctx;
    2737           0 :                 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
    2738           0 :                         rq_list_add(&requeue_list, rq);
    2739           0 :                         continue;
    2740             :                 }
    2741           0 :                 list_add_tail(&rq->queuelist, &list);
    2742           0 :                 depth++;
    2743           0 :         } while (!rq_list_empty(plug->mq_list));
    2744             : 
    2745           0 :         plug->mq_list = requeue_list;
    2746           0 :         trace_block_unplug(this_hctx->queue, depth, !from_sched);
    2747           0 :         blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
    2748           0 : }
    2749             : 
    2750           0 : void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
    2751             : {
    2752             :         struct request *rq;
    2753             : 
    2754           0 :         if (rq_list_empty(plug->mq_list))
    2755             :                 return;
    2756           0 :         plug->rq_count = 0;
    2757             : 
    2758           0 :         if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
    2759             :                 struct request_queue *q;
    2760             : 
    2761           0 :                 rq = rq_list_peek(&plug->mq_list);
    2762           0 :                 q = rq->q;
    2763             : 
    2764             :                 /*
    2765             :                  * Peek first request and see if we have a ->queue_rqs() hook.
    2766             :                  * If we do, we can dispatch the whole plug list in one go. We
    2767             :                  * already know at this point that all requests belong to the
    2768             :                  * same queue, caller must ensure that's the case.
    2769             :          * same queue; the caller must ensure that's the case.
    2770             :                  * Since we pass off the full list to the driver at this point,
    2771             :                  * we do not increment the active request count for the queue.
    2772             :                  * Bypass shared tags for now because of that.
    2773             :                  */
    2774           0 :                 if (q->mq_ops->queue_rqs &&
    2775           0 :                     !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
    2776           0 :                         blk_mq_run_dispatch_ops(q,
    2777             :                                 __blk_mq_flush_plug_list(q, plug));
    2778           0 :                         if (rq_list_empty(plug->mq_list))
    2779             :                                 return;
    2780             :                 }
    2781             : 
    2782           0 :                 blk_mq_run_dispatch_ops(q,
    2783             :                                 blk_mq_plug_issue_direct(plug));
    2784           0 :                 if (rq_list_empty(plug->mq_list))
    2785             :                         return;
    2786             :         }
    2787             : 
    2788             :         do {
    2789           0 :                 blk_mq_dispatch_plug_list(plug, from_schedule);
    2790           0 :         } while (!rq_list_empty(plug->mq_list));
    2791             : }
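
[Editorial sketch, not part of blk-mq.c or its coverage data] The ->queue_rqs()
hook used by the fast path above receives the whole plug list at once. A driver
issues what it can and leaves anything it could not start on the list, so blk-mq
falls back to the per-request paths for the remainder. Hypothetical mydrv_* names,
ignoring the multi-hctx case for brevity:

        static void mydrv_queue_rqs(struct request **rqlist)
        {
                struct request *rq;

                while ((rq = rq_list_pop(rqlist))) {
                        struct mydrv_queue *mq = rq->mq_hctx->driver_data;

                        if (mydrv_issue(mq, rq) != BLK_STS_OK) {
                                /* Put it back; blk-mq retries via ->queue_rq(). */
                                rq_list_add(rqlist, rq);
                                break;
                        }
                }
        }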
    2792             : 
    2793           0 : void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
    2794             :                 struct list_head *list)
    2795             : {
    2796           0 :         int queued = 0;
    2797           0 :         blk_status_t ret = BLK_STS_OK;
    2798             : 
    2799           0 :         while (!list_empty(list)) {
    2800           0 :                 struct request *rq = list_first_entry(list, struct request,
    2801             :                                 queuelist);
    2802             : 
    2803           0 :                 list_del_init(&rq->queuelist);
    2804           0 :                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
    2805           0 :                 switch (ret) {
    2806             :                 case BLK_STS_OK:
    2807           0 :                         queued++;
    2808           0 :                         break;
    2809             :                 case BLK_STS_RESOURCE:
    2810             :                 case BLK_STS_DEV_RESOURCE:
    2811           0 :                         blk_mq_request_bypass_insert(rq, false,
    2812           0 :                                                      list_empty(list));
    2813           0 :                         goto out;
    2814             :                 default:
    2815           0 :                         blk_mq_end_request(rq, ret);
    2816           0 :                         break;
    2817             :                 }
    2818             :         }
    2819             : 
    2820             : out:
    2821           0 :         if (ret != BLK_STS_OK)
    2822             :                 blk_mq_commit_rqs(hctx, queued, false);
    2823           0 : }
    2824             : 
    2825           0 : static bool blk_mq_attempt_bio_merge(struct request_queue *q,
    2826             :                                      struct bio *bio, unsigned int nr_segs)
    2827             : {
    2828           0 :         if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
    2829           0 :                 if (blk_attempt_plug_merge(q, bio, nr_segs))
    2830             :                         return true;
    2831           0 :                 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
    2832             :                         return true;
    2833             :         }
    2834             :         return false;
    2835             : }
    2836             : 
    2837           0 : static struct request *blk_mq_get_new_requests(struct request_queue *q,
    2838             :                                                struct blk_plug *plug,
    2839             :                                                struct bio *bio,
    2840             :                                                unsigned int nsegs)
    2841             : {
    2842           0 :         struct blk_mq_alloc_data data = {
    2843             :                 .q              = q,
    2844             :                 .nr_tags        = 1,
    2845           0 :                 .cmd_flags      = bio->bi_opf,
    2846             :         };
    2847             :         struct request *rq;
    2848             : 
    2849           0 :         if (unlikely(bio_queue_enter(bio)))
    2850             :                 return NULL;
    2851             : 
    2852           0 :         if (blk_mq_attempt_bio_merge(q, bio, nsegs))
    2853             :                 goto queue_exit;
    2854             : 
    2855           0 :         rq_qos_throttle(q, bio);
    2856             : 
    2857           0 :         if (plug) {
    2858           0 :                 data.nr_tags = plug->nr_ios;
    2859           0 :                 plug->nr_ios = 1;
    2860           0 :                 data.cached_rq = &plug->cached_rq;
    2861             :         }
    2862             : 
    2863           0 :         rq = __blk_mq_alloc_requests(&data);
    2864           0 :         if (rq)
    2865             :                 return rq;
    2866           0 :         rq_qos_cleanup(q, bio);
    2867           0 :         if (bio->bi_opf & REQ_NOWAIT)
    2868             :                 bio_wouldblock_error(bio);
    2869             : queue_exit:
    2870           0 :         blk_queue_exit(q);
    2871           0 :         return NULL;
    2872             : }
    2873             : 
    2874           0 : static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
    2875             :                 struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
    2876             : {
    2877             :         struct request *rq;
    2878             :         enum hctx_type type, hctx_type;
    2879             : 
    2880           0 :         if (!plug)
    2881             :                 return NULL;
    2882             : 
    2883           0 :         if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
    2884           0 :                 *bio = NULL;
    2885           0 :                 return NULL;
    2886             :         }
    2887             : 
    2888           0 :         rq = rq_list_peek(&plug->cached_rq);
    2889           0 :         if (!rq || rq->q != q)
    2890             :                 return NULL;
    2891             : 
    2892           0 :         type = blk_mq_get_hctx_type((*bio)->bi_opf);
    2893           0 :         hctx_type = rq->mq_hctx->type;
    2894           0 :         if (type != hctx_type &&
    2895           0 :             !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
    2896             :                 return NULL;
    2897           0 :         if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
    2898             :                 return NULL;
    2899             : 
    2900             :         /*
    2901             :          * If any qos ->throttle() ends up blocking, we will have flushed the
    2902             :          * plug and hence killed the cached_rq list as well. Pop this entry
    2903             :          * before we throttle.
    2904             :          */
    2905           0 :         plug->cached_rq = rq_list_next(rq);
    2906           0 :         rq_qos_throttle(q, *bio);
    2907             : 
    2908           0 :         rq->cmd_flags = (*bio)->bi_opf;
    2909           0 :         INIT_LIST_HEAD(&rq->queuelist);
    2910           0 :         return rq;
    2911             : }
    2912             : 
    2913             : static void bio_set_ioprio(struct bio *bio)
    2914             : {
    2915             :         /* Nobody set ioprio so far? Initialize it based on task's nice value */
    2916           0 :         if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
    2917           0 :                 bio->bi_ioprio = get_current_ioprio();
    2918           0 :         blkcg_set_ioprio(bio);
    2919             : }
    2920             : 
    2921             : /**
    2922             :  * blk_mq_submit_bio - Create and send a request to block device.
    2923             :  * @bio: Bio pointer.
    2924             :  *
    2925             :  * Builds up a request structure from @q and @bio and sends it to the device. The
    2926             :  * request may not be queued directly to hardware if:
    2927             :  * * This request can be merged with another one
    2928             :  * * We want to place request at plug queue for possible future merging
    2929             :  * * There is an IO scheduler active at this queue
    2930             :  *
    2931             :  * It will not queue the request if there is an error with the bio, or at the
    2932             :  * request creation.
    2933             :  */
    2934           0 : void blk_mq_submit_bio(struct bio *bio)
    2935             : {
    2936           0 :         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
    2937           0 :         struct blk_plug *plug = blk_mq_plug(bio);
    2938           0 :         const int is_sync = op_is_sync(bio->bi_opf);
    2939             :         struct request *rq;
    2940           0 :         unsigned int nr_segs = 1;
    2941             :         blk_status_t ret;
    2942             : 
    2943           0 :         bio = blk_queue_bounce(bio, q);
    2944           0 :         if (bio_may_exceed_limits(bio, &q->limits)) {
    2945           0 :                 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
    2946           0 :                 if (!bio)
    2947           0 :                         return;
    2948             :         }
    2949             : 
    2950           0 :         if (!bio_integrity_prep(bio))
    2951             :                 return;
    2952             : 
    2953           0 :         bio_set_ioprio(bio);
    2954             : 
    2955           0 :         rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
    2956           0 :         if (!rq) {
    2957           0 :                 if (!bio)
    2958             :                         return;
    2959           0 :                 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
    2960           0 :                 if (unlikely(!rq))
    2961             :                         return;
    2962             :         }
    2963             : 
    2964           0 :         trace_block_getrq(bio);
    2965             : 
    2966           0 :         rq_qos_track(q, rq, bio);
    2967             : 
    2968           0 :         blk_mq_bio_to_request(rq, bio, nr_segs);
    2969             : 
    2970           0 :         ret = blk_crypto_init_request(rq);
    2971             :         if (ret != BLK_STS_OK) {
    2972             :                 bio->bi_status = ret;
    2973             :                 bio_endio(bio);
    2974             :                 blk_mq_free_request(rq);
    2975             :                 return;
    2976             :         }
    2977             : 
    2978           0 :         if (op_is_flush(bio->bi_opf)) {
    2979           0 :                 blk_insert_flush(rq);
    2980           0 :                 return;
    2981             :         }
    2982             : 
    2983           0 :         if (plug)
    2984           0 :                 blk_add_rq_to_plug(plug, rq);
    2985           0 :         else if ((rq->rq_flags & RQF_ELV) ||
    2986           0 :                  (rq->mq_hctx->dispatch_busy &&
    2987           0 :                   (q->nr_hw_queues == 1 || !is_sync)))
    2988           0 :                 blk_mq_sched_insert_request(rq, false, true, true);
    2989             :         else
    2990           0 :                 blk_mq_run_dispatch_ops(rq->q,
    2991             :                                 blk_mq_try_issue_directly(rq->mq_hctx, rq));
    2992             : }
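
[Editorial sketch, not part of blk-mq.c or its coverage data] For context, a caller
typically reaches this entry point by building a bio and handing it to submit_bio(),
which ends up in blk_mq_submit_bio() for blk-mq based devices. Everything except the
block-layer helpers below is hypothetical:

        static void mycaller_read_sector(struct block_device *bdev, struct page *page,
                                         sector_t sector, bio_end_io_t *done)
        {
                struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);

                bio->bi_iter.bi_sector = sector;
                __bio_add_page(bio, page, PAGE_SIZE, 0);
                bio->bi_end_io = done;
                submit_bio(bio);
        }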
    2993             : 
    2994             : #ifdef CONFIG_BLK_MQ_STACKING
    2995             : /**
    2996             :  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
    2997             :  * @rq: the request being queued
    2998             :  */
    2999             : blk_status_t blk_insert_cloned_request(struct request *rq)
    3000             : {
    3001             :         struct request_queue *q = rq->q;
    3002             :         unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
    3003             :         unsigned int max_segments = blk_rq_get_max_segments(rq);
    3004             :         blk_status_t ret;
    3005             : 
    3006             :         if (blk_rq_sectors(rq) > max_sectors) {
    3007             :                 /*
    3008             :                  * SCSI device does not have a good way to return if
    3009             :                  * Write Same/Zero is actually supported. If a device rejects
    3010             :  * a non-read/write command (discard, write same, etc.) the
    3011             :                  * low-level device driver will set the relevant queue limit to
    3012             :                  * 0 to prevent blk-lib from issuing more of the offending
    3013             :                  * operations. Commands queued prior to the queue limit being
    3014             :                  * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
    3015             :                  * errors being propagated to upper layers.
    3016             :                  */
    3017             :                 if (max_sectors == 0)
    3018             :                         return BLK_STS_NOTSUPP;
    3019             : 
    3020             :                 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
    3021             :                         __func__, blk_rq_sectors(rq), max_sectors);
    3022             :                 return BLK_STS_IOERR;
    3023             :         }
    3024             : 
    3025             :         /*
    3026             :          * The queue settings related to segment counting may differ from the
    3027             :          * original queue.
    3028             :          */
    3029             :         rq->nr_phys_segments = blk_recalc_rq_segments(rq);
    3030             :         if (rq->nr_phys_segments > max_segments) {
    3031             :                 printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
    3032             :                         __func__, rq->nr_phys_segments, max_segments);
    3033             :                 return BLK_STS_IOERR;
    3034             :         }
    3035             : 
    3036             :         if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
    3037             :                 return BLK_STS_IOERR;
    3038             : 
    3039             :         if (blk_crypto_insert_cloned_request(rq))
    3040             :                 return BLK_STS_IOERR;
    3041             : 
    3042             :         blk_account_io_start(rq);
    3043             : 
    3044             :         /*
    3045             :          * Since we have a scheduler attached on the top device,
    3046             :          * bypass a potential scheduler on the bottom device for
    3047             :          * insert.
    3048             :          */
    3049             :         blk_mq_run_dispatch_ops(q,
    3050             :                         ret = blk_mq_request_issue_directly(rq, true));
    3051             :         if (ret)
    3052             :                 blk_account_io_done(rq, ktime_get_ns());
    3053             :         return ret;
    3054             : }
    3055             : EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
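
[Editorial sketch, not part of blk-mq.c or its coverage data] The stacking-driver
pattern this helper serves, loosely modelled on request-based stackers such as
dm-rq: the upper driver prepares a clone of the original request (see
blk_rq_prep_clone() below) and then issues the clone directly on the lower device.
The mystack_* names and the pre-allocated clone are hypothetical:

        static blk_status_t mystack_dispatch_clone(struct request *orig,
                                                   struct request *clone)
        {
                blk_status_t ret;

                /* Copy the bios and attributes of the original into the clone. */
                if (blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC, NULL, NULL))
                        return BLK_STS_RESOURCE;

                /* Bypass the lower queue's scheduler and issue the clone directly. */
                ret = blk_insert_cloned_request(clone);
                if (ret != BLK_STS_OK)
                        blk_rq_unprep_clone(clone);
                return ret;
        }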
    3056             : 
    3057             : /**
    3058             :  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
    3059             :  * @rq: the clone request to be cleaned up
    3060             :  *
    3061             :  * Description:
    3062             :  *     Free all bios in @rq for a cloned request.
    3063             :  */
    3064             : void blk_rq_unprep_clone(struct request *rq)
    3065             : {
    3066             :         struct bio *bio;
    3067             : 
    3068             :         while ((bio = rq->bio) != NULL) {
    3069             :                 rq->bio = bio->bi_next;
    3070             : 
    3071             :                 bio_put(bio);
    3072             :         }
    3073             : }
    3074             : EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
    3075             : 
    3076             : /**
    3077             :  * blk_rq_prep_clone - Helper function to setup clone request
    3078             :  * @rq: the request to be setup
    3079             :  * @rq_src: original request to be cloned
    3080             :  * @bs: bio_set that bios for clone are allocated from
    3081             :  * @gfp_mask: memory allocation mask for bio
    3082             :  * @bio_ctr: setup function to be called for each clone bio.
    3083             :  *           Returns %0 for success, non %0 for failure.
    3084             :  * @data: private data to be passed to @bio_ctr
    3085             :  *
    3086             :  * Description:
    3087             :  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
    3088             :  *     Also, the pages which the original bios point to are not copied;
    3089             :  *     the cloned bios just point to the same pages.
    3090             :  *     So the cloned bios must complete before the original bios, which means
    3091             :  *     the caller must complete @rq before @rq_src.
    3092             :  */
    3093             : int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
    3094             :                       struct bio_set *bs, gfp_t gfp_mask,
    3095             :                       int (*bio_ctr)(struct bio *, struct bio *, void *),
    3096             :                       void *data)
    3097             : {
    3098             :         struct bio *bio, *bio_src;
    3099             : 
    3100             :         if (!bs)
    3101             :                 bs = &fs_bio_set;
    3102             : 
    3103             :         __rq_for_each_bio(bio_src, rq_src) {
    3104             :                 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
    3105             :                                       bs);
    3106             :                 if (!bio)
    3107             :                         goto free_and_out;
    3108             : 
    3109             :                 if (bio_ctr && bio_ctr(bio, bio_src, data))
    3110             :                         goto free_and_out;
    3111             : 
    3112             :                 if (rq->bio) {
    3113             :                         rq->biotail->bi_next = bio;
    3114             :                         rq->biotail = bio;
    3115             :                 } else {
    3116             :                         rq->bio = rq->biotail = bio;
    3117             :                 }
    3118             :                 bio = NULL;
    3119             :         }
    3120             : 
    3121             :         /* Copy attributes of the original request to the clone request. */
    3122             :         rq->__sector = blk_rq_pos(rq_src);
    3123             :         rq->__data_len = blk_rq_bytes(rq_src);
    3124             :         if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
    3125             :                 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
    3126             :                 rq->special_vec = rq_src->special_vec;
    3127             :         }
    3128             :         rq->nr_phys_segments = rq_src->nr_phys_segments;
    3129             :         rq->ioprio = rq_src->ioprio;
    3130             : 
    3131             :         if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
    3132             :                 goto free_and_out;
    3133             : 
    3134             :         return 0;
    3135             : 
    3136             : free_and_out:
    3137             :         if (bio)
    3138             :                 bio_put(bio);
    3139             :         blk_rq_unprep_clone(rq);
    3140             : 
    3141             :         return -ENOMEM;
    3142             : }
    3143             : EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
    3144             : #endif /* CONFIG_BLK_MQ_STACKING */
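                     : 
                     : /*
                     :  * The clone helpers above are what stacking drivers (such as request-based
                     :  * dm) build on.  A minimal sketch of the usual pairing, assuming a
                     :  * hypothetical lower-level queue 'lower_q' and an original request 'rq';
                     :  * names are illustrative and error handling/end_io wiring is elided:
                     :  *
                     :  *      clone = blk_mq_alloc_request(lower_q, rq->cmd_flags | REQ_NOMERGE,
                     :  *                                   BLK_MQ_REQ_NOWAIT);
                     :  *      if (IS_ERR(clone))
                     :  *              return PTR_ERR(clone);
                     :  *      if (blk_rq_prep_clone(clone, rq, NULL, GFP_ATOMIC, NULL, NULL)) {
                     :  *              blk_mq_free_request(clone);
                     :  *              return -ENOMEM;
                     :  *      }
                     :  *      ret = blk_insert_cloned_request(clone);
                     :  *      if (ret != BLK_STS_OK) {
                     :  *              blk_rq_unprep_clone(clone);
                     :  *              blk_mq_free_request(clone);
                     :  *      }
                     :  *
                     :  * As documented for blk_rq_prep_clone() above, the clone must complete
                     :  * before the original request does.
                     :  */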
    3145             : 
    3146             : /*
    3147             :  * Steal bios from a request and add them to a bio list.
    3148             :  * The request must not have been partially completed before.
    3149             :  */
    3150           0 : void blk_steal_bios(struct bio_list *list, struct request *rq)
    3151             : {
    3152           0 :         if (rq->bio) {
    3153           0 :                 if (list->tail)
    3154           0 :                         list->tail->bi_next = rq->bio;
    3155             :                 else
    3156           0 :                         list->head = rq->bio;
    3157           0 :                 list->tail = rq->biotail;
    3158             : 
    3159           0 :                 rq->bio = NULL;
    3160           0 :                 rq->biotail = NULL;
    3161             :         }
    3162             : 
    3163           0 :         rq->__data_len = 0;
    3164           0 : }
    3165             : EXPORT_SYMBOL_GPL(blk_steal_bios);
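                     : 
                     : /*
                     :  * A typical blk_steal_bios() user (nvme multipath failover, for instance)
                     :  * moves the bios off a failing request, ends the now-empty request, and
                     :  * resubmits the bios later.  A rough sketch, with a driver-private
                     :  * bio_list as an illustrative holding place:
                     :  *
                     :  *      struct bio_list requeue_list = BIO_EMPTY_LIST;
                     :  *      struct bio *bio;
                     :  *
                     :  *      blk_steal_bios(&requeue_list, rq);
                     :  *      blk_mq_end_request(rq, BLK_STS_OK);
                     :  *
                     :  *      while ((bio = bio_list_pop(&requeue_list)))
                     :  *              submit_bio_noacct(bio);
                     :  */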
    3166             : 
    3167             : static size_t order_to_size(unsigned int order)
    3168             : {
    3169           0 :         return (size_t)PAGE_SIZE << order;
    3170             : }
    3171             : 
    3172             : /* called before freeing request pool in @tags */
    3173           0 : static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
    3174             :                                     struct blk_mq_tags *tags)
    3175             : {
    3176             :         struct page *page;
    3177             :         unsigned long flags;
    3178             : 
    3179             :         /*
    3180             :          * There is no need to clear the mapping if the driver tags are not
    3181             :          * initialized or the mapping already belongs to the driver tags.
    3182             :          */
    3183           0 :         if (!drv_tags || drv_tags == tags)
    3184             :                 return;
    3185             : 
    3186           0 :         list_for_each_entry(page, &tags->page_list, lru) {
    3187           0 :                 unsigned long start = (unsigned long)page_address(page);
    3188           0 :                 unsigned long end = start + order_to_size(page->private);
    3189             :                 int i;
    3190             : 
    3191           0 :                 for (i = 0; i < drv_tags->nr_tags; i++) {
    3192           0 :                         struct request *rq = drv_tags->rqs[i];
    3193           0 :                         unsigned long rq_addr = (unsigned long)rq;
    3194             : 
    3195           0 :                         if (rq_addr >= start && rq_addr < end) {
    3196           0 :                                 WARN_ON_ONCE(req_ref_read(rq) != 0);
    3197           0 :                                 cmpxchg(&drv_tags->rqs[i], rq, NULL);
    3198             :                         }
    3199             :                 }
    3200             :         }
    3201             : 
    3202             :         /*
    3203             :          * Wait until all pending iterations are done.
    3204             :          *
    3205             :          * The request references are cleared above, and the clearing is
    3206             :          * guaranteed to be observed after the ->lock is released.
    3207             :          */
    3208           0 :         spin_lock_irqsave(&drv_tags->lock, flags);
    3209           0 :         spin_unlock_irqrestore(&drv_tags->lock, flags);
    3210             : }
    3211             : 
    3212           0 : void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
    3213             :                      unsigned int hctx_idx)
    3214             : {
    3215             :         struct blk_mq_tags *drv_tags;
    3216             :         struct page *page;
    3217             : 
    3218           0 :         if (list_empty(&tags->page_list))
    3219             :                 return;
    3220             : 
    3221           0 :         if (blk_mq_is_shared_tags(set->flags))
    3222           0 :                 drv_tags = set->shared_tags;
    3223             :         else
    3224           0 :                 drv_tags = set->tags[hctx_idx];
    3225             : 
    3226           0 :         if (tags->static_rqs && set->ops->exit_request) {
    3227             :                 int i;
    3228             : 
    3229           0 :                 for (i = 0; i < tags->nr_tags; i++) {
    3230           0 :                         struct request *rq = tags->static_rqs[i];
    3231             : 
    3232           0 :                         if (!rq)
    3233           0 :                                 continue;
    3234           0 :                         set->ops->exit_request(set, rq, hctx_idx);
    3235           0 :                         tags->static_rqs[i] = NULL;
    3236             :                 }
    3237             :         }
    3238             : 
    3239           0 :         blk_mq_clear_rq_mapping(drv_tags, tags);
    3240             : 
    3241           0 :         while (!list_empty(&tags->page_list)) {
    3242           0 :                 page = list_first_entry(&tags->page_list, struct page, lru);
    3243           0 :                 list_del_init(&page->lru);
    3244             :                 /*
    3245             :                  * Remove kmemleak object previously allocated in
    3246             :                  * blk_mq_alloc_rqs().
    3247             :                  */
    3248           0 :                 kmemleak_free(page_address(page));
    3249           0 :                 __free_pages(page, page->private);
    3250             :         }
    3251             : }
    3252             : 
    3253           0 : void blk_mq_free_rq_map(struct blk_mq_tags *tags)
    3254             : {
    3255           0 :         kfree(tags->rqs);
    3256           0 :         tags->rqs = NULL;
    3257           0 :         kfree(tags->static_rqs);
    3258           0 :         tags->static_rqs = NULL;
    3259             : 
    3260           0 :         blk_mq_free_tags(tags);
    3261           0 : }
    3262             : 
    3263             : static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
    3264             :                 unsigned int hctx_idx)
    3265             : {
    3266             :         int i;
    3267             : 
    3268           0 :         for (i = 0; i < set->nr_maps; i++) {
    3269           0 :                 unsigned int start = set->map[i].queue_offset;
    3270           0 :                 unsigned int end = start + set->map[i].nr_queues;
    3271             : 
    3272           0 :                 if (hctx_idx >= start && hctx_idx < end)
    3273             :                         break;
    3274             :         }
    3275             : 
    3276           0 :         if (i >= set->nr_maps)
    3277           0 :                 i = HCTX_TYPE_DEFAULT;
    3278             : 
    3279           0 :         return i;
    3280             : }
    3281             : 
    3282           0 : static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
    3283             :                 unsigned int hctx_idx)
    3284             : {
    3285           0 :         enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
    3286             : 
    3287           0 :         return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
    3288             : }
    3289             : 
    3290           0 : static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
    3291             :                                                unsigned int hctx_idx,
    3292             :                                                unsigned int nr_tags,
    3293             :                                                unsigned int reserved_tags)
    3294             : {
    3295           0 :         int node = blk_mq_get_hctx_node(set, hctx_idx);
    3296             :         struct blk_mq_tags *tags;
    3297             : 
    3298           0 :         if (node == NUMA_NO_NODE)
    3299           0 :                 node = set->numa_node;
    3300             : 
    3301           0 :         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
    3302           0 :                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
    3303           0 :         if (!tags)
    3304             :                 return NULL;
    3305             : 
    3306           0 :         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
    3307             :                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
    3308             :                                  node);
    3309           0 :         if (!tags->rqs)
    3310             :                 goto err_free_tags;
    3311             : 
    3312           0 :         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
    3313             :                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
    3314             :                                         node);
    3315           0 :         if (!tags->static_rqs)
    3316             :                 goto err_free_rqs;
    3317             : 
    3318             :         return tags;
    3319             : 
    3320             : err_free_rqs:
    3321           0 :         kfree(tags->rqs);
    3322             : err_free_tags:
    3323           0 :         blk_mq_free_tags(tags);
    3324           0 :         return NULL;
    3325             : }
    3326             : 
    3327             : static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
    3328             :                                unsigned int hctx_idx, int node)
    3329             : {
    3330             :         int ret;
    3331             : 
    3332           0 :         if (set->ops->init_request) {
    3333           0 :                 ret = set->ops->init_request(set, rq, hctx_idx, node);
    3334           0 :                 if (ret)
    3335             :                         return ret;
    3336             :         }
    3337             : 
    3338           0 :         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
    3339             :         return 0;
    3340             : }
    3341             : 
    3342           0 : static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
    3343             :                             struct blk_mq_tags *tags,
    3344             :                             unsigned int hctx_idx, unsigned int depth)
    3345             : {
    3346           0 :         unsigned int i, j, entries_per_page, max_order = 4;
    3347           0 :         int node = blk_mq_get_hctx_node(set, hctx_idx);
    3348             :         size_t rq_size, left;
    3349             : 
    3350           0 :         if (node == NUMA_NO_NODE)
    3351           0 :                 node = set->numa_node;
    3352             : 
    3353           0 :         INIT_LIST_HEAD(&tags->page_list);
    3354             : 
    3355             :         /*
    3356             :          * rq_size is the size of the request plus driver payload, rounded
    3357             :          * to the cacheline size
    3358             :          */
    3359           0 :         rq_size = round_up(sizeof(struct request) + set->cmd_size,
    3360             :                                 cache_line_size());
    3361           0 :         left = rq_size * depth;
    3362             : 
    3363           0 :         for (i = 0; i < depth; ) {
    3364             :                 int this_order = max_order;
    3365             :                 struct page *page;
    3366             :                 int to_do;
    3367             :                 void *p;
    3368             : 
    3369           0 :                 while (this_order && left < order_to_size(this_order - 1))
    3370             :                         this_order--;
    3371             : 
    3372             :                 do {
    3373           0 :                         page = alloc_pages_node(node,
    3374             :                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
    3375             :                                 this_order);
    3376           0 :                         if (page)
    3377             :                                 break;
    3378           0 :                         if (!this_order--)
    3379             :                                 break;
    3380           0 :                         if (order_to_size(this_order) < rq_size)
    3381             :                                 break;
    3382             :                 } while (1);
    3383             : 
    3384           0 :                 if (!page)
    3385             :                         goto fail;
    3386             : 
    3387           0 :                 page->private = this_order;
    3388           0 :                 list_add_tail(&page->lru, &tags->page_list);
    3389             : 
    3390           0 :                 p = page_address(page);
    3391             :                 /*
    3392             :                  * Allow kmemleak to scan these pages as they contain pointers
    3393             :                  * to additional allocations, such as via ops->init_request().
    3394             :                  */
    3395           0 :                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
    3396           0 :                 entries_per_page = order_to_size(this_order) / rq_size;
    3397           0 :                 to_do = min(entries_per_page, depth - i);
    3398           0 :                 left -= to_do * rq_size;
    3399           0 :                 for (j = 0; j < to_do; j++) {
    3400           0 :                         struct request *rq = p;
    3401             : 
    3402           0 :                         tags->static_rqs[i] = rq;
    3403           0 :                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
    3404           0 :                                 tags->static_rqs[i] = NULL;
    3405           0 :                                 goto fail;
    3406             :                         }
    3407             : 
    3408           0 :                         p += rq_size;
    3409           0 :                         i++;
    3410             :                 }
    3411             :         }
    3412             :         return 0;
    3413             : 
    3414             : fail:
    3415           0 :         blk_mq_free_rqs(set, tags, hctx_idx);
    3416           0 :         return -ENOMEM;
    3417             : }
    3418             : 
    3419             : struct rq_iter_data {
    3420             :         struct blk_mq_hw_ctx *hctx;
    3421             :         bool has_rq;
    3422             : };
    3423             : 
    3424           0 : static bool blk_mq_has_request(struct request *rq, void *data)
    3425             : {
    3426           0 :         struct rq_iter_data *iter_data = data;
    3427             : 
    3428           0 :         if (rq->mq_hctx != iter_data->hctx)
    3429             :                 return true;
    3430           0 :         iter_data->has_rq = true;
    3431           0 :         return false;
    3432             : }
    3433             : 
    3434           0 : static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
    3435             : {
    3436           0 :         struct blk_mq_tags *tags = hctx->sched_tags ?
    3437           0 :                         hctx->sched_tags : hctx->tags;
    3438           0 :         struct rq_iter_data data = {
    3439             :                 .hctx   = hctx,
    3440             :         };
    3441             : 
    3442           0 :         blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
    3443           0 :         return data.has_rq;
    3444             : }
    3445             : 
    3446           0 : static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
    3447             :                 struct blk_mq_hw_ctx *hctx)
    3448             : {
    3449           0 :         if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
    3450             :                 return false;
    3451           0 :         if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
    3452             :                 return false;
    3453           0 :         return true;
    3454             : }
    3455             : 
    3456           0 : static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
    3457             : {
    3458           0 :         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
    3459             :                         struct blk_mq_hw_ctx, cpuhp_online);
    3460             : 
    3461           0 :         if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
    3462           0 :             !blk_mq_last_cpu_in_hctx(cpu, hctx))
    3463             :                 return 0;
    3464             : 
    3465             :         /*
    3466             :          * Prevent new requests from being allocated on the current hctx.
    3467             :          *
    3468             :          * The smp_mb__after_atomic() pairs with the implied barrier in
    3469             :          * test_and_set_bit_lock() in sbitmap_get(), and ensures the inactive
    3470             :          * flag is seen once we return from the tag allocator.
    3471             :          */
    3472           0 :         set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
    3473           0 :         smp_mb__after_atomic();
    3474             : 
    3475             :         /*
    3476             :          * Try to grab a reference to the queue and wait for any outstanding
    3477             :          * requests.  If we could not grab a reference, the queue has been
    3478             :          * frozen and there are no requests.
    3479             :          */
    3480           0 :         if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
    3481           0 :                 while (blk_mq_hctx_has_requests(hctx))
    3482           0 :                         msleep(5);
    3483           0 :                 percpu_ref_put(&hctx->queue->q_usage_counter);
    3484             :         }
    3485             : 
    3486             :         return 0;
    3487             : }
    3488             : 
    3489           0 : static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
    3490             : {
    3491           0 :         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
    3492             :                         struct blk_mq_hw_ctx, cpuhp_online);
    3493             : 
    3494           0 :         if (cpumask_test_cpu(cpu, hctx->cpumask))
    3495           0 :                 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
    3496           0 :         return 0;
    3497             : }
    3498             : 
    3499             : /*
    3500             :  * 'cpu' is going away. Splice any existing rq_list entries from this
    3501             :  * software queue to the hw queue dispatch list, and ensure that the
    3502             :  * hw queue gets run.
    3503             :  */
    3504           0 : static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
    3505             : {
    3506             :         struct blk_mq_hw_ctx *hctx;
    3507             :         struct blk_mq_ctx *ctx;
    3508           0 :         LIST_HEAD(tmp);
    3509             :         enum hctx_type type;
    3510             : 
    3511           0 :         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
    3512           0 :         if (!cpumask_test_cpu(cpu, hctx->cpumask))
    3513             :                 return 0;
    3514             : 
    3515           0 :         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
    3516           0 :         type = hctx->type;
    3517             : 
    3518           0 :         spin_lock(&ctx->lock);
    3519           0 :         if (!list_empty(&ctx->rq_lists[type])) {
    3520           0 :                 list_splice_init(&ctx->rq_lists[type], &tmp);
    3521             :                 blk_mq_hctx_clear_pending(hctx, ctx);
    3522             :         }
    3523           0 :         spin_unlock(&ctx->lock);
    3524             : 
    3525           0 :         if (list_empty(&tmp))
    3526             :                 return 0;
    3527             : 
    3528           0 :         spin_lock(&hctx->lock);
    3529           0 :         list_splice_tail_init(&tmp, &hctx->dispatch);
    3530           0 :         spin_unlock(&hctx->lock);
    3531             : 
    3532           0 :         blk_mq_run_hw_queue(hctx, true);
    3533           0 :         return 0;
    3534             : }
    3535             : 
    3536           0 : static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
    3537             : {
    3538           0 :         if (!(hctx->flags & BLK_MQ_F_STACKING))
    3539           0 :                 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
    3540             :                                                     &hctx->cpuhp_online);
    3541           0 :         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
    3542             :                                             &hctx->cpuhp_dead);
    3543           0 : }
    3544             : 
    3545             : /*
    3546             :  * Before freeing the hw queue, clear the flush request reference in
    3547             :  * tags->rqs[] to avoid a potential use-after-free.
    3548             :  */
    3549           0 : static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
    3550             :                 unsigned int queue_depth, struct request *flush_rq)
    3551             : {
    3552             :         int i;
    3553             :         unsigned long flags;
    3554             : 
    3555             :         /* The hw queue may not be mapped yet */
    3556           0 :         if (!tags)
    3557             :                 return;
    3558             : 
    3559           0 :         WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
    3560             : 
    3561           0 :         for (i = 0; i < queue_depth; i++)
    3562           0 :                 cmpxchg(&tags->rqs[i], flush_rq, NULL);
    3563             : 
    3564             :         /*
    3565             :          * Wait until all pending iterations are done.
    3566             :          *
    3567             :          * The request references are cleared above, and the clearing is
    3568             :          * guaranteed to be observed after the ->lock is released.
    3569             :          */
    3570           0 :         spin_lock_irqsave(&tags->lock, flags);
    3571           0 :         spin_unlock_irqrestore(&tags->lock, flags);
    3572             : }
    3573             : 
    3574             : /* hctx->ctxs will be freed in queue's release handler */
    3575           0 : static void blk_mq_exit_hctx(struct request_queue *q,
    3576             :                 struct blk_mq_tag_set *set,
    3577             :                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    3578             : {
    3579           0 :         struct request *flush_rq = hctx->fq->flush_rq;
    3580             : 
    3581           0 :         if (blk_mq_hw_queue_mapped(hctx))
    3582             :                 blk_mq_tag_idle(hctx);
    3583             : 
    3584           0 :         if (blk_queue_init_done(q))
    3585           0 :                 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
    3586             :                                 set->queue_depth, flush_rq);
    3587           0 :         if (set->ops->exit_request)
    3588           0 :                 set->ops->exit_request(set, flush_rq, hctx_idx);
    3589             : 
    3590           0 :         if (set->ops->exit_hctx)
    3591           0 :                 set->ops->exit_hctx(hctx, hctx_idx);
    3592             : 
    3593           0 :         blk_mq_remove_cpuhp(hctx);
    3594             : 
    3595           0 :         xa_erase(&q->hctx_table, hctx_idx);
    3596             : 
    3597           0 :         spin_lock(&q->unused_hctx_lock);
    3598           0 :         list_add(&hctx->hctx_list, &q->unused_hctx_list);
    3599           0 :         spin_unlock(&q->unused_hctx_lock);
    3600           0 : }
    3601             : 
    3602           0 : static void blk_mq_exit_hw_queues(struct request_queue *q,
    3603             :                 struct blk_mq_tag_set *set, int nr_queue)
    3604             : {
    3605             :         struct blk_mq_hw_ctx *hctx;
    3606             :         unsigned long i;
    3607             : 
    3608           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    3609           0 :                 if (i == nr_queue)
    3610             :                         break;
    3611           0 :                 blk_mq_exit_hctx(q, set, hctx, i);
    3612             :         }
    3613           0 : }
    3614             : 
    3615           0 : static int blk_mq_init_hctx(struct request_queue *q,
    3616             :                 struct blk_mq_tag_set *set,
    3617             :                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
    3618             : {
    3619           0 :         hctx->queue_num = hctx_idx;
    3620             : 
    3621           0 :         if (!(hctx->flags & BLK_MQ_F_STACKING))
    3622           0 :                 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
    3623             :                                 &hctx->cpuhp_online);
    3624           0 :         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
    3625             : 
    3626           0 :         hctx->tags = set->tags[hctx_idx];
    3627             : 
    3628           0 :         if (set->ops->init_hctx &&
    3629           0 :             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
    3630             :                 goto unregister_cpu_notifier;
    3631             : 
    3632           0 :         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
    3633           0 :                                 hctx->numa_node))
    3634             :                 goto exit_hctx;
    3635             : 
    3636           0 :         if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
    3637             :                 goto exit_flush_rq;
    3638             : 
    3639             :         return 0;
    3640             : 
    3641             :  exit_flush_rq:
    3642           0 :         if (set->ops->exit_request)
    3643           0 :                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
    3644             :  exit_hctx:
    3645           0 :         if (set->ops->exit_hctx)
    3646           0 :                 set->ops->exit_hctx(hctx, hctx_idx);
    3647             :  unregister_cpu_notifier:
    3648           0 :         blk_mq_remove_cpuhp(hctx);
    3649           0 :         return -1;
    3650             : }
    3651             : 
    3652             : static struct blk_mq_hw_ctx *
    3653           0 : blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
    3654             :                 int node)
    3655             : {
    3656             :         struct blk_mq_hw_ctx *hctx;
    3657           0 :         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
    3658             : 
    3659           0 :         hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
    3660           0 :         if (!hctx)
    3661             :                 goto fail_alloc_hctx;
    3662             : 
    3663           0 :         if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
    3664             :                 goto free_hctx;
    3665             : 
    3666           0 :         atomic_set(&hctx->nr_active, 0);
    3667           0 :         if (node == NUMA_NO_NODE)
    3668           0 :                 node = set->numa_node;
    3669           0 :         hctx->numa_node = node;
    3670             : 
    3671           0 :         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
    3672           0 :         spin_lock_init(&hctx->lock);
    3673           0 :         INIT_LIST_HEAD(&hctx->dispatch);
    3674           0 :         hctx->queue = q;
    3675           0 :         hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
    3676             : 
    3677           0 :         INIT_LIST_HEAD(&hctx->hctx_list);
    3678             : 
    3679             :         /*
    3680             :          * Allocate space for all possible cpus to avoid allocation at
    3681             :          * runtime
    3682             :          */
    3683           0 :         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
    3684             :                         gfp, node);
    3685           0 :         if (!hctx->ctxs)
    3686             :                 goto free_cpumask;
    3687             : 
    3688           0 :         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
    3689             :                                 gfp, node, false, false))
    3690             :                 goto free_ctxs;
    3691           0 :         hctx->nr_ctx = 0;
    3692             : 
    3693           0 :         spin_lock_init(&hctx->dispatch_wait_lock);
    3694           0 :         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
    3695           0 :         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
    3696             : 
    3697           0 :         hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
    3698           0 :         if (!hctx->fq)
    3699             :                 goto free_bitmap;
    3700             : 
    3701           0 :         blk_mq_hctx_kobj_init(hctx);
    3702             : 
    3703           0 :         return hctx;
    3704             : 
    3705             :  free_bitmap:
    3706           0 :         sbitmap_free(&hctx->ctx_map);
    3707             :  free_ctxs:
    3708           0 :         kfree(hctx->ctxs);
    3709             :  free_cpumask:
    3710           0 :         free_cpumask_var(hctx->cpumask);
    3711             :  free_hctx:
    3712           0 :         kfree(hctx);
    3713             :  fail_alloc_hctx:
    3714             :         return NULL;
    3715             : }
    3716             : 
    3717           0 : static void blk_mq_init_cpu_queues(struct request_queue *q,
    3718             :                                    unsigned int nr_hw_queues)
    3719             : {
    3720           0 :         struct blk_mq_tag_set *set = q->tag_set;
    3721             :         unsigned int i, j;
    3722             : 
    3723           0 :         for_each_possible_cpu(i) {
    3724           0 :                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
    3725             :                 struct blk_mq_hw_ctx *hctx;
    3726             :                 int k;
    3727             : 
    3728           0 :                 __ctx->cpu = i;
    3729           0 :                 spin_lock_init(&__ctx->lock);
    3730           0 :                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
    3731           0 :                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
    3732             : 
    3733           0 :                 __ctx->queue = q;
    3734             : 
    3735             :                 /*
    3736             :                  * Set local node, IFF we have more than one hw queue. If
    3737             :                  * not, we remain on the home node of the device
    3738             :                  */
    3739           0 :                 for (j = 0; j < set->nr_maps; j++) {
    3740           0 :                         hctx = blk_mq_map_queue_type(q, j, i);
    3741           0 :                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
    3742           0 :                                 hctx->numa_node = cpu_to_node(i);
    3743             :                 }
    3744             :         }
    3745           0 : }
    3746             : 
    3747           0 : struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
    3748             :                                              unsigned int hctx_idx,
    3749             :                                              unsigned int depth)
    3750             : {
    3751             :         struct blk_mq_tags *tags;
    3752             :         int ret;
    3753             : 
    3754           0 :         tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
    3755           0 :         if (!tags)
    3756             :                 return NULL;
    3757             : 
    3758           0 :         ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
    3759           0 :         if (ret) {
    3760           0 :                 blk_mq_free_rq_map(tags);
    3761           0 :                 return NULL;
    3762             :         }
    3763             : 
    3764             :         return tags;
    3765             : }
    3766             : 
    3767           0 : static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
    3768             :                                        int hctx_idx)
    3769             : {
    3770           0 :         if (blk_mq_is_shared_tags(set->flags)) {
    3771           0 :                 set->tags[hctx_idx] = set->shared_tags;
    3772             : 
    3773           0 :                 return true;
    3774             :         }
    3775             : 
    3776           0 :         set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
    3777             :                                                        set->queue_depth);
    3778             : 
    3779           0 :         return set->tags[hctx_idx];
    3780             : }
    3781             : 
    3782           0 : void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
    3783             :                              struct blk_mq_tags *tags,
    3784             :                              unsigned int hctx_idx)
    3785             : {
    3786           0 :         if (tags) {
    3787           0 :                 blk_mq_free_rqs(set, tags, hctx_idx);
    3788           0 :                 blk_mq_free_rq_map(tags);
    3789             :         }
    3790           0 : }
    3791             : 
    3792           0 : static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
    3793             :                                       unsigned int hctx_idx)
    3794             : {
    3795           0 :         if (!blk_mq_is_shared_tags(set->flags))
    3796           0 :                 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
    3797             : 
    3798           0 :         set->tags[hctx_idx] = NULL;
    3799           0 : }
    3800             : 
    3801           0 : static void blk_mq_map_swqueue(struct request_queue *q)
    3802             : {
    3803             :         unsigned int j, hctx_idx;
    3804             :         unsigned long i;
    3805             :         struct blk_mq_hw_ctx *hctx;
    3806             :         struct blk_mq_ctx *ctx;
    3807           0 :         struct blk_mq_tag_set *set = q->tag_set;
    3808             : 
    3809           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    3810           0 :                 cpumask_clear(hctx->cpumask);
    3811           0 :                 hctx->nr_ctx = 0;
    3812           0 :                 hctx->dispatch_from = NULL;
    3813             :         }
    3814             : 
    3815             :         /*
    3816             :          * Map software to hardware queues.
    3817             :          *
    3818             :          * If the cpu isn't present, the cpu is mapped to the first hctx.
    3819             :          */
    3820           0 :         for_each_possible_cpu(i) {
    3821             : 
    3822           0 :                 ctx = per_cpu_ptr(q->queue_ctx, i);
    3823           0 :                 for (j = 0; j < set->nr_maps; j++) {
    3824           0 :                         if (!set->map[j].nr_queues) {
    3825           0 :                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
    3826             :                                                 HCTX_TYPE_DEFAULT, i);
    3827           0 :                                 continue;
    3828             :                         }
    3829           0 :                         hctx_idx = set->map[j].mq_map[i];
    3830             :                         /* unmapped hw queue can be remapped after CPU topo changed */
    3831           0 :                         if (!set->tags[hctx_idx] &&
    3832           0 :                             !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
    3833             :                                 /*
    3834             :                                  * If tags initialization fails for some hctx,
    3835             :                                  * that hctx won't be brought online.  In this
    3836             :                                  * case, remap the current ctx to hctx[0] which
    3837             :                                  * is guaranteed to always have tags allocated
    3838             :                                  */
    3839           0 :                                 set->map[j].mq_map[i] = 0;
    3840             :                         }
    3841             : 
    3842           0 :                         hctx = blk_mq_map_queue_type(q, j, i);
    3843           0 :                         ctx->hctxs[j] = hctx;
    3844             :                         /*
    3845             :                          * If the CPU is already set in the mask, then we've
    3846             :                          * mapped this one already. This can happen if
    3847             :                          * devices share queues across queue maps.
    3848             :                          */
    3849           0 :                         if (cpumask_test_cpu(i, hctx->cpumask))
    3850           0 :                                 continue;
    3851             : 
    3852           0 :                         cpumask_set_cpu(i, hctx->cpumask);
    3853           0 :                         hctx->type = j;
    3854           0 :                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
    3855           0 :                         hctx->ctxs[hctx->nr_ctx++] = ctx;
    3856             : 
    3857             :                         /*
    3858             :                          * If the nr_ctx type overflows, we have exceeded the
    3859             :                          * number of sw queues we can support.
    3860             :                          */
    3861           0 :                         BUG_ON(!hctx->nr_ctx);
    3862             :                 }
    3863             : 
    3864           0 :                 for (; j < HCTX_MAX_TYPES; j++)
    3865           0 :                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
    3866             :                                         HCTX_TYPE_DEFAULT, i);
    3867             :         }
    3868             : 
    3869           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    3870             :                 /*
    3871             :                  * If no software queues are mapped to this hardware queue,
    3872             :                  * disable it and free the request entries.
    3873             :                  */
    3874           0 :                 if (!hctx->nr_ctx) {
    3875             :                         /* Never unmap queue 0.  We need it as a
    3876             :                          * fallback in case allocation fails for a
    3877             :                          * new remap.
    3878             :                          */
    3879           0 :                         if (i)
    3880           0 :                                 __blk_mq_free_map_and_rqs(set, i);
    3881             : 
    3882           0 :                         hctx->tags = NULL;
    3883           0 :                         continue;
    3884             :                 }
    3885             : 
    3886           0 :                 hctx->tags = set->tags[i];
    3887           0 :                 WARN_ON(!hctx->tags);
    3888             : 
    3889             :                 /*
    3890             :                  * Set the map size to the number of mapped software queues.
    3891             :                  * This is more accurate and more efficient than looping
    3892             :                  * over all possibly mapped software queues.
    3893             :                  */
    3894           0 :                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
    3895             : 
    3896             :                 /*
    3897             :                  * Initialize batch roundrobin counts
    3898             :                  */
    3899           0 :                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
    3900           0 :                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
    3901             :         }
    3902           0 : }
    3903             : 
    3904             : /*
    3905             :  * Caller needs to ensure that we're either frozen/quiesced, or that
    3906             :  * the queue isn't live yet.
    3907             :  */
    3908           0 : static void queue_set_hctx_shared(struct request_queue *q, bool shared)
    3909             : {
    3910             :         struct blk_mq_hw_ctx *hctx;
    3911             :         unsigned long i;
    3912             : 
    3913           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    3914           0 :                 if (shared) {
    3915           0 :                         hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
    3916             :                 } else {
    3917           0 :                         blk_mq_tag_idle(hctx);
    3918           0 :                         hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
    3919             :                 }
    3920             :         }
    3921           0 : }
    3922             : 
    3923           0 : static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
    3924             :                                          bool shared)
    3925             : {
    3926             :         struct request_queue *q;
    3927             : 
    3928             :         lockdep_assert_held(&set->tag_list_lock);
    3929             : 
    3930           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
    3931           0 :                 blk_mq_freeze_queue(q);
    3932           0 :                 queue_set_hctx_shared(q, shared);
    3933           0 :                 blk_mq_unfreeze_queue(q);
    3934             :         }
    3935           0 : }
    3936             : 
    3937           0 : static void blk_mq_del_queue_tag_set(struct request_queue *q)
    3938             : {
    3939           0 :         struct blk_mq_tag_set *set = q->tag_set;
    3940             : 
    3941           0 :         mutex_lock(&set->tag_list_lock);
    3942           0 :         list_del(&q->tag_set_list);
    3943           0 :         if (list_is_singular(&set->tag_list)) {
    3944             :                 /* just transitioned to unshared */
    3945           0 :                 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
    3946             :                 /* update existing queue */
    3947           0 :                 blk_mq_update_tag_set_shared(set, false);
    3948             :         }
    3949           0 :         mutex_unlock(&set->tag_list_lock);
    3950           0 :         INIT_LIST_HEAD(&q->tag_set_list);
    3951           0 : }
    3952             : 
    3953           0 : static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
    3954             :                                      struct request_queue *q)
    3955             : {
    3956           0 :         mutex_lock(&set->tag_list_lock);
    3957             : 
    3958             :         /*
    3959             :          * Check to see if we're transitioning to shared (from 1 to 2 queues).
    3960             :          */
    3961           0 :         if (!list_empty(&set->tag_list) &&
    3962           0 :             !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
    3963           0 :                 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
    3964             :                 /* update existing queue */
    3965           0 :                 blk_mq_update_tag_set_shared(set, true);
    3966             :         }
    3967           0 :         if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
    3968           0 :                 queue_set_hctx_shared(q, true);
    3969           0 :         list_add_tail(&q->tag_set_list, &set->tag_list);
    3970             : 
    3971           0 :         mutex_unlock(&set->tag_list_lock);
    3972           0 : }
    3973             : 
    3974             : /* All allocations will be freed in release handler of q->mq_kobj */
    3975           0 : static int blk_mq_alloc_ctxs(struct request_queue *q)
    3976             : {
    3977             :         struct blk_mq_ctxs *ctxs;
    3978             :         int cpu;
    3979             : 
    3980           0 :         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
    3981           0 :         if (!ctxs)
    3982             :                 return -ENOMEM;
    3983             : 
    3984           0 :         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
    3985           0 :         if (!ctxs->queue_ctx)
    3986             :                 goto fail;
    3987             : 
    3988           0 :         for_each_possible_cpu(cpu) {
    3989           0 :                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
    3990           0 :                 ctx->ctxs = ctxs;
    3991             :         }
    3992             : 
    3993           0 :         q->mq_kobj = &ctxs->kobj;
    3994           0 :         q->queue_ctx = ctxs->queue_ctx;
    3995             : 
    3996             :         return 0;
    3997             :  fail:
    3998           0 :         kfree(ctxs);
    3999             :         return -ENOMEM;
    4000             : }
    4001             : 
    4002             : /*
    4003             :  * This is the actual release handler for mq, but we do it from the
    4004             :  * request queue's release handler to avoid use-after-free and
    4005             :  * headaches: q->mq_kobj shouldn't have been introduced, but we
    4006             :  * can't group the ctx/hctx kobjects without it.
    4007             :  */
    4008           0 : void blk_mq_release(struct request_queue *q)
    4009             : {
    4010             :         struct blk_mq_hw_ctx *hctx, *next;
    4011             :         unsigned long i;
    4012             : 
    4013           0 :         queue_for_each_hw_ctx(q, hctx, i)
    4014           0 :                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
    4015             : 
    4016             :         /* all hctx are in .unused_hctx_list now */
    4017           0 :         list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
    4018           0 :                 list_del_init(&hctx->hctx_list);
    4019           0 :                 kobject_put(&hctx->kobj);
    4020             :         }
    4021             : 
    4022           0 :         xa_destroy(&q->hctx_table);
    4023             : 
    4024             :         /*
    4025             :          * Release .mq_kobj and the sw queues' kobjects now because
    4026             :          * both share their lifetime with the request queue.
    4027             :          */
    4028           0 :         blk_mq_sysfs_deinit(q);
    4029           0 : }
    4030             : 
    4031           0 : static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
    4032             :                 void *queuedata)
    4033             : {
    4034             :         struct request_queue *q;
    4035             :         int ret;
    4036             : 
    4037           0 :         q = blk_alloc_queue(set->numa_node);
    4038           0 :         if (!q)
    4039             :                 return ERR_PTR(-ENOMEM);
    4040           0 :         q->queuedata = queuedata;
    4041           0 :         ret = blk_mq_init_allocated_queue(set, q);
    4042           0 :         if (ret) {
    4043           0 :                 blk_put_queue(q);
    4044           0 :                 return ERR_PTR(ret);
    4045             :         }
    4046             :         return q;
    4047             : }
    4048             : 
    4049           0 : struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
    4050             : {
    4051           0 :         return blk_mq_init_queue_data(set, NULL);
    4052             : }
    4053             : EXPORT_SYMBOL(blk_mq_init_queue);
    4054             : 
    4055             : /**
    4056             :  * blk_mq_destroy_queue - shutdown a request queue
    4057             :  * @q: request queue to shutdown
    4058             :  *
    4059             :  * This shuts down a request queue allocated by blk_mq_init_queue(). All future
    4060             :  * requests will be failed with -ENODEV. The caller is responsible for dropping
    4061             :  * the reference from blk_mq_init_queue() by calling blk_put_queue().
    4062             :  *
    4063             :  * Context: can sleep
    4064             :  */
    4065           0 : void blk_mq_destroy_queue(struct request_queue *q)
    4066             : {
    4067           0 :         WARN_ON_ONCE(!queue_is_mq(q));
    4068           0 :         WARN_ON_ONCE(blk_queue_registered(q));
    4069             : 
    4070             :         might_sleep();
    4071             : 
    4072           0 :         blk_queue_flag_set(QUEUE_FLAG_DYING, q);
    4073           0 :         blk_queue_start_drain(q);
    4074           0 :         blk_mq_freeze_queue_wait(q);
    4075             : 
    4076           0 :         blk_sync_queue(q);
    4077           0 :         blk_mq_cancel_work_sync(q);
    4078           0 :         blk_mq_exit_queue(q);
    4079           0 : }
    4080             : EXPORT_SYMBOL(blk_mq_destroy_queue);
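                     : 
                     : /*
                     :  * Per the kerneldoc above, tearing down a queue obtained from
                     :  * blk_mq_init_queue() takes two steps.  A sketch with error handling
                     :  * elided:
                     :  *
                     :  *      q = blk_mq_init_queue(set);
                     :  *      if (IS_ERR(q))
                     :  *              return PTR_ERR(q);
                     :  *      ...
                     :  *      blk_mq_destroy_queue(q);        (fail and drain pending requests)
                     :  *      blk_put_queue(q);               (drop the reference from init)
                     :  */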
    4081             : 
    4082           0 : struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
    4083             :                 struct lock_class_key *lkclass)
    4084             : {
    4085             :         struct request_queue *q;
    4086             :         struct gendisk *disk;
    4087             : 
    4088           0 :         q = blk_mq_init_queue_data(set, queuedata);
    4089           0 :         if (IS_ERR(q))
    4090             :                 return ERR_CAST(q);
    4091             : 
    4092           0 :         disk = __alloc_disk_node(q, set->numa_node, lkclass);
    4093           0 :         if (!disk) {
    4094           0 :                 blk_mq_destroy_queue(q);
    4095           0 :                 blk_put_queue(q);
    4096           0 :                 return ERR_PTR(-ENOMEM);
    4097             :         }
    4098           0 :         set_bit(GD_OWNS_QUEUE, &disk->state);
    4099           0 :         return disk;
    4100             : }
    4101             : EXPORT_SYMBOL(__blk_mq_alloc_disk);
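/*
 * Illustrative: drivers normally do not call __blk_mq_alloc_disk() directly
 * but go through the blk_mq_alloc_disk() wrapper in <linux/blk-mq.h>, which
 * supplies the lockdep class key.  A minimal probe-path sketch with
 * hypothetical names (my_tag_set, my_data, my_fops, my_sectors):
 *
 *	struct gendisk *disk;
 *
 *	disk = blk_mq_alloc_disk(&my_tag_set, my_data);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_fops;
 *	set_capacity(disk, my_sectors);
 *	return add_disk(disk);
 */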
    4102             : 
    4103           0 : struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
    4104             :                 struct lock_class_key *lkclass)
    4105             : {
    4106             :         struct gendisk *disk;
    4107             : 
    4108           0 :         if (!blk_get_queue(q))
    4109             :                 return NULL;
    4110           0 :         disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
    4111           0 :         if (!disk)
    4112           0 :                 blk_put_queue(q);
    4113             :         return disk;
    4114             : }
    4115             : EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
    4116             : 
    4117           0 : static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
    4118             :                 struct blk_mq_tag_set *set, struct request_queue *q,
    4119             :                 int hctx_idx, int node)
    4120             : {
    4121           0 :         struct blk_mq_hw_ctx *hctx = NULL, *tmp;
    4122             : 
    4123             :         /* reuse dead hctx first */
    4124           0 :         spin_lock(&q->unused_hctx_lock);
    4125           0 :         list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
    4126           0 :                 if (tmp->numa_node == node) {
    4127             :                         hctx = tmp;
    4128             :                         break;
    4129             :                 }
    4130             :         }
    4131           0 :         if (hctx)
    4132           0 :                 list_del_init(&hctx->hctx_list);
    4133           0 :         spin_unlock(&q->unused_hctx_lock);
    4134             : 
    4135           0 :         if (!hctx)
    4136           0 :                 hctx = blk_mq_alloc_hctx(q, set, node);
    4137           0 :         if (!hctx)
    4138             :                 goto fail;
    4139             : 
    4140           0 :         if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
    4141             :                 goto free_hctx;
    4142             : 
    4143             :         return hctx;
    4144             : 
    4145             :  free_hctx:
    4146           0 :         kobject_put(&hctx->kobj);
    4147             :  fail:
    4148             :         return NULL;
    4149             : }
    4150             : 
    4151           0 : static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
    4152             :                                                 struct request_queue *q)
    4153             : {
    4154             :         struct blk_mq_hw_ctx *hctx;
    4155             :         unsigned long i, j;
    4156             : 
    4157             :         /* protect against switching io scheduler  */
    4158           0 :         mutex_lock(&q->sysfs_lock);
    4159           0 :         for (i = 0; i < set->nr_hw_queues; i++) {
    4160             :                 int old_node;
    4161           0 :                 int node = blk_mq_get_hctx_node(set, i);
    4162           0 :                 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
    4163             : 
    4164           0 :                 if (old_hctx) {
    4165           0 :                         old_node = old_hctx->numa_node;
    4166           0 :                         blk_mq_exit_hctx(q, set, old_hctx, i);
    4167             :                 }
    4168             : 
    4169           0 :                 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
    4170           0 :                         if (!old_hctx)
    4171             :                                 break;
    4172           0 :                         pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
    4173             :                                         node, old_node);
    4174           0 :                         hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
    4175           0 :                         WARN_ON_ONCE(!hctx);
    4176             :                 }
    4177             :         }
    4178             :         /*
    4179             :          * If increasing nr_hw_queues failed, free the newly allocated
    4180             :          * hctxs and keep the previous q->nr_hw_queues.
    4181             :          */
    4182           0 :         if (i != set->nr_hw_queues) {
    4183           0 :                 j = q->nr_hw_queues;
    4184             :         } else {
    4185           0 :                 j = i;
    4186           0 :                 q->nr_hw_queues = set->nr_hw_queues;
    4187             :         }
    4188             : 
    4189           0 :         xa_for_each_start(&q->hctx_table, j, hctx, j)
    4190           0 :                 blk_mq_exit_hctx(q, set, hctx, j);
    4191           0 :         mutex_unlock(&q->sysfs_lock);
    4192           0 : }
    4193             : 
    4194           0 : static void blk_mq_update_poll_flag(struct request_queue *q)
    4195             : {
    4196           0 :         struct blk_mq_tag_set *set = q->tag_set;
    4197             : 
    4198           0 :         if (set->nr_maps > HCTX_TYPE_POLL &&
    4199           0 :             set->map[HCTX_TYPE_POLL].nr_queues)
    4200           0 :                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
    4201             :         else
    4202           0 :                 blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
    4203           0 : }
    4204             : 
    4205           0 : int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
    4206             :                 struct request_queue *q)
    4207             : {
    4208             :         /* mark the queue as mq asap */
    4209           0 :         q->mq_ops = set->ops;
    4210             : 
    4211           0 :         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
    4212             :                                              blk_mq_poll_stats_bkt,
    4213             :                                              BLK_MQ_POLL_STATS_BKTS, q);
    4214           0 :         if (!q->poll_cb)
    4215             :                 goto err_exit;
    4216             : 
    4217           0 :         if (blk_mq_alloc_ctxs(q))
    4218             :                 goto err_poll;
    4219             : 
    4220             :         /* init q->mq_kobj and sw queues' kobjects */
    4221           0 :         blk_mq_sysfs_init(q);
    4222             : 
    4223           0 :         INIT_LIST_HEAD(&q->unused_hctx_list);
    4224           0 :         spin_lock_init(&q->unused_hctx_lock);
    4225             : 
    4226           0 :         xa_init(&q->hctx_table);
    4227             : 
    4228           0 :         blk_mq_realloc_hw_ctxs(set, q);
    4229           0 :         if (!q->nr_hw_queues)
    4230             :                 goto err_hctxs;
    4231             : 
    4232           0 :         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
    4233           0 :         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
    4234             : 
    4235           0 :         q->tag_set = set;
    4236             : 
    4237           0 :         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
    4238           0 :         blk_mq_update_poll_flag(q);
    4239             : 
    4240           0 :         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
    4241           0 :         INIT_LIST_HEAD(&q->requeue_list);
    4242           0 :         spin_lock_init(&q->requeue_lock);
    4243             : 
    4244           0 :         q->nr_requests = set->queue_depth;
    4245             : 
    4246             :         /*
    4247             :          * Default to classic polling
    4248             :          */
    4249           0 :         q->poll_nsec = BLK_MQ_POLL_CLASSIC;
    4250             : 
    4251           0 :         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
    4252           0 :         blk_mq_add_queue_tag_set(set, q);
    4253           0 :         blk_mq_map_swqueue(q);
    4254           0 :         return 0;
    4255             : 
    4256             : err_hctxs:
    4257           0 :         blk_mq_release(q);
    4258             : err_poll:
    4259           0 :         blk_stat_free_callback(q->poll_cb);
    4260           0 :         q->poll_cb = NULL;
    4261             : err_exit:
    4262           0 :         q->mq_ops = NULL;
    4263           0 :         return -ENOMEM;
    4264             : }
    4265             : EXPORT_SYMBOL(blk_mq_init_allocated_queue);
    4266             : 
    4267             : /* tags can _not_ be used after returning from blk_mq_exit_queue */
    4268           0 : void blk_mq_exit_queue(struct request_queue *q)
    4269             : {
    4270           0 :         struct blk_mq_tag_set *set = q->tag_set;
    4271             : 
    4272             :         /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
    4273           0 :         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
    4274             :         /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
    4275           0 :         blk_mq_del_queue_tag_set(q);
    4276           0 : }
    4277             : 
    4278           0 : static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
    4279             : {
    4280             :         int i;
    4281             : 
    4282           0 :         if (blk_mq_is_shared_tags(set->flags)) {
    4283           0 :                 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
    4284             :                                                 BLK_MQ_NO_HCTX_IDX,
    4285             :                                                 set->queue_depth);
    4286           0 :                 if (!set->shared_tags)
    4287             :                         return -ENOMEM;
    4288             :         }
    4289             : 
    4290           0 :         for (i = 0; i < set->nr_hw_queues; i++) {
    4291           0 :                 if (!__blk_mq_alloc_map_and_rqs(set, i))
    4292             :                         goto out_unwind;
    4293           0 :                 cond_resched();
    4294             :         }
    4295             : 
    4296             :         return 0;
    4297             : 
    4298             : out_unwind:
    4299           0 :         while (--i >= 0)
    4300           0 :                 __blk_mq_free_map_and_rqs(set, i);
    4301             : 
    4302           0 :         if (blk_mq_is_shared_tags(set->flags)) {
    4303           0 :                 blk_mq_free_map_and_rqs(set, set->shared_tags,
    4304             :                                         BLK_MQ_NO_HCTX_IDX);
    4305             :         }
    4306             : 
    4307             :         return -ENOMEM;
    4308             : }
    4309             : 
    4310             : /*
    4311             :  * Allocate the request maps associated with this tag_set. Note that this
    4312             :  * may reduce the depth asked for, if memory is tight. set->queue_depth
    4313             :  * will be updated to reflect the allocated depth.
    4314             :  */
    4315           0 : static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
    4316             : {
    4317             :         unsigned int depth;
    4318             :         int err;
    4319             : 
    4320           0 :         depth = set->queue_depth;
    4321             :         do {
    4322           0 :                 err = __blk_mq_alloc_rq_maps(set);
    4323           0 :                 if (!err)
    4324             :                         break;
    4325             : 
    4326           0 :                 set->queue_depth >>= 1;
    4327           0 :                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
    4328             :                         err = -ENOMEM;
    4329             :                         break;
    4330             :                 }
    4331           0 :         } while (set->queue_depth);
    4332             : 
    4333           0 :         if (!set->queue_depth || err) {
    4334           0 :                 pr_err("blk-mq: failed to allocate request map\n");
    4335           0 :                 return -ENOMEM;
    4336             :         }
    4337             : 
    4338           0 :         if (depth != set->queue_depth)
    4339           0 :                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
    4340             :                                                 depth, set->queue_depth);
    4341             : 
    4342             :         return 0;
    4343             : }
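/*
 * Illustrative: if the initial allocation fails under memory pressure, the
 * loop above retries with the depth halved each time, e.g. 256 -> 128 -> 64,
 * and gives up once the depth would drop below
 * set->reserved_tags + BLK_MQ_TAG_MIN.
 */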
    4344             : 
    4345           0 : static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
    4346             : {
    4347             :         /*
    4348             :          * blk_mq_map_queues() and multiple .map_queues() implementations
    4349             :          * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
    4350             :          * number of hardware queues.
    4351             :          */
    4352           0 :         if (set->nr_maps == 1)
    4353           0 :                 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
    4354             : 
    4355           0 :         if (set->ops->map_queues && !is_kdump_kernel()) {
    4356             :                 int i;
    4357             : 
    4358             :                 /*
    4359             :                  * transport .map_queues is usually done in the following
    4360             :                  * way:
    4361             :                  *
    4362             :                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
    4363             :                  *      mask = get_cpu_mask(queue)
    4364             :                  *      for_each_cpu(cpu, mask)
    4365             :                  *              set->map[x].mq_map[cpu] = queue;
    4366             :                  * }
    4367             :                  *
    4368             :                  * When we need to remap, the table has to be cleared
    4369             :                  * first to kill any stale mappings, since a CPU may no
    4370             :                  * longer be mapped to any hw queue.
    4371             :                  */
    4372           0 :                 for (i = 0; i < set->nr_maps; i++)
    4373           0 :                         blk_mq_clear_mq_map(&set->map[i]);
    4374             : 
    4375           0 :                 set->ops->map_queues(set);
    4376             :         } else {
    4377           0 :                 BUG_ON(set->nr_maps > 1);
    4378           0 :                 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
    4379             :         }
    4380           0 : }
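/*
 * Illustrative sketch of the "transport .map_queues" pattern that the comment
 * in blk_mq_update_queue_map() above describes.  The per-queue affinity
 * helper my_queue_affinity() is hypothetical; PCI drivers usually let
 * blk_mq_pci_map_queues() or plain blk_mq_map_queues() do this instead.
 * Assumes the void-returning ->map_queues() callback used by this kernel.
 */
static void my_map_queues(struct blk_mq_tag_set *set)
{
	struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];
	unsigned int queue;
	int cpu;

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		const struct cpumask *mask = my_queue_affinity(queue);

		for_each_cpu(cpu, mask)
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
}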
    4381             : 
    4382           0 : static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
    4383             :                                        int new_nr_hw_queues)
    4384             : {
    4385             :         struct blk_mq_tags **new_tags;
    4386             : 
    4387           0 :         if (set->nr_hw_queues >= new_nr_hw_queues)
    4388             :                 goto done;
    4389             : 
    4390           0 :         new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
    4391             :                                 GFP_KERNEL, set->numa_node);
    4392           0 :         if (!new_tags)
    4393             :                 return -ENOMEM;
    4394             : 
    4395           0 :         if (set->tags)
    4396           0 :                 memcpy(new_tags, set->tags, set->nr_hw_queues *
    4397             :                        sizeof(*set->tags));
    4398           0 :         kfree(set->tags);
    4399           0 :         set->tags = new_tags;
    4400             : done:
    4401           0 :         set->nr_hw_queues = new_nr_hw_queues;
    4402           0 :         return 0;
    4403             : }
    4404             : 
    4405             : /*
    4406             :  * Alloc a tag set to be associated with one or more request queues.
    4407             :  * May fail with EINVAL for various error conditions. May adjust the
    4408             :  * requested depth down, if it's too large. In that case, the adjusted
    4409             :  * value will be stored in set->queue_depth.
    4410             :  */
    4411           0 : int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
    4412             : {
    4413             :         int i, ret;
    4414             : 
    4415             :         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
    4416             : 
    4417           0 :         if (!set->nr_hw_queues)
    4418             :                 return -EINVAL;
    4419           0 :         if (!set->queue_depth)
    4420             :                 return -EINVAL;
    4421           0 :         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
    4422             :                 return -EINVAL;
    4423             : 
    4424           0 :         if (!set->ops->queue_rq)
    4425             :                 return -EINVAL;
    4426             : 
    4427           0 :         if (!set->ops->get_budget ^ !set->ops->put_budget)
    4428             :                 return -EINVAL;
    4429             : 
    4430           0 :         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
    4431           0 :                 pr_info("blk-mq: reduced tag depth to %u\n",
    4432             :                         BLK_MQ_MAX_DEPTH);
    4433           0 :                 set->queue_depth = BLK_MQ_MAX_DEPTH;
    4434             :         }
    4435             : 
    4436           0 :         if (!set->nr_maps)
    4437           0 :                 set->nr_maps = 1;
    4438           0 :         else if (set->nr_maps > HCTX_MAX_TYPES)
    4439             :                 return -EINVAL;
    4440             : 
    4441             :         /*
    4442             :          * If a crashdump is active, then we are potentially in a very
    4443             :          * memory constrained environment. Limit us to 1 queue and
    4444             :          * 64 tags to prevent using too much memory.
    4445             :          */
    4446             :         if (is_kdump_kernel()) {
    4447             :                 set->nr_hw_queues = 1;
    4448             :                 set->nr_maps = 1;
    4449             :                 set->queue_depth = min(64U, set->queue_depth);
    4450             :         }
    4451             :         /*
    4452             :          * There is no use for more h/w queues than CPUs if we just have
    4453             :          * a single map.
    4454             :          */
    4455           0 :         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
    4456           0 :                 set->nr_hw_queues = nr_cpu_ids;
    4457             : 
    4458           0 :         if (set->flags & BLK_MQ_F_BLOCKING) {
    4459           0 :                 set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
    4460           0 :                 if (!set->srcu)
    4461             :                         return -ENOMEM;
    4462           0 :                 ret = init_srcu_struct(set->srcu);
    4463           0 :                 if (ret)
    4464             :                         goto out_free_srcu;
    4465             :         }
    4466             : 
    4467           0 :         ret = -ENOMEM;
    4468           0 :         set->tags = kcalloc_node(set->nr_hw_queues,
    4469             :                                  sizeof(struct blk_mq_tags *), GFP_KERNEL,
    4470             :                                  set->numa_node);
    4471           0 :         if (!set->tags)
    4472             :                 goto out_cleanup_srcu;
    4473             : 
    4474           0 :         for (i = 0; i < set->nr_maps; i++) {
    4475           0 :                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
    4476             :                                                   sizeof(set->map[i].mq_map[0]),
    4477             :                                                   GFP_KERNEL, set->numa_node);
    4478           0 :                 if (!set->map[i].mq_map)
    4479             :                         goto out_free_mq_map;
    4480           0 :                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
    4481             :         }
    4482             : 
    4483           0 :         blk_mq_update_queue_map(set);
    4484             : 
    4485           0 :         ret = blk_mq_alloc_set_map_and_rqs(set);
    4486           0 :         if (ret)
    4487             :                 goto out_free_mq_map;
    4488             : 
    4489           0 :         mutex_init(&set->tag_list_lock);
    4490           0 :         INIT_LIST_HEAD(&set->tag_list);
    4491             : 
    4492           0 :         return 0;
    4493             : 
    4494             : out_free_mq_map:
    4495           0 :         for (i = 0; i < set->nr_maps; i++) {
    4496           0 :                 kfree(set->map[i].mq_map);
    4497           0 :                 set->map[i].mq_map = NULL;
    4498             :         }
    4499           0 :         kfree(set->tags);
    4500           0 :         set->tags = NULL;
    4501             : out_cleanup_srcu:
    4502           0 :         if (set->flags & BLK_MQ_F_BLOCKING)
    4503           0 :                 cleanup_srcu_struct(set->srcu);
    4504             : out_free_srcu:
    4505           0 :         if (set->flags & BLK_MQ_F_BLOCKING)
    4506           0 :                 kfree(set->srcu);
    4507             :         return ret;
    4508             : }
    4509             : EXPORT_SYMBOL(blk_mq_alloc_tag_set);
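/*
 * Illustrative: a typical multi-queue driver filling in its tag set before
 * calling blk_mq_alloc_tag_set().  Names prefixed "my_" are hypothetical and
 * unwinding is trimmed; the fields shown are the ones validated above.
 */
static int my_setup_tag_set(struct blk_mq_tag_set *set)
{
	memset(set, 0, sizeof(*set));
	set->ops = &my_mq_ops;			/* must provide .queue_rq */
	set->nr_hw_queues = num_online_cpus();
	set->nr_maps = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct my_cmd);	/* per-request driver payload */
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	return blk_mq_alloc_tag_set(set);
}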
    4510             : 
    4511             : /* allocate and initialize a tagset for a simple single-queue device */
    4512           0 : int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
    4513             :                 const struct blk_mq_ops *ops, unsigned int queue_depth,
    4514             :                 unsigned int set_flags)
    4515             : {
    4516           0 :         memset(set, 0, sizeof(*set));
    4517           0 :         set->ops = ops;
    4518           0 :         set->nr_hw_queues = 1;
    4519           0 :         set->nr_maps = 1;
    4520           0 :         set->queue_depth = queue_depth;
    4521           0 :         set->numa_node = NUMA_NO_NODE;
    4522           0 :         set->flags = set_flags;
    4523           0 :         return blk_mq_alloc_tag_set(set);
    4524             : }
    4525             : EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
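/*
 * Illustrative: the usual pairing for a simple driver, a single-queue tag
 * set plus a gendisk on top of it.  Hypothetical names, unwinding trimmed:
 *
 *	ret = blk_mq_alloc_sq_tag_set(&my_tag_set, &my_mq_ops, 16,
 *				      BLK_MQ_F_SHOULD_MERGE);
 *	if (ret)
 *		return ret;
 *
 *	disk = blk_mq_alloc_disk(&my_tag_set, NULL);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(&my_tag_set);
 *		return PTR_ERR(disk);
 *	}
 */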
    4526             : 
    4527           0 : void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
    4528             : {
    4529             :         int i, j;
    4530             : 
    4531           0 :         for (i = 0; i < set->nr_hw_queues; i++)
    4532           0 :                 __blk_mq_free_map_and_rqs(set, i);
    4533             : 
    4534           0 :         if (blk_mq_is_shared_tags(set->flags)) {
    4535           0 :                 blk_mq_free_map_and_rqs(set, set->shared_tags,
    4536             :                                         BLK_MQ_NO_HCTX_IDX);
    4537             :         }
    4538             : 
    4539           0 :         for (j = 0; j < set->nr_maps; j++) {
    4540           0 :                 kfree(set->map[j].mq_map);
    4541           0 :                 set->map[j].mq_map = NULL;
    4542             :         }
    4543             : 
    4544           0 :         kfree(set->tags);
    4545           0 :         set->tags = NULL;
    4546           0 :         if (set->flags & BLK_MQ_F_BLOCKING) {
    4547           0 :                 cleanup_srcu_struct(set->srcu);
    4548           0 :                 kfree(set->srcu);
    4549             :         }
    4550           0 : }
    4551             : EXPORT_SYMBOL(blk_mq_free_tag_set);
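/*
 * Illustrative teardown order for the sketches above: remove the gendisk
 * first, drop the disk (and with it the queue) reference, and only then free
 * the tag set, since tags must outlive every queue that uses them.
 *
 *	del_gendisk(disk);
 *	put_disk(disk);
 *	blk_mq_free_tag_set(&my_tag_set);
 */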
    4552             : 
    4553           0 : int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
    4554             : {
    4555           0 :         struct blk_mq_tag_set *set = q->tag_set;
    4556             :         struct blk_mq_hw_ctx *hctx;
    4557             :         int ret;
    4558             :         unsigned long i;
    4559             : 
    4560           0 :         if (!set)
    4561             :                 return -EINVAL;
    4562             : 
    4563           0 :         if (q->nr_requests == nr)
    4564             :                 return 0;
    4565             : 
    4566           0 :         blk_mq_freeze_queue(q);
    4567           0 :         blk_mq_quiesce_queue(q);
    4568             : 
    4569           0 :         ret = 0;
    4570           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    4571           0 :                 if (!hctx->tags)
    4572           0 :                         continue;
    4573             :                 /*
    4574             :                  * If we're using an MQ scheduler, just update the scheduler
    4575             :                  * queue depth. This is similar to what the old code would do.
    4576             :                  */
    4577           0 :                 if (hctx->sched_tags) {
    4578           0 :                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
    4579             :                                                       nr, true);
    4580             :                 } else {
    4581           0 :                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
    4582             :                                                       false);
    4583             :                 }
    4584           0 :                 if (ret)
    4585             :                         break;
    4586           0 :                 if (q->elevator && q->elevator->type->ops.depth_updated)
    4587           0 :                         q->elevator->type->ops.depth_updated(hctx);
    4588             :         }
    4589           0 :         if (!ret) {
    4590           0 :                 q->nr_requests = nr;
    4591           0 :                 if (blk_mq_is_shared_tags(set->flags)) {
    4592           0 :                         if (q->elevator)
    4593           0 :                                 blk_mq_tag_update_sched_shared_tags(q);
    4594             :                         else
    4595           0 :                                 blk_mq_tag_resize_shared_tags(set, nr);
    4596             :                 }
    4597             :         }
    4598             : 
    4599           0 :         blk_mq_unquiesce_queue(q);
    4600           0 :         blk_mq_unfreeze_queue(q);
    4601             : 
    4602           0 :         return ret;
    4603             : }
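/*
 * Illustrative: blk_mq_update_nr_requests() is normally reached from the
 * queue's "nr_requests" sysfs attribute rather than called directly, e.g.
 *
 *	# echo 64 > /sys/block/<dev>/queue/nr_requests
 *
 * which ends up freezing and quiescing the queue as above before resizing
 * the (scheduler) tags.
 */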
    4604             : 
    4605             : /*
    4606             :  * request_queue and elevator_type pair.
    4607             :  * It is just used by __blk_mq_update_nr_hw_queues to cache
    4608             :  * the elevator_type associated with a request_queue.
    4609             :  */
    4610             : struct blk_mq_qe_pair {
    4611             :         struct list_head node;
    4612             :         struct request_queue *q;
    4613             :         struct elevator_type *type;
    4614             : };
    4615             : 
    4616             : /*
    4617             :  * Cache the elevator_type in the qe pair list and switch the
    4618             :  * io scheduler to 'none'.
    4619             :  */
    4620           0 : static bool blk_mq_elv_switch_none(struct list_head *head,
    4621             :                 struct request_queue *q)
    4622             : {
    4623             :         struct blk_mq_qe_pair *qe;
    4624             : 
    4625           0 :         if (!q->elevator)
    4626             :                 return true;
    4627             : 
    4628           0 :         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
    4629           0 :         if (!qe)
    4630             :                 return false;
    4631             : 
    4632             :         /* q->elevator needs protection from ->sysfs_lock */
    4633           0 :         mutex_lock(&q->sysfs_lock);
    4634             : 
    4635           0 :         INIT_LIST_HEAD(&qe->node);
    4636           0 :         qe->q = q;
    4637           0 :         qe->type = q->elevator->type;
    4638             :         /* keep a reference to the elevator module as we'll switch back */
    4639           0 :         __elevator_get(qe->type);
    4640           0 :         list_add(&qe->node, head);
    4641           0 :         elevator_disable(q);
    4642           0 :         mutex_unlock(&q->sysfs_lock);
    4643             : 
    4644           0 :         return true;
    4645             : }
    4646             : 
    4647             : static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
    4648             :                                                 struct request_queue *q)
    4649             : {
    4650             :         struct blk_mq_qe_pair *qe;
    4651             : 
    4652           0 :         list_for_each_entry(qe, head, node)
    4653           0 :                 if (qe->q == q)
    4654             :                         return qe;
    4655             : 
    4656             :         return NULL;
    4657             : }
    4658             : 
    4659           0 : static void blk_mq_elv_switch_back(struct list_head *head,
    4660             :                                   struct request_queue *q)
    4661             : {
    4662             :         struct blk_mq_qe_pair *qe;
    4663             :         struct elevator_type *t;
    4664             : 
    4665           0 :         qe = blk_lookup_qe_pair(head, q);
    4666           0 :         if (!qe)
    4667             :                 return;
    4668           0 :         t = qe->type;
    4669           0 :         list_del(&qe->node);
    4670           0 :         kfree(qe);
    4671             : 
    4672           0 :         mutex_lock(&q->sysfs_lock);
    4673           0 :         elevator_switch(q, t);
    4674             :         /* drop the reference acquired in blk_mq_elv_switch_none */
    4675           0 :         elevator_put(t);
    4676           0 :         mutex_unlock(&q->sysfs_lock);
    4677             : }
    4678             : 
    4679           0 : static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
    4680             :                                                         int nr_hw_queues)
    4681             : {
    4682             :         struct request_queue *q;
    4683           0 :         LIST_HEAD(head);
    4684             :         int prev_nr_hw_queues;
    4685             : 
    4686             :         lockdep_assert_held(&set->tag_list_lock);
    4687             : 
    4688           0 :         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
    4689           0 :                 nr_hw_queues = nr_cpu_ids;
    4690           0 :         if (nr_hw_queues < 1)
    4691           0 :                 return;
    4692           0 :         if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
    4693             :                 return;
    4694             : 
    4695           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list)
    4696           0 :                 blk_mq_freeze_queue(q);
    4697             :         /*
    4698             :          * Switch IO scheduler to 'none', cleaning up the data associated
    4699             :          * with the previous scheduler. We will switch back once we are done
    4700             :          * updating the new sw to hw queue mappings.
    4701             :          */
    4702           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list)
    4703           0 :                 if (!blk_mq_elv_switch_none(&head, q))
    4704             :                         goto switch_back;
    4705             : 
    4706           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
    4707           0 :                 blk_mq_debugfs_unregister_hctxs(q);
    4708           0 :                 blk_mq_sysfs_unregister_hctxs(q);
    4709             :         }
    4710             : 
    4711           0 :         prev_nr_hw_queues = set->nr_hw_queues;
    4712           0 :         if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
    4713             :                 goto reregister;
    4714             : 
    4715             : fallback:
    4716           0 :         blk_mq_update_queue_map(set);
    4717           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
    4718           0 :                 blk_mq_realloc_hw_ctxs(set, q);
    4719           0 :                 blk_mq_update_poll_flag(q);
    4720           0 :                 if (q->nr_hw_queues != set->nr_hw_queues) {
    4721           0 :                         int i = prev_nr_hw_queues;
    4722             : 
    4723           0 :                         pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
    4724             :                                         nr_hw_queues, prev_nr_hw_queues);
    4725           0 :                         for (; i < set->nr_hw_queues; i++)
    4726           0 :                                 __blk_mq_free_map_and_rqs(set, i);
    4727             : 
    4728           0 :                         set->nr_hw_queues = prev_nr_hw_queues;
    4729           0 :                         blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
    4730           0 :                         goto fallback;
    4731             :                 }
    4732           0 :                 blk_mq_map_swqueue(q);
    4733             :         }
    4734             : 
    4735             : reregister:
    4736           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
    4737           0 :                 blk_mq_sysfs_register_hctxs(q);
    4738           0 :                 blk_mq_debugfs_register_hctxs(q);
    4739             :         }
    4740             : 
    4741             : switch_back:
    4742           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list)
    4743           0 :                 blk_mq_elv_switch_back(&head, q);
    4744             : 
    4745           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list)
    4746           0 :                 blk_mq_unfreeze_queue(q);
    4747             : }
    4748             : 
    4749           0 : void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
    4750             : {
    4751           0 :         mutex_lock(&set->tag_list_lock);
    4752           0 :         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
    4753           0 :         mutex_unlock(&set->tag_list_lock);
    4754           0 : }
    4755             : EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
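/*
 * Illustrative: a driver that renegotiates its queue count (e.g. after a
 * controller reset) passes the new count here; as shown above, blk-mq clamps
 * it to nr_cpu_ids for single-map sets and falls back to the previous count
 * if hctx allocation fails.  "my_ctrl" and its fields are hypothetical:
 *
 *	unsigned int nr = min_t(unsigned int, my_ctrl->nr_io_queues,
 *				num_online_cpus());
 *
 *	blk_mq_update_nr_hw_queues(&my_ctrl->tag_set, nr);
 */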
    4756             : 
    4757             : /* Enable polling stats and return whether they were already enabled. */
    4758             : static bool blk_poll_stats_enable(struct request_queue *q)
    4759             : {
    4760           0 :         if (q->poll_stat)
    4761             :                 return true;
    4762             : 
    4763           0 :         return blk_stats_alloc_enable(q);
    4764             : }
    4765             : 
    4766             : static void blk_mq_poll_stats_start(struct request_queue *q)
    4767             : {
    4768             :         /*
    4769             :          * We don't arm the callback if polling stats are not enabled or the
    4770             :          * callback is already active.
    4771             :          */
    4772           0 :         if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
    4773             :                 return;
    4774             : 
    4775           0 :         blk_stat_activate_msecs(q->poll_cb, 100);
    4776             : }
    4777             : 
    4778           0 : static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
    4779             : {
    4780           0 :         struct request_queue *q = cb->data;
    4781             :         int bucket;
    4782             : 
    4783           0 :         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
    4784           0 :                 if (cb->stat[bucket].nr_samples)
    4785           0 :                         q->poll_stat[bucket] = cb->stat[bucket];
    4786             :         }
    4787           0 : }
    4788             : 
    4789           0 : static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
    4790             :                                        struct request *rq)
    4791             : {
    4792           0 :         unsigned long ret = 0;
    4793             :         int bucket;
    4794             : 
    4795             :         /*
    4796             :          * If stats collection isn't on, don't sleep but turn it on for
    4797             :          * future users
    4798             :          */
    4799           0 :         if (!blk_poll_stats_enable(q))
    4800             :                 return 0;
    4801             : 
    4802             :         /*
    4803             :          * As an optimistic guess, use half of the mean service time
    4804             :          * for this type of request. We can (and should) make this smarter.
    4805             :          * For instance, if the completion latencies are tight, we can
    4806             :          * get closer than just half the mean. This is especially
    4807             :          * important on devices where the completion latencies are longer
    4808             :          * than ~10 usec. We do use the stats for the relevant IO size
    4809             :          * if available which does lead to better estimates.
    4810             :          */
    4811           0 :         bucket = blk_mq_poll_stats_bkt(rq);
    4812           0 :         if (bucket < 0)
    4813             :                 return ret;
    4814             : 
    4815           0 :         if (q->poll_stat[bucket].nr_samples)
    4816           0 :                 ret = (q->poll_stat[bucket].mean + 1) / 2;
    4817             : 
    4818             :         return ret;
    4819             : }
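/*
 * Illustrative arithmetic for the estimate above: if the matching poll_stat
 * bucket reports a mean completion time of 18000 ns, hybrid polling will
 * sleep for (18000 + 1) / 2 = 9000 ns before it starts spinning.
 */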
    4820             : 
    4821           0 : static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
    4822             : {
    4823           0 :         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
    4824           0 :         struct request *rq = blk_qc_to_rq(hctx, qc);
    4825             :         struct hrtimer_sleeper hs;
    4826             :         enum hrtimer_mode mode;
    4827             :         unsigned int nsecs;
    4828             :         ktime_t kt;
    4829             : 
    4830             :         /*
    4831             :          * If a request has completed on a queue that uses an I/O scheduler, we
    4832             :          * won't get back a request from blk_qc_to_rq.
    4833             :          */
    4834           0 :         if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
    4835             :                 return false;
    4836             : 
    4837             :         /*
    4838             :          * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
    4839             :          *
    4840             :          *  0: use half of prev avg
    4841             :          * >0: use this specific value
    4842             :          */
    4843           0 :         if (q->poll_nsec > 0)
    4844           0 :                 nsecs = q->poll_nsec;
    4845             :         else
    4846           0 :                 nsecs = blk_mq_poll_nsecs(q, rq);
    4847             : 
    4848           0 :         if (!nsecs)
    4849             :                 return false;
    4850             : 
    4851           0 :         rq->rq_flags |= RQF_MQ_POLL_SLEPT;
    4852             : 
    4853             :         /*
    4854             :          * This will be replaced with the stats tracking code, using
    4855             :          * 'avg_completion_time / 2' as the pre-sleep target.
    4856             :          */
    4857           0 :         kt = nsecs;
    4858             : 
    4859           0 :         mode = HRTIMER_MODE_REL;
    4860           0 :         hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
    4861           0 :         hrtimer_set_expires(&hs.timer, kt);
    4862             : 
    4863             :         do {
    4864           0 :                 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
    4865             :                         break;
    4866           0 :                 set_current_state(TASK_UNINTERRUPTIBLE);
    4867           0 :                 hrtimer_sleeper_start_expires(&hs, mode);
    4868           0 :                 if (hs.task)
    4869           0 :                         io_schedule();
    4870           0 :                 hrtimer_cancel(&hs.timer);
    4871           0 :                 mode = HRTIMER_MODE_ABS;
    4872           0 :         } while (hs.task && !signal_pending(current));
    4873             : 
    4874           0 :         __set_current_state(TASK_RUNNING);
    4875           0 :         destroy_hrtimer_on_stack(&hs.timer);
    4876             : 
    4877             :         /*
    4878             :          * If we sleep, have the caller restart the poll loop to reset the
    4879             :          * state.  Like for the other success return cases, the caller is
    4880             :          * responsible for checking if the IO completed.  If the IO isn't
    4881             :          * complete, we'll get called again and will go straight to the busy
    4882             :          * poll loop.
    4883             :          */
    4884           0 :         return true;
    4885             : }
    4886             : 
    4887           0 : static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
    4888             :                                struct io_comp_batch *iob, unsigned int flags)
    4889             : {
    4890           0 :         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
    4891           0 :         long state = get_current_state();
    4892             :         int ret;
    4893             : 
    4894             :         do {
    4895           0 :                 ret = q->mq_ops->poll(hctx, iob);
    4896           0 :                 if (ret > 0) {
    4897           0 :                         __set_current_state(TASK_RUNNING);
    4898           0 :                         return ret;
    4899             :                 }
    4900             : 
    4901           0 :                 if (signal_pending_state(state, current))
    4902           0 :                         __set_current_state(TASK_RUNNING);
    4903           0 :                 if (task_is_running(current))
    4904             :                         return 1;
    4905             : 
    4906           0 :                 if (ret < 0 || (flags & BLK_POLL_ONESHOT))
    4907             :                         break;
    4908             :                 cpu_relax();
    4909           0 :         } while (!need_resched());
    4910             : 
    4911           0 :         __set_current_state(TASK_RUNNING);
    4912           0 :         return 0;
    4913             : }
    4914             : 
    4915           0 : int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
    4916             :                 unsigned int flags)
    4917             : {
    4918           0 :         if (!(flags & BLK_POLL_NOSLEEP) &&
    4919           0 :             q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
    4920           0 :                 if (blk_mq_poll_hybrid(q, cookie))
    4921             :                         return 1;
    4922             :         }
    4923           0 :         return blk_mq_poll_classic(q, cookie, iob, flags);
    4924             : }
    4925             : 
    4926           0 : unsigned int blk_mq_rq_cpu(struct request *rq)
    4927             : {
    4928           0 :         return rq->mq_ctx->cpu;
    4929             : }
    4930             : EXPORT_SYMBOL(blk_mq_rq_cpu);
    4931             : 
    4932           0 : void blk_mq_cancel_work_sync(struct request_queue *q)
    4933             : {
    4934             :         struct blk_mq_hw_ctx *hctx;
    4935             :         unsigned long i;
    4936             : 
    4937           0 :         cancel_delayed_work_sync(&q->requeue_work);
    4938             : 
    4939           0 :         queue_for_each_hw_ctx(q, hctx, i)
    4940           0 :                 cancel_delayed_work_sync(&hctx->run_work);
    4941           0 : }
    4942             : 
    4943           1 : static int __init blk_mq_init(void)
    4944             : {
    4945             :         int i;
    4946             : 
    4947           2 :         for_each_possible_cpu(i)
    4948           2 :                 init_llist_head(&per_cpu(blk_cpu_done, i));
    4949           1 :         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
    4950             : 
    4951           1 :         cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
    4952             :                                   "block/softirq:dead", NULL,
    4953             :                                   blk_softirq_cpu_dead);
    4954           1 :         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
    4955             :                                 blk_mq_hctx_notify_dead);
    4956           1 :         cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
    4957             :                                 blk_mq_hctx_notify_online,
    4958             :                                 blk_mq_hctx_notify_offline);
    4959           1 :         return 0;
    4960             : }
    4961             : subsys_initcall(blk_mq_init);

Generated by: LCOV version 1.14