LCOV - code coverage report
Current view: top level - block - blk-mq-sched.c (source / functions)
Test: coverage.info
Date: 2023-08-24 13:40:31
Coverage:  Lines: 0 / 208 (0.0 %)   Functions: 0 / 17 (0.0 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * blk-mq scheduling framework
       4             :  *
       5             :  * Copyright (C) 2016 Jens Axboe
       6             :  */
       7             : #include <linux/kernel.h>
       8             : #include <linux/module.h>
       9             : #include <linux/list_sort.h>
      10             : 
      11             : #include <trace/events/block.h>
      12             : 
      13             : #include "blk.h"
      14             : #include "blk-mq.h"
      15             : #include "blk-mq-debugfs.h"
      16             : #include "blk-mq-sched.h"
      17             : #include "blk-wbt.h"
      18             : 
      19             : /*
      20             :  * Mark a hardware queue as needing a restart.
      21             :  */
      22           0 : void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
      23             : {
      24           0 :         if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
      25             :                 return;
      26             : 
      27           0 :         set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
      28             : }
      29             : EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
      30             : 
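                        /*
                         * Clear the restart mark set above and kick the queue; reached via
                         * blk_mq_sched_restart() once a completing request may have freed up
                         * the resource (driver tag, budget) that dispatch was waiting for.
                         */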
      31           0 : void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
      32             : {
      33           0 :         clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
      34             : 
      35             :         /*
       36             :          * Order the clear_bit(SCHED_RESTART) above against the
       37             :          * list_empty_careful(&hctx->dispatch) check in blk_mq_run_hw_queue();
       38             :          * its pair is the barrier in blk_mq_dispatch_rq_list(). Without it, the
       39             :          * dispatch side might not see SCHED_RESTART cleared while the request it
       40             :          * just added to hctx->dispatch is missed by blk_mq_run_hw_queue() here.
      41             :          */
      42           0 :         smp_mb();
      43             : 
      44           0 :         blk_mq_run_hw_queue(hctx, true);
      45           0 : }
      46             : 
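                        /*
                         * list_sort() comparator: order requests by their mq_hctx pointer so
                         * that requests targeting the same hardware queue become adjacent and
                         * can be dispatched as one batch by blk_mq_dispatch_hctx_list().
                         */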
      47           0 : static int sched_rq_cmp(void *priv, const struct list_head *a,
      48             :                         const struct list_head *b)
      49             : {
      50           0 :         struct request *rqa = container_of(a, struct request, queuelist);
      51           0 :         struct request *rqb = container_of(b, struct request, queuelist);
      52             : 
      53           0 :         return rqa->mq_hctx > rqb->mq_hctx;
      54             : }
      55             : 
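                        /*
                         * Dispatch the leading run of requests on @rq_list that share the first
                         * entry's hctx. Requests for other hctxs are left on @rq_list for a
                         * later call; the return value is whatever blk_mq_dispatch_rq_list()
                         * reported for this batch.
                         */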
      56           0 : static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
      57             : {
      58           0 :         struct blk_mq_hw_ctx *hctx =
      59           0 :                 list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
      60             :         struct request *rq;
      61           0 :         LIST_HEAD(hctx_list);
      62           0 :         unsigned int count = 0;
      63             : 
      64           0 :         list_for_each_entry(rq, rq_list, queuelist) {
      65           0 :                 if (rq->mq_hctx != hctx) {
      66           0 :                         list_cut_before(&hctx_list, rq_list, &rq->queuelist);
      67             :                         goto dispatch;
      68             :                 }
      69           0 :                 count++;
      70             :         }
      71             :         list_splice_tail_init(rq_list, &hctx_list);
      72             : 
      73             : dispatch:
      74           0 :         return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
      75             : }
      76             : 
      77             : #define BLK_MQ_BUDGET_DELAY     3               /* ms units */
      78             : 
      79             : /*
      80             :  * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
      81             :  * its queue by itself in its completion handler, so we don't need to
       82             :  * restart the queue if .get_budget() fails to get the budget.
      83             :  *
      84             :  * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
      85             :  * be run again.  This is necessary to avoid starving flushes.
      86             :  */
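                        /*
                         * Besides -EAGAIN, the return value is 1 if at least one request was
                         * dispatched and 0 otherwise; blk_mq_do_dispatch_sched() keeps calling
                         * this helper for as long as it returns 1.
                         */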
      87           0 : static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
      88             : {
      89           0 :         struct request_queue *q = hctx->queue;
      90           0 :         struct elevator_queue *e = q->elevator;
      91           0 :         bool multi_hctxs = false, run_queue = false;
      92           0 :         bool dispatched = false, busy = false;
      93             :         unsigned int max_dispatch;
      94           0 :         LIST_HEAD(rq_list);
      95           0 :         int count = 0;
      96             : 
      97           0 :         if (hctx->dispatch_busy)
      98             :                 max_dispatch = 1;
      99             :         else
     100           0 :                 max_dispatch = hctx->queue->nr_requests;
     101             : 
     102             :         do {
     103             :                 struct request *rq;
     104             :                 int budget_token;
     105             : 
     106           0 :                 if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
     107             :                         break;
     108             : 
     109           0 :                 if (!list_empty_careful(&hctx->dispatch)) {
     110             :                         busy = true;
     111             :                         break;
     112             :                 }
     113             : 
     114           0 :                 budget_token = blk_mq_get_dispatch_budget(q);
     115           0 :                 if (budget_token < 0)
     116             :                         break;
     117             : 
     118           0 :                 rq = e->type->ops.dispatch_request(hctx);
     119           0 :                 if (!rq) {
     120             :                         blk_mq_put_dispatch_budget(q, budget_token);
     121             :                         /*
     122             :                          * We're releasing without dispatching. Holding the
     123             :                          * budget could have blocked any "hctx"s with the
     124             :                          * same queue and if we didn't dispatch then there's
     125             :                          * no guarantee anyone will kick the queue.  Kick it
     126             :                          * ourselves.
     127             :                          */
     128             :                         run_queue = true;
     129             :                         break;
     130             :                 }
     131             : 
     132           0 :                 blk_mq_set_rq_budget_token(rq, budget_token);
     133             : 
     134             :                 /*
     135             :                  * Now this rq owns the budget which has to be released
     136             :                  * if this rq won't be queued to driver via .queue_rq()
     137             :                  * in blk_mq_dispatch_rq_list().
     138             :                  */
     139           0 :                 list_add_tail(&rq->queuelist, &rq_list);
     140           0 :                 count++;
     141           0 :                 if (rq->mq_hctx != hctx)
     142           0 :                         multi_hctxs = true;
     143             : 
     144             :                 /*
      145             :                  * If we cannot get a tag for the request, stop dequeueing
      146             :                  * requests from the IO scheduler. We are unlikely to be able
      147             :                  * to submit them anyway, and it creates a false impression for
      148             :                  * the scheduling heuristics that the device can take more IO.
     149             :                  */
     150           0 :                 if (!blk_mq_get_driver_tag(rq))
     151             :                         break;
     152           0 :         } while (count < max_dispatch);
     153             : 
     154           0 :         if (!count) {
     155           0 :                 if (run_queue)
     156           0 :                         blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
     157           0 :         } else if (multi_hctxs) {
     158             :                 /*
      159             :                  * Some schedulers, such as bfq and deadline, may dequeue
      160             :                  * requests belonging to different hctxs in one pass.
      161             :                  *
      162             :                  * Sort the list by hctx so that batches of requests from
      163             :                  * the same hctx can be dispatched one hctx at a time.
     164             :                  */
     165           0 :                 list_sort(NULL, &rq_list, sched_rq_cmp);
     166             :                 do {
     167           0 :                         dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
     168           0 :                 } while (!list_empty(&rq_list));
     169             :         } else {
     170           0 :                 dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
     171             :         }
     172             : 
     173           0 :         if (busy)
     174             :                 return -EAGAIN;
     175           0 :         return !!dispatched;
     176             : }
     177             : 
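                        /*
                         * Keep asking the scheduler for requests while __blk_mq_do_dispatch_sched()
                         * reports progress. After roughly one second (jiffies + HZ) or when
                         * need_resched() is set, punt to an async queue run via
                         * blk_mq_delay_run_hw_queue() instead of monopolising this CPU.
                         */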
     178           0 : static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
     179             : {
     180           0 :         unsigned long end = jiffies + HZ;
     181             :         int ret;
     182             : 
     183             :         do {
     184           0 :                 ret = __blk_mq_do_dispatch_sched(hctx);
     185           0 :                 if (ret != 1)
     186             :                         break;
     187           0 :                 if (need_resched() || time_is_before_jiffies(end)) {
     188           0 :                         blk_mq_delay_run_hw_queue(hctx, 0);
     189           0 :                         break;
     190             :                 }
     191             :         } while (1);
     192             : 
     193           0 :         return ret;
     194             : }
     195             : 
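                        /*
                         * Return the software queue that follows @ctx on @hctx, wrapping around
                         * to the first one; used below for round-robin (fair) dispatch across
                         * the software queues.
                         */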
     196             : static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
     197             :                                           struct blk_mq_ctx *ctx)
     198             : {
     199           0 :         unsigned short idx = ctx->index_hw[hctx->type];
     200             : 
     201           0 :         if (++idx == hctx->nr_ctx)
     202           0 :                 idx = 0;
     203             : 
     204           0 :         return hctx->ctxs[idx];
     205             : }
     206             : 
     207             : /*
     208             :  * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
     209             :  * its queue by itself in its completion handler, so we don't need to
      210             :  * restart the queue if .get_budget() fails to get the budget.
     211             :  *
     212             :  * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
     213             :  * be run again.  This is necessary to avoid starving flushes.
     214             :  */
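                        /*
                         * This path is used when no elevator is attached: pull requests straight
                         * from the software queues, one at a time, rotating the starting ctx
                         * (hctx->dispatch_from) so that every software queue gets its turn.
                         */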
     215           0 : static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
     216             : {
     217           0 :         struct request_queue *q = hctx->queue;
     218           0 :         LIST_HEAD(rq_list);
     219           0 :         struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
     220           0 :         int ret = 0;
     221             :         struct request *rq;
     222             : 
     223             :         do {
     224             :                 int budget_token;
     225             : 
     226           0 :                 if (!list_empty_careful(&hctx->dispatch)) {
     227             :                         ret = -EAGAIN;
     228             :                         break;
     229             :                 }
     230             : 
     231           0 :                 if (!sbitmap_any_bit_set(&hctx->ctx_map))
     232             :                         break;
     233             : 
     234           0 :                 budget_token = blk_mq_get_dispatch_budget(q);
     235           0 :                 if (budget_token < 0)
     236             :                         break;
     237             : 
     238           0 :                 rq = blk_mq_dequeue_from_ctx(hctx, ctx);
     239           0 :                 if (!rq) {
     240           0 :                         blk_mq_put_dispatch_budget(q, budget_token);
     241             :                         /*
     242             :                          * We're releasing without dispatching. Holding the
     243             :                          * budget could have blocked any "hctx"s with the
     244             :                          * same queue and if we didn't dispatch then there's
     245             :                          * no guarantee anyone will kick the queue.  Kick it
     246             :                          * ourselves.
     247             :                          */
     248           0 :                         blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
     249           0 :                         break;
     250             :                 }
     251             : 
     252           0 :                 blk_mq_set_rq_budget_token(rq, budget_token);
     253             : 
     254             :                 /*
     255             :                  * Now this rq owns the budget which has to be released
     256             :                  * if this rq won't be queued to driver via .queue_rq()
     257             :                  * in blk_mq_dispatch_rq_list().
     258             :                  */
     259           0 :                 list_add(&rq->queuelist, &rq_list);
     260             : 
     261             :                 /* round robin for fair dispatch */
     262           0 :                 ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
     263             : 
     264           0 :         } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));
     265             : 
     266           0 :         WRITE_ONCE(hctx->dispatch_from, ctx);
     267           0 :         return ret;
     268             : }
     269             : 
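                        /*
                         * Core dispatch decision: drain any leftover requests on hctx->dispatch
                         * first, then either ask the elevator (blk_mq_do_dispatch_sched()), pull
                         * from the software queues one by one (blk_mq_do_dispatch_ctx()) when
                         * the device is busy, or flush all software queues at once. Returns
                         * -EAGAIN when hctx->dispatch turned out to be non-empty during dispatch,
                         * so that the caller re-runs the queue and flushes are not starved.
                         */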
     270           0 : static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
     271             : {
     272           0 :         bool need_dispatch = false;
     273           0 :         LIST_HEAD(rq_list);
     274             : 
     275             :         /*
     276             :          * If we have previous entries on our dispatch list, grab them first for
     277             :          * more fair dispatch.
     278             :          */
     279           0 :         if (!list_empty_careful(&hctx->dispatch)) {
     280           0 :                 spin_lock(&hctx->lock);
     281           0 :                 if (!list_empty(&hctx->dispatch))
     282           0 :                         list_splice_init(&hctx->dispatch, &rq_list);
     283           0 :                 spin_unlock(&hctx->lock);
     284             :         }
     285             : 
     286             :         /*
     287             :          * Only ask the scheduler for requests, if we didn't have residual
     288             :          * requests from the dispatch list. This is to avoid the case where
     289             :          * we only ever dispatch a fraction of the requests available because
     290             :          * of low device queue depth. Once we pull requests out of the IO
     291             :          * scheduler, we can no longer merge or sort them. So it's best to
     292             :          * leave them there for as long as we can. Mark the hw queue as
     293             :          * needing a restart in that case.
     294             :          *
     295             :          * We want to dispatch from the scheduler if there was nothing
     296             :          * on the dispatch list or we were able to dispatch from the
     297             :          * dispatch list.
     298             :          */
     299           0 :         if (!list_empty(&rq_list)) {
     300           0 :                 blk_mq_sched_mark_restart_hctx(hctx);
     301           0 :                 if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
     302             :                         return 0;
     303             :                 need_dispatch = true;
     304             :         } else {
     305           0 :                 need_dispatch = hctx->dispatch_busy;
     306             :         }
     307             : 
     308           0 :         if (hctx->queue->elevator)
     309           0 :                 return blk_mq_do_dispatch_sched(hctx);
     310             : 
      311             :         /* dequeue requests one by one from the sw queue if the queue is busy */
     312           0 :         if (need_dispatch)
     313           0 :                 return blk_mq_do_dispatch_ctx(hctx);
     314           0 :         blk_mq_flush_busy_ctxs(hctx, &rq_list);
     315           0 :         blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
     316           0 :         return 0;
     317             : }
     318             : 
     319           0 : void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
     320             : {
     321           0 :         struct request_queue *q = hctx->queue;
     322             : 
     323             :         /* RCU or SRCU read lock is needed before checking quiesced flag */
     324           0 :         if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
     325             :                 return;
     326             : 
     327           0 :         hctx->run++;
     328             : 
     329             :         /*
     330             :          * A return of -EAGAIN is an indication that hctx->dispatch is not
     331             :          * empty and we must run again in order to avoid starving flushes.
     332             :          */
     333           0 :         if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
     334           0 :                 if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
     335           0 :                         blk_mq_run_hw_queue(hctx, true);
     336             :         }
     337             : }
     338             : 
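                        /*
                         * Try to merge @bio into an already-queued request: delegate to the
                         * elevator's ->bio_merge() if one is set, otherwise scan the per-ctx
                         * software queue (only when BLK_MQ_F_SHOULD_MERGE is set). Returns true
                         * if the bio was merged and no new request is needed.
                         */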
     339           0 : bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
     340             :                 unsigned int nr_segs)
     341             : {
     342           0 :         struct elevator_queue *e = q->elevator;
     343             :         struct blk_mq_ctx *ctx;
     344             :         struct blk_mq_hw_ctx *hctx;
     345           0 :         bool ret = false;
     346             :         enum hctx_type type;
     347             : 
     348           0 :         if (e && e->type->ops.bio_merge) {
     349           0 :                 ret = e->type->ops.bio_merge(q, bio, nr_segs);
     350           0 :                 goto out_put;
     351             :         }
     352             : 
     353           0 :         ctx = blk_mq_get_ctx(q);
     354           0 :         hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
     355           0 :         type = hctx->type;
     356           0 :         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
     357           0 :             list_empty_careful(&ctx->rq_lists[type]))
     358             :                 goto out_put;
     359             : 
     360             :         /* default per sw-queue merge */
     361           0 :         spin_lock(&ctx->lock);
     362             :         /*
     363             :          * Reverse check our software queue for entries that we could
     364             :          * potentially merge with. Currently includes a hand-wavy stop
     365             :          * count of 8, to not spend too much time checking for merges.
     366             :          */
     367           0 :         if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
     368           0 :                 ret = true;
     369             : 
     370           0 :         spin_unlock(&ctx->lock);
     371             : out_put:
     372           0 :         return ret;
     373             : }
     374             : 
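                        /*
                         * Called when inserting @rq: let the elevator try to merge it into a
                         * pending request instead. Requests that become redundant as a result
                         * are collected on @free so the caller can release them.
                         */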
     375           0 : bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
     376             :                                    struct list_head *free)
     377             : {
     378           0 :         return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
     379             : }
     380             : EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
     381             : 
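                        /*
                         * With shared scheduler tags every hctx simply points at the queue-wide
                         * q->sched_shared_tags map; otherwise allocate a per-hctx tag map (and
                         * its requests) sized to q->nr_requests.
                         */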
     382           0 : static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
     383             :                                           struct blk_mq_hw_ctx *hctx,
     384             :                                           unsigned int hctx_idx)
     385             : {
     386           0 :         if (blk_mq_is_shared_tags(q->tag_set->flags)) {
     387           0 :                 hctx->sched_tags = q->sched_shared_tags;
     388             :                 return 0;
     389             :         }
     390             : 
     391           0 :         hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
     392           0 :                                                     q->nr_requests);
     393             : 
     394           0 :         if (!hctx->sched_tags)
     395             :                 return -ENOMEM;
     396             :         return 0;
     397             : }
     398             : 
     399             : static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
     400             : {
     401           0 :         blk_mq_free_rq_map(queue->sched_shared_tags);
     402           0 :         queue->sched_shared_tags = NULL;
     403             : }
     404             : 
     405             : /* called in queue's release handler, tagset has gone away */
     406           0 : static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
     407             : {
     408             :         struct blk_mq_hw_ctx *hctx;
     409             :         unsigned long i;
     410             : 
     411           0 :         queue_for_each_hw_ctx(q, hctx, i) {
     412           0 :                 if (hctx->sched_tags) {
     413           0 :                         if (!blk_mq_is_shared_tags(flags))
     414           0 :                                 blk_mq_free_rq_map(hctx->sched_tags);
     415           0 :                         hctx->sched_tags = NULL;
     416             :                 }
     417             :         }
     418             : 
     419           0 :         if (blk_mq_is_shared_tags(flags))
     420           0 :                 blk_mq_exit_sched_shared_tags(q);
     421           0 : }
     422             : 
     423           0 : static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
     424             : {
     425           0 :         struct blk_mq_tag_set *set = queue->tag_set;
     426             : 
     427             :         /*
     428             :          * Set initial depth at max so that we don't need to reallocate for
     429             :          * updating nr_requests.
     430             :          */
     431           0 :         queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
     432             :                                                 BLK_MQ_NO_HCTX_IDX,
     433             :                                                 MAX_SCHED_RQ);
     434           0 :         if (!queue->sched_shared_tags)
     435             :                 return -ENOMEM;
     436             : 
     437           0 :         blk_mq_tag_update_sched_shared_tags(queue);
     438             : 
     439           0 :         return 0;
     440             : }
     441             : 
     442             : /* caller must have a reference to @e, will grab another one if successful */
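                        /*
                         * Attach elevator @e to @q: pick a default nr_requests, allocate the
                         * scheduler tag maps (shared or per-hctx), call ->init_sched() and each
                         * hctx's ->init_hctx(), and register the debugfs attributes. Any failure
                         * unwinds everything allocated so far.
                         */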
     443           0 : int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
     444             : {
     445           0 :         unsigned int flags = q->tag_set->flags;
     446             :         struct blk_mq_hw_ctx *hctx;
     447             :         struct elevator_queue *eq;
     448             :         unsigned long i;
     449             :         int ret;
     450             : 
     451             :         /*
      452             :          * Default to twice the smaller of the hw queue depth and 128
      453             :          * (BLKDEV_DEFAULT_RQ), since we don't split into sync/async like
      454             :          * the old code did. Note that this is a per-hw-queue depth.
     455             :          */
     456           0 :         q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
     457             :                                    BLKDEV_DEFAULT_RQ);
     458             : 
     459           0 :         if (blk_mq_is_shared_tags(flags)) {
     460           0 :                 ret = blk_mq_init_sched_shared_tags(q);
     461           0 :                 if (ret)
     462             :                         return ret;
     463             :         }
     464             : 
     465           0 :         queue_for_each_hw_ctx(q, hctx, i) {
     466           0 :                 ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
     467           0 :                 if (ret)
     468             :                         goto err_free_map_and_rqs;
     469             :         }
     470             : 
     471           0 :         ret = e->ops.init_sched(q, e);
     472           0 :         if (ret)
     473             :                 goto err_free_map_and_rqs;
     474             : 
     475           0 :         mutex_lock(&q->debugfs_mutex);
     476           0 :         blk_mq_debugfs_register_sched(q);
     477           0 :         mutex_unlock(&q->debugfs_mutex);
     478             : 
     479           0 :         queue_for_each_hw_ctx(q, hctx, i) {
     480           0 :                 if (e->ops.init_hctx) {
     481           0 :                         ret = e->ops.init_hctx(hctx, i);
     482           0 :                         if (ret) {
     483           0 :                                 eq = q->elevator;
     484           0 :                                 blk_mq_sched_free_rqs(q);
     485           0 :                                 blk_mq_exit_sched(q, eq);
     486           0 :                                 kobject_put(&eq->kobj);
     487           0 :                                 return ret;
     488             :                         }
     489             :                 }
     490           0 :                 mutex_lock(&q->debugfs_mutex);
     491           0 :                 blk_mq_debugfs_register_sched_hctx(q, hctx);
     492           0 :                 mutex_unlock(&q->debugfs_mutex);
     493             :         }
     494             : 
     495             :         return 0;
     496             : 
     497             : err_free_map_and_rqs:
     498           0 :         blk_mq_sched_free_rqs(q);
     499           0 :         blk_mq_sched_tags_teardown(q, flags);
     500             : 
     501           0 :         q->elevator = NULL;
     502           0 :         return ret;
     503             : }
     504             : 
     505             : /*
     506             :  * called in either blk_queue_cleanup or elevator_switch, tagset
     507             :  * is required for freeing requests
     508             :  */
     509           0 : void blk_mq_sched_free_rqs(struct request_queue *q)
     510             : {
     511             :         struct blk_mq_hw_ctx *hctx;
     512             :         unsigned long i;
     513             : 
     514           0 :         if (blk_mq_is_shared_tags(q->tag_set->flags)) {
     515           0 :                 blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
     516             :                                 BLK_MQ_NO_HCTX_IDX);
     517             :         } else {
     518           0 :                 queue_for_each_hw_ctx(q, hctx, i) {
     519           0 :                         if (hctx->sched_tags)
     520           0 :                                 blk_mq_free_rqs(q->tag_set,
     521             :                                                 hctx->sched_tags, i);
     522             :                 }
     523             :         }
     524           0 : }
     525             : 
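                        /*
                         * Undo blk_mq_init_sched(): run ->exit_hctx() for every hctx that has
                         * scheduler data, unregister the debugfs entries, call ->exit_sched(),
                         * free the scheduler tag maps and clear q->elevator.
                         */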
     526           0 : void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
     527             : {
     528             :         struct blk_mq_hw_ctx *hctx;
     529             :         unsigned long i;
     530           0 :         unsigned int flags = 0;
     531             : 
     532           0 :         queue_for_each_hw_ctx(q, hctx, i) {
     533           0 :                 mutex_lock(&q->debugfs_mutex);
     534           0 :                 blk_mq_debugfs_unregister_sched_hctx(hctx);
     535           0 :                 mutex_unlock(&q->debugfs_mutex);
     536             : 
     537           0 :                 if (e->type->ops.exit_hctx && hctx->sched_data) {
     538           0 :                         e->type->ops.exit_hctx(hctx, i);
     539           0 :                         hctx->sched_data = NULL;
     540             :                 }
     541           0 :                 flags = hctx->flags;
     542             :         }
     543             : 
     544           0 :         mutex_lock(&q->debugfs_mutex);
     545           0 :         blk_mq_debugfs_unregister_sched(q);
     546           0 :         mutex_unlock(&q->debugfs_mutex);
     547             : 
     548           0 :         if (e->type->ops.exit_sched)
     549           0 :                 e->type->ops.exit_sched(e);
     550           0 :         blk_mq_sched_tags_teardown(q, flags);
     551           0 :         q->elevator = NULL;
     552           0 : }
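
The framework above drives an I/O scheduler purely through the elevator_type ops it invokes: ->has_work(), ->dispatch_request(), ->bio_merge(), ->init_sched(), ->init_hctx(), ->exit_hctx() and ->exit_sched(). As a rough illustration of how those hooks fit together, here is a minimal sketch of a FIFO-style scheduler. The hook signatures are taken from the calls in this file; everything else (the "sketch-fifo" name, the sketch_* helpers, the use of elevator_alloc() and the header choices) is an assumption modelled on how in-tree schedulers are typically structured. The insertion-side hooks (->insert_requests() and the merge callbacks) that a real scheduler must also provide are omitted.

/* Hypothetical sketch, not part of blk-mq-sched.c. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

#include "elevator.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Per-hctx FIFO; a real scheduler would fill it from ->insert_requests(). */
struct sketch_hctx_data {
	spinlock_t lock;
	struct list_head fifo;
};

static int sketch_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct sketch_hctx_data *shd;

	shd = kzalloc_node(sizeof(*shd), GFP_KERNEL, hctx->numa_node);
	if (!shd)
		return -ENOMEM;
	spin_lock_init(&shd->lock);
	INIT_LIST_HEAD(&shd->fifo);
	hctx->sched_data = shd;		/* freed again in ->exit_hctx() */
	return 0;
}

static void sketch_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	kfree(hctx->sched_data);
	hctx->sched_data = NULL;
}

/* ->has_work() is polled by __blk_mq_do_dispatch_sched() before each dequeue. */
static bool sketch_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct sketch_hctx_data *shd = hctx->sched_data;

	return !list_empty_careful(&shd->fifo);
}

/* ->dispatch_request() hands the framework one request, or NULL if none. */
static struct request *sketch_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct sketch_hctx_data *shd = hctx->sched_data;
	struct request *rq = NULL;

	spin_lock(&shd->lock);
	if (!list_empty(&shd->fifo)) {
		rq = list_first_entry(&shd->fifo, struct request, queuelist);
		list_del_init(&rq->queuelist);
	}
	spin_unlock(&shd->lock);
	return rq;
}

static int sketch_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct elevator_queue *eq;

	/* Assumption: elevator_alloc() is how in-tree schedulers set up q->elevator. */
	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;
	q->elevator = eq;
	return 0;
}

static void sketch_exit_sched(struct elevator_queue *e)
{
	/* Nothing to do: the per-hctx data was freed by ->exit_hctx(). */
}

static struct elevator_type sketch_sched = {
	.ops = {
		.init_sched		= sketch_init_sched,
		.exit_sched		= sketch_exit_sched,
		.init_hctx		= sketch_init_hctx,
		.exit_hctx		= sketch_exit_hctx,
		.has_work		= sketch_has_work,
		.dispatch_request	= sketch_dispatch_request,
	},
	.elevator_name	= "sketch-fifo",
	.elevator_owner	= THIS_MODULE,
};

A real scheduler module would then register this elevator_type with elv_register() from its module init function and unregister it with elv_unregister() on exit, which is how blk_mq_init_sched() above gets to see it when the user switches schedulers.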

Generated by: LCOV version 1.14