LCOV - code coverage report
Current view: top level - block - kyber-iosched.c (source / functions)
                                                   Hit     Total   Coverage
Test: coverage.info                     Lines:       2       281      0.7 %
Date: 2023-03-27 20:00:47               Functions:   1        27      3.7 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * The Kyber I/O scheduler. Controls latency by throttling queue depths using
       4             :  * scalable techniques.
       5             :  *
       6             :  * Copyright (C) 2017 Facebook
       7             :  */
       8             : 
       9             : #include <linux/kernel.h>
      10             : #include <linux/blkdev.h>
      11             : #include <linux/blk-mq.h>
      12             : #include <linux/module.h>
      13             : #include <linux/sbitmap.h>
      14             : 
      15             : #include <trace/events/block.h>
      16             : 
      17             : #include "elevator.h"
      18             : #include "blk.h"
      19             : #include "blk-mq.h"
      20             : #include "blk-mq-debugfs.h"
      21             : #include "blk-mq-sched.h"
      22             : #include "blk-mq-tag.h"
      23             : 
      24             : #define CREATE_TRACE_POINTS
      25             : #include <trace/events/kyber.h>
      26             : 
      27             : /*
      28             :  * Scheduling domains: the device is divided into multiple domains based on the
      29             :  * request type.
      30             :  */
      31             : enum {
      32             :         KYBER_READ,
      33             :         KYBER_WRITE,
      34             :         KYBER_DISCARD,
      35             :         KYBER_OTHER,
      36             :         KYBER_NUM_DOMAINS,
      37             : };
      38             : 
      39             : static const char *kyber_domain_names[] = {
      40             :         [KYBER_READ] = "READ",
      41             :         [KYBER_WRITE] = "WRITE",
      42             :         [KYBER_DISCARD] = "DISCARD",
      43             :         [KYBER_OTHER] = "OTHER",
      44             : };
      45             : 
      46             : enum {
      47             :         /*
      48             :          * In order to prevent starvation of synchronous requests by a flood of
      49             :          * asynchronous requests, we reserve 25% of requests for synchronous
      50             :          * operations.
      51             :          */
      52             :         KYBER_ASYNC_PERCENT = 75,
      53             : };
      54             : 
      55             : /*
      56             :  * Maximum device-wide depth for each scheduling domain.
      57             :  *
      58             :  * Even for fast devices with lots of tags like NVMe, you can saturate the
      59             :  * device with only a fraction of the maximum possible queue depth. So, we cap
      60             :  * these to a reasonable value.
      61             :  */
      62             : static const unsigned int kyber_depth[] = {
      63             :         [KYBER_READ] = 256,
      64             :         [KYBER_WRITE] = 128,
      65             :         [KYBER_DISCARD] = 64,
      66             :         [KYBER_OTHER] = 16,
      67             : };
      68             : 
      69             : /*
      70             :  * Default latency targets for each scheduling domain.
      71             :  */
      72             : static const u64 kyber_latency_targets[] = {
      73             :         [KYBER_READ] = 2ULL * NSEC_PER_MSEC,
      74             :         [KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
      75             :         [KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
      76             : };
      77             : 
      78             : /*
      79             :  * Batch size (number of requests we'll dispatch in a row) for each scheduling
      80             :  * domain.
      81             :  */
      82             : static const unsigned int kyber_batch_size[] = {
      83             :         [KYBER_READ] = 16,
      84             :         [KYBER_WRITE] = 8,
      85             :         [KYBER_DISCARD] = 1,
      86             :         [KYBER_OTHER] = 1,
      87             : };
      88             : 
      89             : /*
       90             :  * Request latencies are recorded in a histogram with buckets defined relative
      91             :  * to the target latency:
      92             :  *
      93             :  * <= 1/4 * target latency
      94             :  * <= 1/2 * target latency
      95             :  * <= 3/4 * target latency
      96             :  * <= target latency
      97             :  * <= 1 1/4 * target latency
      98             :  * <= 1 1/2 * target latency
      99             :  * <= 1 3/4 * target latency
     100             :  * > 1 3/4 * target latency
     101             :  */
     102             : enum {
     103             :         /*
     104             :          * The width of the latency histogram buckets is
     105             :          * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
     106             :          */
     107             :         KYBER_LATENCY_SHIFT = 2,
     108             :         /*
     109             :          * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
     110             :          * thus, "good".
     111             :          */
     112             :         KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
     113             :         /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
     114             :         KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
     115             : };
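                      : 
                      : /*
                      :  * A quick worked example of the bucket layout (illustrative only, using the
                      :  * default 2 ms KYBER_READ target from kyber_latency_targets[]): the bucket
                      :  * width is 2 ms >> KYBER_LATENCY_SHIFT = 500 us, so samples land in
                      :  * <= 0.5 ms, <= 1 ms, <= 1.5 ms, <= 2 ms (the four "good" buckets), then
                      :  * <= 2.5 ms, <= 3 ms, <= 3.5 ms, and > 3.5 ms (the four "bad" buckets).
                      :  */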
     116             : 
     117             : /*
     118             :  * We measure both the total latency and the I/O latency (i.e., latency after
     119             :  * submitting to the device).
     120             :  */
     121             : enum {
     122             :         KYBER_TOTAL_LATENCY,
     123             :         KYBER_IO_LATENCY,
     124             : };
     125             : 
     126             : static const char *kyber_latency_type_names[] = {
     127             :         [KYBER_TOTAL_LATENCY] = "total",
     128             :         [KYBER_IO_LATENCY] = "I/O",
     129             : };
     130             : 
     131             : /*
     132             :  * Per-cpu latency histograms: total latency and I/O latency for each scheduling
     133             :  * domain except for KYBER_OTHER.
     134             :  */
     135             : struct kyber_cpu_latency {
     136             :         atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
     137             : };
     138             : 
     139             : /*
      140             :  * ctx & hctx and kcq & khd share the same mapping, so we use
      141             :  * request->mq_ctx->index_hw to index the kcq in khd.
     142             :  */
     143             : struct kyber_ctx_queue {
     144             :         /*
      145             :          * Ensures that operations on rq_list and kcq_map are atomic.
      146             :          * Also protects the requests on rq_list during merges.
     147             :          */
     148             :         spinlock_t lock;
     149             :         struct list_head rq_list[KYBER_NUM_DOMAINS];
     150             : } ____cacheline_aligned_in_smp;
     151             : 
     152             : struct kyber_queue_data {
     153             :         struct request_queue *q;
     154             :         dev_t dev;
     155             : 
     156             :         /*
     157             :          * Each scheduling domain has a limited number of in-flight requests
      158             :          * device-wide, enforced by these tokens.
     159             :          */
     160             :         struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
     161             : 
     162             :         /*
     163             :          * Async request percentage, converted to per-word depth for
     164             :          * sbitmap_get_shallow().
     165             :          */
     166             :         unsigned int async_depth;
     167             : 
     168             :         struct kyber_cpu_latency __percpu *cpu_latency;
     169             : 
     170             :         /* Timer for stats aggregation and adjusting domain tokens. */
     171             :         struct timer_list timer;
     172             : 
     173             :         unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
     174             : 
     175             :         unsigned long latency_timeout[KYBER_OTHER];
     176             : 
     177             :         int domain_p99[KYBER_OTHER];
     178             : 
     179             :         /* Target latencies in nanoseconds. */
     180             :         u64 latency_targets[KYBER_OTHER];
     181             : };
     182             : 
     183             : struct kyber_hctx_data {
     184             :         spinlock_t lock;
     185             :         struct list_head rqs[KYBER_NUM_DOMAINS];
     186             :         unsigned int cur_domain;
     187             :         unsigned int batching;
     188             :         struct kyber_ctx_queue *kcqs;
     189             :         struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
     190             :         struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
     191             :         struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
     192             :         atomic_t wait_index[KYBER_NUM_DOMAINS];
     193             : };
     194             : 
     195             : static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
     196             :                              void *key);
     197             : 
     198             : static unsigned int kyber_sched_domain(blk_opf_t opf)
     199             : {
     200           0 :         switch (opf & REQ_OP_MASK) {
     201             :         case REQ_OP_READ:
     202             :                 return KYBER_READ;
     203             :         case REQ_OP_WRITE:
     204             :                 return KYBER_WRITE;
     205             :         case REQ_OP_DISCARD:
     206             :                 return KYBER_DISCARD;
     207             :         default:
     208             :                 return KYBER_OTHER;
     209             :         }
     210             : }
     211             : 
     212             : static void flush_latency_buckets(struct kyber_queue_data *kqd,
     213             :                                   struct kyber_cpu_latency *cpu_latency,
     214             :                                   unsigned int sched_domain, unsigned int type)
     215             : {
     216           0 :         unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
     217           0 :         atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
     218             :         unsigned int bucket;
     219             : 
     220           0 :         for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
     221           0 :                 buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
     222             : }
     223             : 
     224             : /*
     225             :  * Calculate the histogram bucket with the given percentile rank, or -1 if there
     226             :  * aren't enough samples yet.
     227             :  */
     228           0 : static int calculate_percentile(struct kyber_queue_data *kqd,
     229             :                                 unsigned int sched_domain, unsigned int type,
     230             :                                 unsigned int percentile)
     231             : {
     232           0 :         unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
     233           0 :         unsigned int bucket, samples = 0, percentile_samples;
     234             : 
     235           0 :         for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
     236           0 :                 samples += buckets[bucket];
     237             : 
     238           0 :         if (!samples)
     239             :                 return -1;
     240             : 
     241             :         /*
     242             :          * We do the calculation once we have 500 samples or one second passes
     243             :          * since the first sample was recorded, whichever comes first.
     244             :          */
     245           0 :         if (!kqd->latency_timeout[sched_domain])
     246           0 :                 kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
     247           0 :         if (samples < 500 &&
     248           0 :             time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
     249             :                 return -1;
     250             :         }
     251           0 :         kqd->latency_timeout[sched_domain] = 0;
     252             : 
     253           0 :         percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
     254           0 :         for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
     255           0 :                 if (buckets[bucket] >= percentile_samples)
     256             :                         break;
     257           0 :                 percentile_samples -= buckets[bucket];
     258             :         }
     259           0 :         memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
     260             : 
     261           0 :         trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
     262             :                             kyber_latency_type_names[type], percentile,
     263             :                             bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
     264             : 
     265           0 :         return bucket;
     266             : }
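                      : 
                      : /*
                      :  * A small worked example of the walk above (illustrative numbers, not
                      :  * measured data): with buckets = {300, 150, 60, 30, 30, 15, 10, 5} we have
                      :  * samples = 600, so for the p90 percentile_samples = DIV_ROUND_UP(600 * 90,
                      :  * 100) = 540.  The loop skips buckets 0-2 (300 + 150 + 60 = 510 samples)
                      :  * and stops at bucket 3, whose 30 samples cover the 540th; bucket 3 is
                      :  * returned, which is still below KYBER_GOOD_BUCKETS, i.e. within the
                      :  * target latency.
                      :  */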
     267             : 
     268             : static void kyber_resize_domain(struct kyber_queue_data *kqd,
     269             :                                 unsigned int sched_domain, unsigned int depth)
     270             : {
     271           0 :         depth = clamp(depth, 1U, kyber_depth[sched_domain]);
     272           0 :         if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
     273           0 :                 sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
     274           0 :                 trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
     275             :                                    depth);
     276             :         }
     277             : }
     278             : 
     279           0 : static void kyber_timer_fn(struct timer_list *t)
     280             : {
     281           0 :         struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
     282             :         unsigned int sched_domain;
     283             :         int cpu;
     284           0 :         bool bad = false;
     285             : 
     286             :         /* Sum all of the per-cpu latency histograms. */
     287           0 :         for_each_online_cpu(cpu) {
     288             :                 struct kyber_cpu_latency *cpu_latency;
     289             : 
     290           0 :                 cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
     291           0 :                 for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
     292           0 :                         flush_latency_buckets(kqd, cpu_latency, sched_domain,
     293             :                                               KYBER_TOTAL_LATENCY);
     294           0 :                         flush_latency_buckets(kqd, cpu_latency, sched_domain,
     295             :                                               KYBER_IO_LATENCY);
     296             :                 }
     297             :         }
     298             : 
     299             :         /*
     300             :          * Check if any domains have a high I/O latency, which might indicate
     301             :          * congestion in the device. Note that we use the p90; we don't want to
     302             :          * be too sensitive to outliers here.
     303             :          */
     304           0 :         for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
     305             :                 int p90;
     306             : 
     307           0 :                 p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
     308             :                                            90);
     309           0 :                 if (p90 >= KYBER_GOOD_BUCKETS)
     310           0 :                         bad = true;
     311             :         }
     312             : 
     313             :         /*
     314             :          * Adjust the scheduling domain depths. If we determined that there was
     315             :          * congestion, we throttle all domains with good latencies. Either way,
     316             :          * we ease up on throttling domains with bad latencies.
     317             :          */
     318           0 :         for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
     319             :                 unsigned int orig_depth, depth;
     320             :                 int p99;
     321             : 
     322           0 :                 p99 = calculate_percentile(kqd, sched_domain,
     323             :                                            KYBER_TOTAL_LATENCY, 99);
     324             :                 /*
     325             :                  * This is kind of subtle: different domains will not
     326             :                  * necessarily have enough samples to calculate the latency
     327             :                  * percentiles during the same window, so we have to remember
     328             :                  * the p99 for the next time we observe congestion; once we do,
     329             :                  * we don't want to throttle again until we get more data, so we
     330             :                  * reset it to -1.
     331             :                  */
     332           0 :                 if (bad) {
     333           0 :                         if (p99 < 0)
     334           0 :                                 p99 = kqd->domain_p99[sched_domain];
     335           0 :                         kqd->domain_p99[sched_domain] = -1;
     336           0 :                 } else if (p99 >= 0) {
     337           0 :                         kqd->domain_p99[sched_domain] = p99;
     338             :                 }
     339           0 :                 if (p99 < 0)
     340           0 :                         continue;
     341             : 
     342             :                 /*
     343             :                  * If this domain has bad latency, throttle less. Otherwise,
     344             :                  * throttle more iff we determined that there is congestion.
     345             :                  *
     346             :                  * The new depth is scaled linearly with the p99 latency vs the
     347             :                  * latency target. E.g., if the p99 is 3/4 of the target, then
     348             :                  * we throttle down to 3/4 of the current depth, and if the p99
     349             :                  * is 2x the target, then we double the depth.
     350             :                  */
     351           0 :                 if (bad || p99 >= KYBER_GOOD_BUCKETS) {
     352           0 :                         orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
     353           0 :                         depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
     354             :                         kyber_resize_domain(kqd, sched_domain, depth);
     355             :                 }
     356             :         }
     357           0 : }
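                      : 
                      : /*
                      :  * To make the scaling above concrete (hypothetical numbers): with
                      :  * KYBER_LATENCY_SHIFT = 2, a domain at depth 64 whose p99 landed in
                      :  * bucket 5 (latency between 1.25x and 1.5x the target) is resized to
                      :  * (64 * (5 + 1)) >> 2 = 96, easing up on throttling.  Under congestion,
                      :  * the same domain with a p99 in bucket 2 (<= 3/4 of the target) would be
                      :  * cut to (64 * 3) >> 2 = 48.  kyber_resize_domain() then clamps the result
                      :  * to [1, kyber_depth[domain]].
                      :  */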
     358             : 
     359           0 : static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
     360             : {
     361             :         struct kyber_queue_data *kqd;
     362           0 :         int ret = -ENOMEM;
     363             :         int i;
     364             : 
     365           0 :         kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
     366           0 :         if (!kqd)
     367             :                 goto err;
     368             : 
     369           0 :         kqd->q = q;
     370           0 :         kqd->dev = disk_devt(q->disk);
     371             : 
     372           0 :         kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
     373             :                                             GFP_KERNEL | __GFP_ZERO);
     374           0 :         if (!kqd->cpu_latency)
     375             :                 goto err_kqd;
     376             : 
     377           0 :         timer_setup(&kqd->timer, kyber_timer_fn, 0);
     378             : 
     379           0 :         for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
     380           0 :                 WARN_ON(!kyber_depth[i]);
     381           0 :                 WARN_ON(!kyber_batch_size[i]);
     382           0 :                 ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
     383             :                                               kyber_depth[i], -1, false,
     384             :                                               GFP_KERNEL, q->node);
     385           0 :                 if (ret) {
     386           0 :                         while (--i >= 0)
     387           0 :                                 sbitmap_queue_free(&kqd->domain_tokens[i]);
     388             :                         goto err_buckets;
     389             :                 }
     390             :         }
     391             : 
     392           0 :         for (i = 0; i < KYBER_OTHER; i++) {
     393           0 :                 kqd->domain_p99[i] = -1;
     394           0 :                 kqd->latency_targets[i] = kyber_latency_targets[i];
     395             :         }
     396             : 
     397             :         return kqd;
     398             : 
     399             : err_buckets:
     400           0 :         free_percpu(kqd->cpu_latency);
     401             : err_kqd:
     402           0 :         kfree(kqd);
     403             : err:
     404           0 :         return ERR_PTR(ret);
     405             : }
     406             : 
     407           0 : static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
     408             : {
     409             :         struct kyber_queue_data *kqd;
     410             :         struct elevator_queue *eq;
     411             : 
     412           0 :         eq = elevator_alloc(q, e);
     413           0 :         if (!eq)
     414             :                 return -ENOMEM;
     415             : 
     416           0 :         kqd = kyber_queue_data_alloc(q);
     417           0 :         if (IS_ERR(kqd)) {
     418           0 :                 kobject_put(&eq->kobj);
     419           0 :                 return PTR_ERR(kqd);
     420             :         }
     421             : 
     422           0 :         blk_stat_enable_accounting(q);
     423             : 
     424           0 :         blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
     425             : 
     426           0 :         eq->elevator_data = kqd;
     427           0 :         q->elevator = eq;
     428             : 
     429           0 :         return 0;
     430             : }
     431             : 
     432           0 : static void kyber_exit_sched(struct elevator_queue *e)
     433             : {
     434           0 :         struct kyber_queue_data *kqd = e->elevator_data;
     435             :         int i;
     436             : 
     437           0 :         timer_shutdown_sync(&kqd->timer);
     438           0 :         blk_stat_disable_accounting(kqd->q);
     439             : 
     440           0 :         for (i = 0; i < KYBER_NUM_DOMAINS; i++)
     441           0 :                 sbitmap_queue_free(&kqd->domain_tokens[i]);
     442           0 :         free_percpu(kqd->cpu_latency);
     443           0 :         kfree(kqd);
     444           0 : }
     445             : 
     446             : static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
     447             : {
     448             :         unsigned int i;
     449             : 
     450           0 :         spin_lock_init(&kcq->lock);
     451           0 :         for (i = 0; i < KYBER_NUM_DOMAINS; i++)
     452           0 :                 INIT_LIST_HEAD(&kcq->rq_list[i]);
     453             : }
     454             : 
     455           0 : static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
     456             : {
     457           0 :         struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
     458           0 :         struct blk_mq_tags *tags = hctx->sched_tags;
     459           0 :         unsigned int shift = tags->bitmap_tags.sb.shift;
     460             : 
     461           0 :         kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
     462             : 
     463           0 :         sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
     464           0 : }
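                      : 
                      : /*
                      :  * For illustration (the actual shift depends on how the sched-tag sbitmap
                      :  * was sized): with a per-word shift of 6, i.e. 64 bits per sbitmap word,
                      :  * async_depth becomes 64 * 75 / 100 = 48, so asynchronous requests may
                      :  * only consume 48 of every 64 tags in a word and roughly a quarter of the
                      :  * tag space stays reserved for synchronous I/O.
                      :  */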
     465             : 
     466           0 : static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
     467             : {
     468             :         struct kyber_hctx_data *khd;
     469             :         int i;
     470             : 
     471           0 :         khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
     472           0 :         if (!khd)
     473             :                 return -ENOMEM;
     474             : 
     475           0 :         khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
     476             :                                        sizeof(struct kyber_ctx_queue),
     477           0 :                                        GFP_KERNEL, hctx->numa_node);
     478           0 :         if (!khd->kcqs)
     479             :                 goto err_khd;
     480             : 
     481           0 :         for (i = 0; i < hctx->nr_ctx; i++)
     482           0 :                 kyber_ctx_queue_init(&khd->kcqs[i]);
     483             : 
     484           0 :         for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
     485           0 :                 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
     486           0 :                                       ilog2(8), GFP_KERNEL, hctx->numa_node,
     487             :                                       false, false)) {
     488           0 :                         while (--i >= 0)
     489           0 :                                 sbitmap_free(&khd->kcq_map[i]);
     490             :                         goto err_kcqs;
     491             :                 }
     492             :         }
     493             : 
     494             :         spin_lock_init(&khd->lock);
     495             : 
     496           0 :         for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
     497           0 :                 INIT_LIST_HEAD(&khd->rqs[i]);
     498           0 :                 khd->domain_wait[i].sbq = NULL;
     499           0 :                 init_waitqueue_func_entry(&khd->domain_wait[i].wait,
     500             :                                           kyber_domain_wake);
     501           0 :                 khd->domain_wait[i].wait.private = hctx;
     502           0 :                 INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
     503           0 :                 atomic_set(&khd->wait_index[i], 0);
     504             :         }
     505             : 
     506           0 :         khd->cur_domain = 0;
     507           0 :         khd->batching = 0;
     508             : 
     509           0 :         hctx->sched_data = khd;
     510           0 :         kyber_depth_updated(hctx);
     511             : 
     512           0 :         return 0;
     513             : 
     514             : err_kcqs:
     515           0 :         kfree(khd->kcqs);
     516             : err_khd:
     517           0 :         kfree(khd);
     518           0 :         return -ENOMEM;
     519             : }
     520             : 
     521           0 : static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
     522             : {
     523           0 :         struct kyber_hctx_data *khd = hctx->sched_data;
     524             :         int i;
     525             : 
     526           0 :         for (i = 0; i < KYBER_NUM_DOMAINS; i++)
     527           0 :                 sbitmap_free(&khd->kcq_map[i]);
     528           0 :         kfree(khd->kcqs);
     529           0 :         kfree(hctx->sched_data);
     530           0 : }
     531             : 
     532             : static int rq_get_domain_token(struct request *rq)
     533             : {
     534           0 :         return (long)rq->elv.priv[0];
     535             : }
     536             : 
     537             : static void rq_set_domain_token(struct request *rq, int token)
     538             : {
     539           0 :         rq->elv.priv[0] = (void *)(long)token;
     540             : }
     541             : 
     542           0 : static void rq_clear_domain_token(struct kyber_queue_data *kqd,
     543             :                                   struct request *rq)
     544             : {
     545             :         unsigned int sched_domain;
     546             :         int nr;
     547             : 
     548           0 :         nr = rq_get_domain_token(rq);
     549           0 :         if (nr != -1) {
     550           0 :                 sched_domain = kyber_sched_domain(rq->cmd_flags);
     551           0 :                 sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
     552           0 :                                     rq->mq_ctx->cpu);
     553             :         }
     554           0 : }
     555             : 
     556           0 : static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
     557             : {
     558             :         /*
     559             :          * We use the scheduler tags as per-hardware queue queueing tokens.
     560             :          * Async requests can be limited at this stage.
     561             :          */
     562           0 :         if (!op_is_sync(opf)) {
     563           0 :                 struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
     564             : 
     565           0 :                 data->shallow_depth = kqd->async_depth;
     566             :         }
     567           0 : }
     568             : 
     569           0 : static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
     570             :                 unsigned int nr_segs)
     571             : {
     572           0 :         struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
     573           0 :         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
     574           0 :         struct kyber_hctx_data *khd = hctx->sched_data;
     575           0 :         struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
     576           0 :         unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
     577           0 :         struct list_head *rq_list = &kcq->rq_list[sched_domain];
     578             :         bool merged;
     579             : 
     580           0 :         spin_lock(&kcq->lock);
     581           0 :         merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
     582           0 :         spin_unlock(&kcq->lock);
     583             : 
     584           0 :         return merged;
     585             : }
     586             : 
     587           0 : static void kyber_prepare_request(struct request *rq)
     588             : {
     589           0 :         rq_set_domain_token(rq, -1);
     590           0 : }
     591             : 
     592           0 : static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
     593             :                                   struct list_head *rq_list, bool at_head)
     594             : {
     595           0 :         struct kyber_hctx_data *khd = hctx->sched_data;
     596             :         struct request *rq, *next;
     597             : 
     598           0 :         list_for_each_entry_safe(rq, next, rq_list, queuelist) {
     599           0 :                 unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
     600           0 :                 struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
     601           0 :                 struct list_head *head = &kcq->rq_list[sched_domain];
     602             : 
     603           0 :                 spin_lock(&kcq->lock);
     604           0 :                 trace_block_rq_insert(rq);
     605           0 :                 if (at_head)
     606           0 :                         list_move(&rq->queuelist, head);
     607             :                 else
     608           0 :                         list_move_tail(&rq->queuelist, head);
     609           0 :                 sbitmap_set_bit(&khd->kcq_map[sched_domain],
     610           0 :                                 rq->mq_ctx->index_hw[hctx->type]);
     611           0 :                 spin_unlock(&kcq->lock);
     612             :         }
     613           0 : }
     614             : 
     615           0 : static void kyber_finish_request(struct request *rq)
     616             : {
     617           0 :         struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
     618             : 
     619           0 :         rq_clear_domain_token(kqd, rq);
     620           0 : }
     621             : 
     622             : static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
     623             :                                unsigned int sched_domain, unsigned int type,
     624             :                                u64 target, u64 latency)
     625             : {
     626             :         unsigned int bucket;
     627             :         u64 divisor;
     628             : 
     629           0 :         if (latency > 0) {
     630           0 :                 divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
     631           0 :                 bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
     632             :                                KYBER_LATENCY_BUCKETS - 1);
     633             :         } else {
     634             :                 bucket = 0;
     635             :         }
     636             : 
     637           0 :         atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
     638             : }
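                      : 
                      : /*
                      :  * Worked example (hypothetical numbers): with the default 2 ms read
                      :  * target, divisor = max(2 ms >> 2, 1) = 500 us.  A completion whose
                      :  * latency is 1.2 ms hashes to bucket min((1200000 - 1) / 500000, 7) = 2,
                      :  * i.e. the "<= 3/4 * target" bucket, while anything above 3.5 ms saturates
                      :  * into the final "> 1 3/4 * target" bucket.
                      :  */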
     639             : 
     640           0 : static void kyber_completed_request(struct request *rq, u64 now)
     641             : {
     642           0 :         struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
     643             :         struct kyber_cpu_latency *cpu_latency;
     644             :         unsigned int sched_domain;
     645             :         u64 target;
     646             : 
     647           0 :         sched_domain = kyber_sched_domain(rq->cmd_flags);
     648           0 :         if (sched_domain == KYBER_OTHER)
     649             :                 return;
     650             : 
     651           0 :         cpu_latency = get_cpu_ptr(kqd->cpu_latency);
     652           0 :         target = kqd->latency_targets[sched_domain];
     653           0 :         add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
     654           0 :                            target, now - rq->start_time_ns);
     655           0 :         add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
     656           0 :                            now - rq->io_start_time_ns);
     657           0 :         put_cpu_ptr(kqd->cpu_latency);
     658             : 
     659           0 :         timer_reduce(&kqd->timer, jiffies + HZ / 10);
     660             : }
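                      : 
                      : /*
                      :  * The jiffies + HZ / 10 above means the aggregation timer in
                      :  * kyber_timer_fn() fires within roughly 100 ms of a completion;
                      :  * timer_reduce() only ever moves a pending timer earlier (or arms it if
                      :  * idle), so a steady stream of completions cannot keep pushing the
                      :  * deadline back.
                      :  */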
     661             : 
     662             : struct flush_kcq_data {
     663             :         struct kyber_hctx_data *khd;
     664             :         unsigned int sched_domain;
     665             :         struct list_head *list;
     666             : };
     667             : 
     668           0 : static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
     669             : {
     670           0 :         struct flush_kcq_data *flush_data = data;
     671           0 :         struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];
     672             : 
     673           0 :         spin_lock(&kcq->lock);
     674           0 :         list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
     675             :                               flush_data->list);
     676           0 :         sbitmap_clear_bit(sb, bitnr);
     677           0 :         spin_unlock(&kcq->lock);
     678             : 
     679           0 :         return true;
     680             : }
     681             : 
     682             : static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
     683             :                                   unsigned int sched_domain,
     684             :                                   struct list_head *list)
     685             : {
     686           0 :         struct flush_kcq_data data = {
     687             :                 .khd = khd,
     688             :                 .sched_domain = sched_domain,
     689             :                 .list = list,
     690             :         };
     691             : 
     692           0 :         sbitmap_for_each_set(&khd->kcq_map[sched_domain],
     693             :                              flush_busy_kcq, &data);
     694             : }
     695             : 
     696           0 : static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
     697             :                              void *key)
     698             : {
     699           0 :         struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
     700           0 :         struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);
     701             : 
     702           0 :         sbitmap_del_wait_queue(wait);
     703           0 :         blk_mq_run_hw_queue(hctx, true);
     704           0 :         return 1;
     705             : }
     706             : 
     707           0 : static int kyber_get_domain_token(struct kyber_queue_data *kqd,
     708             :                                   struct kyber_hctx_data *khd,
     709             :                                   struct blk_mq_hw_ctx *hctx)
     710             : {
     711           0 :         unsigned int sched_domain = khd->cur_domain;
     712           0 :         struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
     713           0 :         struct sbq_wait *wait = &khd->domain_wait[sched_domain];
     714             :         struct sbq_wait_state *ws;
     715             :         int nr;
     716             : 
     717           0 :         nr = __sbitmap_queue_get(domain_tokens);
     718             : 
     719             :         /*
     720             :          * If we failed to get a domain token, make sure the hardware queue is
     721             :          * run when one becomes available. Note that this is serialized on
     722             :          * khd->lock, but we still need to be careful about the waker.
     723             :          */
     724           0 :         if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
     725           0 :                 ws = sbq_wait_ptr(domain_tokens,
     726             :                                   &khd->wait_index[sched_domain]);
     727           0 :                 khd->domain_ws[sched_domain] = ws;
     728           0 :                 sbitmap_add_wait_queue(domain_tokens, ws, wait);
     729             : 
     730             :                 /*
     731             :                  * Try again in case a token was freed before we got on the wait
     732             :                  * queue.
     733             :                  */
     734           0 :                 nr = __sbitmap_queue_get(domain_tokens);
     735             :         }
     736             : 
     737             :         /*
     738             :          * If we got a token while we were on the wait queue, remove ourselves
     739             :          * from the wait queue to ensure that all wake ups make forward
     740             :          * progress. It's possible that the waker already deleted the entry
     741             :          * between the !list_empty_careful() check and us grabbing the lock, but
     742             :          * list_del_init() is okay with that.
     743             :          */
     744           0 :         if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
     745           0 :                 ws = khd->domain_ws[sched_domain];
     746           0 :                 spin_lock_irq(&ws->wait.lock);
     747           0 :                 sbitmap_del_wait_queue(wait);
     748           0 :                 spin_unlock_irq(&ws->wait.lock);
     749             :         }
     750             : 
     751           0 :         return nr;
     752             : }
     753             : 
     754             : static struct request *
     755           0 : kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
     756             :                           struct kyber_hctx_data *khd,
     757             :                           struct blk_mq_hw_ctx *hctx)
     758             : {
     759             :         struct list_head *rqs;
     760             :         struct request *rq;
     761             :         int nr;
     762             : 
     763           0 :         rqs = &khd->rqs[khd->cur_domain];
     764             : 
     765             :         /*
     766             :          * If we already have a flushed request, then we just need to get a
     767             :          * token for it. Otherwise, if there are pending requests in the kcqs,
     768             :          * flush the kcqs, but only if we can get a token. If not, we should
     769             :          * leave the requests in the kcqs so that they can be merged. Note that
     770             :          * khd->lock serializes the flushes, so if we observed any bit set in
     771             :          * the kcq_map, we will always get a request.
     772             :          */
     773           0 :         rq = list_first_entry_or_null(rqs, struct request, queuelist);
     774           0 :         if (rq) {
     775           0 :                 nr = kyber_get_domain_token(kqd, khd, hctx);
     776           0 :                 if (nr >= 0) {
     777           0 :                         khd->batching++;
     778           0 :                         rq_set_domain_token(rq, nr);
     779           0 :                         list_del_init(&rq->queuelist);
     780             :                         return rq;
     781             :                 } else {
     782             :                         trace_kyber_throttled(kqd->dev,
     783             :                                               kyber_domain_names[khd->cur_domain]);
     784             :                 }
     785           0 :         } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
     786           0 :                 nr = kyber_get_domain_token(kqd, khd, hctx);
     787           0 :                 if (nr >= 0) {
     788           0 :                         kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
     789           0 :                         rq = list_first_entry(rqs, struct request, queuelist);
     790           0 :                         khd->batching++;
     791           0 :                         rq_set_domain_token(rq, nr);
     792           0 :                         list_del_init(&rq->queuelist);
     793             :                         return rq;
     794             :                 } else {
     795             :                         trace_kyber_throttled(kqd->dev,
     796             :                                               kyber_domain_names[khd->cur_domain]);
     797             :                 }
     798             :         }
     799             : 
     800             :         /* There were either no pending requests or no tokens. */
     801             :         return NULL;
     802             : }
     803             : 
     804           0 : static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
     805             : {
     806           0 :         struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
     807           0 :         struct kyber_hctx_data *khd = hctx->sched_data;
     808             :         struct request *rq;
     809             :         int i;
     810             : 
     811           0 :         spin_lock(&khd->lock);
     812             : 
     813             :         /*
     814             :          * First, if we are still entitled to batch, try to dispatch a request
     815             :          * from the batch.
     816             :          */
     817           0 :         if (khd->batching < kyber_batch_size[khd->cur_domain]) {
     818           0 :                 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
     819           0 :                 if (rq)
     820             :                         goto out;
     821             :         }
     822             : 
     823             :         /*
     824             :          * Either,
     825             :          * 1. We were no longer entitled to a batch.
     826             :          * 2. The domain we were batching didn't have any requests.
     827             :          * 3. The domain we were batching was out of tokens.
     828             :          *
     829             :          * Start another batch. Note that this wraps back around to the original
     830             :          * domain if no other domains have requests or tokens.
     831             :          */
     832           0 :         khd->batching = 0;
     833           0 :         for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
     834           0 :                 if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
     835           0 :                         khd->cur_domain = 0;
     836             :                 else
     837           0 :                         khd->cur_domain++;
     838             : 
     839           0 :                 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
     840           0 :                 if (rq)
     841             :                         goto out;
     842             :         }
     843             : 
     844             :         rq = NULL;
     845             : out:
     846           0 :         spin_unlock(&khd->lock);
     847           0 :         return rq;
     848             : }
     849             : 
     850           0 : static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
     851             : {
     852           0 :         struct kyber_hctx_data *khd = hctx->sched_data;
     853             :         int i;
     854             : 
     855           0 :         for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
     856           0 :                 if (!list_empty_careful(&khd->rqs[i]) ||
     857           0 :                     sbitmap_any_bit_set(&khd->kcq_map[i]))
     858             :                         return true;
     859             :         }
     860             : 
     861             :         return false;
     862             : }
     863             : 
     864             : #define KYBER_LAT_SHOW_STORE(domain, name)                              \
     865             : static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,        \
     866             :                                        char *page)                      \
     867             : {                                                                       \
     868             :         struct kyber_queue_data *kqd = e->elevator_data;             \
     869             :                                                                         \
     870             :         return sprintf(page, "%llu\n", kqd->latency_targets[domain]);      \
     871             : }                                                                       \
     872             :                                                                         \
     873             : static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,       \
     874             :                                         const char *page, size_t count) \
     875             : {                                                                       \
     876             :         struct kyber_queue_data *kqd = e->elevator_data;             \
     877             :         unsigned long long nsec;                                        \
     878             :         int ret;                                                        \
     879             :                                                                         \
     880             :         ret = kstrtoull(page, 10, &nsec);                           \
     881             :         if (ret)                                                        \
     882             :                 return ret;                                             \
     883             :                                                                         \
     884             :         kqd->latency_targets[domain] = nsec;                         \
     885             :                                                                         \
     886             :         return count;                                                   \
     887             : }
     888           0 : KYBER_LAT_SHOW_STORE(KYBER_READ, read);
     889           0 : KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
     890             : #undef KYBER_LAT_SHOW_STORE
     891             : 
     892             : #define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
     893             : static struct elv_fs_entry kyber_sched_attrs[] = {
     894             :         KYBER_LAT_ATTR(read),
     895             :         KYBER_LAT_ATTR(write),
     896             :         __ATTR_NULL
     897             : };
     898             : #undef KYBER_LAT_ATTR
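                      : 
                      : /*
                      :  * With kyber active on a queue, the two attributes above surface as
                      :  * read_lat_nsec and write_lat_nsec under the queue's iosched directory,
                      :  * so a 10 ms read target could be set with something like (device name
                      :  * assumed):
                      :  *
                      :  *   echo 10000000 > /sys/block/nvme0n1/queue/iosched/read_lat_nsec
                      :  */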
     899             : 
     900             : #ifdef CONFIG_BLK_DEBUG_FS
     901             : #define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)                        \
     902             : static int kyber_##name##_tokens_show(void *data, struct seq_file *m)   \
     903             : {                                                                       \
     904             :         struct request_queue *q = data;                                 \
     905             :         struct kyber_queue_data *kqd = q->elevator->elevator_data;        \
     906             :                                                                         \
     907             :         sbitmap_queue_show(&kqd->domain_tokens[domain], m);              \
     908             :         return 0;                                                       \
     909             : }                                                                       \
     910             :                                                                         \
     911             : static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)  \
     912             :         __acquires(&khd->lock)                                           \
     913             : {                                                                       \
     914             :         struct blk_mq_hw_ctx *hctx = m->private;                     \
     915             :         struct kyber_hctx_data *khd = hctx->sched_data;                      \
     916             :                                                                         \
     917             :         spin_lock(&khd->lock);                                           \
     918             :         return seq_list_start(&khd->rqs[domain], *pos);                  \
     919             : }                                                                       \
     920             :                                                                         \
     921             : static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,       \
     922             :                                      loff_t *pos)                       \
     923             : {                                                                       \
     924             :         struct blk_mq_hw_ctx *hctx = m->private;                     \
     925             :         struct kyber_hctx_data *khd = hctx->sched_data;                      \
     926             :                                                                         \
     927             :         return seq_list_next(v, &khd->rqs[domain], pos);         \
     928             : }                                                                       \
     929             :                                                                         \
     930             : static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)        \
     931             :         __releases(&khd->lock)                                           \
     932             : {                                                                       \
     933             :         struct blk_mq_hw_ctx *hctx = m->private;                     \
     934             :         struct kyber_hctx_data *khd = hctx->sched_data;                      \
     935             :                                                                         \
     936             :         spin_unlock(&khd->lock);                                 \
     937             : }                                                                       \
     938             :                                                                         \
     939             : static const struct seq_operations kyber_##name##_rqs_seq_ops = {       \
     940             :         .start  = kyber_##name##_rqs_start,                             \
     941             :         .next   = kyber_##name##_rqs_next,                              \
     942             :         .stop   = kyber_##name##_rqs_stop,                              \
     943             :         .show   = blk_mq_debugfs_rq_show,                               \
     944             : };                                                                      \
     945             :                                                                         \
     946             : static int kyber_##name##_waiting_show(void *data, struct seq_file *m)  \
     947             : {                                                                       \
     948             :         struct blk_mq_hw_ctx *hctx = data;                              \
     949             :         struct kyber_hctx_data *khd = hctx->sched_data;                      \
     950             :         wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;       \
     951             :                                                                         \
     952             :         seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));      \
     953             :         return 0;                                                       \
     954             : }
     955             : KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
     956             : KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
     957             : KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
     958             : KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
     959             : #undef KYBER_DEBUGFS_DOMAIN_ATTRS
     960             : 
     961             : static int kyber_async_depth_show(void *data, struct seq_file *m)
     962             : {
     963             :         struct request_queue *q = data;
     964             :         struct kyber_queue_data *kqd = q->elevator->elevator_data;
     965             : 
     966             :         seq_printf(m, "%u\n", kqd->async_depth);
     967             :         return 0;
     968             : }
     969             : 
     970             : static int kyber_cur_domain_show(void *data, struct seq_file *m)
     971             : {
     972             :         struct blk_mq_hw_ctx *hctx = data;
     973             :         struct kyber_hctx_data *khd = hctx->sched_data;
     974             : 
     975             :         seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
     976             :         return 0;
     977             : }
     978             : 
     979             : static int kyber_batching_show(void *data, struct seq_file *m)
     980             : {
     981             :         struct blk_mq_hw_ctx *hctx = data;
     982             :         struct kyber_hctx_data *khd = hctx->sched_data;
     983             : 
     984             :         seq_printf(m, "%u\n", khd->batching);
     985             :         return 0;
     986             : }
     987             : 
     988             : #define KYBER_QUEUE_DOMAIN_ATTRS(name)  \
     989             :         {#name "_tokens", 0400, kyber_##name##_tokens_show}
     990             : static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
     991             :         KYBER_QUEUE_DOMAIN_ATTRS(read),
     992             :         KYBER_QUEUE_DOMAIN_ATTRS(write),
     993             :         KYBER_QUEUE_DOMAIN_ATTRS(discard),
     994             :         KYBER_QUEUE_DOMAIN_ATTRS(other),
     995             :         {"async_depth", 0400, kyber_async_depth_show},
     996             :         {},
     997             : };
     998             : #undef KYBER_QUEUE_DOMAIN_ATTRS
     999             : 
    1000             : #define KYBER_HCTX_DOMAIN_ATTRS(name)                                   \
    1001             :         {#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},     \
    1002             :         {#name "_waiting", 0400, kyber_##name##_waiting_show}
    1003             : static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
    1004             :         KYBER_HCTX_DOMAIN_ATTRS(read),
    1005             :         KYBER_HCTX_DOMAIN_ATTRS(write),
    1006             :         KYBER_HCTX_DOMAIN_ATTRS(discard),
    1007             :         KYBER_HCTX_DOMAIN_ATTRS(other),
    1008             :         {"cur_domain", 0400, kyber_cur_domain_show},
    1009             :         {"batching", 0400, kyber_batching_show},
    1010             :         {},
    1011             : };
    1012             : #undef KYBER_HCTX_DOMAIN_ATTRS
    1013             : #endif
    1014             : 
    1015             : static struct elevator_type kyber_sched = {
    1016             :         .ops = {
    1017             :                 .init_sched = kyber_init_sched,
    1018             :                 .exit_sched = kyber_exit_sched,
    1019             :                 .init_hctx = kyber_init_hctx,
    1020             :                 .exit_hctx = kyber_exit_hctx,
    1021             :                 .limit_depth = kyber_limit_depth,
    1022             :                 .bio_merge = kyber_bio_merge,
    1023             :                 .prepare_request = kyber_prepare_request,
    1024             :                 .insert_requests = kyber_insert_requests,
    1025             :                 .finish_request = kyber_finish_request,
    1026             :                 .requeue_request = kyber_finish_request,
    1027             :                 .completed_request = kyber_completed_request,
    1028             :                 .dispatch_request = kyber_dispatch_request,
    1029             :                 .has_work = kyber_has_work,
    1030             :                 .depth_updated = kyber_depth_updated,
    1031             :         },
    1032             : #ifdef CONFIG_BLK_DEBUG_FS
    1033             :         .queue_debugfs_attrs = kyber_queue_debugfs_attrs,
    1034             :         .hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
    1035             : #endif
    1036             :         .elevator_attrs = kyber_sched_attrs,
    1037             :         .elevator_name = "kyber",
    1038             :         .elevator_owner = THIS_MODULE,
    1039             : };
    1040             : 
    1041           1 : static int __init kyber_init(void)
    1042             : {
    1043           1 :         return elv_register(&kyber_sched);
    1044             : }
    1045             : 
    1046           0 : static void __exit kyber_exit(void)
    1047             : {
    1048           0 :         elv_unregister(&kyber_sched);
    1049           0 : }
    1050             : 
    1051             : module_init(kyber_init);
    1052             : module_exit(kyber_exit);
    1053             : 
    1054             : MODULE_AUTHOR("Omar Sandoval");
    1055             : MODULE_LICENSE("GPL");
    1056             : MODULE_DESCRIPTION("Kyber I/O scheduler");

Generated by: LCOV version 1.14