LCOV - code coverage report
Current view:  top level - block - blk-core.c (source / functions)
Test:          coverage.info
Date:          2023-04-06 08:38:28

                    Hit    Total    Coverage
Lines:                6      350       1.7 %
Functions:            1       49       2.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (C) 1991, 1992 Linus Torvalds
       4             :  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
       5             :  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
       6             :  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
       7             :  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
       8             :  *      -  July 2000
       9             :  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
      10             :  */
      11             : 
      12             : /*
      13             :  * This handles all read/write requests to block devices
      14             :  */
      15             : #include <linux/kernel.h>
      16             : #include <linux/module.h>
      17             : #include <linux/bio.h>
      18             : #include <linux/blkdev.h>
      19             : #include <linux/blk-pm.h>
      20             : #include <linux/blk-integrity.h>
      21             : #include <linux/highmem.h>
      22             : #include <linux/mm.h>
      23             : #include <linux/pagemap.h>
      24             : #include <linux/kernel_stat.h>
      25             : #include <linux/string.h>
      26             : #include <linux/init.h>
      27             : #include <linux/completion.h>
      28             : #include <linux/slab.h>
      29             : #include <linux/swap.h>
      30             : #include <linux/writeback.h>
      31             : #include <linux/task_io_accounting_ops.h>
      32             : #include <linux/fault-inject.h>
      33             : #include <linux/list_sort.h>
      34             : #include <linux/delay.h>
      35             : #include <linux/ratelimit.h>
      36             : #include <linux/pm_runtime.h>
      37             : #include <linux/t10-pi.h>
      38             : #include <linux/debugfs.h>
      39             : #include <linux/bpf.h>
      40             : #include <linux/part_stat.h>
      41             : #include <linux/sched/sysctl.h>
      42             : #include <linux/blk-crypto.h>
      43             : 
      44             : #define CREATE_TRACE_POINTS
      45             : #include <trace/events/block.h>
      46             : 
      47             : #include "blk.h"
      48             : #include "blk-mq-sched.h"
      49             : #include "blk-pm.h"
      50             : #include "blk-cgroup.h"
      51             : #include "blk-throttle.h"
      52             : 
      53             : struct dentry *blk_debugfs_root;
      54             : 
      55             : EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
      56             : EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
      57             : EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
      58             : EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
      59             : EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
      60             : EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
      61             : 
      62             : static DEFINE_IDA(blk_queue_ida);
      63             : 
      64             : /*
      65             :  * For queue allocation
      66             :  */
      67             : static struct kmem_cache *blk_requestq_cachep;
      68             : 
      69             : /*
      70             :  * Controlling structure to kblockd
      71             :  */
      72             : static struct workqueue_struct *kblockd_workqueue;
      73             : 
      74             : /**
      75             :  * blk_queue_flag_set - atomically set a queue flag
      76             :  * @flag: flag to be set
      77             :  * @q: request queue
      78             :  */
      79           0 : void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
      80             : {
      81           0 :         set_bit(flag, &q->queue_flags);
      82           0 : }
      83             : EXPORT_SYMBOL(blk_queue_flag_set);
      84             : 
      85             : /**
      86             :  * blk_queue_flag_clear - atomically clear a queue flag
      87             :  * @flag: flag to be cleared
      88             :  * @q: request queue
      89             :  */
      90           0 : void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
      91             : {
      92           0 :         clear_bit(flag, &q->queue_flags);
      93           0 : }
      94             : EXPORT_SYMBOL(blk_queue_flag_clear);
      95             : 
      96             : /**
      97             :  * blk_queue_flag_test_and_set - atomically test and set a queue flag
      98             :  * @flag: flag to be set
      99             :  * @q: request queue
     100             :  *
     101             :  * Returns the previous value of @flag - 0 if the flag was not set and 1 if
     102             :  * the flag was already set.
     103             :  */
     104           0 : bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
     105             : {
     106           0 :         return test_and_set_bit(flag, &q->queue_flags);
     107             : }
     108             : EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
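
A minimal usage sketch for the three flag helpers above, assuming driver context that already holds a valid struct request_queue *q; the helper name example_mark_queue and the surrounding driver are hypothetical, while QUEUE_FLAG_NONROT and QUEUE_FLAG_NOMERGES are existing queue flags:

    static void example_mark_queue(struct request_queue *q)
    {
            /* atomically set the "no seek penalty" hint */
            blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

            /* test-and-set returns the previous value of the flag */
            if (blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, q))
                    pr_debug("NOMERGES was already set\n");

            /* ... and clear it again when no longer wanted */
            blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
    }
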
     109             : 
     110             : #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
     111             : static const char *const blk_op_name[] = {
     112             :         REQ_OP_NAME(READ),
     113             :         REQ_OP_NAME(WRITE),
     114             :         REQ_OP_NAME(FLUSH),
     115             :         REQ_OP_NAME(DISCARD),
     116             :         REQ_OP_NAME(SECURE_ERASE),
     117             :         REQ_OP_NAME(ZONE_RESET),
     118             :         REQ_OP_NAME(ZONE_RESET_ALL),
     119             :         REQ_OP_NAME(ZONE_OPEN),
     120             :         REQ_OP_NAME(ZONE_CLOSE),
     121             :         REQ_OP_NAME(ZONE_FINISH),
     122             :         REQ_OP_NAME(ZONE_APPEND),
     123             :         REQ_OP_NAME(WRITE_ZEROES),
     124             :         REQ_OP_NAME(DRV_IN),
     125             :         REQ_OP_NAME(DRV_OUT),
     126             : };
     127             : #undef REQ_OP_NAME
     128             : 
     129             : /**
      130             :  * blk_op_str - Return the string XXX in the REQ_OP_XXX name.
      131             :  * @op: REQ_OP_XXX.
      132             :  *
      133             :  * Description: Centralized block layer helper to convert REQ_OP_XXX into
      134             :  * string format. Useful for debugging and tracing bios or requests. For an
      135             :  * invalid REQ_OP_XXX it returns the string "UNKNOWN".
     136             :  */
     137           0 : inline const char *blk_op_str(enum req_op op)
     138             : {
     139           0 :         const char *op_str = "UNKNOWN";
     140             : 
     141           0 :         if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
     142           0 :                 op_str = blk_op_name[op];
     143             : 
     144           0 :         return op_str;
     145             : }
     146             : EXPORT_SYMBOL_GPL(blk_op_str);
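
blk_op_str() is mainly for debug and trace output. A small illustrative helper (the function name and the pr_debug() call are hypothetical):

    static void example_trace_bio(struct bio *bio)
    {
            pr_debug("%s: op=%s sector=%llu\n", __func__,
                     blk_op_str(bio_op(bio)),
                     (unsigned long long)bio->bi_iter.bi_sector);
    }
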
     147             : 
     148             : static const struct {
     149             :         int             errno;
     150             :         const char      *name;
     151             : } blk_errors[] = {
     152             :         [BLK_STS_OK]            = { 0,          "" },
     153             :         [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
     154             :         [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
     155             :         [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
     156             :         [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
     157             :         [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
     158             :         [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
     159             :         [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
     160             :         [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
     161             :         [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
     162             :         [BLK_STS_DEV_RESOURCE]  = { -EBUSY,     "device resource" },
     163             :         [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
     164             :         [BLK_STS_OFFLINE]       = { -ENODEV,    "device offline" },
     165             : 
     166             :         /* device mapper special case, should not leak out: */
     167             :         [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
     168             : 
     169             :         /* zone device specific errors */
     170             :         [BLK_STS_ZONE_OPEN_RESOURCE]    = { -ETOOMANYREFS, "open zones exceeded" },
     171             :         [BLK_STS_ZONE_ACTIVE_RESOURCE]  = { -EOVERFLOW, "active zones exceeded" },
     172             : 
     173             :         /* everything else not covered above: */
     174             :         [BLK_STS_IOERR]         = { -EIO,       "I/O" },
     175             : };
     176             : 
     177           0 : blk_status_t errno_to_blk_status(int errno)
     178             : {
     179             :         int i;
     180             : 
     181           0 :         for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
     182           0 :                 if (blk_errors[i].errno == errno)
     183           0 :                         return (__force blk_status_t)i;
     184             :         }
     185             : 
     186             :         return BLK_STS_IOERR;
     187             : }
     188             : EXPORT_SYMBOL_GPL(errno_to_blk_status);
     189             : 
     190           0 : int blk_status_to_errno(blk_status_t status)
     191             : {
     192           0 :         int idx = (__force int)status;
     193             : 
     194           0 :         if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
     195             :                 return -EIO;
     196           0 :         return blk_errors[idx].errno;
     197             : }
     198             : EXPORT_SYMBOL_GPL(blk_status_to_errno);
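
errno_to_blk_status() and blk_status_to_errno() are inverses over the blk_errors[] table above, with anything unrecognized collapsing to BLK_STS_IOERR / -EIO. A short sketch of the round trip (the surrounding completion path is illustrative):

    blk_status_t sts = errno_to_blk_status(-ETIMEDOUT);   /* BLK_STS_TIMEOUT */
    int err = blk_status_to_errno(sts);                   /* -ETIMEDOUT again */
    blk_status_t bad = errno_to_blk_status(-E2BIG);       /* not in the table: BLK_STS_IOERR */
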
     199             : 
     200           0 : const char *blk_status_to_str(blk_status_t status)
     201             : {
     202           0 :         int idx = (__force int)status;
     203             : 
     204           0 :         if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
     205             :                 return "<null>";
     206           0 :         return blk_errors[idx].name;
     207             : }
     208             : 
     209             : /**
     210             :  * blk_sync_queue - cancel any pending callbacks on a queue
     211             :  * @q: the queue
     212             :  *
     213             :  * Description:
     214             :  *     The block layer may perform asynchronous callback activity
     215             :  *     on a queue, such as calling the unplug function after a timeout.
     216             :  *     A block device may call blk_sync_queue to ensure that any
     217             :  *     such activity is cancelled, thus allowing it to release resources
     218             :  *     that the callbacks might use. The caller must already have made sure
     219             :  *     that its ->submit_bio will not re-add plugging prior to calling
     220             :  *     this function.
     221             :  *
     222             :  *     This function does not cancel any asynchronous activity arising
     223             :  *     out of elevator or throttling code. That would require elevator_exit()
     224             :  *     and blkcg_exit_queue() to be called with queue lock initialized.
     225             :  *
     226             :  */
     227           0 : void blk_sync_queue(struct request_queue *q)
     228             : {
     229           0 :         del_timer_sync(&q->timeout);
     230           0 :         cancel_work_sync(&q->timeout_work);
     231           0 : }
     232             : EXPORT_SYMBOL(blk_sync_queue);
     233             : 
     234             : /**
     235             :  * blk_set_pm_only - increment pm_only counter
     236             :  * @q: request queue pointer
     237             :  */
     238           0 : void blk_set_pm_only(struct request_queue *q)
     239             : {
     240           0 :         atomic_inc(&q->pm_only);
     241           0 : }
     242             : EXPORT_SYMBOL_GPL(blk_set_pm_only);
     243             : 
     244           0 : void blk_clear_pm_only(struct request_queue *q)
     245             : {
     246             :         int pm_only;
     247             : 
     248           0 :         pm_only = atomic_dec_return(&q->pm_only);
     249           0 :         WARN_ON_ONCE(pm_only < 0);
     250           0 :         if (pm_only == 0)
     251           0 :                 wake_up_all(&q->mq_freeze_wq);
     252           0 : }
     253             : EXPORT_SYMBOL_GPL(blk_clear_pm_only);
     254             : 
     255           0 : static void blk_free_queue_rcu(struct rcu_head *rcu_head)
     256             : {
     257           0 :         struct request_queue *q = container_of(rcu_head,
     258             :                         struct request_queue, rcu_head);
     259             : 
     260           0 :         percpu_ref_exit(&q->q_usage_counter);
     261           0 :         kmem_cache_free(blk_requestq_cachep, q);
     262           0 : }
     263             : 
     264           0 : static void blk_free_queue(struct request_queue *q)
     265             : {
     266           0 :         if (q->poll_stat)
     267           0 :                 blk_stat_remove_callback(q, q->poll_cb);
     268           0 :         blk_stat_free_callback(q->poll_cb);
     269             : 
     270           0 :         blk_free_queue_stats(q->stats);
     271           0 :         kfree(q->poll_stat);
     272             : 
     273           0 :         if (queue_is_mq(q))
     274           0 :                 blk_mq_release(q);
     275             : 
     276           0 :         ida_free(&blk_queue_ida, q->id);
     277           0 :         call_rcu(&q->rcu_head, blk_free_queue_rcu);
     278           0 : }
     279             : 
     280             : /**
     281             :  * blk_put_queue - decrement the request_queue refcount
     282             :  * @q: the request_queue structure to decrement the refcount for
     283             :  *
     284             :  * Decrements the refcount of the request_queue and free it when the refcount
     285             :  * reaches 0.
     286             :  */
     287           0 : void blk_put_queue(struct request_queue *q)
     288             : {
     289           0 :         if (refcount_dec_and_test(&q->refs))
     290           0 :                 blk_free_queue(q);
     291           0 : }
     292             : EXPORT_SYMBOL(blk_put_queue);
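
blk_put_queue() pairs with blk_get_queue() further down in this file: code that needs the request_queue to stay valid takes a reference first and drops it when done. A hedged sketch (use_queue() is a hypothetical placeholder for whatever work needs @q):

    /* keep the queue alive while we reference it */
    if (blk_get_queue(q)) {
            use_queue(q);           /* hypothetical work that needs @q to stay valid */
            blk_put_queue(q);
    }
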
     293             : 
     294           0 : void blk_queue_start_drain(struct request_queue *q)
     295             : {
     296             :         /*
      297             :          * When the queue DYING flag is set, we need to block new requests
      298             :          * from entering the queue, so we call blk_freeze_queue_start() to
      299             :          * prevent I/O from crossing blk_queue_enter().
     300             :          */
     301           0 :         blk_freeze_queue_start(q);
     302           0 :         if (queue_is_mq(q))
     303           0 :                 blk_mq_wake_waiters(q);
     304             :         /* Make blk_queue_enter() reexamine the DYING flag. */
     305           0 :         wake_up_all(&q->mq_freeze_wq);
     306           0 : }
     307             : 
     308             : /**
     309             :  * blk_queue_enter() - try to increase q->q_usage_counter
     310             :  * @q: request queue pointer
     311             :  * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
     312             :  */
     313           0 : int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
     314             : {
     315           0 :         const bool pm = flags & BLK_MQ_REQ_PM;
     316             : 
     317           0 :         while (!blk_try_enter_queue(q, pm)) {
     318           0 :                 if (flags & BLK_MQ_REQ_NOWAIT)
     319             :                         return -EAGAIN;
     320             : 
     321             :                 /*
      322             :                  * This is the read pair of the barrier in blk_freeze_queue_start():
      323             :                  * order reading the __PERCPU_REF_DEAD flag of .q_usage_counter
      324             :                  * against reading .mq_freeze_depth or the queue dying flag,
      325             :                  * otherwise the following wait may never return if the two
      326             :                  * reads are reordered.
     327             :                  */
     328           0 :                 smp_rmb();
     329           0 :                 wait_event(q->mq_freeze_wq,
     330             :                            (!q->mq_freeze_depth &&
     331             :                             blk_pm_resume_queue(pm, q)) ||
     332             :                            blk_queue_dying(q));
     333           0 :                 if (blk_queue_dying(q))
     334             :                         return -ENODEV;
     335             :         }
     336             : 
     337             :         return 0;
     338             : }
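
Code outside the normal bio submission path brackets access to the queue with blk_queue_enter()/blk_queue_exit(); passing BLK_MQ_REQ_NOWAIT makes the attempt fail with -EAGAIN instead of sleeping on a frozen or PM-suspended queue. A sketch (the work done while inside the queue is illustrative):

    /* non-blocking attempt to enter the queue before touching it */
    if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT) == 0) {
            /* ... safe to issue work against @q here ... */
            blk_queue_exit(q);
    }
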
     339             : 
     340           0 : int __bio_queue_enter(struct request_queue *q, struct bio *bio)
     341             : {
     342           0 :         while (!blk_try_enter_queue(q, false)) {
     343           0 :                 struct gendisk *disk = bio->bi_bdev->bd_disk;
     344             : 
     345           0 :                 if (bio->bi_opf & REQ_NOWAIT) {
     346           0 :                         if (test_bit(GD_DEAD, &disk->state))
     347             :                                 goto dead;
     348           0 :                         bio_wouldblock_error(bio);
     349           0 :                         return -EAGAIN;
     350             :                 }
     351             : 
     352             :                 /*
      353             :                  * This is the read pair of the barrier in blk_freeze_queue_start():
      354             :                  * order reading the __PERCPU_REF_DEAD flag of .q_usage_counter
      355             :                  * against reading .mq_freeze_depth or the queue dying flag,
      356             :                  * otherwise the following wait may never return if the two
      357             :                  * reads are reordered.
     358             :                  */
     359           0 :                 smp_rmb();
     360           0 :                 wait_event(q->mq_freeze_wq,
     361             :                            (!q->mq_freeze_depth &&
     362             :                             blk_pm_resume_queue(false, q)) ||
     363             :                            test_bit(GD_DEAD, &disk->state));
     364           0 :                 if (test_bit(GD_DEAD, &disk->state))
     365             :                         goto dead;
     366             :         }
     367             : 
     368             :         return 0;
     369             : dead:
     370           0 :         bio_io_error(bio);
     371           0 :         return -ENODEV;
     372             : }
     373             : 
     374           0 : void blk_queue_exit(struct request_queue *q)
     375             : {
     376           0 :         percpu_ref_put(&q->q_usage_counter);
     377           0 : }
     378             : 
     379           0 : static void blk_queue_usage_counter_release(struct percpu_ref *ref)
     380             : {
     381           0 :         struct request_queue *q =
     382           0 :                 container_of(ref, struct request_queue, q_usage_counter);
     383             : 
     384           0 :         wake_up_all(&q->mq_freeze_wq);
     385           0 : }
     386             : 
     387           0 : static void blk_rq_timed_out_timer(struct timer_list *t)
     388             : {
     389           0 :         struct request_queue *q = from_timer(q, t, timeout);
     390             : 
     391           0 :         kblockd_schedule_work(&q->timeout_work);
     392           0 : }
     393             : 
     394           0 : static void blk_timeout_work(struct work_struct *work)
     395             : {
     396           0 : }
     397             : 
     398           0 : struct request_queue *blk_alloc_queue(int node_id)
     399             : {
     400             :         struct request_queue *q;
     401             : 
     402           0 :         q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
     403             :                                   node_id);
     404           0 :         if (!q)
     405             :                 return NULL;
     406             : 
     407           0 :         q->last_merge = NULL;
     408             : 
     409           0 :         q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
     410           0 :         if (q->id < 0)
     411             :                 goto fail_q;
     412             : 
     413           0 :         q->stats = blk_alloc_queue_stats();
     414           0 :         if (!q->stats)
     415             :                 goto fail_id;
     416             : 
     417           0 :         q->node = node_id;
     418             : 
     419           0 :         atomic_set(&q->nr_active_requests_shared_tags, 0);
     420             : 
     421           0 :         timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
     422           0 :         INIT_WORK(&q->timeout_work, blk_timeout_work);
     423           0 :         INIT_LIST_HEAD(&q->icq_list);
     424             : 
     425           0 :         refcount_set(&q->refs, 1);
     426           0 :         mutex_init(&q->debugfs_mutex);
     427           0 :         mutex_init(&q->sysfs_lock);
     428           0 :         mutex_init(&q->sysfs_dir_lock);
     429           0 :         spin_lock_init(&q->queue_lock);
     430             : 
     431           0 :         init_waitqueue_head(&q->mq_freeze_wq);
     432           0 :         mutex_init(&q->mq_freeze_lock);
     433             : 
     434             :         /*
     435             :          * Init percpu_ref in atomic mode so that it's faster to shutdown.
     436             :          * See blk_register_queue() for details.
     437             :          */
     438           0 :         if (percpu_ref_init(&q->q_usage_counter,
     439             :                                 blk_queue_usage_counter_release,
     440             :                                 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
     441             :                 goto fail_stats;
     442             : 
     443           0 :         blk_set_default_limits(&q->limits);
     444           0 :         q->nr_requests = BLKDEV_DEFAULT_RQ;
     445             : 
     446           0 :         return q;
     447             : 
     448             : fail_stats:
     449           0 :         blk_free_queue_stats(q->stats);
     450             : fail_id:
     451           0 :         ida_free(&blk_queue_ida, q->id);
     452             : fail_q:
     453           0 :         kmem_cache_free(blk_requestq_cachep, q);
     454           0 :         return NULL;
     455             : }
     456             : 
     457             : /**
     458             :  * blk_get_queue - increment the request_queue refcount
     459             :  * @q: the request_queue structure to increment the refcount for
     460             :  *
     461             :  * Increment the refcount of the request_queue kobject.
     462             :  *
     463             :  * Context: Any context.
     464             :  */
     465           0 : bool blk_get_queue(struct request_queue *q)
     466             : {
     467           0 :         if (unlikely(blk_queue_dying(q)))
     468             :                 return false;
     469           0 :         refcount_inc(&q->refs);
     470           0 :         return true;
     471             : }
     472             : EXPORT_SYMBOL(blk_get_queue);
     473             : 
     474             : #ifdef CONFIG_FAIL_MAKE_REQUEST
     475             : 
     476             : static DECLARE_FAULT_ATTR(fail_make_request);
     477             : 
     478             : static int __init setup_fail_make_request(char *str)
     479             : {
     480             :         return setup_fault_attr(&fail_make_request, str);
     481             : }
     482             : __setup("fail_make_request=", setup_fail_make_request);
     483             : 
     484             : bool should_fail_request(struct block_device *part, unsigned int bytes)
     485             : {
     486             :         return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
     487             : }
     488             : 
     489             : static int __init fail_make_request_debugfs(void)
     490             : {
     491             :         struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
     492             :                                                 NULL, &fail_make_request);
     493             : 
     494             :         return PTR_ERR_OR_ZERO(dir);
     495             : }
     496             : 
     497             : late_initcall(fail_make_request_debugfs);
     498             : #endif /* CONFIG_FAIL_MAKE_REQUEST */
     499             : 
     500           0 : static inline void bio_check_ro(struct bio *bio)
     501             : {
     502           0 :         if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
     503           0 :                 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
     504             :                         return;
     505           0 :                 pr_warn("Trying to write to read-only block-device %pg\n",
     506             :                         bio->bi_bdev);
     507             :                 /* Older lvm-tools actually trigger this */
     508             :         }
     509             : }
     510             : 
     511           0 : static noinline int should_fail_bio(struct bio *bio)
     512             : {
     513           0 :         if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
     514             :                 return -EIO;
     515             :         return 0;
     516             : }
     517             : ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
     518             : 
     519             : /*
     520             :  * Check whether this bio extends beyond the end of the device or partition.
     521             :  * This may well happen - the kernel calls bread() without checking the size of
     522             :  * the device, e.g., when mounting a file system.
     523             :  */
     524           0 : static inline int bio_check_eod(struct bio *bio)
     525             : {
     526           0 :         sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
     527           0 :         unsigned int nr_sectors = bio_sectors(bio);
     528             : 
     529           0 :         if (nr_sectors && maxsector &&
     530           0 :             (nr_sectors > maxsector ||
     531           0 :              bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
     532           0 :                 pr_info_ratelimited("%s: attempt to access beyond end of device\n"
     533             :                                     "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
     534             :                                     current->comm, bio->bi_bdev, bio->bi_opf,
     535             :                                     bio->bi_iter.bi_sector, nr_sectors, maxsector);
     536             :                 return -EIO;
     537             :         }
     538             :         return 0;
     539             : }
     540             : 
     541             : /*
     542             :  * Remap block n of partition p to block n+start(p) of the disk.
     543             :  */
     544             : static int blk_partition_remap(struct bio *bio)
     545             : {
     546           0 :         struct block_device *p = bio->bi_bdev;
     547             : 
     548           0 :         if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
     549             :                 return -EIO;
     550           0 :         if (bio_sectors(bio)) {
     551           0 :                 bio->bi_iter.bi_sector += p->bd_start_sect;
     552           0 :                 trace_block_bio_remap(bio, p->bd_dev,
     553             :                                       bio->bi_iter.bi_sector -
     554           0 :                                       p->bd_start_sect);
     555             :         }
     556           0 :         bio_set_flag(bio, BIO_REMAPPED);
     557             :         return 0;
     558             : }
     559             : 
     560             : /*
     561             :  * Check write append to a zoned block device.
     562             :  */
     563             : static inline blk_status_t blk_check_zone_append(struct request_queue *q,
     564             :                                                  struct bio *bio)
     565             : {
     566             :         int nr_sectors = bio_sectors(bio);
     567             : 
     568             :         /* Only applicable to zoned block devices */
     569             :         if (!bdev_is_zoned(bio->bi_bdev))
     570             :                 return BLK_STS_NOTSUPP;
     571             : 
     572             :         /* The bio sector must point to the start of a sequential zone */
     573             :         if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector) ||
     574             :             !bio_zone_is_seq(bio))
     575             :                 return BLK_STS_IOERR;
     576             : 
     577             :         /*
     578             :          * Not allowed to cross zone boundaries. Otherwise, the BIO will be
     579             :          * split and could result in non-contiguous sectors being written in
     580             :          * different zones.
     581             :          */
     582             :         if (nr_sectors > q->limits.chunk_sectors)
     583             :                 return BLK_STS_IOERR;
     584             : 
     585             :         /* Make sure the BIO is small enough and will not get split */
     586             :         if (nr_sectors > q->limits.max_zone_append_sectors)
     587             :                 return BLK_STS_IOERR;
     588             : 
     589             :         bio->bi_opf |= REQ_NOMERGE;
     590             : 
     591             :         return BLK_STS_OK;
     592             : }
     593             : 
     594           0 : static void __submit_bio(struct bio *bio)
     595             : {
     596           0 :         struct gendisk *disk = bio->bi_bdev->bd_disk;
     597             : 
     598           0 :         if (unlikely(!blk_crypto_bio_prep(&bio)))
     599             :                 return;
     600             : 
     601           0 :         if (!disk->fops->submit_bio) {
     602           0 :                 blk_mq_submit_bio(bio);
     603           0 :         } else if (likely(bio_queue_enter(bio) == 0)) {
     604           0 :                 disk->fops->submit_bio(bio);
     605           0 :                 blk_queue_exit(disk->queue);
     606             :         }
     607             : }
     608             : 
     609             : /*
     610             :  * The loop in this function may be a bit non-obvious, and so deserves some
     611             :  * explanation:
     612             :  *
     613             :  *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
     614             :  *    that), so we have a list with a single bio.
     615             :  *  - We pretend that we have just taken it off a longer list, so we assign
     616             :  *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
     617             :  *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
     618             :  *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
     619             :  *    non-NULL value in bio_list and re-enter the loop from the top.
      620             :  *  - In this case we really did just take the bio off the top of the list (no
     621             :  *    pretending) and so remove it from bio_list, and call into ->submit_bio()
     622             :  *    again.
     623             :  *
     624             :  * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
     625             :  * bio_list_on_stack[1] contains bios that were submitted before the current
     626             :  *      ->submit_bio, but that haven't been processed yet.
     627             :  */
     628           0 : static void __submit_bio_noacct(struct bio *bio)
     629             : {
     630             :         struct bio_list bio_list_on_stack[2];
     631             : 
     632           0 :         BUG_ON(bio->bi_next);
     633             : 
     634           0 :         bio_list_init(&bio_list_on_stack[0]);
     635           0 :         current->bio_list = bio_list_on_stack;
     636             : 
     637             :         do {
     638           0 :                 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
     639             :                 struct bio_list lower, same;
     640             : 
     641             :                 /*
     642             :                  * Create a fresh bio_list for all subordinate requests.
     643             :                  */
     644           0 :                 bio_list_on_stack[1] = bio_list_on_stack[0];
     645           0 :                 bio_list_init(&bio_list_on_stack[0]);
     646             : 
     647           0 :                 __submit_bio(bio);
     648             : 
     649             :                 /*
     650             :                  * Sort new bios into those for a lower level and those for the
     651             :                  * same level.
     652             :                  */
     653           0 :                 bio_list_init(&lower);
     654             :                 bio_list_init(&same);
     655           0 :                 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
     656           0 :                         if (q == bdev_get_queue(bio->bi_bdev))
     657             :                                 bio_list_add(&same, bio);
     658             :                         else
     659             :                                 bio_list_add(&lower, bio);
     660             : 
     661             :                 /*
     662             :                  * Now assemble so we handle the lowest level first.
     663             :                  */
     664           0 :                 bio_list_merge(&bio_list_on_stack[0], &lower);
     665           0 :                 bio_list_merge(&bio_list_on_stack[0], &same);
     666           0 :                 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
     667           0 :         } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
     668             : 
     669           0 :         current->bio_list = NULL;
     670           0 : }
     671             : 
     672           0 : static void __submit_bio_noacct_mq(struct bio *bio)
     673             : {
     674           0 :         struct bio_list bio_list[2] = { };
     675             : 
     676           0 :         current->bio_list = bio_list;
     677             : 
     678             :         do {
     679           0 :                 __submit_bio(bio);
     680           0 :         } while ((bio = bio_list_pop(&bio_list[0])));
     681             : 
     682           0 :         current->bio_list = NULL;
     683           0 : }
     684             : 
     685           0 : void submit_bio_noacct_nocheck(struct bio *bio)
     686             : {
     687           0 :         blk_cgroup_bio_start(bio);
     688           0 :         blkcg_bio_issue_init(bio);
     689             : 
     690           0 :         if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
     691           0 :                 trace_block_bio_queue(bio);
     692             :                 /*
     693             :                  * Now that enqueuing has been traced, we need to trace
     694             :                  * completion as well.
     695             :                  */
     696             :                 bio_set_flag(bio, BIO_TRACE_COMPLETION);
     697             :         }
     698             : 
     699             :         /*
     700             :          * We only want one ->submit_bio to be active at a time, else stack
     701             :          * usage with stacked devices could be a problem.  Use current->bio_list
      702             :  * to collect a list of requests submitted by a ->submit_bio method while
      703             :  * it is active, and then process them after it has returned.
     704             :          */
     705           0 :         if (current->bio_list)
     706           0 :                 bio_list_add(&current->bio_list[0], bio);
     707           0 :         else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
     708           0 :                 __submit_bio_noacct_mq(bio);
     709             :         else
     710           0 :                 __submit_bio_noacct(bio);
     711           0 : }
     712             : 
     713             : /**
     714             :  * submit_bio_noacct - re-submit a bio to the block device layer for I/O
     715             :  * @bio:  The bio describing the location in memory and on the device.
     716             :  *
     717             :  * This is a version of submit_bio() that shall only be used for I/O that is
     718             :  * resubmitted to lower level drivers by stacking block drivers.  All file
     719             :  * systems and other upper level users of the block layer should use
     720             :  * submit_bio() instead.
     721             :  */
     722           0 : void submit_bio_noacct(struct bio *bio)
     723             : {
     724           0 :         struct block_device *bdev = bio->bi_bdev;
     725           0 :         struct request_queue *q = bdev_get_queue(bdev);
     726           0 :         blk_status_t status = BLK_STS_IOERR;
     727             :         struct blk_plug *plug;
     728             : 
     729             :         might_sleep();
     730             : 
     731           0 :         plug = blk_mq_plug(bio);
     732           0 :         if (plug && plug->nowait)
     733           0 :                 bio->bi_opf |= REQ_NOWAIT;
     734             : 
     735             :         /*
     736             :          * For a REQ_NOWAIT based request, return -EOPNOTSUPP
     737             :          * if queue does not support NOWAIT.
     738             :          */
     739           0 :         if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
     740             :                 goto not_supported;
     741             : 
     742           0 :         if (should_fail_bio(bio))
     743             :                 goto end_io;
     744           0 :         bio_check_ro(bio);
     745           0 :         if (!bio_flagged(bio, BIO_REMAPPED)) {
     746           0 :                 if (unlikely(bio_check_eod(bio)))
     747             :                         goto end_io;
     748           0 :                 if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
     749             :                         goto end_io;
     750             :         }
     751             : 
     752             :         /*
     753             :          * Filter flush bio's early so that bio based drivers without flush
     754             :          * support don't have to worry about them.
     755             :          */
     756           0 :         if (op_is_flush(bio->bi_opf)) {
     757           0 :                 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
     758             :                                  bio_op(bio) != REQ_OP_ZONE_APPEND))
     759             :                         goto end_io;
     760           0 :                 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
     761           0 :                         bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
     762           0 :                         if (!bio_sectors(bio)) {
     763             :                                 status = BLK_STS_OK;
     764             :                                 goto end_io;
     765             :                         }
     766             :                 }
     767             :         }
     768             : 
     769           0 :         if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
     770             :                 bio_clear_polled(bio);
     771             : 
     772           0 :         switch (bio_op(bio)) {
     773             :         case REQ_OP_DISCARD:
     774           0 :                 if (!bdev_max_discard_sectors(bdev))
     775             :                         goto not_supported;
     776             :                 break;
     777             :         case REQ_OP_SECURE_ERASE:
     778           0 :                 if (!bdev_max_secure_erase_sectors(bdev))
     779             :                         goto not_supported;
     780             :                 break;
     781             :         case REQ_OP_ZONE_APPEND:
     782             :                 status = blk_check_zone_append(q, bio);
     783             :                 if (status != BLK_STS_OK)
     784             :                         goto end_io;
     785             :                 break;
     786             :         case REQ_OP_ZONE_RESET:
     787             :         case REQ_OP_ZONE_OPEN:
     788             :         case REQ_OP_ZONE_CLOSE:
     789             :         case REQ_OP_ZONE_FINISH:
     790             :                 if (!bdev_is_zoned(bio->bi_bdev))
     791             :                         goto not_supported;
     792             :                 break;
     793             :         case REQ_OP_ZONE_RESET_ALL:
     794             :                 if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
     795             :                         goto not_supported;
     796             :                 break;
     797             :         case REQ_OP_WRITE_ZEROES:
     798           0 :                 if (!q->limits.max_write_zeroes_sectors)
     799             :                         goto not_supported;
     800             :                 break;
     801             :         default:
     802             :                 break;
     803             :         }
     804             : 
     805           0 :         if (blk_throtl_bio(bio))
     806             :                 return;
     807           0 :         submit_bio_noacct_nocheck(bio);
     808           0 :         return;
     809             : 
     810             : not_supported:
     811             :         status = BLK_STS_NOTSUPP;
     812             : end_io:
     813           0 :         bio->bi_status = status;
     814           0 :         bio_endio(bio);
     815             : }
     816             : EXPORT_SYMBOL(submit_bio_noacct);
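
Stacking drivers are expected to remap the bio (new bdev and/or sector) and feed it back through submit_bio_noacct(), so that the accounting already done by submit_bio() is not repeated for the lower level. A hypothetical linear-remapping ->submit_bio sketch; struct example_dev and its fields are invented for illustration:

    static void example_stacked_submit_bio(struct bio *bio)
    {
            struct example_dev *ed = bio->bi_bdev->bd_disk->private_data;

            /* redirect the bio to the lower device at a fixed offset */
            bio_set_dev(bio, ed->lower_bdev);
            bio->bi_iter.bi_sector += ed->start_sector;

            /* hand it back to the block layer without extra accounting */
            submit_bio_noacct(bio);
    }
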
     817             : 
     818             : /**
     819             :  * submit_bio - submit a bio to the block device layer for I/O
     820             :  * @bio: The &struct bio which describes the I/O
     821             :  *
     822             :  * submit_bio() is used to submit I/O requests to block devices.  It is passed a
     823             :  * fully set up &struct bio that describes the I/O that needs to be done.  The
      824             :  * bio will be sent to the device described by the bi_bdev field.
     825             :  *
     826             :  * The success/failure status of the request, along with notification of
     827             :  * completion, is delivered asynchronously through the ->bi_end_io() callback
     828             :  * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
     829             :  * been called.
     830             :  */
     831           0 : void submit_bio(struct bio *bio)
     832             : {
     833           0 :         if (blkcg_punt_bio_submit(bio))
     834             :                 return;
     835             : 
     836           0 :         if (bio_op(bio) == REQ_OP_READ) {
     837           0 :                 task_io_account_read(bio->bi_iter.bi_size);
     838           0 :                 count_vm_events(PGPGIN, bio_sectors(bio));
     839           0 :         } else if (bio_op(bio) == REQ_OP_WRITE) {
     840           0 :                 count_vm_events(PGPGOUT, bio_sectors(bio));
     841             :         }
     842             : 
     843           0 :         submit_bio_noacct(bio);
     844             : }
     845             : EXPORT_SYMBOL(submit_bio);
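
A typical upper-layer submission allocates the bio, points it at the target sector, attaches a completion callback, and calls submit_bio(); the caller must not touch the bio again until ->bi_end_io runs. A minimal read sketch, assuming a valid block_device and page; the helper and callback names are illustrative:

    static void example_read_endio(struct bio *bio)
    {
            /* runs asynchronously once the I/O completes */
            if (bio->bi_status)
                    pr_err("read failed: %d\n", blk_status_to_errno(bio->bi_status));
            bio_put(bio);
    }

    static void example_read_page(struct block_device *bdev, struct page *page,
                                  sector_t sector)
    {
            struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);

            bio->bi_iter.bi_sector = sector;
            __bio_add_page(bio, page, PAGE_SIZE, 0);
            bio->bi_end_io = example_read_endio;
            submit_bio(bio);
    }
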
     846             : 
     847             : /**
     848             :  * bio_poll - poll for BIO completions
     849             :  * @bio: bio to poll for
     850             :  * @iob: batches of IO
     851             :  * @flags: BLK_POLL_* flags that control the behavior
     852             :  *
     853             :  * Poll for completions on queue associated with the bio. Returns number of
     854             :  * completed entries found.
     855             :  *
     856             :  * Note: the caller must either be the context that submitted @bio, or
     857             :  * be in a RCU critical section to prevent freeing of @bio.
     858             :  */
     859           0 : int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
     860             : {
     861           0 :         blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
     862             :         struct block_device *bdev;
     863             :         struct request_queue *q;
     864           0 :         int ret = 0;
     865             : 
     866           0 :         bdev = READ_ONCE(bio->bi_bdev);
     867           0 :         if (!bdev)
     868             :                 return 0;
     869             : 
     870           0 :         q = bdev_get_queue(bdev);
     871           0 :         if (cookie == BLK_QC_T_NONE ||
     872           0 :             !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
     873             :                 return 0;
     874             : 
     875             :         /*
     876             :          * As the requests that require a zone lock are not plugged in the
     877             :          * first place, directly accessing the plug instead of using
     878             :          * blk_mq_plug() should not have any consequences during flushing for
     879             :          * zoned devices.
     880             :          */
     881           0 :         blk_flush_plug(current->plug, false);
     882             : 
     883             :         /*
     884             :          * We need to be able to enter a frozen queue, similar to how
     885             :          * timeouts also need to do that. If that is blocked, then we can
     886             :          * have pending IO when a queue freeze is started, and then the
     887             :          * wait for the freeze to finish will wait for polled requests to
      888             :          * time out as the poller is prevented from entering the queue and
     889             :          * completing them. As long as we prevent new IO from being queued,
     890             :          * that should be all that matters.
     891             :          */
     892           0 :         if (!percpu_ref_tryget(&q->q_usage_counter))
     893             :                 return 0;
     894           0 :         if (queue_is_mq(q)) {
     895           0 :                 ret = blk_mq_poll(q, cookie, iob, flags);
     896             :         } else {
     897           0 :                 struct gendisk *disk = q->disk;
     898             : 
     899           0 :                 if (disk && disk->fops->poll_bio)
     900           0 :                         ret = disk->fops->poll_bio(bio, iob, flags);
     901             :         }
     902           0 :         blk_queue_exit(q);
     903           0 :         return ret;
     904             : }
     905             : EXPORT_SYMBOL_GPL(bio_poll);
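
For polled I/O the submitter sets REQ_POLLED and then drives completion itself by calling bio_poll() until its ->bi_end_io has fired. A hedged busy-wait sketch; the done flag and example_polled_endio() are illustrative, and real users such as the direct I/O paths are more elaborate:

    struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ | REQ_POLLED, GFP_NOIO);
    bool done = false;

    bio->bi_iter.bi_sector = sector;
    __bio_add_page(bio, page, PAGE_SIZE, 0);
    bio->bi_private = &done;
    bio->bi_end_io = example_polled_endio;   /* sets *(bool *)bio->bi_private = true */
    submit_bio(bio);

    while (!READ_ONCE(done))
            bio_poll(bio, NULL, 0);          /* NULL completion batch, no special flags */
    bio_put(bio);
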
     906             : 
     907             : /*
     908             :  * Helper to implement file_operations.iopoll.  Requires the bio to be stored
     909             :  * in iocb->private, and cleared before freeing the bio.
     910             :  */
     911           0 : int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
     912             :                     unsigned int flags)
     913             : {
     914             :         struct bio *bio;
     915           0 :         int ret = 0;
     916             : 
     917             :         /*
     918             :          * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
     919             :          * point to a freshly allocated bio at this point.  If that happens
     920             :          * we have a few cases to consider:
     921             :          *
      922             :          *  1) the bio is being initialized and bi_bdev is NULL.  We can
      923             :          *     simply do nothing in this case
      924             :          *  2) the bio points to a device that is not poll enabled.  bio_poll
      925             :          *     will catch this and return 0
     926             :          *  3) the bio points to a poll capable device, including but not
     927             :          *     limited to the one that the original bio pointed to.  In this
     928             :          *     case we will call into the actual poll method and poll for I/O,
     929             :          *     even if we don't need to, but it won't cause harm either.
     930             :          *
     931             :          * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
     932             :          * is still allocated. Because partitions hold a reference to the whole
     933             :          * device bdev and thus disk, the disk is also still valid.  Grabbing
     934             :          * a reference to the queue in bio_poll() ensures the hctxs and requests
     935             :          * are still valid as well.
     936             :          */
     937             :         rcu_read_lock();
     938           0 :         bio = READ_ONCE(kiocb->private);
     939           0 :         if (bio)
     940           0 :                 ret = bio_poll(bio, iob, flags);
     941             :         rcu_read_unlock();
     942             : 
     943           0 :         return ret;
     944             : }
     945             : EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
     946             : 
     947           0 : void update_io_ticks(struct block_device *part, unsigned long now, bool end)
     948             : {
     949             :         unsigned long stamp;
     950             : again:
     951           0 :         stamp = READ_ONCE(part->bd_stamp);
     952           0 :         if (unlikely(time_after(now, stamp))) {
     953           0 :                 if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
     954           0 :                         __part_stat_add(part, io_ticks, end ? now - stamp : 1);
     955             :         }
     956           0 :         if (part->bd_partno) {
     957           0 :                 part = bdev_whole(part);
     958           0 :                 goto again;
     959             :         }
     960           0 : }
     961             : 
     962           0 : unsigned long bdev_start_io_acct(struct block_device *bdev,
     963             :                                  unsigned int sectors, enum req_op op,
     964             :                                  unsigned long start_time)
     965             : {
     966           0 :         const int sgrp = op_stat_group(op);
     967             : 
     968           0 :         part_stat_lock();
     969           0 :         update_io_ticks(bdev, start_time, false);
     970           0 :         part_stat_inc(bdev, ios[sgrp]);
     971           0 :         part_stat_add(bdev, sectors[sgrp], sectors);
     972           0 :         part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
     973           0 :         part_stat_unlock();
     974             : 
     975           0 :         return start_time;
     976             : }
     977             : EXPORT_SYMBOL(bdev_start_io_acct);
     978             : 
     979             : /**
     980             :  * bio_start_io_acct - start I/O accounting for bio based drivers
     981             :  * @bio:        bio to start account for
     982             :  *
     983             :  * Returns the start time that should be passed back to bio_end_io_acct().
     984             :  */
     985           0 : unsigned long bio_start_io_acct(struct bio *bio)
     986             : {
     987           0 :         return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
     988             :                                   bio_op(bio), jiffies);
     989             : }
     990             : EXPORT_SYMBOL_GPL(bio_start_io_acct);
     991             : 
     992           0 : void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
     993             :                       unsigned long start_time)
     994             : {
     995           0 :         const int sgrp = op_stat_group(op);
     996           0 :         unsigned long now = READ_ONCE(jiffies);
     997           0 :         unsigned long duration = now - start_time;
     998             : 
     999           0 :         part_stat_lock();
    1000           0 :         update_io_ticks(bdev, now, true);
    1001           0 :         part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
    1002           0 :         part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
    1003           0 :         part_stat_unlock();
    1004           0 : }
    1005             : EXPORT_SYMBOL(bdev_end_io_acct);
    1006             : 
    1007           0 : void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
    1008             :                               struct block_device *orig_bdev)
    1009             : {
    1010           0 :         bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
    1011           0 : }
    1012             : EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
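
Bio-based drivers that bypass blk-mq account their I/O explicitly: take the start time with bio_start_io_acct() when the bio is accepted and report completion with bio_end_io_acct() (an inline wrapper in include/linux/blkdev.h around the remapped variant above). A sketch of the two call sites:

    /* in the driver's ->submit_bio, once the bio is accepted */
    unsigned long start = bio_start_io_acct(bio);

    /* ... perform or queue the actual I/O ... */

    /* in the driver's completion path */
    bio_end_io_acct(bio, start);
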
    1013             : 
    1014             : /**
    1015             :  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
    1016             :  * @q : the queue of the device being checked
    1017             :  *
    1018             :  * Description:
    1019             :  *    Check if underlying low-level drivers of a device are busy.
     1020             :  *    If the drivers want to export their busy state, they must implement
     1021             :  *    the ->busy() callback in their struct blk_mq_ops.
    1022             :  *
    1023             :  *    Basically, this function is used only by request stacking drivers
    1024             :  *    to stop dispatching requests to underlying devices when underlying
     1025             :  *    devices are busy.  This behavior allows more I/O merging on the queue
     1026             :  *    of the request stacking driver and prevents I/O throughput regressions
     1027             :  *    under bursty I/O load.
    1028             :  *
    1029             :  * Return:
     1030             :  *    0 - Not busy (The request stacking driver should dispatch requests)
     1031             :  *    1 - Busy (The request stacking driver should stop dispatching requests)
    1032             :  */
    1033           0 : int blk_lld_busy(struct request_queue *q)
    1034             : {
    1035           0 :         if (queue_is_mq(q) && q->mq_ops->busy)
    1036           0 :                 return q->mq_ops->busy(q);
    1037             : 
    1038             :         return 0;
    1039             : }
    1040             : EXPORT_SYMBOL_GPL(blk_lld_busy);
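/*
 * Hedged sketch (not part of blk-core.c): a request stacking driver could
 * consult blk_lld_busy() on an underlying device's queue before dispatching
 * to it, letting requests merge on its own queue instead.  'under_bdev' is
 * a hypothetical handle to the lower device.
 */
static bool my_stack_should_defer(struct block_device *under_bdev)
{
	/* Non-zero return means the low-level driver reported itself busy. */
	return blk_lld_busy(bdev_get_queue(under_bdev)) != 0;
}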
    1041             : 
    1042           0 : int kblockd_schedule_work(struct work_struct *work)
    1043             : {
    1044           0 :         return queue_work(kblockd_workqueue, work);
    1045             : }
    1046             : EXPORT_SYMBOL(kblockd_schedule_work);
    1047             : 
    1048           0 : int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
    1049             :                                 unsigned long delay)
    1050             : {
    1051           0 :         return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
    1052             : }
    1053             : EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
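/*
 * Hedged sketch (not part of blk-core.c): deferring a small piece of
 * completion-side work to the kblockd workqueue via the helper above.
 * The work item and its handler are hypothetical.
 */
static void my_deferred_fn(struct work_struct *work)
{
	/* Runs from kblockd; keep it short, it shares the queue with unplug work. */
}
static DECLARE_WORK(my_deferred_work, my_deferred_fn);

static void my_kick_deferred_work(void)
{
	kblockd_schedule_work(&my_deferred_work);
}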
    1054             : 
    1055           0 : void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
    1056             : {
    1057           0 :         struct task_struct *tsk = current;
    1058             : 
    1059             :         /*
    1060             :          * If this is a nested plug, don't actually assign it.
    1061             :          */
    1062           0 :         if (tsk->plug)
    1063             :                 return;
    1064             : 
    1065           0 :         plug->mq_list = NULL;
    1066           0 :         plug->cached_rq = NULL;
    1067           0 :         plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
    1068           0 :         plug->rq_count = 0;
    1069           0 :         plug->multiple_queues = false;
    1070           0 :         plug->has_elevator = false;
    1071           0 :         plug->nowait = false;
    1072           0 :         INIT_LIST_HEAD(&plug->cb_list);
    1073             : 
    1074             :         /*
    1075             :          * Store ordering should not be needed here, since a potential
    1076             :          * preempt will imply a full memory barrier
    1077             :          */
    1078           0 :         tsk->plug = plug;
    1079             : }
    1080             : 
    1081             : /**
    1082             :  * blk_start_plug - initialize blk_plug and track it inside the task_struct
    1083             :  * @plug:       The &struct blk_plug that needs to be initialized
    1084             :  *
    1085             :  * Description:
    1086             :  *   blk_start_plug() indicates to the block layer an intent by the caller
    1087             :  *   to submit multiple I/O requests in a batch.  The block layer may use
    1088             :  *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
    1089             :  *   is called.  However, the block layer may choose to submit requests
    1090             :  *   before a call to blk_finish_plug() if the number of queued I/Os
    1091             :  *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
    1092             :  *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
    1093             :  *   the task schedules (see below).
    1094             :  *
    1095             :  *   Tracking blk_plug inside the task_struct will help with auto-flushing the
    1096             :  *   pending I/O should the task end up blocking between blk_start_plug() and
    1097             :  *   blk_finish_plug(). This is important from a performance perspective, but
    1098             :  *   also ensures that we don't deadlock. For instance, if the task is blocking
    1099             :  *   for a memory allocation, memory reclaim could end up wanting to free a
    1100             :  *   page belonging to that request that is currently residing in our private
    1101             :  *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
    1102             :  *   this kind of deadlock.
    1103             :  */
    1104           0 : void blk_start_plug(struct blk_plug *plug)
    1105             : {
    1106           0 :         blk_start_plug_nr_ios(plug, 1);
    1107           0 : }
    1108             : EXPORT_SYMBOL(blk_start_plug);
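/*
 * Hedged usage sketch (not part of blk-core.c): batching several bio
 * submissions under one plug so the block layer can merge and defer them
 * until blk_finish_plug().  'bios' and 'nr' are hypothetical.
 */
static void my_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	/* Flushes whatever is still held in the plug and clears current->plug. */
	blk_finish_plug(&plug);
}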
    1109             : 
    1110           0 : static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
    1111             : {
    1112           0 :         LIST_HEAD(callbacks);
    1113             : 
    1114           0 :         while (!list_empty(&plug->cb_list)) {
    1115           0 :                 list_splice_init(&plug->cb_list, &callbacks);
    1116             : 
    1117           0 :                 while (!list_empty(&callbacks)) {
    1118           0 :                         struct blk_plug_cb *cb = list_first_entry(&callbacks,
    1119             :                                                           struct blk_plug_cb,
    1120             :                                                           list);
    1121           0 :                         list_del(&cb->list);
    1122           0 :                         cb->callback(cb, from_schedule);
    1123             :                 }
    1124             :         }
    1125           0 : }
    1126             : 
    1127           0 : struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
    1128             :                                       int size)
    1129             : {
    1130           0 :         struct blk_plug *plug = current->plug;
    1131             :         struct blk_plug_cb *cb;
    1132             : 
    1133           0 :         if (!plug)
    1134             :                 return NULL;
    1135             : 
    1136           0 :         list_for_each_entry(cb, &plug->cb_list, list)
    1137           0 :                 if (cb->callback == unplug && cb->data == data)
    1138             :                         return cb;
    1139             : 
    1140             :         /* Not currently on the callback list */
    1141           0 :         BUG_ON(size < sizeof(*cb));
    1142           0 :         cb = kzalloc(size, GFP_ATOMIC);
    1143           0 :         if (cb) {
    1144           0 :                 cb->data = data;
    1145           0 :                 cb->callback = unplug;
    1146           0 :                 list_add(&cb->list, &plug->cb_list);
    1147             :         }
    1148             :         return cb;
    1149             : }
    1150             : EXPORT_SYMBOL(blk_check_plugged);
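/*
 * Hedged sketch (not part of blk-core.c): a driver-private callback batched
 * through blk_check_plugged(), in the style of the md/raid drivers.  The
 * embedding struct, unplug handler and queueing helper are hypothetical.
 * Note that the struct blk_plug_cb must be the first member, since
 * blk_check_plugged() hands out the allocation as a plain blk_plug_cb
 * pointer.
 */
struct my_plug_cb {
	struct blk_plug_cb cb;		/* must be first */
	struct bio_list pending;
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);
	struct bio *bio;

	/* Issue everything queued while the task was plugged, then free. */
	while ((bio = bio_list_pop(&mcb->pending)))
		submit_bio_noacct(bio);
	kfree(mcb);
}

static bool my_queue_on_plug(struct bio *bio)
{
	struct blk_plug_cb *cb = blk_check_plugged(my_unplug, NULL,
						   sizeof(struct my_plug_cb));

	if (!cb)
		return false;	/* no plug active; caller submits directly */

	bio_list_add(&container_of(cb, struct my_plug_cb, cb)->pending, bio);
	return true;
}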
    1151             : 
    1152           0 : void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
    1153             : {
    1154           0 :         if (!list_empty(&plug->cb_list))
    1155           0 :                 flush_plug_callbacks(plug, from_schedule);
    1156           0 :         if (!rq_list_empty(plug->mq_list))
    1157           0 :                 blk_mq_flush_plug_list(plug, from_schedule);
    1158             :         /*
    1159             :          * Unconditionally flush out cached requests, even if the unplug
     1160             :          * event came from schedule. Since we now hold references to the
    1161             :          * queue for cached requests, we don't want a blocked task holding
    1162             :          * up a queue freeze/quiesce event.
    1163             :          */
    1164           0 :         if (unlikely(!rq_list_empty(plug->cached_rq)))
    1165           0 :                 blk_mq_free_plug_rqs(plug);
    1166           0 : }
    1167             : 
    1168             : /**
    1169             :  * blk_finish_plug - mark the end of a batch of submitted I/O
    1170             :  * @plug:       The &struct blk_plug passed to blk_start_plug()
    1171             :  *
    1172             :  * Description:
    1173             :  * Indicate that a batch of I/O submissions is complete.  This function
    1174             :  * must be paired with an initial call to blk_start_plug().  The intent
    1175             :  * is to allow the block layer to optimize I/O submission.  See the
    1176             :  * documentation for blk_start_plug() for more information.
    1177             :  */
    1178           0 : void blk_finish_plug(struct blk_plug *plug)
    1179             : {
    1180           0 :         if (plug == current->plug) {
    1181           0 :                 __blk_flush_plug(plug, false);
    1182           0 :                 current->plug = NULL;
    1183             :         }
    1184           0 : }
    1185             : EXPORT_SYMBOL(blk_finish_plug);
    1186             : 
    1187           0 : void blk_io_schedule(void)
    1188             : {
    1189             :         /* Prevent hang_check timer from firing at us during very long I/O */
    1190           0 :         unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
    1191             : 
    1192             :         if (timeout)
    1193             :                 io_schedule_timeout(timeout);
    1194             :         else
    1195           0 :                 io_schedule();
    1196           0 : }
    1197             : EXPORT_SYMBOL_GPL(blk_io_schedule);
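/*
 * Hedged sketch (not part of blk-core.c): a wait loop for a driver-private
 * completion flag that uses blk_io_schedule() so each sleep stays below the
 * hung-task watchdog threshold.  'done' is a hypothetical flag owned by the
 * caller and set from the completion path.
 */
static void my_wait_for_done(atomic_t *done)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(done))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}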
    1198             : 
    1199           1 : int __init blk_dev_init(void)
    1200             : {
    1201             :         BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
    1202             :         BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
    1203             :                         sizeof_field(struct request, cmd_flags));
    1204             :         BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
    1205             :                         sizeof_field(struct bio, bi_opf));
    1206             : 
    1207             :         /* used for unplugging and affects IO latency/throughput - HIGHPRI */
    1208           1 :         kblockd_workqueue = alloc_workqueue("kblockd",
    1209             :                                             WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
    1210           1 :         if (!kblockd_workqueue)
    1211           0 :                 panic("Failed to create kblockd\n");
    1212             : 
    1213           1 :         blk_requestq_cachep = kmem_cache_create("request_queue",
    1214             :                         sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
    1215             : 
    1216           2 :         blk_debugfs_root = debugfs_create_dir("block", NULL);
    1217             : 
    1218           1 :         return 0;
    1219             : }

Generated by: LCOV version 1.14