LCOV - code coverage report
Current view: top level - block - blk-settings.c (source / functions)
Test: coverage.info                     Date: 2023-08-24 13:40:31

                    Hit    Total    Coverage
  Lines:              0      298      0.0 %
  Functions:          0       36      0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Functions related to setting various queue properties from drivers
       4             :  */
       5             : #include <linux/kernel.h>
       6             : #include <linux/module.h>
       7             : #include <linux/init.h>
       8             : #include <linux/bio.h>
       9             : #include <linux/blkdev.h>
      10             : #include <linux/pagemap.h>
      11             : #include <linux/backing-dev-defs.h>
      12             : #include <linux/gcd.h>
      13             : #include <linux/lcm.h>
      14             : #include <linux/jiffies.h>
      15             : #include <linux/gfp.h>
      16             : #include <linux/dma-mapping.h>
      17             : 
      18             : #include "blk.h"
      19             : #include "blk-rq-qos.h"
      20             : #include "blk-wbt.h"
      21             : 
      22           0 : void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
      23             : {
      24           0 :         q->rq_timeout = timeout;
      25           0 : }
      26             : EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
      27             : 
      28             : /**
      29             :  * blk_set_default_limits - reset limits to default values
      30             :  * @lim:  the queue_limits structure to reset
      31             :  *
      32             :  * Description:
       33             :  *   Resets a queue_limits struct to its default state.
      34             :  */
      35           0 : void blk_set_default_limits(struct queue_limits *lim)
      36             : {
      37           0 :         lim->max_segments = BLK_MAX_SEGMENTS;
      38           0 :         lim->max_discard_segments = 1;
      39           0 :         lim->max_integrity_segments = 0;
      40           0 :         lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
      41           0 :         lim->virt_boundary_mask = 0;
      42           0 :         lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
      43           0 :         lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
      44           0 :         lim->max_user_sectors = lim->max_dev_sectors = 0;
      45           0 :         lim->chunk_sectors = 0;
      46           0 :         lim->max_write_zeroes_sectors = 0;
      47           0 :         lim->max_zone_append_sectors = 0;
      48           0 :         lim->max_discard_sectors = 0;
      49           0 :         lim->max_hw_discard_sectors = 0;
      50           0 :         lim->max_secure_erase_sectors = 0;
      51           0 :         lim->discard_granularity = 0;
      52           0 :         lim->discard_alignment = 0;
      53           0 :         lim->discard_misaligned = 0;
      54           0 :         lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
      55           0 :         lim->bounce = BLK_BOUNCE_NONE;
      56           0 :         lim->alignment_offset = 0;
      57           0 :         lim->io_opt = 0;
      58           0 :         lim->misaligned = 0;
      59           0 :         lim->zoned = BLK_ZONED_NONE;
      60           0 :         lim->zone_write_granularity = 0;
      61           0 :         lim->dma_alignment = 511;
      62           0 : }
      63             : 
      64             : /**
      65             :  * blk_set_stacking_limits - set default limits for stacking devices
      66             :  * @lim:  the queue_limits structure to reset
      67             :  *
      68             :  * Description:
       69             :  *   Resets a queue_limits struct to its default state. Should be used
      70             :  *   by stacking drivers like DM that have no internal limits.
      71             :  */
      72           0 : void blk_set_stacking_limits(struct queue_limits *lim)
      73             : {
      74           0 :         blk_set_default_limits(lim);
      75             : 
      76             :         /* Inherit limits from component devices */
      77           0 :         lim->max_segments = USHRT_MAX;
      78           0 :         lim->max_discard_segments = USHRT_MAX;
      79           0 :         lim->max_hw_sectors = UINT_MAX;
      80           0 :         lim->max_segment_size = UINT_MAX;
      81           0 :         lim->max_sectors = UINT_MAX;
      82           0 :         lim->max_dev_sectors = UINT_MAX;
      83           0 :         lim->max_write_zeroes_sectors = UINT_MAX;
      84           0 :         lim->max_zone_append_sectors = UINT_MAX;
      85           0 : }
      86             : EXPORT_SYMBOL(blk_set_stacking_limits);
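                     :
                     : /*
                     :  * Illustrative sketch, not part of this file: a stacking driver would
                     :  * typically reset its limits with blk_set_stacking_limits() and then
                     :  * fold in every component device with blk_stack_limits() (see below).
                     :  * The function and array names here are hypothetical.
                     :  *
                     :  *	static void my_stack_combine(struct queue_limits *lim,
                     :  *				     struct block_device **parts, int n)
                     :  *	{
                     :  *		int i;
                     :  *
                     :  *		blk_set_stacking_limits(lim);
                     :  *		for (i = 0; i < n; i++)
                     :  *			if (blk_stack_limits(lim,
                     :  *					&bdev_get_queue(parts[i])->limits,
                     :  *					get_start_sect(parts[i])) < 0)
                     :  *				pr_warn("%pg is misaligned\n", parts[i]);
                     :  *	}
                     :  */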
      87             : 
      88             : /**
      89             :  * blk_queue_bounce_limit - set bounce buffer limit for queue
      90             :  * @q: the request queue for the device
      91             :  * @bounce: bounce limit to enforce
      92             :  *
      93             :  * Description:
      94             :  *    Force bouncing for ISA DMA ranges or highmem.
      95             :  *
      96             :  *    DEPRECATED, don't use in new code.
      97             :  **/
      98           0 : void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
      99             : {
     100           0 :         q->limits.bounce = bounce;
     101           0 : }
     102             : EXPORT_SYMBOL(blk_queue_bounce_limit);
     103             : 
     104             : /**
     105             :  * blk_queue_max_hw_sectors - set max sectors for a request for this queue
     106             :  * @q:  the request queue for the device
     107             :  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
     108             :  *
     109             :  * Description:
     110             :  *    Enables a low level driver to set a hard upper limit,
     111             :  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
     112             :  *    the device driver based upon the capabilities of the I/O
     113             :  *    controller.
     114             :  *
     115             :  *    max_dev_sectors is a hard limit imposed by the storage device for
     116             :  *    READ/WRITE requests. It is set by the disk driver.
     117             :  *
     118             :  *    max_sectors is a soft limit imposed by the block layer for
     119             :  *    filesystem type requests.  This value can be overridden on a
     120             :  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
      121             :  *    The soft limit cannot exceed max_hw_sectors.
     122             :  **/
     123           0 : void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
     124             : {
     125           0 :         struct queue_limits *limits = &q->limits;
     126             :         unsigned int max_sectors;
     127             : 
     128           0 :         if ((max_hw_sectors << 9) < PAGE_SIZE) {
     129           0 :                 max_hw_sectors = 1 << (PAGE_SHIFT - 9);
     130           0 :                 printk(KERN_INFO "%s: set to minimum %d\n",
     131             :                        __func__, max_hw_sectors);
     132             :         }
     133             : 
     134           0 :         max_hw_sectors = round_down(max_hw_sectors,
     135             :                                     limits->logical_block_size >> SECTOR_SHIFT);
     136           0 :         limits->max_hw_sectors = max_hw_sectors;
     137             : 
     138           0 :         max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
     139             : 
     140           0 :         if (limits->max_user_sectors)
     141           0 :                 max_sectors = min(max_sectors, limits->max_user_sectors);
     142             :         else
     143           0 :                 max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS);
     144             : 
     145           0 :         max_sectors = round_down(max_sectors,
     146             :                                  limits->logical_block_size >> SECTOR_SHIFT);
     147           0 :         limits->max_sectors = max_sectors;
     148             : 
     149           0 :         if (!q->disk)
     150             :                 return;
     151           0 :         q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
     152             : }
     153             : EXPORT_SYMBOL(blk_queue_max_hw_sectors);
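                     :
                     : /*
                     :  * Worked example (hypothetical driver): a controller that can move
                     :  * 4 MiB per request would call
                     :  *
                     :  *	blk_queue_max_hw_sectors(q, 8192);
                     :  *
                     :  * With no max_user_sectors set, max_sectors becomes
                     :  * min(8192, BLK_DEF_MAX_SECTORS) rounded down to the logical block
                     :  * size, so filesystem I/O is capped at the block layer default while
                     :  * passthrough requests may still use the full 8192-sector limit.
                     :  */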
     154             : 
     155             : /**
     156             :  * blk_queue_chunk_sectors - set size of the chunk for this queue
     157             :  * @q:  the request queue for the device
     158             :  * @chunk_sectors:  chunk sectors in the usual 512b unit
     159             :  *
     160             :  * Description:
     161             :  *    If a driver doesn't want IOs to cross a given chunk size, it can set
     162             :  *    this limit and prevent merging across chunks. Note that the block layer
     163             :  *    must accept a page worth of data at any offset. So if the crossing of
     164             :  *    chunks is a hard limitation in the driver, it must still be prepared
     165             :  *    to split single page bios.
     166             :  **/
     167           0 : void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
     168             : {
     169           0 :         q->limits.chunk_sectors = chunk_sectors;
     170           0 : }
     171             : EXPORT_SYMBOL(blk_queue_chunk_sectors);
     172             : 
     173             : /**
     174             :  * blk_queue_max_discard_sectors - set max sectors for a single discard
     175             :  * @q:  the request queue for the device
     176             :  * @max_discard_sectors: maximum number of sectors to discard
     177             :  **/
     178           0 : void blk_queue_max_discard_sectors(struct request_queue *q,
     179             :                 unsigned int max_discard_sectors)
     180             : {
     181           0 :         q->limits.max_hw_discard_sectors = max_discard_sectors;
     182           0 :         q->limits.max_discard_sectors = max_discard_sectors;
     183           0 : }
     184             : EXPORT_SYMBOL(blk_queue_max_discard_sectors);
     185             : 
     186             : /**
     187             :  * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
     188             :  * @q:  the request queue for the device
     189             :  * @max_sectors: maximum number of sectors to secure_erase
     190             :  **/
     191           0 : void blk_queue_max_secure_erase_sectors(struct request_queue *q,
     192             :                 unsigned int max_sectors)
     193             : {
     194           0 :         q->limits.max_secure_erase_sectors = max_sectors;
     195           0 : }
     196             : EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);
     197             : 
     198             : /**
     199             :  * blk_queue_max_write_zeroes_sectors - set max sectors for a single
     200             :  *                                      write zeroes
     201             :  * @q:  the request queue for the device
     202             :  * @max_write_zeroes_sectors: maximum number of sectors to write per command
     203             :  **/
     204           0 : void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
     205             :                 unsigned int max_write_zeroes_sectors)
     206             : {
     207           0 :         q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
     208           0 : }
     209             : EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
     210             : 
     211             : /**
     212             :  * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
     213             :  * @q:  the request queue for the device
     214             :  * @max_zone_append_sectors: maximum number of sectors to write per command
     215             :  **/
     216           0 : void blk_queue_max_zone_append_sectors(struct request_queue *q,
     217             :                 unsigned int max_zone_append_sectors)
     218             : {
     219             :         unsigned int max_sectors;
     220             : 
     221           0 :         if (WARN_ON(!blk_queue_is_zoned(q)))
     222             :                 return;
     223             : 
     224             :         max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
     225             :         max_sectors = min(q->limits.chunk_sectors, max_sectors);
     226             : 
     227             :         /*
      228             :          * Signal potential driver bugs resulting in the max_zone_append_sectors
      229             :          * limit being 0 due to a 0 argument, the chunk_sectors limit (zone size)
      230             :          * not set, or the max_hw_sectors limit not set.
     231             :          */
     232             :         WARN_ON(!max_sectors);
     233             : 
     234             :         q->limits.max_zone_append_sectors = max_sectors;
     235             : }
     236             : EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
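                     :
                     : /*
                     :  * Illustrative call order for a zoned driver (names hypothetical).
                     :  * The zoned model, chunk_sectors (zone size) and max_hw_sectors must
                     :  * already be set, or the WARN_ONs above will fire:
                     :  *
                     :  *	disk_set_zoned(disk, BLK_ZONED_HM);
                     :  *	blk_queue_chunk_sectors(q, zone_sectors);
                     :  *	blk_queue_max_hw_sectors(q, dev_max_sectors);
                     :  *	blk_queue_max_zone_append_sectors(q, dev_max_append);
                     :  */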
     237             : 
     238             : /**
     239             :  * blk_queue_max_segments - set max hw segments for a request for this queue
     240             :  * @q:  the request queue for the device
     241             :  * @max_segments:  max number of segments
     242             :  *
     243             :  * Description:
     244             :  *    Enables a low level driver to set an upper limit on the number of
     245             :  *    hw data segments in a request.
     246             :  **/
     247           0 : void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
     248             : {
     249           0 :         if (!max_segments) {
     250           0 :                 max_segments = 1;
     251           0 :                 printk(KERN_INFO "%s: set to minimum %d\n",
     252             :                        __func__, max_segments);
     253             :         }
     254             : 
     255           0 :         q->limits.max_segments = max_segments;
     256           0 : }
     257             : EXPORT_SYMBOL(blk_queue_max_segments);
     258             : 
     259             : /**
     260             :  * blk_queue_max_discard_segments - set max segments for discard requests
     261             :  * @q:  the request queue for the device
     262             :  * @max_segments:  max number of segments
     263             :  *
     264             :  * Description:
     265             :  *    Enables a low level driver to set an upper limit on the number of
     266             :  *    segments in a discard request.
     267             :  **/
     268           0 : void blk_queue_max_discard_segments(struct request_queue *q,
     269             :                 unsigned short max_segments)
     270             : {
     271           0 :         q->limits.max_discard_segments = max_segments;
     272           0 : }
     273             : EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
     274             : 
     275             : /**
     276             :  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
     277             :  * @q:  the request queue for the device
     278             :  * @max_size:  max size of segment in bytes
     279             :  *
     280             :  * Description:
     281             :  *    Enables a low level driver to set an upper limit on the size of a
      282             :  *    coalesced segment.
     283             :  **/
     284           0 : void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
     285             : {
     286           0 :         if (max_size < PAGE_SIZE) {
     287           0 :                 max_size = PAGE_SIZE;
     288           0 :                 printk(KERN_INFO "%s: set to minimum %d\n",
     289             :                        __func__, max_size);
     290             :         }
     291             : 
     292             :         /* see blk_queue_virt_boundary() for the explanation */
     293           0 :         WARN_ON_ONCE(q->limits.virt_boundary_mask);
     294             : 
     295           0 :         q->limits.max_segment_size = max_size;
     296           0 : }
     297             : EXPORT_SYMBOL(blk_queue_max_segment_size);
     298             : 
     299             : /**
     300             :  * blk_queue_logical_block_size - set logical block size for the queue
     301             :  * @q:  the request queue for the device
     302             :  * @size:  the logical block size, in bytes
     303             :  *
     304             :  * Description:
     305             :  *   This should be set to the lowest possible block size that the
     306             :  *   storage device can address.  The default of 512 covers most
     307             :  *   hardware.
     308             :  **/
     309           0 : void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
     310             : {
     311           0 :         struct queue_limits *limits = &q->limits;
     312             : 
     313           0 :         limits->logical_block_size = size;
     314             : 
     315           0 :         if (limits->physical_block_size < size)
     316           0 :                 limits->physical_block_size = size;
     317             : 
     318           0 :         if (limits->io_min < limits->physical_block_size)
     319           0 :                 limits->io_min = limits->physical_block_size;
     320             : 
     321           0 :         limits->max_hw_sectors =
     322           0 :                 round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
     323           0 :         limits->max_sectors =
     324           0 :                 round_down(limits->max_sectors, size >> SECTOR_SHIFT);
     325           0 : }
     326             : EXPORT_SYMBOL(blk_queue_logical_block_size);
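                     :
                     : /*
                     :  * Example: a driver for a 4Kn device would call
                     :  *
                     :  *	blk_queue_logical_block_size(q, 4096);
                     :  *
                     :  * which also raises physical_block_size and io_min to 4096 and rounds
                     :  * max_hw_sectors and max_sectors down to a multiple of 8 sectors.
                     :  */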
     327             : 
     328             : /**
     329             :  * blk_queue_physical_block_size - set physical block size for the queue
     330             :  * @q:  the request queue for the device
     331             :  * @size:  the physical block size, in bytes
     332             :  *
     333             :  * Description:
     334             :  *   This should be set to the lowest possible sector size that the
     335             :  *   hardware can operate on without reverting to read-modify-write
     336             :  *   operations.
     337             :  */
     338           0 : void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
     339             : {
     340           0 :         q->limits.physical_block_size = size;
     341             : 
     342           0 :         if (q->limits.physical_block_size < q->limits.logical_block_size)
     343           0 :                 q->limits.physical_block_size = q->limits.logical_block_size;
     344             : 
     345           0 :         if (q->limits.io_min < q->limits.physical_block_size)
     346           0 :                 q->limits.io_min = q->limits.physical_block_size;
     347           0 : }
     348             : EXPORT_SYMBOL(blk_queue_physical_block_size);
     349             : 
     350             : /**
     351             :  * blk_queue_zone_write_granularity - set zone write granularity for the queue
     352             :  * @q:  the request queue for the zoned device
     353             :  * @size:  the zone write granularity size, in bytes
     354             :  *
     355             :  * Description:
      356             :  *   This should be set to the lowest possible size allowing writes in the
      357             :  *   sequential zones of a zoned block device.
     358             :  */
     359           0 : void blk_queue_zone_write_granularity(struct request_queue *q,
     360             :                                       unsigned int size)
     361             : {
     362           0 :         if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
     363             :                 return;
     364             : 
     365             :         q->limits.zone_write_granularity = size;
     366             : 
     367             :         if (q->limits.zone_write_granularity < q->limits.logical_block_size)
     368             :                 q->limits.zone_write_granularity = q->limits.logical_block_size;
     369             : }
     370             : EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
     371             : 
     372             : /**
     373             :  * blk_queue_alignment_offset - set physical block alignment offset
     374             :  * @q:  the request queue for the device
     375             :  * @offset: alignment offset in bytes
     376             :  *
     377             :  * Description:
     378             :  *   Some devices are naturally misaligned to compensate for things like
     379             :  *   the legacy DOS partition table 63-sector offset.  Low-level drivers
     380             :  *   should call this function for devices whose first sector is not
     381             :  *   naturally aligned.
     382             :  */
     383           0 : void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
     384             : {
     385           0 :         q->limits.alignment_offset =
     386           0 :                 offset & (q->limits.physical_block_size - 1);
     387           0 :         q->limits.misaligned = 0;
     388           0 : }
     389             : EXPORT_SYMBOL(blk_queue_alignment_offset);
     390             : 
     391           0 : void disk_update_readahead(struct gendisk *disk)
     392             : {
     393           0 :         struct request_queue *q = disk->queue;
     394             : 
     395             :         /*
     396             :          * For read-ahead of large files to be effective, we need to read ahead
     397             :          * at least twice the optimal I/O size.
     398             :          */
     399           0 :         disk->bdi->ra_pages =
     400           0 :                 max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
     401           0 :         disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
     402           0 : }
     403             : EXPORT_SYMBOL_GPL(disk_update_readahead);
     404             : 
     405             : /**
     406             :  * blk_limits_io_min - set minimum request size for a device
     407             :  * @limits: the queue limits
     408             :  * @min:  smallest I/O size in bytes
     409             :  *
     410             :  * Description:
     411             :  *   Some devices have an internal block size bigger than the reported
     412             :  *   hardware sector size.  This function can be used to signal the
     413             :  *   smallest I/O the device can perform without incurring a performance
     414             :  *   penalty.
     415             :  */
     416           0 : void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
     417             : {
     418           0 :         limits->io_min = min;
     419             : 
     420           0 :         if (limits->io_min < limits->logical_block_size)
     421           0 :                 limits->io_min = limits->logical_block_size;
     422             : 
     423           0 :         if (limits->io_min < limits->physical_block_size)
     424           0 :                 limits->io_min = limits->physical_block_size;
     425           0 : }
     426             : EXPORT_SYMBOL(blk_limits_io_min);
     427             : 
     428             : /**
     429             :  * blk_queue_io_min - set minimum request size for the queue
     430             :  * @q:  the request queue for the device
     431             :  * @min:  smallest I/O size in bytes
     432             :  *
     433             :  * Description:
     434             :  *   Storage devices may report a granularity or preferred minimum I/O
     435             :  *   size which is the smallest request the device can perform without
     436             :  *   incurring a performance penalty.  For disk drives this is often the
     437             :  *   physical block size.  For RAID arrays it is often the stripe chunk
     438             :  *   size.  A properly aligned multiple of minimum_io_size is the
     439             :  *   preferred request size for workloads where a high number of I/O
     440             :  *   operations is desired.
     441             :  */
     442           0 : void blk_queue_io_min(struct request_queue *q, unsigned int min)
     443             : {
     444           0 :         blk_limits_io_min(&q->limits, min);
     445           0 : }
     446             : EXPORT_SYMBOL(blk_queue_io_min);
     447             : 
     448             : /**
     449             :  * blk_limits_io_opt - set optimal request size for a device
     450             :  * @limits: the queue limits
      451             :  * @opt:  optimal request size in bytes
     452             :  *
     453             :  * Description:
     454             :  *   Storage devices may report an optimal I/O size, which is the
     455             :  *   device's preferred unit for sustained I/O.  This is rarely reported
     456             :  *   for disk drives.  For RAID arrays it is usually the stripe width or
     457             :  *   the internal track size.  A properly aligned multiple of
     458             :  *   optimal_io_size is the preferred request size for workloads where
     459             :  *   sustained throughput is desired.
     460             :  */
     461           0 : void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
     462             : {
     463           0 :         limits->io_opt = opt;
     464           0 : }
     465             : EXPORT_SYMBOL(blk_limits_io_opt);
     466             : 
     467             : /**
     468             :  * blk_queue_io_opt - set optimal request size for the queue
     469             :  * @q:  the request queue for the device
     470             :  * @opt:  optimal request size in bytes
     471             :  *
     472             :  * Description:
     473             :  *   Storage devices may report an optimal I/O size, which is the
     474             :  *   device's preferred unit for sustained I/O.  This is rarely reported
     475             :  *   for disk drives.  For RAID arrays it is usually the stripe width or
     476             :  *   the internal track size.  A properly aligned multiple of
     477             :  *   optimal_io_size is the preferred request size for workloads where
     478             :  *   sustained throughput is desired.
     479             :  */
     480           0 : void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
     481             : {
     482           0 :         blk_limits_io_opt(&q->limits, opt);
     483           0 :         if (!q->disk)
     484             :                 return;
     485           0 :         q->disk->bdi->ra_pages =
     486           0 :                 max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
     487             : }
     488             : EXPORT_SYMBOL(blk_queue_io_opt);
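                     :
                     : /*
                     :  * Example: a striped RAID driver with a 64 KiB chunk across four data
                     :  * disks (hypothetical values) might report
                     :  *
                     :  *	blk_queue_io_min(q, 64 * 1024);
                     :  *	blk_queue_io_opt(q, 4 * 64 * 1024);
                     :  *
                     :  * after which readahead is sized to at least twice io_opt, 512 KiB here.
                     :  */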
     489             : 
     490             : static int queue_limit_alignment_offset(const struct queue_limits *lim,
     491             :                 sector_t sector)
     492             : {
     493           0 :         unsigned int granularity = max(lim->physical_block_size, lim->io_min);
     494           0 :         unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
     495             :                 << SECTOR_SHIFT;
     496             : 
     497           0 :         return (granularity + lim->alignment_offset - alignment) % granularity;
     498             : }
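                     :
                     : /*
                     :  * Worked example: a drive with 4096-byte physical blocks whose first
                     :  * LBA is shifted by 63 legacy sectors reports alignment_offset = 3584.
                     :  * For a partition starting at sector 63: granularity = 4096,
                     :  * alignment = (63 % 8) << 9 = 3584, and the function returns
                     :  * (4096 + 3584 - 3584) % 4096 = 0, i.e. the partition is aligned.
                     :  */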
     499             : 
     500             : static unsigned int queue_limit_discard_alignment(
     501             :                 const struct queue_limits *lim, sector_t sector)
     502             : {
     503             :         unsigned int alignment, granularity, offset;
     504             : 
     505           0 :         if (!lim->max_discard_sectors)
     506             :                 return 0;
     507             : 
     508             :         /* Why are these in bytes, not sectors? */
     509           0 :         alignment = lim->discard_alignment >> SECTOR_SHIFT;
     510           0 :         granularity = lim->discard_granularity >> SECTOR_SHIFT;
     511           0 :         if (!granularity)
     512             :                 return 0;
     513             : 
     514             :         /* Offset of the partition start in 'granularity' sectors */
     515           0 :         offset = sector_div(sector, granularity);
     516             : 
     517             :         /* And why do we do this modulus *again* in blkdev_issue_discard()? */
     518           0 :         offset = (granularity + alignment - offset) % granularity;
     519             : 
     520             :         /* Turn it back into bytes, gaah */
     521           0 :         return offset << SECTOR_SHIFT;
     522             : }
     523             : 
     524             : static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
     525             : {
     526           0 :         sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
     527           0 :         if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
     528           0 :                 sectors = PAGE_SIZE >> SECTOR_SHIFT;
     529             :         return sectors;
     530             : }
     531             : 
     532             : /**
     533             :  * blk_stack_limits - adjust queue_limits for stacked devices
     534             :  * @t:  the stacking driver limits (top device)
     535             :  * @b:  the underlying queue limits (bottom, component device)
     536             :  * @start:  first data sector within component device
     537             :  *
     538             :  * Description:
     539             :  *    This function is used by stacking drivers like MD and DM to ensure
     540             :  *    that all component devices have compatible block sizes and
     541             :  *    alignments.  The stacking driver must provide a queue_limits
     542             :  *    struct (top) and then iteratively call the stacking function for
     543             :  *    all component (bottom) devices.  The stacking function will
     544             :  *    attempt to combine the values and ensure proper alignment.
     545             :  *
     546             :  *    Returns 0 if the top and bottom queue_limits are compatible.  The
     547             :  *    top device's block sizes and alignment offsets may be adjusted to
     548             :  *    ensure alignment with the bottom device. If no compatible sizes
     549             :  *    and alignments exist, -1 is returned and the resulting top
     550             :  *    queue_limits will have the misaligned flag set to indicate that
     551             :  *    the alignment_offset is undefined.
     552             :  */
     553           0 : int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
     554             :                      sector_t start)
     555             : {
     556           0 :         unsigned int top, bottom, alignment, ret = 0;
     557             : 
     558           0 :         t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
     559           0 :         t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
     560           0 :         t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
     561           0 :         t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
     562             :                                         b->max_write_zeroes_sectors);
     563           0 :         t->max_zone_append_sectors = min(t->max_zone_append_sectors,
     564             :                                         b->max_zone_append_sectors);
     565           0 :         t->bounce = max(t->bounce, b->bounce);
     566             : 
     567           0 :         t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
     568             :                                             b->seg_boundary_mask);
     569           0 :         t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
     570             :                                             b->virt_boundary_mask);
     571             : 
     572           0 :         t->max_segments = min_not_zero(t->max_segments, b->max_segments);
     573           0 :         t->max_discard_segments = min_not_zero(t->max_discard_segments,
     574             :                                                b->max_discard_segments);
     575           0 :         t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
     576             :                                                  b->max_integrity_segments);
     577             : 
     578           0 :         t->max_segment_size = min_not_zero(t->max_segment_size,
     579             :                                            b->max_segment_size);
     580             : 
     581           0 :         t->misaligned |= b->misaligned;
     582             : 
     583           0 :         alignment = queue_limit_alignment_offset(b, start);
     584             : 
     585             :         /* Bottom device has different alignment.  Check that it is
     586             :          * compatible with the current top alignment.
     587             :          */
     588           0 :         if (t->alignment_offset != alignment) {
     589             : 
     590           0 :                 top = max(t->physical_block_size, t->io_min)
     591             :                         + t->alignment_offset;
     592           0 :                 bottom = max(b->physical_block_size, b->io_min) + alignment;
     593             : 
     594             :                 /* Verify that top and bottom intervals line up */
     595           0 :                 if (max(top, bottom) % min(top, bottom)) {
     596           0 :                         t->misaligned = 1;
     597           0 :                         ret = -1;
     598             :                 }
     599             :         }
     600             : 
     601           0 :         t->logical_block_size = max(t->logical_block_size,
     602             :                                     b->logical_block_size);
     603             : 
     604           0 :         t->physical_block_size = max(t->physical_block_size,
     605             :                                      b->physical_block_size);
     606             : 
     607           0 :         t->io_min = max(t->io_min, b->io_min);
     608           0 :         t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
     609           0 :         t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
     610             : 
     611             :         /* Set non-power-of-2 compatible chunk_sectors boundary */
     612           0 :         if (b->chunk_sectors)
     613           0 :                 t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
     614             : 
     615             :         /* Physical block size a multiple of the logical block size? */
     616           0 :         if (t->physical_block_size & (t->logical_block_size - 1)) {
     617           0 :                 t->physical_block_size = t->logical_block_size;
     618           0 :                 t->misaligned = 1;
     619           0 :                 ret = -1;
     620             :         }
     621             : 
     622             :         /* Minimum I/O a multiple of the physical block size? */
     623           0 :         if (t->io_min & (t->physical_block_size - 1)) {
     624           0 :                 t->io_min = t->physical_block_size;
     625           0 :                 t->misaligned = 1;
     626           0 :                 ret = -1;
     627             :         }
     628             : 
     629             :         /* Optimal I/O a multiple of the physical block size? */
     630           0 :         if (t->io_opt & (t->physical_block_size - 1)) {
     631           0 :                 t->io_opt = 0;
     632           0 :                 t->misaligned = 1;
     633           0 :                 ret = -1;
     634             :         }
     635             : 
     636             :         /* chunk_sectors a multiple of the physical block size? */
     637           0 :         if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
     638           0 :                 t->chunk_sectors = 0;
     639           0 :                 t->misaligned = 1;
     640           0 :                 ret = -1;
     641             :         }
     642             : 
     643           0 :         t->raid_partial_stripes_expensive =
     644           0 :                 max(t->raid_partial_stripes_expensive,
     645             :                     b->raid_partial_stripes_expensive);
     646             : 
     647             :         /* Find lowest common alignment_offset */
     648           0 :         t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
     649           0 :                 % max(t->physical_block_size, t->io_min);
     650             : 
     651             :         /* Verify that new alignment_offset is on a logical block boundary */
     652           0 :         if (t->alignment_offset & (t->logical_block_size - 1)) {
     653           0 :                 t->misaligned = 1;
     654           0 :                 ret = -1;
     655             :         }
     656             : 
     657           0 :         t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
     658           0 :         t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
     659           0 :         t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
     660             : 
     661             :         /* Discard alignment and granularity */
     662           0 :         if (b->discard_granularity) {
     663           0 :                 alignment = queue_limit_discard_alignment(b, start);
     664             : 
     665           0 :                 if (t->discard_granularity != 0 &&
     666           0 :                     t->discard_alignment != alignment) {
     667           0 :                         top = t->discard_granularity + t->discard_alignment;
     668           0 :                         bottom = b->discard_granularity + alignment;
     669             : 
     670             :                         /* Verify that top and bottom intervals line up */
     671           0 :                         if ((max(top, bottom) % min(top, bottom)) != 0)
     672           0 :                                 t->discard_misaligned = 1;
     673             :                 }
     674             : 
     675           0 :                 t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
     676             :                                                       b->max_discard_sectors);
     677           0 :                 t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
     678             :                                                          b->max_hw_discard_sectors);
     679           0 :                 t->discard_granularity = max(t->discard_granularity,
     680             :                                              b->discard_granularity);
     681           0 :                 t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
     682           0 :                         t->discard_granularity;
     683             :         }
     684           0 :         t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
     685             :                                                    b->max_secure_erase_sectors);
     686           0 :         t->zone_write_granularity = max(t->zone_write_granularity,
     687             :                                         b->zone_write_granularity);
     688           0 :         t->zoned = max(t->zoned, b->zoned);
     689           0 :         return ret;
     690             : }
     691             : EXPORT_SYMBOL(blk_stack_limits);
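                     :
                     : /*
                     :  * Worked example of the combining rules above: stacking a device with
                     :  * io_opt = 192 KiB on top of one with io_opt = 256 KiB yields
                     :  * lcm(196608, 262144) = 786432 (768 KiB), while chunk_sectors of 256
                     :  * and 320 combine to gcd(256, 320) = 64.
                     :  */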
     692             : 
     693             : /**
     694             :  * disk_stack_limits - adjust queue limits for stacked drivers
     695             :  * @disk:  MD/DM gendisk (top)
     696             :  * @bdev:  the underlying block device (bottom)
     697             :  * @offset:  offset to beginning of data within component device
     698             :  *
     699             :  * Description:
     700             :  *    Merges the limits for a top level gendisk and a bottom level
     701             :  *    block_device.
     702             :  */
     703           0 : void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
     704             :                        sector_t offset)
     705             : {
     706           0 :         struct request_queue *t = disk->queue;
     707             : 
     708           0 :         if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
     709           0 :                         get_start_sect(bdev) + (offset >> 9)) < 0)
     710           0 :                 pr_notice("%s: Warning: Device %pg is misaligned\n",
     711             :                         disk->disk_name, bdev);
     712             : 
     713           0 :         disk_update_readahead(disk);
     714           0 : }
     715             : EXPORT_SYMBOL(disk_stack_limits);
     716             : 
     717             : /**
     718             :  * blk_queue_update_dma_pad - update pad mask
     719             :  * @q:     the request queue for the device
     720             :  * @mask:  pad mask
     721             :  *
     722             :  * Update dma pad mask.
     723             :  *
      724             :  * Appending a pad buffer to a request modifies the last entry of the
      725             :  * scatter list so that it includes the pad buffer.
     726             :  **/
     727           0 : void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
     728             : {
     729           0 :         if (mask > q->dma_pad_mask)
     730           0 :                 q->dma_pad_mask = mask;
     731           0 : }
     732             : EXPORT_SYMBOL(blk_queue_update_dma_pad);
     733             : 
     734             : /**
     735             :  * blk_queue_segment_boundary - set boundary rules for segment merging
     736             :  * @q:  the request queue for the device
     737             :  * @mask:  the memory boundary mask
     738             :  **/
     739           0 : void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
     740             : {
     741           0 :         if (mask < PAGE_SIZE - 1) {
     742           0 :                 mask = PAGE_SIZE - 1;
     743           0 :                 printk(KERN_INFO "%s: set to minimum %lx\n",
     744             :                        __func__, mask);
     745             :         }
     746             : 
     747           0 :         q->limits.seg_boundary_mask = mask;
     748           0 : }
     749             : EXPORT_SYMBOL(blk_queue_segment_boundary);
     750             : 
     751             : /**
     752             :  * blk_queue_virt_boundary - set boundary rules for bio merging
     753             :  * @q:  the request queue for the device
     754             :  * @mask:  the memory boundary mask
     755             :  **/
     756           0 : void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
     757             : {
     758           0 :         q->limits.virt_boundary_mask = mask;
     759             : 
     760             :         /*
     761             :          * Devices that require a virtual boundary do not support scatter/gather
     762             :          * I/O natively, but instead require a descriptor list entry for each
      763             :  * page (which might not be identical to the Linux PAGE_SIZE).  Because
     764             :          * of that they are not limited by our notion of "segment size".
     765             :          */
     766           0 :         if (mask)
     767           0 :                 q->limits.max_segment_size = UINT_MAX;
     768           0 : }
     769             : EXPORT_SYMBOL(blk_queue_virt_boundary);
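                     :
                     : /*
                     :  * Example: a controller that needs one descriptor per data page,
                     :  * NVMe-style PRP lists for instance, would set (value hypothetical)
                     :  *
                     :  *	blk_queue_virt_boundary(q, ctrl_page_size - 1);
                     :  *
                     :  * so that no bio vector may straddle a ctrl_page_size boundary.
                     :  */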
     770             : 
     771             : /**
     772             :  * blk_queue_dma_alignment - set dma length and memory alignment
     773             :  * @q:     the request queue for the device
     774             :  * @mask:  alignment mask
     775             :  *
      776             :  * Description:
      777             :  *    Set required memory and length alignment for direct dma transactions.
      778             :  *    This is used when building direct io requests for the queue.
     779             :  *
     780             :  **/
     781           0 : void blk_queue_dma_alignment(struct request_queue *q, int mask)
     782             : {
     783           0 :         q->limits.dma_alignment = mask;
     784           0 : }
     785             : EXPORT_SYMBOL(blk_queue_dma_alignment);
     786             : 
     787             : /**
     788             :  * blk_queue_update_dma_alignment - update dma length and memory alignment
     789             :  * @q:     the request queue for the device
     790             :  * @mask:  alignment mask
     791             :  *
      792             :  * Description:
      793             :  *    Update required memory and length alignment for direct dma transactions.
     794             :  *    If the requested alignment is larger than the current alignment, then
     795             :  *    the current queue alignment is updated to the new value, otherwise it
     796             :  *    is left alone.  The design of this is to allow multiple objects
      797             :  *    (driver, device, transport, etc.) to set their respective
     798             :  *    alignments without having them interfere.
     799             :  *
     800             :  **/
     801           0 : void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
     802             : {
     803           0 :         BUG_ON(mask > PAGE_SIZE);
     804             : 
     805           0 :         if (mask > q->limits.dma_alignment)
     806           0 :                 q->limits.dma_alignment = mask;
     807           0 : }
     808             : EXPORT_SYMBOL(blk_queue_update_dma_alignment);
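                     :
                     : /*
                     :  * Example: with the default dma_alignment of 511, a transport that
                     :  * only needs 4-byte alignment calling
                     :  *
                     :  *	blk_queue_update_dma_alignment(q, 3);
                     :  *
                     :  * leaves the mask at 511, while a 1024-byte requirement (mask 1023)
                     :  * would raise it.
                     :  */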
     809             : 
     810             : /**
     811             :  * blk_set_queue_depth - tell the block layer about the device queue depth
     812             :  * @q:          the request queue for the device
     813             :  * @depth:              queue depth
     814             :  *
     815             :  */
     816           0 : void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
     817             : {
     818           0 :         q->queue_depth = depth;
     819           0 :         rq_qos_queue_depth_changed(q);
     820           0 : }
     821             : EXPORT_SYMBOL(blk_set_queue_depth);
     822             : 
     823             : /**
     824             :  * blk_queue_write_cache - configure queue's write cache
     825             :  * @q:          the request queue for the device
     826             :  * @wc:         write back cache on or off
     827             :  * @fua:        device supports FUA writes, if true
     828             :  *
     829             :  * Tell the block layer about the write cache of @q.
     830             :  */
     831           0 : void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
     832             : {
     833           0 :         if (wc)
     834           0 :                 blk_queue_flag_set(QUEUE_FLAG_WC, q);
     835             :         else
     836           0 :                 blk_queue_flag_clear(QUEUE_FLAG_WC, q);
     837           0 :         if (fua)
     838           0 :                 blk_queue_flag_set(QUEUE_FLAG_FUA, q);
     839             :         else
     840           0 :                 blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
     841             : 
     842           0 :         wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
     843           0 : }
     844             : EXPORT_SYMBOL_GPL(blk_queue_write_cache);
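                     :
                     : /*
                     :  * Example: a driver that discovers a volatile write cache and FUA
                     :  * support while probing the device (flag names hypothetical) would call
                     :  *
                     :  *	blk_queue_write_cache(q, id->vwc, id->fua);
                     :  *
                     :  * enabling REQ_PREFLUSH and REQ_FUA handling for the queue.
                     :  */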
     845             : 
     846             : /**
      847             :  * blk_queue_required_elevator_features - Set required elevator features for a queue
     848             :  * @q:          the request queue for the target device
     849             :  * @features:   Required elevator features OR'ed together
     850             :  *
      851             :  * Tell the block layer that for the device controlled through @q, the only
      852             :  * elevators that can be used are those that implement at least the set of
     853             :  * features specified by @features.
     854             :  */
     855           0 : void blk_queue_required_elevator_features(struct request_queue *q,
     856             :                                           unsigned int features)
     857             : {
     858           0 :         q->required_elevator_features = features;
     859           0 : }
     860             : EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
     861             : 
     862             : /**
     863             :  * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
     864             :  * @q:          the request queue for the device
     865             :  * @dev:        the device pointer for dma
     866             :  *
      867             :  * Tell the block layer that segments of @q can be merged by the DMA map of @dev.
     868             :  */
     869           0 : bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
     870             :                                        struct device *dev)
     871             : {
     872           0 :         unsigned long boundary = dma_get_merge_boundary(dev);
     873             : 
     874           0 :         if (!boundary)
     875             :                 return false;
     876             : 
     877             :         /* No need to update max_segment_size. see blk_queue_virt_boundary() */
     878           0 :         blk_queue_virt_boundary(q, boundary);
     879             : 
     880           0 :         return true;
     881             : }
     882             : EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
     883             : 
     884             : static bool disk_has_partitions(struct gendisk *disk)
     885             : {
     886             :         unsigned long idx;
     887             :         struct block_device *part;
     888             :         bool ret = false;
     889             : 
     890             :         rcu_read_lock();
     891             :         xa_for_each(&disk->part_tbl, idx, part) {
     892             :                 if (bdev_is_partition(part)) {
     893             :                         ret = true;
     894             :                         break;
     895             :                 }
     896             :         }
     897             :         rcu_read_unlock();
     898             : 
     899             :         return ret;
     900             : }
     901             : 
     902             : /**
     903             :  * disk_set_zoned - configure the zoned model for a disk
     904             :  * @disk:       the gendisk of the queue to configure
     905             :  * @model:      the zoned model to set
     906             :  *
     907             :  * Set the zoned model of @disk to @model.
     908             :  *
     909             :  * When @model is BLK_ZONED_HM (host managed), this should be called only
     910             :  * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
     911             :  * If @model specifies BLK_ZONED_HA (host aware), the effective model used
     912             :  * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
     913             :  * on the disk.
     914             :  */
     915           0 : void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
     916             : {
     917           0 :         struct request_queue *q = disk->queue;
     918           0 :         unsigned int old_model = q->limits.zoned;
     919             : 
     920           0 :         switch (model) {
     921             :         case BLK_ZONED_HM:
     922             :                 /*
     923             :                  * Host managed devices are supported only if
     924             :                  * CONFIG_BLK_DEV_ZONED is enabled.
     925             :                  */
     926           0 :                 WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
     927             :                 break;
     928             :         case BLK_ZONED_HA:
     929             :                 /*
     930             :                  * Host aware devices can be treated either as regular block
     931             :                  * devices (similar to drive managed devices) or as zoned block
     932             :                  * devices to take advantage of the zone command set, similarly
     933             :                  * to host managed devices. We try the latter if there are no
     934             :                  * partitions and zoned block device support is enabled, else
     935             :                  * we do nothing special as far as the block layer is concerned.
     936             :                  */
     937             :                 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
     938             :                     disk_has_partitions(disk))
     939             :                         model = BLK_ZONED_NONE;
     940             :                 break;
     941             :         case BLK_ZONED_NONE:
     942             :         default:
     943           0 :                 if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
     944           0 :                         model = BLK_ZONED_NONE;
     945             :                 break;
     946             :         }
     947             : 
     948           0 :         q->limits.zoned = model;
     949           0 :         if (model != BLK_ZONED_NONE) {
     950             :                 /*
     951             :                  * Set the zone write granularity to the device logical block
     952             :                  * size by default. The driver can change this value if needed.
     953             :                  */
     954           0 :                 blk_queue_zone_write_granularity(q,
     955             :                                                 queue_logical_block_size(q));
     956             :         } else if (old_model != BLK_ZONED_NONE) {
     957             :                 disk_clear_zone_settings(disk);
     958             :         }
     959           0 : }
     960             : EXPORT_SYMBOL_GPL(disk_set_zoned);
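                     :
                     : /*
                     :  * Example: for a host-aware drive a driver can unconditionally request
                     :  * the zoned model and let the fallback above decide:
                     :  *
                     :  *	disk_set_zoned(disk, BLK_ZONED_HA);
                     :  *
                     :  * With CONFIG_BLK_DEV_ZONED disabled, or with partitions present, the
                     :  * queue silently ends up as BLK_ZONED_NONE.
                     :  */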
     961             : 
     962           0 : int bdev_alignment_offset(struct block_device *bdev)
     963             : {
     964           0 :         struct request_queue *q = bdev_get_queue(bdev);
     965             : 
     966           0 :         if (q->limits.misaligned)
     967             :                 return -1;
     968           0 :         if (bdev_is_partition(bdev))
     969           0 :                 return queue_limit_alignment_offset(&q->limits,
     970             :                                 bdev->bd_start_sect);
     971           0 :         return q->limits.alignment_offset;
     972             : }
     973             : EXPORT_SYMBOL_GPL(bdev_alignment_offset);
     974             : 
     975           0 : unsigned int bdev_discard_alignment(struct block_device *bdev)
     976             : {
     977           0 :         struct request_queue *q = bdev_get_queue(bdev);
     978             : 
     979           0 :         if (bdev_is_partition(bdev))
     980           0 :                 return queue_limit_discard_alignment(&q->limits,
     981             :                                 bdev->bd_start_sect);
     982           0 :         return q->limits.discard_alignment;
     983             : }
     984             : EXPORT_SYMBOL_GPL(bdev_discard_alignment);

Generated by: LCOV version 1.14