// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};
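
/*
 * Each entry backs one attribute file under /sys/block/<disk>/queue/.
 * ->show formats the current value into a page-sized buffer and ->store
 * parses user input; either callback may be NULL, in which case reads or
 * writes of that attribute fail with -EIO (see queue_attr_show() and
 * queue_attr_store() below).
 */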

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}
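
/*
 * queue_var_store() parses base-10 input and rejects anything above
 * UINT_MAX even though it writes into an unsigned long, so callers can
 * safely narrow the result to a 32-bit quantity.  On success it returns
 * the byte count that sysfs passed in, which is what a store method is
 * expected to return.
 */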

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}
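
/*
 * Illustrative shell usage (device name and values are hypothetical);
 * note that values below BLKDEV_MIN_RQ are silently rounded up rather
 * than rejected:
 *
 *   $ cat /sys/block/sda/queue/nr_requests
 *   64
 *   $ echo 128 > /sys/block/sda/queue/nr_requests
 */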

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb;

        if (!q->disk)
                return -EINVAL;
        ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret;

        if (!q->disk)
                return -EINVAL;
        ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;
        q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
}
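
/*
 * read_ahead_kb is stored internally in pages, so the conversion shifts
 * by (PAGE_SHIFT - 10).  With 4 KiB pages that is a shift of 2: for
 * example, ra_pages = 32 reads back as 128 (KiB), and writing 128 stores
 * 32 pages again.
 */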

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}
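
/*
 * discard_max_bytes is taken in bytes, must be aligned to the discard
 * granularity (the mask test assumes a power-of-two granularity), and is
 * converted to 512-byte sectors with the >> 9 before being clamped to
 * the hardware limit in max_hw_discard_sectors.
 */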

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}
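
/*
 * discard_zeroes_data and write_same_max_bytes are hardwired to zero:
 * the underlying features are gone from the block layer, but the files
 * are kept so the sysfs ABI stays stable for existing tooling.
 */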

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
                                                 char *page)
{
        return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
        unsigned long long max_sectors = q->limits.max_zone_append_sectors;

        return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long var;
        unsigned int max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&var, page, count);

        if (ret < 0)
                return ret;

        max_sectors_kb = (unsigned int)var;
        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
                                         q->limits.max_dev_sectors >> 1);
        if (max_sectors_kb == 0) {
                q->limits.max_user_sectors = 0;
                max_sectors_kb = min(max_hw_sectors_kb,
                                     BLK_DEF_MAX_SECTORS >> 1);
        } else {
                if (max_sectors_kb > max_hw_sectors_kb ||
                    max_sectors_kb < page_kb)
                        return -EINVAL;
                q->limits.max_user_sectors = max_sectors_kb << 1;
        }

        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        if (q->disk)
                q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);

        return ret;
}
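
/*
 * The << 1 and >> 1 conversions work because one KiB is exactly two
 * 512-byte sectors.  Writing 0 clears the user limit and falls back to
 * min(hardware limit, BLK_DEF_MAX_SECTORS); any other value must lie
 * between one page and max_hw_sectors_kb, e.g. with 4 KiB pages a write
 * of 2 is rejected while 4 is accepted.
 */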

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_dma_alignment(q), page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_##name##_show(struct request_queue *q, char *page)                \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        if (val)                                                        \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
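
/*
 * Each invocation stamps out a show/store pair for one queue flag.  As a
 * sketch, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to
 * queue_nonrot_show() and queue_nonrot_store(); neg=1 inverts the bit in
 * both directions, which is why the "rotational" file reports 1 for a
 * spinning disk even though the internal flag is QUEUE_FLAG_NONROT.
 */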

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        return ret;
}
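
/*
 * nomerges encodes two flags in one value: 0 re-enables all merging,
 * 1 disables only the extended merge lookups (QUEUE_FLAG_NOXMERGES),
 * and 2 disables merging entirely (QUEUE_FLAG_NOMERGES).  For example,
 * "echo 2 > nomerges" is a common step when benchmarking raw device
 * IOPS without the block layer's merge logic.
 */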

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}
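
/*
 * rq_affinity: 1 steers a completion to the CPU group that submitted the
 * request, 2 forces it onto the exact submitting CPU, and 0 lets it
 * complete wherever the interrupt lands.  On !CONFIG_SMP builds every
 * write fails with -EINVAL since the store body compiles away.
 */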

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                size_t count)
{
        return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return -EINVAL;
        pr_info_ratelimited("writes to the poll attribute are ignored.\n");
        pr_info_ratelimited("please use driver specific parameters instead.\n");
        return count;
}
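
/*
 * io_poll_delay and io_poll are read-mostly stubs at this point: the
 * delay file always reads -1 and swallows writes, and io_poll only
 * reports whether polling is supported.  Writes to io_poll still succeed
 * (so old scripts do not break) but merely log that they are ignored.
 */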

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}
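
/*
 * io_timeout is exchanged in milliseconds and stored in jiffies; a value
 * of 0 is rejected.  Illustrative usage (device name and values are
 * hypothetical):
 *
 *   $ cat /sys/block/nvme0n1/queue/io_timeout
 *   30000
 *   $ echo 60000 > /sys/block/nvme0n1/queue/io_timeout   # 60 seconds
 */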

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        if (set)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);

        return count;
}
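
/*
 * write_cache accepts "write back", "write through", or "none" (treated
 * as write through).  Note that this only changes how the block layer
 * issues flushes; it does not reprogram the device's own cache mode.
 */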

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr   = { .name = _name, .mode = 0444 },      \
        .show   = _prefix##_show,                       \
};

#define QUEUE_RW_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr   = { .name = _name, .mode = 0644 },      \
        .show   = _prefix##_show,                       \
        .store  = _prefix##_store,                      \
};
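
/*
 * As a sketch of the expansion, QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb")
 * produces:
 *
 *   static struct queue_sysfs_entry queue_ra_entry = {
 *           .attr  = { .name = "read_ahead_kb", .mode = 0644 },
 *           .show  = queue_ra_show,
 *           .store = queue_ra_store,
 *   };
 *
 * so the _prefix argument must match the naming of the show/store
 * functions defined above.
 */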

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!wbt_rq_qos(q))
                return -EINVAL;

        if (wbt_disabled(q))
                return sprintf(page, "0\n");

        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_qos *rqos;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rqos = wbt_rq_qos(q);
        if (!rqos) {
                ret = wbt_init(q->disk);
                if (ret)
                        return ret;
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                return count;

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO inflight if that happens.
         */
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return count;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif
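
/*
 * wbt_lat_usec is exchanged in microseconds but writeback throttling
 * tracks nanoseconds internally, hence the div_u64(..., 1000) on read
 * and the *= 1000 on write.  Writing -1 restores the default target
 * latency and 0 disables throttling, e.g. (illustrative, device name
 * hypothetical):
 *
 *   $ echo 75000 > /sys/block/sda/queue/wbt_lat_usec   # 75 ms target
 */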

static struct attribute *queue_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_zone_append_max_entry.attr,
        &queue_zone_write_granularity_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,
        &queue_max_open_zones_entry.attr,
        &queue_max_active_zones_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_iostats_entry.attr,
        &queue_stable_writes_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_fua_entry.attr,
        &queue_dax_entry.attr,
        &queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &blk_throtl_sample_time_entry.attr,
#endif
        &queue_virt_boundary_mask_entry.attr,
        &queue_dma_alignment_entry.attr,
        NULL,
};

static struct attribute *blk_mq_queue_attrs[] = {
        &queue_requests_entry.attr,
        &elv_iosched_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
        &queue_wb_lat_entry.attr,
#endif
        NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                int n)
{
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;

        if ((attr == &queue_max_open_zones_entry.attr ||
             attr == &queue_max_active_zones_entry.attr) &&
            !blk_queue_is_zoned(q))
                return 0;

        return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
                                         struct attribute *attr, int n)
{
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;

        if (!queue_is_mq(q))
                return 0;

        if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
                return 0;

        return attr->mode;
}
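
/*
 * Returning 0 from an is_visible callback hides the attribute entirely,
 * so max_open_zones/max_active_zones only appear for zoned queues, the
 * blk-mq group only appears for blk-mq queues, and io_timeout only
 * appears when the driver implements a ->timeout handler.
 */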

static struct attribute_group queue_attr_group = {
        .attrs = queue_attrs,
        .is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
        .attrs = blk_mq_queue_attrs,
        .is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}
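
/*
 * These two functions are the sysfs_ops dispatchers: every read or write
 * of a queue attribute funnels through here, which is why the individual
 * show/store methods above can rely on q->sysfs_lock being held and do
 * not take it themselves.
 */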

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
        &queue_attr_group,
        &blk_mq_queue_attr_group,
        NULL
};

static void blk_queue_release(struct kobject *kobj)
{
        /* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
        .default_groups = blk_queue_attr_groups,
        .sysfs_ops      = &queue_sysfs_ops,
        .release        = blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        mutex_lock(&q->debugfs_mutex);
        blk_trace_shutdown(q);
        debugfs_remove_recursive(q->debugfs_dir);
        q->debugfs_dir = NULL;
        q->sched_debugfs_dir = NULL;
        q->rqos_debugfs_dir = NULL;
        mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        int ret;

        mutex_lock(&q->sysfs_dir_lock);
        kobject_init(&disk->queue_kobj, &blk_queue_ktype);
        ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
        if (ret < 0)
                goto out_put_queue_kobj;

        if (queue_is_mq(q)) {
                ret = blk_mq_sysfs_register(disk);
                if (ret)
                        goto out_put_queue_kobj;
        }
        mutex_lock(&q->sysfs_lock);

        mutex_lock(&q->debugfs_mutex);
        q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
        if (queue_is_mq(q))
                blk_mq_debugfs_register(q);
        mutex_unlock(&q->debugfs_mutex);

        ret = disk_register_independent_access_ranges(disk);
        if (ret)
                goto out_debugfs_remove;

        if (q->elevator) {
                ret = elv_register_queue(q, false);
                if (ret)
                        goto out_unregister_ia_ranges;
        }

        ret = blk_crypto_sysfs_register(disk);
        if (ret)
                goto out_elv_unregister;

        blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
        wbt_enable_default(disk);
        blk_throtl_register(disk);

        /* Now everything is ready and send out KOBJ_ADD uevent */
        kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
        if (q->elevator)
                kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
        mutex_unlock(&q->sysfs_lock);
        mutex_unlock(&q->sysfs_dir_lock);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }

        return ret;

out_elv_unregister:
        elv_unregister_queue(q);
out_unregister_ia_ranges:
        disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
        blk_debugfs_remove(disk);
        mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
        kobject_put(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);
        return ret;
}
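
/*
 * The error labels above unwind in reverse order of setup; the final
 * kobject_put() drops the initial reference taken by kobject_init(),
 * which in turn invokes blk_queue_release().
 */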

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        /* Return early if disk->queue was never registered. */
        if (!blk_queue_registered(q))
                return;

        /*
         * Since sysfs_remove_dir() prevents adding new directory entries
         * before removal of existing entries starts, protect against
         * concurrent elv_iosched_store() calls.
         */
        mutex_lock(&q->sysfs_lock);
        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
        mutex_unlock(&q->sysfs_lock);

        mutex_lock(&q->sysfs_dir_lock);
        /*
         * Remove the sysfs attributes before unregistering the queue data
         * structures that can be modified through sysfs.
         */
        if (queue_is_mq(q))
                blk_mq_sysfs_unregister(disk);
        blk_crypto_sysfs_unregister(disk);

        mutex_lock(&q->sysfs_lock);
        elv_unregister_queue(q);
        disk_unregister_independent_access_ranges(disk);
        mutex_unlock(&q->sysfs_lock);

        /* Now that we've deleted all child objects, we can delete the queue. */
        kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
        kobject_del(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);

        blk_debugfs_remove(disk);
}
