// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}
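
/*
 * Note: on success queue_var_store() returns @count, so ->store callbacks
 * can hand its return value straight back to sysfs. Values above UINT_MAX
 * are rejected because most queue limits are 32-bit. For example
 * (illustrative only): a write of "128\n" parses to v == 128 and the
 * helper returns the full buffer length, newline included.
 */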

static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}
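
/*
 * "nr_requests" is writable only for blk-mq queues and is clamped to at
 * least BLKDEV_MIN_RQ; blk_mq_update_nr_requests() then resizes the tag
 * sets. Illustrative shell usage (device name hypothetical):
 *
 *     echo 256 > /sys/block/sda/queue/nr_requests
 */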

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb;

        if (!q->disk)
                return -EINVAL;
        ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret;

        if (!q->disk)
                return -EINVAL;
        ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;
        q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
}
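
/*
 * "read_ahead_kb" is stored as a page count on the backing_dev_info.
 * With 4 KiB pages, PAGE_SHIFT - 10 == 2, so e.g. 128 KiB of readahead
 * is kept as 32 pages; the shifts above convert in both directions.
 */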

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                                               char *page)
{
        return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}
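
/*
 * "discard_max_bytes" lets userspace lower the discard limit below the
 * hardware maximum reported in "discard_max_hw_bytes". The value is
 * supplied in bytes, must be a multiple of the discard granularity, and
 * is converted to 512-byte sectors (the >> 9 above) before being clamped
 * against max_hw_discard_sectors.
 */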

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
                                                 char *page)
{
        return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
        unsigned long long max_sectors = q->limits.max_zone_append_sectors;

        return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long var;
        unsigned int max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&var, page, count);

        if (ret < 0)
                return ret;

        max_sectors_kb = (unsigned int)var;
        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
                                         q->limits.max_dev_sectors >> 1);
        if (max_sectors_kb == 0) {
                q->limits.max_user_sectors = 0;
                max_sectors_kb = min(max_hw_sectors_kb,
                                     BLK_DEF_MAX_SECTORS >> 1);
        } else {
                if (max_sectors_kb > max_hw_sectors_kb ||
                    max_sectors_kb < page_kb)
                        return -EINVAL;
                q->limits.max_user_sectors = max_sectors_kb << 1;
        }

        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        if (q->disk)
                q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);

        return ret;
}
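
/*
 * "max_sectors_kb" is expressed in KiB while the queue limits are kept
 * in 512-byte sectors, hence the << 1 / >> 1 conversions above. Writing
 * 0 clears the user limit and falls back to the smaller of the hardware
 * limit and BLK_DEF_MAX_SECTORS; any other value must lie between one
 * page and max_hw_sectors_kb.
 */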

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_dma_alignment(q), page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_##name##_show(struct request_queue *q, char *page)                \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        if (val)                                                        \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
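
/*
 * Each QUEUE_SYSFS_BIT_FNS() invocation above generates a show/store
 * pair for one queue flag. With neg == 1 the value is inverted, e.g.
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands (roughly) to:
 *
 *     static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
 *     {
 *             int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
 *             return queue_var_show(!bit, page);
 *     }
 *
 * so the "rotational" attribute reads 0 when the NONROT flag is set.
 */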

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}
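
/*
 * "rq_affinity" thus encodes two flags: 0 clears both, 1 sets SAME_COMP
 * (complete requests on a CPU in the submitting CPU's group), and 2
 * additionally sets SAME_FORCE (complete strictly on the submitting
 * CPU). On !CONFIG_SMP builds any write fails with -EINVAL.
 */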

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                      size_t count)
{
        return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return -EINVAL;
        pr_info_ratelimited("writes to the poll attribute are ignored.\n");
        pr_info_ratelimited("please use driver specific parameters instead.\n");
        return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
                                      size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!wbt_rq_qos(q))
                return -EINVAL;

        if (wbt_disabled(q))
                return sprintf(page, "0\n");

        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_qos *rqos;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rqos = wbt_rq_qos(q);
        if (!rqos) {
                ret = wbt_init(q->disk);
                if (ret)
                        return ret;
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                return count;

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO inflight if that happens.
         */
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return count;
}
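
/*
 * "wbt_lat_usec" is exposed in microseconds but writeback throttling
 * tracks nanoseconds internally, hence the * 1000 on store and the
 * div_u64(..., 1000) on show. Writing -1 restores the default latency
 * target and writing 0 disables throttling. Illustrative usage (device
 * name hypothetical):
 *
 *     echo 75000 > /sys/block/sda/queue/wbt_lat_usec
 */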

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        if (set)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);

        return count;
}
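
/*
 * "write_cache" accepts the strings "write back" and "write through"
 * ("none" is treated like "write through"). Because the comparison uses
 * strncmp() on a prefix, trailing characters after a matching prefix are
 * ignored. This only changes whether the block layer issues flushes; it
 * does not reprogram the device's own cache mode.
 */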

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr   = { .name = _name, .mode = 0444 },      \
        .show   = _prefix##_show,                       \
};

#define QUEUE_RW_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr   = { .name = _name, .mode = 0644 },      \
        .show   = _prefix##_show,                       \
        .store  = _prefix##_store,                      \
};
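
/*
 * These macros pair a sysfs attribute name with the <prefix>_show and
 * <prefix>_store functions defined above. For example,
 * QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") creates a 0644 attribute
 * backed by queue_ra_show() and queue_ra_store().
 */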

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &elv_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_zone_append_max_entry.attr,
        &queue_zone_write_granularity_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,
        &queue_max_open_zones_entry.attr,
        &queue_max_active_zones_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_stable_writes_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_fua_entry.attr,
        &queue_dax_entry.attr,
        &queue_wb_lat_entry.attr,
        &queue_poll_delay_entry.attr,
        &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &blk_throtl_sample_time_entry.attr,
#endif
        &queue_virt_boundary_mask_entry.attr,
        &queue_dma_alignment_entry.attr,
        NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                  int n)
{
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;

        if (attr == &queue_io_timeout_entry.attr &&
            (!q->mq_ops || !q->mq_ops->timeout))
                return 0;

        if ((attr == &queue_max_open_zones_entry.attr ||
             attr == &queue_max_active_zones_entry.attr) &&
            !blk_queue_is_zoned(q))
                return 0;

        return attr->mode;
}

static struct attribute_group queue_attr_group = {
        .attrs          = queue_attrs,
        .is_visible     = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};
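
/*
 * All show/store callbacks above run under q->sysfs_lock, so individual
 * attribute handlers need no extra serialization against other sysfs
 * accesses. sysfs hands ->show a full PAGE_SIZE buffer and passes
 * ->store a NUL-terminated copy of the user's write.
 */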

static const struct attribute_group *blk_queue_attr_groups[] = {
        &queue_attr_group,
        NULL
};

static void blk_queue_release(struct kobject *kobj)
{
        /* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
        .default_groups = blk_queue_attr_groups,
        .sysfs_ops      = &queue_sysfs_ops,
        .release        = blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        mutex_lock(&q->debugfs_mutex);
        blk_trace_shutdown(q);
        debugfs_remove_recursive(q->debugfs_dir);
        q->debugfs_dir = NULL;
        q->sched_debugfs_dir = NULL;
        q->rqos_debugfs_dir = NULL;
        mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        int ret;

        mutex_lock(&q->sysfs_dir_lock);
        kobject_init(&disk->queue_kobj, &blk_queue_ktype);
        ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
        if (ret < 0)
                goto out_put_queue_kobj;

        if (queue_is_mq(q)) {
                ret = blk_mq_sysfs_register(disk);
                if (ret)
                        goto out_put_queue_kobj;
        }
        mutex_lock(&q->sysfs_lock);

        mutex_lock(&q->debugfs_mutex);
        q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
        if (queue_is_mq(q))
                blk_mq_debugfs_register(q);
        mutex_unlock(&q->debugfs_mutex);

        ret = disk_register_independent_access_ranges(disk);
        if (ret)
                goto out_debugfs_remove;

        if (q->elevator) {
                ret = elv_register_queue(q, false);
                if (ret)
                        goto out_unregister_ia_ranges;
        }

        ret = blk_crypto_sysfs_register(disk);
        if (ret)
                goto out_elv_unregister;

        blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
        wbt_enable_default(disk);
        blk_throtl_register(disk);

        /* Now everything is ready and send out KOBJ_ADD uevent */
        kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
        if (q->elevator)
                kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
        mutex_unlock(&q->sysfs_lock);
        mutex_unlock(&q->sysfs_dir_lock);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices. Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved. To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }

        return ret;

out_elv_unregister:
        elv_unregister_queue(q);
out_unregister_ia_ranges:
        disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
        blk_debugfs_remove(disk);
        mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
        kobject_put(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);
        return ret;
}
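
/*
 * The error labels above unwind in exactly the reverse order of
 * registration: crypto sysfs after the elevator, the elevator after the
 * independent access ranges, and so on, with the queue kobject reference
 * dropped last. The KOBJ_ADD uevent is deliberately deferred until every
 * attribute group is in place so userspace never sees a half-populated
 * "queue" directory.
 */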

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        /* Return early if disk->queue was never registered. */
        if (!blk_queue_registered(q))
                return;

        /*
         * Since sysfs_remove_dir() prevents adding new directory entries
         * before removal of existing entries starts, protect against
         * concurrent elv_iosched_store() calls.
         */
        mutex_lock(&q->sysfs_lock);
        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
        mutex_unlock(&q->sysfs_lock);

        mutex_lock(&q->sysfs_dir_lock);
        /*
         * Remove the sysfs attributes before unregistering the queue data
         * structures that can be modified through sysfs.
         */
        if (queue_is_mq(q))
                blk_mq_sysfs_unregister(disk);
        blk_crypto_sysfs_unregister(disk);

        mutex_lock(&q->sysfs_lock);
        elv_unregister_queue(q);
        disk_unregister_independent_access_ranges(disk);
        mutex_unlock(&q->sysfs_lock);

        /* Now that we've deleted all child objects, we can delete the queue. */
        kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
        kobject_del(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);

        blk_debugfs_remove(disk);
}