Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef BLK_MQ_H
3 : #define BLK_MQ_H
4 :
5 : #include <linux/blkdev.h>
6 : #include <linux/sbitmap.h>
7 : #include <linux/lockdep.h>
8 : #include <linux/scatterlist.h>
9 : #include <linux/prefetch.h>
10 : #include <linux/srcu.h>
11 :
12 : struct blk_mq_tags;
13 : struct blk_flush_queue;
14 :
15 : #define BLKDEV_MIN_RQ 4
16 : #define BLKDEV_DEFAULT_RQ 128
17 :
18 : enum rq_end_io_ret {
19 : RQ_END_IO_NONE,
20 : RQ_END_IO_FREE,
21 : };
22 :
23 : typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
24 :
25 : /* request flags */
26 :
27 : typedef __u32 __bitwise req_flags_t;
28 :
29 : /* drive may already have started this one */
30 : #define RQF_STARTED ((__force req_flags_t)(1 << 1))
31 : /* may not be passed by ioscheduler */
32 : #define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
33 : /* request for flush sequence */
34 : #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
35 : /* merge of different types, fail separately */
36 : #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
37 : /* track inflight for MQ */
38 : #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
39 : /* don't call prep for this one */
40 : #define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
41 : /* vaguely specified driver internal error. Ignored by the block layer */
42 : #define RQF_FAILED ((__force req_flags_t)(1 << 10))
43 : /* don't warn about errors */
44 : #define RQF_QUIET ((__force req_flags_t)(1 << 11))
45 : /* elevator private data attached */
46 : #define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
47 : /* account into disk and partition IO statistics */
48 : #define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
49 : /* runtime pm request */
50 : #define RQF_PM ((__force req_flags_t)(1 << 15))
51 : /* on IO scheduler merge hash */
52 : #define RQF_HASHED ((__force req_flags_t)(1 << 16))
53 : /* track IO completion time */
54 : #define RQF_STATS ((__force req_flags_t)(1 << 17))
55 : /* Look at ->special_vec for the actual data payload instead of the
56 : bio chain. */
57 : #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
58 : /* The per-zone write lock is held for this request */
59 : #define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
60 : /* already slept for hybrid poll */
61 : #define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
62 : /* ->timeout has been called, don't expire again */
63 : #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
64 : /* queue has elevator attached */
65 : #define RQF_ELV ((__force req_flags_t)(1 << 22))
66 : #define RQF_RESV ((__force req_flags_t)(1 << 23))
67 :
68 : /* flags that prevent us from merging requests: */
69 : #define RQF_NOMERGE_FLAGS \
70 : (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
71 :
72 : enum mq_rq_state {
73 : MQ_RQ_IDLE = 0,
74 : MQ_RQ_IN_FLIGHT = 1,
75 : MQ_RQ_COMPLETE = 2,
76 : };
77 :
78 : /*
79 : * Try to put the fields that are referenced together in the same cacheline.
80 : *
81 : * If you modify this structure, make sure to update blk_rq_init() and
82 : * especially blk_mq_rq_ctx_init() to take care of the added fields.
83 : */
84 : struct request {
85 : struct request_queue *q;
86 : struct blk_mq_ctx *mq_ctx;
87 : struct blk_mq_hw_ctx *mq_hctx;
88 :
89 : blk_opf_t cmd_flags; /* op and common flags */
90 : req_flags_t rq_flags;
91 :
92 : int tag;
93 : int internal_tag;
94 :
95 : unsigned int timeout;
96 :
97 : /* the following two fields are internal, NEVER access directly */
98 : unsigned int __data_len; /* total data len */
99 : sector_t __sector; /* sector cursor */
100 :
101 : struct bio *bio;
102 : struct bio *biotail;
103 :
104 : union {
105 : struct list_head queuelist;
106 : struct request *rq_next;
107 : };
108 :
109 : struct block_device *part;
110 : #ifdef CONFIG_BLK_RQ_ALLOC_TIME
111 : /* Time that the first bio started allocating this request. */
112 : u64 alloc_time_ns;
113 : #endif
114 : /* Time that this request was allocated for this IO. */
115 : u64 start_time_ns;
116 : /* Time that I/O was submitted to the device. */
117 : u64 io_start_time_ns;
118 :
119 : #ifdef CONFIG_BLK_WBT
120 : unsigned short wbt_flags;
121 : #endif
122 : /*
123 : * rq sectors used for blk stats. It has the same value as
124 : * blk_rq_sectors(rq), except that it is never zeroed by
125 : * completion.
126 : */
127 : unsigned short stats_sectors;
128 :
129 : /*
130 : * Number of scatter-gather DMA addr+len pairs after
131 : * physical address coalescing is performed.
132 : */
133 : unsigned short nr_phys_segments;
134 :
135 : #ifdef CONFIG_BLK_DEV_INTEGRITY
136 : unsigned short nr_integrity_segments;
137 : #endif
138 :
139 : #ifdef CONFIG_BLK_INLINE_ENCRYPTION
140 : struct bio_crypt_ctx *crypt_ctx;
141 : struct blk_crypto_keyslot *crypt_keyslot;
142 : #endif
143 :
144 : unsigned short ioprio;
145 :
146 : enum mq_rq_state state;
147 : atomic_t ref;
148 :
149 : unsigned long deadline;
150 :
151 : /*
152 : * The hash is used inside the scheduler, and killed once the
153 : * request reaches the dispatch list. The ipi_list is only used
154 : * to queue the request for softirq completion, which is long
155 : * after the request has been unhashed (and even removed from
156 : * the dispatch list).
157 : */
158 : union {
159 : struct hlist_node hash; /* merge hash */
160 : struct llist_node ipi_list;
161 : };
162 :
163 : /*
164 : * The rb_node is only used inside the io scheduler, requests
165 : * are pruned when moved to the dispatch queue. So let the
166 : * completion_data share space with the rb_node.
167 : */
168 : union {
169 : struct rb_node rb_node; /* sort/lookup */
170 : struct bio_vec special_vec;
171 : void *completion_data;
172 : };
173 :
174 :
175 : /*
176 : * Three pointers are available for the IO schedulers; if they need
177 : * more, they have to allocate them dynamically. Flush requests are
178 : * never put on the IO scheduler. So let the flush fields share
179 : * space with the elevator data.
180 : */
181 : union {
182 : struct {
183 : struct io_cq *icq;
184 : void *priv[2];
185 : } elv;
186 :
187 : struct {
188 : unsigned int seq;
189 : struct list_head list;
190 : rq_end_io_fn *saved_end_io;
191 : } flush;
192 : };
193 :
194 : union {
195 : struct __call_single_data csd;
196 : u64 fifo_time;
197 : };
198 :
199 : /*
200 : * completion callback.
201 : */
202 : rq_end_io_fn *end_io;
203 : void *end_io_data;
204 : };
205 :
206 : static inline enum req_op req_op(const struct request *req)
207 : {
208 0 : return req->cmd_flags & REQ_OP_MASK;
209 : }
210 :
211 : static inline bool blk_rq_is_passthrough(struct request *rq)
212 : {
213 0 : return blk_op_is_passthrough(req_op(rq));
214 : }
215 :
216 : static inline unsigned short req_get_ioprio(struct request *req)
217 : {
218 : return req->ioprio;
219 : }
220 :
221 : #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
222 :
223 : #define rq_dma_dir(rq) \
224 : (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
225 :
226 : #define rq_list_add(listptr, rq) do { \
227 : (rq)->rq_next = *(listptr); \
228 : *(listptr) = rq; \
229 : } while (0)
230 :
231 : #define rq_list_pop(listptr) \
232 : ({ \
233 : struct request *__req = NULL; \
234 : if ((listptr) && *(listptr)) { \
235 : __req = *(listptr); \
236 : *(listptr) = __req->rq_next; \
237 : } \
238 : __req; \
239 : })
240 :
241 : #define rq_list_peek(listptr) \
242 : ({ \
243 : struct request *__req = NULL; \
244 : if ((listptr) && *(listptr)) \
245 : __req = *(listptr); \
246 : __req; \
247 : })
248 :
249 : #define rq_list_for_each(listptr, pos) \
250 : for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
251 :
252 : #define rq_list_for_each_safe(listptr, pos, nxt) \
253 : for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
254 : pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
255 :
256 : #define rq_list_next(rq) (rq)->rq_next
257 : #define rq_list_empty(list) ((list) == (struct request *) NULL)
258 :
259 : /**
260 : * rq_list_move() - move a struct request from one list to another
261 : * @src: The source list @rq is currently in
262 : * @dst: The destination list that @rq will be appended to
263 : * @rq: The request to move
264 : * @prev: The request preceding @rq in @src (NULL if @rq is the head)
265 : */
266 : static inline void rq_list_move(struct request **src, struct request **dst,
267 : struct request *rq, struct request *prev)
268 : {
269 : if (prev)
270 : prev->rq_next = rq->rq_next;
271 : else
272 : *src = rq->rq_next;
273 : rq_list_add(dst, rq);
274 : }
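
/*
 * Illustrative sketch: walking an rq_list with the macros above.  A driver
 * typically receives such a singly linked list in ->queue_rqs() and either
 * walks it or pops requests off the head with rq_list_pop().
 * (example_rq_list_count is a hypothetical helper, not a kernel API.)
 */
static inline unsigned int example_rq_list_count(struct request **rqlist)
{
	struct request *rq;
	unsigned int nr = 0;

	/* Non-destructive walk over the singly linked list. */
	rq_list_for_each(rqlist, rq)
		nr++;
	return nr;
}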
275 :
276 : /**
277 : * enum blk_eh_timer_return - How the timeout handler should proceed
278 : * @BLK_EH_DONE: The block driver completed the command or will complete it at
279 : * a later time.
280 : * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
281 : * request to complete.
282 : */
283 : enum blk_eh_timer_return {
284 : BLK_EH_DONE,
285 : BLK_EH_RESET_TIMER,
286 : };
287 :
288 : #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
289 : #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
290 :
291 : /**
292 : * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
293 : * block device
294 : */
295 : struct blk_mq_hw_ctx {
296 : struct {
297 : /** @lock: Protects the dispatch list. */
298 : spinlock_t lock;
299 : /**
300 : * @dispatch: Used for requests that are ready to be
301 : * dispatched to the hardware but for some reason (e.g. lack of
302 : * resources) could not be sent to the hardware. As soon as the
303 : * driver can send new requests, requests on this list are
304 : * sent first, for fairer dispatch.
305 : */
306 : struct list_head dispatch;
307 : /**
308 : * @state: BLK_MQ_S_* flags. Defines the state of the hw
309 : * queue (active, scheduled to restart, stopped).
310 : */
311 : unsigned long state;
312 : } ____cacheline_aligned_in_smp;
313 :
314 : /**
315 : * @run_work: Used for scheduling a hardware queue run at a later time.
316 : */
317 : struct delayed_work run_work;
318 : /** @cpumask: Map of available CPUs where this hctx can run. */
319 : cpumask_var_t cpumask;
320 : /**
321 : * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
322 : * selection from @cpumask.
323 : */
324 : int next_cpu;
325 : /**
326 : * @next_cpu_batch: Counter of how many queue runs are left in the batch
327 : * before switching to the next CPU.
328 : */
329 : int next_cpu_batch;
330 :
331 : /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
332 : unsigned long flags;
333 :
334 : /**
335 : * @sched_data: Pointer owned by the IO scheduler attached to a request
336 : * queue. It's up to the IO scheduler how to use this pointer.
337 : */
338 : void *sched_data;
339 : /**
340 : * @queue: Pointer to the request queue that owns this hardware context.
341 : */
342 : struct request_queue *queue;
343 : /** @fq: Queue of requests that need to perform a flush operation. */
344 : struct blk_flush_queue *fq;
345 :
346 : /**
347 : * @driver_data: Pointer to data owned by the block driver that created
348 : * this hctx
349 : */
350 : void *driver_data;
351 :
352 : /**
353 : * @ctx_map: Bitmap for each software queue. If bit is on, there is a
354 : * pending request in that software queue.
355 : */
356 : struct sbitmap ctx_map;
357 :
358 : /**
359 : * @dispatch_from: Software queue to be used when no scheduler was
360 : * selected.
361 : */
362 : struct blk_mq_ctx *dispatch_from;
363 : /**
364 : * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
365 : * decide if the hw_queue is busy using an Exponential Weighted Moving
366 : * Average algorithm.
367 : */
368 : unsigned int dispatch_busy;
369 :
370 : /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
371 : unsigned short type;
372 : /** @nr_ctx: Number of software queues. */
373 : unsigned short nr_ctx;
374 : /** @ctxs: Array of software queues. */
375 : struct blk_mq_ctx **ctxs;
376 :
377 : /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
378 : spinlock_t dispatch_wait_lock;
379 : /**
380 : * @dispatch_wait: Waitqueue to put requests when there is no tag
381 : * available at the moment, to wait for another try in the future.
382 : */
383 : wait_queue_entry_t dispatch_wait;
384 :
385 : /**
386 : * @wait_index: Index of next available dispatch_wait queue to insert
387 : * requests.
388 : */
389 : atomic_t wait_index;
390 :
391 : /**
392 : * @tags: Tags owned by the block driver. A tag in this set is only
393 : * assigned when a request is dispatched from a hardware queue.
394 : */
395 : struct blk_mq_tags *tags;
396 : /**
397 : * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
398 : * scheduler associated with a request queue, a tag is assigned when
399 : * that request is allocated. Else, this member is not used.
400 : */
401 : struct blk_mq_tags *sched_tags;
402 :
403 : /** @queued: Number of queued requests. */
404 : unsigned long queued;
405 : /** @run: Number of dispatched requests. */
406 : unsigned long run;
407 :
408 : /** @numa_node: NUMA node the storage adapter has been connected to. */
409 : unsigned int numa_node;
410 : /** @queue_num: Index of this hardware queue. */
411 : unsigned int queue_num;
412 :
413 : /**
414 : * @nr_active: Number of active requests. Only used when a tag set is
415 : * shared across request queues.
416 : */
417 : atomic_t nr_active;
418 :
419 : /** @cpuhp_online: List to store requests if a CPU is going to die. */
420 : struct hlist_node cpuhp_online;
421 : /** @cpuhp_dead: List to store requests if some CPU dies. */
422 : struct hlist_node cpuhp_dead;
423 : /** @kobj: Kernel object for sysfs. */
424 : struct kobject kobj;
425 :
426 : #ifdef CONFIG_BLK_DEBUG_FS
427 : /**
428 : * @debugfs_dir: debugfs directory for this hardware queue. Named
429 : * as cpu<cpu_number>.
430 : */
431 : struct dentry *debugfs_dir;
432 : /** @sched_debugfs_dir: debugfs directory for the scheduler. */
433 : struct dentry *sched_debugfs_dir;
434 : #endif
435 :
436 : /**
437 : * @hctx_list: if this hctx is not in use, this is an entry in
438 : * q->unused_hctx_list.
439 : */
440 : struct list_head hctx_list;
441 : };
442 :
443 : /**
444 : * struct blk_mq_queue_map - Map software queues to hardware queues
445 : * @mq_map: CPU ID to hardware queue index map. This is an array
446 : * with nr_cpu_ids elements. Each element has a value in the range
447 : * [@queue_offset, @queue_offset + @nr_queues).
448 : * @nr_queues: Number of hardware queues to map CPU IDs onto.
449 : * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
450 : * driver to map each hardware queue type (enum hctx_type) onto a distinct
451 : * set of hardware queues.
452 : */
453 : struct blk_mq_queue_map {
454 : unsigned int *mq_map;
455 : unsigned int nr_queues;
456 : unsigned int queue_offset;
457 : };
458 :
459 : /**
460 : * enum hctx_type - Type of hardware queue
461 : * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
462 : * @HCTX_TYPE_READ: Just for READ I/O.
463 : * @HCTX_TYPE_POLL: Polled I/O of any kind.
464 : * @HCTX_MAX_TYPES: Number of types of hctx.
465 : */
466 : enum hctx_type {
467 : HCTX_TYPE_DEFAULT,
468 : HCTX_TYPE_READ,
469 : HCTX_TYPE_POLL,
470 :
471 : HCTX_MAX_TYPES,
472 : };
473 :
474 : /**
475 : * struct blk_mq_tag_set - tag set that can be shared between request queues
476 : * @ops: Pointers to functions that implement block driver behavior.
477 : * @map: One or more ctx -> hctx mappings. One map exists for each
478 : * hardware queue type (enum hctx_type) that the driver wishes
479 : * to support. There are no restrictions on maps being of the
480 : * same size, and it's perfectly legal to share maps between
481 : * types.
482 : * @nr_maps: Number of elements in the @map array. A number in the range
483 : * [1, HCTX_MAX_TYPES].
484 : * @nr_hw_queues: Number of hardware queues supported by the block driver that
485 : * owns this data structure.
486 : * @queue_depth: Number of tags per hardware queue, reserved tags included.
487 : * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
488 : * allocations.
489 : * @cmd_size: Number of additional bytes to allocate per request. The block
490 : * driver owns these additional bytes.
491 : * @numa_node: NUMA node the storage adapter has been connected to.
492 : * @timeout: Request processing timeout in jiffies.
493 : * @flags: Zero or more BLK_MQ_F_* flags.
494 : * @driver_data: Pointer to data owned by the block driver that created this
495 : * tag set.
496 : * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
497 : * elements.
498 : * @shared_tags:
499 : * Shared set of tags. Has @nr_hw_queues elements. If set,
500 : * shared by all @tags.
501 : * @tag_list_lock: Serializes tag_list accesses.
502 : * @tag_list: List of the request queues that use this tag set. See also
503 : * request_queue.tag_set_list.
504 : * @srcu: Use as lock when type of the request queue is blocking
505 : * (BLK_MQ_F_BLOCKING).
506 : */
507 : struct blk_mq_tag_set {
508 : const struct blk_mq_ops *ops;
509 : struct blk_mq_queue_map map[HCTX_MAX_TYPES];
510 : unsigned int nr_maps;
511 : unsigned int nr_hw_queues;
512 : unsigned int queue_depth;
513 : unsigned int reserved_tags;
514 : unsigned int cmd_size;
515 : int numa_node;
516 : unsigned int timeout;
517 : unsigned int flags;
518 : void *driver_data;
519 :
520 : struct blk_mq_tags **tags;
521 :
522 : struct blk_mq_tags *shared_tags;
523 :
524 : struct mutex tag_list_lock;
525 : struct list_head tag_list;
526 : struct srcu_struct *srcu;
527 : };
528 :
529 : /**
530 : * struct blk_mq_queue_data - Data about a request inserted in a queue
531 : *
532 : * @rq: Request pointer.
533 : * @last: If it is the last request in the queue.
534 : */
535 : struct blk_mq_queue_data {
536 : struct request *rq;
537 : bool last;
538 : };
539 :
540 : typedef bool (busy_tag_iter_fn)(struct request *, void *);
541 :
542 : /**
543 : * struct blk_mq_ops - Callback functions that implement block driver
544 : * behaviour.
545 : */
546 : struct blk_mq_ops {
547 : /**
548 : * @queue_rq: Queue a new request from block IO.
549 : */
550 : blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
551 : const struct blk_mq_queue_data *);
552 :
553 : /**
554 : * @commit_rqs: If a driver uses bd->last to judge when to submit
555 : * requests to hardware, it must define this function. In case of errors
556 : * that make us stop issuing further requests, this hook serves the
557 : * purpose of kicking the hardware (which the last request otherwise
558 : * would have done).
559 : */
560 : void (*commit_rqs)(struct blk_mq_hw_ctx *);
561 :
562 : /**
563 : * @queue_rqs: Queue a list of new requests. Driver is guaranteed
564 : * that each request belongs to the same queue. If the driver doesn't
565 : * empty the @rqlist completely, then the rest will be queued
566 : * individually by the block layer upon return.
567 : */
568 : void (*queue_rqs)(struct request **rqlist);
569 :
570 : /**
571 : * @get_budget: Reserve a budget before queueing a request. Once
572 : * .queue_rq has run, it is the driver's responsibility to
573 : * release the reserved budget. The failure case of
574 : * .get_budget also has to be handled to avoid I/O deadlocks.
575 : */
576 : int (*get_budget)(struct request_queue *);
577 :
578 : /**
579 : * @put_budget: Release the reserved budget.
580 : */
581 : void (*put_budget)(struct request_queue *, int);
582 :
583 : /**
584 : * @set_rq_budget_token: store rq's budget token
585 : */
586 : void (*set_rq_budget_token)(struct request *, int);
587 : /**
588 : * @get_rq_budget_token: retrieve rq's budget token
589 : */
590 : int (*get_rq_budget_token)(struct request *);
591 :
592 : /**
593 : * @timeout: Called on request timeout.
594 : */
595 : enum blk_eh_timer_return (*timeout)(struct request *);
596 :
597 : /**
598 : * @poll: Called to poll for completion of a specific tag.
599 : */
600 : int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
601 :
602 : /**
603 : * @complete: Mark the request as complete.
604 : */
605 : void (*complete)(struct request *);
606 :
607 : /**
608 : * @init_hctx: Called when the block layer side of a hardware queue has
609 : * been set up, allowing the driver to allocate/init matching
610 : * structures.
611 : */
612 : int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
613 : /**
614 : * @exit_hctx: Ditto for exit/teardown.
615 : */
616 : void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
617 :
618 : /**
619 : * @init_request: Called for every command allocated by the block layer
620 : * to allow the driver to set up driver specific data.
621 : *
622 : * A tag greater than or equal to queue_depth is used for setting up
623 : * the flush request.
624 : */
625 : int (*init_request)(struct blk_mq_tag_set *set, struct request *,
626 : unsigned int, unsigned int);
627 : /**
628 : * @exit_request: Ditto for exit/teardown.
629 : */
630 : void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
631 : unsigned int);
632 :
633 : /**
634 : * @cleanup_rq: Called before freeing a request that has not completed
635 : * yet, usually to free the driver's private data.
636 : */
637 : void (*cleanup_rq)(struct request *);
638 :
639 : /**
640 : * @busy: If set, returns whether or not this queue currently is busy.
641 : */
642 : bool (*busy)(struct request_queue *);
643 :
644 : /**
645 : * @map_queues: This allows drivers to specify their own queue mapping by
646 : * overriding the setup-time function that builds the mq_map.
647 : */
648 : void (*map_queues)(struct blk_mq_tag_set *set);
649 :
650 : #ifdef CONFIG_BLK_DEBUG_FS
651 : /**
652 : * @show_rq: Used by the debugfs implementation to show driver-specific
653 : * information about a request.
654 : */
655 : void (*show_rq)(struct seq_file *m, struct request *rq);
656 : #endif
657 : };
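
/*
 * Illustrative sketch of the driver side: the minimal shape of a blk_mq_ops
 * as it would appear in a driver source file.  Only ->queue_rq is mandatory;
 * example_queue_rq, example_timeout and example_mq_ops are hypothetical names.
 */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);		/* mark the request in flight */
	/* ... hand @rq to the hardware here ... */
	blk_mq_end_request(rq, BLK_STS_OK);	/* completed synchronously in this sketch */
	return BLK_STS_OK;
}

static enum blk_eh_timer_return example_timeout(struct request *rq)
{
	return BLK_EH_RESET_TIMER;		/* keep waiting for the device */
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.timeout	= example_timeout,
};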
658 :
659 : enum {
660 : BLK_MQ_F_SHOULD_MERGE = 1 << 0,
661 : BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
662 : /*
663 : * Set when this device requires an underlying blk-mq device for
664 : * completing IO.
665 : */
666 : BLK_MQ_F_STACKING = 1 << 2,
667 : BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
668 : BLK_MQ_F_BLOCKING = 1 << 5,
669 : /* Do not allow an I/O scheduler to be configured. */
670 : BLK_MQ_F_NO_SCHED = 1 << 6,
671 : /*
672 : * Select 'none' during queue registration in case of a single hwq
673 : * or shared hwqs instead of 'mq-deadline'.
674 : */
675 : BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
676 : BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
677 : BLK_MQ_F_ALLOC_POLICY_BITS = 1,
678 :
679 : BLK_MQ_S_STOPPED = 0,
680 : BLK_MQ_S_TAG_ACTIVE = 1,
681 : BLK_MQ_S_SCHED_RESTART = 2,
682 :
683 : /* hw queue is inactive after all its CPUs become offline */
684 : BLK_MQ_S_INACTIVE = 3,
685 :
686 : BLK_MQ_MAX_DEPTH = 10240,
687 :
688 : BLK_MQ_CPU_WORK_BATCH = 8,
689 : };
690 : #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
691 : ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
692 : ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
693 : #define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
694 : ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
695 : << BLK_MQ_F_ALLOC_POLICY_START_BIT)
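
/*
 * Illustrative sketch: encoding a tag allocation policy into the tag set
 * flags.  SCSI hosts, for instance, select round-robin allocation this way.
 * (example_tag_set_flags is a hypothetical helper.)
 */
static inline unsigned int example_tag_set_flags(void)
{
	return BLK_MQ_F_SHOULD_MERGE |
	       BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
}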
696 :
697 : #define BLK_MQ_NO_HCTX_IDX (-1U)
698 :
699 : struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
700 : struct lock_class_key *lkclass);
701 : #define blk_mq_alloc_disk(set, queuedata) \
702 : ({ \
703 : static struct lock_class_key __key; \
704 : \
705 : __blk_mq_alloc_disk(set, queuedata, &__key); \
706 : })
707 : struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
708 : struct lock_class_key *lkclass);
709 : struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
710 : int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
711 : struct request_queue *q);
712 : void blk_mq_destroy_queue(struct request_queue *);
713 :
714 : int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
715 : int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
716 : const struct blk_mq_ops *ops, unsigned int queue_depth,
717 : unsigned int set_flags);
718 : void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
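
/*
 * Illustrative sketch of driver initialization: fill in a tag set, allocate
 * it, then allocate a gendisk bound to it.  struct example_dev and
 * example_probe are hypothetical; example_mq_ops refers to the ops sketch
 * above.  For a single hardware queue, blk_mq_alloc_sq_tag_set() is a
 * convenient shortcut for the tag set setup below.
 */
struct example_dev {				/* hypothetical driver state */
	struct blk_mq_tag_set	tag_set;
	struct gendisk		*disk;
};

static int example_probe(struct example_dev *dev)
{
	struct gendisk *disk;
	int ret;

	dev->tag_set.ops = &example_mq_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = BLKDEV_DEFAULT_RQ;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.cmd_size = 64;		/* driver-private bytes (PDU) per request */
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.driver_data = dev;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	disk = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(disk)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(disk);
	}
	dev->disk = disk;
	return 0;
}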
719 :
720 : void blk_mq_free_request(struct request *rq);
721 :
722 : bool blk_mq_queue_inflight(struct request_queue *q);
723 :
724 : enum {
725 : /* return when out of requests */
726 : BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
727 : /* allocate from reserved pool */
728 : BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
729 : /* set RQF_PM */
730 : BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
731 : };
732 :
733 : struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
734 : blk_mq_req_flags_t flags);
735 : struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
736 : blk_opf_t opf, blk_mq_req_flags_t flags,
737 : unsigned int hctx_idx);
738 :
739 : /*
740 : * Tag address space map.
741 : */
742 : struct blk_mq_tags {
743 : unsigned int nr_tags;
744 : unsigned int nr_reserved_tags;
745 :
746 : atomic_t active_queues;
747 :
748 : struct sbitmap_queue bitmap_tags;
749 : struct sbitmap_queue breserved_tags;
750 :
751 : struct request **rqs;
752 : struct request **static_rqs;
753 : struct list_head page_list;
754 :
755 : /*
756 : * used to clear request reference in rqs[] before freeing one
757 : * request pool
758 : */
759 : spinlock_t lock;
760 : };
761 :
762 : static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
763 : unsigned int tag)
764 : {
765 0 : if (tag < tags->nr_tags) {
766 0 : prefetch(tags->rqs[tag]);
767 : return tags->rqs[tag];
768 : }
769 :
770 : return NULL;
771 : }
772 :
773 : enum {
774 : BLK_MQ_UNIQUE_TAG_BITS = 16,
775 : BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
776 : };
777 :
778 : u32 blk_mq_unique_tag(struct request *rq);
779 :
780 : static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
781 : {
782 : return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
783 : }
784 :
785 : static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
786 : {
787 : return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
788 : }
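
/*
 * Illustrative sketch: splitting the value returned by blk_mq_unique_tag()
 * back into its hardware queue index (upper 16 bits) and per-queue tag
 * (lower 16 bits).  example_decode_unique_tag is a hypothetical helper.
 */
static inline void example_decode_unique_tag(struct request *rq,
					     u16 *hwq, u16 *tag)
{
	u32 unique = blk_mq_unique_tag(rq);

	*hwq = blk_mq_unique_tag_to_hwq(unique);
	*tag = blk_mq_unique_tag_to_tag(unique);
}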
789 :
790 : /**
791 : * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
792 : * @rq: target request.
793 : */
794 : static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
795 : {
796 0 : return READ_ONCE(rq->state);
797 : }
798 :
799 : static inline int blk_mq_request_started(struct request *rq)
800 : {
801 0 : return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
802 : }
803 :
804 : static inline int blk_mq_request_completed(struct request *rq)
805 : {
806 0 : return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
807 : }
808 :
809 : /*
810 : *
811 : * Set the state to complete when completing a request from inside ->queue_rq.
812 : * This is used by drivers that want to ensure special complete actions that
813 : * need access to the request are called on failure, e.g. by nvme for
814 : * multipathing.
815 : */
816 : static inline void blk_mq_set_request_complete(struct request *rq)
817 : {
818 : WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
819 : }
820 :
821 : /*
822 : * Complete the request directly instead of deferring it to softirq or
823 : * completing it on another CPU. Useful in preemptible context rather than in an interrupt.
824 : */
825 : static inline void blk_mq_complete_request_direct(struct request *rq,
826 : void (*complete)(struct request *rq))
827 : {
828 : WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
829 : complete(rq);
830 : }
831 :
832 : void blk_mq_start_request(struct request *rq);
833 : void blk_mq_end_request(struct request *rq, blk_status_t error);
834 : void __blk_mq_end_request(struct request *rq, blk_status_t error);
835 : void blk_mq_end_request_batch(struct io_comp_batch *ib);
836 :
837 : /*
838 : * Only need start/end time stamping if we have iostat or
839 : * blk stats enabled, or are using an IO scheduler.
840 : */
841 : static inline bool blk_mq_need_time_stamp(struct request *rq)
842 : {
843 0 : return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
844 : }
845 :
846 : static inline bool blk_mq_is_reserved_rq(struct request *rq)
847 : {
848 : return rq->rq_flags & RQF_RESV;
849 : }
850 :
851 : /*
852 : * Batched completions only work when there is no I/O error and no special
853 : * ->end_io handler.
854 : */
855 : static inline bool blk_mq_add_to_batch(struct request *req,
856 : struct io_comp_batch *iob, int ioerror,
857 : void (*complete)(struct io_comp_batch *))
858 : {
859 : if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
860 : (req->end_io && !blk_rq_is_passthrough(req)))
861 : return false;
862 :
863 : if (!iob->complete)
864 : iob->complete = complete;
865 : else if (iob->complete != complete)
866 : return false;
867 : iob->need_ts |= blk_mq_need_time_stamp(req);
868 : rq_list_add(&iob->req_list, req);
869 : return true;
870 : }
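
/*
 * Illustrative sketch: typical use of blk_mq_add_to_batch() in a driver
 * completion (e.g. poll) path, falling back to blk_mq_complete_request()
 * when the request cannot be batched.  The example_* names are hypothetical.
 */
static void example_complete_batch(struct io_comp_batch *iob)
{
	/* Per-request driver cleanup would go here, before ending the batch. */
	blk_mq_end_request_batch(iob);
}

static void example_complete_one(struct request *rq, struct io_comp_batch *iob,
				 int error)
{
	if (!blk_mq_add_to_batch(rq, iob, error, example_complete_batch))
		blk_mq_complete_request(rq);
}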
871 :
872 : void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
873 : void blk_mq_kick_requeue_list(struct request_queue *q);
874 : void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
875 : void blk_mq_complete_request(struct request *rq);
876 : bool blk_mq_complete_request_remote(struct request *rq);
877 : void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
878 : void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
879 : void blk_mq_stop_hw_queues(struct request_queue *q);
880 : void blk_mq_start_hw_queues(struct request_queue *q);
881 : void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
882 : void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
883 : void blk_mq_quiesce_queue(struct request_queue *q);
884 : void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
885 : void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
886 : void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
887 : void blk_mq_unquiesce_queue(struct request_queue *q);
888 : void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
889 : void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
890 : void blk_mq_run_hw_queues(struct request_queue *q, bool async);
891 : void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
892 : void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
893 : busy_tag_iter_fn *fn, void *priv);
894 : void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
895 : void blk_mq_freeze_queue(struct request_queue *q);
896 : void blk_mq_unfreeze_queue(struct request_queue *q);
897 : void blk_freeze_queue_start(struct request_queue *q);
898 : void blk_mq_freeze_queue_wait(struct request_queue *q);
899 : int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
900 : unsigned long timeout);
901 :
902 : void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
903 : void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
904 :
905 : void blk_mq_quiesce_queue_nowait(struct request_queue *q);
906 :
907 : unsigned int blk_mq_rq_cpu(struct request *rq);
908 :
909 : bool __blk_should_fake_timeout(struct request_queue *q);
910 : static inline bool blk_should_fake_timeout(struct request_queue *q)
911 : {
912 : if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
913 : test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
914 : return __blk_should_fake_timeout(q);
915 : return false;
916 : }
917 :
918 : /**
919 : * blk_mq_rq_from_pdu - cast a PDU to a request
920 : * @pdu: the PDU (Protocol Data Unit) to be cast
921 : *
922 : * Return: request
923 : *
924 : * Driver command data is immediately after the request. So subtract request
925 : * size to get back to the original request.
926 : */
927 : static inline struct request *blk_mq_rq_from_pdu(void *pdu)
928 : {
929 : return pdu - sizeof(struct request);
930 : }
931 :
932 : /**
933 : * blk_mq_rq_to_pdu - cast a request to a PDU
934 : * @rq: the request to be cast
935 : *
936 : * Return: pointer to the PDU
937 : *
938 : * Driver command data is immediately after the request. So add request to get
939 : * the PDU.
940 : */
941 : static inline void *blk_mq_rq_to_pdu(struct request *rq)
942 : {
943 : return rq + 1;
944 : }
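
/*
 * Illustrative sketch: a driver that sets tag_set.cmd_size to
 * sizeof(struct example_cmd) can convert between a request and its PDU with
 * the helpers above.  struct example_cmd is a hypothetical per-command
 * structure.
 */
struct example_cmd {
	u32	opcode;
	u32	status;
};

static inline struct example_cmd *example_rq_to_cmd(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);	/* PDU lives right after the request */
}

static inline struct request *example_cmd_to_rq(struct example_cmd *cmd)
{
	return blk_mq_rq_from_pdu(cmd);
}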
945 :
946 : #define queue_for_each_hw_ctx(q, hctx, i) \
947 : xa_for_each(&(q)->hctx_table, (i), (hctx))
948 :
949 : #define hctx_for_each_ctx(hctx, ctx, i) \
950 : for ((i) = 0; (i) < (hctx)->nr_ctx && \
951 : ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
952 :
953 : static inline void blk_mq_cleanup_rq(struct request *rq)
954 : {
955 : if (rq->q->mq_ops->cleanup_rq)
956 : rq->q->mq_ops->cleanup_rq(rq);
957 : }
958 :
959 : static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
960 : unsigned int nr_segs)
961 : {
962 0 : rq->nr_phys_segments = nr_segs;
963 0 : rq->__data_len = bio->bi_iter.bi_size;
964 0 : rq->bio = rq->biotail = bio;
965 0 : rq->ioprio = bio_prio(bio);
966 : }
967 :
968 : void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
969 : struct lock_class_key *key);
970 :
971 : static inline bool rq_is_sync(struct request *rq)
972 : {
973 : return op_is_sync(rq->cmd_flags);
974 : }
975 :
976 : void blk_rq_init(struct request_queue *q, struct request *rq);
977 : int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
978 : struct bio_set *bs, gfp_t gfp_mask,
979 : int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
980 : void blk_rq_unprep_clone(struct request *rq);
981 : blk_status_t blk_insert_cloned_request(struct request *rq);
982 :
983 : struct rq_map_data {
984 : struct page **pages;
985 : unsigned long offset;
986 : unsigned short page_order;
987 : unsigned short nr_entries;
988 : bool null_mapped;
989 : bool from_user;
990 : };
991 :
992 : int blk_rq_map_user(struct request_queue *, struct request *,
993 : struct rq_map_data *, void __user *, unsigned long, gfp_t);
994 : int blk_rq_map_user_io(struct request *, struct rq_map_data *,
995 : void __user *, unsigned long, gfp_t, bool, int, bool, int);
996 : int blk_rq_map_user_iov(struct request_queue *, struct request *,
997 : struct rq_map_data *, const struct iov_iter *, gfp_t);
998 : int blk_rq_unmap_user(struct bio *);
999 : int blk_rq_map_kern(struct request_queue *, struct request *, void *,
1000 : unsigned int, gfp_t);
1001 : int blk_rq_append_bio(struct request *rq, struct bio *bio);
1002 : void blk_execute_rq_nowait(struct request *rq, bool at_head);
1003 : blk_status_t blk_execute_rq(struct request *rq, bool at_head);
1004 : bool blk_rq_is_poll(struct request *rq);
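
/*
 * Illustrative sketch: synchronous submission of a driver-internal request,
 * e.g. for a management command.  REQ_OP_DRV_IN is used here only as an
 * example opcode; example_exec_internal is a hypothetical helper.
 */
static inline blk_status_t example_exec_internal(struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return BLK_STS_RESOURCE;

	/* Fill in the driver PDU via blk_mq_rq_to_pdu(rq) before executing. */
	status = blk_execute_rq(rq, false);	/* waits for completion */
	blk_mq_free_request(rq);
	return status;
}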
1005 :
1006 : struct req_iterator {
1007 : struct bvec_iter iter;
1008 : struct bio *bio;
1009 : };
1010 :
1011 : #define __rq_for_each_bio(_bio, rq) \
1012 : if ((rq->bio)) \
1013 : for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
1014 :
1015 : #define rq_for_each_segment(bvl, _rq, _iter) \
1016 : __rq_for_each_bio(_iter.bio, _rq) \
1017 : bio_for_each_segment(bvl, _iter.bio, _iter.iter)
1018 :
1019 : #define rq_for_each_bvec(bvl, _rq, _iter) \
1020 : __rq_for_each_bio(_iter.bio, _rq) \
1021 : bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
1022 :
1023 : #define rq_iter_last(bvec, _iter) \
1024 : (_iter.bio->bi_next == NULL && \
1025 : bio_iter_last(bvec, _iter.iter))
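
/*
 * Illustrative sketch: walking the data of a request one bio_vec at a time
 * with rq_for_each_segment().  For a regular read/write the sum below equals
 * blk_rq_bytes().  example_rq_data_bytes is a hypothetical helper.
 */
static inline unsigned int example_rq_data_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;
	return bytes;
}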
1026 :
1027 : /*
1028 : * blk_rq_pos() : the current sector
1029 : * blk_rq_bytes() : bytes left in the entire request
1030 : * blk_rq_cur_bytes() : bytes left in the current segment
1031 : * blk_rq_sectors() : sectors left in the entire request
1032 : * blk_rq_cur_sectors() : sectors left in the current segment
1033 : * blk_rq_stats_sectors() : sectors of the entire request used for stats
1034 : */
1035 : static inline sector_t blk_rq_pos(const struct request *rq)
1036 : {
1037 : return rq->__sector;
1038 : }
1039 :
1040 : static inline unsigned int blk_rq_bytes(const struct request *rq)
1041 : {
1042 : return rq->__data_len;
1043 : }
1044 :
1045 0 : static inline int blk_rq_cur_bytes(const struct request *rq)
1046 : {
1047 0 : if (!rq->bio)
1048 : return 0;
1049 0 : if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
1050 0 : return rq->bio->bi_iter.bi_size;
1051 0 : return bio_iovec(rq->bio).bv_len;
1052 : }
1053 :
1054 : static inline unsigned int blk_rq_sectors(const struct request *rq)
1055 : {
1056 0 : return blk_rq_bytes(rq) >> SECTOR_SHIFT;
1057 : }
1058 :
1059 : static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1060 : {
1061 0 : return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
1062 : }
1063 :
1064 : static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
1065 : {
1066 0 : return rq->stats_sectors;
1067 : }
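
/*
 * Illustrative sketch: translating a request into a device command using the
 * helpers above.  example_fill_cmd and its output parameters are hypothetical.
 */
static inline void example_fill_cmd(struct request *rq, u64 *lba,
				    u32 *nr_sectors)
{
	*lba = blk_rq_pos(rq);			/* first sector of the request */
	*nr_sectors = blk_rq_sectors(rq);	/* sectors left in the request */
}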
1068 :
1069 : /*
1070 : * Some commands like WRITE SAME have a payload or data transfer size which
1071 : * is different from the size of the request. Any driver that supports such
1072 : * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1073 : * calculate the data transfer size.
1074 : */
1075 : static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1076 : {
1077 : if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1078 : return rq->special_vec.bv_len;
1079 : return blk_rq_bytes(rq);
1080 : }
1081 :
1082 : /*
1083 : * Return the first full biovec in the request. The caller needs to check that
1084 : * there is at least one bvec before calling this helper.
1085 : */
1086 : static inline struct bio_vec req_bvec(struct request *rq)
1087 : {
1088 : if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1089 : return rq->special_vec;
1090 : return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
1091 : }
1092 :
1093 : static inline unsigned int blk_rq_count_bios(struct request *rq)
1094 : {
1095 : unsigned int nr_bios = 0;
1096 : struct bio *bio;
1097 :
1098 : __rq_for_each_bio(bio, rq)
1099 : nr_bios++;
1100 :
1101 : return nr_bios;
1102 : }
1103 :
1104 : void blk_steal_bios(struct bio_list *list, struct request *rq);
1105 :
1106 : /*
1107 : * Request completion related functions.
1108 : *
1109 : * blk_update_request() completes given number of bytes and updates
1110 : * the request without completing it.
1111 : */
1112 : bool blk_update_request(struct request *rq, blk_status_t error,
1113 : unsigned int nr_bytes);
1114 : void blk_abort_request(struct request *);
1115 :
1116 : /*
1117 : * Number of physical segments as sent to the device.
1118 : *
1119 : * Normally this is the number of discontiguous data segments sent by the
1120 : * submitter. But for data-less commands like discard we might have no
1121 : * actual data segments submitted, but the driver might have to add its
1122 : * own special payload. In that case we still return 1 here so that this
1123 : * special payload will be mapped.
1124 : */
1125 : static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1126 : {
1127 0 : if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1128 : return 1;
1129 0 : return rq->nr_phys_segments;
1130 : }
1131 :
1132 : /*
1133 : * Number of discard segments (or ranges) the driver needs to fill in.
1134 : * Each discard bio merged into a request is counted as one segment.
1135 : */
1136 : static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1137 : {
1138 0 : return max_t(unsigned short, rq->nr_phys_segments, 1);
1139 : }
1140 :
1141 : int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1142 : struct scatterlist *sglist, struct scatterlist **last_sg);
1143 : static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1144 : struct scatterlist *sglist)
1145 : {
1146 : struct scatterlist *last_sg = NULL;
1147 :
1148 : return __blk_rq_map_sg(q, rq, sglist, &last_sg);
1149 : }
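
/*
 * Illustrative sketch: building a scatterlist for a request inside
 * ->queue_rq.  The returned entry count is what a driver would then pass to
 * dma_map_sg() together with rq_dma_dir(rq).  example_map_rq and the caller
 * provided sglist are hypothetical.
 */
static inline int example_map_rq(struct request *rq, struct scatterlist *sglist)
{
	/* The table must have at least blk_rq_nr_phys_segments(rq) entries. */
	sg_init_table(sglist, blk_rq_nr_phys_segments(rq));
	return blk_rq_map_sg(rq->q, rq, sglist);
}
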
1150 : void blk_dump_rq_flags(struct request *, char *);
1151 :
1152 : #ifdef CONFIG_BLK_DEV_ZONED
1153 : static inline unsigned int blk_rq_zone_no(struct request *rq)
1154 : {
1155 : return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
1156 : }
1157 :
1158 : static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
1159 : {
1160 : return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
1161 : }
1162 :
1163 : bool blk_req_needs_zone_write_lock(struct request *rq);
1164 : bool blk_req_zone_write_trylock(struct request *rq);
1165 : void __blk_req_zone_write_lock(struct request *rq);
1166 : void __blk_req_zone_write_unlock(struct request *rq);
1167 :
1168 : static inline void blk_req_zone_write_lock(struct request *rq)
1169 : {
1170 : if (blk_req_needs_zone_write_lock(rq))
1171 : __blk_req_zone_write_lock(rq);
1172 : }
1173 :
1174 : static inline void blk_req_zone_write_unlock(struct request *rq)
1175 : {
1176 : if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
1177 : __blk_req_zone_write_unlock(rq);
1178 : }
1179 :
1180 : static inline bool blk_req_zone_is_write_locked(struct request *rq)
1181 : {
1182 : return rq->q->disk->seq_zones_wlock &&
1183 : test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
1184 : }
1185 :
1186 : static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1187 : {
1188 : if (!blk_req_needs_zone_write_lock(rq))
1189 : return true;
1190 : return !blk_req_zone_is_write_locked(rq);
1191 : }
1192 : #else /* CONFIG_BLK_DEV_ZONED */
1193 : static inline bool blk_req_needs_zone_write_lock(struct request *rq)
1194 : {
1195 : return false;
1196 : }
1197 :
1198 : static inline void blk_req_zone_write_lock(struct request *rq)
1199 : {
1200 : }
1201 :
1202 : static inline void blk_req_zone_write_unlock(struct request *rq)
1203 : {
1204 : }
1205 : static inline bool blk_req_zone_is_write_locked(struct request *rq)
1206 : {
1207 : return false;
1208 : }
1209 :
1210 : static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1211 : {
1212 : return true;
1213 : }
1214 : #endif /* CONFIG_BLK_DEV_ZONED */
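
/*
 * Illustrative sketch: how a dispatch path might use the zone write locking
 * helpers so that at most one write per sequential zone is in flight.
 * example_try_dispatch is a hypothetical helper; the matching
 * blk_req_zone_write_unlock() happens later, when the request completes.
 */
static inline bool example_try_dispatch(struct request *rq)
{
	/* Reads, and writes to conventional zones, always pass this check. */
	if (!blk_req_can_dispatch_to_zone(rq))
		return false;

	blk_req_zone_write_lock(rq);	/* no-op unless @rq needs the lock */
	/* ... issue @rq to the hardware ... */
	return true;
}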
1215 :
1216 : #endif /* BLK_MQ_H */