/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ		4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
	RQ_END_IO_NONE,
	RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
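
/*
 * Illustrative sketch (not part of the kernel API): a minimal ->end_io
 * handler for a hypothetical driver. Returning RQ_END_IO_NONE tells the
 * block layer that the callback did not free the request; RQ_END_IO_FREE
 * asks the block layer to free it. All "example_*" names are made up.
 */
#if 0 /* example only, kept out of the build */
static enum rq_end_io_ret example_end_io(struct request *rq,
					 blk_status_t error)
{
	struct example_cmd *cmd = rq->end_io_data;	/* hypothetical */

	if (error)
		pr_err("example: request failed: %d\n",
		       blk_status_to_errno(error));
	complete(&cmd->done);

	/* The submitter still owns @rq, so do not free it here. */
	return RQ_END_IO_NONE;
}
#endif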

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* driver may already have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* use hctx->sched_tags */
#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << 8))
/* use an I/O scheduler for this request */
#define RQF_USE_SCHED		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/*
 * Look at ->special_vec for the actual data payload instead of the
 * bio chain.
 */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
/* request came from the reserved tag pool */
#define RQF_RESV		((__force req_flags_t)(1 << 23))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by
	 * completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	unsigned short ioprio;

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. special_vec must
	 * only be used if RQF_SPECIAL_PAYLOAD is set, and those requests
	 * cannot be inserted into an IO scheduler.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	struct {
		struct io_cq *icq;
		void *priv[2];
	} elv;

	struct {
		unsigned int seq;
		struct list_head list;
		rq_end_io_fn *saved_end_io;
	} flush;

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(rq->cmd_flags);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_add_tail(lastpptr, rq)	do {		\
	(rq)->rq_next = NULL;				\
	**(lastpptr) = rq;				\
	*(lastpptr) = &rq->rq_next;			\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr)) {			\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})

#define rq_list_peek(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))			\
		__req = *(listptr);			\
	__req;						\
})

#define rq_list_for_each(listptr, pos)			\
	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)

/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
				struct request *rq, struct request *prev)
{
	if (prev)
		prev->rq_next = rq->rq_next;
	else
		*src = rq->rq_next;
	rq_list_add(dst, rq);
}
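
/*
 * Illustrative sketch: how a driver might drain an rq_list built with
 * rq_list_add(). This mirrors the shape of ->queue_rqs() consumers that
 * walk the singly linked list; the function name is hypothetical.
 */
#if 0 /* example only, kept out of the build */
static void example_drain(struct request **rqlist)
{
	struct request *rq;

	/* rq_list_pop() unlinks and returns the head, or NULL when empty. */
	while ((rq = rq_list_pop(rqlist)))
		blk_mq_end_request(rq, BLK_STS_OK);
}
#endif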

/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack
		 * of resources) could not be sent to the hardware. As soon
		 * as the driver can send new requests, requests in this list
		 * are sent first, for fairer dispatch.
		 */
		struct list_head dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many work items are left in the
	 * batch before switching to the next CPU.
	 */
	int next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void *sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue *queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue *fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void *driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If a bit is set, there is
	 * a pending request in that software queue.
	 */
	struct sbitmap ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx *dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw queue is busy, using an exponentially weighted
	 * moving average.
	 */
	unsigned int dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short type;
	/** @nr_ctx: Number of software queues. */
	unsigned short nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx **ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag in this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags *tags;
	/**
	 * @sched_tags: Tags owned by the I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags *sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long queued;
	/** @run: Number of dispatched requests. */
	unsigned long run;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t nr_active;

	/** @cpuhp_online: hlist node for the CPU hotplug "online" callback. */
	struct hlist_node cpuhp_online;
	/** @cpuhp_dead: hlist node for the CPU hotplug "dead" callback. */
	struct hlist_node cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry *debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry *sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head hctx_list;
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ: Just for READ I/O.
 * @HCTX_TYPE_POLL: Polled I/O of any kind.
 * @HCTX_MAX_TYPES: Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops: Pointers to functions that implement block driver behavior.
 * @map: One or more ctx -> hctx mappings. One map exists for each
 *	hardware queue type (enum hctx_type) that the driver wishes
 *	to support. There are no restrictions on maps being of the
 *	same size, and it's perfectly legal to share maps between
 *	types.
 * @nr_maps: Number of elements in the @map array. A number in the range
 *	[1, HCTX_MAX_TYPES].
 * @nr_hw_queues: Number of hardware queues supported by the block driver that
 *	owns this data structure.
 * @queue_depth: Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *	allocations.
 * @cmd_size: Number of additional bytes to allocate per request. The block
 *	driver owns these additional bytes.
 * @numa_node: NUMA node the storage adapter has been connected to.
 * @timeout: Request processing timeout in jiffies.
 * @flags: Zero or more BLK_MQ_F_* flags.
 * @driver_data: Pointer to data owned by the block driver that created this
 *	tag set.
 * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *	elements.
 * @shared_tags:
 *	Shared set of tags. Has @nr_hw_queues elements. If set,
 *	shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list: List of the request queues that use this tag set. See also
 *	request_queue.tag_set_list.
 * @srcu: Used as a lock when the request queue type is blocking
 *	(BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops *ops;
	struct blk_mq_queue_map map[HCTX_MAX_TYPES];
	unsigned int nr_maps;
	unsigned int nr_hw_queues;
	unsigned int queue_depth;
	unsigned int reserved_tags;
	unsigned int cmd_size;
	int numa_node;
	unsigned int timeout;
	unsigned int flags;
	void *driver_data;

	struct blk_mq_tags **tags;

	struct blk_mq_tags *shared_tags;

	struct mutex tag_list_lock;
	struct list_head tag_list;
	struct srcu_struct *srcu;
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq: Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of
	 * errors that stop us from issuing further requests, this hook
	 * serves the purpose of kicking the hardware (which the last
	 * request otherwise would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. The driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct request **rqlist);

	/**
	 * @get_budget: Reserve a budget before queueing a request. Once
	 * ->queue_rq() has run, it is the driver's responsibility to
	 * release the reserved budget. The failure case of ->get_budget()
	 * also has to be handled to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is for setting up
	 * the flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, usually to free the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
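
/*
 * Illustrative sketch: the smallest useful ->queue_rq() implementation,
 * for a hypothetical memory-backed driver. It starts the request, does
 * the "hardware" work inline, and completes it. Real drivers typically
 * return BLK_STS_RESOURCE (or BLK_STS_DEV_RESOURCE) when the device is
 * busy so that the block layer re-dispatches later.
 */
#if 0 /* example only, kept out of the build */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);	/* arms the request timeout */
	/* ... transfer blk_rq_bytes(rq) starting at blk_rq_pos(rq) ... */
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};
#endif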

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO.
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX	(-1U)

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, queuedata, &__key);			\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
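
/*
 * Illustrative sketch: typical driver bring-up with a tag set and a disk.
 * The disk callbacks and most error unwinding are elided; "example_dev",
 * "example_cmd" and "example_mq_ops" are hypothetical names.
 */
#if 0 /* example only, kept out of the build */
static int example_probe(struct example_dev *dev)
{
	struct gendisk *disk;
	int ret;

	dev->tag_set.ops = &example_mq_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 128;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.cmd_size = sizeof(struct example_cmd);	/* per-rq PDU */
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	disk = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(disk)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(disk);
	}
	dev->disk = disk;
	return 0;
}
#endif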

void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
		unsigned int poll_flags);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
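
/*
 * Illustrative sketch: allocating a passthrough request and executing it
 * synchronously, roughly the shape used by SCSI/NVMe ioctl paths. The
 * driver-specific command setup is elided.
 */
#if 0 /* example only, kept out of the build */
static int example_submit_sync(struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill the driver PDU via blk_mq_rq_to_pdu(rq) ... */

	status = blk_execute_rq(rq, false);	/* false: queue at the tail */
	blk_mq_free_request(rq);
	return blk_status_to_errno(status);
}
#endif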

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * Used to clear request references in rqs[] before freeing a
	 * request pool.
	 */
	spinlock_t lock;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
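
/*
 * Illustrative sketch: a unique tag packs the hardware queue index into
 * the upper 16 bits and the per-queue tag into the lower 16 bits, so the
 * two helpers above simply undo blk_mq_unique_tag(). Hypothetical usage:
 */
#if 0 /* example only, kept out of the build */
static void example_decode(struct request *rq)
{
	u32 unique = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	/* hardware queue */
	u16 tag = blk_mq_unique_tag_to_tag(unique);	/* tag within it */

	pr_debug("rq on hwq %u, tag %u\n", hwq, tag);
}
#endif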

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of
 * in interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Start/end time stamping is only needed if iostat or blk stats are
 * enabled, or if an IO scheduler is in use.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	/*
	 * blk_mq_end_request_batch() can't end requests allocated from
	 * sched tags.
	 */
	if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
			(req->end_io && !blk_rq_is_passthrough(req)))
		return false;

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}
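
/*
 * Illustrative sketch: a driver poll path that batches completions,
 * falling back to blk_mq_complete_request() when a request does not
 * qualify for batching. The completion-lookup helper is hypothetical.
 */
#if 0 /* example only, kept out of the build */
static int example_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct request *rq;
	int found = 0;

	while ((rq = example_next_completed(hctx))) {	/* hypothetical */
		found++;
		if (!blk_mq_add_to_batch(rq, iob, 0, blk_mq_end_request_batch))
			blk_mq_complete_request(rq);
	}
	return found;
}
#endif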

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request, so subtract the
 * request size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request, so add the request
 * size to get the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
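
/*
 * Illustrative sketch: the per-request PDU is carved out of the same
 * allocation as the request, sized by blk_mq_tag_set.cmd_size, so a
 * driver converts back and forth without extra allocations. The command
 * struct here is hypothetical.
 */
#if 0 /* example only, kept out of the build */
struct example_cmd {
	u16 opcode;
	struct completion done;
};

static void example_use_pdu(struct request *rq)
{
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	WARN_ON(blk_mq_rq_from_pdu(cmd) != rq);	/* round trip */
	cmd->opcode = 0;
}
#endif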

#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
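
/*
 * Illustrative sketch: walking every data segment of a request, e.g. to
 * copy it into a device buffer. rq_for_each_segment() visits bvecs split
 * at page granularity; rq_for_each_bvec() visits full multi-page bvecs.
 */
#if 0 /* example only, kept out of the build */
static void example_copy_out(struct request *rq, void *dst)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter) {
		void *src = kmap_local_page(bvec.bv_page) + bvec.bv_offset;

		memcpy(dst, src, bvec.bv_len);
		kunmap_local(src);
		dst += bvec.bv_len;
	}
}
#endif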

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request. Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request. The caller must ensure that
 * the request has at least one bvec before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter. But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload. In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
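
/*
 * Illustrative sketch: mapping a request into a scatterlist for DMA.
 * blk_rq_map_sg() returns the number of sg entries actually used, which
 * is at most blk_rq_nr_phys_segments(rq). The device handle and the sg
 * table sizing below are hypothetical.
 */
#if 0 /* example only, kept out of the build */
static int example_map_for_dma(struct example_dev *dev, struct request *rq,
			       struct scatterlist *sgl)
{
	int nents;

	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
	nents = blk_rq_map_sg(rq->q, rq, sgl);
	if (!nents)
		return -EIO;

	return dma_map_sg(dev->dma_dev, sgl, nents, rq_dma_dir(rq));
}
#endif
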
void blk_dump_rq_flags(struct request *, char *);

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}

/**
 * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
 * @rq: Request to examine.
 *
 * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
 */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	return op_needs_zoned_write_locking(req_op(rq)) &&
		blk_rq_zone_is_seq(rq);
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return rq->q->disk->seq_zones_wlock &&
		test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	return false;
}

static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
	return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */
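
/*
 * Illustrative sketch: how a dispatch path can use the per-zone write
 * lock to serialize writes to a sequential zone, in the spirit of what
 * the mq-deadline scheduler does. Requeue handling is elided.
 */
#if 0 /* example only, kept out of the build */
static bool example_dispatch(struct request *rq)
{
	if (!blk_req_can_dispatch_to_zone(rq))
		return false;		/* zone busy, try again later */

	blk_req_zone_write_lock(rq);
	/* ... issue @rq; blk_req_zone_write_unlock() runs on completion ... */
	return true;
}
#endif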

#endif /* BLK_MQ_H */