/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

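/*
 * Illustrative sketch (not part of the kernel API; the helper name is
 * hypothetical): converting a byte count to a sector count with the
 * constants above. E.g. 4096 bytes >> SECTOR_SHIFT == 8 sectors.
 */
static inline sector_t example_bytes_to_sectors(u64 bytes)
{
	return bytes >> SECTOR_SHIFT;
}
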
struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk		*bd_disk;
	struct request_queue	*bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	bool			bd_read_only;	/* read-only policy */
	u8			bd_partno;
	bool			bd_write_holder;
	bool			bd_has_submit_bio;
	dev_t			bd_dev;
	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	struct inode		*bd_inode;	/* will die */
	struct super_block	*bd_super;
	void			*bd_claiming;
	void			*bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
	struct super_block	*bd_fsfreeze_sb;

	struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
	bool			bd_make_it_fail;
#endif
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

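/*
 * Illustrative sketch (hypothetical helper, not a kernel interface): the
 * partition's capacity in bytes follows from bd_nr_sectors and the
 * SECTOR_SHIFT constant defined above.
 */
static inline u64 example_bdev_bytes(const struct block_device *bdev)
{
	return (u64)bdev->bd_nr_sectors << SECTOR_SHIFT;
}
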
#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

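/*
 * Illustrative sketch: dev_to_bdev() recovers the containing block_device
 * from the embedded struct device, e.g. inside a device attribute callback.
 * The function below is a hypothetical example, not a kernel interface.
 */
static inline u8 example_partno_from_dev(struct device *dev)
{
	struct block_device *bdev = dev_to_bdev(dev);

	return bdev->bd_partno;
}
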
/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif
#define BLK_STS_OK		0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails to allocate these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource, and IO to a different zone on the same device can
 * still be served. An example is a zone that is write-locked: writes to it
 * must wait, but a read from the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)17)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)18)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

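/*
 * Illustrative sketch of how a multipath consumer might use
 * blk_path_error(); the helper below and its policy are hypothetical.
 */
static inline bool example_should_failover(blk_status_t status)
{
	if (status == BLK_STS_OK)
		return false;
	/* Target/medium/protection errors won't improve on another path. */
	return blk_path_error(status);
}
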
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

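/*
 * Worked example of the resulting 64-bit layout (the values follow directly
 * from the macros above):
 *
 *   BIO_ISSUE_RES_SHIFT  = 64 - 1  = 63
 *   BIO_ISSUE_SIZE_SHIFT = 63 - 12 = 51
 *   BIO_ISSUE_TIME_MASK  = (1ULL << 51) - 1  -> bits  0..50 (issue time, ns)
 *   BIO_ISSUE_SIZE_MASK                      -> bits 51..62 (original size)
 *   BIO_ISSUE_RES_MASK                       -> bit      63 (reserved)
 */
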
struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

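/*
 * Illustrative sketch (hypothetical caller): packing and unpacking a
 * bio_issue with the helpers above. The size survives the round trip,
 * truncated to its 12-bit field.
 */
static inline bool example_bio_issue_roundtrip(struct bio_issue *issue,
					       sector_t size)
{
	bio_issue_init(issue, size);	/* records current time and size */
	return bio_issue_size(issue) ==
		(size & ((1ULL << BIO_ISSUE_SIZE_BITS) - 1));
}
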
typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	blk_qc_t		bi_cookie;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
	BIO_FLAG_LAST
};

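/*
 * Illustrative sketch: bi_flags holds one bit per BIO_* value above, so a
 * flag test is a plain mask of bi_flags. The real kernel wraps this in
 * bio_flagged()/bio_set_flag() in bio.h; this helper is hypothetical.
 */
static inline bool example_bio_is_cloned(const struct bio *bio)
{
	return bio->bi_flags & (1U << BIO_CLONED);
}
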
typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
	/* reset all zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

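/*
 * Illustrative sketch of the least-significant-bit convention documented
 * above: odd opcodes transfer data TO the device, even opcodes FROM it.
 * E.g. REQ_OP_WRITE (1) and REQ_OP_ZONE_APPEND (13) are writes, while
 * REQ_OP_READ (0) is a read. Hypothetical helper; op_is_write() below is
 * the real test.
 */
static inline bool example_op_transfers_to_device(enum req_op op)
{
	return !!((__force u32)op & 1);
}
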
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */

	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
	(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
	(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
	(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

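/*
 * Illustrative sketch (hypothetical helper): an operation and its flags
 * combine into a single blk_opf_t value, here a synchronous FUA write, the
 * same way submitters build bi_opf.
 */
static inline blk_opf_t example_sync_fua_write(void)
{
	return REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
}
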
enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

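/*
 * Note (illustrative): op_stat_group() can return op_is_write() directly
 * for the non-discard cases because STAT_READ == 0 and STAT_WRITE == 1
 * line up with op_is_write()'s 0/1 result.
 */
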
struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */