/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT      (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS            (1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK             (PAGE_SECTORS - 1)
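
/*
 * A minimal sketch of how these constants are typically combined; the
 * variables below are illustrative, not part of this header:
 *
 *      sector_t sect = byte_offset >> SECTOR_SHIFT;    // bytes -> sectors
 *      u64 bytes = (u64)nr_sects << SECTOR_SHIFT;      // sectors -> bytes
 *
 * On a 4 KiB PAGE_SIZE system, PAGE_SECTORS_SHIFT is 3 and PAGE_SECTORS
 * is 8, so (sect & SECTOR_MASK) gives a sector's position within its page.
 */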

struct block_device {
        sector_t                bd_start_sect;
        sector_t                bd_nr_sectors;
        struct disk_stats __percpu *bd_stats;
        unsigned long           bd_stamp;
        bool                    bd_read_only;   /* read-only policy */
        dev_t                   bd_dev;
        atomic_t                bd_openers;
        struct inode            *bd_inode;      /* will die */
        struct super_block      *bd_super;
        void                    *bd_claiming;
        struct device           bd_device;
        void                    *bd_holder;
        int                     bd_holders;
        bool                    bd_write_holder;
        struct kobject          *bd_holder_dir;
        u8                      bd_partno;
        spinlock_t              bd_size_lock;   /* for bd_inode->i_size updates */
        struct gendisk          *bd_disk;
        struct request_queue    *bd_queue;

        /* The counter of freeze processes */
        int                     bd_fsfreeze_count;
        /* Mutex for freeze */
        struct mutex            bd_fsfreeze_mutex;
        struct super_block      *bd_fsfreeze_sb;

        struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
        bool                    bd_make_it_fail;
#endif
} __randomize_layout;

#define bdev_whole(_bdev) \
        ((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
        container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
        (&((_bdev)->bd_device.kobj))
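
/*
 * Usage sketch for the helpers above; the sysfs callback is a hypothetical
 * example, not part of this header. The driver core hands out the embedded
 * struct device, dev_to_bdev() recovers the containing block_device, and
 * bdev_whole() walks from any partition to the whole-disk device:
 *
 *      static ssize_t foo_show(struct device *dev,
 *                              struct device_attribute *attr, char *buf)
 *      {
 *              struct block_device *bdev = dev_to_bdev(dev);
 *
 *              // report whether this device is the whole disk (part0)
 *              return sysfs_emit(buf, "%d\n", bdev_whole(bdev) == bdev);
 *      }
 */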

/*
 * Block error status values. See block/blk-core.c:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP         ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT         ((__force blk_status_t)2)
#define BLK_STS_NOSPC           ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT       ((__force blk_status_t)4)
#define BLK_STS_TARGET          ((__force blk_status_t)5)
#define BLK_STS_NEXUS           ((__force blk_status_t)6)
#define BLK_STS_MEDIUM          ((__force blk_status_t)7)
#define BLK_STS_PROTECTION      ((__force blk_status_t)8)
#define BLK_STS_RESOURCE        ((__force blk_status_t)9)
#define BLK_STS_IOERR           ((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE      ((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN           ((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails to allocate these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE    ((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a write-locked zone: writes to it must wait,
 * but a read to the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE   ((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE      ((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE    ((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE         ((__force blk_status_t)17)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
        switch (error) {
        case BLK_STS_NOTSUPP:
        case BLK_STS_NOSPC:
        case BLK_STS_TARGET:
        case BLK_STS_NEXUS:
        case BLK_STS_MEDIUM:
        case BLK_STS_PROTECTION:
                return false;
        }

        /* Anything else could be a path failure, so should be retried */
        return true;
}
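
/*
 * A sketch of the intended call site, with hypothetical names: a
 * multipath-aware completion handler asks blk_path_error() whether
 * resubmitting the request on another path could possibly succeed:
 *
 *      static void foo_complete_rq(struct request *rq, blk_status_t status)
 *      {
 *              if (status != BLK_STS_OK && blk_path_error(status) &&
 *                  foo_has_alternate_path(rq))
 *                      foo_requeue_on_other_path(rq);  // retry may succeed
 *              else
 *                      blk_mq_end_request(rq, status); // give up
 *      }
 */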

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
        (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
        u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
        return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
        return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
        return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
                                  sector_t size)
{
        size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
        issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
                        (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
                        ((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
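
/*
 * Worked example of the 1/12/51-bit packing above, with made-up numbers:
 * bio_issue_init(&issue, 0x1800) first truncates the size to its low 12
 * bits (0x1800 & 0xfff == 0x800), keeps bit 63 of the old value intact
 * via BIO_ISSUE_RES_MASK, and stores ktime_get_ns() truncated to 51 bits:
 *
 *      struct bio_issue issue = { .value = 0 };
 *
 *      bio_issue_init(&issue, 0x1800);
 *      // bio_issue_size(&issue) == 0x800 (upper size bits were lost)
 *      // bio_issue_time(&issue) == ktime_get_ns() & BIO_ISSUE_TIME_MASK
 *
 * The truncated timestamp wraps roughly every 2^51 ns (about 26 days),
 * which is acceptable for the latency accounting it feeds.
 */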

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE           -1U

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        blk_opf_t               bi_opf;         /* bottom bits REQ_OP, top bits
                                                 * req_flags.
                                                 */
        unsigned short          bi_flags;       /* BIO_* below */
        unsigned short          bi_ioprio;
        blk_status_t            bi_status;
        atomic_t                __bi_remaining;

        struct bvec_iter        bi_iter;

        blk_qc_t                bi_cookie;
        bio_end_io_t            *bi_end_io;
        void                    *bi_private;
#ifdef CONFIG_BLK_CGROUP
        /*
         * Represents the association of the css and request_queue for the bio.
         * If a bio goes direct to device, it will not have a blkg as it will
         * not have a request_queue associated with it. The reference is put
         * on release of the bio.
         */
        struct blkcg_gq         *bi_blkg;
        struct bio_issue        bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
        u64                     bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct bio_crypt_ctx    *bi_crypt_context;
#endif

        union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
                struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
        };

        unsigned short          bi_vcnt;        /* how many bio_vec's */

        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */

        unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */

        atomic_t                __bi_cnt;       /* pin count */

        struct bio_vec          *bi_io_vec;     /* the actual vec list */

        struct bio_set          *bi_pool;

        /*
         * We can inline a number of vecs at the end of the bio, to avoid
         * double allocations for a small number of bio_vecs. This member
         * MUST obviously be kept at the very end of the bio.
         */
        struct bio_vec          bi_inline_vecs[];
};

#define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS         (UINT_MAX >> SECTOR_SHIFT)
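
/*
 * A minimal allocation/reset sketch, assuming the usual bio_alloc() and
 * bio_reset() helpers from <linux/bio.h> are available; the vec count and
 * bdev are illustrative:
 *
 *      struct bio *bio = bio_alloc(bdev, 4, REQ_OP_READ, GFP_KERNEL);
 *      // For a small vec count, bi_io_vec can point at bi_inline_vecs[],
 *      // so the vec table comes for free with the bio allocation.
 *
 *      bio_reset(bio, bdev, REQ_OP_WRITE);
 *      // Only the first BIO_RESET_BYTES are zeroed: bi_max_vecs,
 *      // __bi_cnt, bi_io_vec and bi_pool survive the reset.
 */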

/*
 * bio flags
 */
enum {
        BIO_NO_PAGE_REF,        /* don't put pages on bio release */
        BIO_CLONED,             /* doesn't own data */
        BIO_BOUNCED,            /* bio is a bounce bio */
        BIO_QUIET,              /* make bio quiet: suppress error reporting */
        BIO_CHAIN,              /* chained bio, ->bi_remaining in effect */
        BIO_REFFED,             /* bio has elevated ->bi_cnt */
        BIO_BPS_THROTTLED,      /* This bio has already been subjected to
                                 * throttling rules. Don't do it again. */
        BIO_TRACE_COMPLETION,   /* bio_endio() should trace the final completion
                                 * of this bio. */
        BIO_CGROUP_ACCT,        /* has been accounted to a cgroup */
        BIO_QOS_THROTTLED,      /* bio went through rq_qos throttle path */
        BIO_QOS_MERGED,         /* but went through rq_qos merge path */
        BIO_REMAPPED,           /* partition remapping already applied */
        BIO_ZONE_WRITE_LOCKED,  /* Owns a zoned device zone write lock */
        BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS     8
#define REQ_OP_MASK     (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS   24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
        /* read sectors from the device */
        REQ_OP_READ             = (__force blk_opf_t)0,
        /* write sectors to the device */
        REQ_OP_WRITE            = (__force blk_opf_t)1,
        /* flush the volatile write cache */
        REQ_OP_FLUSH            = (__force blk_opf_t)2,
        /* discard sectors */
        REQ_OP_DISCARD          = (__force blk_opf_t)3,
        /* securely erase sectors */
        REQ_OP_SECURE_ERASE     = (__force blk_opf_t)5,
        /* write the zero-filled sector many times */
        REQ_OP_WRITE_ZEROES     = (__force blk_opf_t)9,
        /* Open a zone */
        REQ_OP_ZONE_OPEN        = (__force blk_opf_t)10,
        /* Close a zone */
        REQ_OP_ZONE_CLOSE       = (__force blk_opf_t)11,
        /* Transition a zone to full */
        REQ_OP_ZONE_FINISH      = (__force blk_opf_t)12,
        /* write data at the current zone write pointer */
        REQ_OP_ZONE_APPEND      = (__force blk_opf_t)13,
        /* reset a zone write pointer */
        REQ_OP_ZONE_RESET       = (__force blk_opf_t)15,
        /* reset all zones present on the device */
        REQ_OP_ZONE_RESET_ALL   = (__force blk_opf_t)17,

        /* Driver private requests */
        REQ_OP_DRV_IN           = (__force blk_opf_t)34,
        REQ_OP_DRV_OUT          = (__force blk_opf_t)35,

        REQ_OP_LAST             = (__force blk_opf_t)36,
};
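
/*
 * Illustration of the direction rule encoded in the opcode numbers:
 * every data-out opcode above is odd and every data-in opcode is even,
 * so op_is_write() below reduces to a single bit test:
 *
 *      op_is_write(REQ_OP_READ);         // 0  -> false, data FROM device
 *      op_is_write(REQ_OP_WRITE);        // 1  -> true,  data TO device
 *      op_is_write(REQ_OP_ZONE_APPEND);  // 13 -> true,  data TO device
 *      op_is_write(REQ_OP_DRV_OUT);      // 35 -> true,  driver-private out
 */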

enum req_flag_bits {
        __REQ_FAILFAST_DEV =    /* no driver retries of device errors */
                REQ_OP_BITS,
        __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
        __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata io request */
        __REQ_PRIO,             /* boost priority in cfq */
        __REQ_NOMERGE,          /* don't touch this for merging */
        __REQ_IDLE,             /* anticipate more IO after this one */
        __REQ_INTEGRITY,        /* I/O includes block integrity payload */
        __REQ_FUA,              /* forced unit access */
        __REQ_PREFLUSH,         /* request for cache flush */
        __REQ_RAHEAD,           /* read ahead, can fail anytime */
        __REQ_BACKGROUND,       /* background IO */
        __REQ_NOWAIT,           /* Don't wait if request will block */
        /*
         * When a shared kthread needs to issue a bio for a cgroup, doing
         * so synchronously can lead to priority inversions as the kthread
         * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
         * submit_bio() punt the actual issuing to a dedicated per-blkcg
         * work item to avoid such priority inversions.
         */
        __REQ_CGROUP_PUNT,
        __REQ_POLLED,           /* caller polls for completion using bio_poll */
        __REQ_ALLOC_CACHE,      /* allocate IO from cache if available */
        __REQ_SWAP,             /* swap I/O */
        __REQ_DRV,              /* for driver use */

        /*
         * Command specific flags, keep last:
         */
        /* for REQ_OP_WRITE_ZEROES: */
        __REQ_NOUNMAP,          /* do not free blocks when zeroing */

        __REQ_NR_BITS,          /* stops here */
};

#define REQ_FAILFAST_DEV        \
                        (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT  \
                        (__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER     \
                        (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC        (__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META        (__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO        (__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE     (__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE        (__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY   (__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA         (__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH    (__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD      (__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND  (__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT      (__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT (__force blk_opf_t)(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP     (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
#define REQ_POLLED      (__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)

#define REQ_DRV         (__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_SWAP        (__force blk_opf_t)(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
        (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
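
/*
 * How the op and flag halves combine in practice; the value below is
 * just an illustration of the blk_opf_t encoding, not a recommendation:
 *
 *      blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 *      // (opf & REQ_OP_MASK) recovers REQ_OP_WRITE from the low 8 bits;
 *      // the high bits carry the req_flag_bits modifiers, here marking
 *      // a synchronous write with forced unit access.
 */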

enum stat_group {
        STAT_READ,
        STAT_WRITE,
        STAT_DISCARD,
        STAT_FLUSH,

        NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
        return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
        return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
        return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
        return (op & REQ_OP_MASK) == REQ_OP_READ ||
                (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
        return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
        switch (op & REQ_OP_MASK) {
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                return true;
        default:
                return false;
        }
}

static inline int op_stat_group(enum req_op op)
{
        if (op_is_discard(op))
                return STAT_DISCARD;
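        /*
         * Relies on the enum stat_group layout above: STAT_READ == 0 and
         * STAT_WRITE == 1 line up with op_is_write()'s 0/1 return value.
         */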
        return op_is_write(op);
}

struct blk_rq_stat {
        u64 mean;
        u64 min;
        u64 max;
        u32 nr_samples;
        u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */