/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,		/* Contains valid data */
	BH_Dirty,		/* Is dirty */
	BH_Lock,		/* Is locked */
	BH_Req,			/* Has been submitted for I/O */

	BH_Mapped,		/* Has a disk mapping */
	BH_New,			/* Disk mapping was newly created by get_block */
	BH_Async_Read,		/* Is under end_buffer_async_read I/O */
	BH_Async_Write,		/* Is under end_buffer_async_write I/O */
	BH_Delay,		/* Buffer is not yet allocated on disk */
	BH_Boundary,		/* Block is followed by a discontiguity */
	BH_Write_EIO,		/* I/O error on write */
	BH_Unwritten,		/* Buffer is allocated on disk but not written */
	BH_Quiet,		/* Buffer error printks are to be quiet */
	BH_Meta,		/* Buffer contains metadata */
	BH_Prio,		/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion,	/* Defer AIO completion to workqueue */

	BH_PrivateStart,	/* not a state bit, but the first bit available
				 * for private allocation by other entities
				 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers. Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	union {
		struct page *b_page;	/* the page this bh is mapped to */
		struct folio *b_folio;	/* the folio this bh is mapped to */
	};

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * Macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, which would cause
 * a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh)	\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}									\

/*
 * Emit the buffer bitops functions. Note that there are also functions
 * of the form "mark_buffer_foo()". These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
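
/*
 * Illustrative sketch (not part of this header): a filesystem can claim
 * state bits starting at BH_PrivateStart and reuse BUFFER_FNS to emit
 * accessors for them, e.g. with a hypothetical "frozen" bit:
 *
 *	enum myfs_state_bits {
 *		BH_Frozen = BH_PrivateStart,
 *	};
 *	BUFFER_FNS(Frozen, frozen)
 *
 * after which set_buffer_frozen(), clear_buffer_frozen() and
 * buffer_frozen() are available. jbd2 uses this pattern for its
 * journalling state bits.
 */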

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * Make it consistent with folio_mark_uptodate();
	 * pairs with the acquire in buffer_uptodate().
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * Make it consistent with folio_test_uptodate();
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate().
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
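
/*
 * Illustrative sketch: the release/acquire pairing above means a reader
 * that observes the bit set also observes the buffer contents written
 * before set_buffer_uptodate(), so the common pattern (dst hypothetical)
 *
 *	if (buffer_uptodate(bh))
 *		memcpy(dst, bh->b_data, bh->b_size);
 *
 * needs no additional barrier on the read side.
 */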

#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)	folio_get_private(folio)
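
/*
 * Illustrative sketch: the buffers of a page form a circular singly
 * linked list through b_this_page, so the canonical walk over a page's
 * buffers is:
 *
 *	struct buffer_head *bh, *head;
 *
 *	bh = head = page_buffers(page);
 *	do {
 *		// ... examine or update bh ...
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */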

void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		 struct page *page, unsigned long offset);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset);
bool try_to_free_buffers(struct folio *);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					bool retry);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry);
void create_empty_buffers(struct page *, unsigned long,
			  unsigned long b_state);
void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
				unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
				     unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				 unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
			  struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			    get_block_t *get_block,
			    struct writeback_control *wbc,
			    bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
			get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
		    loff_t, unsigned, unsigned,
		    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
		      loff_t, unsigned, unsigned,
		      struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
		     unsigned, struct page **, void **,
		     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		       get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
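
/*
 * Illustrative sketch (hypothetical myfs_get_block): a ->page_mkwrite
 * handler built on block_page_mkwrite() typically funnels the errno
 * through block_page_mkwrite_return():
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite_return(
 *			block_page_mkwrite(vmf->vma, vmf, myfs_get_block));
 *	}
 */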
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}
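
/*
 * Illustrative sketch: get_bh()/put_bh() bracket any period during which
 * the bh must not go away, e.g. while it is in flight. A synchronous
 * write mirrors what sync_dirty_buffer() does internally:
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_write_sync;
 *	submit_bh(REQ_OP_WRITE, bh);
 *	wait_on_buffer(bh);
 *	// end_buffer_write_sync() unlocks the buffer and drops the ref
 */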

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
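
/*
 * Illustrative sketch (sb, block, out hypothetical): the usual
 * metadata-read pattern pairs every successful sb_bread() with a
 * brelse() to drop the reference:
 *
 *	struct buffer_head *bh;
 *
 *	bh = sb_bread(sb, block);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(out, bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 */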

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
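
/*
 * Illustrative sketch (hypothetical myfs_get_block, MYFS_DATA_START):
 * a get_block_t implementation reports a mapping by filling in the bh
 * with map_bh(), e.g. for a filesystem whose file blocks map 1:1 onto
 * a contiguous on-disk data area:
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh_result, int create)
 *	{
 *		map_bh(bh_result, inode->i_sb, MYFS_DATA_START + iblock);
 *		return 0;
 *	}
 */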

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
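
/*
 * Illustrative sketch (off, src, len hypothetical): BH_Lock serialises
 * buffer contents against concurrent I/O, so a typical
 * modify-under-lock sequence is:
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + off, src, len);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 */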

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}

/* Returns 1 if the buffer was already uptodate, 0 after a successful read,
 * and -EIO on error. */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}
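
/*
 * Illustrative sketch: callers of bh_read() must handle all three
 * outcomes; only a negative value is an error:
 *
 *	int ret = bh_read(bh, 0);
 *
 *	if (ret < 0)
 *		return ret;	// I/O error (-EIO)
 *	// ret == 1: was already uptodate; ret == 0: freshly read
 */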

static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}

/**
 * __bread() - read a specified block and return the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns the buffer head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated. Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */