// SPDX-License-Identifier: GPL-2.0
/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		page_endio(page, bio_op(bio),
			   blk_status_to_errno(bio->bi_status));
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit(struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	guard_bio_eod(bio);	/* trim the bio if it runs past end of device */
	submit_bio(bio);
	return NULL;
}

/*
 * support function for mpage_readahead.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows read_folio to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
		int page_block)
{
	struct inode *inode = folio->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	head = folio_buffers(folio);
	if (!head) {
		/*
		 * Don't attach buffers when a single block covers the whole
		 * folio: just mark the folio uptodate instead.
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			folio_mark_uptodate(folio);
			return;
		}
		create_empty_buffers(&folio->page, i_blocksize(inode), 0);
		head = folio_buffers(folio);
	}

	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

struct mpage_readpage_args {
	struct bio *bio;			/* bio under assembly */
	struct folio *folio;			/* current folio */
	unsigned int nr_pages;			/* pages remaining in this batch */
	bool is_readahead;
	sector_t last_block_in_bio;		/* last disk block added to bio */
	struct buffer_head map_bh;		/* cached result of last get_block */
	unsigned long first_logical_block;	/* file block where map_bh starts */
	get_block_t *get_block;
};

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible BIOs, submitting them for I/O
 * whenever the blocks turn out not to be contiguous on disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct folio *folio = args->folio;
	struct inode *inode = folio->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	blk_opf_t opf = REQ_OP_READ;
	unsigned nblocks;
	unsigned relative_block;
	gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);

	/*
	 * Large folios are not handled here; blocks[], for example, only
	 * holds MAX_BUF_PER_PAGE entries.
	 */
	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

	if (args->is_readahead) {
		opf |= REQ_RAHEAD;
		gfp |= __GFP_NORETRY | __GFP_NOWARN;
	}

	if (folio_buffers(folio))
		goto confused;

	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + args->nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_block call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
	    block_in_file > args->first_logical_block &&
	    block_in_file < (args->first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - args->first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
				relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_block calls until we are done with this folio.
	 */
	map_bh->b_folio = folio;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (args->get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			args->first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/*
		 * Some filesystems will copy data into the page during the
		 * get_block call, in which case we don't want to read it
		 * again.  map_buffer_to_folio copies the data we just
		 * collected from get_block into the folio's buffers so
		 * read_folio doesn't have to repeat the get_block call.
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_folio(folio, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			goto out;
		}
	} else if (fully_mapped) {
		folio_set_mappedtodisk(folio);
	}

	/*
	 * This folio will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
		args->bio = mpage_bio_submit(args->bio);

alloc_new:
	if (args->bio == NULL) {
		args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
				      gfp);
		if (args->bio == NULL)
			goto confused;
		/* convert the first fs block into a 512-byte sector number */
		args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
	}

	length = first_hole << blkbits;
	if (!bio_add_folio(args->bio, folio, length, 0)) {
		args->bio = mpage_bio_submit(args->bio);
		goto alloc_new;
	}

	relative_block = block_in_file - args->first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		args->bio = mpage_bio_submit(args->bio);
	else
		args->last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return args->bio;

confused:
	if (args->bio)
		args->bio = mpage_bio_submit(args->bio);
	if (!folio_test_uptodate(folio))
		block_read_full_folio(folio, args->get_block);
	else
		folio_unlock(folio);
	goto out;
}

/**
 * mpage_readahead - start reads against pages
 * @rac: Describes which pages to read.
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
{
	struct folio *folio;
	struct mpage_readpage_args args = {
		.get_block = get_block,
		.is_readahead = true,
	};

	while ((folio = readahead_folio(rac))) {
		prefetchw(&folio->flags);
		args.folio = folio;
		args.nr_pages = readahead_count(rac);
		args.bio = do_mpage_readpage(&args);
	}
	if (args.bio)
		mpage_bio_submit(args.bio);
}
EXPORT_SYMBOL(mpage_readahead);
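
/*
 * Typical hookup, sketched for illustration only: a filesystem points its
 * ->readahead at a thin wrapper which supplies its own get_block routine
 * (ext2 and fat follow this pattern).  The myfs_* names below are
 * placeholders, not real kernel symbols:
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		mpage_readahead(rac, myfs_get_block);
 *	}
 *
 *	const struct address_space_operations myfs_aops = {
 *		.readahead	= myfs_readahead,
 *		...
 *	};
 *
 * To benefit from the BH_Boundary behaviour described above, such a
 * get_block would call set_buffer_boundary() on the result buffer_head
 * whenever mapping the next block is likely to require a nearby metadata
 * read, along the lines of (myfs_block_precedes_metadata() is hypothetical):
 *
 *	map_bh(bh_result, inode->i_sb, phys_block);
 *	if (myfs_block_precedes_metadata(inode, iblock))
 *		set_buffer_boundary(bh_result);
 */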

/*
 * This isn't called much at all
 */
int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.folio = folio,
		.nr_pages = 1,
		.get_block = get_block,
	};

	args.bio = do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit(args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_read_folio);
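
/*
 * A sketch of the corresponding ->read_folio hookup (myfs_* is again a
 * placeholder; ext2's ext2_read_folio is the canonical example):
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return mpage_read_folio(folio, myfs_get_block);
 *	}
 */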

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We cannot drop the bh if the page is not uptodate: a concurrent
	 * read_folio would fail to serialize with the bh and would read
	 * from disk before our write reaches the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page_folio(page));
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	size_t length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	struct buffer_head *head = folio_buffers(folio);

	if (head) {
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * block_dirty_folio -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_folio().  If this address_space is also
		 * using mpage_readahead then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!folio_test_uptodate(folio));
	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
	/*
	 * Whole page beyond EOF?  Skip allocating blocks to avoid leaking
	 * space.
	 */
	if (block_in_file >= (i_size + (1 << blkbits) - 1) >> blkbits)
		goto page_is_mapped;
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_folio = folio;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (!buffer_mapped(&map_bh))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	/* Don't bother writing beyond EOF, truncate will discard the folio */
	if (folio_pos(folio) >= i_size)
		goto confused;
	length = folio_size(folio);
	if (folio_pos(folio) + length > i_size) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		length = i_size - folio_pos(folio);
		folio_zero_segment(folio, length, folio_size(folio));
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(bio);

alloc_new:
	if (bio == NULL) {
		bio = bio_alloc(bdev, BIO_MAX_VECS,
				REQ_OP_WRITE | wbc_to_write_flags(wbc),
				GFP_NOFS);
		bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
		wbc_init_bio(wbc, bio);
	}

	/*
	 * We must try to add the page to the bio before marking the buffers
	 * clean: otherwise the confused failure path above (OOM) would find
	 * all the buffer_heads already clean and would not write anything.
	 */
	wbc_account_cgroup_owner(wbc, &folio->page, folio_size(folio));
	length = first_unmapped << blkbits;
	if (!bio_add_folio(bio, folio, length, 0)) {
		bio = mpage_bio_submit(bio);
		goto alloc_new;
	}

	clean_buffers(&folio->page, first_unmapped);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(bio);

	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	ret = block_write_full_page(&folio->page, mpd->get_block, wbc);
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct mpage_data mpd = {
		.get_block = get_block,
	};
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
	if (mpd.bio)
		mpage_bio_submit(mpd.bio);
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
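
/*
 * A sketch of the ->writepages hookup (myfs_* is a placeholder; ext2 and
 * fat wire up mpage_writepages() in exactly this way):
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *				   struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, myfs_get_block);
 *	}
 */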