// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity
	 * if the discard request needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting
	 * the bio at discard granularity boundaries easier in the driver if
	 * needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}
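
/*
 * Worked example (illustrative numbers only): with a 1 MiB discard
 * granularity (2048 sectors) and a start sector of 3000 (after any
 * partition offset has been added), round_up(3000, 2048) yields 4096, so
 * the first bio is capped at 1096 sectors and the next one starts on a
 * granularity boundary.  Once aligned, the limit becomes
 * round_down(UINT_MAX, 1 MiB) >> SECTOR_SHIFT, i.e. 4095 MiB per bio.
 */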

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (bdev_read_only(bdev))
		return -EPERM;
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
		pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
				   bdev);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects =
			min(nr_sects, bio_discard_limit(bdev, sector));

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
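
/*
 * Illustrative sketch of a simple caller: discard every sector of a block
 * device in one call, e.g. as a fast pre-format erase.  The example_*
 * helper name is hypothetical; blkdev_issue_discard() and
 * bdev_nr_sectors() are the interfaces exported above and in
 * <linux/blkdev.h>.
 */
static inline int example_discard_whole_bdev(struct block_device *bdev)
{
	/* bdev_nr_sectors() reports the device capacity in 512-byte sectors */
	return blkdev_issue_discard(bdev, 0, bdev_nr_sectors(bdev),
				    GFP_KERNEL);
}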

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
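
/*
 * Worked example (assuming 4 KiB pages, illustrative only): 1..8 sectors
 * map to 1 page, 9 sectors to 2 pages, and anything above
 * BIO_MAX_VECS * 8 sectors is clamped to BIO_MAX_VECS pages, i.e. at most
 * 1 MiB of zero pages per bio.
 */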

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev,
				   __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.
 *
 *    If a device is using logical block provisioning, the underlying space
 *    will not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *    If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *    -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
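
/*
 * Illustrative sketch of the *biop chaining pattern (the example_* helper
 * is hypothetical): build the whole zeroout chain first, then submit it
 * once and wait, mirroring how blkdev_issue_discard() drives
 * __blkdev_issue_discard() above.
 */
static inline int example_zeroout_chained(struct block_device *bdev,
					  sector_t sector, sector_t nr_sects)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				     &bio, BLKDEV_ZERO_NOUNMAP);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}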

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *    valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
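
/*
 * Illustrative sketch (hypothetical example_* helper): request a zeroout
 * but refuse the page-writing fallback, so the call either uses a WRITE
 * ZEROES offload or fails with -EOPNOTSUPP.
 */
static inline int example_zeroout_offload_only(struct block_device *bdev,
					       sector_t sector,
					       sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}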

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	for (;;) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		if (!nr_sects) {
			ret = submit_bio_wait(bio);
			bio_put(bio);
			break;
		}
		cond_resched();
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
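
/*
 * Illustrative sketch (hypothetical example_* helper): securely erase a
 * whole device, checking first that the queue advertises support so the
 * caller can tell "unsupported" apart from a real I/O error.
 */
static inline int example_secure_erase_whole_bdev(struct block_device *bdev)
{
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_secure_erase(bdev, 0, bdev_nr_sectors(bdev),
					 GFP_KERNEL);
}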