Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : #include <crypto/hash.h>
3 : #include <linux/export.h>
4 : #include <linux/bvec.h>
5 : #include <linux/fault-inject-usercopy.h>
6 : #include <linux/uio.h>
7 : #include <linux/pagemap.h>
8 : #include <linux/highmem.h>
9 : #include <linux/slab.h>
10 : #include <linux/vmalloc.h>
11 : #include <linux/splice.h>
12 : #include <linux/compat.h>
13 : #include <net/checksum.h>
14 : #include <linux/scatterlist.h>
15 : #include <linux/instrumented.h>
16 :
17 : #define PIPE_PARANOIA /* for now */
18 :
19 : /* covers ubuf and kbuf alike */
20 : #define iterate_buf(i, n, base, len, off, __p, STEP) { \
21 : size_t __maybe_unused off = 0; \
22 : len = n; \
23 : base = __p + i->iov_offset; \
24 : len -= (STEP); \
25 : i->iov_offset += len; \
26 : n = len; \
27 : }
28 :
29 : /* covers iovec and kvec alike */
30 : #define iterate_iovec(i, n, base, len, off, __p, STEP) { \
31 : size_t off = 0; \
32 : size_t skip = i->iov_offset; \
33 : do { \
34 : len = min(n, __p->iov_len - skip); \
35 : if (likely(len)) { \
36 : base = __p->iov_base + skip; \
37 : len -= (STEP); \
38 : off += len; \
39 : skip += len; \
40 : n -= len; \
41 : if (skip < __p->iov_len) \
42 : break; \
43 : } \
44 : __p++; \
45 : skip = 0; \
46 : } while (n); \
47 : i->iov_offset = skip; \
48 : n = off; \
49 : }
50 :
51 : #define iterate_bvec(i, n, base, len, off, p, STEP) { \
52 : size_t off = 0; \
53 : unsigned skip = i->iov_offset; \
54 : while (n) { \
55 : unsigned offset = p->bv_offset + skip; \
56 : unsigned left; \
57 : void *kaddr = kmap_local_page(p->bv_page + \
58 : offset / PAGE_SIZE); \
59 : base = kaddr + offset % PAGE_SIZE; \
60 : len = min(min(n, (size_t)(p->bv_len - skip)), \
61 : (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
62 : left = (STEP); \
63 : kunmap_local(kaddr); \
64 : len -= left; \
65 : off += len; \
66 : skip += len; \
67 : if (skip == p->bv_len) { \
68 : skip = 0; \
69 : p++; \
70 : } \
71 : n -= len; \
72 : if (left) \
73 : break; \
74 : } \
75 : i->iov_offset = skip; \
76 : n = off; \
77 : }
78 :
79 : #define iterate_xarray(i, n, base, len, __off, STEP) { \
80 : __label__ __out; \
81 : size_t __off = 0; \
82 : struct folio *folio; \
83 : loff_t start = i->xarray_start + i->iov_offset; \
84 : pgoff_t index = start / PAGE_SIZE; \
85 : XA_STATE(xas, i->xarray, index); \
86 : \
87 : len = PAGE_SIZE - offset_in_page(start); \
88 : rcu_read_lock(); \
89 : xas_for_each(&xas, folio, ULONG_MAX) { \
90 : unsigned left; \
91 : size_t offset; \
92 : if (xas_retry(&xas, folio)) \
93 : continue; \
94 : if (WARN_ON(xa_is_value(folio))) \
95 : break; \
96 : if (WARN_ON(folio_test_hugetlb(folio))) \
97 : break; \
98 : offset = offset_in_folio(folio, start + __off); \
99 : while (offset < folio_size(folio)) { \
100 : base = kmap_local_folio(folio, offset); \
101 : len = min(n, len); \
102 : left = (STEP); \
103 : kunmap_local(base); \
104 : len -= left; \
105 : __off += len; \
106 : n -= len; \
107 : if (left || n == 0) \
108 : goto __out; \
109 : offset += len; \
110 : len = PAGE_SIZE; \
111 : } \
112 : } \
113 : __out: \
114 : rcu_read_unlock(); \
115 : i->iov_offset += __off; \
116 : n = __off; \
117 : }
118 :
119 : #define __iterate_and_advance(i, n, base, len, off, I, K) { \
120 : if (unlikely(i->count < n)) \
121 : n = i->count; \
122 : if (likely(n)) { \
123 : if (likely(iter_is_ubuf(i))) { \
124 : void __user *base; \
125 : size_t len; \
126 : iterate_buf(i, n, base, len, off, \
127 : i->ubuf, (I)) \
128 : } else if (likely(iter_is_iovec(i))) { \
129 : const struct iovec *iov = iter_iov(i); \
130 : void __user *base; \
131 : size_t len; \
132 : iterate_iovec(i, n, base, len, off, \
133 : iov, (I)) \
134 : i->nr_segs -= iov - iter_iov(i); \
135 : i->__iov = iov; \
136 : } else if (iov_iter_is_bvec(i)) { \
137 : const struct bio_vec *bvec = i->bvec; \
138 : void *base; \
139 : size_t len; \
140 : iterate_bvec(i, n, base, len, off, \
141 : bvec, (K)) \
142 : i->nr_segs -= bvec - i->bvec; \
143 : i->bvec = bvec; \
144 : } else if (iov_iter_is_kvec(i)) { \
145 : const struct kvec *kvec = i->kvec; \
146 : void *base; \
147 : size_t len; \
148 : iterate_iovec(i, n, base, len, off, \
149 : kvec, (K)) \
150 : i->nr_segs -= kvec - i->kvec; \
151 : i->kvec = kvec; \
152 : } else if (iov_iter_is_xarray(i)) { \
153 : void *base; \
154 : size_t len; \
155 : iterate_xarray(i, n, base, len, off, \
156 : (K)) \
157 : } \
158 : i->count -= n; \
159 : } \
160 : }
161 : #define iterate_and_advance(i, n, base, len, off, I, K) \
162 : __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
163 :
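/*
 * Editor's example (illustrative sketch, not part of this file): how the
 * two STEP expressions plug into iterate_and_advance().  Each STEP must
 * evaluate to the number of bytes it could NOT handle, so 0 means "all of
 * 'len' was processed".  The user-space step (I) may fail part-way; the
 * kernel-space step (K) is forced to 0 by the iterate_and_advance()
 * wrapper, since memcpy()/memset() cannot fail.  On return, the 'n'
 * argument ('bytes' here) has been updated to the amount processed.
 * This mirrors iov_iter_zero() below, minus the pipe special case.
 */
static size_t example_zero_iter(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, base, len, off,
		clear_user(base, len),	/* I: returns bytes NOT zeroed */
		memset(base, 0, len)	/* K: result discarded -> 0 */
	)

	return bytes;
}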
164 0 : static int copyout(void __user *to, const void *from, size_t n)
165 : {
166 : if (should_fail_usercopy())
167 : return n;
168 0 : if (access_ok(to, n)) {
169 0 : instrument_copy_to_user(to, from, n);
170 0 : n = raw_copy_to_user(to, from, n);
171 : }
172 0 : return n;
173 : }
174 :
175 : static int copyout_nofault(void __user *to, const void *from, size_t n)
176 : {
177 : long res;
178 :
179 : if (should_fail_usercopy())
180 : return n;
181 :
182 0 : res = copy_to_user_nofault(to, from, n);
183 :
184 0 : return res < 0 ? n : res;
185 : }
186 :
187 0 : static int copyin(void *to, const void __user *from, size_t n)
188 : {
189 0 : size_t res = n;
190 :
191 : if (should_fail_usercopy())
192 : return n;
193 0 : if (access_ok(from, n)) {
194 0 : instrument_copy_from_user_before(to, from, n);
195 0 : res = raw_copy_from_user(to, from, n);
196 : instrument_copy_from_user_after(to, from, n, res);
197 : }
198 0 : return res;
199 : }
200 :
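/*
 * Editor's example (sketch): copyout()/copyin() follow the
 * copy_{to,from}_user() convention and return the number of bytes that
 * could NOT be transferred, so 0 means complete success.  A hypothetical
 * caller that wants an all-or-nothing copy:
 */
static int example_copyin_full(void *dst, const void __user *src, size_t n)
{
	return copyin(dst, src, n) ? -EFAULT : 0;
}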
201 : #ifdef PIPE_PARANOIA
202 0 : static bool sanity(const struct iov_iter *i)
203 : {
204 0 : struct pipe_inode_info *pipe = i->pipe;
205 0 : unsigned int p_head = pipe->head;
206 0 : unsigned int p_tail = pipe->tail;
207 0 : unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
208 0 : unsigned int i_head = i->head;
209 : unsigned int idx;
210 :
211 0 : if (i->last_offset) {
212 : struct pipe_buffer *p;
213 0 : if (unlikely(p_occupancy == 0))
214 : goto Bad; // pipe must be non-empty
215 0 : if (unlikely(i_head != p_head - 1))
216 : goto Bad; // must be at the last buffer...
217 :
218 0 : p = pipe_buf(pipe, i_head);
219 0 : if (unlikely(p->offset + p->len != abs(i->last_offset)))
220 : goto Bad; // ... at the end of segment
221 : } else {
222 0 : if (i_head != p_head)
223 : goto Bad; // must be right after the last buffer
224 : }
225 : return true;
226 : Bad:
227 0 : printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
228 0 : printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
229 : p_head, p_tail, pipe->ring_size);
230 0 : for (idx = 0; idx < pipe->ring_size; idx++)
231 0 : printk(KERN_ERR "[%p %p %d %d]\n",
232 : pipe->bufs[idx].ops,
233 : pipe->bufs[idx].page,
234 : pipe->bufs[idx].offset,
235 : pipe->bufs[idx].len);
236 0 : WARN_ON(1);
237 : return false;
238 : }
239 : #else
240 : #define sanity(i) true
241 : #endif
242 :
243 0 : static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
244 : {
245 0 : struct page *page = alloc_page(GFP_USER);
246 0 : if (page) {
247 0 : struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
248 0 : *buf = (struct pipe_buffer) {
249 : .ops = &default_pipe_buf_ops,
250 : .page = page,
251 : .offset = 0,
252 : .len = size
253 : };
254 : }
255 0 : return page;
256 : }
257 :
258 0 : static void push_page(struct pipe_inode_info *pipe, struct page *page,
259 : unsigned int offset, unsigned int size)
260 : {
261 0 : struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
262 0 : *buf = (struct pipe_buffer) {
263 : .ops = &page_cache_pipe_buf_ops,
264 : .page = page,
265 : .offset = offset,
266 : .len = size
267 : };
268 0 : get_page(page);
269 0 : }
270 :
271 : static inline int last_offset(const struct pipe_buffer *buf)
272 : {
273 0 : if (buf->ops == &default_pipe_buf_ops)
274 0 : return buf->len; // buf->offset is 0 for those
275 : else
276 0 : return -(buf->offset + buf->len);
277 : }
278 :
279 0 : static struct page *append_pipe(struct iov_iter *i, size_t size,
280 : unsigned int *off)
281 : {
282 0 : struct pipe_inode_info *pipe = i->pipe;
283 0 : int offset = i->last_offset;
284 : struct pipe_buffer *buf;
285 : struct page *page;
286 :
287 0 : if (offset > 0 && offset < PAGE_SIZE) {
288 : // some space in the last buffer; add to it
289 0 : buf = pipe_buf(pipe, pipe->head - 1);
290 0 : size = min_t(size_t, size, PAGE_SIZE - offset);
291 0 : buf->len += size;
292 0 : i->last_offset += size;
293 0 : i->count -= size;
294 0 : *off = offset;
295 0 : return buf->page;
296 : }
297 : // OK, we need a new buffer
298 0 : *off = 0;
299 0 : size = min_t(size_t, size, PAGE_SIZE);
300 0 : if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
301 : return NULL;
302 0 : page = push_anon(pipe, size);
303 0 : if (!page)
304 : return NULL;
305 0 : i->head = pipe->head - 1;
306 0 : i->last_offset = size;
307 0 : i->count -= size;
308 0 : return page;
309 : }
310 :
311 0 : static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
312 : struct iov_iter *i)
313 : {
314 0 : struct pipe_inode_info *pipe = i->pipe;
315 0 : unsigned int head = pipe->head;
316 :
317 0 : if (unlikely(bytes > i->count))
318 0 : bytes = i->count;
319 :
320 0 : if (unlikely(!bytes))
321 : return 0;
322 :
323 0 : if (!sanity(i))
324 : return 0;
325 :
326 0 : if (offset && i->last_offset == -offset) { // could we merge it?
327 0 : struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
328 0 : if (buf->page == page) {
329 0 : buf->len += bytes;
330 0 : i->last_offset -= bytes;
331 0 : i->count -= bytes;
332 0 : return bytes;
333 : }
334 : }
335 0 : if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
336 : return 0;
337 :
338 0 : push_page(pipe, page, offset, bytes);
339 0 : i->last_offset = -(offset + bytes);
340 0 : i->head = head;
341 0 : i->count -= bytes;
342 0 : return bytes;
343 : }
344 :
345 : /*
346 : * fault_in_iov_iter_readable - fault in iov iterator for reading
347 : * @i: iterator
348 : * @size: maximum length
349 : *
350 : * Fault in one or more iovecs of the given iov_iter, to a maximum length of
351 : * @size. For each iovec, fault in each page that constitutes the iovec.
352 : *
353 : * Returns the number of bytes not faulted in (like copy_to_user() and
354 : * copy_from_user()).
355 : *
356 : * Always returns 0 for non-userspace iterators.
357 : */
358 0 : size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
359 : {
360 0 : if (iter_is_ubuf(i)) {
361 0 : size_t n = min(size, iov_iter_count(i));
362 0 : n -= fault_in_readable(i->ubuf + i->iov_offset, n);
363 0 : return size - n;
364 0 : } else if (iter_is_iovec(i)) {
365 0 : size_t count = min(size, iov_iter_count(i));
366 : const struct iovec *p;
367 : size_t skip;
368 :
369 0 : size -= count;
370 0 : for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
371 0 : size_t len = min(count, p->iov_len - skip);
372 : size_t ret;
373 :
374 0 : if (unlikely(!len))
375 0 : continue;
376 0 : ret = fault_in_readable(p->iov_base + skip, len);
377 0 : count -= len - ret;
378 0 : if (ret)
379 : break;
380 : }
381 0 : return count + size;
382 : }
383 : return 0;
384 : }
385 : EXPORT_SYMBOL(fault_in_iov_iter_readable);
386 :
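/*
 * Editor's example (sketch of the usual caller pattern, cf.
 * generic_perform_write()): pre-fault the source pages, try the copy,
 * and retry short copies.  If fault_in_iov_iter_readable() cannot fault
 * in anything at all, the user range is gone and we fail with -EFAULT.
 * Assumes 'i' is a user-backed source (WRITE) iterator.
 */
static ssize_t example_pull_from_user(void *dst, size_t bytes,
				      struct iov_iter *i)
{
	size_t done = 0;

	while (bytes) {
		size_t copied;

		if (fault_in_iov_iter_readable(i, bytes) == bytes)
			return done ? done : -EFAULT;
		copied = copy_from_iter(dst + done, bytes, i);
		done += copied;
		bytes -= copied;
	}
	return done;
}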
387 : /*
388 : * fault_in_iov_iter_writeable - fault in iov iterator for writing
389 : * @i: iterator
390 : * @size: maximum length
391 : *
392 : * Faults in the iterator using get_user_pages(), i.e., without triggering
393 : * hardware page faults. This is primarily useful when we already know that
394 : * some or all of the pages in @i aren't in memory.
395 : *
396 : * Returns the number of bytes not faulted in, like copy_to_user() and
397 : * copy_from_user().
398 : *
399 : * Always returns 0 for non-userspace iterators.
400 : */
401 0 : size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
402 : {
403 0 : if (iter_is_ubuf(i)) {
404 0 : size_t n = min(size, iov_iter_count(i));
405 0 : n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
406 0 : return size - n;
407 0 : } else if (iter_is_iovec(i)) {
408 0 : size_t count = min(size, iov_iter_count(i));
409 : const struct iovec *p;
410 : size_t skip;
411 :
412 0 : size -= count;
413 0 : for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
414 0 : size_t len = min(count, p->iov_len - skip);
415 : size_t ret;
416 :
417 0 : if (unlikely(!len))
418 0 : continue;
419 0 : ret = fault_in_safe_writeable(p->iov_base + skip, len);
420 0 : count -= len - ret;
421 0 : if (ret)
422 : break;
423 : }
424 0 : return count + size;
425 : }
426 : return 0;
427 : }
428 : EXPORT_SYMBOL(fault_in_iov_iter_writeable);
429 :
430 0 : void iov_iter_init(struct iov_iter *i, unsigned int direction,
431 : const struct iovec *iov, unsigned long nr_segs,
432 : size_t count)
433 : {
434 0 : WARN_ON(direction & ~(READ | WRITE));
435 0 : *i = (struct iov_iter) {
436 : .iter_type = ITER_IOVEC,
437 : .copy_mc = false,
438 : .nofault = false,
439 : .user_backed = true,
440 : .data_source = direction,
441 : .__iov = iov,
442 : .nr_segs = nr_segs,
443 : .iov_offset = 0,
444 : .count = count
445 : };
446 0 : }
447 : EXPORT_SYMBOL(iov_iter_init);
448 :
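/*
 * Editor's example (sketch): a READ iterator describes user memory that
 * data will be copied *to*.  The iovec array is assumed to have been
 * validated already (e.g. by import_iovec()).
 */
static size_t example_fill_user(const struct iovec *vec, unsigned long nr,
				size_t total, const void *src)
{
	struct iov_iter to;

	iov_iter_init(&to, READ, vec, nr, total);
	return copy_to_iter(src, total, &to);	/* returns bytes copied */
}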
449 : // returns the offset in the partial buffer (if any)
450 : static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
451 : {
452 0 : struct pipe_inode_info *pipe = i->pipe;
453 0 : int used = pipe->head - pipe->tail;
454 0 : int off = i->last_offset;
455 :
456 0 : *npages = max((int)pipe->max_usage - used, 0);
457 :
458 0 : if (off > 0 && off < PAGE_SIZE) { // anon and not full
459 0 : (*npages)++;
460 : return off;
461 : }
462 : return 0;
463 : }
464 :
465 0 : static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
466 : struct iov_iter *i)
467 : {
468 : unsigned int off, chunk;
469 :
470 0 : if (unlikely(bytes > i->count))
471 0 : bytes = i->count;
472 0 : if (unlikely(!bytes))
473 : return 0;
474 :
475 0 : if (!sanity(i))
476 : return 0;
477 :
478 0 : for (size_t n = bytes; n; n -= chunk) {
479 0 : struct page *page = append_pipe(i, n, &off);
480 0 : chunk = min_t(size_t, n, PAGE_SIZE - off);
481 0 : if (!page)
482 0 : return bytes - n;
483 0 : memcpy_to_page(page, off, addr, chunk);
484 0 : addr += chunk;
485 : }
486 : return bytes;
487 : }
488 :
489 0 : static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
490 : __wsum sum, size_t off)
491 : {
492 0 : __wsum next = csum_partial_copy_nocheck(from, to, len);
493 0 : return csum_block_add(sum, next, off);
494 : }
495 :
496 0 : static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
497 : struct iov_iter *i, __wsum *sump)
498 : {
499 0 : __wsum sum = *sump;
500 0 : size_t off = 0;
501 : unsigned int chunk, r;
502 :
503 0 : if (unlikely(bytes > i->count))
504 0 : bytes = i->count;
505 0 : if (unlikely(!bytes))
506 : return 0;
507 :
508 0 : if (!sanity(i))
509 : return 0;
510 :
511 0 : while (bytes) {
512 0 : struct page *page = append_pipe(i, bytes, &r);
513 : char *p;
514 :
515 0 : if (!page)
516 : break;
517 0 : chunk = min_t(size_t, bytes, PAGE_SIZE - r);
518 0 : p = kmap_local_page(page);
519 0 : sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
520 : kunmap_local(p);
521 0 : off += chunk;
522 0 : bytes -= chunk;
523 : }
524 0 : *sump = sum;
525 0 : return off;
526 : }
527 :
528 0 : size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
529 : {
530 0 : if (WARN_ON_ONCE(i->data_source))
531 : return 0;
532 0 : if (unlikely(iov_iter_is_pipe(i)))
533 0 : return copy_pipe_to_iter(addr, bytes, i);
534 0 : if (user_backed_iter(i))
535 : might_fault();
536 0 : iterate_and_advance(i, bytes, base, len, off,
537 : copyout(base, addr + off, len),
538 : memcpy(base, addr + off, len)
539 : )
540 :
541 : return bytes;
542 : }
543 : EXPORT_SYMBOL(_copy_to_iter);
544 :
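/*
 * Editor's example (sketch): _copy_to_iter() returns the number of bytes
 * actually transferred, which may be short for user-backed iterators.
 * A hypothetical ->read_iter() tail that maps "nothing copied" to -EFAULT:
 */
static ssize_t example_read_tail(struct iov_iter *to, const void *buf,
				 size_t len)
{
	size_t copied = _copy_to_iter(buf, len, to);

	return copied ? copied : -EFAULT;
}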
545 : #ifdef CONFIG_ARCH_HAS_COPY_MC
546 : static int copyout_mc(void __user *to, const void *from, size_t n)
547 : {
548 : if (access_ok(to, n)) {
549 : instrument_copy_to_user(to, from, n);
550 : n = copy_mc_to_user((__force void *) to, from, n);
551 : }
552 : return n;
553 : }
554 :
555 : static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
556 : struct iov_iter *i)
557 : {
558 : size_t xfer = 0;
559 : unsigned int off, chunk;
560 :
561 : if (unlikely(bytes > i->count))
562 : bytes = i->count;
563 : if (unlikely(!bytes))
564 : return 0;
565 :
566 : if (!sanity(i))
567 : return 0;
568 :
569 : while (bytes) {
570 : struct page *page = append_pipe(i, bytes, &off);
571 : unsigned long rem;
572 : char *p;
573 :
574 : if (!page)
575 : break;
576 : chunk = min_t(size_t, bytes, PAGE_SIZE - off);
577 : p = kmap_local_page(page);
578 : rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
579 : chunk -= rem;
580 : kunmap_local(p);
581 : xfer += chunk;
582 : bytes -= chunk;
583 : if (rem) {
584 : iov_iter_revert(i, rem);
585 : break;
586 : }
587 : }
588 : return xfer;
589 : }
590 :
591 : /**
592 : * _copy_mc_to_iter - copy to iter with source memory error exception handling
593 : * @addr: source kernel address
594 : * @bytes: total transfer length
595 : * @i: destination iterator
596 : *
597 : * The pmem driver deploys this for the dax operation
598 : * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
599 : * block-layer). Upon a machine check (#MC), read(2) aborts and returns
600 : * EIO or the number of bytes successfully copied.
601 : *
602 : * The main differences between this and typical _copy_to_iter() are:
603 : *
604 : * * Typical tail/residue handling after a fault retries the copy
605 : * byte-by-byte until the fault happens again. Re-triggering machine
606 : * checks is potentially fatal so the implementation uses source
607 : * alignment and poison alignment assumptions to avoid re-triggering
608 : * hardware exceptions.
609 : *
610 : * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
611 : * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
612 : * a short copy.
613 : *
614 : * Return: number of bytes copied (may be %0)
615 : */
616 : size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
617 : {
618 : if (WARN_ON_ONCE(i->data_source))
619 : return 0;
620 : if (unlikely(iov_iter_is_pipe(i)))
621 : return copy_mc_pipe_to_iter(addr, bytes, i);
622 : if (user_backed_iter(i))
623 : might_fault();
624 : __iterate_and_advance(i, bytes, base, len, off,
625 : copyout_mc(base, addr + off, len),
626 : copy_mc_to_kernel(base, addr + off, len)
627 : )
628 :
629 : return bytes;
630 : }
631 : EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
632 : #endif /* CONFIG_ARCH_HAS_COPY_MC */
633 :
634 : static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
635 : size_t size)
636 : {
637 0 : if (iov_iter_is_copy_mc(i))
638 : return (void *)copy_mc_to_kernel(to, from, size);
639 0 : return memcpy(to, from, size);
640 : }
641 :
642 0 : size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
643 : {
644 0 : if (WARN_ON_ONCE(!i->data_source))
645 : return 0;
646 :
647 0 : if (user_backed_iter(i))
648 : might_fault();
649 0 : iterate_and_advance(i, bytes, base, len, off,
650 : copyin(addr + off, base, len),
651 : memcpy_from_iter(i, addr + off, base, len)
652 : )
653 :
654 : return bytes;
655 : }
656 : EXPORT_SYMBOL(_copy_from_iter);
657 :
658 0 : size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
659 : {
660 0 : if (WARN_ON_ONCE(!i->data_source))
661 : return 0;
662 :
663 0 : iterate_and_advance(i, bytes, base, len, off,
664 : __copy_from_user_inatomic_nocache(addr + off, base, len),
665 : memcpy(addr + off, base, len)
666 : )
667 :
668 : return bytes;
669 : }
670 : EXPORT_SYMBOL(_copy_from_iter_nocache);
671 :
672 : #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
673 : /**
674 : * _copy_from_iter_flushcache - write destination through cpu cache
675 : * @addr: destination kernel address
676 : * @bytes: total transfer length
677 : * @i: source iterator
678 : *
679 : * The pmem driver arranges for filesystem-dax to use this facility via
680 : * dax_copy_from_iter() for ensuring that writes to persistent memory
681 : * are flushed through the CPU cache. It is differentiated from
682 : * _copy_from_iter_nocache() in that it guarantees all data is flushed
683 : * for all iterator types, whereas _copy_from_iter_nocache() only
684 : * attempts to bypass the cache for the ITER_IOVEC case, and on some
685 : * architectures may use instructions that strand dirty data in the cache.
686 : *
687 : * Return: number of bytes copied (may be %0)
688 : */
689 : size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
690 : {
691 : if (WARN_ON_ONCE(!i->data_source))
692 : return 0;
693 :
694 : iterate_and_advance(i, bytes, base, len, off,
695 : __copy_from_user_flushcache(addr + off, base, len),
696 : memcpy_flushcache(addr + off, base, len)
697 : )
698 :
699 : return bytes;
700 : }
701 : EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
702 : #endif
703 :
704 0 : static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
705 : {
706 : struct page *head;
707 0 : size_t v = n + offset;
708 :
709 : /*
710 : * The general case needs to access the page order to
711 : * compute the page size.
712 : * However, we mostly deal with order-0 pages, so we can
713 : * avoid a possible cache line miss for requests that fit
714 : * within a page of any order.
715 : */
716 0 : if (n <= v && v <= PAGE_SIZE)
717 : return true;
718 :
719 0 : head = compound_head(page);
720 0 : v += (page - head) << PAGE_SHIFT;
721 :
722 0 : if (WARN_ON(n > v || v > page_size(head)))
723 : return false;
724 0 : return true;
725 : }
726 :
727 0 : size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
728 : struct iov_iter *i)
729 : {
730 0 : size_t res = 0;
731 0 : if (!page_copy_sane(page, offset, bytes))
732 : return 0;
733 0 : if (WARN_ON_ONCE(i->data_source))
734 : return 0;
735 0 : if (unlikely(iov_iter_is_pipe(i)))
736 0 : return copy_page_to_iter_pipe(page, offset, bytes, i);
737 0 : page += offset / PAGE_SIZE; // first subpage
738 0 : offset %= PAGE_SIZE;
739 : while (1) {
740 0 : void *kaddr = kmap_local_page(page);
741 0 : size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
742 0 : n = _copy_to_iter(kaddr + offset, n, i);
743 : kunmap_local(kaddr);
744 0 : res += n;
745 0 : bytes -= n;
746 0 : if (!bytes || !n)
747 : break;
748 0 : offset += n;
749 0 : if (offset == PAGE_SIZE) {
750 0 : page++;
751 0 : offset = 0;
752 : }
753 : }
754 : return res;
755 : }
756 : EXPORT_SYMBOL(copy_page_to_iter);
757 :
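/*
 * Editor's example (sketch): pushing a run of pages to a destination
 * iterator, page-cache-read style.  copy_page_to_iter() kmaps and copes
 * with compound pages internally; a short return means the user side
 * faulted, so the loop must stop.
 */
static size_t example_send_pages(struct page **pages, unsigned int npages,
				 size_t offset, struct iov_iter *to)
{
	size_t done = 0;

	for (unsigned int k = 0; k < npages && iov_iter_count(to); k++) {
		size_t want = PAGE_SIZE - offset;
		size_t n = copy_page_to_iter(pages[k], offset, want, to);

		done += n;
		if (n < want)
			break;
		offset = 0;
	}
	return done;
}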
758 0 : size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
759 : struct iov_iter *i)
760 : {
761 0 : size_t res = 0;
762 :
763 0 : if (!page_copy_sane(page, offset, bytes))
764 : return 0;
765 0 : if (WARN_ON_ONCE(i->data_source))
766 : return 0;
767 0 : if (unlikely(iov_iter_is_pipe(i)))
768 0 : return copy_page_to_iter_pipe(page, offset, bytes, i);
769 0 : page += offset / PAGE_SIZE; // first subpage
770 0 : offset %= PAGE_SIZE;
771 : while (1) {
772 0 : void *kaddr = kmap_local_page(page);
773 0 : size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
774 :
775 0 : iterate_and_advance(i, n, base, len, off,
776 : copyout_nofault(base, kaddr + offset + off, len),
777 : memcpy(base, kaddr + offset + off, len)
778 : )
779 : kunmap_local(kaddr);
780 0 : res += n;
781 0 : bytes -= n;
782 0 : if (!bytes || !n)
783 : break;
784 0 : offset += n;
785 0 : if (offset == PAGE_SIZE) {
786 0 : page++;
787 0 : offset = 0;
788 : }
789 : }
790 : return res;
791 : }
792 : EXPORT_SYMBOL(copy_page_to_iter_nofault);
793 :
794 0 : size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
795 : struct iov_iter *i)
796 : {
797 0 : size_t res = 0;
798 0 : if (!page_copy_sane(page, offset, bytes))
799 : return 0;
800 0 : page += offset / PAGE_SIZE; // first subpage
801 0 : offset %= PAGE_SIZE;
802 : while (1) {
803 0 : void *kaddr = kmap_local_page(page);
804 0 : size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
805 0 : n = _copy_from_iter(kaddr + offset, n, i);
806 : kunmap_local(kaddr);
807 0 : res += n;
808 0 : bytes -= n;
809 0 : if (!bytes || !n)
810 : break;
811 0 : offset += n;
812 0 : if (offset == PAGE_SIZE) {
813 0 : page++;
814 0 : offset = 0;
815 : }
816 : }
817 : return res;
818 : }
819 : EXPORT_SYMBOL(copy_page_from_iter);
820 :
821 0 : static size_t pipe_zero(size_t bytes, struct iov_iter *i)
822 : {
823 : unsigned int chunk, off;
824 :
825 0 : if (unlikely(bytes > i->count))
826 0 : bytes = i->count;
827 0 : if (unlikely(!bytes))
828 : return 0;
829 :
830 0 : if (!sanity(i))
831 : return 0;
832 :
833 0 : for (size_t n = bytes; n; n -= chunk) {
834 0 : struct page *page = append_pipe(i, n, &off);
835 : char *p;
836 :
837 0 : if (!page)
838 0 : return bytes - n;
839 0 : chunk = min_t(size_t, n, PAGE_SIZE - off);
840 0 : p = kmap_local_page(page);
841 0 : memset(p + off, 0, chunk);
842 : kunmap_local(p);
843 : }
844 : return bytes;
845 : }
846 :
847 0 : size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
848 : {
849 0 : if (unlikely(iov_iter_is_pipe(i)))
850 0 : return pipe_zero(bytes, i);
851 0 : iterate_and_advance(i, bytes, base, len, count,
852 : clear_user(base, len),
853 : memset(base, 0, len)
854 : )
855 :
856 : return bytes;
857 : }
858 : EXPORT_SYMBOL(iov_iter_zero);
859 :
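/*
 * Editor's example (sketch): reads from a hole in a sparse file are
 * satisfied by zero-filling the iterator instead of copying page data.
 */
static size_t example_read_hole(struct iov_iter *to, size_t hole_bytes)
{
	return iov_iter_zero(min(hole_bytes, iov_iter_count(to)), to);
}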
860 0 : size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
861 : struct iov_iter *i)
862 : {
863 0 : char *kaddr = kmap_atomic(page), *p = kaddr + offset;
864 0 : if (!page_copy_sane(page, offset, bytes)) {
865 0 : kunmap_atomic(kaddr);
866 0 : return 0;
867 : }
868 0 : if (WARN_ON_ONCE(!i->data_source)) {
869 0 : kunmap_atomic(kaddr);
870 0 : return 0;
871 : }
872 0 : iterate_and_advance(i, bytes, base, len, off,
873 : copyin(p + off, base, len),
874 : memcpy_from_iter(i, p + off, base, len)
875 : )
876 0 : kunmap_atomic(kaddr);
877 0 : return bytes;
878 : }
879 : EXPORT_SYMBOL(copy_page_from_iter_atomic);
880 :
881 0 : static void pipe_advance(struct iov_iter *i, size_t size)
882 : {
883 0 : struct pipe_inode_info *pipe = i->pipe;
884 0 : int off = i->last_offset;
885 :
886 0 : if (!off && !size) {
887 0 : pipe_discard_from(pipe, i->start_head); // discard everything
888 : return;
889 : }
890 0 : i->count -= size;
891 0 : while (1) {
892 0 : struct pipe_buffer *buf = pipe_buf(pipe, i->head);
893 0 : if (off) /* make it relative to the beginning of buffer */
894 0 : size += abs(off) - buf->offset;
895 0 : if (size <= buf->len) {
896 0 : buf->len = size;
897 0 : i->last_offset = last_offset(buf);
898 : break;
899 : }
900 0 : size -= buf->len;
901 0 : i->head++;
902 0 : off = 0;
903 : }
904 0 : pipe_discard_from(pipe, i->head + 1); // discard everything past this one
905 : }
906 :
907 0 : static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
908 : {
909 : const struct bio_vec *bvec, *end;
910 :
911 0 : if (!i->count)
912 : return;
913 0 : i->count -= size;
914 :
915 0 : size += i->iov_offset;
916 :
917 0 : for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
918 0 : if (likely(size < bvec->bv_len))
919 : break;
920 0 : size -= bvec->bv_len;
921 : }
922 0 : i->iov_offset = size;
923 0 : i->nr_segs -= bvec - i->bvec;
924 0 : i->bvec = bvec;
925 : }
926 :
927 0 : static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
928 : {
929 : const struct iovec *iov, *end;
930 :
931 0 : if (!i->count)
932 : return;
933 0 : i->count -= size;
934 :
935 0 : size += i->iov_offset; // from beginning of current segment
936 0 : for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
937 0 : if (likely(size < iov->iov_len))
938 : break;
939 0 : size -= iov->iov_len;
940 : }
941 0 : i->iov_offset = size;
942 0 : i->nr_segs -= iov - iter_iov(i);
943 0 : i->__iov = iov;
944 : }
945 :
946 0 : void iov_iter_advance(struct iov_iter *i, size_t size)
947 : {
948 0 : if (unlikely(i->count < size))
949 0 : size = i->count;
950 0 : if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
951 0 : i->iov_offset += size;
952 0 : i->count -= size;
953 0 : } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
954 : /* iovec and kvec have identical layouts */
955 0 : iov_iter_iovec_advance(i, size);
956 0 : } else if (iov_iter_is_bvec(i)) {
957 0 : iov_iter_bvec_advance(i, size);
958 0 : } else if (iov_iter_is_pipe(i)) {
959 0 : pipe_advance(i, size);
960 0 : } else if (iov_iter_is_discard(i)) {
961 0 : i->count -= size;
962 : }
963 0 : }
964 : EXPORT_SYMBOL(iov_iter_advance);
965 :
966 0 : void iov_iter_revert(struct iov_iter *i, size_t unroll)
967 : {
968 0 : if (!unroll)
969 : return;
970 0 : if (WARN_ON(unroll > MAX_RW_COUNT))
971 : return;
972 0 : i->count += unroll;
973 0 : if (unlikely(iov_iter_is_pipe(i))) {
974 0 : struct pipe_inode_info *pipe = i->pipe;
975 0 : unsigned int head = pipe->head;
976 :
977 0 : while (head > i->start_head) {
978 0 : struct pipe_buffer *b = pipe_buf(pipe, --head);
979 0 : if (unroll < b->len) {
980 0 : b->len -= unroll;
981 0 : i->last_offset = last_offset(b);
982 0 : i->head = head;
983 0 : return;
984 : }
985 0 : unroll -= b->len;
986 0 : pipe_buf_release(pipe, b);
987 0 : pipe->head--;
988 : }
989 0 : i->last_offset = 0;
990 0 : i->head = head;
991 0 : return;
992 : }
993 0 : if (unlikely(iov_iter_is_discard(i)))
994 : return;
995 0 : if (unroll <= i->iov_offset) {
996 0 : i->iov_offset -= unroll;
997 0 : return;
998 : }
999 0 : unroll -= i->iov_offset;
1000 0 : if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
1001 0 : BUG(); /* We should never go beyond the start of the specified
1002 : * range since we might then be straying into pages that
1003 : * aren't pinned.
1004 : */
1005 0 : } else if (iov_iter_is_bvec(i)) {
1006 0 : const struct bio_vec *bvec = i->bvec;
1007 0 : while (1) {
1008 0 : size_t n = (--bvec)->bv_len;
1009 0 : i->nr_segs++;
1010 0 : if (unroll <= n) {
1011 0 : i->bvec = bvec;
1012 0 : i->iov_offset = n - unroll;
1013 0 : return;
1014 : }
1015 0 : unroll -= n;
1016 : }
1017 : } else { /* same logics for iovec and kvec */
1018 0 : const struct iovec *iov = iter_iov(i);
1019 0 : while (1) {
1020 0 : size_t n = (--iov)->iov_len;
1021 0 : i->nr_segs++;
1022 0 : if (unroll <= n) {
1023 0 : i->__iov = iov;
1024 0 : i->iov_offset = n - unroll;
1025 0 : return;
1026 : }
1027 0 : unroll -= n;
1028 : }
1029 : }
1030 : }
1031 : EXPORT_SYMBOL(iov_iter_revert);
1032 :
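/*
 * Editor's example (sketch): a consumer that takes data from the
 * iterator but can fail afterwards must hand the bytes back with
 * iov_iter_revert().  example_commit() is a hypothetical stand-in for
 * whatever step might fail after the copy.
 */
static int example_commit(void *buf, size_t len)
{
	return 0;	/* placeholder: pretend the commit succeeded */
}

static ssize_t example_try_write(void *dst, size_t len, struct iov_iter *from)
{
	size_t copied = copy_from_iter(dst, len, from);

	if (example_commit(dst, copied) < 0) {
		iov_iter_revert(from, copied);	/* undo the advance */
		return -EIO;
	}
	return copied;
}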
1033 : /*
1034 : * Return the count of just the current iov_iter segment.
1035 : */
1036 0 : size_t iov_iter_single_seg_count(const struct iov_iter *i)
1037 : {
1038 0 : if (i->nr_segs > 1) {
1039 0 : if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1040 0 : return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
1041 0 : if (iov_iter_is_bvec(i))
1042 0 : return min(i->count, i->bvec->bv_len - i->iov_offset);
1043 : }
1044 0 : return i->count;
1045 : }
1046 : EXPORT_SYMBOL(iov_iter_single_seg_count);
1047 :
1048 0 : void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1049 : const struct kvec *kvec, unsigned long nr_segs,
1050 : size_t count)
1051 : {
1052 0 : WARN_ON(direction & ~(READ | WRITE));
1053 0 : *i = (struct iov_iter){
1054 : .iter_type = ITER_KVEC,
1055 : .copy_mc = false,
1056 : .data_source = direction,
1057 : .kvec = kvec,
1058 : .nr_segs = nr_segs,
1059 : .iov_offset = 0,
1060 : .count = count
1061 : };
1062 0 : }
1063 : EXPORT_SYMBOL(iov_iter_kvec);
1064 :
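/*
 * Editor's example (sketch): an ITER_KVEC feeds kernel memory through
 * the same copy paths as user memory.  Here one kernel buffer becomes
 * the source (WRITE) side of a transfer.
 */
static void example_kvec_source(struct iov_iter *i, struct kvec *kv,
				void *buf, size_t len)
{
	kv->iov_base = buf;
	kv->iov_len = len;
	iov_iter_kvec(i, WRITE, kv, 1, len);
}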
1065 0 : void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1066 : const struct bio_vec *bvec, unsigned long nr_segs,
1067 : size_t count)
1068 : {
1069 0 : WARN_ON(direction & ~(READ | WRITE));
1070 0 : *i = (struct iov_iter){
1071 : .iter_type = ITER_BVEC,
1072 : .copy_mc = false,
1073 : .data_source = direction,
1074 : .bvec = bvec,
1075 : .nr_segs = nr_segs,
1076 : .iov_offset = 0,
1077 : .count = count
1078 : };
1079 0 : }
1080 : EXPORT_SYMBOL(iov_iter_bvec);
1081 :
1082 0 : void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1083 : struct pipe_inode_info *pipe,
1084 : size_t count)
1085 : {
1086 0 : BUG_ON(direction != READ);
1087 0 : WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1088 0 : *i = (struct iov_iter){
1089 : .iter_type = ITER_PIPE,
1090 : .data_source = false,
1091 : .pipe = pipe,
1092 0 : .head = pipe->head,
1093 : .start_head = pipe->head,
1094 : .last_offset = 0,
1095 : .count = count
1096 : };
1097 0 : }
1098 : EXPORT_SYMBOL(iov_iter_pipe);
1099 :
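/*
 * Editor's example (sketch): splice-read paths build an ITER_PIPE
 * destination; data "copied to" such an iterator lands in freshly
 * appended pipe buffers rather than in user memory.  Only READ
 * (destination) pipe iterators are supported, as the BUG_ON above shows.
 */
static void example_pipe_dest(struct iov_iter *i,
			      struct pipe_inode_info *pipe, size_t len)
{
	iov_iter_pipe(i, READ, pipe, len);
}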
1100 : /**
1101 : * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1102 : * @i: The iterator to initialise.
1103 : * @direction: The direction of the transfer.
1104 : * @xarray: The xarray to access.
1105 : * @start: The start file position.
1106 : * @count: The size of the I/O buffer in bytes.
1107 : *
1108 : * Set up an I/O iterator to either draw data out of the pages attached to an
1109 : * inode or to inject data into those pages. The caller *must* prevent the
1110 : * pages from being evicted, either by taking a reference on them or by
1111 : * locking them.
1112 : */
1113 0 : void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1114 : struct xarray *xarray, loff_t start, size_t count)
1115 : {
1116 0 : BUG_ON(direction & ~1);
1117 0 : *i = (struct iov_iter) {
1118 : .iter_type = ITER_XARRAY,
1119 : .copy_mc = false,
1120 : .data_source = direction,
1121 : .xarray = xarray,
1122 : .xarray_start = start,
1123 : .count = count,
1124 : .iov_offset = 0
1125 : };
1126 0 : }
1127 : EXPORT_SYMBOL(iov_iter_xarray);
1128 :
1129 : /**
1130 : * iov_iter_discard - Initialise an I/O iterator that discards data
1131 : * @i: The iterator to initialise.
1132 : * @direction: The direction of the transfer.
1133 : * @count: The size of the I/O buffer in bytes.
1134 : *
1135 : * Set up an I/O iterator that just discards everything that's written to it.
1136 : * It's only available as a READ iterator.
1137 : */
1138 0 : void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1139 : {
1140 0 : BUG_ON(direction != READ);
1141 0 : *i = (struct iov_iter){
1142 : .iter_type = ITER_DISCARD,
1143 : .copy_mc = false,
1144 : .data_source = false,
1145 : .count = count,
1146 : .iov_offset = 0
1147 : };
1148 0 : }
1149 : EXPORT_SYMBOL(iov_iter_discard);
1150 :
1151 0 : static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
1152 : unsigned len_mask)
1153 : {
1154 0 : size_t size = i->count;
1155 0 : size_t skip = i->iov_offset;
1156 : unsigned k;
1157 :
1158 0 : for (k = 0; k < i->nr_segs; k++, skip = 0) {
1159 0 : const struct iovec *iov = iter_iov(i) + k;
1160 0 : size_t len = iov->iov_len - skip;
1161 :
1162 0 : if (len > size)
1163 0 : len = size;
1164 0 : if (len & len_mask)
1165 : return false;
1166 0 : if ((unsigned long)(iov->iov_base + skip) & addr_mask)
1167 : return false;
1168 :
1169 0 : size -= len;
1170 0 : if (!size)
1171 : break;
1172 : }
1173 : return true;
1174 : }
1175 :
1176 0 : static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
1177 : unsigned len_mask)
1178 : {
1179 0 : size_t size = i->count;
1180 0 : unsigned skip = i->iov_offset;
1181 : unsigned k;
1182 :
1183 0 : for (k = 0; k < i->nr_segs; k++, skip = 0) {
1184 0 : size_t len = i->bvec[k].bv_len - skip;
1185 :
1186 0 : if (len > size)
1187 0 : len = size;
1188 0 : if (len & len_mask)
1189 : return false;
1190 0 : if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
1191 : return false;
1192 :
1193 0 : size -= len;
1194 0 : if (!size)
1195 : break;
1196 : }
1197 : return true;
1198 : }
1199 :
1200 : /**
1201 : * iov_iter_is_aligned() - Check if the address and length of each segment
1202 : * are aligned to the given masks.
1203 : *
1204 : * @i: &struct iov_iter to check
1205 : * @addr_mask: bit mask to check against the iov element's addresses
1206 : * @len_mask: bit mask to check against the iov element's lengths
1207 : *
1208 : * Return: false if any addresses or lengths intersect with the provided masks
1209 : */
1210 0 : bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
1211 : unsigned len_mask)
1212 : {
1213 0 : if (likely(iter_is_ubuf(i))) {
1214 0 : if (i->count & len_mask)
1215 : return false;
1216 0 : if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
1217 : return false;
1218 0 : return true;
1219 : }
1220 :
1221 0 : if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1222 0 : return iov_iter_aligned_iovec(i, addr_mask, len_mask);
1223 :
1224 0 : if (iov_iter_is_bvec(i))
1225 0 : return iov_iter_aligned_bvec(i, addr_mask, len_mask);
1226 :
1227 0 : if (iov_iter_is_pipe(i)) {
1228 0 : size_t size = i->count;
1229 :
1230 0 : if (size & len_mask)
1231 : return false;
1232 0 : if (size && i->last_offset > 0) {
1233 0 : if (i->last_offset & addr_mask)
1234 : return false;
1235 : }
1236 :
1237 0 : return true;
1238 : }
1239 :
1240 0 : if (iov_iter_is_xarray(i)) {
1241 0 : if (i->count & len_mask)
1242 : return false;
1243 0 : if ((i->xarray_start + i->iov_offset) & addr_mask)
1244 : return false;
1245 : }
1246 :
1247 0 : return true;
1248 : }
1249 : EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
1250 :
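/*
 * Editor's example (sketch): a direct-I/O gate checking that every
 * segment is logical-block aligned.  The masks are "low bits that must
 * be zero"; lblk_size is assumed to be a power of two.
 */
static bool example_dio_aligned(const struct iov_iter *i,
				unsigned int lblk_size)
{
	return iov_iter_is_aligned(i, lblk_size - 1, lblk_size - 1);
}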
1251 0 : static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1252 : {
1253 0 : unsigned long res = 0;
1254 0 : size_t size = i->count;
1255 0 : size_t skip = i->iov_offset;
1256 : unsigned k;
1257 :
1258 0 : for (k = 0; k < i->nr_segs; k++, skip = 0) {
1259 0 : const struct iovec *iov = iter_iov(i) + k;
1260 0 : size_t len = iov->iov_len - skip;
1261 0 : if (len) {
1262 0 : res |= (unsigned long)iov->iov_base + skip;
1263 0 : if (len > size)
1264 0 : len = size;
1265 0 : res |= len;
1266 0 : size -= len;
1267 0 : if (!size)
1268 : break;
1269 : }
1270 : }
1271 0 : return res;
1272 : }
1273 :
1274 0 : static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1275 : {
1276 0 : unsigned res = 0;
1277 0 : size_t size = i->count;
1278 0 : unsigned skip = i->iov_offset;
1279 : unsigned k;
1280 :
1281 0 : for (k = 0; k < i->nr_segs; k++, skip = 0) {
1282 0 : size_t len = i->bvec[k].bv_len - skip;
1283 0 : res |= (unsigned long)i->bvec[k].bv_offset + skip;
1284 0 : if (len > size)
1285 0 : len = size;
1286 0 : res |= len;
1287 0 : size -= len;
1288 0 : if (!size)
1289 : break;
1290 : }
1291 0 : return res;
1292 : }
1293 :
1294 0 : unsigned long iov_iter_alignment(const struct iov_iter *i)
1295 : {
1296 0 : if (likely(iter_is_ubuf(i))) {
1297 0 : size_t size = i->count;
1298 0 : if (size)
1299 0 : return ((unsigned long)i->ubuf + i->iov_offset) | size;
1300 : return 0;
1301 : }
1302 :
1303 : /* iovec and kvec have identical layouts */
1304 0 : if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1305 0 : return iov_iter_alignment_iovec(i);
1306 :
1307 0 : if (iov_iter_is_bvec(i))
1308 0 : return iov_iter_alignment_bvec(i);
1309 :
1310 0 : if (iov_iter_is_pipe(i)) {
1311 0 : size_t size = i->count;
1312 :
1313 0 : if (size && i->last_offset > 0)
1314 0 : return size | i->last_offset;
1315 : return size;
1316 : }
1317 :
1318 0 : if (iov_iter_is_xarray(i))
1319 0 : return (i->xarray_start + i->iov_offset) | i->count;
1320 :
1321 : return 0;
1322 : }
1323 : EXPORT_SYMBOL(iov_iter_alignment);
1324 :
1325 0 : unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1326 : {
1327 0 : unsigned long res = 0;
1328 0 : unsigned long v = 0;
1329 0 : size_t size = i->count;
1330 : unsigned k;
1331 :
1332 0 : if (iter_is_ubuf(i))
1333 : return 0;
1334 :
1335 0 : if (WARN_ON(!iter_is_iovec(i)))
1336 : return ~0U;
1337 :
1338 0 : for (k = 0; k < i->nr_segs; k++) {
1339 0 : const struct iovec *iov = iter_iov(i) + k;
1340 0 : if (iov->iov_len) {
1341 0 : unsigned long base = (unsigned long)iov->iov_base;
1342 0 : if (v) // if not the first one
1343 0 : res |= base | v; // this start | previous end
1344 0 : v = base + iov->iov_len;
1345 0 : if (size <= iov->iov_len)
1346 : break;
1347 0 : size -= iov->iov_len;
1348 : }
1349 : }
1350 : return res;
1351 : }
1352 : EXPORT_SYMBOL(iov_iter_gap_alignment);
1353 :
1354 0 : static int want_pages_array(struct page ***res, size_t size,
1355 : size_t start, unsigned int maxpages)
1356 : {
1357 0 : unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
1358 :
1359 0 : if (count > maxpages)
1360 0 : count = maxpages;
1361 0 : WARN_ON(!count); // caller should've prevented that
1362 0 : if (!*res) {
1363 0 : *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
1364 0 : if (!*res)
1365 : return 0;
1366 : }
1367 0 : return count;
1368 : }
1369 :
1370 0 : static ssize_t pipe_get_pages(struct iov_iter *i,
1371 : struct page ***pages, size_t maxsize, unsigned maxpages,
1372 : size_t *start)
1373 : {
1374 : unsigned int npages, count, off, chunk;
1375 : struct page **p;
1376 : size_t left;
1377 :
1378 0 : if (!sanity(i))
1379 : return -EFAULT;
1380 :
1381 0 : *start = off = pipe_npages(i, &npages);
1382 0 : if (!npages)
1383 : return -EFAULT;
1384 0 : count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
1385 0 : if (!count)
1386 : return -ENOMEM;
1387 0 : p = *pages;
1388 0 : for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
1389 0 : struct page *page = append_pipe(i, left, &off);
1390 0 : if (!page)
1391 : break;
1392 0 : chunk = min_t(size_t, left, PAGE_SIZE - off);
1393 0 : get_page(*p++ = page);
1394 : }
1395 0 : if (!npages)
1396 : return -EFAULT;
1397 0 : return maxsize - left;
1398 : }
1399 :
1400 0 : static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1401 : pgoff_t index, unsigned int nr_pages)
1402 : {
1403 0 : XA_STATE(xas, xa, index);
1404 : struct page *page;
1405 0 : unsigned int ret = 0;
1406 :
1407 : rcu_read_lock();
1408 0 : for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1409 0 : if (xas_retry(&xas, page))
1410 0 : continue;
1411 :
1412 : /* Has the page moved or been split? */
1413 0 : if (unlikely(page != xas_reload(&xas))) {
1414 0 : xas_reset(&xas);
1415 0 : continue;
1416 : }
1417 :
1418 0 : pages[ret] = find_subpage(page, xas.xa_index);
1419 0 : get_page(pages[ret]);
1420 0 : if (++ret == nr_pages)
1421 : break;
1422 : }
1423 : rcu_read_unlock();
1424 0 : return ret;
1425 : }
1426 :
1427 0 : static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1428 : struct page ***pages, size_t maxsize,
1429 : unsigned maxpages, size_t *_start_offset)
1430 : {
1431 : unsigned nr, offset, count;
1432 : pgoff_t index;
1433 : loff_t pos;
1434 :
1435 0 : pos = i->xarray_start + i->iov_offset;
1436 0 : index = pos >> PAGE_SHIFT;
1437 0 : offset = pos & ~PAGE_MASK;
1438 0 : *_start_offset = offset;
1439 :
1440 0 : count = want_pages_array(pages, maxsize, offset, maxpages);
1441 0 : if (!count)
1442 : return -ENOMEM;
1443 0 : nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
1444 0 : if (nr == 0)
1445 : return 0;
1446 :
1447 0 : maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1448 0 : i->iov_offset += maxsize;
1449 0 : i->count -= maxsize;
1450 0 : return maxsize;
1451 : }
1452 :
1453 : /* must be called on a non-empty ITER_UBUF or ITER_IOVEC iterator */
1454 0 : static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1455 : {
1456 : size_t skip;
1457 : long k;
1458 :
1459 0 : if (iter_is_ubuf(i))
1460 0 : return (unsigned long)i->ubuf + i->iov_offset;
1461 :
1462 0 : for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1463 0 : const struct iovec *iov = iter_iov(i) + k;
1464 0 : size_t len = iov->iov_len - skip;
1465 :
1466 0 : if (unlikely(!len))
1467 0 : continue;
1468 0 : if (*size > len)
1469 0 : *size = len;
1470 0 : return (unsigned long)iov->iov_base + skip;
1471 : }
1472 0 : BUG(); // if it had been empty, we wouldn't get called
1473 : }
1474 :
1475 : /* must be called on a non-empty ITER_BVEC iterator */
1476 : static struct page *first_bvec_segment(const struct iov_iter *i,
1477 : size_t *size, size_t *start)
1478 : {
1479 : struct page *page;
1480 0 : size_t skip = i->iov_offset, len;
1481 :
1482 0 : len = i->bvec->bv_len - skip;
1483 0 : if (*size > len)
1484 0 : *size = len;
1485 0 : skip += i->bvec->bv_offset;
1486 0 : page = i->bvec->bv_page + skip / PAGE_SIZE;
1487 0 : *start = skip % PAGE_SIZE;
1488 : return page;
1489 : }
1490 :
1491 0 : static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
1492 : struct page ***pages, size_t maxsize,
1493 : unsigned int maxpages, size_t *start,
1494 : iov_iter_extraction_t extraction_flags)
1495 : {
1496 0 : unsigned int n, gup_flags = 0;
1497 :
1498 0 : if (maxsize > i->count)
1499 0 : maxsize = i->count;
1500 0 : if (!maxsize)
1501 : return 0;
1502 0 : if (maxsize > MAX_RW_COUNT)
1503 0 : maxsize = MAX_RW_COUNT;
1504 0 : if (extraction_flags & ITER_ALLOW_P2PDMA)
1505 0 : gup_flags |= FOLL_PCI_P2PDMA;
1506 :
1507 0 : if (likely(user_backed_iter(i))) {
1508 : unsigned long addr;
1509 : int res;
1510 :
1511 0 : if (iov_iter_rw(i) != WRITE)
1512 0 : gup_flags |= FOLL_WRITE;
1513 0 : if (i->nofault)
1514 0 : gup_flags |= FOLL_NOFAULT;
1515 :
1516 0 : addr = first_iovec_segment(i, &maxsize);
1517 0 : *start = addr % PAGE_SIZE;
1518 0 : addr &= PAGE_MASK;
1519 0 : n = want_pages_array(pages, maxsize, *start, maxpages);
1520 0 : if (!n)
1521 : return -ENOMEM;
1522 0 : res = get_user_pages_fast(addr, n, gup_flags, *pages);
1523 0 : if (unlikely(res <= 0))
1524 0 : return res;
1525 0 : maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1526 0 : iov_iter_advance(i, maxsize);
1527 0 : return maxsize;
1528 : }
1529 0 : if (iov_iter_is_bvec(i)) {
1530 : struct page **p;
1531 : struct page *page;
1532 :
1533 0 : page = first_bvec_segment(i, &maxsize, start);
1534 0 : n = want_pages_array(pages, maxsize, *start, maxpages);
1535 0 : if (!n)
1536 : return -ENOMEM;
1537 0 : p = *pages;
1538 0 : for (int k = 0; k < n; k++)
1539 0 : get_page(p[k] = page + k);
1540 0 : maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1541 0 : i->count -= maxsize;
1542 0 : i->iov_offset += maxsize;
1543 0 : if (i->iov_offset == i->bvec->bv_len) {
1544 0 : i->iov_offset = 0;
1545 0 : i->bvec++;
1546 0 : i->nr_segs--;
1547 : }
1548 0 : return maxsize;
1549 : }
1550 0 : if (iov_iter_is_pipe(i))
1551 0 : return pipe_get_pages(i, pages, maxsize, maxpages, start);
1552 0 : if (iov_iter_is_xarray(i))
1553 0 : return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1554 : return -EFAULT;
1555 : }
1556 :
1557 0 : ssize_t iov_iter_get_pages(struct iov_iter *i,
1558 : struct page **pages, size_t maxsize, unsigned maxpages,
1559 : size_t *start, iov_iter_extraction_t extraction_flags)
1560 : {
1561 0 : if (!maxpages)
1562 : return 0;
1563 0 : BUG_ON(!pages);
1564 :
1565 0 : return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
1566 : start, extraction_flags);
1567 : }
1568 : EXPORT_SYMBOL_GPL(iov_iter_get_pages);
1569 :
1570 0 : ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
1571 : size_t maxsize, unsigned maxpages, size_t *start)
1572 : {
1573 0 : return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
1574 : }
1575 : EXPORT_SYMBOL(iov_iter_get_pages2);
1576 :
1577 0 : ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1578 : struct page ***pages, size_t maxsize,
1579 : size_t *start, iov_iter_extraction_t extraction_flags)
1580 : {
1581 : ssize_t len;
1582 :
1583 0 : *pages = NULL;
1584 :
1585 0 : len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
1586 : extraction_flags);
1587 0 : if (len <= 0) {
1588 0 : kvfree(*pages);
1589 0 : *pages = NULL;
1590 : }
1591 0 : return len;
1592 : }
1593 : EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);
1594 :
1595 0 : ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
1596 : struct page ***pages, size_t maxsize, size_t *start)
1597 : {
1598 0 : return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
1599 : }
1600 : EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
1601 :
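/*
 * Editor's example (sketch): grabbing page references for I/O and the
 * matching teardown.  On success the iterator has been advanced past
 * the returned range; the caller owns one reference per page plus the
 * kvmalloc'ed array.
 */
static ssize_t example_pin_for_io(struct iov_iter *i, size_t maxsize,
				  struct page ***pagesp, size_t *offp)
{
	ssize_t len = iov_iter_get_pages_alloc2(i, pagesp, maxsize, offp);

	return len ? len : -EFAULT;	/* 0 bytes: nothing to pin */
}

static void example_unpin(struct page **pages, size_t len, size_t off)
{
	unsigned int npages = DIV_ROUND_UP(off + len, PAGE_SIZE);

	for (unsigned int k = 0; k < npages; k++)
		put_page(pages[k]);
	kvfree(pages);
}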
1602 0 : size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1603 : struct iov_iter *i)
1604 : {
1605 : __wsum sum, next;
1606 0 : sum = *csum;
1607 0 : if (WARN_ON_ONCE(!i->data_source))
1608 : return 0;
1609 :
1610 0 : iterate_and_advance(i, bytes, base, len, off, ({
1611 : next = csum_and_copy_from_user(base, addr + off, len);
1612 : sum = csum_block_add(sum, next, off);
1613 : next ? 0 : len;
1614 : }), ({
1615 : sum = csum_and_memcpy(addr + off, base, len, sum, off);
1616 : })
1617 : )
1618 0 : *csum = sum;
1619 0 : return bytes;
1620 : }
1621 : EXPORT_SYMBOL(csum_and_copy_from_iter);
1622 :
1623 0 : size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1624 : struct iov_iter *i)
1625 : {
1626 0 : struct csum_state *csstate = _csstate;
1627 : __wsum sum, next;
1628 :
1629 0 : if (WARN_ON_ONCE(i->data_source))
1630 : return 0;
1631 0 : if (unlikely(iov_iter_is_discard(i))) {
1632 : // can't use csum_and_memcpy() for that one - data is not copied
1633 0 : csstate->csum = csum_block_add(csstate->csum,
1634 : csum_partial(addr, bytes, 0),
1635 0 : csstate->off);
1636 0 : csstate->off += bytes;
1637 0 : return bytes;
1638 : }
1639 :
1640 0 : sum = csum_shift(csstate->csum, csstate->off);
1641 0 : if (unlikely(iov_iter_is_pipe(i)))
1642 0 : bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1643 0 : else iterate_and_advance(i, bytes, base, len, off, ({
1644 : next = csum_and_copy_to_user(addr + off, base, len);
1645 : sum = csum_block_add(sum, next, off);
1646 : next ? 0 : len;
1647 : }), ({
1648 : sum = csum_and_memcpy(base, addr + off, len, sum, off);
1649 : })
1650 : )
1651 0 : csstate->csum = csum_shift(sum, csstate->off);
1652 0 : csstate->off += bytes;
1653 0 : return bytes;
1654 : }
1655 : EXPORT_SYMBOL(csum_and_copy_to_iter);
1656 :
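/*
 * Editor's example (sketch): networking-style checksum-and-copy.  A
 * struct csum_state carries the running checksum and the offset at
 * which these bytes sit in the overall datagram ('pos' here).
 */
static size_t example_csum_copy(const void *data, size_t len,
				struct iov_iter *to, __wsum *sum, size_t pos)
{
	struct csum_state cs = { .csum = *sum, .off = pos };
	size_t n = csum_and_copy_to_iter(data, len, &cs, to);

	*sum = cs.csum;
	return n;
}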
1657 0 : size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1658 : struct iov_iter *i)
1659 : {
1660 : #ifdef CONFIG_CRYPTO_HASH
1661 : struct ahash_request *hash = hashp;
1662 : struct scatterlist sg;
1663 : size_t copied;
1664 :
1665 : copied = copy_to_iter(addr, bytes, i);
1666 : sg_init_one(&sg, addr, copied);
1667 : ahash_request_set_crypt(hash, &sg, NULL, copied);
1668 : crypto_ahash_update(hash);
1669 : return copied;
1670 : #else
1671 0 : return 0;
1672 : #endif
1673 : }
1674 : EXPORT_SYMBOL(hash_and_copy_to_iter);
1675 :
1676 0 : static int iov_npages(const struct iov_iter *i, int maxpages)
1677 : {
1678 0 : size_t skip = i->iov_offset, size = i->count;
1679 : const struct iovec *p;
1680 0 : int npages = 0;
1681 :
1682 0 : for (p = iter_iov(i); size; skip = 0, p++) {
1683 0 : unsigned offs = offset_in_page(p->iov_base + skip);
1684 0 : size_t len = min(p->iov_len - skip, size);
1685 :
1686 0 : if (len) {
1687 0 : size -= len;
1688 0 : npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1689 0 : if (unlikely(npages > maxpages))
1690 : return maxpages;
1691 : }
1692 : }
1693 : return npages;
1694 : }
1695 :
1696 : static int bvec_npages(const struct iov_iter *i, int maxpages)
1697 : {
1698 0 : size_t skip = i->iov_offset, size = i->count;
1699 : const struct bio_vec *p;
1700 0 : int npages = 0;
1701 :
1702 0 : for (p = i->bvec; size; skip = 0, p++) {
1703 0 : unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1704 0 : size_t len = min(p->bv_len - skip, size);
1705 :
1706 0 : size -= len;
1707 0 : npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1708 0 : if (unlikely(npages > maxpages))
1709 : return maxpages;
1710 : }
1711 : return npages;
1712 : }
1713 :
1714 0 : int iov_iter_npages(const struct iov_iter *i, int maxpages)
1715 : {
1716 0 : if (unlikely(!i->count))
1717 : return 0;
1718 0 : if (likely(iter_is_ubuf(i))) {
1719 0 : unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
1720 0 : int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
1721 0 : return min(npages, maxpages);
1722 : }
1723 : /* iovec and kvec have identical layouts */
1724 0 : if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1725 0 : return iov_npages(i, maxpages);
1726 0 : if (iov_iter_is_bvec(i))
1727 : return bvec_npages(i, maxpages);
1728 0 : if (iov_iter_is_pipe(i)) {
1729 : int npages;
1730 :
1731 0 : if (!sanity(i))
1732 : return 0;
1733 :
1734 0 : pipe_npages(i, &npages);
1735 0 : return min(npages, maxpages);
1736 : }
1737 0 : if (iov_iter_is_xarray(i)) {
1738 0 : unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1739 0 : int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1740 0 : return min(npages, maxpages);
1741 : }
1742 : return 0;
1743 : }
1744 : EXPORT_SYMBOL(iov_iter_npages);
1745 :
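/*
 * Editor's example (sketch): iov_iter_npages() bounds an allocation,
 * e.g. the number of bio_vecs a request can need.  BIO_MAX_VECS comes
 * from <linux/bio.h> and is assumed available here.
 */
static unsigned int example_nr_vecs(const struct iov_iter *i)
{
	return iov_iter_npages(i, BIO_MAX_VECS);
}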
1746 0 : const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1747 : {
1748 0 : *new = *old;
1749 0 : if (unlikely(iov_iter_is_pipe(new))) {
1750 0 : WARN_ON(1);
1751 0 : return NULL;
1752 : }
1753 0 : if (iov_iter_is_bvec(new))
1754 0 : return new->bvec = kmemdup(new->bvec,
1755 0 : new->nr_segs * sizeof(struct bio_vec),
1756 : flags);
1757 0 : else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
1758 : /* iovec and kvec have identical layout */
1759 0 : return new->__iov = kmemdup(new->__iov,
1760 0 : new->nr_segs * sizeof(struct iovec),
1761 : flags);
1762 : return NULL;
1763 : }
1764 : EXPORT_SYMBOL(dup_iter);
1765 :
1766 0 : static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
1767 : const struct iovec __user *uvec, unsigned long nr_segs)
1768 : {
1769 0 : const struct compat_iovec __user *uiov =
1770 : (const struct compat_iovec __user *)uvec;
1771 0 : int ret = -EFAULT, i;
1772 :
1773 0 : if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1774 : return -EFAULT;
1775 :
1776 0 : for (i = 0; i < nr_segs; i++) {
1777 : compat_uptr_t buf;
1778 : compat_ssize_t len;
1779 :
1780 0 : unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1781 0 : unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1782 :
1783 : /* check for compat_size_t not fitting in compat_ssize_t ... */
1784 0 : if (len < 0) {
1785 : ret = -EINVAL;
1786 : goto uaccess_end;
1787 : }
1788 0 : iov[i].iov_base = compat_ptr(buf);
1789 0 : iov[i].iov_len = len;
1790 : }
1791 :
1792 : ret = 0;
1793 : uaccess_end:
1794 : user_access_end();
1795 : return ret;
1796 : }
1797 :
1798 0 : static int copy_iovec_from_user(struct iovec *iov,
1799 : const struct iovec __user *uiov, unsigned long nr_segs)
1800 : {
1801 0 : int ret = -EFAULT;
1802 :
1803 0 : if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1804 : return -EFAULT;
1805 :
1806 : do {
1807 : void __user *buf;
1808 : ssize_t len;
1809 :
1810 0 : unsafe_get_user(len, &uiov->iov_len, uaccess_end);
1811 0 : unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
1812 :
1813 : /* check for size_t not fitting in ssize_t ... */
1814 0 : if (unlikely(len < 0)) {
1815 : ret = -EINVAL;
1816 : goto uaccess_end;
1817 : }
1818 0 : iov->iov_base = buf;
1819 0 : iov->iov_len = len;
1820 :
1821 0 : uiov++; iov++;
1822 0 : } while (--nr_segs);
1823 :
1824 : ret = 0;
1825 : uaccess_end:
1826 : user_access_end();
1827 : return ret;
1828 : }
1829 :
1830 0 : struct iovec *iovec_from_user(const struct iovec __user *uvec,
1831 : unsigned long nr_segs, unsigned long fast_segs,
1832 : struct iovec *fast_iov, bool compat)
1833 : {
1834 0 : struct iovec *iov = fast_iov;
1835 : int ret;
1836 :
1837 : /*
1838 : * SuS says "The readv() function *may* fail if the iovcnt argument was
1839 : * less than or equal to 0, or greater than {IOV_MAX}." Linux has
1840 : * traditionally returned zero for zero segments, so...
1841 : */
1842 0 : if (nr_segs == 0)
1843 : return iov;
1844 0 : if (nr_segs > UIO_MAXIOV)
1845 : return ERR_PTR(-EINVAL);
1846 0 : if (nr_segs > fast_segs) {
1847 0 : iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1848 0 : if (!iov)
1849 : return ERR_PTR(-ENOMEM);
1850 : }
1851 :
1852 0 : if (unlikely(compat))
1853 0 : ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1854 : else
1855 0 : ret = copy_iovec_from_user(iov, uvec, nr_segs);
1856 0 : if (ret) {
1857 0 : if (iov != fast_iov)
1858 0 : kfree(iov);
1859 0 : return ERR_PTR(ret);
1860 : }
1861 :
1862 : return iov;
1863 : }
1864 :
1865 : /*
1866 : * Single segment iovec supplied by the user, import it as ITER_UBUF.
1867 : */
1868 0 : static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
1869 : struct iovec **iovp, struct iov_iter *i,
1870 : bool compat)
1871 : {
1872 0 : struct iovec *iov = *iovp;
1873 : ssize_t ret;
1874 :
1875 0 : if (compat)
1876 0 : ret = copy_compat_iovec_from_user(iov, uvec, 1);
1877 : else
1878 0 : ret = copy_iovec_from_user(iov, uvec, 1);
1879 0 : if (unlikely(ret))
1880 : return ret;
1881 :
1882 0 : ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
1883 0 : if (unlikely(ret))
1884 : return ret;
1885 0 : *iovp = NULL;
1886 0 : return i->count;
1887 : }
1888 :
1889 0 : ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1890 : unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1891 : struct iov_iter *i, bool compat)
1892 : {
1893 0 : ssize_t total_len = 0;
1894 : unsigned long seg;
1895 : struct iovec *iov;
1896 :
1897 0 : if (nr_segs == 1)
1898 0 : return __import_iovec_ubuf(type, uvec, iovp, i, compat);
1899 :
1900 0 : iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1901 0 : if (IS_ERR(iov)) {
1902 0 : *iovp = NULL;
1903 0 : return PTR_ERR(iov);
1904 : }
1905 :
1906 : /*
1907 : * According to the Single Unix Specification we should return EINVAL if
1908 : * an element length is < 0 when cast to ssize_t or if the total length
1909 : * would overflow the ssize_t return value of the system call.
1910 : *
1911 : * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1912 : * overflow case.
1913 : */
1914 0 : for (seg = 0; seg < nr_segs; seg++) {
1915 0 : ssize_t len = (ssize_t)iov[seg].iov_len;
1916 :
1917 0 : if (!access_ok(iov[seg].iov_base, len)) {
1918 0 : if (iov != *iovp)
1919 0 : kfree(iov);
1920 0 : *iovp = NULL;
1921 0 : return -EFAULT;
1922 : }
1923 :
1924 0 : if (len > MAX_RW_COUNT - total_len) {
1925 0 : len = MAX_RW_COUNT - total_len;
1926 0 : iov[seg].iov_len = len;
1927 : }
1928 0 : total_len += len;
1929 : }
1930 :
1931 0 : iov_iter_init(i, type, iov, nr_segs, total_len);
1932 0 : if (iov == *iovp)
1933 0 : *iovp = NULL;
1934 : else
1935 0 : *iovp = iov;
1936 : return total_len;
1937 : }
1938 :
1939 : /**
1940 : * import_iovec() - Copy an array of &struct iovec from userspace
1941 : * into the kernel, check that it is valid, and initialize a new
1942 : * &struct iov_iter iterator to access it.
1943 : *
1944 : * @type: One of %READ or %WRITE.
1945 : * @uvec: Pointer to the userspace array.
1946 : * @nr_segs: Number of elements in userspace array.
1947 : * @fast_segs: Number of elements in *@iovp.
1948 : * @iovp: (input and output parameter) Pointer to pointer to (usually small
1949 : * on-stack) kernel array.
1950 : * @i: Pointer to iterator that will be initialized on success.
1951 : *
1952 : * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1953 : * then this function places %NULL in *@iovp on return. Otherwise, a new
1954 : * array will be allocated and the result placed in *@iovp. This means that
1955 : * the caller may call kfree() on *@iovp regardless of whether the small
1956 : * on-stack array was used or not (and regardless of whether this function
1957 : * returns an error or not).
1958 : *
1959 : * Return: Negative error code on error, bytes imported on success
1960 : */
1961 0 : ssize_t import_iovec(int type, const struct iovec __user *uvec,
1962 : unsigned nr_segs, unsigned fast_segs,
1963 : struct iovec **iovp, struct iov_iter *i)
1964 : {
1965 0 : return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1966 : in_compat_syscall());
1967 : }
1968 : EXPORT_SYMBOL(import_iovec);
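
/*
 * A minimal sketch of the import_iovec() pattern for a readv()-style path.
 * example_readv_common() is a hypothetical name, and the vfs_iter_read()
 * step is one illustrative consumer of the iterator.  Note that kfree(iov)
 * is always safe on exit: *iovp is set to %NULL when the on-stack array
 * was used.
 */
static ssize_t example_readv_common(struct file *file,
				    const struct iovec __user *uvec,
				    unsigned long nr_segs, loff_t *pos)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	/* ITER_DEST == READ: data will be copied to the user buffers */
	ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	ret = vfs_iter_read(file, &iter, pos, 0);

	kfree(iov);
	return ret;
}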
1969 :
1970 0 : int import_single_range(int rw, void __user *buf, size_t len,
1971 : struct iovec *iov, struct iov_iter *i)
1972 : {
1973 0 : if (len > MAX_RW_COUNT)
1974 0 : len = MAX_RW_COUNT;
1975 0 : if (unlikely(!access_ok(buf, len)))
1976 : return -EFAULT;
1977 :
1978 0 : iov_iter_ubuf(i, rw, buf, len);
1979 0 : return 0;
1980 : }
1981 : EXPORT_SYMBOL(import_single_range);
1982 :
1983 0 : int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
1984 : {
1985 0 : if (len > MAX_RW_COUNT)
1986 0 : len = MAX_RW_COUNT;
1987 0 : if (unlikely(!access_ok(buf, len)))
1988 : return -EFAULT;
1989 :
1990 0 : iov_iter_ubuf(i, rw, buf, len);
1991 0 : return 0;
1992 : }
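
/*
 * A minimal sketch of the single-buffer path for a write()-style caller,
 * where the user buffer is the data source.  example_import_write() is a
 * hypothetical name for illustration only.
 */
static int example_import_write(const char __user *buf, size_t len,
				struct iov_iter *iter)
{
	/* ITER_SOURCE == WRITE: the iterator will be read from */
	int ret = import_ubuf(ITER_SOURCE, (void __user *)buf, len, iter);

	if (ret)
		return ret;
	/* iter->count is now min(len, MAX_RW_COUNT) */
	return 0;
}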
1993 :
1994 : /**
1995 : * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1996 : * iov_iter_save_state() was called.
1997 : *
1998 : * @i: &struct iov_iter to restore
1999 : * @state: state to restore from
2000 : *
2001 : * Used after iov_iter_save_state() to restore @i, if operations may
2002 : * have advanced it.
2003 : *
2004 : * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
2005 : */
2006 0 : void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
2007 : {
2008 0 : if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
2009 0 : !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
2010 : return;
2011 0 : i->iov_offset = state->iov_offset;
2012 0 : i->count = state->count;
2013 0 : if (iter_is_ubuf(i))
2014 : return;
2015 : /*
2016 : * For the *vec iters, nr_segs + iov is constant - if we increment
2017 : * the vec, then we also decrement the nr_segs count. Hence we don't
2018 : * need to track both of these, just one is enough and we can derive
2019 : * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
2020 : * size, so we can just increment the iov pointer as they share a union.
2021 : * ITER_BVEC _may_ be the same size on some archs, but on others it is
2022 : * not. Be safe and handle it separately.
2023 : */
2024 : BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
2025 0 : if (iov_iter_is_bvec(i))
2026 0 : i->bvec -= state->nr_segs - i->nr_segs;
2027 : else
2028 0 : i->__iov -= state->nr_segs - i->nr_segs;
2029 0 : i->nr_segs = state->nr_segs;
2030 : }
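
/*
 * A minimal sketch pairing iov_iter_save_state() with iov_iter_restore():
 * copy_to_iter() advances the iterator, and on a short copy the saved
 * state rewinds it so the caller can fault pages in and retry.
 * example_copy_or_rewind() is a hypothetical name for illustration.
 */
static ssize_t example_copy_or_rewind(void *kbuf, size_t len,
				      struct iov_iter *iter)
{
	struct iov_iter_state state;
	size_t copied;

	iov_iter_save_state(iter, &state);
	copied = copy_to_iter(kbuf, len, iter);	/* advances iter by 'copied' */
	if (copied != len) {
		iov_iter_restore(iter, &state);	/* rewind to the saved point */
		return -EFAULT;
	}
	return copied;
}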
2031 :
2032 : /*
2033 : * Extract a list of contiguous pages from an ITER_XARRAY iterator. This does not
2034 : * get references on the pages, nor does it get a pin on them.
2035 : */
2036 0 : static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
2037 : struct page ***pages, size_t maxsize,
2038 : unsigned int maxpages,
2039 : iov_iter_extraction_t extraction_flags,
2040 : size_t *offset0)
2041 : {
2042 : struct page *page, **p;
2043 0 : unsigned int nr = 0, offset;
2044 0 : loff_t pos = i->xarray_start + i->iov_offset;
2045 0 : pgoff_t index = pos >> PAGE_SHIFT;
2046 0 : XA_STATE(xas, i->xarray, index);
2047 :
2048 0 : offset = pos & ~PAGE_MASK;
2049 0 : *offset0 = offset;
2050 :
2051 0 : maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2052 0 : if (!maxpages)
2053 : return -ENOMEM;
2054 0 : p = *pages;
2055 :
2056 : rcu_read_lock();
2057 0 : for (page = xas_load(&xas); page; page = xas_next(&xas)) {
2058 0 : if (xas_retry(&xas, page))
2059 0 : continue;
2060 :
2061 : /* Has the page moved or been split? */
2062 0 : if (unlikely(page != xas_reload(&xas))) {
2063 0 : xas_reset(&xas);
2064 0 : continue;
2065 : }
2066 :
2067 0 : p[nr++] = find_subpage(page, xas.xa_index);
2068 0 : if (nr == maxpages)
2069 : break;
2070 : }
2071 : rcu_read_unlock();
2072 :
2073 0 : maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
2074 0 : iov_iter_advance(i, maxsize);
2075 0 : return maxsize;
2076 : }
2077 :
2078 : /*
2079 : * Extract a list of contiguous pages from an ITER_BVEC iterator. This does
2080 : * not get references on the pages, nor does it get a pin on them.
2081 : */
2082 0 : static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
2083 : struct page ***pages, size_t maxsize,
2084 : unsigned int maxpages,
2085 : iov_iter_extraction_t extraction_flags,
2086 : size_t *offset0)
2087 : {
2088 : struct page **p, *page;
2089 0 : size_t skip = i->iov_offset, offset;
2090 : int k;
2091 :
2092 : for (;;) {
2093 0 : if (i->nr_segs == 0)
2094 : return 0;
2095 0 : maxsize = min(maxsize, i->bvec->bv_len - skip);
2096 0 : if (maxsize)
2097 : break;
2098 0 : i->iov_offset = 0;
2099 0 : i->nr_segs--;
2100 0 : i->bvec++;
2101 0 : skip = 0;
2102 : }
2103 :
2104 0 : skip += i->bvec->bv_offset;
2105 0 : page = i->bvec->bv_page + skip / PAGE_SIZE;
2106 0 : offset = skip % PAGE_SIZE;
2107 0 : *offset0 = offset;
2108 :
2109 0 : maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2110 0 : if (!maxpages)
2111 : return -ENOMEM;
2112 0 : p = *pages;
2113 0 : for (k = 0; k < maxpages; k++)
2114 0 : p[k] = page + k;
2115 :
2116 0 : maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
2117 0 : iov_iter_advance(i, maxsize);
2118 0 : return maxsize;
2119 : }
2120 :
2121 : /*
2122 : * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
2123 : * This does not get references on the pages, nor does it get a pin on them.
2124 : */
2125 0 : static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
2126 : struct page ***pages, size_t maxsize,
2127 : unsigned int maxpages,
2128 : iov_iter_extraction_t extraction_flags,
2129 : size_t *offset0)
2130 : {
2131 : struct page **p, *page;
2132 : const void *kaddr;
2133 0 : size_t skip = i->iov_offset, offset, len;
2134 : int k;
2135 :
2136 : for (;;) {
2137 0 : if (i->nr_segs == 0)
2138 : return 0;
2139 0 : maxsize = min(maxsize, i->kvec->iov_len - skip);
2140 0 : if (maxsize)
2141 : break;
2142 0 : i->iov_offset = 0;
2143 0 : i->nr_segs--;
2144 0 : i->kvec++;
2145 0 : skip = 0;
2146 : }
2147 :
2148 0 : kaddr = i->kvec->iov_base + skip;
2149 0 : offset = (unsigned long)kaddr & ~PAGE_MASK;
2150 0 : *offset0 = offset;
2151 :
2152 0 : maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2153 0 : if (!maxpages)
2154 : return -ENOMEM;
2155 0 : p = *pages;
2156 :
2157 0 : kaddr -= offset;
2158 0 : len = offset + maxsize;
2159 0 : for (k = 0; k < maxpages; k++) {
2160 0 : size_t seg = min_t(size_t, len, PAGE_SIZE);
2161 :
2162 0 : if (is_vmalloc_or_module_addr(kaddr))
2163 0 : page = vmalloc_to_page(kaddr);
2164 : else
2165 0 : page = virt_to_page(kaddr);
2166 :
2167 0 : p[k] = page;
2168 0 : len -= seg;
2169 0 : kaddr += PAGE_SIZE;
2170 : }
2171 :
2172 0 : maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
2173 0 : iov_iter_advance(i, maxsize);
2174 0 : return maxsize;
2175 : }
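
/*
 * A minimal sketch of the address translation used above: linear-mapped
 * kernel addresses convert with plain offset arithmetic, while vmalloc and
 * module addresses need a page-table walk.  example_kaddr_to_page() is a
 * hypothetical name for illustration.
 */
static struct page *example_kaddr_to_page(const void *kaddr)
{
	if (is_vmalloc_or_module_addr(kaddr))
		return vmalloc_to_page(kaddr);	/* walk the vmalloc mapping */
	return virt_to_page(kaddr);		/* direct-map arithmetic */
}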
2176 :
2177 : /*
2178 : * Extract a list of contiguous pages from a user iterator and get a pin on
2179 : * each of them. This should only be used if the iterator is user-backed
2180 : * (ITER_IOVEC/ITER_UBUF).
2181 : *
2182 : * It does not get refs on the pages, but the pages must be unpinned by the
2183 : * caller once the transfer is complete.
2184 : *
2185 : * This is safe to use where background IO/DMA *is* going to be modifying
2186 : * the buffer; using a pin rather than a ref forces fork() to give the
2187 : * child a copy of the page.
2188 : */
2189 0 : static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
2190 : struct page ***pages,
2191 : size_t maxsize,
2192 : unsigned int maxpages,
2193 : iov_iter_extraction_t extraction_flags,
2194 : size_t *offset0)
2195 : {
2196 : unsigned long addr;
2197 0 : unsigned int gup_flags = 0;
2198 : size_t offset;
2199 : int res;
2200 :
2201 0 : if (i->data_source == ITER_DEST)
2202 0 : gup_flags |= FOLL_WRITE;
2203 0 : if (extraction_flags & ITER_ALLOW_P2PDMA)
2204 0 : gup_flags |= FOLL_PCI_P2PDMA;
2205 0 : if (i->nofault)
2206 0 : gup_flags |= FOLL_NOFAULT;
2207 :
2208 0 : addr = first_iovec_segment(i, &maxsize);
2209 0 : *offset0 = offset = addr % PAGE_SIZE;
2210 0 : addr &= PAGE_MASK;
2211 0 : maxpages = want_pages_array(pages, maxsize, offset, maxpages);
2212 0 : if (!maxpages)
2213 : return -ENOMEM;
2214 0 : res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
2215 0 : if (unlikely(res <= 0))
2216 0 : return res;
2217 0 : maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
2218 0 : iov_iter_advance(i, maxsize);
2219 0 : return maxsize;
2220 : }
2221 :
2222 : /**
2223 : * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
2224 : * @i: The iterator to extract from
2225 : * @pages: Where to return the list of pages
2226 : * @maxsize: The maximum amount of iterator to extract
2227 : * @maxpages: The maximum size of the list of pages
2228 : * @extraction_flags: Flags to qualify request
2229 : * @offset0: Where to return the starting offset into (*@pages)[0]
2230 : *
2231 : * Extract a list of contiguous pages from the current point of the iterator,
2232 : * advancing the iterator. The maximum number of pages and the maximum amount
2233 : * of page contents can be set.
2234 : *
2235 : * If *@pages is NULL, a page list will be allocated to the required size and
2236 : * *@pages will be set to its base. If *@pages is not NULL, it will be assumed
2237 : * that the caller allocated a page list at least @maxpages in size and this
2238 : * will be filled in.
2239 : *
2240 : * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
2241 : * be allowed on the pages extracted.
2242 : *
2243 : * The iov_iter_extract_will_pin() function can be used to query how cleanup
2244 : * should be performed.
2245 : *
2246 : * Extra refs or pins on the pages may be obtained as follows:
2247 : *
2248 : * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
2249 : * added to the pages, but refs will not be taken.
2250 : * iov_iter_extract_will_pin() will return true.
2251 : *
2252 : * (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
2253 : * merely listed; no extra refs or pins are obtained.
2254 : * iov_iter_extract_will_pin() will return false.
2255 : *
2256 : * Note also:
2257 : *
2258 : * (*) Use with ITER_DISCARD is not supported as that has no content.
2259 : *
2260 : * On success, the function sets *@pages to the new pagelist, if allocated, and
2261 : * sets *@offset0 to the offset into the first page.
2262 : *
2263 : * It may also return -ENOMEM or -EFAULT.
2264 : */
2265 0 : ssize_t iov_iter_extract_pages(struct iov_iter *i,
2266 : struct page ***pages,
2267 : size_t maxsize,
2268 : unsigned int maxpages,
2269 : iov_iter_extraction_t extraction_flags,
2270 : size_t *offset0)
2271 : {
2272 0 : maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
2273 0 : if (!maxsize)
2274 : return 0;
2275 :
2276 0 : if (likely(user_backed_iter(i)))
2277 0 : return iov_iter_extract_user_pages(i, pages, maxsize,
2278 : maxpages, extraction_flags,
2279 : offset0);
2280 0 : if (iov_iter_is_kvec(i))
2281 0 : return iov_iter_extract_kvec_pages(i, pages, maxsize,
2282 : maxpages, extraction_flags,
2283 : offset0);
2284 0 : if (iov_iter_is_bvec(i))
2285 0 : return iov_iter_extract_bvec_pages(i, pages, maxsize,
2286 : maxpages, extraction_flags,
2287 : offset0);
2288 0 : if (iov_iter_is_xarray(i))
2289 0 : return iov_iter_extract_xarray_pages(i, pages, maxsize,
2290 : maxpages, extraction_flags,
2291 : offset0);
2292 : return -EFAULT;
2293 : }
2294 : EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
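
/*
 * A minimal sketch of the extract/use/unpin cycle, with the I/O step left
 * as a comment.  Cleanup is keyed off iov_iter_extract_will_pin(): only
 * user-backed iterators hand back pinned pages, and the page list is
 * allocated with kvmalloc_array(), hence kvfree().  example_extract() is
 * a hypothetical name for illustration.
 */
static ssize_t example_extract(struct iov_iter *iter, size_t count)
{
	struct page **pages = NULL;	/* NULL: let the call allocate it */
	size_t offset;
	ssize_t len;

	len = iov_iter_extract_pages(iter, &pages, count, INT_MAX, 0, &offset);
	if (len <= 0)
		return len;

	/* ... set up I/O covering 'len' bytes from pages[0] + offset ... */

	if (iov_iter_extract_will_pin(iter))
		unpin_user_pages(pages, DIV_ROUND_UP(offset + len, PAGE_SIZE));
	kvfree(pages);
	return len;
}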
|