LCOV code coverage report — lib/iov_iter.c (test: coverage.info, 2023-04-06 08:38:28) — lines: 0 / 830 (0.0 %), functions: 0 / 69 (0.0 %)

// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {          \
        size_t __maybe_unused off = 0;                          \
        len = n;                                                \
        base = __p + i->iov_offset;                             \
        len -= (STEP);                                          \
        i->iov_offset += len;                                   \
        n = len;                                                \
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {        \
        size_t off = 0;                                         \
        size_t skip = i->iov_offset;                            \
        do {                                                    \
                len = min(n, __p->iov_len - skip);              \
                if (likely(len)) {                              \
                        base = __p->iov_base + skip;            \
                        len -= (STEP);                          \
                        off += len;                             \
                        skip += len;                            \
                        n -= len;                               \
                        if (skip < __p->iov_len)                \
                                break;                          \
                }                                               \
                __p++;                                          \
                skip = 0;                                       \
        } while (n);                                            \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {           \
        size_t off = 0;                                         \
        unsigned skip = i->iov_offset;                          \
        while (n) {                                             \
                unsigned offset = p->bv_offset + skip;          \
                unsigned left;                                  \
                void *kaddr = kmap_local_page(p->bv_page +      \
                                        offset / PAGE_SIZE);    \
                base = kaddr + offset % PAGE_SIZE;              \
                len = min(min(n, (size_t)(p->bv_len - skip)),   \
                     (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
                left = (STEP);                                  \
                kunmap_local(kaddr);                            \
                len -= left;                                    \
                off += len;                                     \
                skip += len;                                    \
                if (skip == p->bv_len) {                        \
                        skip = 0;                               \
                        p++;                                    \
                }                                               \
                n -= len;                                       \
                if (left)                                       \
                        break;                                  \
        }                                                       \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_xarray(i, n, base, len, __off, STEP) {          \
        __label__ __out;                                        \
        size_t __off = 0;                                       \
        struct folio *folio;                                    \
        loff_t start = i->xarray_start + i->iov_offset;         \
        pgoff_t index = start / PAGE_SIZE;                      \
        XA_STATE(xas, i->xarray, index);                        \
                                                                \
        len = PAGE_SIZE - offset_in_page(start);                \
        rcu_read_lock();                                        \
        xas_for_each(&xas, folio, ULONG_MAX) {                  \
                unsigned left;                                  \
                size_t offset;                                  \
                if (xas_retry(&xas, folio))                     \
                        continue;                               \
                if (WARN_ON(xa_is_value(folio)))                \
                        break;                                  \
                if (WARN_ON(folio_test_hugetlb(folio)))         \
                        break;                                  \
                offset = offset_in_folio(folio, start + __off); \
                while (offset < folio_size(folio)) {            \
                        base = kmap_local_folio(folio, offset); \
                        len = min(n, len);                      \
                        left = (STEP);                          \
                        kunmap_local(base);                     \
                        len -= left;                            \
                        __off += len;                           \
                        n -= len;                               \
                        if (left || n == 0)                     \
                                goto __out;                     \
                        offset += len;                          \
                        len = PAGE_SIZE;                        \
                }                                               \
        }                                                       \
__out:                                                          \
        rcu_read_unlock();                                      \
        i->iov_offset += __off;                                 \
        n = __off;                                              \
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {     \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (likely(n)) {                                        \
                if (likely(iter_is_ubuf(i))) {                  \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_buf(i, n, base, len, off,       \
                                                i->ubuf, (I))   \
                } else if (likely(iter_is_iovec(i))) {          \
                        const struct iovec *iov = i->iov;       \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                iov, (I))       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                } else if (iov_iter_is_bvec(i)) {               \
                        const struct bio_vec *bvec = i->bvec;   \
                        void *base;                             \
                        size_t len;                             \
                        iterate_bvec(i, n, base, len, off,      \
                                                bvec, (K))      \
                        i->nr_segs -= bvec - i->bvec;           \
                        i->bvec = bvec;                         \
                } else if (iov_iter_is_kvec(i)) {               \
                        const struct kvec *kvec = i->kvec;      \
                        void *base;                             \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                kvec, (K))      \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (iov_iter_is_xarray(i)) {             \
                        void *base;                             \
                        size_t len;                             \
                        iterate_xarray(i, n, base, len, off,    \
                                                        (K))    \
                }                                               \
                i->count -= n;                                  \
        }                                                       \
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
        __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
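
/*
 * A minimal usage sketch: a caller names a user-side step expression (I)
 * and a kernel-side step expression (K), and the macro dispatches on the
 * iterator flavour.  A zero-filling helper built this way looks like the
 * real iov_iter_zero() further below:
 *
 *	iterate_and_advance(i, bytes, base, len, count,
 *		clear_user(base, len),	// ubuf/iovec: user pointers
 *		memset(base, 0, len)	// bvec/kvec/xarray: kernel mappings
 *	)
 *
 * The user-side step must return the number of bytes it failed to
 * process (a short step terminates the walk early); iterate_and_advance()
 * wraps the kernel-side step as ((void)(K),0) so it always counts as
 * fully processed.
 */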

static int copyout(void __user *to, const void *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        size_t res = n;

        if (should_fail_usercopy())
                return n;
        if (access_ok(from, n)) {
                instrument_copy_from_user_before(to, from, n);
                res = raw_copy_from_user(to, from, n);
                instrument_copy_from_user_after(to, from, n, res);
        }
        return res;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_head = pipe->head;
        unsigned int p_tail = pipe->tail;
        unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
        unsigned int i_head = i->head;
        unsigned int idx;

        if (i->last_offset) {
                struct pipe_buffer *p;
                if (unlikely(p_occupancy == 0))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(i_head != p_head - 1))
                        goto Bad;       // must be at the last buffer...

                p = pipe_buf(pipe, i_head);
                if (unlikely(p->offset + p->len != abs(i->last_offset)))
                        goto Bad;       // ... at the end of segment
        } else {
                if (i_head != p_head)
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
        printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
                        p_head, p_tail, pipe->ring_size);
        for (idx = 0; idx < pipe->ring_size; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
        struct page *page = alloc_page(GFP_USER);
        if (page) {
                struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
                *buf = (struct pipe_buffer) {
                        .ops = &default_pipe_buf_ops,
                        .page = page,
                        .offset = 0,
                        .len = size
                };
        }
        return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
                        unsigned int offset, unsigned int size)
{
        struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
        *buf = (struct pipe_buffer) {
                .ops = &page_cache_pipe_buf_ops,
                .page = page,
                .offset = offset,
                .len = size
        };
        get_page(page);
}

static inline int last_offset(const struct pipe_buffer *buf)
{
        if (buf->ops == &default_pipe_buf_ops)
                return buf->len;        // buf->offset is 0 for those
        else
                return -(buf->offset + buf->len);
}
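
/*
 * Sign convention, by way of example: an anonymous buffer (offset 0)
 * holding 100 bytes yields +100, while a page-cache buffer at offset 20
 * holding 80 bytes yields -(20 + 80) = -100.  append_pipe() below can
 * keep filling the tail page only in the positive (anonymous) case.
 */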

static struct page *append_pipe(struct iov_iter *i, size_t size,
                                unsigned int *off)
{
        struct pipe_inode_info *pipe = i->pipe;
        int offset = i->last_offset;
        struct pipe_buffer *buf;
        struct page *page;

        if (offset > 0 && offset < PAGE_SIZE) {
                // some space in the last buffer; add to it
                buf = pipe_buf(pipe, pipe->head - 1);
                size = min_t(size_t, size, PAGE_SIZE - offset);
                buf->len += size;
                i->last_offset += size;
                i->count -= size;
                *off = offset;
                return buf->page;
        }
        // OK, we need a new buffer
        *off = 0;
        size = min_t(size_t, size, PAGE_SIZE);
        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                return NULL;
        page = push_anon(pipe, size);
        if (!page)
                return NULL;
        i->head = pipe->head - 1;
        i->last_offset = size;
        i->count -= size;
        return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int head = pipe->head;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        if (offset && i->last_offset == -offset) { // could we merge it?
                struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
                if (buf->page == page) {
                        buf->len += bytes;
                        i->last_offset -= bytes;
                        i->count -= bytes;
                        return bytes;
                }
        }
        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                return 0;

        push_page(pipe, page, offset, bytes);
        i->last_offset = -(offset + bytes);
        i->head = head;
        i->count -= bytes;
        return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_readable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_readable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
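
/*
 * A minimal sketch of the usual caller pattern (the buffered-write loop
 * is the typical user; names here are illustrative):
 *
 *	if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
 *		status = -EFAULT;	// nothing could be faulted in
 *		break;
 *	}
 *	// ... take page locks, then copy with page faults disabled ...
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 */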

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_safe_writeable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter) {
                .iter_type = ITER_IOVEC,
                .nofault = false,
                .user_backed = true,
                .data_source = direction,
                .iov = iov,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_init);
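
/*
 * A minimal sketch (ubuf/len assumed caller-provided): wrapping a single
 * userspace buffer as the destination of a read-style operation:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 *
 * Note that the iovec array must stay live for the iterator's lifetime;
 * the iov_iter only borrows it.
 */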

// returns the offset in partial buffer (if any)
static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
{
        struct pipe_inode_info *pipe = i->pipe;
        int used = pipe->head - pipe->tail;
        int off = i->last_offset;

        *npages = max((int)pipe->max_usage - used, 0);

        if (off > 0 && off < PAGE_SIZE) { // anon and not full
                (*npages)++;
                return off;
        }
        return 0;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        unsigned int off, chunk;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        for (size_t n = bytes; n; n -= chunk) {
                struct page *page = append_pipe(i, n, &off);
                chunk = min_t(size_t, n, PAGE_SIZE - off);
                if (!page)
                        return bytes - n;
                memcpy_to_page(page, off, addr, chunk);
                addr += chunk;
        }
        return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
{
        __wsum next = csum_partial_copy_nocheck(from, to, len);
        return csum_block_add(sum, next, off);
}
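
/*
 * Why @off matters above: the 16-bit ones'-complement sum depends on
 * whether a block starts at an even or odd byte of the overall stream,
 * so a partial sum for a block landing at an odd offset must be
 * byte-swapped before folding; csum_block_add() does that based on @off.
 */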

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                         struct iov_iter *i, __wsum *sump)
{
        __wsum sum = *sump;
        size_t off = 0;
        unsigned int chunk, r;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        while (bytes) {
                struct page *page = append_pipe(i, bytes, &r);
                char *p;

                if (!page)
                        break;
                chunk = min_t(size_t, bytes, PAGE_SIZE - r);
                p = kmap_local_page(page);
                sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
                kunmap_local(p);
                off += chunk;
                bytes -= chunk;
        }
        *sump = sum;
        return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (user_backed_iter(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyout(base, addr + off, len),
                memcpy(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
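
/*
 * _copy_to_iter() is normally reached via the copy_to_iter() inline in
 * <linux/uio.h>, which runs a hardened check_copy_size() check first.
 * A minimal sketch of a read(2)-style producer:
 *
 *	if (copy_to_iter(kbuf, len, iter) != len)
 *		return -EFAULT;		// short copy: user memory faulted
 */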

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = copy_mc_to_user((__force void *) to, from, n);
        }
        return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        size_t xfer = 0;
        unsigned int off, chunk;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        while (bytes) {
                struct page *page = append_pipe(i, bytes, &off);
                unsigned long rem;
                char *p;

                if (!page)
                        break;
                chunk = min_t(size_t, bytes, PAGE_SIZE - off);
                p = kmap_local_page(page);
                rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
                chunk -= rem;
                kunmap_local(p);
                xfer += chunk;
                bytes -= chunk;
                if (rem) {
                        iov_iter_revert(i, rem);
                        break;
                }
        }
        return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_mc_pipe_to_iter(addr, bytes, i);
        if (user_backed_iter(i))
                might_fault();
        __iterate_and_advance(i, bytes, base, len, off,
                copyout_mc(base, addr + off, len),
                copy_mc_to_kernel(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        if (user_backed_iter(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyin(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_inatomic_nocache(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_flushcache(addr + off, base, len),
                memcpy_flushcache(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs to access the page order in order
         * to compute the page size.
         * However, we mostly deal with order-0 pages and thus can
         * avoid a possible cache line miss for requests that fit all
         * page orders.  Note that "n <= v" also rejects the case where
         * n + offset has wrapped around.
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (WARN_ON(n > v || v > page_size(head)))
                return false;
        return true;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;
        if (!page_copy_sane(page, offset, bytes))
                return 0;
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_page_to_iter_pipe(page, offset, bytes, i);
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
                n = _copy_to_iter(kaddr + offset, n, i);
                kunmap_local(kaddr);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;
        if (!page_copy_sane(page, offset, bytes))
                return 0;
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
                n = _copy_from_iter(kaddr + offset, n, i);
                kunmap_local(kaddr);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        unsigned int chunk, off;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        for (size_t n = bytes; n; n -= chunk) {
                struct page *page = append_pipe(i, n, &off);
                char *p;

                if (!page)
                        return bytes - n;
                chunk = min_t(size_t, n, PAGE_SIZE - off);
                p = kmap_local_page(page);
                memset(p + off, 0, chunk);
                kunmap_local(p);
        }
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, base, len, count,
                clear_user(base, len),
                memset(base, 0, len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
                                  struct iov_iter *i)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (!page_copy_sane(page, offset, bytes)) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (WARN_ON_ONCE(!i->data_source)) {
                kunmap_atomic(kaddr);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                copyin(p + off, base, len),
                memcpy(p + off, base, len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        int off = i->last_offset;

        if (!off && !size) {
                pipe_discard_from(pipe, i->start_head); // discard everything
                return;
        }
        i->count -= size;
        while (1) {
                struct pipe_buffer *buf = pipe_buf(pipe, i->head);
                if (off) /* make it relative to the beginning of buffer */
                        size += abs(off) - buf->offset;
                if (size <= buf->len) {
                        buf->len = size;
                        i->last_offset = last_offset(buf);
                        break;
                }
                size -= buf->len;
                i->head++;
                off = 0;
        }
        pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
        const struct bio_vec *bvec, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset;

        for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
                if (likely(size < bvec->bv_len))
                        break;
                size -= bvec->bv_len;
        }
        i->iov_offset = size;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
        const struct iovec *iov, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset; // from beginning of current segment
        for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
                if (likely(size < iov->iov_len))
                        break;
                size -= iov->iov_len;
        }
        i->iov_offset = size;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->count < size))
                size = i->count;
        if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
                i->iov_offset += size;
                i->count -= size;
        } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
                /* iovec and kvec have identical layouts */
                iov_iter_iovec_advance(i, size);
        } else if (iov_iter_is_bvec(i)) {
                iov_iter_bvec_advance(i, size);
        } else if (iov_iter_is_pipe(i)) {
                pipe_advance(i, size);
        } else if (iov_iter_is_discard(i)) {
                i->count -= size;
        }
}
EXPORT_SYMBOL(iov_iter_advance);
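
/*
 * Callers that may need to retry typically pair advance with revert; a
 * minimal sketch (names illustrative):
 *
 *	size_t n = copy_to_iter(buf, len, &iter);	// advances iter by n
 *	if (need_to_retry) {
 *		iov_iter_revert(&iter, n);		// rewind those n bytes
 *		goto again;
 *	}
 */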
     908             : 
     909           0 : void iov_iter_revert(struct iov_iter *i, size_t unroll)
     910             : {
     911           0 :         if (!unroll)
     912             :                 return;
     913           0 :         if (WARN_ON(unroll > MAX_RW_COUNT))
     914             :                 return;
     915           0 :         i->count += unroll;
     916           0 :         if (unlikely(iov_iter_is_pipe(i))) {
     917           0 :                 struct pipe_inode_info *pipe = i->pipe;
     918           0 :                 unsigned int head = pipe->head;
     919             : 
     920           0 :                 while (head > i->start_head) {
     921           0 :                         struct pipe_buffer *b = pipe_buf(pipe, --head);
     922           0 :                         if (unroll < b->len) {
     923           0 :                                 b->len -= unroll;
     924           0 :                                 i->last_offset = last_offset(b);
     925           0 :                                 i->head = head;
     926           0 :                                 return;
     927             :                         }
     928           0 :                         unroll -= b->len;
     929           0 :                         pipe_buf_release(pipe, b);
     930           0 :                         pipe->head--;
     931             :                 }
     932           0 :                 i->last_offset = 0;
     933           0 :                 i->head = head;
     934           0 :                 return;
     935             :         }
     936           0 :         if (unlikely(iov_iter_is_discard(i)))
     937             :                 return;
     938           0 :         if (unroll <= i->iov_offset) {
     939           0 :                 i->iov_offset -= unroll;
     940           0 :                 return;
     941             :         }
     942           0 :         unroll -= i->iov_offset;
     943           0 :         if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
     944           0 :                 BUG(); /* We should never go beyond the start of the specified
     945             :                         * range since we might then be straying into pages that
     946             :                         * aren't pinned.
     947             :                         */
     948           0 :         } else if (iov_iter_is_bvec(i)) {
     949           0 :                 const struct bio_vec *bvec = i->bvec;
     950           0 :                 while (1) {
     951           0 :                         size_t n = (--bvec)->bv_len;
     952           0 :                         i->nr_segs++;
     953           0 :                         if (unroll <= n) {
     954           0 :                                 i->bvec = bvec;
     955           0 :                                 i->iov_offset = n - unroll;
     956           0 :                                 return;
     957             :                         }
     958           0 :                         unroll -= n;
     959             :                 }
      960             :         } else { /* same logic for iovec and kvec */
     961           0 :                 const struct iovec *iov = i->iov;
     962           0 :                 while (1) {
     963           0 :                         size_t n = (--iov)->iov_len;
     964           0 :                         i->nr_segs++;
     965           0 :                         if (unroll <= n) {
     966           0 :                                 i->iov = iov;
     967           0 :                                 i->iov_offset = n - unroll;
     968           0 :                                 return;
     969             :                         }
     970           0 :                         unroll -= n;
     971             :                 }
     972             :         }
     973             : }
     974             : EXPORT_SYMBOL(iov_iter_revert);
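
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): the copy
 * helpers advance the iterator as a side effect, and iov_iter_revert()
 * winds it back when the consumer later rejects the data, so the same
 * bytes can be retransmitted.  demo_xmit() and peer_rejected() are
 * hypothetical stand-ins.
 */
static bool peer_rejected(void);        /* hypothetical failure probe */

static ssize_t demo_xmit(struct iov_iter *iter, void *buf, size_t len)
{
        size_t copied = copy_to_iter(buf, len, iter);   /* advances iter */

        if (peer_rejected()) {
                iov_iter_revert(iter, copied);          /* undo the advance */
                return -EAGAIN;
        }
        return copied;
}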
     975             : 
     976             : /*
     977             :  * Return the count of just the current iov_iter segment.
     978             :  */
     979           0 : size_t iov_iter_single_seg_count(const struct iov_iter *i)
     980             : {
     981           0 :         if (i->nr_segs > 1) {
     982           0 :                 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
     983           0 :                         return min(i->count, i->iov->iov_len - i->iov_offset);
     984           0 :                 if (iov_iter_is_bvec(i))
     985           0 :                         return min(i->count, i->bvec->bv_len - i->iov_offset);
     986             :         }
     987           0 :         return i->count;
     988             : }
     989             : EXPORT_SYMBOL(iov_iter_single_seg_count);
     990             : 
     991           0 : void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
     992             :                         const struct kvec *kvec, unsigned long nr_segs,
     993             :                         size_t count)
     994             : {
     995           0 :         WARN_ON(direction & ~(READ | WRITE));
     996           0 :         *i = (struct iov_iter){
     997             :                 .iter_type = ITER_KVEC,
     998             :                 .data_source = direction,
     999             :                 .kvec = kvec,
    1000             :                 .nr_segs = nr_segs,
    1001             :                 .iov_offset = 0,
    1002             :                 .count = count
    1003             :         };
    1004           0 : }
    1005             : EXPORT_SYMBOL(iov_iter_kvec);
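
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): building an
 * ITER_KVEC over two kernel buffers and draining its first bytes.  In
 * this kernel's convention WRITE marks the iterator as a data source,
 * which is what copy_from_iter() expects; all names and sizes here are
 * invented.
 */
static void demo_kvec(void)
{
        static char hdr[16], body[64];
        struct kvec vec[2] = {
                { .iov_base = hdr,  .iov_len = sizeof(hdr)  },
                { .iov_base = body, .iov_len = sizeof(body) },
        };
        struct iov_iter iter;
        char scratch[8];

        iov_iter_kvec(&iter, WRITE, vec, 2, sizeof(hdr) + sizeof(body));
        copy_from_iter(scratch, sizeof(scratch), &iter);
}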
    1006             : 
    1007           0 : void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
    1008             :                         const struct bio_vec *bvec, unsigned long nr_segs,
    1009             :                         size_t count)
    1010             : {
    1011           0 :         WARN_ON(direction & ~(READ | WRITE));
    1012           0 :         *i = (struct iov_iter){
    1013             :                 .iter_type = ITER_BVEC,
    1014             :                 .data_source = direction,
    1015             :                 .bvec = bvec,
    1016             :                 .nr_segs = nr_segs,
    1017             :                 .iov_offset = 0,
    1018             :                 .count = count
    1019             :         };
    1020           0 : }
    1021             : EXPORT_SYMBOL(iov_iter_bvec);
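
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): wrapping one
 * freshly allocated page in an ITER_BVEC destination iterator so that
 * copy_to_iter() lands data in the page.  READ marks the iterator as a
 * destination here; demo_bvec() is hypothetical.
 */
static int demo_bvec(const void *src, size_t len)
{
        struct page *page = alloc_page(GFP_KERNEL);
        struct bio_vec bv;
        struct iov_iter iter;
        size_t copied;

        if (!page)
                return -ENOMEM;
        bv = (struct bio_vec){
                .bv_page = page, .bv_len = PAGE_SIZE, .bv_offset = 0,
        };
        iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);
        copied = copy_to_iter(src, min_t(size_t, len, PAGE_SIZE), &iter);
        __free_page(page);
        return copied ? 0 : -EFAULT;
}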
    1022             : 
    1023           0 : void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
    1024             :                         struct pipe_inode_info *pipe,
    1025             :                         size_t count)
    1026             : {
    1027           0 :         BUG_ON(direction != READ);
    1028           0 :         WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
    1029           0 :         *i = (struct iov_iter){
    1030             :                 .iter_type = ITER_PIPE,
    1031             :                 .data_source = false,
    1032             :                 .pipe = pipe,
    1033           0 :                 .head = pipe->head,
    1034             :                 .start_head = pipe->head,
    1035             :                 .last_offset = 0,
    1036             :                 .count = count
    1037             :         };
    1038           0 : }
    1039             : EXPORT_SYMBOL(iov_iter_pipe);
    1040             : 
    1041             : /**
    1042             :  * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
    1043             :  * @i: The iterator to initialise.
    1044             :  * @direction: The direction of the transfer.
    1045             :  * @xarray: The xarray to access.
    1046             :  * @start: The start file position.
    1047             :  * @count: The size of the I/O buffer in bytes.
    1048             :  *
     1049             :  * Set up an I/O iterator to either draw data out of the pages attached to an
     1050             :  * inode or to inject data into those pages.  The caller *must* prevent the
     1051             :  * pages from being evicted or freed for the duration, either by taking a ref
     1052             :  * on them or by locking them.
    1053             :  */
    1054           0 : void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
    1055             :                      struct xarray *xarray, loff_t start, size_t count)
    1056             : {
    1057           0 :         BUG_ON(direction & ~1);
    1058           0 :         *i = (struct iov_iter) {
    1059             :                 .iter_type = ITER_XARRAY,
    1060             :                 .data_source = direction,
    1061             :                 .xarray = xarray,
    1062             :                 .xarray_start = start,
    1063             :                 .count = count,
    1064             :                 .iov_offset = 0
    1065             :         };
    1066           0 : }
    1067             : EXPORT_SYMBOL(iov_iter_xarray);
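
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): a network
 * filesystem might iterate over an inode's pagecache when transmitting
 * cached data.  The mapping's i_pages xarray is the backing store, and
 * per the comment above the caller must already hold refs or locks on
 * the pages in the range.  demo_xarray() is hypothetical.
 */
static void demo_xarray(struct address_space *mapping, loff_t pos,
                        size_t len, struct iov_iter *iter)
{
        /* WRITE: the pagecache pages act as the data source */
        iov_iter_xarray(iter, WRITE, &mapping->i_pages, pos, len);
}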
    1068             : 
    1069             : /**
    1070             :  * iov_iter_discard - Initialise an I/O iterator that discards data
    1071             :  * @i: The iterator to initialise.
    1072             :  * @direction: The direction of the transfer.
    1073             :  * @count: The size of the I/O buffer in bytes.
    1074             :  *
    1075             :  * Set up an I/O iterator that just discards everything that's written to it.
    1076             :  * It's only available as a READ iterator.
    1077             :  */
    1078           0 : void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
    1079             : {
    1080           0 :         BUG_ON(direction != READ);
    1081           0 :         *i = (struct iov_iter){
    1082             :                 .iter_type = ITER_DISCARD,
    1083             :                 .data_source = false,
    1084             :                 .count = count,
    1085             :                 .iov_offset = 0
    1086             :         };
    1087           0 : }
    1088             : EXPORT_SYMBOL(iov_iter_discard);
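
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): a discard
 * iterator makes a convenient bit bucket, e.g. for skipping unwanted
 * payload while reusing a copy path that expects an iterator.
 * demo_skip() is hypothetical.
 */
static void demo_skip(const void *junk, size_t count)
{
        struct iov_iter sink;

        iov_iter_discard(&sink, READ, count);
        copy_to_iter(junk, count, &sink);       /* data goes nowhere */
}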
    1089             : 
    1090           0 : static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
    1091             :                                    unsigned len_mask)
    1092             : {
    1093           0 :         size_t size = i->count;
    1094           0 :         size_t skip = i->iov_offset;
    1095             :         unsigned k;
    1096             : 
    1097           0 :         for (k = 0; k < i->nr_segs; k++, skip = 0) {
    1098           0 :                 size_t len = i->iov[k].iov_len - skip;
    1099             : 
    1100           0 :                 if (len > size)
    1101           0 :                         len = size;
    1102           0 :                 if (len & len_mask)
    1103             :                         return false;
    1104           0 :                 if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
    1105             :                         return false;
    1106             : 
    1107           0 :                 size -= len;
    1108           0 :                 if (!size)
    1109             :                         break;
    1110             :         }
    1111             :         return true;
    1112             : }
    1113             : 
    1114           0 : static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
    1115             :                                   unsigned len_mask)
    1116             : {
    1117           0 :         size_t size = i->count;
    1118           0 :         unsigned skip = i->iov_offset;
    1119             :         unsigned k;
    1120             : 
    1121           0 :         for (k = 0; k < i->nr_segs; k++, skip = 0) {
    1122           0 :                 size_t len = i->bvec[k].bv_len - skip;
    1123             : 
    1124           0 :                 if (len > size)
    1125           0 :                         len = size;
    1126           0 :                 if (len & len_mask)
    1127             :                         return false;
    1128           0 :                 if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
    1129             :                         return false;
    1130             : 
    1131           0 :                 size -= len;
    1132           0 :                 if (!size)
    1133             :                         break;
    1134             :         }
    1135             :         return true;
    1136             : }
    1137             : 
    1138             : /**
     1139             :  * iov_iter_is_aligned() - Check that the address and length of each segment
     1140             :  *      are aligned with respect to the given bit masks.
     1141             :  *
     1142             :  * @i: &struct iov_iter to check
     1143             :  * @addr_mask: bit mask to check against each segment's address
     1144             :  * @len_mask: bit mask to check against each segment's length
     1145             :  *
     1146             :  * Return: false if any address or length has bits set in either mask
    1147             :  */
    1148           0 : bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
    1149             :                          unsigned len_mask)
    1150             : {
    1151           0 :         if (likely(iter_is_ubuf(i))) {
    1152           0 :                 if (i->count & len_mask)
    1153             :                         return false;
    1154           0 :                 if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
    1155             :                         return false;
    1156           0 :                 return true;
    1157             :         }
    1158             : 
    1159           0 :         if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
    1160           0 :                 return iov_iter_aligned_iovec(i, addr_mask, len_mask);
    1161             : 
    1162           0 :         if (iov_iter_is_bvec(i))
    1163           0 :                 return iov_iter_aligned_bvec(i, addr_mask, len_mask);
    1164             : 
    1165           0 :         if (iov_iter_is_pipe(i)) {
    1166           0 :                 size_t size = i->count;
    1167             : 
    1168           0 :                 if (size & len_mask)
    1169             :                         return false;
    1170           0 :                 if (size && i->last_offset > 0) {
    1171           0 :                         if (i->last_offset & addr_mask)
    1172             :                                 return false;
    1173             :                 }
    1174             : 
    1175           0 :                 return true;
    1176             :         }
    1177             : 
    1178           0 :         if (iov_iter_is_xarray(i)) {
    1179           0 :                 if (i->count & len_mask)
    1180             :                         return false;
    1181           0 :                 if ((i->xarray_start + i->iov_offset) & addr_mask)
    1182             :                         return false;
    1183             :         }
    1184             : 
    1185           0 :         return true;
    1186             : }
    1187             : EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
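
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): a direct-I/O
 * style gate.  The masks are the usual "alignment - 1" bit patterns,
 * so 511 demands 512-byte-aligned addresses and 512-byte-multiple
 * lengths in every segment.  demo_dio_ok() is hypothetical.
 */
static bool demo_dio_ok(const struct iov_iter *iter)
{
        return iov_iter_is_aligned(iter, 511, 511);
}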
    1188             : 
    1189           0 : static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
    1190             : {
    1191           0 :         unsigned long res = 0;
    1192           0 :         size_t size = i->count;
    1193           0 :         size_t skip = i->iov_offset;
    1194             :         unsigned k;
    1195             : 
    1196           0 :         for (k = 0; k < i->nr_segs; k++, skip = 0) {
    1197           0 :                 size_t len = i->iov[k].iov_len - skip;
    1198           0 :                 if (len) {
    1199           0 :                         res |= (unsigned long)i->iov[k].iov_base + skip;
    1200           0 :                         if (len > size)
    1201           0 :                                 len = size;
    1202           0 :                         res |= len;
    1203           0 :                         size -= len;
    1204           0 :                         if (!size)
    1205             :                                 break;
    1206             :                 }
    1207             :         }
    1208           0 :         return res;
    1209             : }
    1210             : 
    1211           0 : static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
    1212             : {
    1213           0 :         unsigned res = 0;
    1214           0 :         size_t size = i->count;
    1215           0 :         unsigned skip = i->iov_offset;
    1216             :         unsigned k;
    1217             : 
    1218           0 :         for (k = 0; k < i->nr_segs; k++, skip = 0) {
    1219           0 :                 size_t len = i->bvec[k].bv_len - skip;
    1220           0 :                 res |= (unsigned long)i->bvec[k].bv_offset + skip;
    1221           0 :                 if (len > size)
    1222           0 :                         len = size;
    1223           0 :                 res |= len;
    1224           0 :                 size -= len;
    1225           0 :                 if (!size)
    1226             :                         break;
    1227             :         }
    1228           0 :         return res;
    1229             : }
    1230             : 
    1231           0 : unsigned long iov_iter_alignment(const struct iov_iter *i)
    1232             : {
    1233           0 :         if (likely(iter_is_ubuf(i))) {
    1234           0 :                 size_t size = i->count;
    1235           0 :                 if (size)
    1236           0 :                         return ((unsigned long)i->ubuf + i->iov_offset) | size;
    1237             :                 return 0;
    1238             :         }
    1239             : 
    1240             :         /* iovec and kvec have identical layouts */
    1241           0 :         if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
    1242           0 :                 return iov_iter_alignment_iovec(i);
    1243             : 
    1244           0 :         if (iov_iter_is_bvec(i))
    1245           0 :                 return iov_iter_alignment_bvec(i);
    1246             : 
    1247           0 :         if (iov_iter_is_pipe(i)) {
    1248           0 :                 size_t size = i->count;
    1249             : 
    1250           0 :                 if (size && i->last_offset > 0)
    1251           0 :                         return size | i->last_offset;
    1252             :                 return size;
    1253             :         }
    1254             : 
    1255           0 :         if (iov_iter_is_xarray(i))
    1256           0 :                 return (i->xarray_start + i->iov_offset) | i->count;
    1257             : 
    1258             :         return 0;
    1259             : }
    1260             : EXPORT_SYMBOL(iov_iter_alignment);
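
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): because the
 * helpers above OR together every segment address and length, the low
 * bits of the result expose the worst misalignment anywhere in the
 * iterator, so one mask test covers all segments at once.  @align must
 * be a power of two; demo_aligned_to() is hypothetical.
 */
static bool demo_aligned_to(const struct iov_iter *iter, unsigned long align)
{
        return !(iov_iter_alignment(iter) & (align - 1));
}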
    1261             : 
    1262           0 : unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
    1263             : {
    1264           0 :         unsigned long res = 0;
    1265           0 :         unsigned long v = 0;
    1266           0 :         size_t size = i->count;
    1267             :         unsigned k;
    1268             : 
    1269           0 :         if (iter_is_ubuf(i))
    1270             :                 return 0;
    1271             : 
    1272           0 :         if (WARN_ON(!iter_is_iovec(i)))
    1273             :                 return ~0U;
    1274             : 
    1275           0 :         for (k = 0; k < i->nr_segs; k++) {
    1276           0 :                 if (i->iov[k].iov_len) {
    1277           0 :                         unsigned long base = (unsigned long)i->iov[k].iov_base;
    1278           0 :                         if (v) // if not the first one
    1279           0 :                                 res |= base | v; // this start | previous end
    1280           0 :                         v = base + i->iov[k].iov_len;
    1281           0 :                         if (size <= i->iov[k].iov_len)
    1282             :                                 break;
    1283           0 :                         size -= i->iov[k].iov_len;
    1284             :                 }
    1285             :         }
    1286             :         return res;
    1287             : }
    1288             : EXPORT_SYMBOL(iov_iter_gap_alignment);
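
/*
 * Editor's sketch (illustrative, not part of iov_iter.c):
 * iov_iter_gap_alignment() ORs each segment's start address with the
 * previous segment's end, so its low bits reveal any gap between
 * consecutive segments.  A driver that can only merge segments
 * contiguous at some granularity might gate on it like this
 * (hypothetical helper):
 */
static bool demo_mergeable(const struct iov_iter *iter, unsigned long mask)
{
        return !(iov_iter_gap_alignment(iter) & mask);
}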
    1289             : 
    1290           0 : static int want_pages_array(struct page ***res, size_t size,
    1291             :                             size_t start, unsigned int maxpages)
    1292             : {
    1293           0 :         unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
    1294             : 
    1295           0 :         if (count > maxpages)
    1296           0 :                 count = maxpages;
    1297           0 :         WARN_ON(!count);        // caller should've prevented that
    1298           0 :         if (!*res) {
    1299           0 :                 *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
    1300           0 :                 if (!*res)
    1301             :                         return 0;
    1302             :         }
    1303           0 :         return count;
    1304             : }
    1305             : 
    1306           0 : static ssize_t pipe_get_pages(struct iov_iter *i,
    1307             :                    struct page ***pages, size_t maxsize, unsigned maxpages,
    1308             :                    size_t *start)
    1309             : {
    1310             :         unsigned int npages, count, off, chunk;
    1311             :         struct page **p;
    1312             :         size_t left;
    1313             : 
    1314           0 :         if (!sanity(i))
    1315             :                 return -EFAULT;
    1316             : 
    1317           0 :         *start = off = pipe_npages(i, &npages);
    1318           0 :         if (!npages)
    1319             :                 return -EFAULT;
    1320           0 :         count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
    1321           0 :         if (!count)
    1322             :                 return -ENOMEM;
    1323           0 :         p = *pages;
    1324           0 :         for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
    1325           0 :                 struct page *page = append_pipe(i, left, &off);
    1326           0 :                 if (!page)
    1327             :                         break;
    1328           0 :                 chunk = min_t(size_t, left, PAGE_SIZE - off);
    1329           0 :                 get_page(*p++ = page);
    1330             :         }
    1331           0 :         if (!npages)
    1332             :                 return -EFAULT;
    1333           0 :         return maxsize - left;
    1334             : }
    1335             : 
    1336           0 : static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
    1337             :                                           pgoff_t index, unsigned int nr_pages)
    1338             : {
    1339           0 :         XA_STATE(xas, xa, index);
    1340             :         struct page *page;
    1341           0 :         unsigned int ret = 0;
    1342             : 
    1343             :         rcu_read_lock();
    1344           0 :         for (page = xas_load(&xas); page; page = xas_next(&xas)) {
    1345           0 :                 if (xas_retry(&xas, page))
    1346           0 :                         continue;
    1347             : 
    1348             :                 /* Has the page moved or been split? */
    1349           0 :                 if (unlikely(page != xas_reload(&xas))) {
    1350           0 :                         xas_reset(&xas);
    1351           0 :                         continue;
    1352             :                 }
    1353             : 
    1354           0 :                 pages[ret] = find_subpage(page, xas.xa_index);
    1355           0 :                 get_page(pages[ret]);
    1356           0 :                 if (++ret == nr_pages)
    1357             :                         break;
    1358             :         }
    1359             :         rcu_read_unlock();
    1360           0 :         return ret;
    1361             : }
    1362             : 
    1363           0 : static ssize_t iter_xarray_get_pages(struct iov_iter *i,
    1364             :                                      struct page ***pages, size_t maxsize,
    1365             :                                      unsigned maxpages, size_t *_start_offset)
    1366             : {
    1367             :         unsigned nr, offset, count;
    1368             :         pgoff_t index;
    1369             :         loff_t pos;
    1370             : 
    1371           0 :         pos = i->xarray_start + i->iov_offset;
    1372           0 :         index = pos >> PAGE_SHIFT;
    1373           0 :         offset = pos & ~PAGE_MASK;
    1374           0 :         *_start_offset = offset;
    1375             : 
    1376           0 :         count = want_pages_array(pages, maxsize, offset, maxpages);
    1377           0 :         if (!count)
    1378             :                 return -ENOMEM;
    1379           0 :         nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
    1380           0 :         if (nr == 0)
    1381             :                 return 0;
    1382             : 
    1383           0 :         maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
    1384           0 :         i->iov_offset += maxsize;
    1385           0 :         i->count -= maxsize;
    1386           0 :         return maxsize;
    1387             : }
    1388             : 
     1389             : /* must be called on a non-empty ITER_UBUF or ITER_IOVEC iterator */
    1390           0 : static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
    1391             : {
    1392             :         size_t skip;
    1393             :         long k;
    1394             : 
    1395           0 :         if (iter_is_ubuf(i))
    1396           0 :                 return (unsigned long)i->ubuf + i->iov_offset;
    1397             : 
    1398           0 :         for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
    1399           0 :                 size_t len = i->iov[k].iov_len - skip;
    1400             : 
    1401           0 :                 if (unlikely(!len))
    1402           0 :                         continue;
    1403           0 :                 if (*size > len)
    1404           0 :                         *size = len;
    1405           0 :                 return (unsigned long)i->iov[k].iov_base + skip;
    1406             :         }
    1407           0 :         BUG(); // if it had been empty, we wouldn't get called
    1408             : }
    1409             : 
     1410             : /* must be called on a non-empty ITER_BVEC iterator */
    1411             : static struct page *first_bvec_segment(const struct iov_iter *i,
    1412             :                                        size_t *size, size_t *start)
    1413             : {
    1414             :         struct page *page;
    1415           0 :         size_t skip = i->iov_offset, len;
    1416             : 
    1417           0 :         len = i->bvec->bv_len - skip;
    1418           0 :         if (*size > len)
    1419           0 :                 *size = len;
    1420           0 :         skip += i->bvec->bv_offset;
    1421           0 :         page = i->bvec->bv_page + skip / PAGE_SIZE;
    1422           0 :         *start = skip % PAGE_SIZE;
    1423             :         return page;
    1424             : }
    1425             : 
    1426           0 : static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
    1427             :                    struct page ***pages, size_t maxsize,
    1428             :                    unsigned int maxpages, size_t *start,
    1429             :                    iov_iter_extraction_t extraction_flags)
    1430             : {
    1431           0 :         unsigned int n, gup_flags = 0;
    1432             : 
    1433           0 :         if (maxsize > i->count)
    1434           0 :                 maxsize = i->count;
    1435           0 :         if (!maxsize)
    1436             :                 return 0;
    1437           0 :         if (maxsize > MAX_RW_COUNT)
    1438           0 :                 maxsize = MAX_RW_COUNT;
    1439           0 :         if (extraction_flags & ITER_ALLOW_P2PDMA)
    1440           0 :                 gup_flags |= FOLL_PCI_P2PDMA;
    1441             : 
    1442           0 :         if (likely(user_backed_iter(i))) {
    1443             :                 unsigned long addr;
    1444             :                 int res;
    1445             : 
    1446           0 :                 if (iov_iter_rw(i) != WRITE)
    1447           0 :                         gup_flags |= FOLL_WRITE;
    1448           0 :                 if (i->nofault)
    1449           0 :                         gup_flags |= FOLL_NOFAULT;
    1450             : 
    1451           0 :                 addr = first_iovec_segment(i, &maxsize);
    1452           0 :                 *start = addr % PAGE_SIZE;
    1453           0 :                 addr &= PAGE_MASK;
    1454           0 :                 n = want_pages_array(pages, maxsize, *start, maxpages);
    1455           0 :                 if (!n)
    1456             :                         return -ENOMEM;
    1457           0 :                 res = get_user_pages_fast(addr, n, gup_flags, *pages);
    1458           0 :                 if (unlikely(res <= 0))
    1459           0 :                         return res;
    1460           0 :                 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
    1461           0 :                 iov_iter_advance(i, maxsize);
    1462           0 :                 return maxsize;
    1463             :         }
    1464           0 :         if (iov_iter_is_bvec(i)) {
    1465             :                 struct page **p;
    1466             :                 struct page *page;
    1467             : 
    1468           0 :                 page = first_bvec_segment(i, &maxsize, start);
    1469           0 :                 n = want_pages_array(pages, maxsize, *start, maxpages);
    1470           0 :                 if (!n)
    1471             :                         return -ENOMEM;
    1472           0 :                 p = *pages;
    1473           0 :                 for (int k = 0; k < n; k++)
    1474           0 :                         get_page(p[k] = page + k);
    1475           0 :                 maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
    1476           0 :                 i->count -= maxsize;
    1477           0 :                 i->iov_offset += maxsize;
    1478           0 :                 if (i->iov_offset == i->bvec->bv_len) {
    1479           0 :                         i->iov_offset = 0;
    1480           0 :                         i->bvec++;
    1481           0 :                         i->nr_segs--;
    1482             :                 }
    1483           0 :                 return maxsize;
    1484             :         }
    1485           0 :         if (iov_iter_is_pipe(i))
    1486           0 :                 return pipe_get_pages(i, pages, maxsize, maxpages, start);
    1487           0 :         if (iov_iter_is_xarray(i))
    1488           0 :                 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
    1489             :         return -EFAULT;
    1490             : }
    1491             : 
    1492           0 : ssize_t iov_iter_get_pages(struct iov_iter *i,
    1493             :                    struct page **pages, size_t maxsize, unsigned maxpages,
    1494             :                    size_t *start, iov_iter_extraction_t extraction_flags)
    1495             : {
    1496           0 :         if (!maxpages)
    1497             :                 return 0;
    1498           0 :         BUG_ON(!pages);
    1499             : 
    1500           0 :         return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
    1501             :                                           start, extraction_flags);
    1502             : }
    1503             : EXPORT_SYMBOL_GPL(iov_iter_get_pages);
    1504             : 
    1505           0 : ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
    1506             :                 size_t maxsize, unsigned maxpages, size_t *start)
    1507             : {
    1508           0 :         return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
    1509             : }
    1510             : EXPORT_SYMBOL(iov_iter_get_pages2);
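
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): the usual
 * calling pattern.  On success the iterator has been advanced, *start
 * holds the offset into the first page, the return value is the byte
 * count covered, and every returned page carries a reference that the
 * caller must drop.  demo_pin_pages() is hypothetical; put_page() is
 * from <linux/mm.h>.
 */
static ssize_t demo_pin_pages(struct iov_iter *iter)
{
        struct page *pages[16];
        size_t start;
        ssize_t bytes;
        int n;

        bytes = iov_iter_get_pages2(iter, pages, 16 * PAGE_SIZE, 16, &start);
        if (bytes <= 0)
                return bytes;
        n = DIV_ROUND_UP(bytes + start, PAGE_SIZE);     /* pages actually used */
        /* ... perform I/O against pages[0..n-1] ... */
        while (n--)
                put_page(pages[n]);
        return bytes;
}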
    1511             : 
    1512           0 : ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
    1513             :                    struct page ***pages, size_t maxsize,
    1514             :                    size_t *start, iov_iter_extraction_t extraction_flags)
    1515             : {
    1516             :         ssize_t len;
    1517             : 
    1518           0 :         *pages = NULL;
    1519             : 
    1520           0 :         len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
    1521             :                                          extraction_flags);
    1522           0 :         if (len <= 0) {
    1523           0 :                 kvfree(*pages);
    1524           0 :                 *pages = NULL;
    1525             :         }
    1526           0 :         return len;
    1527             : }
    1528             : EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);
    1529             : 
    1530           0 : ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
    1531             :                 struct page ***pages, size_t maxsize, size_t *start)
    1532             : {
    1533           0 :         return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
    1534             : }
    1535             : EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
    1536             : 
    1537           0 : size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
    1538             :                                struct iov_iter *i)
    1539             : {
    1540             :         __wsum sum, next;
    1541           0 :         sum = *csum;
    1542           0 :         if (WARN_ON_ONCE(!i->data_source))
    1543             :                 return 0;
    1544             : 
    1545           0 :         iterate_and_advance(i, bytes, base, len, off, ({
    1546             :                 next = csum_and_copy_from_user(base, addr + off, len);
    1547             :                 sum = csum_block_add(sum, next, off);
    1548             :                 next ? 0 : len;
    1549             :         }), ({
    1550             :                 sum = csum_and_memcpy(addr + off, base, len, sum, off);
    1551             :         })
    1552             :         )
    1553           0 :         *csum = sum;
    1554           0 :         return bytes;
    1555             : }
    1556             : EXPORT_SYMBOL(csum_and_copy_from_iter);
    1557             : 
    1558           0 : size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
    1559             :                              struct iov_iter *i)
    1560             : {
    1561           0 :         struct csum_state *csstate = _csstate;
    1562             :         __wsum sum, next;
    1563             : 
    1564           0 :         if (WARN_ON_ONCE(i->data_source))
    1565             :                 return 0;
    1566           0 :         if (unlikely(iov_iter_is_discard(i))) {
     1567             :                 // can't use csum_and_memcpy() here - the data is not copied
    1568           0 :                 csstate->csum = csum_block_add(csstate->csum,
    1569             :                                                csum_partial(addr, bytes, 0),
    1570           0 :                                                csstate->off);
    1571           0 :                 csstate->off += bytes;
    1572           0 :                 return bytes;
    1573             :         }
    1574             : 
    1575           0 :         sum = csum_shift(csstate->csum, csstate->off);
    1576           0 :         if (unlikely(iov_iter_is_pipe(i)))
    1577           0 :                 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
    1578           0 :         else iterate_and_advance(i, bytes, base, len, off, ({
    1579             :                 next = csum_and_copy_to_user(addr + off, base, len);
    1580             :                 sum = csum_block_add(sum, next, off);
    1581             :                 next ? 0 : len;
    1582             :         }), ({
    1583             :                 sum = csum_and_memcpy(base, addr + off, len, sum, off);
    1584             :         })
    1585             :         )
    1586           0 :         csstate->csum = csum_shift(sum, csstate->off);
    1587           0 :         csstate->off += bytes;
    1588           0 :         return bytes;
    1589             : }
    1590             : EXPORT_SYMBOL(csum_and_copy_to_iter);
    1591             : 
    1592           0 : size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
    1593             :                 struct iov_iter *i)
    1594             : {
    1595             : #ifdef CONFIG_CRYPTO_HASH
    1596             :         struct ahash_request *hash = hashp;
    1597             :         struct scatterlist sg;
    1598             :         size_t copied;
    1599             : 
    1600             :         copied = copy_to_iter(addr, bytes, i);
    1601             :         sg_init_one(&sg, addr, copied);
    1602             :         ahash_request_set_crypt(hash, &sg, NULL, copied);
    1603             :         crypto_ahash_update(hash);
    1604             :         return copied;
    1605             : #else
    1606           0 :         return 0;
    1607             : #endif
    1608             : }
    1609             : EXPORT_SYMBOL(hash_and_copy_to_iter);
    1610             : 
    1611           0 : static int iov_npages(const struct iov_iter *i, int maxpages)
    1612             : {
    1613           0 :         size_t skip = i->iov_offset, size = i->count;
    1614             :         const struct iovec *p;
    1615           0 :         int npages = 0;
    1616             : 
    1617           0 :         for (p = i->iov; size; skip = 0, p++) {
    1618           0 :                 unsigned offs = offset_in_page(p->iov_base + skip);
    1619           0 :                 size_t len = min(p->iov_len - skip, size);
    1620             : 
    1621           0 :                 if (len) {
    1622           0 :                         size -= len;
    1623           0 :                         npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
    1624           0 :                         if (unlikely(npages > maxpages))
    1625             :                                 return maxpages;
    1626             :                 }
    1627             :         }
    1628             :         return npages;
    1629             : }
    1630             : 
    1631             : static int bvec_npages(const struct iov_iter *i, int maxpages)
    1632             : {
    1633           0 :         size_t skip = i->iov_offset, size = i->count;
    1634             :         const struct bio_vec *p;
    1635           0 :         int npages = 0;
    1636             : 
    1637           0 :         for (p = i->bvec; size; skip = 0, p++) {
    1638           0 :                 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
    1639           0 :                 size_t len = min(p->bv_len - skip, size);
    1640             : 
    1641           0 :                 size -= len;
    1642           0 :                 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
    1643           0 :                 if (unlikely(npages > maxpages))
    1644             :                         return maxpages;
    1645             :         }
    1646             :         return npages;
    1647             : }
    1648             : 
    1649           0 : int iov_iter_npages(const struct iov_iter *i, int maxpages)
    1650             : {
    1651           0 :         if (unlikely(!i->count))
    1652             :                 return 0;
    1653           0 :         if (likely(iter_is_ubuf(i))) {
    1654           0 :                 unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
    1655           0 :                 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
    1656           0 :                 return min(npages, maxpages);
    1657             :         }
    1658             :         /* iovec and kvec have identical layouts */
    1659           0 :         if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
    1660           0 :                 return iov_npages(i, maxpages);
    1661           0 :         if (iov_iter_is_bvec(i))
    1662             :                 return bvec_npages(i, maxpages);
    1663           0 :         if (iov_iter_is_pipe(i)) {
    1664             :                 int npages;
    1665             : 
    1666           0 :                 if (!sanity(i))
    1667             :                         return 0;
    1668             : 
    1669           0 :                 pipe_npages(i, &npages);
    1670           0 :                 return min(npages, maxpages);
    1671             :         }
    1672           0 :         if (iov_iter_is_xarray(i)) {
    1673           0 :                 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
    1674           0 :                 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
    1675           0 :                 return min(npages, maxpages);
    1676             :         }
    1677             :         return 0;
    1678             : }
    1679             : EXPORT_SYMBOL(iov_iter_npages);
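
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): block code
 * typically uses iov_iter_npages() to size a bio's vector table before
 * extracting pages, capped at BIO_MAX_VECS from <linux/bio.h>.
 * demo_nr_vecs() is hypothetical.
 */
static unsigned int demo_nr_vecs(const struct iov_iter *iter)
{
        return iov_iter_npages(iter, BIO_MAX_VECS);
}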
    1680             : 
    1681           0 : const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
    1682             : {
    1683           0 :         *new = *old;
    1684           0 :         if (unlikely(iov_iter_is_pipe(new))) {
    1685           0 :                 WARN_ON(1);
    1686           0 :                 return NULL;
    1687             :         }
    1688           0 :         if (iov_iter_is_bvec(new))
    1689           0 :                 return new->bvec = kmemdup(new->bvec,
    1690           0 :                                     new->nr_segs * sizeof(struct bio_vec),
    1691             :                                     flags);
    1692           0 :         else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
    1693             :                 /* iovec and kvec have identical layout */
    1694           0 :                 return new->iov = kmemdup(new->iov,
    1695           0 :                                    new->nr_segs * sizeof(struct iovec),
    1696             :                                    flags);
    1697             :         return NULL;
    1698             : }
    1699             : EXPORT_SYMBOL(dup_iter);
    1700             : 
    1701           0 : static int copy_compat_iovec_from_user(struct iovec *iov,
    1702             :                 const struct iovec __user *uvec, unsigned long nr_segs)
    1703             : {
    1704           0 :         const struct compat_iovec __user *uiov =
    1705             :                 (const struct compat_iovec __user *)uvec;
    1706           0 :         int ret = -EFAULT, i;
    1707             : 
    1708           0 :         if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
    1709             :                 return -EFAULT;
    1710             : 
    1711           0 :         for (i = 0; i < nr_segs; i++) {
    1712             :                 compat_uptr_t buf;
    1713             :                 compat_ssize_t len;
    1714             : 
    1715           0 :                 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
    1716           0 :                 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
    1717             : 
     1718             :                 /* check for compat_size_t not fitting in compat_ssize_t */
    1719           0 :                 if (len < 0) {
    1720             :                         ret = -EINVAL;
    1721             :                         goto uaccess_end;
    1722             :                 }
    1723           0 :                 iov[i].iov_base = compat_ptr(buf);
    1724           0 :                 iov[i].iov_len = len;
    1725             :         }
    1726             : 
    1727             :         ret = 0;
    1728             : uaccess_end:
    1729             :         user_access_end();
    1730             :         return ret;
    1731             : }
    1732             : 
    1733           0 : static int copy_iovec_from_user(struct iovec *iov,
    1734             :                 const struct iovec __user *uvec, unsigned long nr_segs)
    1735             : {
    1736             :         unsigned long seg;
    1737             : 
    1738           0 :         if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
    1739             :                 return -EFAULT;
    1740           0 :         for (seg = 0; seg < nr_segs; seg++) {
    1741           0 :                 if ((ssize_t)iov[seg].iov_len < 0)
    1742             :                         return -EINVAL;
    1743             :         }
    1744             : 
    1745             :         return 0;
    1746             : }
    1747             : 
    1748           0 : struct iovec *iovec_from_user(const struct iovec __user *uvec,
    1749             :                 unsigned long nr_segs, unsigned long fast_segs,
    1750             :                 struct iovec *fast_iov, bool compat)
    1751             : {
    1752           0 :         struct iovec *iov = fast_iov;
    1753             :         int ret;
    1754             : 
    1755             :         /*
     1756             :          * SuS says "The readv() function *may* fail if the iovcnt argument was
     1757             :          * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
     1758             :          * traditionally returned zero for zero segments, so...
    1759             :          */
    1760           0 :         if (nr_segs == 0)
    1761             :                 return iov;
    1762           0 :         if (nr_segs > UIO_MAXIOV)
    1763             :                 return ERR_PTR(-EINVAL);
    1764           0 :         if (nr_segs > fast_segs) {
    1765           0 :                 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
    1766           0 :                 if (!iov)
    1767             :                         return ERR_PTR(-ENOMEM);
    1768             :         }
    1769             : 
    1770           0 :         if (compat)
    1771           0 :                 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
    1772             :         else
    1773           0 :                 ret = copy_iovec_from_user(iov, uvec, nr_segs);
    1774           0 :         if (ret) {
    1775           0 :                 if (iov != fast_iov)
    1776           0 :                         kfree(iov);
    1777           0 :                 return ERR_PTR(ret);
    1778             :         }
    1779             : 
    1780             :         return iov;
    1781             : }
    1782             : 
    1783           0 : ssize_t __import_iovec(int type, const struct iovec __user *uvec,
    1784             :                  unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
    1785             :                  struct iov_iter *i, bool compat)
    1786             : {
    1787           0 :         ssize_t total_len = 0;
    1788             :         unsigned long seg;
    1789             :         struct iovec *iov;
    1790             : 
    1791           0 :         iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
    1792           0 :         if (IS_ERR(iov)) {
    1793           0 :                 *iovp = NULL;
    1794           0 :                 return PTR_ERR(iov);
    1795             :         }
    1796             : 
    1797             :         /*
    1798             :          * According to the Single Unix Specification we should return EINVAL if
    1799             :          * an element length is < 0 when cast to ssize_t or if the total length
    1800             :          * would overflow the ssize_t return value of the system call.
    1801             :          *
    1802             :          * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
    1803             :          * overflow case.
    1804             :          */
    1805           0 :         for (seg = 0; seg < nr_segs; seg++) {
    1806           0 :                 ssize_t len = (ssize_t)iov[seg].iov_len;
    1807             : 
    1808           0 :                 if (!access_ok(iov[seg].iov_base, len)) {
    1809           0 :                         if (iov != *iovp)
    1810           0 :                                 kfree(iov);
    1811           0 :                         *iovp = NULL;
    1812           0 :                         return -EFAULT;
    1813             :                 }
    1814             : 
    1815           0 :                 if (len > MAX_RW_COUNT - total_len) {
    1816           0 :                         len = MAX_RW_COUNT - total_len;
    1817           0 :                         iov[seg].iov_len = len;
    1818             :                 }
    1819           0 :                 total_len += len;
    1820             :         }
    1821             : 
    1822           0 :         iov_iter_init(i, type, iov, nr_segs, total_len);
    1823           0 :         if (iov == *iovp)
    1824           0 :                 *iovp = NULL;
    1825             :         else
    1826           0 :                 *iovp = iov;
    1827             :         return total_len;
    1828             : }
    1829             : 
    1830             : /**
    1831             :  * import_iovec() - Copy an array of &struct iovec from userspace
    1832             :  *     into the kernel, check that it is valid, and initialize a new
    1833             :  *     &struct iov_iter iterator to access it.
    1834             :  *
    1835             :  * @type: One of %READ or %WRITE.
    1836             :  * @uvec: Pointer to the userspace array.
    1837             :  * @nr_segs: Number of elements in userspace array.
     1838             :  * @fast_segs: Number of elements in *@iovp.
     1839             :  * @iovp: (input and output parameter) Pointer to pointer to (usually small
     1840             :  *     on-stack) kernel array.
     1841             :  * @i: Pointer to iterator that will be initialized on success.
     1842             :  *
     1843             :  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
     1844             :  * then this function places %NULL in *@iovp on return. Otherwise, a new
     1845             :  * array will be allocated and the result placed in *@iovp. This means that
     1846             :  * the caller may call kfree() on *@iovp regardless of whether the small
     1847             :  * on-stack array was used or not (and regardless of whether this function
     1848             :  * returns an error or not).
    1849             :  *
    1850             :  * Return: Negative error code on error, bytes imported on success
    1851             :  */
    1852           0 : ssize_t import_iovec(int type, const struct iovec __user *uvec,
    1853             :                  unsigned nr_segs, unsigned fast_segs,
    1854             :                  struct iovec **iovp, struct iov_iter *i)
    1855             : {
    1856           0 :         return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
    1857             :                               in_compat_syscall());
    1858             : }
    1859             : EXPORT_SYMBOL(import_iovec);
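
/*
 * Editor's sketch (illustrative, not part of iov_iter.c): the
 * canonical import_iovec() pattern in a readv-style syscall.  @iov
 * starts as a small on-stack array; after the call it is either NULL
 * (the stack array was used) or a heap allocation, so the
 * unconditional kfree() is safe either way.  demo_readv() is
 * hypothetical; UIO_FASTIOV and kfree() come from <linux/uio.h> and
 * <linux/slab.h>.
 */
static ssize_t demo_readv(const struct iovec __user *uvec, unsigned int nr)
{
        struct iovec stack[UIO_FASTIOV], *iov = stack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(READ, uvec, nr, UIO_FASTIOV, &iov, &iter);
        if (ret < 0)
                return ret;
        /* ... fill &iter from the file or socket ... */
        kfree(iov);
        return ret;
}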
    1860             : 
    1861           0 : int import_single_range(int rw, void __user *buf, size_t len,
    1862             :                  struct iovec *iov, struct iov_iter *i)
    1863             : {
    1864           0 :         if (len > MAX_RW_COUNT)
    1865           0 :                 len = MAX_RW_COUNT;
    1866           0 :         if (unlikely(!access_ok(buf, len)))
    1867             :                 return -EFAULT;
    1868             : 
    1869           0 :         iov->iov_base = buf;
    1870           0 :         iov->iov_len = len;
    1871           0 :         iov_iter_init(i, rw, iov, 1, len);
    1872           0 :         return 0;
    1873             : }
    1874             : EXPORT_SYMBOL(import_single_range);
    1875             : 
    1876           0 : int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
    1877             : {
    1878           0 :         if (len > MAX_RW_COUNT)
    1879           0 :                 len = MAX_RW_COUNT;
    1880           0 :         if (unlikely(!access_ok(buf, len)))
    1881             :                 return -EFAULT;
    1882             : 
    1883           0 :         iov_iter_ubuf(i, rw, buf, len);
    1884           0 :         return 0;
    1885             : }
    1886             : 
    1887             : /**
    1888             :  * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
    1889             :  *     iov_iter_save_state() was called.
    1890             :  *
    1891             :  * @i: &struct iov_iter to restore
    1892             :  * @state: state to restore from
    1893             :  *
     1894             :  * Used after iov_iter_save_state() to restore @i, if operations may
     1895             :  * have advanced it.
     1896             :  *
     1897             :  * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC, and ITER_UBUF
    1898             :  */
    1899           0 : void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
    1900             : {
    1901           0 :         if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
    1902           0 :                          !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
    1903             :                 return;
    1904           0 :         i->iov_offset = state->iov_offset;
    1905           0 :         i->count = state->count;
    1906           0 :         if (iter_is_ubuf(i))
    1907             :                 return;
    1908             :         /*
    1909             :          * For the *vec iters, nr_segs + iov is constant - if we increment
    1910             :          * the vec, then we also decrement the nr_segs count. Hence we don't
     1911             :          * need to track both of these; just one is enough, and we can deduce
     1912             :          * the other from it. ITER_KVEC and ITER_IOVEC are the same struct
     1913             :          * size, so we can just increment the iov pointer as they are unionized.
    1914             :          * ITER_BVEC _may_ be the same size on some archs, but on others it is
    1915             :          * not. Be safe and handle it separately.
    1916             :          */
    1917             :         BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
    1918           0 :         if (iov_iter_is_bvec(i))
    1919           0 :                 i->bvec -= state->nr_segs - i->nr_segs;
    1920             :         else
    1921           0 :                 i->iov -= state->nr_segs - i->nr_segs;
    1922           0 :         i->nr_segs = state->nr_segs;
    1923             : }
    1924             : 
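/*
 * Editor's sketch (illustrative, not part of iov_iter.c): the
 * save/restore pairing.  A caller snapshots the iterator before an
 * operation that may partially consume it, then rolls back on a
 * retryable failure.  demo_do_io() is a hypothetical stand-in for the
 * real I/O step.
 */
static ssize_t demo_do_io(struct iov_iter *iter);       /* hypothetical */

static ssize_t demo_retryable(struct iov_iter *iter)
{
        struct iov_iter_state state;
        ssize_t ret;

        iov_iter_save_state(iter, &state);
        ret = demo_do_io(iter);                 /* advances iter */
        if (ret == -EAGAIN)
                iov_iter_restore(iter, &state); /* rewind for retry */
        return ret;
}
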
    1925             : /*
    1926             :  * Extract a list of contiguous pages from an ITER_XARRAY iterator.  This does not
    1927             :  * get references on the pages, nor does it get a pin on them.
    1928             :  */
    1929           0 : static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
    1930             :                                              struct page ***pages, size_t maxsize,
    1931             :                                              unsigned int maxpages,
    1932             :                                              iov_iter_extraction_t extraction_flags,
    1933             :                                              size_t *offset0)
    1934             : {
    1935             :         struct page *page, **p;
    1936           0 :         unsigned int nr = 0, offset;
    1937           0 :         loff_t pos = i->xarray_start + i->iov_offset;
    1938           0 :         pgoff_t index = pos >> PAGE_SHIFT;
    1939           0 :         XA_STATE(xas, i->xarray, index);
    1940             : 
    1941           0 :         offset = pos & ~PAGE_MASK;
    1942           0 :         *offset0 = offset;
    1943             : 
    1944           0 :         maxpages = want_pages_array(pages, maxsize, offset, maxpages);
    1945           0 :         if (!maxpages)
    1946             :                 return -ENOMEM;
    1947           0 :         p = *pages;
    1948             : 
    1949             :         rcu_read_lock();
    1950           0 :         for (page = xas_load(&xas); page; page = xas_next(&xas)) {
    1951           0 :                 if (xas_retry(&xas, page))
    1952           0 :                         continue;
    1953             : 
    1954             :                 /* Has the page moved or been split? */
    1955           0 :                 if (unlikely(page != xas_reload(&xas))) {
    1956           0 :                         xas_reset(&xas);
    1957           0 :                         continue;
    1958             :                 }
    1959             : 
    1960           0 :                 p[nr++] = find_subpage(page, xas.xa_index);
    1961           0 :                 if (nr == maxpages)
    1962             :                         break;
    1963             :         }
    1964             :         rcu_read_unlock();
    1965             : 
    1966           0 :         maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
    1967           0 :         iov_iter_advance(i, maxsize);
    1968           0 :         return maxsize;
    1969             : }
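
/*
 * Editor's note: a hedged sketch of how a caller might aim the xarray
 * extractor above at a file's page cache.  iov_iter_xarray() is the
 * real constructor; the wrapper below and its parameter names are
 * illustrative only.
 */
static void example_iter_over_pagecache(struct address_space *mapping,
                                        loff_t pos, size_t len,
                                        struct iov_iter *iter)
{
        /* describe [pos, pos + len) of the mapping's page cache as a source */
        iov_iter_xarray(iter, ITER_SOURCE, &mapping->i_pages, pos, len);
}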
    1970             : 
    1971             : /*
    1972             :  * Extract a list of contiguous pages from an ITER_BVEC iterator.  This does
    1973             :  * not get references on the pages, nor does it get a pin on them.
    1974             :  */
    1975           0 : static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
    1976             :                                            struct page ***pages, size_t maxsize,
    1977             :                                            unsigned int maxpages,
    1978             :                                            iov_iter_extraction_t extraction_flags,
    1979             :                                            size_t *offset0)
    1980             : {
    1981             :         struct page **p, *page;
    1982           0 :         size_t skip = i->iov_offset, offset;
    1983             :         int k;
    1984             : 
    1985             :         for (;;) {
    1986           0 :                 if (i->nr_segs == 0)
    1987             :                         return 0;
    1988           0 :                 maxsize = min(maxsize, i->bvec->bv_len - skip);
    1989           0 :                 if (maxsize)
    1990             :                         break;
    1991           0 :                 i->iov_offset = 0;
    1992           0 :                 i->nr_segs--;
    1993           0 :                 i->bvec++;
    1994           0 :                 skip = 0;
    1995             :         }
    1996             : 
    1997           0 :         skip += i->bvec->bv_offset;
    1998           0 :         page = i->bvec->bv_page + skip / PAGE_SIZE;
    1999           0 :         offset = skip % PAGE_SIZE;
    2000           0 :         *offset0 = offset;
    2001             : 
    2002           0 :         maxpages = want_pages_array(pages, maxsize, offset, maxpages);
    2003           0 :         if (!maxpages)
    2004             :                 return -ENOMEM;
    2005           0 :         p = *pages;
    2006           0 :         for (k = 0; k < maxpages; k++)
    2007           0 :                 p[k] = page + k;
    2008             : 
    2009           0 :         maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
    2010           0 :         iov_iter_advance(i, maxsize);
    2011           0 :         return maxsize;
    2012             : }
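
/*
 * Editor's note: an illustrative, hypothetical caller for the bvec path
 * above.  iov_iter_bvec() is the real constructor.  The skip arithmetic
 * above means that, with 4 KiB pages, a segment with bv_offset = 5000
 * starts in its second page (5000 / 4096 = 1) at byte 904
 * (5000 % 4096 = 904).
 */
static void example_iter_over_bvec(const struct bio_vec *bv,
                                   unsigned int nr_segs, size_t len,
                                   struct iov_iter *iter)
{
        /* ITER_DEST: the pages are a destination for incoming data */
        iov_iter_bvec(iter, ITER_DEST, bv, nr_segs, len);
}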
    2013             : 
    2014             : /*
    2015             :  * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
    2016             :  * This does not get references on the pages, nor does it get a pin on them.
    2017             :  */
    2018           0 : static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
    2019             :                                            struct page ***pages, size_t maxsize,
    2020             :                                            unsigned int maxpages,
    2021             :                                            iov_iter_extraction_t extraction_flags,
    2022             :                                            size_t *offset0)
    2023             : {
    2024             :         struct page **p, *page;
    2025             :         const void *kaddr;
    2026           0 :         size_t skip = i->iov_offset, offset, len;
    2027             :         int k;
    2028             : 
    2029             :         for (;;) {
    2030           0 :                 if (i->nr_segs == 0)
    2031             :                         return 0;
    2032           0 :                 maxsize = min(maxsize, i->kvec->iov_len - skip);
    2033           0 :                 if (maxsize)
    2034             :                         break;
    2035           0 :                 i->iov_offset = 0;
    2036           0 :                 i->nr_segs--;
    2037           0 :                 i->kvec++;
    2038           0 :                 skip = 0;
    2039             :         }
    2040             : 
    2041           0 :         kaddr = i->kvec->iov_base + skip;
    2042           0 :         offset = (unsigned long)kaddr & ~PAGE_MASK;
    2043           0 :         *offset0 = offset;
    2044             : 
    2045           0 :         maxpages = want_pages_array(pages, maxsize, offset, maxpages);
    2046           0 :         if (!maxpages)
    2047             :                 return -ENOMEM;
    2048           0 :         p = *pages;
    2049             : 
    2050           0 :         kaddr -= offset;
    2051           0 :         len = offset + maxsize;
    2052           0 :         for (k = 0; k < maxpages; k++) {
    2053           0 :                 size_t seg = min_t(size_t, len, PAGE_SIZE);
    2054             : 
    2055           0 :                 if (is_vmalloc_or_module_addr(kaddr))
    2056           0 :                         page = vmalloc_to_page(kaddr);
    2057             :                 else
    2058           0 :                         page = virt_to_page(kaddr);
    2059             : 
    2060           0 :                 p[k] = page;
    2061           0 :                 len -= seg;
    2062           0 :                 kaddr += PAGE_SIZE;
    2063             :         }
    2064             : 
    2065           0 :         maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
    2066           0 :         iov_iter_advance(i, maxsize);
    2067           0 :         return maxsize;
    2068             : }
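
/*
 * Editor's note: a minimal, hypothetical caller for the kvec path
 * above.  iov_iter_kvec() is the real constructor; vmalloc'd buffers
 * are acceptable here because the loop above translates each page with
 * vmalloc_to_page() rather than virt_to_page().
 */
static void example_iter_over_kbuf(void *kbuf, size_t len,
                                   struct kvec *kv, struct iov_iter *iter)
{
        kv->iov_base = kbuf;    /* may be kmalloc'd or vmalloc'd */
        kv->iov_len = len;
        iov_iter_kvec(iter, ITER_SOURCE, kv, 1, len);
}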
    2069             : 
    2070             : /*
    2071             :  * Extract a list of contiguous pages from a user iterator and get a pin on
    2072             :  * each of them.  This should only be used if the iterator is user-backed
    2073             :  * (ITER_IOVEC/ITER_UBUF).
    2074             :  *
    2075             :  * It does not get refs on the pages, but the pages must be unpinned by the
    2076             :  * caller once the transfer is complete.
    2077             :  *
    2078             :  * This is safe to use where background IO/DMA *is* going to be modifying
    2079             :  * the buffer; using a pin rather than a ref forces fork() to give the
    2080             :  * child a copy of the page.
    2081             :  */
    2082           0 : static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
    2083             :                                            struct page ***pages,
    2084             :                                            size_t maxsize,
    2085             :                                            unsigned int maxpages,
    2086             :                                            iov_iter_extraction_t extraction_flags,
    2087             :                                            size_t *offset0)
    2088             : {
    2089             :         unsigned long addr;
    2090           0 :         unsigned int gup_flags = 0;
    2091             :         size_t offset;
    2092             :         int res;
    2093             : 
    2094           0 :         if (i->data_source == ITER_DEST)
    2095           0 :                 gup_flags |= FOLL_WRITE;
    2096           0 :         if (extraction_flags & ITER_ALLOW_P2PDMA)
    2097           0 :                 gup_flags |= FOLL_PCI_P2PDMA;
    2098           0 :         if (i->nofault)
    2099           0 :                 gup_flags |= FOLL_NOFAULT;
    2100             : 
    2101           0 :         addr = first_iovec_segment(i, &maxsize);
    2102           0 :         *offset0 = offset = addr % PAGE_SIZE;
    2103           0 :         addr &= PAGE_MASK;
    2104           0 :         maxpages = want_pages_array(pages, maxsize, offset, maxpages);
    2105           0 :         if (!maxpages)
    2106             :                 return -ENOMEM;
    2107           0 :         res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
    2108           0 :         if (unlikely(res <= 0))
    2109           0 :                 return res;
    2110           0 :         maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
    2111           0 :         iov_iter_advance(i, maxsize);
    2112           0 :         return maxsize;
    2113             : }
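
/*
 * Editor's note: a hedged sketch of setting up the user-backed case
 * above.  iov_iter_ubuf() is the real single-segment constructor;
 * passing ITER_DEST marks the buffer as a read target, which is what
 * makes the code above request FOLL_WRITE.  The wrapper is hypothetical.
 */
static void example_iter_over_user_buf(void __user *ubuf, size_t len,
                                       struct iov_iter *iter)
{
        iov_iter_ubuf(iter, ITER_DEST, ubuf, len);
}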
    2114             : 
    2115             : /**
    2116             :  * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
    2117             :  * @i: The iterator to extract from
    2118             :  * @pages: Where to return the list of pages
    2119             :  * @maxsize: The maximum amount of iterator to extract
    2120             :  * @maxpages: The maximum size of the list of pages
    2121             :  * @extraction_flags: Flags to qualify request
    2122             :  * @offset0: Where to return the starting offset into (*@pages)[0]
    2123             :  *
    2124             :  * Extract a list of contiguous pages from the current point of the iterator,
    2125             :  * advancing the iterator.  The maximum number of pages and the maximum amount
    2126             :  * of page contents can be set.
    2127             :  *
    2128             :  * If *@pages is NULL, a page list will be allocated to the required size and
    2129             :  * *@pages will be set to its base.  If *@pages is not NULL, it will be assumed
    2130             :  * that the caller allocated a page list at least @maxpages in size and this
    2131             :  * will be filled in.
    2132             :  *
    2133             :  * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
    2134             :  * be allowed on the pages extracted.
    2135             :  *
    2136             :  * The iov_iter_extract_will_pin() function can be used to query how cleanup
    2137             :  * should be performed.
    2138             :  *
    2139             :  * Extra refs or pins on the pages may be obtained as follows:
    2140             :  *
    2141             :  *  (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
    2142             :  *      added to the pages, but refs will not be taken.
    2143             :  *      iov_iter_extract_will_pin() will return true.
    2144             :  *
    2145             :  *  (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
    2146             :  *      merely listed; no extra refs or pins are obtained.
    2147             :  *      iov_iter_extract_will_pin() will return false.
    2148             :  *
    2149             :  * Note also:
    2150             :  *
    2151             :  *  (*) Use with ITER_DISCARD is not supported as that has no content.
    2152             :  *
    2153             :  * On success, the function sets *@pages to the new pagelist, if allocated, and
    2154             :  * sets *offset0 to the offset into the first page.
    2155             :  *
    2156             :  * It may also return -ENOMEM or -EFAULT.
    2157             :  */
    2158           0 : ssize_t iov_iter_extract_pages(struct iov_iter *i,
    2159             :                                struct page ***pages,
    2160             :                                size_t maxsize,
    2161             :                                unsigned int maxpages,
    2162             :                                iov_iter_extraction_t extraction_flags,
    2163             :                                size_t *offset0)
    2164             : {
    2165           0 :         maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
    2166           0 :         if (!maxsize)
    2167             :                 return 0;
    2168             : 
    2169           0 :         if (likely(user_backed_iter(i)))
    2170           0 :                 return iov_iter_extract_user_pages(i, pages, maxsize,
    2171             :                                                    maxpages, extraction_flags,
    2172             :                                                    offset0);
    2173           0 :         if (iov_iter_is_kvec(i))
    2174           0 :                 return iov_iter_extract_kvec_pages(i, pages, maxsize,
    2175             :                                                    maxpages, extraction_flags,
    2176             :                                                    offset0);
    2177           0 :         if (iov_iter_is_bvec(i))
    2178           0 :                 return iov_iter_extract_bvec_pages(i, pages, maxsize,
    2179             :                                                    maxpages, extraction_flags,
    2180             :                                                    offset0);
    2181           0 :         if (iov_iter_is_xarray(i))
    2182           0 :                 return iov_iter_extract_xarray_pages(i, pages, maxsize,
    2183             :                                                      maxpages, extraction_flags,
    2184             :                                                      offset0);
    2185             :         return -EFAULT;
    2186             : }
    2187             : EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
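
/*
 * Editor's note: a minimal, hypothetical end-to-end caller showing the
 * cleanup contract documented above: pages come back pinned only for
 * user-backed iterators, so consult iov_iter_extract_will_pin() before
 * releasing them.  The APIs called are real; freeing the page array
 * with kvfree() is an assumption that is safe for both kmalloc'd and
 * vmalloc'd allocations.
 */
static ssize_t example_extract_and_release(struct iov_iter *iter, size_t len)
{
        struct page **pages = NULL;     /* ask the helper to allocate the array */
        size_t offset;
        ssize_t n;

        n = iov_iter_extract_pages(iter, &pages, len, INT_MAX, 0, &offset);
        if (n > 0) {
                /* ... hand the extracted pages to DMA here ... */
                if (iov_iter_extract_will_pin(iter))
                        unpin_user_pages(pages,
                                         DIV_ROUND_UP(offset + n, PAGE_SIZE));
        }
        kvfree(pages);                  /* kvfree(NULL) is a no-op */
        return n;
}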

Generated by: LCOV version 1.14