/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *      Berkeley style UIO structures   -       Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
        void *iov_base; /* and that should *never* hold a userland pointer */
        size_t iov_len;
};

enum iter_type {
        /* iter types */
        ITER_IOVEC,
        ITER_KVEC,
        ITER_BVEC,
        ITER_PIPE,
        ITER_XARRAY,
        ITER_DISCARD,
        ITER_UBUF,
};

#define ITER_SOURCE     1       // == WRITE
#define ITER_DEST       0       // == READ

struct iov_iter_state {
        size_t iov_offset;
        size_t count;
        unsigned long nr_segs;
};

struct iov_iter {
        u8 iter_type;
        bool copy_mc;
        bool nofault;
        bool data_source;
        bool user_backed;
        union {
                size_t iov_offset;
                int last_offset;
        };
        /*
         * Hack alert: overlay ubuf_iovec with iovec + count, so
         * that the members resolve correctly regardless of the type
         * of iterator used. This means that you can use:
         *
         * &iter->__ubuf_iovec or iter->__iov
         *
         * interchangeably for the user_backed cases, hence simplifying
         * some of the cases that need to deal with both.
         */
        union {
                /*
                 * This really should be a const, but we cannot do that without
                 * also modifying any of the zero-filling iter init functions.
                 * Leave it non-const for now, but it should be treated as such.
                 */
                struct iovec __ubuf_iovec;
                struct {
                        union {
                                /* use iter_iov() to get the current vec */
                                const struct iovec *__iov;
                                const struct kvec *kvec;
                                const struct bio_vec *bvec;
                                struct xarray *xarray;
                                struct pipe_inode_info *pipe;
                                void __user *ubuf;
                        };
                        size_t count;
                };
        };
        union {
                unsigned long nr_segs;
                struct {
                        unsigned int head;
                        unsigned int start_head;
                };
                loff_t xarray_start;
        };
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
        if (iter->iter_type == ITER_UBUF)
                return (const struct iovec *) &iter->__ubuf_iovec;
        return iter->__iov;
}

#define iter_iov_addr(iter)     (iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)      (iter_iov(iter)->iov_len - (iter)->iov_offset)
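
/*
 * Illustrative sketch (not part of this header): how a consumer might
 * inspect the current segment through the accessors above. "dump_segment"
 * is a hypothetical helper name.
 *
 *      static void dump_segment(const struct iov_iter *i)
 *      {
 *              pr_debug("segment: base=%p len=%zu\n",
 *                       iter_iov_addr(i), iter_iov_len(i));
 *      }
 *
 * For ITER_UBUF/ITER_IOVEC the address is a userspace pointer and must
 * not be dereferenced directly.
 */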

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
        return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
                                       struct iov_iter_state *state)
{
        state->iov_offset = iter->iov_offset;
        state->count = iter->count;
        state->nr_segs = iter->nr_segs;
}
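
/*
 * Illustrative sketch of the usual pairing with iov_iter_restore()
 * (declared further down): snapshot the iterator before an operation that
 * may consume part of it, and rewind on failure so the operation can be
 * retried. "do_transfer" is a hypothetical callee.
 *
 *      struct iov_iter_state state;
 *      ssize_t ret;
 *
 *      iov_iter_save_state(iter, &state);
 *      ret = do_transfer(iter);
 *      if (ret == -EAGAIN)
 *              iov_iter_restore(iter, &state);
 */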

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
        return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
        return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
        unsigned long seg;
        size_t ret = 0;

        for (seg = 0; seg < nr_segs; seg++)
                ret += iov[seg].iov_len;
        return ret;
}
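
/*
 * Illustrative sketch of the validation the note above asks for, assuming
 * MAX_RW_COUNT (from <linux/fs.h>) as the cap; import_iovec() below performs
 * this kind of checking for iovec arrays coming from userspace:
 *
 *      size_t total = 0;
 *
 *      for (seg = 0; seg < nr_segs; seg++) {
 *              if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *                      return -EINVAL;
 *              total += iov[seg].iov_len;
 *      }
 */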

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
                                  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
                size_t bytes, struct iov_iter *i)
{
        return copy_page_to_iter(&folio->page, offset, bytes, i);
}
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
                                 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (check_copy_size(addr, bytes, true))
                return _copy_to_iter(addr, bytes, i);
        return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (check_copy_size(addr, bytes, false))
                return _copy_from_iter(addr, bytes, i);
        return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        size_t copied = copy_from_iter(addr, bytes, i);
        if (likely(copied == bytes))
                return true;
        iov_iter_revert(i, copied);
        return false;
}
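
/*
 * Illustrative sketch: the _full variant gives all-or-nothing semantics.
 * A caller pulling a fixed-size header out of an iterator might do
 * ("struct my_hdr" is a hypothetical type):
 *
 *      struct my_hdr hdr;
 *
 *      if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *              return -EFAULT;
 *
 * On failure the iterator has already been reverted to its prior position.
 */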

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (check_copy_size(addr, bytes, false))
                return _copy_from_iter_nocache(addr, bytes, i);
        return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        size_t copied = copy_from_iter_nocache(addr, bytes, i);
        if (likely(copied == bytes))
                return true;
        iov_iter_revert(i, copied);
        return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on _copy_from_iter_flushcache() having
 * stricter semantics than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
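
/*
 * Illustrative sketch of the IS_ENABLED() check described above, for a
 * hypothetical pmem-style user that needs the flush-on-return guarantee:
 *
 *      if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *              return -EOPNOTSUPP;     // or flush the destination manually
 *      copied = _copy_from_iter_flushcache(addr, bytes, iter);
 */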

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
static inline void iov_iter_set_copy_mc(struct iov_iter *i)
{
        i->copy_mc = true;
}

static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
        return i->copy_mc;
}
#else
#define _copy_mc_to_iter _copy_to_iter
static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
        return false;
}
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
                        unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
                        size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
                     loff_t start, size_t count);
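
/*
 * Illustrative sketch: constructing a kernel-backed iterator over a single
 * buffer, a common pattern around ->read_iter()-style interfaces ("buf"
 * and "len" are hypothetical locals):
 *
 *      struct kvec kv = { .iov_base = buf, .iov_len = len };
 *      struct iov_iter iter;
 *
 *      iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *
 * "iter" may now be passed to any code that consumes an iov_iter.
 */
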
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
                size_t maxsize, unsigned maxpages, size_t *start,
                iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
                        size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                struct page ***pages, size_t maxsize, size_t *start,
                iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
        return i->count;
}

/*
 * Cap the iov_iter at the given limit; note that the second argument is
 * *not* the new size - it is an upper limit on it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
        /*
         * count doesn't have to fit in size_t - the comparison extends both
         * operands to u64 here, and any value that would be truncated by
         * the conversion in the assignment is by definition greater than all
         * values of size_t, including the old i->count.
         */
        if (i->count > count)
                i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no more than how
 * much we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
        i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
        size_t shorted = 0;
        int npages;

        if (iov_iter_count(i) > max_bytes) {
                shorted = iov_iter_count(i) - max_bytes;
                iov_iter_truncate(i, max_bytes);
        }
        npages = iov_iter_npages(i, maxpages);
        if (shorted)
                iov_iter_reexpand(i, iov_iter_count(i) + shorted);

        return npages;
}

struct csum_state {
        __wsum csum;
        size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
                                  __wsum *csum, struct iov_iter *i)
{
        size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
        if (likely(copied == bytes))
                return true;
        iov_iter_revert(i, copied);
        return false;
}
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
                struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
                unsigned long nr_segs, unsigned long fast_segs,
                struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
                 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
                 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
                 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
                 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
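
/*
 * Illustrative sketch of the usual import_iovec() calling convention: a
 * small stack array covers the fast path, and on success the (possibly
 * heap-allocated) iovec array returned via *iovp must be freed by the
 * caller ("uvec" and "nr_segs" are hypothetical parameters):
 *
 *      struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *      struct iov_iter iter;
 *      ssize_t ret;
 *
 *      ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV,
 *                         &iov, &iter);
 *      if (ret < 0)
 *              return ret;
 *      // ... use "iter"; "ret" holds the total byte count ...
 *      kfree(iov);
 */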

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
                        void __user *buf, size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter) {
                .iter_type = ITER_UBUF,
                .copy_mc = false,
                .user_backed = true,
                .data_source = direction,
                .ubuf = buf,
                .count = count,
                .nr_segs = 1
        };
}
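
/*
 * Illustrative sketch: the single-buffer fast path. An ITER_UBUF iterator
 * stands in for a one-segment ITER_IOVEC ("user_buf" is a hypothetical
 * void __user pointer):
 *
 *      struct iov_iter iter;
 *
 *      iov_iter_ubuf(&iter, ITER_SOURCE, user_buf, len);
 *
 * ITER_SOURCE marks the iterator as the data source, i.e. data will be
 * copied *from* user_buf.
 */
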
/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA       ((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
                               size_t maxsize, unsigned int maxpages,
                               iov_iter_extraction_t extraction_flags,
                               size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and return true or false to indicate how, if at all,
 * pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO so that fork()
 * forcibly copies the page for the child (the parent must retain the
 * original page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
        return user_backed_iter(iter);
}
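
/*
 * Illustrative sketch: releasing pages obtained from
 * iov_iter_extract_pages() once I/O completes; only user-backed iterators
 * take a pin that must be dropped ("pages"/"npages" are hypothetical):
 *
 *      if (iov_iter_extract_will_pin(iter))
 *              while (npages--)
 *                      unpin_user_page(pages[npages]);
 */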

#endif
