LCOV - code coverage report
Current view:  top level - block - blk-map.c  (source / functions)
Test:          coverage.info                  Lines:     0 / 331 = 0.0 %
Date:          2023-03-27 20:00:47            Functions: 0 /  20 = 0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Functions related to mapping data to requests
       4             :  */
       5             : #include <linux/kernel.h>
       6             : #include <linux/sched/task_stack.h>
       7             : #include <linux/module.h>
       8             : #include <linux/bio.h>
       9             : #include <linux/blkdev.h>
      10             : #include <linux/uio.h>
      11             : 
      12             : #include "blk.h"
      13             : 
      14             : struct bio_map_data {
      15             :         bool is_our_pages : 1;
      16             :         bool is_null_mapped : 1;
      17             :         struct iov_iter iter;
      18             :         struct iovec iov[];
      19             : };
      20             : 
      21           0 : static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
      22             :                                                gfp_t gfp_mask)
      23             : {
      24             :         struct bio_map_data *bmd;
      25             : 
      26           0 :         if (data->nr_segs > UIO_MAXIOV)
      27             :                 return NULL;
      28             : 
      29           0 :         bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
      30           0 :         if (!bmd)
      31             :                 return NULL;
      32           0 :         memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
      33           0 :         bmd->iter = *data;
      34           0 :         if (iter_is_iovec(data))
      35           0 :                 bmd->iter.iov = bmd->iov;
      36             :         return bmd;
      37             : }
      38             : 
      39             : /**
      40             :  * bio_copy_from_iter - copy all pages from iov_iter to bio
      41             :  * @bio: The &struct bio which describes the I/O as destination
      42             :  * @iter: iov_iter as source
      43             :  *
      44             :  * Copy all pages from iov_iter to bio.
      45             :  * Returns 0 on success, or error on failure.
      46             :  */
      47           0 : static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
      48             : {
      49             :         struct bio_vec *bvec;
      50             :         struct bvec_iter_all iter_all;
      51             : 
      52           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
      53             :                 ssize_t ret;
      54             : 
      55           0 :                 ret = copy_page_from_iter(bvec->bv_page,
      56           0 :                                           bvec->bv_offset,
      57           0 :                                           bvec->bv_len,
      58             :                                           iter);
      59             : 
      60           0 :                 if (!iov_iter_count(iter))
      61             :                         break;
      62             : 
      63           0 :                 if (ret < bvec->bv_len)
      64             :                         return -EFAULT;
      65             :         }
      66             : 
      67             :         return 0;
      68             : }
      69             : 
      70             : /**
      71             :  * bio_copy_to_iter - copy all pages from bio to iov_iter
      72             :  * @bio: The &struct bio which describes the I/O as source
      73             :  * @iter: iov_iter as destination
      74             :  *
      75             :  * Copy all pages from bio to iov_iter.
      76             :  * Returns 0 on success, or error on failure.
      77             :  */
      78           0 : static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
      79             : {
      80             :         struct bio_vec *bvec;
      81             :         struct bvec_iter_all iter_all;
      82             : 
      83           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
      84             :                 ssize_t ret;
      85             : 
      86           0 :                 ret = copy_page_to_iter(bvec->bv_page,
      87           0 :                                         bvec->bv_offset,
      88           0 :                                         bvec->bv_len,
      89             :                                         &iter);
      90             : 
      91           0 :                 if (!iov_iter_count(&iter))
      92             :                         break;
      93             : 
      94           0 :                 if (ret < bvec->bv_len)
      95             :                         return -EFAULT;
      96             :         }
      97             : 
      98             :         return 0;
      99             : }
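
/*
 * Illustrative sketch (not part of blk-map.c): both copy helpers above are
 * built on the same bio_for_each_segment_all() walk over the pages attached
 * to a bio.  A minimal example of that pattern, here only summing the bytes
 * held by each segment; the helper name is illustrative.
 */
static unsigned int example_bio_bytes(struct bio *bio)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;
        unsigned int bytes = 0;

        bio_for_each_segment_all(bvec, bio, iter_all)
                bytes += bvec->bv_len;

        return bytes;
}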
     100             : 
     101             : /**
     102             :  *      bio_uncopy_user -       finish previously mapped bio
     103             :  *      @bio: bio being terminated
     104             :  *
     105             :  *      Free pages allocated from bio_copy_user_iov() and write back data
     106             :  *      to user space in case of a read.
     107             :  */
     108           0 : static int bio_uncopy_user(struct bio *bio)
     109             : {
     110           0 :         struct bio_map_data *bmd = bio->bi_private;
     111           0 :         int ret = 0;
     112             : 
     113           0 :         if (!bmd->is_null_mapped) {
     114             :                 /*
     115             :                  * if we're in a workqueue, the request is orphaned, so
     116             :                  * don't copy into a random user address space, just free
     117             :                  * and return -EINTR so user space doesn't expect any data.
     118             :                  */
     119           0 :                 if (!current->mm)
     120             :                         ret = -EINTR;
     121           0 :                 else if (bio_data_dir(bio) == READ)
     122           0 :                         ret = bio_copy_to_iter(bio, bmd->iter);
     123           0 :                 if (bmd->is_our_pages)
     124           0 :                         bio_free_pages(bio);
     125             :         }
     126           0 :         kfree(bmd);
     127           0 :         return ret;
     128             : }
     129             : 
     130           0 : static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
     131             :                 struct iov_iter *iter, gfp_t gfp_mask)
     132             : {
     133             :         struct bio_map_data *bmd;
     134             :         struct page *page;
     135             :         struct bio *bio;
     136           0 :         int i = 0, ret;
     137             :         int nr_pages;
     138           0 :         unsigned int len = iter->count;
     139           0 :         unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
     140             : 
     141           0 :         bmd = bio_alloc_map_data(iter, gfp_mask);
     142           0 :         if (!bmd)
     143             :                 return -ENOMEM;
     144             : 
     145             :         /*
     146             :          * We need to do a deep copy of the iov_iter including the iovecs.
      147             :          * The caller-provided iov might point to an on-stack or otherwise
      148             :          * short-lived one.
     149             :          */
     150           0 :         bmd->is_our_pages = !map_data;
     151           0 :         bmd->is_null_mapped = (map_data && map_data->null_mapped);
     152             : 
     153           0 :         nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
     154             : 
     155           0 :         ret = -ENOMEM;
     156           0 :         bio = bio_kmalloc(nr_pages, gfp_mask);
     157           0 :         if (!bio)
     158             :                 goto out_bmd;
     159           0 :         bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
     160             : 
     161           0 :         if (map_data) {
     162           0 :                 nr_pages = 1U << map_data->page_order;
     163           0 :                 i = map_data->offset / PAGE_SIZE;
     164             :         }
     165           0 :         while (len) {
     166           0 :                 unsigned int bytes = PAGE_SIZE;
     167             : 
     168           0 :                 bytes -= offset;
     169             : 
     170           0 :                 if (bytes > len)
     171           0 :                         bytes = len;
     172             : 
     173           0 :                 if (map_data) {
     174           0 :                         if (i == map_data->nr_entries * nr_pages) {
     175             :                                 ret = -ENOMEM;
     176             :                                 goto cleanup;
     177             :                         }
     178             : 
     179           0 :                         page = map_data->pages[i / nr_pages];
     180           0 :                         page += (i % nr_pages);
     181             : 
     182           0 :                         i++;
     183             :                 } else {
     184           0 :                         page = alloc_page(GFP_NOIO | gfp_mask);
     185           0 :                         if (!page) {
     186             :                                 ret = -ENOMEM;
     187             :                                 goto cleanup;
     188             :                         }
     189             :                 }
     190             : 
     191           0 :                 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
     192           0 :                         if (!map_data)
     193           0 :                                 __free_page(page);
     194             :                         break;
     195             :                 }
     196             : 
     197           0 :                 len -= bytes;
     198           0 :                 offset = 0;
     199             :         }
     200             : 
     201           0 :         if (map_data)
     202           0 :                 map_data->offset += bio->bi_iter.bi_size;
     203             : 
     204             :         /*
     205             :          * success
     206             :          */
     207           0 :         if ((iov_iter_rw(iter) == WRITE &&
     208           0 :              (!map_data || !map_data->null_mapped)) ||
     209           0 :             (map_data && map_data->from_user)) {
     210           0 :                 ret = bio_copy_from_iter(bio, iter);
     211           0 :                 if (ret)
     212             :                         goto cleanup;
     213             :         } else {
     214           0 :                 if (bmd->is_our_pages)
     215           0 :                         zero_fill_bio(bio);
     216           0 :                 iov_iter_advance(iter, bio->bi_iter.bi_size);
     217             :         }
     218             : 
     219           0 :         bio->bi_private = bmd;
     220             : 
     221           0 :         ret = blk_rq_append_bio(rq, bio);
     222           0 :         if (ret)
     223             :                 goto cleanup;
     224             :         return 0;
     225             : cleanup:
     226           0 :         if (!map_data)
     227           0 :                 bio_free_pages(bio);
     228           0 :         bio_uninit(bio);
     229           0 :         kfree(bio);
     230             : out_bmd:
     231           0 :         kfree(bmd);
     232           0 :         return ret;
     233             : }
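
/*
 * Hedged usage sketch (not part of blk-map.c): a caller that owns
 * pre-allocated pages, for example a reserve buffer exposed via mmap(),
 * describes them with struct rq_map_data so that the copy path above fills
 * those pages instead of allocating its own.  The helper name and all
 * parameters are illustrative.
 */
static int example_map_into_reserve(struct request_queue *q,
                                    struct request *rq,
                                    struct page **reserve_pages,
                                    unsigned short nr_reserve_pages,
                                    void __user *ubuf, unsigned long len)
{
        struct rq_map_data mdata = {
                .pages          = reserve_pages,        /* caller-owned pages */
                .page_order     = 0,                    /* order-0 pages */
                .nr_entries     = nr_reserve_pages,
                .offset         = 0,
        };

        /* map_data != NULL steers blk_rq_map_user() onto bio_copy_user_iov() */
        return blk_rq_map_user(q, rq, &mdata, ubuf, len, GFP_KERNEL);
}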
     234             : 
     235           0 : static void blk_mq_map_bio_put(struct bio *bio)
     236             : {
     237           0 :         if (bio->bi_opf & REQ_ALLOC_CACHE) {
     238           0 :                 bio_put(bio);
     239             :         } else {
     240           0 :                 bio_uninit(bio);
     241           0 :                 kfree(bio);
     242             :         }
     243           0 : }
     244             : 
     245           0 : static struct bio *blk_rq_map_bio_alloc(struct request *rq,
     246             :                 unsigned int nr_vecs, gfp_t gfp_mask)
     247             : {
     248             :         struct bio *bio;
     249             : 
     250           0 :         if (rq->cmd_flags & REQ_ALLOC_CACHE) {
     251           0 :                 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
     252             :                                         &fs_bio_set);
     253           0 :                 if (!bio)
     254             :                         return NULL;
     255             :         } else {
     256           0 :                 bio = bio_kmalloc(nr_vecs, gfp_mask);
     257           0 :                 if (!bio)
     258             :                         return NULL;
     259           0 :                 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
     260             :         }
     261             :         return bio;
     262             : }
     263             : 
     264           0 : static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
     265             :                 gfp_t gfp_mask)
     266             : {
     267           0 :         iov_iter_extraction_t extraction_flags = 0;
     268           0 :         unsigned int max_sectors = queue_max_hw_sectors(rq->q);
     269           0 :         unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
     270             :         struct bio *bio;
     271             :         int ret;
     272             :         int j;
     273             : 
     274           0 :         if (!iov_iter_count(iter))
     275             :                 return -EINVAL;
     276             : 
     277           0 :         bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
     278           0 :         if (bio == NULL)
     279             :                 return -ENOMEM;
     280             : 
     281           0 :         if (blk_queue_pci_p2pdma(rq->q))
     282           0 :                 extraction_flags |= ITER_ALLOW_P2PDMA;
     283             : 
     284           0 :         while (iov_iter_count(iter)) {
     285             :                 struct page **pages, *stack_pages[UIO_FASTIOV];
     286             :                 ssize_t bytes;
     287             :                 size_t offs;
     288             :                 int npages;
     289             : 
     290           0 :                 if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
     291           0 :                         pages = stack_pages;
     292           0 :                         bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
     293             :                                                    nr_vecs, &offs, extraction_flags);
     294             :                 } else {
     295           0 :                         bytes = iov_iter_get_pages_alloc(iter, &pages,
     296             :                                                 LONG_MAX, &offs, extraction_flags);
     297             :                 }
     298           0 :                 if (unlikely(bytes <= 0)) {
     299           0 :                         ret = bytes ? bytes : -EFAULT;
     300           0 :                         goto out_unmap;
     301             :                 }
     302             : 
     303           0 :                 npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
     304             : 
     305           0 :                 if (unlikely(offs & queue_dma_alignment(rq->q)))
     306             :                         j = 0;
     307             :                 else {
     308           0 :                         for (j = 0; j < npages; j++) {
     309           0 :                                 struct page *page = pages[j];
     310           0 :                                 unsigned int n = PAGE_SIZE - offs;
     311           0 :                                 bool same_page = false;
     312             : 
     313           0 :                                 if (n > bytes)
     314           0 :                                         n = bytes;
     315             : 
     316           0 :                                 if (!bio_add_hw_page(rq->q, bio, page, n, offs,
     317             :                                                      max_sectors, &same_page)) {
     318           0 :                                         if (same_page)
     319           0 :                                                 put_page(page);
     320           0 :                                         break;
     321             :                                 }
     322             : 
     323           0 :                                 bytes -= n;
     324           0 :                                 offs = 0;
     325             :                         }
     326             :                 }
     327             :                 /*
     328             :                  * release the pages we didn't map into the bio, if any
     329             :                  */
     330           0 :                 while (j < npages)
     331           0 :                         put_page(pages[j++]);
     332           0 :                 if (pages != stack_pages)
     333           0 :                         kvfree(pages);
     334             :                 /* couldn't stuff something into bio? */
     335           0 :                 if (bytes) {
     336           0 :                         iov_iter_revert(iter, bytes);
     337           0 :                         break;
     338             :                 }
     339             :         }
     340             : 
     341           0 :         ret = blk_rq_append_bio(rq, bio);
     342           0 :         if (ret)
     343             :                 goto out_unmap;
     344             :         return 0;
     345             : 
     346             :  out_unmap:
     347           0 :         bio_release_pages(bio, false);
     348           0 :         blk_mq_map_bio_put(bio);
     349           0 :         return ret;
     350             : }
     351             : 
     352             : static void bio_invalidate_vmalloc_pages(struct bio *bio)
     353             : {
     354             : #ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
     355             :         if (bio->bi_private && !op_is_write(bio_op(bio))) {
     356             :                 unsigned long i, len = 0;
     357             : 
     358             :                 for (i = 0; i < bio->bi_vcnt; i++)
     359             :                         len += bio->bi_io_vec[i].bv_len;
     360             :                 invalidate_kernel_vmap_range(bio->bi_private, len);
     361             :         }
     362             : #endif
     363             : }
     364             : 
     365           0 : static void bio_map_kern_endio(struct bio *bio)
     366             : {
     367           0 :         bio_invalidate_vmalloc_pages(bio);
     368           0 :         bio_uninit(bio);
     369           0 :         kfree(bio);
     370           0 : }
     371             : 
     372             : /**
     373             :  *      bio_map_kern    -       map kernel address into bio
     374             :  *      @q: the struct request_queue for the bio
     375             :  *      @data: pointer to buffer to map
     376             :  *      @len: length in bytes
     377             :  *      @gfp_mask: allocation flags for bio allocation
     378             :  *
      379             :  *      Map the kernel address into a bio suitable for I/O to a block
     380             :  *      device. Returns an error pointer in case of error.
     381             :  */
     382           0 : static struct bio *bio_map_kern(struct request_queue *q, void *data,
     383             :                 unsigned int len, gfp_t gfp_mask)
     384             : {
     385           0 :         unsigned long kaddr = (unsigned long)data;
     386           0 :         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
     387           0 :         unsigned long start = kaddr >> PAGE_SHIFT;
     388           0 :         const int nr_pages = end - start;
     389           0 :         bool is_vmalloc = is_vmalloc_addr(data);
     390             :         struct page *page;
     391             :         int offset, i;
     392             :         struct bio *bio;
     393             : 
     394           0 :         bio = bio_kmalloc(nr_pages, gfp_mask);
     395           0 :         if (!bio)
     396             :                 return ERR_PTR(-ENOMEM);
     397           0 :         bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
     398             : 
     399           0 :         if (is_vmalloc) {
     400           0 :                 flush_kernel_vmap_range(data, len);
     401           0 :                 bio->bi_private = data;
     402             :         }
     403             : 
     404           0 :         offset = offset_in_page(kaddr);
     405           0 :         for (i = 0; i < nr_pages; i++) {
     406           0 :                 unsigned int bytes = PAGE_SIZE - offset;
     407             : 
     408           0 :                 if (len <= 0)
     409             :                         break;
     410             : 
     411           0 :                 if (bytes > len)
     412           0 :                         bytes = len;
     413             : 
     414           0 :                 if (!is_vmalloc)
     415           0 :                         page = virt_to_page(data);
     416             :                 else
     417           0 :                         page = vmalloc_to_page(data);
     418           0 :                 if (bio_add_pc_page(q, bio, page, bytes,
     419             :                                     offset) < bytes) {
     420             :                         /* we don't support partial mappings */
     421           0 :                         bio_uninit(bio);
     422           0 :                         kfree(bio);
     423           0 :                         return ERR_PTR(-EINVAL);
     424             :                 }
     425             : 
     426           0 :                 data += bytes;
     427           0 :                 len -= bytes;
     428           0 :                 offset = 0;
     429             :         }
     430             : 
     431           0 :         bio->bi_end_io = bio_map_kern_endio;
     432           0 :         return bio;
     433             : }
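
/*
 * Worked example for the start/end arithmetic above (illustrative numbers,
 * 4 KiB pages assumed): a 6000-byte buffer that begins 512 bytes into a
 * page touches exactly two pages, so two vectors are reserved for the bio.
 *
 *      kaddr    = base + 512                    ("base" page aligned)
 *      end      = (kaddr + 6000 + 4095) >> 12   = base/4096 + 2
 *      start    = kaddr >> 12                   = base/4096
 *      nr_pages = end - start                   = 2
 */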
     434             : 
     435           0 : static void bio_copy_kern_endio(struct bio *bio)
     436             : {
     437           0 :         bio_free_pages(bio);
     438           0 :         bio_uninit(bio);
     439           0 :         kfree(bio);
     440           0 : }
     441             : 
     442           0 : static void bio_copy_kern_endio_read(struct bio *bio)
     443             : {
     444           0 :         char *p = bio->bi_private;
     445             :         struct bio_vec *bvec;
     446             :         struct bvec_iter_all iter_all;
     447             : 
     448           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
     449           0 :                 memcpy_from_bvec(p, bvec);
     450           0 :                 p += bvec->bv_len;
     451             :         }
     452             : 
     453           0 :         bio_copy_kern_endio(bio);
     454           0 : }
     455             : 
     456             : /**
     457             :  *      bio_copy_kern   -       copy kernel address into bio
     458             :  *      @q: the struct request_queue for the bio
     459             :  *      @data: pointer to buffer to copy
     460             :  *      @len: length in bytes
     461             :  *      @gfp_mask: allocation flags for bio and page allocation
     462             :  *      @reading: data direction is READ
     463             :  *
      464             :  *      Copy the kernel address into a bio suitable for I/O to a block
     465             :  *      device. Returns an error pointer in case of error.
     466             :  */
     467           0 : static struct bio *bio_copy_kern(struct request_queue *q, void *data,
     468             :                 unsigned int len, gfp_t gfp_mask, int reading)
     469             : {
     470           0 :         unsigned long kaddr = (unsigned long)data;
     471           0 :         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
     472           0 :         unsigned long start = kaddr >> PAGE_SHIFT;
     473             :         struct bio *bio;
     474           0 :         void *p = data;
     475           0 :         int nr_pages = 0;
     476             : 
     477             :         /*
     478             :          * Overflow, abort
     479             :          */
     480           0 :         if (end < start)
     481             :                 return ERR_PTR(-EINVAL);
     482             : 
     483           0 :         nr_pages = end - start;
     484           0 :         bio = bio_kmalloc(nr_pages, gfp_mask);
     485           0 :         if (!bio)
     486             :                 return ERR_PTR(-ENOMEM);
     487           0 :         bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
     488             : 
     489           0 :         while (len) {
     490             :                 struct page *page;
     491           0 :                 unsigned int bytes = PAGE_SIZE;
     492             : 
     493           0 :                 if (bytes > len)
     494           0 :                         bytes = len;
     495             : 
     496           0 :                 page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
     497           0 :                 if (!page)
     498             :                         goto cleanup;
     499             : 
     500           0 :                 if (!reading)
     501           0 :                         memcpy(page_address(page), p, bytes);
     502             : 
     503           0 :                 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
     504             :                         break;
     505             : 
     506           0 :                 len -= bytes;
     507           0 :                 p += bytes;
     508             :         }
     509             : 
     510           0 :         if (reading) {
     511           0 :                 bio->bi_end_io = bio_copy_kern_endio_read;
     512           0 :                 bio->bi_private = data;
     513             :         } else {
     514           0 :                 bio->bi_end_io = bio_copy_kern_endio;
     515             :         }
     516             : 
     517             :         return bio;
     518             : 
     519             : cleanup:
     520           0 :         bio_free_pages(bio);
     521           0 :         bio_uninit(bio);
     522           0 :         kfree(bio);
     523           0 :         return ERR_PTR(-ENOMEM);
     524             : }
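
/*
 * Hedged sketch (not part of blk-map.c): one case that forces
 * blk_rq_map_kern() (at the end of this file) onto the copy path above is
 * an on-stack buffer, which blk_rq_map_kern() detects with
 * object_is_on_stack() and refuses to map in place.  The helper name is
 * illustrative and error handling is abbreviated; the buffer stays valid
 * until blk_execute_rq() returns, which matters for the read-back done by
 * bio_copy_kern_endio_read().
 */
static int example_transfer_stack_buffer(struct request_queue *q,
                                         struct request *rq)
{
        char cmd_buf[64] = { };         /* on-stack, so it cannot be mapped */
        int ret;

        ret = blk_rq_map_kern(q, rq, cmd_buf, sizeof(cmd_buf), GFP_KERNEL);
        if (!ret)
                blk_execute_rq(rq, false);   /* data bounced via bio_copy_kern() */
        return ret;
}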
     525             : 
     526             : /*
     527             :  * Append a bio to a passthrough request.  Only works if the bio can be merged
     528             :  * into the request based on the driver constraints.
     529             :  */
     530           0 : int blk_rq_append_bio(struct request *rq, struct bio *bio)
     531             : {
     532             :         struct bvec_iter iter;
     533             :         struct bio_vec bv;
     534           0 :         unsigned int nr_segs = 0;
     535             : 
     536           0 :         bio_for_each_bvec(bv, bio, iter)
     537           0 :                 nr_segs++;
     538             : 
     539           0 :         if (!rq->bio) {
     540             :                 blk_rq_bio_prep(rq, bio, nr_segs);
     541             :         } else {
     542           0 :                 if (!ll_back_merge_fn(rq, bio, nr_segs))
     543             :                         return -EINVAL;
     544           0 :                 rq->biotail->bi_next = bio;
     545           0 :                 rq->biotail = bio;
     546           0 :                 rq->__data_len += (bio)->bi_iter.bi_size;
     547           0 :                 bio_crypt_free_ctx(bio);
     548             :         }
     549             : 
     550             :         return 0;
     551             : }
     552             : EXPORT_SYMBOL(blk_rq_append_bio);
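
/*
 * Hedged sketch (not part of blk-map.c): a caller assembling its own bio,
 * in the same way the helpers above do, and attaching it to a passthrough
 * request.  If blk_rq_append_bio() fails, the bio could not be merged
 * within the queue limits and the caller still owns it.  The helper name
 * is illustrative.
 */
static int example_append_data_page(struct request *rq, struct page *page,
                                    unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;
        int ret;

        bio = bio_kmalloc(1, gfp_mask);
        if (!bio)
                return -ENOMEM;
        bio_init(bio, NULL, bio->bi_inline_vecs, 1, req_op(rq));

        ret = -EINVAL;
        if (bio_add_pc_page(rq->q, bio, page, len, 0) < len)
                goto free_bio;

        ret = blk_rq_append_bio(rq, bio);
        if (ret)
                goto free_bio;
        return 0;

free_bio:
        bio_uninit(bio);
        kfree(bio);
        return ret;
}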
     553             : 
     554             : /* Prepare bio for passthrough IO given ITER_BVEC iter */
     555           0 : static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
     556             : {
     557           0 :         struct request_queue *q = rq->q;
     558           0 :         size_t nr_iter = iov_iter_count(iter);
     559           0 :         size_t nr_segs = iter->nr_segs;
     560           0 :         struct bio_vec *bvecs, *bvprvp = NULL;
     561           0 :         const struct queue_limits *lim = &q->limits;
     562           0 :         unsigned int nsegs = 0, bytes = 0;
     563             :         struct bio *bio;
     564             :         size_t i;
     565             : 
     566           0 :         if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
     567             :                 return -EINVAL;
     568           0 :         if (nr_segs > queue_max_segments(q))
     569             :                 return -EINVAL;
     570             : 
     571             :         /* no iovecs to alloc, as we already have a BVEC iterator */
     572           0 :         bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
     573           0 :         if (bio == NULL)
     574             :                 return -ENOMEM;
     575             : 
     576           0 :         bio_iov_bvec_set(bio, (struct iov_iter *)iter);
     577           0 :         blk_rq_bio_prep(rq, bio, nr_segs);
     578             : 
     579             :         /* loop to perform a bunch of sanity checks */
     580           0 :         bvecs = (struct bio_vec *)iter->bvec;
     581           0 :         for (i = 0; i < nr_segs; i++) {
     582           0 :                 struct bio_vec *bv = &bvecs[i];
     583             : 
     584             :                 /*
     585             :                  * If the queue doesn't support SG gaps and adding this
      586             :          * offset would create a gap, fall back to copying.
     587             :                  */
     588           0 :                 if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
     589           0 :                         blk_mq_map_bio_put(bio);
     590           0 :                         return -EREMOTEIO;
     591             :                 }
     592             :                 /* check full condition */
     593           0 :                 if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
     594             :                         goto put_bio;
     595           0 :                 if (bytes + bv->bv_len > nr_iter)
     596             :                         goto put_bio;
     597           0 :                 if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
     598             :                         goto put_bio;
     599             : 
     600           0 :                 nsegs++;
     601           0 :                 bytes += bv->bv_len;
     602           0 :                 bvprvp = bv;
     603             :         }
     604             :         return 0;
     605             : put_bio:
     606           0 :         blk_mq_map_bio_put(bio);
     607           0 :         return -EINVAL;
     608             : }
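
/*
 * Hedged sketch (not part of blk-map.c): how a caller reaches the bvec fast
 * path above.  Data already described by a bio_vec array is wrapped in an
 * ITER_BVEC iterator and handed to blk_rq_map_user_iov(), which takes the
 * path above when the iterator passes the queue's alignment checks.
 * "bvecs", "nr_segs" and "total_len" are illustrative.
 */
static int example_map_bvec_array(struct request_queue *q, struct request *rq,
                                  struct bio_vec *bvecs, unsigned int nr_segs,
                                  size_t total_len)
{
        struct iov_iter iter;

        /* ITER_SOURCE: the memory is the source of a data-out transfer */
        iov_iter_bvec(&iter, ITER_SOURCE, bvecs, nr_segs, total_len);
        return blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
}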
     609             : 
     610             : /**
     611             :  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
     612             :  * @q:          request queue where request should be inserted
     613             :  * @rq:         request to map data to
     614             :  * @map_data:   pointer to the rq_map_data holding pages (if necessary)
     615             :  * @iter:       iovec iterator
     616             :  * @gfp_mask:   memory allocation flags
     617             :  *
     618             :  * Description:
     619             :  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
     620             :  *    a kernel bounce buffer is used.
     621             :  *
     622             :  *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
     623             :  *    still in process context.
     624             :  */
     625           0 : int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
     626             :                         struct rq_map_data *map_data,
     627             :                         const struct iov_iter *iter, gfp_t gfp_mask)
     628             : {
     629           0 :         bool copy = false, map_bvec = false;
     630           0 :         unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
     631           0 :         struct bio *bio = NULL;
     632             :         struct iov_iter i;
     633           0 :         int ret = -EINVAL;
     634             : 
     635           0 :         if (map_data)
     636             :                 copy = true;
     637           0 :         else if (blk_queue_may_bounce(q))
     638             :                 copy = true;
     639           0 :         else if (iov_iter_alignment(iter) & align)
     640             :                 copy = true;
     641           0 :         else if (iov_iter_is_bvec(iter))
     642             :                 map_bvec = true;
     643           0 :         else if (!user_backed_iter(iter))
     644             :                 copy = true;
     645           0 :         else if (queue_virt_boundary(q))
     646           0 :                 copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
     647             : 
     648           0 :         if (map_bvec) {
     649           0 :                 ret = blk_rq_map_user_bvec(rq, iter);
     650           0 :                 if (!ret)
     651             :                         return 0;
     652           0 :                 if (ret != -EREMOTEIO)
     653             :                         goto fail;
     654             :                 /* fall back to copying the data on limits mismatches */
     655             :                 copy = true;
     656             :         }
     657             : 
     658           0 :         i = *iter;
     659             :         do {
     660           0 :                 if (copy)
     661           0 :                         ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
     662             :                 else
     663           0 :                         ret = bio_map_user_iov(rq, &i, gfp_mask);
     664           0 :                 if (ret)
     665             :                         goto unmap_rq;
     666           0 :                 if (!bio)
     667           0 :                         bio = rq->bio;
     668           0 :         } while (iov_iter_count(&i));
     669             : 
     670             :         return 0;
     671             : 
     672             : unmap_rq:
     673           0 :         blk_rq_unmap_user(bio);
     674             : fail:
     675           0 :         rq->bio = NULL;
     676           0 :         return ret;
     677             : }
     678             : EXPORT_SYMBOL(blk_rq_map_user_iov);
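
/*
 * Hedged end-to-end sketch (not part of blk-map.c) of the contract described
 * above: map a user iovec into a passthrough request, execute it
 * synchronously, then unmap while still in process context, passing the
 * original rq->bio back to blk_rq_unmap_user().  The helper name and
 * parameters are illustrative and error handling is abbreviated.
 */
static int example_passthrough_read(struct request_queue *q,
                                    const struct iovec __user *uvec,
                                    unsigned int nr_segs)
{
        struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
        struct iov_iter iter;
        struct request *rq;
        struct bio *bio;
        ssize_t ret;

        ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
        if (ret < 0)
                return ret;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq)) {
                ret = PTR_ERR(rq);
                goto out_free_iov;
        }

        ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
        if (!ret) {
                bio = rq->bio;                  /* rq->bio may change later */
                blk_execute_rq(rq, false);
                ret = blk_rq_unmap_user(bio);   /* copy back / unpin pages */
        }
        blk_mq_free_request(rq);
out_free_iov:
        kfree(iov);                             /* NULL if fast_iov was used */
        return ret;
}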
     679             : 
     680           0 : int blk_rq_map_user(struct request_queue *q, struct request *rq,
     681             :                     struct rq_map_data *map_data, void __user *ubuf,
     682             :                     unsigned long len, gfp_t gfp_mask)
     683             : {
     684             :         struct iov_iter i;
     685           0 :         int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);
     686             : 
     687           0 :         if (unlikely(ret < 0))
     688             :                 return ret;
     689             : 
     690           0 :         return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
     691             : }
     692             : EXPORT_SYMBOL(blk_rq_map_user);
     693             : 
     694           0 : int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
     695             :                 void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
     696             :                 bool vec, int iov_count, bool check_iter_count, int rw)
     697             : {
     698           0 :         int ret = 0;
     699             : 
     700           0 :         if (vec) {
     701             :                 struct iovec fast_iov[UIO_FASTIOV];
     702           0 :                 struct iovec *iov = fast_iov;
     703             :                 struct iov_iter iter;
     704             : 
     705           0 :                 ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
     706             :                                 UIO_FASTIOV, &iov, &iter);
     707           0 :                 if (ret < 0)
     708           0 :                         return ret;
     709             : 
     710           0 :                 if (iov_count) {
     711             :                         /* SG_IO howto says that the shorter of the two wins */
     712           0 :                         iov_iter_truncate(&iter, buf_len);
     713           0 :                         if (check_iter_count && !iov_iter_count(&iter)) {
     714           0 :                                 kfree(iov);
     715           0 :                                 return -EINVAL;
     716             :                         }
     717             :                 }
     718             : 
     719           0 :                 ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
     720             :                                 gfp_mask);
     721           0 :                 kfree(iov);
     722           0 :         } else if (buf_len) {
     723           0 :                 ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
     724             :                                 gfp_mask);
     725             :         }
     726             :         return ret;
     727             : }
     728             : EXPORT_SYMBOL(blk_rq_map_user_io);
     729             : 
     730             : /**
     731             :  * blk_rq_unmap_user - unmap a request with user data
     732             :  * @bio:               start of bio list
     733             :  *
     734             :  * Description:
     735             :  *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
     736             :  *    supply the original rq->bio from the blk_rq_map_user() return, since
     737             :  *    the I/O completion may have changed rq->bio.
     738             :  */
     739           0 : int blk_rq_unmap_user(struct bio *bio)
     740             : {
     741             :         struct bio *next_bio;
     742           0 :         int ret = 0, ret2;
     743             : 
     744           0 :         while (bio) {
     745           0 :                 if (bio->bi_private) {
     746           0 :                         ret2 = bio_uncopy_user(bio);
     747           0 :                         if (ret2 && !ret)
     748           0 :                                 ret = ret2;
     749             :                 } else {
     750           0 :                         bio_release_pages(bio, bio_data_dir(bio) == READ);
     751             :                 }
     752             : 
     753           0 :                 next_bio = bio;
     754           0 :                 bio = bio->bi_next;
     755           0 :                 blk_mq_map_bio_put(next_bio);
     756             :         }
     757             : 
     758           0 :         return ret;
     759             : }
     760             : EXPORT_SYMBOL(blk_rq_unmap_user);
     761             : 
     762             : /**
     763             :  * blk_rq_map_kern - map kernel data to a request, for passthrough requests
     764             :  * @q:          request queue where request should be inserted
     765             :  * @rq:         request to fill
     766             :  * @kbuf:       the kernel buffer
     767             :  * @len:        length of user data
     768             :  * @gfp_mask:   memory allocation flags
     769             :  *
     770             :  * Description:
     771             :  *    Data will be mapped directly if possible. Otherwise a bounce
     772             :  *    buffer is used. Can be called multiple times to append multiple
     773             :  *    buffers.
     774             :  */
     775           0 : int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
     776             :                     unsigned int len, gfp_t gfp_mask)
     777             : {
     778           0 :         int reading = rq_data_dir(rq) == READ;
     779           0 :         unsigned long addr = (unsigned long) kbuf;
     780             :         struct bio *bio;
     781             :         int ret;
     782             : 
     783           0 :         if (len > (queue_max_hw_sectors(q) << 9))
     784             :                 return -EINVAL;
     785           0 :         if (!len || !kbuf)
     786             :                 return -EINVAL;
     787             : 
     788           0 :         if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
     789           0 :             blk_queue_may_bounce(q))
     790           0 :                 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
     791             :         else
     792           0 :                 bio = bio_map_kern(q, kbuf, len, gfp_mask);
     793             : 
     794           0 :         if (IS_ERR(bio))
     795           0 :                 return PTR_ERR(bio);
     796             : 
     797           0 :         bio->bi_opf &= ~REQ_OP_MASK;
     798           0 :         bio->bi_opf |= req_op(rq);
     799             : 
     800           0 :         ret = blk_rq_append_bio(rq, bio);
     801           0 :         if (unlikely(ret)) {
     802           0 :                 bio_uninit(bio);
     803           0 :                 kfree(bio);
     804             :         }
     805             :         return ret;
     806             : }
     807             : EXPORT_SYMBOL(blk_rq_map_kern);
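
/*
 * Hedged sketch (not part of blk-map.c), the kernel-buffer counterpart of
 * the user-space examples above: map a kmalloc()ed, suitably aligned buffer
 * and execute the request synchronously.  No explicit unmap step is needed;
 * the end_io handlers installed by bio_map_kern()/bio_copy_kern() free the
 * bounce pages (if any) and the bio itself when the request completes.  The
 * helper name is illustrative and error handling is abbreviated.
 */
static int example_kern_transfer(struct request_queue *q, void *kbuf,
                                 unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
        if (!ret)
                blk_execute_rq(rq, false);

        blk_mq_free_request(rq);
        return ret;
}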

Generated by: LCOV version 1.14