LCOV - code coverage report
Current view: top level - block - blk-map.c (source / functions)
Test:         coverage.info
Date:         2023-08-24 13:40:31

                 Hit    Total    Coverage
  Lines:           0      331       0.0 %
  Functions:       0       20       0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Functions related to mapping data to requests
       4             :  */
       5             : #include <linux/kernel.h>
       6             : #include <linux/sched/task_stack.h>
       7             : #include <linux/module.h>
       8             : #include <linux/bio.h>
       9             : #include <linux/blkdev.h>
      10             : #include <linux/uio.h>
      11             : 
      12             : #include "blk.h"
      13             : 
      14             : struct bio_map_data {
      15             :         bool is_our_pages : 1;
      16             :         bool is_null_mapped : 1;
      17             :         struct iov_iter iter;
      18             :         struct iovec iov[];
      19             : };
      20             : 
      21           0 : static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
      22             :                                                gfp_t gfp_mask)
      23             : {
      24             :         struct bio_map_data *bmd;
      25             : 
      26           0 :         if (data->nr_segs > UIO_MAXIOV)
      27             :                 return NULL;
      28             : 
      29           0 :         bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
      30           0 :         if (!bmd)
      31             :                 return NULL;
      32           0 :         bmd->iter = *data;
      33           0 :         if (iter_is_iovec(data)) {
      34           0 :                 memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);
      35           0 :                 bmd->iter.__iov = bmd->iov;
      36             :         }
      37             :         return bmd;
      38             : }
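
Editor's note: the helper above packs the header and the copied iovecs into a single allocation. struct_size(bmd, iov, data->nr_segs) evaluates to sizeof(*bmd) plus nr_segs trailing struct iovec entries and saturates on overflow, so the kmalloc() simply fails for absurd segment counts. A minimal sketch of the same flexible-array idiom, using hypothetical names (illustration only, not part of blk-map.c):

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/uio.h>		/* struct iovec */

struct demo_map {
	unsigned int nr;
	struct iovec iov[];	/* flexible array must be the last member */
};

static struct demo_map *demo_map_alloc(unsigned int nr, gfp_t gfp)
{
	struct demo_map *d;

	/* one allocation covers the header and all 'nr' iovec slots */
	d = kmalloc(struct_size(d, iov, nr), gfp);
	if (d)
		d->nr = nr;
	return d;
}
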
      39             : 
      40             : /**
      41             :  * bio_copy_from_iter - copy all pages from iov_iter to bio
      42             :  * @bio: The &struct bio which describes the I/O as destination
      43             :  * @iter: iov_iter as source
      44             :  *
      45             :  * Copy all pages from iov_iter to bio.
      46             :  * Returns 0 on success, or error on failure.
      47             :  */
      48           0 : static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
      49             : {
      50             :         struct bio_vec *bvec;
      51             :         struct bvec_iter_all iter_all;
      52             : 
      53           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
      54             :                 ssize_t ret;
      55             : 
      56           0 :                 ret = copy_page_from_iter(bvec->bv_page,
      57           0 :                                           bvec->bv_offset,
      58           0 :                                           bvec->bv_len,
      59             :                                           iter);
      60             : 
      61           0 :                 if (!iov_iter_count(iter))
      62             :                         break;
      63             : 
      64           0 :                 if (ret < bvec->bv_len)
      65             :                         return -EFAULT;
      66             :         }
      67             : 
      68             :         return 0;
      69             : }
      70             : 
      71             : /**
      72             :  * bio_copy_to_iter - copy all pages from bio to iov_iter
      73             :  * @bio: The &struct bio which describes the I/O as source
      74             :  * @iter: iov_iter as destination
      75             :  *
      76             :  * Copy all pages from bio to iov_iter.
      77             :  * Returns 0 on success, or error on failure.
      78             :  */
      79           0 : static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
      80             : {
      81             :         struct bio_vec *bvec;
      82             :         struct bvec_iter_all iter_all;
      83             : 
      84           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
      85             :                 ssize_t ret;
      86             : 
      87           0 :                 ret = copy_page_to_iter(bvec->bv_page,
      88           0 :                                         bvec->bv_offset,
      89           0 :                                         bvec->bv_len,
      90             :                                         &iter);
      91             : 
      92           0 :                 if (!iov_iter_count(&iter))
      93             :                         break;
      94             : 
      95           0 :                 if (ret < bvec->bv_len)
      96             :                         return -EFAULT;
      97             :         }
      98             : 
      99             :         return 0;
     100             : }
     101             : 
     102             : /**
     103             :  *      bio_uncopy_user -       finish previously mapped bio
     104             :  *      @bio: bio being terminated
     105             :  *
     106             :  *      Free pages allocated from bio_copy_user_iov() and write back data
     107             :  *      to user space in case of a read.
     108             :  */
     109           0 : static int bio_uncopy_user(struct bio *bio)
     110             : {
     111           0 :         struct bio_map_data *bmd = bio->bi_private;
     112           0 :         int ret = 0;
     113             : 
     114           0 :         if (!bmd->is_null_mapped) {
     115             :                 /*
     116             :                  * if we're in a workqueue, the request is orphaned, so
     117             :                  * don't copy into a random user address space, just free
     118             :                  * and return -EINTR so user space doesn't expect any data.
     119             :                  */
     120           0 :                 if (!current->mm)
     121             :                         ret = -EINTR;
     122           0 :                 else if (bio_data_dir(bio) == READ)
     123           0 :                         ret = bio_copy_to_iter(bio, bmd->iter);
     124           0 :                 if (bmd->is_our_pages)
     125           0 :                         bio_free_pages(bio);
     126             :         }
     127           0 :         kfree(bmd);
     128           0 :         return ret;
     129             : }
     130             : 
     131           0 : static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
     132             :                 struct iov_iter *iter, gfp_t gfp_mask)
     133             : {
     134             :         struct bio_map_data *bmd;
     135             :         struct page *page;
     136             :         struct bio *bio;
     137           0 :         int i = 0, ret;
     138             :         int nr_pages;
     139           0 :         unsigned int len = iter->count;
     140           0 :         unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
     141             : 
     142           0 :         bmd = bio_alloc_map_data(iter, gfp_mask);
     143           0 :         if (!bmd)
     144             :                 return -ENOMEM;
     145             : 
     146             :         /*
     147             :          * We need to do a deep copy of the iov_iter including the iovecs.
     148             :          * The caller provided iov might point to an on-stack or otherwise
      149             :          * short-lived one.
     150             :          */
     151           0 :         bmd->is_our_pages = !map_data;
     152           0 :         bmd->is_null_mapped = (map_data && map_data->null_mapped);
     153             : 
     154           0 :         nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
     155             : 
     156           0 :         ret = -ENOMEM;
     157           0 :         bio = bio_kmalloc(nr_pages, gfp_mask);
     158           0 :         if (!bio)
     159             :                 goto out_bmd;
     160           0 :         bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
     161             : 
     162           0 :         if (map_data) {
     163           0 :                 nr_pages = 1U << map_data->page_order;
     164           0 :                 i = map_data->offset / PAGE_SIZE;
     165             :         }
     166           0 :         while (len) {
     167           0 :                 unsigned int bytes = PAGE_SIZE;
     168             : 
     169           0 :                 bytes -= offset;
     170             : 
     171           0 :                 if (bytes > len)
     172           0 :                         bytes = len;
     173             : 
     174           0 :                 if (map_data) {
     175           0 :                         if (i == map_data->nr_entries * nr_pages) {
     176             :                                 ret = -ENOMEM;
     177             :                                 goto cleanup;
     178             :                         }
     179             : 
     180           0 :                         page = map_data->pages[i / nr_pages];
     181           0 :                         page += (i % nr_pages);
     182             : 
     183           0 :                         i++;
     184             :                 } else {
     185           0 :                         page = alloc_page(GFP_NOIO | gfp_mask);
     186           0 :                         if (!page) {
     187             :                                 ret = -ENOMEM;
     188             :                                 goto cleanup;
     189             :                         }
     190             :                 }
     191             : 
     192           0 :                 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
     193           0 :                         if (!map_data)
     194           0 :                                 __free_page(page);
     195             :                         break;
     196             :                 }
     197             : 
     198           0 :                 len -= bytes;
     199           0 :                 offset = 0;
     200             :         }
     201             : 
     202           0 :         if (map_data)
     203           0 :                 map_data->offset += bio->bi_iter.bi_size;
     204             : 
     205             :         /*
     206             :          * success
     207             :          */
     208           0 :         if ((iov_iter_rw(iter) == WRITE &&
     209           0 :              (!map_data || !map_data->null_mapped)) ||
     210           0 :             (map_data && map_data->from_user)) {
     211           0 :                 ret = bio_copy_from_iter(bio, iter);
     212           0 :                 if (ret)
     213             :                         goto cleanup;
     214             :         } else {
     215           0 :                 if (bmd->is_our_pages)
     216           0 :                         zero_fill_bio(bio);
     217           0 :                 iov_iter_advance(iter, bio->bi_iter.bi_size);
     218             :         }
     219             : 
     220           0 :         bio->bi_private = bmd;
     221             : 
     222           0 :         ret = blk_rq_append_bio(rq, bio);
     223           0 :         if (ret)
     224             :                 goto cleanup;
     225             :         return 0;
     226             : cleanup:
     227           0 :         if (!map_data)
     228           0 :                 bio_free_pages(bio);
     229           0 :         bio_uninit(bio);
     230           0 :         kfree(bio);
     231             : out_bmd:
     232           0 :         kfree(bmd);
     233           0 :         return ret;
     234             : }
     235             : 
     236           0 : static void blk_mq_map_bio_put(struct bio *bio)
     237             : {
     238           0 :         if (bio->bi_opf & REQ_ALLOC_CACHE) {
     239           0 :                 bio_put(bio);
     240             :         } else {
     241           0 :                 bio_uninit(bio);
     242           0 :                 kfree(bio);
     243             :         }
     244           0 : }
     245             : 
     246           0 : static struct bio *blk_rq_map_bio_alloc(struct request *rq,
     247             :                 unsigned int nr_vecs, gfp_t gfp_mask)
     248             : {
     249             :         struct bio *bio;
     250             : 
     251           0 :         if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
     252           0 :                 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
     253             :                                         &fs_bio_set);
     254           0 :                 if (!bio)
     255             :                         return NULL;
     256             :         } else {
     257           0 :                 bio = bio_kmalloc(nr_vecs, gfp_mask);
     258           0 :                 if (!bio)
     259             :                         return NULL;
     260           0 :                 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
     261             :         }
     262             :         return bio;
     263             : }
     264             : 
     265           0 : static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
     266             :                 gfp_t gfp_mask)
     267             : {
     268           0 :         iov_iter_extraction_t extraction_flags = 0;
     269           0 :         unsigned int max_sectors = queue_max_hw_sectors(rq->q);
     270           0 :         unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
     271             :         struct bio *bio;
     272             :         int ret;
     273             :         int j;
     274             : 
     275           0 :         if (!iov_iter_count(iter))
     276             :                 return -EINVAL;
     277             : 
     278           0 :         bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
     279           0 :         if (bio == NULL)
     280             :                 return -ENOMEM;
     281             : 
     282           0 :         if (blk_queue_pci_p2pdma(rq->q))
     283           0 :                 extraction_flags |= ITER_ALLOW_P2PDMA;
     284           0 :         if (iov_iter_extract_will_pin(iter))
     285             :                 bio_set_flag(bio, BIO_PAGE_PINNED);
     286             : 
     287           0 :         while (iov_iter_count(iter)) {
     288             :                 struct page *stack_pages[UIO_FASTIOV];
     289           0 :                 struct page **pages = stack_pages;
     290             :                 ssize_t bytes;
     291             :                 size_t offs;
     292             :                 int npages;
     293             : 
     294           0 :                 if (nr_vecs > ARRAY_SIZE(stack_pages))
     295           0 :                         pages = NULL;
     296             : 
     297           0 :                 bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
     298             :                                                nr_vecs, extraction_flags, &offs);
     299           0 :                 if (unlikely(bytes <= 0)) {
     300           0 :                         ret = bytes ? bytes : -EFAULT;
     301           0 :                         goto out_unmap;
     302             :                 }
     303             : 
     304           0 :                 npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
     305             : 
     306           0 :                 if (unlikely(offs & queue_dma_alignment(rq->q)))
     307             :                         j = 0;
     308             :                 else {
     309           0 :                         for (j = 0; j < npages; j++) {
     310           0 :                                 struct page *page = pages[j];
     311           0 :                                 unsigned int n = PAGE_SIZE - offs;
     312           0 :                                 bool same_page = false;
     313             : 
     314           0 :                                 if (n > bytes)
     315           0 :                                         n = bytes;
     316             : 
     317           0 :                                 if (!bio_add_hw_page(rq->q, bio, page, n, offs,
     318             :                                                      max_sectors, &same_page)) {
     319           0 :                                         if (same_page)
     320             :                                                 bio_release_page(bio, page);
     321           0 :                                         break;
     322             :                                 }
     323             : 
     324           0 :                                 bytes -= n;
     325           0 :                                 offs = 0;
     326             :                         }
     327             :                 }
     328             :                 /*
     329             :                  * release the pages we didn't map into the bio, if any
     330             :                  */
     331           0 :                 while (j < npages)
     332           0 :                         bio_release_page(bio, pages[j++]);
     333           0 :                 if (pages != stack_pages)
     334           0 :                         kvfree(pages);
     335             :                 /* couldn't stuff something into bio? */
     336           0 :                 if (bytes) {
     337           0 :                         iov_iter_revert(iter, bytes);
     338           0 :                         break;
     339             :                 }
     340             :         }
     341             : 
     342           0 :         ret = blk_rq_append_bio(rq, bio);
     343           0 :         if (ret)
     344             :                 goto out_unmap;
     345             :         return 0;
     346             : 
     347             :  out_unmap:
     348           0 :         bio_release_pages(bio, false);
     349           0 :         blk_mq_map_bio_put(bio);
     350           0 :         return ret;
     351             : }
     352             : 
     353             : static void bio_invalidate_vmalloc_pages(struct bio *bio)
     354             : {
     355             : #ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
     356             :         if (bio->bi_private && !op_is_write(bio_op(bio))) {
     357             :                 unsigned long i, len = 0;
     358             : 
     359             :                 for (i = 0; i < bio->bi_vcnt; i++)
     360             :                         len += bio->bi_io_vec[i].bv_len;
     361             :                 invalidate_kernel_vmap_range(bio->bi_private, len);
     362             :         }
     363             : #endif
     364             : }
     365             : 
     366           0 : static void bio_map_kern_endio(struct bio *bio)
     367             : {
     368           0 :         bio_invalidate_vmalloc_pages(bio);
     369           0 :         bio_uninit(bio);
     370           0 :         kfree(bio);
     371           0 : }
     372             : 
     373             : /**
     374             :  *      bio_map_kern    -       map kernel address into bio
     375             :  *      @q: the struct request_queue for the bio
     376             :  *      @data: pointer to buffer to map
     377             :  *      @len: length in bytes
     378             :  *      @gfp_mask: allocation flags for bio allocation
     379             :  *
      380             :  *      Map the kernel address into a bio suitable for I/O to a block
     381             :  *      device. Returns an error pointer in case of error.
     382             :  */
     383           0 : static struct bio *bio_map_kern(struct request_queue *q, void *data,
     384             :                 unsigned int len, gfp_t gfp_mask)
     385             : {
     386           0 :         unsigned long kaddr = (unsigned long)data;
     387           0 :         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
     388           0 :         unsigned long start = kaddr >> PAGE_SHIFT;
     389           0 :         const int nr_pages = end - start;
     390           0 :         bool is_vmalloc = is_vmalloc_addr(data);
     391             :         struct page *page;
     392             :         int offset, i;
     393             :         struct bio *bio;
     394             : 
     395           0 :         bio = bio_kmalloc(nr_pages, gfp_mask);
     396           0 :         if (!bio)
     397             :                 return ERR_PTR(-ENOMEM);
     398           0 :         bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
     399             : 
     400           0 :         if (is_vmalloc) {
     401           0 :                 flush_kernel_vmap_range(data, len);
     402           0 :                 bio->bi_private = data;
     403             :         }
     404             : 
     405           0 :         offset = offset_in_page(kaddr);
     406           0 :         for (i = 0; i < nr_pages; i++) {
     407           0 :                 unsigned int bytes = PAGE_SIZE - offset;
     408             : 
     409           0 :                 if (len <= 0)
     410             :                         break;
     411             : 
     412           0 :                 if (bytes > len)
     413           0 :                         bytes = len;
     414             : 
     415           0 :                 if (!is_vmalloc)
     416           0 :                         page = virt_to_page(data);
     417             :                 else
     418           0 :                         page = vmalloc_to_page(data);
     419           0 :                 if (bio_add_pc_page(q, bio, page, bytes,
     420             :                                     offset) < bytes) {
     421             :                         /* we don't support partial mappings */
     422           0 :                         bio_uninit(bio);
     423           0 :                         kfree(bio);
     424           0 :                         return ERR_PTR(-EINVAL);
     425             :                 }
     426             : 
     427           0 :                 data += bytes;
     428           0 :                 len -= bytes;
     429           0 :                 offset = 0;
     430             :         }
     431             : 
     432           0 :         bio->bi_end_io = bio_map_kern_endio;
     433           0 :         return bio;
     434             : }
     435             : 
     436           0 : static void bio_copy_kern_endio(struct bio *bio)
     437             : {
     438           0 :         bio_free_pages(bio);
     439           0 :         bio_uninit(bio);
     440           0 :         kfree(bio);
     441           0 : }
     442             : 
     443           0 : static void bio_copy_kern_endio_read(struct bio *bio)
     444             : {
     445           0 :         char *p = bio->bi_private;
     446             :         struct bio_vec *bvec;
     447             :         struct bvec_iter_all iter_all;
     448             : 
     449           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
     450           0 :                 memcpy_from_bvec(p, bvec);
     451           0 :                 p += bvec->bv_len;
     452             :         }
     453             : 
     454           0 :         bio_copy_kern_endio(bio);
     455           0 : }
     456             : 
     457             : /**
     458             :  *      bio_copy_kern   -       copy kernel address into bio
     459             :  *      @q: the struct request_queue for the bio
     460             :  *      @data: pointer to buffer to copy
     461             :  *      @len: length in bytes
     462             :  *      @gfp_mask: allocation flags for bio and page allocation
     463             :  *      @reading: data direction is READ
     464             :  *
      465             :  *      Copy the kernel address into a bio suitable for I/O to a block
     466             :  *      device. Returns an error pointer in case of error.
     467             :  */
     468           0 : static struct bio *bio_copy_kern(struct request_queue *q, void *data,
     469             :                 unsigned int len, gfp_t gfp_mask, int reading)
     470             : {
     471           0 :         unsigned long kaddr = (unsigned long)data;
     472           0 :         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
     473           0 :         unsigned long start = kaddr >> PAGE_SHIFT;
     474             :         struct bio *bio;
     475           0 :         void *p = data;
     476           0 :         int nr_pages = 0;
     477             : 
     478             :         /*
     479             :          * Overflow, abort
     480             :          */
     481           0 :         if (end < start)
     482             :                 return ERR_PTR(-EINVAL);
     483             : 
     484           0 :         nr_pages = end - start;
     485           0 :         bio = bio_kmalloc(nr_pages, gfp_mask);
     486           0 :         if (!bio)
     487             :                 return ERR_PTR(-ENOMEM);
     488           0 :         bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
     489             : 
     490           0 :         while (len) {
     491             :                 struct page *page;
     492           0 :                 unsigned int bytes = PAGE_SIZE;
     493             : 
     494           0 :                 if (bytes > len)
     495           0 :                         bytes = len;
     496             : 
     497           0 :                 page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
     498           0 :                 if (!page)
     499             :                         goto cleanup;
     500             : 
     501           0 :                 if (!reading)
     502           0 :                         memcpy(page_address(page), p, bytes);
     503             : 
     504           0 :                 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
     505             :                         break;
     506             : 
     507           0 :                 len -= bytes;
     508           0 :                 p += bytes;
     509             :         }
     510             : 
     511           0 :         if (reading) {
     512           0 :                 bio->bi_end_io = bio_copy_kern_endio_read;
     513           0 :                 bio->bi_private = data;
     514             :         } else {
     515           0 :                 bio->bi_end_io = bio_copy_kern_endio;
     516             :         }
     517             : 
     518             :         return bio;
     519             : 
     520             : cleanup:
     521           0 :         bio_free_pages(bio);
     522           0 :         bio_uninit(bio);
     523           0 :         kfree(bio);
     524           0 :         return ERR_PTR(-ENOMEM);
     525             : }
     526             : 
     527             : /*
     528             :  * Append a bio to a passthrough request.  Only works if the bio can be merged
     529             :  * into the request based on the driver constraints.
     530             :  */
     531           0 : int blk_rq_append_bio(struct request *rq, struct bio *bio)
     532             : {
     533             :         struct bvec_iter iter;
     534             :         struct bio_vec bv;
     535           0 :         unsigned int nr_segs = 0;
     536             : 
     537           0 :         bio_for_each_bvec(bv, bio, iter)
     538           0 :                 nr_segs++;
     539             : 
     540           0 :         if (!rq->bio) {
     541             :                 blk_rq_bio_prep(rq, bio, nr_segs);
     542             :         } else {
     543           0 :                 if (!ll_back_merge_fn(rq, bio, nr_segs))
     544             :                         return -EINVAL;
     545           0 :                 rq->biotail->bi_next = bio;
     546           0 :                 rq->biotail = bio;
     547           0 :                 rq->__data_len += (bio)->bi_iter.bi_size;
     548           0 :                 bio_crypt_free_ctx(bio);
     549             :         }
     550             : 
     551             :         return 0;
     552             : }
     553             : EXPORT_SYMBOL(blk_rq_append_bio);
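
Editor's note: the comment above only states the merge constraint, so here is a hedged sketch of handing a driver-built bio to a passthrough request. example_append(), its parameters, and the surrounding lifecycle are hypothetical, and completion/freeing of the bio on the success path is omitted (illustration only, not part of blk-map.c):

/*
 * Assumes 'rq' is a passthrough request, e.g. from
 * blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0), and that 'page' holds a
 * driver-owned buffer of 'len' bytes (len <= PAGE_SIZE).
 */
static int example_append(struct request *rq, struct page *page,
			  unsigned int len)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(NULL, 1, req_op(rq), GFP_KERNEL);
	if (!bio)
		return -ENOMEM;

	if (bio_add_page(bio, page, len, 0) < len) {
		bio_put(bio);
		return -EINVAL;
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		bio_put(bio);	/* could not be merged under the queue limits */
	return ret;
}
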
     554             : 
     555             : /* Prepare bio for passthrough IO given ITER_BVEC iter */
     556           0 : static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
     557             : {
     558           0 :         struct request_queue *q = rq->q;
     559           0 :         size_t nr_iter = iov_iter_count(iter);
     560           0 :         size_t nr_segs = iter->nr_segs;
     561           0 :         struct bio_vec *bvecs, *bvprvp = NULL;
     562           0 :         const struct queue_limits *lim = &q->limits;
     563           0 :         unsigned int nsegs = 0, bytes = 0;
     564             :         struct bio *bio;
     565             :         size_t i;
     566             : 
     567           0 :         if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
     568             :                 return -EINVAL;
     569           0 :         if (nr_segs > queue_max_segments(q))
     570             :                 return -EINVAL;
     571             : 
     572             :         /* no iovecs to alloc, as we already have a BVEC iterator */
     573           0 :         bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
     574           0 :         if (bio == NULL)
     575             :                 return -ENOMEM;
     576             : 
     577           0 :         bio_iov_bvec_set(bio, (struct iov_iter *)iter);
     578           0 :         blk_rq_bio_prep(rq, bio, nr_segs);
     579             : 
     580             :         /* loop to perform a bunch of sanity checks */
     581           0 :         bvecs = (struct bio_vec *)iter->bvec;
     582           0 :         for (i = 0; i < nr_segs; i++) {
     583           0 :                 struct bio_vec *bv = &bvecs[i];
     584             : 
     585             :                 /*
     586             :                  * If the queue doesn't support SG gaps and adding this
     587             :                  * offset would create a gap, fallback to copy.
     588             :                  */
     589           0 :                 if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
     590           0 :                         blk_mq_map_bio_put(bio);
     591           0 :                         return -EREMOTEIO;
     592             :                 }
     593             :                 /* check full condition */
     594           0 :                 if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
     595             :                         goto put_bio;
     596           0 :                 if (bytes + bv->bv_len > nr_iter)
     597             :                         goto put_bio;
     598           0 :                 if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
     599             :                         goto put_bio;
     600             : 
     601           0 :                 nsegs++;
     602           0 :                 bytes += bv->bv_len;
     603           0 :                 bvprvp = bv;
     604             :         }
     605             :         return 0;
     606             : put_bio:
     607           0 :         blk_mq_map_bio_put(bio);
     608           0 :         return -EINVAL;
     609             : }
     610             : 
     611             : /**
     612             :  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
     613             :  * @q:          request queue where request should be inserted
     614             :  * @rq:         request to map data to
     615             :  * @map_data:   pointer to the rq_map_data holding pages (if necessary)
     616             :  * @iter:       iovec iterator
     617             :  * @gfp_mask:   memory allocation flags
     618             :  *
     619             :  * Description:
     620             :  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
     621             :  *    a kernel bounce buffer is used.
     622             :  *
     623             :  *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
     624             :  *    still in process context.
     625             :  */
     626           0 : int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
     627             :                         struct rq_map_data *map_data,
     628             :                         const struct iov_iter *iter, gfp_t gfp_mask)
     629             : {
     630           0 :         bool copy = false, map_bvec = false;
     631           0 :         unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
     632           0 :         struct bio *bio = NULL;
     633             :         struct iov_iter i;
     634           0 :         int ret = -EINVAL;
     635             : 
     636           0 :         if (map_data)
     637             :                 copy = true;
     638           0 :         else if (blk_queue_may_bounce(q))
     639             :                 copy = true;
     640           0 :         else if (iov_iter_alignment(iter) & align)
     641             :                 copy = true;
     642           0 :         else if (iov_iter_is_bvec(iter))
     643             :                 map_bvec = true;
     644           0 :         else if (!user_backed_iter(iter))
     645             :                 copy = true;
     646           0 :         else if (queue_virt_boundary(q))
     647           0 :                 copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
     648             : 
     649           0 :         if (map_bvec) {
     650           0 :                 ret = blk_rq_map_user_bvec(rq, iter);
     651           0 :                 if (!ret)
     652             :                         return 0;
     653           0 :                 if (ret != -EREMOTEIO)
     654             :                         goto fail;
     655             :                 /* fall back to copying the data on limits mismatches */
     656             :                 copy = true;
     657             :         }
     658             : 
     659           0 :         i = *iter;
     660             :         do {
     661           0 :                 if (copy)
     662           0 :                         ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
     663             :                 else
     664           0 :                         ret = bio_map_user_iov(rq, &i, gfp_mask);
     665           0 :                 if (ret)
     666             :                         goto unmap_rq;
     667           0 :                 if (!bio)
     668           0 :                         bio = rq->bio;
     669           0 :         } while (iov_iter_count(&i));
     670             : 
     671             :         return 0;
     672             : 
     673             : unmap_rq:
     674           0 :         blk_rq_unmap_user(bio);
     675             : fail:
     676           0 :         rq->bio = NULL;
     677           0 :         return ret;
     678             : }
     679             : EXPORT_SYMBOL(blk_rq_map_user_iov);
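
Editor's note: a hedged sketch of the calling pattern described above, for an ioctl-style path that receives a user iovec: import it, map it onto an already-allocated passthrough request, and remember rq->bio for the later blk_rq_unmap_user(). example_map_iov() and its arguments are hypothetical; the pattern mirrors blk_rq_map_user_io() below (illustration only, not part of blk-map.c):

static int example_map_iov(struct request *rq,
			   const struct iovec __user *uvec, int nr_segs,
			   struct bio **mapped_bio)
{
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov = fast_iov;
	struct iov_iter iter;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, UIO_FASTIOV,
			   &iov, &iter);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
	if (!ret)
		*mapped_bio = rq->bio;	/* needed for blk_rq_unmap_user() */

	/* safe: the mapping made its own copy of (or pinned) the data */
	kfree(iov);
	return ret;
}
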
     680             : 
     681           0 : int blk_rq_map_user(struct request_queue *q, struct request *rq,
     682             :                     struct rq_map_data *map_data, void __user *ubuf,
     683             :                     unsigned long len, gfp_t gfp_mask)
     684             : {
     685             :         struct iov_iter i;
     686           0 :         int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);
     687             : 
     688           0 :         if (unlikely(ret < 0))
     689             :                 return ret;
     690             : 
     691           0 :         return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
     692             : }
     693             : EXPORT_SYMBOL(blk_rq_map_user);
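
Editor's note: for the common single-buffer case, a hedged sketch of setting up a passthrough request around a plain user pointer. example_setup() is hypothetical and error handling is simplified (illustration only, not part of blk-map.c); the saved rq->bio is what blk_rq_unmap_user() needs later, as shown in the sketch after that function below.

static struct request *example_setup(struct request_queue *q,
				     void __user *ubuf, unsigned long len,
				     struct bio **mapped_bio)
{
	struct request *rq;
	int ret;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return rq;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret) {
		blk_mq_free_request(rq);
		return ERR_PTR(ret);
	}

	*mapped_bio = rq->bio;	/* save before the request is executed */
	return rq;
}
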
     694             : 
     695           0 : int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
     696             :                 void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
     697             :                 bool vec, int iov_count, bool check_iter_count, int rw)
     698             : {
     699           0 :         int ret = 0;
     700             : 
     701           0 :         if (vec) {
     702             :                 struct iovec fast_iov[UIO_FASTIOV];
     703           0 :                 struct iovec *iov = fast_iov;
     704             :                 struct iov_iter iter;
     705             : 
     706           0 :                 ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
     707             :                                 UIO_FASTIOV, &iov, &iter);
     708           0 :                 if (ret < 0)
     709           0 :                         return ret;
     710             : 
     711           0 :                 if (iov_count) {
     712             :                         /* SG_IO howto says that the shorter of the two wins */
     713           0 :                         iov_iter_truncate(&iter, buf_len);
     714           0 :                         if (check_iter_count && !iov_iter_count(&iter)) {
     715           0 :                                 kfree(iov);
     716           0 :                                 return -EINVAL;
     717             :                         }
     718             :                 }
     719             : 
     720           0 :                 ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
     721             :                                 gfp_mask);
     722           0 :                 kfree(iov);
     723           0 :         } else if (buf_len) {
     724           0 :                 ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
     725             :                                 gfp_mask);
     726             :         }
     727             :         return ret;
     728             : }
     729             : EXPORT_SYMBOL(blk_rq_map_user_io);
     730             : 
     731             : /**
     732             :  * blk_rq_unmap_user - unmap a request with user data
     733             :  * @bio:               start of bio list
     734             :  *
     735             :  * Description:
     736             :  *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
     737             :  *    supply the original rq->bio from the blk_rq_map_user() return, since
     738             :  *    the I/O completion may have changed rq->bio.
     739             :  */
     740           0 : int blk_rq_unmap_user(struct bio *bio)
     741             : {
     742             :         struct bio *next_bio;
     743           0 :         int ret = 0, ret2;
     744             : 
     745           0 :         while (bio) {
     746           0 :                 if (bio->bi_private) {
     747           0 :                         ret2 = bio_uncopy_user(bio);
     748           0 :                         if (ret2 && !ret)
     749           0 :                                 ret = ret2;
     750             :                 } else {
     751           0 :                         bio_release_pages(bio, bio_data_dir(bio) == READ);
     752             :                 }
     753             : 
     754           0 :                 next_bio = bio;
     755           0 :                 bio = bio->bi_next;
     756           0 :                 blk_mq_map_bio_put(next_bio);
     757             :         }
     758             : 
     759           0 :         return ret;
     760             : }
     761             : EXPORT_SYMBOL(blk_rq_unmap_user);
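
Editor's note: the completion side of the previous sketch, showing why the description above insists on the original rq->bio: the pointer is captured before execution, and only that saved value is passed to blk_rq_unmap_user() afterwards. example_execute() is hypothetical (illustration only, not part of blk-map.c):

static int example_execute(struct request *rq, struct bio *mapped_bio)
{
	blk_status_t status;
	int ret;

	status = blk_execute_rq(rq, false);	/* synchronous, not at head */
	ret = blk_rq_unmap_user(mapped_bio);	/* copies back / unpins pages */
	if (status != BLK_STS_OK && !ret)
		ret = blk_status_to_errno(status);

	blk_mq_free_request(rq);
	return ret;
}
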
     762             : 
     763             : /**
     764             :  * blk_rq_map_kern - map kernel data to a request, for passthrough requests
     765             :  * @q:          request queue where request should be inserted
     766             :  * @rq:         request to fill
     767             :  * @kbuf:       the kernel buffer
     768             :  * @len:        length of user data
     769             :  * @gfp_mask:   memory allocation flags
     770             :  *
     771             :  * Description:
     772             :  *    Data will be mapped directly if possible. Otherwise a bounce
     773             :  *    buffer is used. Can be called multiple times to append multiple
     774             :  *    buffers.
     775             :  */
     776           0 : int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
     777             :                     unsigned int len, gfp_t gfp_mask)
     778             : {
     779           0 :         int reading = rq_data_dir(rq) == READ;
     780           0 :         unsigned long addr = (unsigned long) kbuf;
     781             :         struct bio *bio;
     782             :         int ret;
     783             : 
     784           0 :         if (len > (queue_max_hw_sectors(q) << 9))
     785             :                 return -EINVAL;
     786           0 :         if (!len || !kbuf)
     787             :                 return -EINVAL;
     788             : 
     789           0 :         if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
     790           0 :             blk_queue_may_bounce(q))
     791           0 :                 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
     792             :         else
     793           0 :                 bio = bio_map_kern(q, kbuf, len, gfp_mask);
     794             : 
     795           0 :         if (IS_ERR(bio))
     796           0 :                 return PTR_ERR(bio);
     797             : 
     798           0 :         bio->bi_opf &= ~REQ_OP_MASK;
     799           0 :         bio->bi_opf |= req_op(rq);
     800             : 
     801           0 :         ret = blk_rq_append_bio(rq, bio);
     802           0 :         if (unlikely(ret)) {
     803           0 :                 bio_uninit(bio);
     804           0 :                 kfree(bio);
     805             :         }
     806             :         return ret;
     807             : }
     808             : EXPORT_SYMBOL(blk_rq_map_kern);
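
Editor's note: a hedged sketch of issuing a synchronous passthrough command over a kernel buffer; example_kern_cmd() is hypothetical (illustration only, not part of blk-map.c). As the code above shows, a stack-resident or misaligned kbuf is not an error: it simply takes the bio_copy_kern() bounce path instead of being mapped directly.

static int example_kern_cmd(struct request_queue *q, void *kbuf,
			    unsigned int len)
{
	struct request *rq;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (ret)
		goto out_free;

	status = blk_execute_rq(rq, false);
	ret = blk_status_to_errno(status);
out_free:
	blk_mq_free_request(rq);
	return ret;
}
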

Generated by: LCOV version 1.14