LCOV - code coverage report
Current view: top level - block - blk-merge.c (source / functions)
Test: coverage.info          Date: 2023-04-06 08:38:28
Coverage:    Lines: 0 / 383 (0.0 %)    Functions: 0 / 35 (0.0 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Functions related to segment and merge handling
       4             :  */
       5             : #include <linux/kernel.h>
       6             : #include <linux/module.h>
       7             : #include <linux/bio.h>
       8             : #include <linux/blkdev.h>
       9             : #include <linux/blk-integrity.h>
      10             : #include <linux/scatterlist.h>
      11             : #include <linux/part_stat.h>
      12             : #include <linux/blk-cgroup.h>
      13             : 
      14             : #include <trace/events/block.h>
      15             : 
      16             : #include "blk.h"
      17             : #include "blk-mq-sched.h"
      18             : #include "blk-rq-qos.h"
      19             : #include "blk-throttle.h"
      20             : 
      21             : static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
      22             : {
      23           0 :         *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
      24             : }
      25             : 
      26           0 : static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
      27             : {
      28           0 :         struct bvec_iter iter = bio->bi_iter;
      29             :         int idx;
      30             : 
      31           0 :         bio_get_first_bvec(bio, bv);
      32           0 :         if (bv->bv_len == bio->bi_iter.bi_size)
      33           0 :                 return;         /* this bio only has a single bvec */
      34             : 
      35           0 :         bio_advance_iter(bio, &iter, iter.bi_size);
      36             : 
      37           0 :         if (!iter.bi_bvec_done)
      38           0 :                 idx = iter.bi_idx - 1;
      39             :         else    /* in the middle of bvec */
      40           0 :                 idx = iter.bi_idx;
      41             : 
      42           0 :         *bv = bio->bi_io_vec[idx];
      43             : 
      44             :         /*
       45             :          * iter.bi_bvec_done records the actual length of the last bvec
       46             :          * if this bio ends in the middle of an io vector
      47             :          */
      48           0 :         if (iter.bi_bvec_done)
      49           0 :                 bv->bv_len = iter.bi_bvec_done;
      50             : }
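/*
 * Illustration only (hypothetical sizes): suppose a bio has two bvecs of
 * 4096 and 2048 bytes but bi_iter.bi_size is 5120, i.e. the bio ends 1024
 * bytes into the second bvec.  Advancing a copy of the iterator by the full
 * 5120 bytes leaves iter.bi_idx == 1 and iter.bi_bvec_done == 1024, so the
 * last bvec is bi_io_vec[1] with bv_len trimmed to 1024.  If bi_size were
 * 6144 instead (ending exactly on a bvec boundary), bi_bvec_done would be 0
 * and the last bvec would be bi_io_vec[iter.bi_idx - 1] at its full length.
 */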
      51             : 
      52           0 : static inline bool bio_will_gap(struct request_queue *q,
      53             :                 struct request *prev_rq, struct bio *prev, struct bio *next)
      54             : {
      55             :         struct bio_vec pb, nb;
      56             : 
      57           0 :         if (!bio_has_data(prev) || !queue_virt_boundary(q))
      58             :                 return false;
      59             : 
      60             :         /*
       61             :          * Don't merge if the first bio starts at a non-zero offset, otherwise
       62             :          * it is quite difficult to respect the sg gap limit.  We work hard to
       63             :          * merge a huge number of small single bios in the mkfs case.
      64             :          */
      65           0 :         if (prev_rq)
      66           0 :                 bio_get_first_bvec(prev_rq->bio, &pb);
      67             :         else
      68             :                 bio_get_first_bvec(prev, &pb);
      69           0 :         if (pb.bv_offset & queue_virt_boundary(q))
      70             :                 return true;
      71             : 
      72             :         /*
      73             :          * We don't need to worry about the situation that the merged segment
      74             :          * ends in unaligned virt boundary:
      75             :          *
      76             :          * - if 'pb' ends aligned, the merged segment ends aligned
      77             :          * - if 'pb' ends unaligned, the next bio must include
       78             :          *   a single bvec 'nb', otherwise 'nb' can't
      79             :          *   merge with 'pb'
      80             :          */
      81           0 :         bio_get_last_bvec(prev, &pb);
      82           0 :         bio_get_first_bvec(next, &nb);
      83           0 :         if (biovec_phys_mergeable(q, &pb, &nb))
      84             :                 return false;
      85           0 :         return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
      86             : }
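/*
 * Illustration only (hypothetical limits): with queue_virt_boundary(q) ==
 * 4095 (a 4 KiB virt boundary), a previous bio whose first bvec starts at
 * bv_offset == 512 gives 512 & 4095 != 0, so bio_will_gap() returns true
 * and the merge is rejected up front.  If the first bvec's offset is
 * boundary aligned, the decision falls through to biovec_phys_mergeable()
 * and __bvec_gap_to_prev() on the last/first bvec pair.
 */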
      87             : 
      88             : static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
      89             : {
      90           0 :         return bio_will_gap(req->q, req, req->biotail, bio);
      91             : }
      92             : 
      93             : static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
      94             : {
      95           0 :         return bio_will_gap(req->q, NULL, bio, req->bio);
      96             : }
      97             : 
      98             : /*
       99             :  * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
      100             :  * is defined as 'unsigned int', and it has to be aligned to the
     101             :  * logical block size, which is the minimum accepted unit by hardware.
     102             :  */
     103             : static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
     104             : {
     105           0 :         return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
     106             : }
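/*
 * Illustration only (hypothetical limits): with a logical block size of
 * 4096 bytes, round_down(UINT_MAX, 4096) == 4294963200, and shifting right
 * by SECTOR_SHIFT (9) gives 8388600 sectors, i.e. just under 4 GiB.  The
 * rounding guarantees that a bio capped at this many sectors still ends on
 * a logical block boundary.
 */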
     107             : 
     108           0 : static struct bio *bio_split_discard(struct bio *bio,
     109             :                                      const struct queue_limits *lim,
     110             :                                      unsigned *nsegs, struct bio_set *bs)
     111             : {
     112             :         unsigned int max_discard_sectors, granularity;
     113             :         sector_t tmp;
     114             :         unsigned split_sectors;
     115             : 
     116           0 :         *nsegs = 1;
     117             : 
     118             :         /* Zero-sector (unknown) and one-sector granularities are the same.  */
     119           0 :         granularity = max(lim->discard_granularity >> 9, 1U);
     120             : 
     121           0 :         max_discard_sectors =
     122           0 :                 min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
     123           0 :         max_discard_sectors -= max_discard_sectors % granularity;
     124             : 
     125           0 :         if (unlikely(!max_discard_sectors)) {
     126             :                 /* XXX: warn */
     127             :                 return NULL;
     128             :         }
     129             : 
     130           0 :         if (bio_sectors(bio) <= max_discard_sectors)
     131             :                 return NULL;
     132             : 
     133           0 :         split_sectors = max_discard_sectors;
     134             : 
     135             :         /*
     136             :          * If the next starting sector would be misaligned, stop the discard at
     137             :          * the previous aligned sector.
     138             :          */
     139           0 :         tmp = bio->bi_iter.bi_sector + split_sectors -
     140           0 :                 ((lim->discard_alignment >> 9) % granularity);
     141           0 :         tmp = sector_div(tmp, granularity);
     142             : 
     143           0 :         if (split_sectors > tmp)
     144           0 :                 split_sectors -= tmp;
     145             : 
     146           0 :         return bio_split(bio, split_sectors, GFP_NOIO, bs);
     147             : }
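/*
 * Illustration only (hypothetical limits): discard_granularity == 4096
 * (granularity == 8 sectors), discard_alignment == 0,
 * max_discard_sectors == 8192, and a 20000-sector discard bio starting at
 * sector 10.  Then tmp = 10 + 8192 - 0 = 8202, sector_div() leaves the
 * remainder 8202 % 8 == 2, and split_sectors becomes 8192 - 2 = 8190, so
 * the remainder of the bio resumes at sector 8200, which is granularity
 * aligned.
 */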
     148             : 
     149             : static struct bio *bio_split_write_zeroes(struct bio *bio,
     150             :                                           const struct queue_limits *lim,
     151             :                                           unsigned *nsegs, struct bio_set *bs)
     152             : {
     153           0 :         *nsegs = 0;
     154           0 :         if (!lim->max_write_zeroes_sectors)
     155             :                 return NULL;
     156           0 :         if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
     157             :                 return NULL;
     158           0 :         return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
     159             : }
     160             : 
     161             : /*
     162             :  * Return the maximum number of sectors from the start of a bio that may be
     163             :  * submitted as a single request to a block device. If enough sectors remain,
     164             :  * align the end to the physical block size. Otherwise align the end to the
     165             :  * logical block size. This approach minimizes the number of non-aligned
     166             :  * requests that are submitted to a block device if the start of a bio is not
     167             :  * aligned to a physical block boundary.
     168             :  */
     169           0 : static inline unsigned get_max_io_size(struct bio *bio,
     170             :                                        const struct queue_limits *lim)
     171             : {
     172           0 :         unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
     173           0 :         unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
     174           0 :         unsigned max_sectors = lim->max_sectors, start, end;
     175             : 
     176           0 :         if (lim->chunk_sectors) {
     177           0 :                 max_sectors = min(max_sectors,
     178             :                         blk_chunk_sectors_left(bio->bi_iter.bi_sector,
     179             :                                                lim->chunk_sectors));
     180             :         }
     181             : 
     182           0 :         start = bio->bi_iter.bi_sector & (pbs - 1);
     183           0 :         end = (start + max_sectors) & ~(pbs - 1);
     184           0 :         if (end > start)
     185           0 :                 return end - start;
     186           0 :         return max_sectors & ~(lbs - 1);
     187             : }
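/*
 * Illustration only (hypothetical limits): physical_block_size == 4096
 * (pbs == 8 sectors), logical_block_size == 512 (lbs == 1 sector),
 * max_sectors == 1280, no chunk_sectors, and a bio starting at sector 100.
 * Then start = 100 & 7 = 4, end = (4 + 1280) & ~7 = 1280, and the function
 * returns 1280 - 4 = 1276 sectors, so the capped bio ends at sector 1376,
 * which is physical block aligned.
 */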
     188             : 
     189             : /**
     190             :  * get_max_segment_size() - maximum number of bytes to add as a single segment
     191             :  * @lim: Request queue limits.
      192             :  * @start_page: Page from which @offset is measured.
     193             :  * @offset: Offset from @start_page where to add a segment.
     194             :  *
     195             :  * Returns the maximum number of bytes that can be added as a single segment.
     196             :  */
     197             : static inline unsigned get_max_segment_size(const struct queue_limits *lim,
     198             :                 struct page *start_page, unsigned long offset)
     199             : {
     200           0 :         unsigned long mask = lim->seg_boundary_mask;
     201             : 
     202           0 :         offset = mask & (page_to_phys(start_page) + offset);
     203             : 
     204             :         /*
     205             :          * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
     206             :          * after having calculated the minimum.
     207             :          */
     208           0 :         return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
     209             : }
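/*
 * Illustration only (hypothetical limits): when mask == ULONG_MAX and the
 * masked offset is 0, the naive expression mask - offset + 1 wraps to 0.
 * With the formulation above and max_segment_size == 65536, the result is
 * min(ULONG_MAX - 0, 65535) + 1 == 65536, as intended.
 */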
     210             : 
     211             : /**
     212             :  * bvec_split_segs - verify whether or not a bvec should be split in the middle
     213             :  * @lim:      [in] queue limits to split based on
     214             :  * @bv:       [in] bvec to examine
     215             :  * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
     216             :  *            by the number of segments from @bv that may be appended to that
     217             :  *            bio without exceeding @max_segs
     218             :  * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
     219             :  *            by the number of bytes from @bv that may be appended to that
     220             :  *            bio without exceeding @max_bytes
     221             :  * @max_segs: [in] upper bound for *@nsegs
     222             :  * @max_bytes: [in] upper bound for *@bytes
     223             :  *
     224             :  * When splitting a bio, it can happen that a bvec is encountered that is too
     225             :  * big to fit in a single segment and hence that it has to be split in the
     226             :  * middle. This function verifies whether or not that should happen. The value
     227             :  * %true is returned if and only if appending the entire @bv to a bio with
      228             :  * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
     229             :  * the block driver.
     230             :  */
     231           0 : static bool bvec_split_segs(const struct queue_limits *lim,
     232             :                 const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
     233             :                 unsigned max_segs, unsigned max_bytes)
     234             : {
     235           0 :         unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
     236           0 :         unsigned len = min(bv->bv_len, max_len);
     237           0 :         unsigned total_len = 0;
     238           0 :         unsigned seg_size = 0;
     239             : 
     240           0 :         while (len && *nsegs < max_segs) {
     241           0 :                 seg_size = get_max_segment_size(lim, bv->bv_page,
     242           0 :                                                 bv->bv_offset + total_len);
     243           0 :                 seg_size = min(seg_size, len);
     244             : 
     245           0 :                 (*nsegs)++;
     246           0 :                 total_len += seg_size;
     247           0 :                 len -= seg_size;
     248             : 
     249           0 :                 if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
     250             :                         break;
     251             :         }
     252             : 
     253           0 :         *bytes += total_len;
     254             : 
     255             :         /* tell the caller to split the bvec if it is too big to fit */
     256           0 :         return len > 0 || bv->bv_len > max_len;
     257             : }
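/*
 * Illustration only (hypothetical limits): max_segment_size == 4096,
 * seg_boundary_mask == ULONG_MAX, virt_boundary_mask == 0, and a bvec of
 * bv_len == 10000 bytes with plenty of segment and byte budget left.  The
 * loop counts segments of 4096, 4096 and 1808 bytes, so *nsegs grows by 3
 * and *bytes by 10000, and the function returns false because the whole
 * bvec fit.
 */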
     258             : 
     259             : /**
     260             :  * bio_split_rw - split a bio in two bios
     261             :  * @bio:  [in] bio to be split
     262             :  * @lim:  [in] queue limits to split based on
     263             :  * @segs: [out] number of segments in the bio with the first half of the sectors
     264             :  * @bs:   [in] bio set to allocate the clone from
     265             :  * @max_bytes: [in] maximum number of bytes per bio
     266             :  *
     267             :  * Clone @bio, update the bi_iter of the clone to represent the first sectors
     268             :  * of @bio and update @bio->bi_iter to represent the remaining sectors. The
     269             :  * following is guaranteed for the cloned bio:
     270             :  * - That it has at most @max_bytes worth of data
     271             :  * - That it has at most queue_max_segments(@q) segments.
     272             :  *
     273             :  * Except for discard requests the cloned bio will point at the bi_io_vec of
     274             :  * the original bio. It is the responsibility of the caller to ensure that the
     275             :  * original bio is not freed before the cloned bio. The caller is also
     276             :  * responsible for ensuring that @bs is only destroyed after processing of the
     277             :  * split bio has finished.
     278             :  */
     279           0 : struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
     280             :                 unsigned *segs, struct bio_set *bs, unsigned max_bytes)
     281             : {
     282           0 :         struct bio_vec bv, bvprv, *bvprvp = NULL;
     283             :         struct bvec_iter iter;
     284           0 :         unsigned nsegs = 0, bytes = 0;
     285             : 
     286           0 :         bio_for_each_bvec(bv, bio, iter) {
     287             :                 /*
     288             :                  * If the queue doesn't support SG gaps and adding this
     289             :                  * offset would create a gap, disallow it.
     290             :                  */
     291           0 :                 if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
     292             :                         goto split;
     293             : 
     294           0 :                 if (nsegs < lim->max_segments &&
     295           0 :                     bytes + bv.bv_len <= max_bytes &&
     296           0 :                     bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
     297           0 :                         nsegs++;
     298           0 :                         bytes += bv.bv_len;
     299             :                 } else {
     300           0 :                         if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
     301             :                                         lim->max_segments, max_bytes))
     302             :                                 goto split;
     303             :                 }
     304             : 
     305           0 :                 bvprv = bv;
     306           0 :                 bvprvp = &bvprv;
     307             :         }
     308             : 
     309           0 :         *segs = nsegs;
     310           0 :         return NULL;
     311             : split:
     312             :         /*
     313             :          * We can't sanely support splitting for a REQ_NOWAIT bio. End it
     314             :          * with EAGAIN if splitting is required and return an error pointer.
     315             :          */
     316           0 :         if (bio->bi_opf & REQ_NOWAIT) {
     317           0 :                 bio->bi_status = BLK_STS_AGAIN;
     318           0 :                 bio_endio(bio);
     319           0 :                 return ERR_PTR(-EAGAIN);
     320             :         }
     321             : 
     322           0 :         *segs = nsegs;
     323             : 
     324             :         /*
     325             :          * Individual bvecs might not be logical block aligned. Round down the
     326             :          * split size so that each bio is properly block size aligned, even if
     327             :          * we do not use the full hardware limits.
     328             :          */
     329           0 :         bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
     330             : 
     331             :         /*
      332             :          * Bio splitting may cause subtle trouble such as a hang when doing
      333             :          * sync iopoll in the direct I/O path. Given that the performance gain
      334             :          * of iopoll for big I/O can be trivial, disable iopoll when a split is needed.
     335             :          */
     336           0 :         bio_clear_polled(bio);
     337           0 :         return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
     338             : }
     339             : EXPORT_SYMBOL_GPL(bio_split_rw);
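/*
 * Illustration only (hypothetical values): if the split point computed
 * above lands at bytes == 6656 while the logical block size is 4096,
 * ALIGN_DOWN() trims the split to 4096 bytes (8 sectors), so the cloned bio
 * stays logical block aligned and the remaining 2560 bytes stay in the
 * original bio, which the caller re-submits.
 */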
     340             : 
     341             : /**
     342             :  * __bio_split_to_limits - split a bio to fit the queue limits
     343             :  * @bio:     bio to be split
     344             :  * @lim:     queue limits to split based on
     345             :  * @nr_segs: returns the number of segments in the returned bio
     346             :  *
     347             :  * Check if @bio needs splitting based on the queue limits, and if so split off
     348             :  * a bio fitting the limits from the beginning of @bio and return it.  @bio is
     349             :  * shortened to the remainder and re-submitted.
     350             :  *
      351             :  * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which
      352             :  * is provided by the block layer.
     353             :  */
     354           0 : struct bio *__bio_split_to_limits(struct bio *bio,
     355             :                                   const struct queue_limits *lim,
     356             :                                   unsigned int *nr_segs)
     357             : {
     358           0 :         struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
     359             :         struct bio *split;
     360             : 
     361           0 :         switch (bio_op(bio)) {
     362             :         case REQ_OP_DISCARD:
     363             :         case REQ_OP_SECURE_ERASE:
     364           0 :                 split = bio_split_discard(bio, lim, nr_segs, bs);
     365           0 :                 break;
     366             :         case REQ_OP_WRITE_ZEROES:
     367           0 :                 split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
     368             :                 break;
     369             :         default:
     370           0 :                 split = bio_split_rw(bio, lim, nr_segs, bs,
     371           0 :                                 get_max_io_size(bio, lim) << SECTOR_SHIFT);
     372           0 :                 if (IS_ERR(split))
     373             :                         return NULL;
     374             :                 break;
     375             :         }
     376             : 
     377           0 :         if (split) {
      378             :                 /* there is no chance to merge the split bio */
     379           0 :                 split->bi_opf |= REQ_NOMERGE;
     380             : 
     381           0 :                 blkcg_bio_issue_init(split);
     382           0 :                 bio_chain(split, bio);
     383           0 :                 trace_block_split(split, bio->bi_iter.bi_sector);
     384           0 :                 submit_bio_noacct(bio);
     385           0 :                 return split;
     386             :         }
     387             :         return bio;
     388             : }
     389             : 
     390             : /**
     391             :  * bio_split_to_limits - split a bio to fit the queue limits
     392             :  * @bio:     bio to be split
     393             :  *
     394             :  * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
     395             :  * if so split off a bio fitting the limits from the beginning of @bio and
     396             :  * return it.  @bio is shortened to the remainder and re-submitted.
     397             :  *
      398             :  * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which
      399             :  * is provided by the block layer.
     400             :  */
     401           0 : struct bio *bio_split_to_limits(struct bio *bio)
     402             : {
     403           0 :         const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
     404             :         unsigned int nr_segs;
     405             : 
     406           0 :         if (bio_may_exceed_limits(bio, lim))
     407           0 :                 return __bio_split_to_limits(bio, lim, &nr_segs);
     408             :         return bio;
     409             : }
     410             : EXPORT_SYMBOL(bio_split_to_limits);
     411             : 
     412           0 : unsigned int blk_recalc_rq_segments(struct request *rq)
     413             : {
     414           0 :         unsigned int nr_phys_segs = 0;
     415           0 :         unsigned int bytes = 0;
     416             :         struct req_iterator iter;
     417             :         struct bio_vec bv;
     418             : 
     419           0 :         if (!rq->bio)
     420             :                 return 0;
     421             : 
     422           0 :         switch (bio_op(rq->bio)) {
     423             :         case REQ_OP_DISCARD:
     424             :         case REQ_OP_SECURE_ERASE:
     425           0 :                 if (queue_max_discard_segments(rq->q) > 1) {
     426             :                         struct bio *bio = rq->bio;
     427             : 
     428           0 :                         for_each_bio(bio)
     429           0 :                                 nr_phys_segs++;
     430           0 :                         return nr_phys_segs;
     431             :                 }
     432             :                 return 1;
     433             :         case REQ_OP_WRITE_ZEROES:
     434             :                 return 0;
     435             :         default:
     436             :                 break;
     437             :         }
     438             : 
     439           0 :         rq_for_each_bvec(bv, rq, iter)
     440           0 :                 bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
     441             :                                 UINT_MAX, UINT_MAX);
     442           0 :         return nr_phys_segs;
     443             : }
     444             : 
     445             : static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
     446             :                 struct scatterlist *sglist)
     447             : {
     448           0 :         if (!*sg)
     449             :                 return sglist;
     450             : 
     451             :         /*
     452             :          * If the driver previously mapped a shorter list, we could see a
     453             :          * termination bit prematurely unless it fully inits the sg table
     454             :          * on each mapping. We KNOW that there must be more entries here
     455             :          * or the driver would be buggy, so force clear the termination bit
     456             :          * to avoid doing a full sg_init_table() in drivers for each command.
     457             :          */
     458           0 :         sg_unmark_end(*sg);
     459           0 :         return sg_next(*sg);
     460             : }
     461             : 
     462           0 : static unsigned blk_bvec_map_sg(struct request_queue *q,
     463             :                 struct bio_vec *bvec, struct scatterlist *sglist,
     464             :                 struct scatterlist **sg)
     465             : {
     466           0 :         unsigned nbytes = bvec->bv_len;
     467           0 :         unsigned nsegs = 0, total = 0;
     468             : 
     469           0 :         while (nbytes > 0) {
     470           0 :                 unsigned offset = bvec->bv_offset + total;
     471           0 :                 unsigned len = min(get_max_segment_size(&q->limits,
     472             :                                    bvec->bv_page, offset), nbytes);
     473           0 :                 struct page *page = bvec->bv_page;
     474             : 
     475             :                 /*
     476             :                  * Unfortunately a fair number of drivers barf on scatterlists
     477             :                  * that have an offset larger than PAGE_SIZE, despite other
     478             :                  * subsystems dealing with that invariant just fine.  For now
     479             :                  * stick to the legacy format where we never present those from
     480             :                  * the block layer, but the code below should be removed once
     481             :                  * these offenders (mostly MMC/SD drivers) are fixed.
     482             :                  */
     483           0 :                 page += (offset >> PAGE_SHIFT);
     484           0 :                 offset &= ~PAGE_MASK;
     485             : 
     486           0 :                 *sg = blk_next_sg(sg, sglist);
     487           0 :                 sg_set_page(*sg, page, len, offset);
     488             : 
     489           0 :                 total += len;
     490           0 :                 nbytes -= len;
     491           0 :                 nsegs++;
     492             :         }
     493             : 
     494           0 :         return nsegs;
     495             : }
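/*
 * Illustration only (hypothetical limits, assuming PAGE_SIZE == 4096): a
 * 12288-byte bvec at bv_offset 0 with max_segment_size == 4096 is emitted
 * as three scatterlist entries of 4096 bytes each.  On the second pass
 * offset == 4096, so the legacy workaround above advances page by one and
 * resets offset to 0, keeping every sg entry's offset below PAGE_SIZE.
 */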
     496             : 
     497           0 : static inline int __blk_bvec_map_sg(struct bio_vec bv,
     498             :                 struct scatterlist *sglist, struct scatterlist **sg)
     499             : {
     500           0 :         *sg = blk_next_sg(sg, sglist);
     501           0 :         sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
     502           0 :         return 1;
     503             : }
     504             : 
     505             : /* only try to merge bvecs into one sg if they are from two bios */
     506             : static inline bool
     507           0 : __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
     508             :                            struct bio_vec *bvprv, struct scatterlist **sg)
     509             : {
     510             : 
     511           0 :         int nbytes = bvec->bv_len;
     512             : 
     513           0 :         if (!*sg)
     514             :                 return false;
     515             : 
     516           0 :         if ((*sg)->length + nbytes > queue_max_segment_size(q))
     517             :                 return false;
     518             : 
     519           0 :         if (!biovec_phys_mergeable(q, bvprv, bvec))
     520             :                 return false;
     521             : 
     522           0 :         (*sg)->length += nbytes;
     523             : 
     524             :         return true;
     525             : }
     526             : 
     527           0 : static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
     528             :                              struct scatterlist *sglist,
     529             :                              struct scatterlist **sg)
     530             : {
     531           0 :         struct bio_vec bvec, bvprv = { NULL };
     532             :         struct bvec_iter iter;
     533           0 :         int nsegs = 0;
     534           0 :         bool new_bio = false;
     535             : 
     536           0 :         for_each_bio(bio) {
     537           0 :                 bio_for_each_bvec(bvec, bio, iter) {
     538             :                         /*
      539             :                          * Only try to merge bvecs from two different bios,
      540             :                          * since bvecs within a bio were already merged when
      541             :                          * pages were added to the bio
     542             :                          */
     543           0 :                         if (new_bio &&
     544           0 :                             __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
     545             :                                 goto next_bvec;
     546             : 
     547           0 :                         if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
     548           0 :                                 nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
     549             :                         else
     550           0 :                                 nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
     551             :  next_bvec:
     552           0 :                         new_bio = false;
     553             :                 }
     554           0 :                 if (likely(bio->bi_iter.bi_size)) {
     555           0 :                         bvprv = bvec;
     556           0 :                         new_bio = true;
     557             :                 }
     558             :         }
     559             : 
     560           0 :         return nsegs;
     561             : }
     562             : 
     563             : /*
      564             :  * map a request to a scatterlist, return the number of sg entries set up. The
      565             :  * caller must make sure sg can hold rq->nr_phys_segments entries
     566             :  */
     567           0 : int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
     568             :                 struct scatterlist *sglist, struct scatterlist **last_sg)
     569             : {
     570           0 :         int nsegs = 0;
     571             : 
     572           0 :         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
     573           0 :                 nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
     574           0 :         else if (rq->bio)
     575           0 :                 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
     576             : 
     577           0 :         if (*last_sg)
     578           0 :                 sg_mark_end(*last_sg);
     579             : 
     580             :         /*
      581             :          * Something must be wrong if the computed number of segments
      582             :          * is bigger than the number of the request's physical segments
     583             :          */
     584           0 :         WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
     585             : 
     586           0 :         return nsegs;
     587             : }
     588             : EXPORT_SYMBOL(__blk_rq_map_sg);
     589             : 
     590           0 : static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
     591             :                                                   sector_t offset)
     592             : {
     593           0 :         struct request_queue *q = rq->q;
     594             :         unsigned int max_sectors;
     595             : 
     596           0 :         if (blk_rq_is_passthrough(rq))
     597           0 :                 return q->limits.max_hw_sectors;
     598             : 
     599           0 :         max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
     600           0 :         if (!q->limits.chunk_sectors ||
     601           0 :             req_op(rq) == REQ_OP_DISCARD ||
     602           0 :             req_op(rq) == REQ_OP_SECURE_ERASE)
     603             :                 return max_sectors;
     604           0 :         return min(max_sectors,
     605             :                    blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
     606             : }
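/*
 * Illustration only (hypothetical limits): chunk_sectors == 256 and a
 * request at offset == sector 1000.  The next chunk boundary is sector
 * 1024, so blk_chunk_sectors_left() returns 24 and the request is capped
 * at min(max_sectors, 24) to keep it from straddling the chunk (e.g. zone)
 * boundary.
 */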
     607             : 
     608             : static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
     609             :                 unsigned int nr_phys_segs)
     610             : {
     611           0 :         if (!blk_cgroup_mergeable(req, bio))
     612             :                 goto no_merge;
     613             : 
     614           0 :         if (blk_integrity_merge_bio(req->q, req, bio) == false)
     615             :                 goto no_merge;
     616             : 
      617             :         /* a discard request merge won't add a new segment */
     618           0 :         if (req_op(req) == REQ_OP_DISCARD)
     619             :                 return 1;
     620             : 
     621           0 :         if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
     622             :                 goto no_merge;
     623             : 
     624             :         /*
     625             :          * This will form the start of a new hw segment.  Bump both
     626             :          * counters.
     627             :          */
     628           0 :         req->nr_phys_segments += nr_phys_segs;
     629             :         return 1;
     630             : 
     631             : no_merge:
     632           0 :         req_set_nomerge(req->q, req);
     633             :         return 0;
     634             : }
     635             : 
     636           0 : int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
     637             : {
     638           0 :         if (req_gap_back_merge(req, bio))
     639             :                 return 0;
     640           0 :         if (blk_integrity_rq(req) &&
     641             :             integrity_req_gap_back_merge(req, bio))
     642             :                 return 0;
     643           0 :         if (!bio_crypt_ctx_back_mergeable(req, bio))
     644             :                 return 0;
     645           0 :         if (blk_rq_sectors(req) + bio_sectors(bio) >
     646           0 :             blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
     647           0 :                 req_set_nomerge(req->q, req);
     648             :                 return 0;
     649             :         }
     650             : 
     651           0 :         return ll_new_hw_segment(req, bio, nr_segs);
     652             : }
     653             : 
     654           0 : static int ll_front_merge_fn(struct request *req, struct bio *bio,
     655             :                 unsigned int nr_segs)
     656             : {
     657           0 :         if (req_gap_front_merge(req, bio))
     658             :                 return 0;
     659           0 :         if (blk_integrity_rq(req) &&
     660             :             integrity_req_gap_front_merge(req, bio))
     661             :                 return 0;
     662           0 :         if (!bio_crypt_ctx_front_mergeable(req, bio))
     663             :                 return 0;
     664           0 :         if (blk_rq_sectors(req) + bio_sectors(bio) >
     665           0 :             blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
     666           0 :                 req_set_nomerge(req->q, req);
     667             :                 return 0;
     668             :         }
     669             : 
     670           0 :         return ll_new_hw_segment(req, bio, nr_segs);
     671             : }
     672             : 
     673           0 : static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
     674             :                 struct request *next)
     675             : {
     676           0 :         unsigned short segments = blk_rq_nr_discard_segments(req);
     677             : 
     678           0 :         if (segments >= queue_max_discard_segments(q))
     679             :                 goto no_merge;
     680           0 :         if (blk_rq_sectors(req) + bio_sectors(next->bio) >
     681           0 :             blk_rq_get_max_sectors(req, blk_rq_pos(req)))
     682             :                 goto no_merge;
     683             : 
     684           0 :         req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
     685           0 :         return true;
     686             : no_merge:
     687           0 :         req_set_nomerge(q, req);
     688             :         return false;
     689             : }
     690             : 
     691           0 : static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
     692             :                                 struct request *next)
     693             : {
     694             :         int total_phys_segments;
     695             : 
     696           0 :         if (req_gap_back_merge(req, next->bio))
     697             :                 return 0;
     698             : 
     699             :         /*
     700             :          * Will it become too large?
     701             :          */
     702           0 :         if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
     703           0 :             blk_rq_get_max_sectors(req, blk_rq_pos(req)))
     704             :                 return 0;
     705             : 
     706           0 :         total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
     707           0 :         if (total_phys_segments > blk_rq_get_max_segments(req))
     708             :                 return 0;
     709             : 
     710           0 :         if (!blk_cgroup_mergeable(req, next->bio))
     711             :                 return 0;
     712             : 
     713           0 :         if (blk_integrity_merge_rq(q, req, next) == false)
     714             :                 return 0;
     715             : 
     716           0 :         if (!bio_crypt_ctx_merge_rq(req, next))
     717             :                 return 0;
     718             : 
     719             :         /* Merge is OK... */
     720           0 :         req->nr_phys_segments = total_phys_segments;
     721             :         return 1;
     722             : }
     723             : 
     724             : /**
     725             :  * blk_rq_set_mixed_merge - mark a request as mixed merge
     726             :  * @rq: request to mark as mixed merge
     727             :  *
     728             :  * Description:
     729             :  *     @rq is about to be mixed merged.  Make sure the attributes
     730             :  *     which can be mixed are set in each bio and mark @rq as mixed
     731             :  *     merged.
     732             :  */
     733           0 : void blk_rq_set_mixed_merge(struct request *rq)
     734             : {
     735           0 :         blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
     736             :         struct bio *bio;
     737             : 
     738           0 :         if (rq->rq_flags & RQF_MIXED_MERGE)
     739             :                 return;
     740             : 
     741             :         /*
     742             :          * @rq will no longer represent mixable attributes for all the
     743             :          * contained bios.  It will just track those of the first one.
      744             :          * Distribute the attributes to each bio.
     745             :          */
     746           0 :         for (bio = rq->bio; bio; bio = bio->bi_next) {
     747           0 :                 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
     748             :                              (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
     749           0 :                 bio->bi_opf |= ff;
     750             :         }
     751           0 :         rq->rq_flags |= RQF_MIXED_MERGE;
     752             : }
     753             : 
     754             : static inline blk_opf_t bio_failfast(const struct bio *bio)
     755             : {
     756           0 :         if (bio->bi_opf & REQ_RAHEAD)
     757             :                 return REQ_FAILFAST_MASK;
     758             : 
     759           0 :         return bio->bi_opf & REQ_FAILFAST_MASK;
     760             : }
     761             : 
     762             : /*
     763             :  * After we are marked as MIXED_MERGE, any new RA bio has to be updated
      764             :  * as failfast, and the request's failfast setting has to be updated in
      765             :  * case of a front merge.
     766             :  */
     767             : static inline void blk_update_mixed_merge(struct request *req,
     768             :                 struct bio *bio, bool front_merge)
     769             : {
     770           0 :         if (req->rq_flags & RQF_MIXED_MERGE) {
     771           0 :                 if (bio->bi_opf & REQ_RAHEAD)
     772           0 :                         bio->bi_opf |= REQ_FAILFAST_MASK;
     773             : 
     774             :                 if (front_merge) {
     775           0 :                         req->cmd_flags &= ~REQ_FAILFAST_MASK;
     776           0 :                         req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
     777             :                 }
     778             :         }
     779             : }
     780             : 
     781           0 : static void blk_account_io_merge_request(struct request *req)
     782             : {
     783           0 :         if (blk_do_io_stat(req)) {
     784           0 :                 part_stat_lock();
     785           0 :                 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
     786           0 :                 part_stat_unlock();
     787             :         }
     788           0 : }
     789             : 
     790             : static enum elv_merge blk_try_req_merge(struct request *req,
     791             :                                         struct request *next)
     792             : {
     793           0 :         if (blk_discard_mergable(req))
     794             :                 return ELEVATOR_DISCARD_MERGE;
     795           0 :         else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
     796             :                 return ELEVATOR_BACK_MERGE;
     797             : 
     798             :         return ELEVATOR_NO_MERGE;
     799             : }
     800             : 
     801             : /*
     802             :  * For non-mq, this has to be called with the request spinlock acquired.
     803             :  * For mq with scheduling, the appropriate queue wide lock should be held.
     804             :  */
     805           0 : static struct request *attempt_merge(struct request_queue *q,
     806             :                                      struct request *req, struct request *next)
     807             : {
     808           0 :         if (!rq_mergeable(req) || !rq_mergeable(next))
     809             :                 return NULL;
     810             : 
     811           0 :         if (req_op(req) != req_op(next))
     812             :                 return NULL;
     813             : 
     814           0 :         if (rq_data_dir(req) != rq_data_dir(next))
     815             :                 return NULL;
     816             : 
     817           0 :         if (req->ioprio != next->ioprio)
     818             :                 return NULL;
     819             : 
     820             :         /*
      821             :          * If we are allowed to merge, then append the bio list
      822             :          * from 'next' to 'rq' and release 'next'. merge_requests_fn
      823             :          * will have updated the segment counts; update the sector
      824             :          * counts here. Handle DISCARDs separately, as they
      825             :          * have separate settings.
     826             :          */
     827             : 
     828           0 :         switch (blk_try_req_merge(req, next)) {
     829             :         case ELEVATOR_DISCARD_MERGE:
     830           0 :                 if (!req_attempt_discard_merge(q, req, next))
     831             :                         return NULL;
     832             :                 break;
     833             :         case ELEVATOR_BACK_MERGE:
     834           0 :                 if (!ll_merge_requests_fn(q, req, next))
     835             :                         return NULL;
     836             :                 break;
     837             :         default:
     838             :                 return NULL;
     839             :         }
     840             : 
     841             :         /*
      842             :          * If failfast settings disagree or either of the two is already
     843             :          * a mixed merge, mark both as mixed before proceeding.  This
     844             :          * makes sure that all involved bios have mixable attributes
     845             :          * set properly.
     846             :          */
     847           0 :         if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
     848           0 :             (req->cmd_flags & REQ_FAILFAST_MASK) !=
     849           0 :             (next->cmd_flags & REQ_FAILFAST_MASK)) {
     850           0 :                 blk_rq_set_mixed_merge(req);
     851           0 :                 blk_rq_set_mixed_merge(next);
     852             :         }
     853             : 
     854             :         /*
      855             :          * At this point we have either done a back merge or a front merge.
      856             :          * We need the current request to carry the smaller start_time_ns of
      857             :          * the two merged requests for accounting purposes.
     858             :          */
     859           0 :         if (next->start_time_ns < req->start_time_ns)
     860           0 :                 req->start_time_ns = next->start_time_ns;
     861             : 
     862           0 :         req->biotail->bi_next = next->bio;
     863           0 :         req->biotail = next->biotail;
     864             : 
     865           0 :         req->__data_len += blk_rq_bytes(next);
     866             : 
     867           0 :         if (!blk_discard_mergable(req))
     868           0 :                 elv_merge_requests(q, req, next);
     869             : 
     870             :         /*
     871             :          * 'next' is going away, so update stats accordingly
     872             :          */
     873           0 :         blk_account_io_merge_request(next);
     874             : 
     875           0 :         trace_block_rq_merge(next);
     876             : 
     877             :         /*
      878             :          * ownership of the bios has passed from 'next' to 'req'; return
      879             :          * 'next' for the caller to free
     880             :          */
     881           0 :         next->bio = NULL;
     882           0 :         return next;
     883             : }
     884             : 
     885           0 : static struct request *attempt_back_merge(struct request_queue *q,
     886             :                 struct request *rq)
     887             : {
     888           0 :         struct request *next = elv_latter_request(q, rq);
     889             : 
     890           0 :         if (next)
     891           0 :                 return attempt_merge(q, rq, next);
     892             : 
     893             :         return NULL;
     894             : }
     895             : 
     896           0 : static struct request *attempt_front_merge(struct request_queue *q,
     897             :                 struct request *rq)
     898             : {
     899           0 :         struct request *prev = elv_former_request(q, rq);
     900             : 
     901           0 :         if (prev)
     902           0 :                 return attempt_merge(q, prev, rq);
     903             : 
     904             :         return NULL;
     905             : }
     906             : 
     907             : /*
     908             :  * Try to merge 'next' into 'rq'. Return true if the merge happened, false
     909             :  * otherwise. The caller is responsible for freeing 'next' if the merge
     910             :  * happened.
     911             :  */
     912           0 : bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
     913             :                            struct request *next)
     914             : {
     915           0 :         return attempt_merge(q, rq, next);
     916             : }
     917             : 
     918           0 : bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
     919             : {
     920           0 :         if (!rq_mergeable(rq) || !bio_mergeable(bio))
     921             :                 return false;
     922             : 
     923           0 :         if (req_op(rq) != bio_op(bio))
     924             :                 return false;
     925             : 
     926             :         /* different data direction or already started, don't merge */
     927           0 :         if (bio_data_dir(bio) != rq_data_dir(rq))
     928             :                 return false;
     929             : 
     930             :         /* don't merge across cgroup boundaries */
     931           0 :         if (!blk_cgroup_mergeable(rq, bio))
     932             :                 return false;
     933             : 
      934             :         /* only merge an integrity-protected bio into a likewise protected rq */
     935           0 :         if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
     936             :                 return false;
     937             : 
     938             :         /* Only merge if the crypt contexts are compatible */
     939           0 :         if (!bio_crypt_rq_ctx_compatible(rq, bio))
     940             :                 return false;
     941             : 
     942           0 :         if (rq->ioprio != bio_prio(bio))
     943             :                 return false;
     944             : 
     945           0 :         return true;
     946             : }
     947             : 
     948           0 : enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
     949             : {
     950           0 :         if (blk_discard_mergable(rq))
     951             :                 return ELEVATOR_DISCARD_MERGE;
     952           0 :         else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
     953             :                 return ELEVATOR_BACK_MERGE;
     954           0 :         else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
     955             :                 return ELEVATOR_FRONT_MERGE;
     956           0 :         return ELEVATOR_NO_MERGE;
     957             : }
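/*
 * Illustration only (hypothetical positions, non-discard request): a
 * request covering sectors 100..107 (blk_rq_pos == 100, blk_rq_sectors ==
 * 8) back-merges with a bio whose bi_sector is 108, and front-merges with
 * an 8-sector bio whose bi_sector is 92, since 100 - 8 == 92.  Anything
 * else is ELEVATOR_NO_MERGE.
 */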
     958             : 
     959           0 : static void blk_account_io_merge_bio(struct request *req)
     960             : {
     961           0 :         if (!blk_do_io_stat(req))
     962             :                 return;
     963             : 
     964           0 :         part_stat_lock();
     965           0 :         part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
     966           0 :         part_stat_unlock();
     967             : }
     968             : 
     969             : enum bio_merge_status {
     970             :         BIO_MERGE_OK,
     971             :         BIO_MERGE_NONE,
     972             :         BIO_MERGE_FAILED,
     973             : };
     974             : 
     975           0 : static enum bio_merge_status bio_attempt_back_merge(struct request *req,
     976             :                 struct bio *bio, unsigned int nr_segs)
     977             : {
     978           0 :         const blk_opf_t ff = bio_failfast(bio);
     979             : 
     980           0 :         if (!ll_back_merge_fn(req, bio, nr_segs))
     981             :                 return BIO_MERGE_FAILED;
     982             : 
     983           0 :         trace_block_bio_backmerge(bio);
     984           0 :         rq_qos_merge(req->q, req, bio);
     985             : 
     986           0 :         if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
     987           0 :                 blk_rq_set_mixed_merge(req);
     988             : 
     989           0 :         blk_update_mixed_merge(req, bio, false);
     990             : 
     991           0 :         req->biotail->bi_next = bio;
     992           0 :         req->biotail = bio;
     993           0 :         req->__data_len += bio->bi_iter.bi_size;
     994             : 
     995           0 :         bio_crypt_free_ctx(bio);
     996             : 
     997           0 :         blk_account_io_merge_bio(req);
     998           0 :         return BIO_MERGE_OK;
     999             : }
    1000             : 
    1001           0 : static enum bio_merge_status bio_attempt_front_merge(struct request *req,
    1002             :                 struct bio *bio, unsigned int nr_segs)
    1003             : {
    1004           0 :         const blk_opf_t ff = bio_failfast(bio);
    1005             : 
    1006           0 :         if (!ll_front_merge_fn(req, bio, nr_segs))
    1007             :                 return BIO_MERGE_FAILED;
    1008             : 
    1009           0 :         trace_block_bio_frontmerge(bio);
    1010           0 :         rq_qos_merge(req->q, req, bio);
    1011             : 
    1012           0 :         if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
    1013           0 :                 blk_rq_set_mixed_merge(req);
    1014             : 
    1015           0 :         blk_update_mixed_merge(req, bio, true);
    1016             : 
    1017           0 :         bio->bi_next = req->bio;
    1018           0 :         req->bio = bio;
    1019             : 
    1020           0 :         req->__sector = bio->bi_iter.bi_sector;
    1021           0 :         req->__data_len += bio->bi_iter.bi_size;
    1022             : 
    1023           0 :         bio_crypt_do_front_merge(req, bio);
    1024             : 
    1025           0 :         blk_account_io_merge_bio(req);
    1026           0 :         return BIO_MERGE_OK;
    1027             : }
    1028             : 
    1029           0 : static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
    1030             :                 struct request *req, struct bio *bio)
    1031             : {
    1032           0 :         unsigned short segments = blk_rq_nr_discard_segments(req);
    1033             : 
    1034           0 :         if (segments >= queue_max_discard_segments(q))
    1035             :                 goto no_merge;
    1036           0 :         if (blk_rq_sectors(req) + bio_sectors(bio) >
    1037           0 :             blk_rq_get_max_sectors(req, blk_rq_pos(req)))
    1038             :                 goto no_merge;
    1039             : 
    1040           0 :         rq_qos_merge(q, req, bio);
    1041             : 
    1042           0 :         req->biotail->bi_next = bio;
    1043           0 :         req->biotail = bio;
    1044           0 :         req->__data_len += bio->bi_iter.bi_size;
    1045           0 :         req->nr_phys_segments = segments + 1;
    1046             : 
    1047           0 :         blk_account_io_merge_bio(req);
    1048           0 :         return BIO_MERGE_OK;
    1049             : no_merge:
    1050           0 :         req_set_nomerge(q, req);
    1051             :         return BIO_MERGE_FAILED;
    1052             : }
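
/*
 * Illustrative sketch, not part of blk-merge.c: for discards each merged bio
 * remains its own discard range, so bio_attempt_discard_merge() only checks
 * the per-request range count against queue_max_discard_segments() and the
 * combined size against the request's sector limit.  The hypothetical helper
 * below is that check on plain numbers: a request already holding as many
 * ranges as the device advertises cannot accept a merge, whatever its size.
 */
static bool ex_discard_merge_fits(unsigned short cur_ranges,
                                  unsigned short max_ranges,
                                  unsigned int cur_sectors,
                                  unsigned int bio_sectors,
                                  unsigned int max_sectors)
{
        if (cur_ranges >= max_ranges)
                return false;           /* range table already full */
        if (cur_sectors + bio_sectors > max_sectors)
                return false;           /* combined discard would be too large */
        return true;                    /* caller appends the bio and bumps the count */
}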
    1053             : 
    1054           0 : static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
    1055             :                                                    struct request *rq,
    1056             :                                                    struct bio *bio,
    1057             :                                                    unsigned int nr_segs,
    1058             :                                                    bool sched_allow_merge)
    1059             : {
    1060           0 :         if (!blk_rq_merge_ok(rq, bio))
    1061             :                 return BIO_MERGE_NONE;
    1062             : 
    1063           0 :         switch (blk_try_merge(rq, bio)) {
    1064             :         case ELEVATOR_BACK_MERGE:
    1065           0 :                 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
    1066           0 :                         return bio_attempt_back_merge(rq, bio, nr_segs);
    1067             :                 break;
    1068             :         case ELEVATOR_FRONT_MERGE:
    1069           0 :                 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
    1070           0 :                         return bio_attempt_front_merge(rq, bio, nr_segs);
    1071             :                 break;
    1072             :         case ELEVATOR_DISCARD_MERGE:
    1073           0 :                 return bio_attempt_discard_merge(q, rq, bio);
    1074             :         default:
    1075             :                 return BIO_MERGE_NONE;
    1076             :         }
    1077             : 
    1078             :         return BIO_MERGE_FAILED;
    1079             : }
    1080             : 
    1081             : /**
    1082             :  * blk_attempt_plug_merge - try to merge with %current's plugged list
    1083             :  * @q: request_queue new bio is being queued at
    1084             :  * @bio: new bio being queued
    1085             :  * @nr_segs: number of segments in @bio
    1087             :  *
    1088             :  * Determine whether @bio being queued on @q can be merged with the previous
    1089             :  * request on %current's plugged list.  Returns %true if merge was successful,
    1090             :  * otherwise %false.
    1091             :  *
    1092             :  * Plugging coalesces IOs from the same issuer for the same purpose without
    1093             :  * going through @q->queue_lock.  As such it's more of an issuing mechanism
    1094             :  * than scheduling, and the request, while may have elvpriv data, is not
     1095             :  * than scheduling, and the request, while it may have elvpriv data, is
     1096             :  * not added to the elevator at this point.  In addition, we don't have
    1097             :  * merging parameters without querying the elevator.
    1098             :  *
    1099             :  * Caller must ensure !blk_queue_nomerges(q) beforehand.
    1100             :  */
    1101           0 : bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
    1102             :                 unsigned int nr_segs)
    1103             : {
    1104             :         struct blk_plug *plug;
    1105             :         struct request *rq;
    1106             : 
    1107           0 :         plug = blk_mq_plug(bio);
    1108           0 :         if (!plug || rq_list_empty(plug->mq_list))
    1109             :                 return false;
    1110             : 
    1111           0 :         rq_list_for_each(&plug->mq_list, rq) {
    1112           0 :                 if (rq->q == q) {
    1113           0 :                         if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
    1114             :                             BIO_MERGE_OK)
    1115             :                                 return true;
    1116             :                         break;
    1117             :                 }
    1118             : 
    1119             :                 /*
    1120             :                  * Only keep iterating plug list for merges if we have multiple
    1121             :                  * queues
    1122             :                  */
    1123           0 :                 if (!plug->multiple_queues)
    1124             :                         break;
    1125             :         }
    1126             :         return false;
    1127             : }
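
/*
 * Illustrative sketch, not part of blk-merge.c: roughly how a submission
 * path is expected to use blk_attempt_plug_merge().  Before paying for a new
 * request, it tries folding the bio into a request already parked on the
 * caller's plug.  ex_submit_one() and the "allocate and queue" step are
 * placeholders, not real block-layer code, and the helper assumes kernel
 * context with this file's includes available.
 */
static void ex_submit_one(struct request_queue *q, struct bio *bio,
                          unsigned int nr_segs)
{
        /* the kerneldoc above requires the nomerges check on the caller side */
        if (!blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, nr_segs))
                return;                 /* bio absorbed into a plugged request */

        /* otherwise allocate a request for the bio and queue it as usual */
}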
    1128             : 
    1129             : /*
    1130             :  * Iterate list of requests and see if we can merge this bio with any
    1131             :  * of them.
    1132             :  */
    1133           0 : bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
    1134             :                         struct bio *bio, unsigned int nr_segs)
    1135             : {
    1136             :         struct request *rq;
    1137           0 :         int checked = 8;
    1138             : 
    1139           0 :         list_for_each_entry_reverse(rq, list, queuelist) {
    1140           0 :                 if (!checked--)
    1141             :                         break;
    1142             : 
    1143           0 :                 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
    1144             :                 case BIO_MERGE_NONE:
    1145           0 :                         continue;
    1146             :                 case BIO_MERGE_OK:
    1147             :                         return true;
    1148             :                 case BIO_MERGE_FAILED:
    1149           0 :                         return false;
    1150             :                 }
    1151             : 
    1153             : 
    1154             :         return false;
    1155             : }
    1156             : EXPORT_SYMBOL_GPL(blk_bio_list_merge);
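
/*
 * Illustrative sketch, not part of blk-merge.c: blk_bio_list_merge() walks at
 * most eight of the most recently added requests and stops at the first
 * definitive answer, so a scheduler that keeps pending requests on a plain
 * queuelist-linked list can use it directly as the body of its ->bio_merge
 * hook.  struct ex_sched_data and ex_sched_bio_merge() are hypothetical
 * scheduler-private stand-ins.
 */
struct ex_sched_data {
        spinlock_t lock;                /* protects the pending list */
        struct list_head pending;       /* requests linked via rq->queuelist */
};

static bool ex_sched_bio_merge(struct request_queue *q, struct bio *bio,
                               unsigned int nr_segs)
{
        struct ex_sched_data *ed = q->elevator->elevator_data;
        bool merged;

        spin_lock(&ed->lock);
        merged = blk_bio_list_merge(q, &ed->pending, bio, nr_segs);
        spin_unlock(&ed->lock);

        return merged;
}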
    1157             : 
    1158           0 : bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
    1159             :                 unsigned int nr_segs, struct request **merged_request)
    1160             : {
    1161             :         struct request *rq;
    1162             : 
    1163           0 :         switch (elv_merge(q, &rq, bio)) {
    1164             :         case ELEVATOR_BACK_MERGE:
    1165           0 :                 if (!blk_mq_sched_allow_merge(q, rq, bio))
    1166             :                         return false;
    1167           0 :                 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
    1168             :                         return false;
    1169           0 :                 *merged_request = attempt_back_merge(q, rq);
    1170           0 :                 if (!*merged_request)
    1171           0 :                         elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
    1172             :                 return true;
    1173             :         case ELEVATOR_FRONT_MERGE:
    1174           0 :                 if (!blk_mq_sched_allow_merge(q, rq, bio))
    1175             :                         return false;
    1176           0 :                 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
    1177             :                         return false;
    1178           0 :                 *merged_request = attempt_front_merge(q, rq);
    1179           0 :                 if (!*merged_request)
    1180           0 :                         elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
    1181             :                 return true;
    1182             :         case ELEVATOR_DISCARD_MERGE:
    1183           0 :                 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
    1184             :         default:
    1185             :                 return false;
    1186             :         }
    1187             : }
    1188             : EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
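
/*
 * Illustrative sketch, not part of blk-merge.c: when the bio merge above also
 * lets two adjacent requests collapse into one, blk_mq_sched_try_merge()
 * returns the now-redundant request through @merged_request, and the caller
 * frees it, typically outside its own lock.  The hypothetical hook below
 * reuses ex_sched_data from the previous sketch and mirrors how an elevator's
 * ->bio_merge implementation is commonly structured.
 */
static bool ex_elv_bio_merge(struct request_queue *q, struct bio *bio,
                             unsigned int nr_segs)
{
        struct ex_sched_data *ed = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&ed->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&ed->lock);

        if (free)
                blk_mq_free_request(free);      /* drop the absorbed request */

        return ret;
}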

Generated by: LCOV version 1.14