LCOV - code coverage report
Current view: top level - include/linux - bio.h (source / functions)
Test: coverage.info
Date: 2023-03-27 20:00:47

                       Hit     Total    Coverage
         Lines:          1        67       1.5 %
         Functions:      0         3       0.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : /*
       3             :  * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
       4             :  */
       5             : #ifndef __LINUX_BIO_H
       6             : #define __LINUX_BIO_H
       7             : 
       8             : #include <linux/mempool.h>
       9             : /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
      10             : #include <linux/blk_types.h>
      11             : #include <linux/uio.h>
      12             : 
      13             : #define BIO_MAX_VECS            256U
      14             : 
      15             : struct queue_limits;
      16             : 
      17             : static inline unsigned int bio_max_segs(unsigned int nr_segs)
      18             : {
      19           0 :         return min(nr_segs, BIO_MAX_VECS);
      20             : }
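
/*
 * Illustrative usage sketch, not part of bio.h: clamping a caller-computed
 * page count to what a single bio may carry. vecs_for_len() is a hypothetical
 * helper; DIV_ROUND_UP() comes from linux/math.h.
 */
static unsigned int vecs_for_len(size_t len)
{
        return bio_max_segs(DIV_ROUND_UP(len, PAGE_SIZE));
}
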
      21             : 
      22             : #define bio_prio(bio)                   (bio)->bi_ioprio
      23             : #define bio_set_prio(bio, prio)         ((bio)->bi_ioprio = prio)
      24             : 
      25             : #define bio_iter_iovec(bio, iter)                               \
      26             :         bvec_iter_bvec((bio)->bi_io_vec, (iter))
      27             : 
      28             : #define bio_iter_page(bio, iter)                                \
      29             :         bvec_iter_page((bio)->bi_io_vec, (iter))
      30             : #define bio_iter_len(bio, iter)                                 \
      31             :         bvec_iter_len((bio)->bi_io_vec, (iter))
      32             : #define bio_iter_offset(bio, iter)                              \
      33             :         bvec_iter_offset((bio)->bi_io_vec, (iter))
      34             : 
      35             : #define bio_page(bio)           bio_iter_page((bio), (bio)->bi_iter)
      36             : #define bio_offset(bio)         bio_iter_offset((bio), (bio)->bi_iter)
      37             : #define bio_iovec(bio)          bio_iter_iovec((bio), (bio)->bi_iter)
      38             : 
      39             : #define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
      40             : #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
      41             : 
      42             : #define bio_sectors(bio)        bvec_iter_sectors((bio)->bi_iter)
      43             : #define bio_end_sector(bio)     bvec_iter_end_sector((bio)->bi_iter)
      44             : 
      45             : /*
      46             :  * Return the data direction, READ or WRITE.
      47             :  */
      48             : #define bio_data_dir(bio) \
      49             :         (op_is_write(bio_op(bio)) ? WRITE : READ)
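
/*
 * Illustrative usage sketch, not part of bio.h: reading the direction, size
 * and start sector of a bio with the accessors above. trace_request() is a
 * hypothetical driver helper; pr_debug() comes from linux/printk.h.
 */
static void trace_request(struct bio *bio)
{
        pr_debug("%s, %u sectors at %llu\n",
                 bio_data_dir(bio) == WRITE ? "write" : "read",
                 bio_sectors(bio),
                 (unsigned long long)bio->bi_iter.bi_sector);
}
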
      50             : 
      51             : /*
      52             :  * Check whether this bio carries any data or not. A NULL bio is allowed.
      53             :  */
      54             : static inline bool bio_has_data(struct bio *bio)
      55             : {
      56           0 :         if (bio &&
      57           0 :             bio->bi_iter.bi_size &&
      58           0 :             bio_op(bio) != REQ_OP_DISCARD &&
      59           0 :             bio_op(bio) != REQ_OP_SECURE_ERASE &&
      60           0 :             bio_op(bio) != REQ_OP_WRITE_ZEROES)
      61             :                 return true;
      62             : 
      63             :         return false;
      64             : }
      65             : 
      66             : static inline bool bio_no_advance_iter(const struct bio *bio)
      67             : {
      68           0 :         return bio_op(bio) == REQ_OP_DISCARD ||
      69           0 :                bio_op(bio) == REQ_OP_SECURE_ERASE ||
      70           0 :                bio_op(bio) == REQ_OP_WRITE_ZEROES;
      71             : }
      72             : 
      73             : static inline void *bio_data(struct bio *bio)
      74             : {
      75             :         if (bio_has_data(bio))
      76             :                 return page_address(bio_page(bio)) + bio_offset(bio);
      77             : 
      78             :         return NULL;
      79             : }
      80             : 
      81             : static inline bool bio_next_segment(const struct bio *bio,
      82             :                                     struct bvec_iter_all *iter)
      83             : {
      84           0 :         if (iter->idx >= bio->bi_vcnt)
      85             :                 return false;
      86             : 
      87           0 :         bvec_advance(&bio->bi_io_vec[iter->idx], iter);
      88             :         return true;
      89             : }
      90             : 
      91             : /*
      92             :  * drivers should _never_ use the all version - the bio may have been split
      93             :  * before it got to the driver and the driver won't own all of it
      94             :  */
      95             : #define bio_for_each_segment_all(bvl, bio, iter) \
      96             :         for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
      97             : 
      98           0 : static inline void bio_advance_iter(const struct bio *bio,
      99             :                                     struct bvec_iter *iter, unsigned int bytes)
     100             : {
     101           0 :         iter->bi_sector += bytes >> 9;
     102             : 
     103           0 :         if (bio_no_advance_iter(bio))
     104           0 :                 iter->bi_size -= bytes;
     105             :         else
     106           0 :                 bvec_iter_advance(bio->bi_io_vec, iter, bytes);
     107             :                 /* TODO: It is reasonable to complete bio with error here. */
     108           0 : }
     109             : 
     110             : /* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
     111           0 : static inline void bio_advance_iter_single(const struct bio *bio,
     112             :                                            struct bvec_iter *iter,
     113             :                                            unsigned int bytes)
     114             : {
     115           0 :         iter->bi_sector += bytes >> 9;
     116             : 
     117           0 :         if (bio_no_advance_iter(bio))
     118           0 :                 iter->bi_size -= bytes;
     119             :         else
     120           0 :                 bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
     121           0 : }
     122             : 
     123             : void __bio_advance(struct bio *, unsigned bytes);
     124             : 
     125             : /**
     126             :  * bio_advance - increment/complete a bio by some number of bytes
     127             :  * @bio:        bio to advance
     128             :  * @nbytes:     number of bytes to complete
     129             :  *
     130             :  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
     131             :  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
     132             :  * be updated on the last bvec as well.
     133             :  *
     134             :  * @bio will then represent the remaining, uncompleted portion of the io.
     135             :  */
     136             : static inline void bio_advance(struct bio *bio, unsigned int nbytes)
     137             : {
     138           0 :         if (nbytes == bio->bi_iter.bi_size) {
     139           0 :                 bio->bi_iter.bi_size = 0;
     140             :                 return;
     141             :         }
     142           0 :         __bio_advance(bio, nbytes);
     143             : }
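
/*
 * Illustrative usage sketch, not part of bio.h: acknowledging a partial
 * transfer with bio_advance() and finishing the bio once nothing is left.
 * complete_partial() is hypothetical and 'done' is an assumed byte count
 * reported by the hardware; bio_endio() is declared further down in this
 * header.
 */
static void complete_partial(struct bio *bio, unsigned int done)
{
        bio_advance(bio, done);
        if (!bio->bi_iter.bi_size)
                bio_endio(bio);
}
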
     144             : 
     145             : #define __bio_for_each_segment(bvl, bio, iter, start)                   \
     146             :         for (iter = (start);                                            \
     147             :              (iter).bi_size &&                                          \
     148             :                 ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
     149             :              bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
     150             : 
     151             : #define bio_for_each_segment(bvl, bio, iter)                            \
     152             :         __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
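
/*
 * Illustrative usage sketch, not part of bio.h: walking a bio one single-page
 * segment at a time with bio_for_each_segment(). count_bio_bytes() is a
 * hypothetical helper.
 */
static unsigned int count_bio_bytes(struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned int bytes = 0;

        bio_for_each_segment(bv, bio, iter)
                bytes += bv.bv_len;

        return bytes;
}
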
     153             : 
     154             : #define __bio_for_each_bvec(bvl, bio, iter, start)              \
     155             :         for (iter = (start);                                            \
     156             :              (iter).bi_size &&                                          \
     157             :                 ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
     158             :              bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
     159             : 
     160             : /* iterate over multi-page bvec */
     161             : #define bio_for_each_bvec(bvl, bio, iter)                       \
     162             :         __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
     163             : 
     164             : /*
     165             :  * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
     166             :  * same reasons as bio_for_each_segment_all().
     167             :  */
     168             : #define bio_for_each_bvec_all(bvl, bio, i)              \
     169             :         for (i = 0, bvl = bio_first_bvec_all(bio);      \
     170             :              i < (bio)->bi_vcnt; i++, bvl++)
     171             : 
     172             : #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
     173             : 
     174             : static inline unsigned bio_segments(struct bio *bio)
     175             : {
     176             :         unsigned segs = 0;
     177             :         struct bio_vec bv;
     178             :         struct bvec_iter iter;
     179             : 
     180             :         /*
     181             :          * We special case discard/write same/write zeroes, because they
     182             :          * interpret bi_size differently:
     183             :          */
     184             : 
     185             :         switch (bio_op(bio)) {
     186             :         case REQ_OP_DISCARD:
     187             :         case REQ_OP_SECURE_ERASE:
     188             :         case REQ_OP_WRITE_ZEROES:
     189             :                 return 0;
     190             :         default:
     191             :                 break;
     192             :         }
     193             : 
     194             :         bio_for_each_segment(bv, bio, iter)
     195             :                 segs++;
     196             : 
     197             :         return segs;
     198             : }
     199             : 
     200             : /*
     201             :  * get a reference to a bio, so it won't disappear. the intended use is
     202             :  * something like:
     203             :  *
     204             :  * bio_get(bio);
     205             :  * submit_bio(rw, bio);
     206             :  * if (bio->bi_flags ...)
     207             :  *      do_something
     208             :  * bio_put(bio);
     209             :  *
      210             :  * Without the bio_get(), the I/O could potentially complete before submit_bio
      211             :  * returns, and the bio would already have been freed by the time the
      212             :  * if (bio->bi_flags ...) check runs.
     213             :  */
     214             : static inline void bio_get(struct bio *bio)
     215             : {
     216           0 :         bio->bi_flags |= (1 << BIO_REFFED);
     217           0 :         smp_mb__before_atomic();
     218           0 :         atomic_inc(&bio->__bi_cnt);
     219             : }
     220             : 
     221             : static inline void bio_cnt_set(struct bio *bio, unsigned int count)
     222             : {
     223             :         if (count != 1) {
     224             :                 bio->bi_flags |= (1 << BIO_REFFED);
     225             :                 smp_mb();
     226             :         }
     227             :         atomic_set(&bio->__bi_cnt, count);
     228             : }
     229             : 
     230             : static inline bool bio_flagged(struct bio *bio, unsigned int bit)
     231             : {
     232           0 :         return (bio->bi_flags & (1U << bit)) != 0;
     233             : }
     234             : 
     235             : static inline void bio_set_flag(struct bio *bio, unsigned int bit)
     236             : {
     237           0 :         bio->bi_flags |= (1U << bit);
     238             : }
     239             : 
     240             : static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
     241             : {
     242           0 :         bio->bi_flags &= ~(1U << bit);
     243             : }
     244             : 
     245           0 : static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
     246             : {
     247           0 :         WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
     248           0 :         return bio->bi_io_vec;
     249             : }
     250             : 
     251             : static inline struct page *bio_first_page_all(struct bio *bio)
     252             : {
     253           0 :         return bio_first_bvec_all(bio)->bv_page;
     254             : }
     255             : 
     256             : static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
     257             : {
     258             :         WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
     259             :         return &bio->bi_io_vec[bio->bi_vcnt - 1];
     260             : }
     261             : 
     262             : /**
     263             :  * struct folio_iter - State for iterating all folios in a bio.
     264             :  * @folio: The current folio we're iterating.  NULL after the last folio.
     265             :  * @offset: The byte offset within the current folio.
     266             :  * @length: The number of bytes in this iteration (will not cross folio
     267             :  *      boundary).
     268             :  */
     269             : struct folio_iter {
     270             :         struct folio *folio;
     271             :         size_t offset;
     272             :         size_t length;
     273             :         /* private: for use by the iterator */
     274             :         struct folio *_next;
     275             :         size_t _seg_count;
     276             :         int _i;
     277             : };
     278             : 
     279             : static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
     280             :                                    int i)
     281             : {
     282             :         struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
     283             : 
     284             :         fi->folio = page_folio(bvec->bv_page);
     285             :         fi->offset = bvec->bv_offset +
     286             :                         PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
     287             :         fi->_seg_count = bvec->bv_len;
     288             :         fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
     289             :         fi->_next = folio_next(fi->folio);
     290             :         fi->_i = i;
     291             : }
     292             : 
     293             : static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
     294             : {
     295             :         fi->_seg_count -= fi->length;
     296             :         if (fi->_seg_count) {
     297             :                 fi->folio = fi->_next;
     298             :                 fi->offset = 0;
     299             :                 fi->length = min(folio_size(fi->folio), fi->_seg_count);
     300             :                 fi->_next = folio_next(fi->folio);
     301             :         } else if (fi->_i + 1 < bio->bi_vcnt) {
     302             :                 bio_first_folio(fi, bio, fi->_i + 1);
     303             :         } else {
     304             :                 fi->folio = NULL;
     305             :         }
     306             : }
     307             : 
     308             : /**
     309             :  * bio_for_each_folio_all - Iterate over each folio in a bio.
     310             :  * @fi: struct folio_iter which is updated for each folio.
     311             :  * @bio: struct bio to iterate over.
     312             :  */
     313             : #define bio_for_each_folio_all(fi, bio)                         \
     314             :         for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
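
/*
 * Illustrative usage sketch, not part of bio.h: summing the bytes a bio
 * touches, folio by folio, with bio_for_each_folio_all(). Only valid for a
 * bio the caller owns in full, as noted for the other *_all iterators above.
 * bio_folio_bytes() is a hypothetical helper.
 */
static size_t bio_folio_bytes(struct bio *bio)
{
        struct folio_iter fi;
        size_t bytes = 0;

        bio_for_each_folio_all(fi, bio)
                bytes += fi.length;

        return bytes;
}
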
     315             : 
     316             : enum bip_flags {
     317             :         BIP_BLOCK_INTEGRITY     = 1 << 0, /* block layer owns integrity data */
     318             :         BIP_MAPPED_INTEGRITY    = 1 << 1, /* ref tag has been remapped */
     319             :         BIP_CTRL_NOCHECK        = 1 << 2, /* disable HBA integrity checking */
     320             :         BIP_DISK_NOCHECK        = 1 << 3, /* disable disk integrity checking */
     321             :         BIP_IP_CHECKSUM         = 1 << 4, /* IP checksum */
     322             : };
     323             : 
     324             : /*
     325             :  * bio integrity payload
     326             :  */
     327             : struct bio_integrity_payload {
     328             :         struct bio              *bip_bio;       /* parent bio */
     329             : 
     330             :         struct bvec_iter        bip_iter;
     331             : 
     332             :         unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
     333             :         unsigned short          bip_max_vcnt;   /* integrity bio_vec slots */
     334             :         unsigned short          bip_flags;      /* control flags */
     335             : 
     336             :         struct bvec_iter        bio_iter;       /* for rewinding parent bio */
     337             : 
     338             :         struct work_struct      bip_work;       /* I/O completion */
     339             : 
     340             :         struct bio_vec          *bip_vec;
     341             :         struct bio_vec          bip_inline_vecs[];/* embedded bvec array */
     342             : };
     343             : 
     344             : #if defined(CONFIG_BLK_DEV_INTEGRITY)
     345             : 
     346             : static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
     347             : {
     348             :         if (bio->bi_opf & REQ_INTEGRITY)
     349             :                 return bio->bi_integrity;
     350             : 
     351             :         return NULL;
     352             : }
     353             : 
     354             : static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
     355             : {
     356             :         struct bio_integrity_payload *bip = bio_integrity(bio);
     357             : 
     358             :         if (bip)
     359             :                 return bip->bip_flags & flag;
     360             : 
     361             :         return false;
     362             : }
     363             : 
     364             : static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
     365             : {
     366             :         return bip->bip_iter.bi_sector;
     367             : }
     368             : 
     369             : static inline void bip_set_seed(struct bio_integrity_payload *bip,
     370             :                                 sector_t seed)
     371             : {
     372             :         bip->bip_iter.bi_sector = seed;
     373             : }
     374             : 
     375             : #endif /* CONFIG_BLK_DEV_INTEGRITY */
     376             : 
     377             : void bio_trim(struct bio *bio, sector_t offset, sector_t size);
     378             : extern struct bio *bio_split(struct bio *bio, int sectors,
     379             :                              gfp_t gfp, struct bio_set *bs);
     380             : struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
     381             :                 unsigned *segs, struct bio_set *bs, unsigned max_bytes);
     382             : 
     383             : /**
     384             :  * bio_next_split - get next @sectors from a bio, splitting if necessary
     385             :  * @bio:        bio to split
     386             :  * @sectors:    number of sectors to split from the front of @bio
     387             :  * @gfp:        gfp mask
     388             :  * @bs:         bio set to allocate from
     389             :  *
     390             :  * Return: a bio representing the next @sectors of @bio - if the bio is smaller
     391             :  * than @sectors, returns the original bio unchanged.
     392             :  */
     393             : static inline struct bio *bio_next_split(struct bio *bio, int sectors,
     394             :                                          gfp_t gfp, struct bio_set *bs)
     395             : {
     396             :         if (sectors >= bio_sectors(bio))
     397             :                 return bio;
     398             : 
     399             :         return bio_split(bio, sectors, gfp, bs);
     400             : }
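
/*
 * Illustrative usage sketch, not part of bio.h: carving off the portion of a
 * bio that fits below a (hypothetical) 1 MiB boundary, the typical
 * bio_next_split() pattern in stacking drivers. split_below_boundary() is
 * hypothetical and 'split_bs' is an assumed caller-owned bio_set.
 */
static struct bio *split_below_boundary(struct bio *bio, struct bio_set *split_bs)
{
        const sector_t boundary = 1 << 11;      /* 1 MiB in 512-byte sectors */
        int sectors = boundary - (bio->bi_iter.bi_sector & (boundary - 1));

        return bio_next_split(bio, sectors, GFP_NOIO, split_bs);
}
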
     401             : 
     402             : enum {
     403             :         BIOSET_NEED_BVECS = BIT(0),
     404             :         BIOSET_NEED_RESCUER = BIT(1),
     405             :         BIOSET_PERCPU_CACHE = BIT(2),
     406             : };
     407             : extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
     408             : extern void bioset_exit(struct bio_set *);
     409             : extern int biovec_init_pool(mempool_t *pool, int pool_entries);
     410             : 
     411             : struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
     412             :                              blk_opf_t opf, gfp_t gfp_mask,
     413             :                              struct bio_set *bs);
     414             : struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
     415             : extern void bio_put(struct bio *);
     416             : 
     417             : struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
     418             :                 gfp_t gfp, struct bio_set *bs);
     419             : int bio_init_clone(struct block_device *bdev, struct bio *bio,
     420             :                 struct bio *bio_src, gfp_t gfp);
     421             : 
     422             : extern struct bio_set fs_bio_set;
     423             : 
     424             : static inline struct bio *bio_alloc(struct block_device *bdev,
     425             :                 unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
     426             : {
     427           0 :         return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
     428             : }
     429             : 
     430             : void submit_bio(struct bio *bio);
     431             : 
     432             : extern void bio_endio(struct bio *);
     433             : 
     434             : static inline void bio_io_error(struct bio *bio)
     435             : {
     436           0 :         bio->bi_status = BLK_STS_IOERR;
     437           0 :         bio_endio(bio);
     438             : }
     439             : 
     440             : static inline void bio_wouldblock_error(struct bio *bio)
     441             : {
     442           0 :         bio_set_flag(bio, BIO_QUIET);
     443           0 :         bio->bi_status = BLK_STS_AGAIN;
     444           0 :         bio_endio(bio);
     445             : }
     446             : 
     447             : /*
     448             :  * Calculate number of bvec segments that should be allocated to fit data
     449             :  * pointed by @iter. If @iter is backed by bvec it's going to be reused
     450             :  * instead of allocating a new one.
     451             :  */
     452             : static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
     453             : {
     454           0 :         if (iov_iter_is_bvec(iter))
     455             :                 return 0;
     456           0 :         return iov_iter_npages(iter, max_segs);
     457             : }
     458             : 
     459             : struct request_queue;
     460             : 
     461             : extern int submit_bio_wait(struct bio *bio);
     462             : void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
     463             :               unsigned short max_vecs, blk_opf_t opf);
     464             : extern void bio_uninit(struct bio *);
     465             : void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
     466             : void bio_chain(struct bio *, struct bio *);
     467             : 
     468             : int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
     469             : bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
     470             : extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
     471             :                            unsigned int, unsigned int);
     472             : int bio_add_zone_append_page(struct bio *bio, struct page *page,
     473             :                              unsigned int len, unsigned int offset);
     474             : void __bio_add_page(struct bio *bio, struct page *page,
     475             :                 unsigned int len, unsigned int off);
     476             : int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
     477             : void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
     478             : void __bio_release_pages(struct bio *bio, bool mark_dirty);
     479             : extern void bio_set_pages_dirty(struct bio *bio);
     480             : extern void bio_check_pages_dirty(struct bio *bio);
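
/*
 * Illustrative usage sketch, not part of bio.h: a synchronous single-page
 * read built from bio_alloc(), __bio_add_page() and submit_bio_wait() as
 * declared above. read_one_page() is a hypothetical helper; 'bdev', 'page'
 * and 'sector' are assumed to be valid.
 */
static int read_one_page(struct block_device *bdev, struct page *page,
                         sector_t sector)
{
        struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
        int ret;

        if (!bio)
                return -ENOMEM;

        bio->bi_iter.bi_sector = sector;
        __bio_add_page(bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(bio);
        bio_put(bio);
        return ret;
}
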
     481             : 
     482             : extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
     483             :                                struct bio *src, struct bvec_iter *src_iter);
     484             : extern void bio_copy_data(struct bio *dst, struct bio *src);
     485             : extern void bio_free_pages(struct bio *bio);
     486             : void guard_bio_eod(struct bio *bio);
     487             : void zero_fill_bio(struct bio *bio);
     488             : 
     489             : static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
     490             : {
     491           0 :         if (!bio_flagged(bio, BIO_NO_PAGE_REF))
     492           0 :                 __bio_release_pages(bio, mark_dirty);
     493             : }
     494             : 
     495             : #define bio_dev(bio) \
     496             :         disk_devt((bio)->bi_bdev->bd_disk)
     497             : 
     498             : #ifdef CONFIG_BLK_CGROUP
     499             : void bio_associate_blkg(struct bio *bio);
     500             : void bio_associate_blkg_from_css(struct bio *bio,
     501             :                                  struct cgroup_subsys_state *css);
     502             : void bio_clone_blkg_association(struct bio *dst, struct bio *src);
     503             : #else   /* CONFIG_BLK_CGROUP */
     504             : static inline void bio_associate_blkg(struct bio *bio) { }
     505             : static inline void bio_associate_blkg_from_css(struct bio *bio,
     506             :                                                struct cgroup_subsys_state *css)
     507             : { }
     508             : static inline void bio_clone_blkg_association(struct bio *dst,
     509             :                                               struct bio *src) { }
     510             : #endif  /* CONFIG_BLK_CGROUP */
     511             : 
     512             : static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
     513             : {
     514             :         bio_clear_flag(bio, BIO_REMAPPED);
     515             :         if (bio->bi_bdev != bdev)
     516             :                 bio_clear_flag(bio, BIO_BPS_THROTTLED);
     517             :         bio->bi_bdev = bdev;
     518             :         bio_associate_blkg(bio);
     519             : }
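
/*
 * Illustrative usage sketch, not part of bio.h: the minimal remap step of a
 * stacking driver built on bio_set_dev(). remap_to_lower(), 'lower_bdev' and
 * 'data_offset' are hypothetical; submit_bio_noacct() is declared in
 * linux/blkdev.h.
 */
static void remap_to_lower(struct bio *bio, struct block_device *lower_bdev,
                           sector_t data_offset)
{
        bio_set_dev(bio, lower_bdev);
        bio->bi_iter.bi_sector += data_offset;
        submit_bio_noacct(bio);
}
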
     520             : 
     521             : /*
     522             :  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
     523             :  *
     524             :  * A bio_list anchors a singly-linked list of bios chained through the bi_next
     525             :  * member of the bio.  The bio_list also caches the last list member to allow
     526             :  * fast access to the tail.
     527             :  */
     528             : struct bio_list {
     529             :         struct bio *head;
     530             :         struct bio *tail;
     531             : };
     532             : 
     533             : static inline int bio_list_empty(const struct bio_list *bl)
     534             : {
     535             :         return bl->head == NULL;
     536             : }
     537             : 
     538             : static inline void bio_list_init(struct bio_list *bl)
     539             : {
     540           2 :         bl->head = bl->tail = NULL;
     541             : }
     542             : 
     543             : #define BIO_EMPTY_LIST  { NULL, NULL }
     544             : 
     545             : #define bio_list_for_each(bio, bl) \
     546             :         for (bio = (bl)->head; bio; bio = bio->bi_next)
     547             : 
     548             : static inline unsigned bio_list_size(const struct bio_list *bl)
     549             : {
     550             :         unsigned sz = 0;
     551             :         struct bio *bio;
     552             : 
     553             :         bio_list_for_each(bio, bl)
     554             :                 sz++;
     555             : 
     556             :         return sz;
     557             : }
     558             : 
     559             : static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
     560             : {
     561           0 :         bio->bi_next = NULL;
     562             : 
     563           0 :         if (bl->tail)
     564           0 :                 bl->tail->bi_next = bio;
     565             :         else
     566           0 :                 bl->head = bio;
     567             : 
     568           0 :         bl->tail = bio;
     569             : }
     570             : 
     571             : static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
     572             : {
     573             :         bio->bi_next = bl->head;
     574             : 
     575             :         bl->head = bio;
     576             : 
     577             :         if (!bl->tail)
     578             :                 bl->tail = bio;
     579             : }
     580             : 
     581             : static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
     582             : {
     583           0 :         if (!bl2->head)
     584             :                 return;
     585             : 
     586           0 :         if (bl->tail)
     587           0 :                 bl->tail->bi_next = bl2->head;
     588             :         else
     589           0 :                 bl->head = bl2->head;
     590             : 
     591           0 :         bl->tail = bl2->tail;
     592             : }
     593             : 
     594             : static inline void bio_list_merge_head(struct bio_list *bl,
     595             :                                        struct bio_list *bl2)
     596             : {
     597             :         if (!bl2->head)
     598             :                 return;
     599             : 
     600             :         if (bl->head)
     601             :                 bl2->tail->bi_next = bl->head;
     602             :         else
     603             :                 bl->tail = bl2->tail;
     604             : 
     605             :         bl->head = bl2->head;
     606             : }
     607             : 
     608             : static inline struct bio *bio_list_peek(struct bio_list *bl)
     609             : {
     610             :         return bl->head;
     611             : }
     612             : 
     613             : static inline struct bio *bio_list_pop(struct bio_list *bl)
     614             : {
     615           0 :         struct bio *bio = bl->head;
     616             : 
     617           0 :         if (bio) {
     618           0 :                 bl->head = bl->head->bi_next;
     619           0 :                 if (!bl->head)
     620           0 :                         bl->tail = NULL;
     621             : 
     622           0 :                 bio->bi_next = NULL;
     623             :         }
     624             : 
     625             :         return bio;
     626             : }
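
/*
 * Illustrative usage sketch, not part of bio.h: draining a caller-owned
 * bio_list in FIFO order, the deferral pattern used by the remapping drivers
 * mentioned above. drain_deferred() is a hypothetical helper.
 */
static void drain_deferred(struct bio_list *list)
{
        struct bio *bio;

        while ((bio = bio_list_pop(list)))
                submit_bio(bio);
}
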
     627             : 
     628             : static inline struct bio *bio_list_get(struct bio_list *bl)
     629             : {
     630             :         struct bio *bio = bl->head;
     631             : 
     632             :         bl->head = bl->tail = NULL;
     633             : 
     634             :         return bio;
     635             : }
     636             : 
     637             : /*
     638             :  * Increment chain count for the bio. Make sure the CHAIN flag update
     639             :  * is visible before the raised count.
     640             :  */
     641             : static inline void bio_inc_remaining(struct bio *bio)
     642             : {
     643           0 :         bio_set_flag(bio, BIO_CHAIN);
     644           0 :         smp_mb__before_atomic();
     645           0 :         atomic_inc(&bio->__bi_remaining);
     646             : }
     647             : 
     648             : /*
     649             :  * bio_set is used to allow other portions of the IO system to
     650             :  * allocate their own private memory pools for bio and iovec structures.
     651             :  * These memory pools in turn all allocate from the bio_slab
     652             :  * and the bvec_slabs[].
     653             :  */
     654             : #define BIO_POOL_SIZE 2
     655             : 
     656             : struct bio_set {
     657             :         struct kmem_cache *bio_slab;
     658             :         unsigned int front_pad;
     659             : 
     660             :         /*
     661             :          * per-cpu bio alloc cache
     662             :          */
     663             :         struct bio_alloc_cache __percpu *cache;
     664             : 
     665             :         mempool_t bio_pool;
     666             :         mempool_t bvec_pool;
     667             : #if defined(CONFIG_BLK_DEV_INTEGRITY)
     668             :         mempool_t bio_integrity_pool;
     669             :         mempool_t bvec_integrity_pool;
     670             : #endif
     671             : 
     672             :         unsigned int back_pad;
     673             :         /*
     674             :          * Deadlock avoidance for stacking block drivers: see comments in
     675             :          * bio_alloc_bioset() for details
     676             :          */
     677             :         spinlock_t              rescue_lock;
     678             :         struct bio_list         rescue_list;
     679             :         struct work_struct      rescue_work;
     680             :         struct workqueue_struct *rescue_workqueue;
     681             : 
     682             :         /*
     683             :          * Hot un-plug notifier for the per-cpu cache, if used
     684             :          */
     685             :         struct hlist_node cpuhp_dead;
     686             : };
     687             : 
     688             : static inline bool bioset_initialized(struct bio_set *bs)
     689             : {
     690             :         return bs->bio_slab != NULL;
     691             : }
     692             : 
     693             : #if defined(CONFIG_BLK_DEV_INTEGRITY)
     694             : 
     695             : #define bip_for_each_vec(bvl, bip, iter)                                \
     696             :         for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
     697             : 
     698             : #define bio_for_each_integrity_vec(_bvl, _bio, _iter)                   \
     699             :         for_each_bio(_bio)                                              \
     700             :                 bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
     701             : 
     702             : extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
     703             : extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
     704             : extern bool bio_integrity_prep(struct bio *);
     705             : extern void bio_integrity_advance(struct bio *, unsigned int);
     706             : extern void bio_integrity_trim(struct bio *);
     707             : extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
     708             : extern int bioset_integrity_create(struct bio_set *, int);
     709             : extern void bioset_integrity_free(struct bio_set *);
     710             : extern void bio_integrity_init(void);
     711             : 
     712             : #else /* CONFIG_BLK_DEV_INTEGRITY */
     713             : 
     714             : static inline void *bio_integrity(struct bio *bio)
     715             : {
     716             :         return NULL;
     717             : }
     718             : 
     719             : static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
     720             : {
     721             :         return 0;
     722             : }
     723             : 
     724             : static inline void bioset_integrity_free (struct bio_set *bs)
     725             : {
     726             :         return;
     727             : }
     728             : 
     729             : static inline bool bio_integrity_prep(struct bio *bio)
     730             : {
     731             :         return true;
     732             : }
     733             : 
     734             : static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
     735             :                                       gfp_t gfp_mask)
     736             : {
     737             :         return 0;
     738             : }
     739             : 
     740             : static inline void bio_integrity_advance(struct bio *bio,
     741             :                                          unsigned int bytes_done)
     742             : {
     743             :         return;
     744             : }
     745             : 
     746             : static inline void bio_integrity_trim(struct bio *bio)
     747             : {
     748             :         return;
     749             : }
     750             : 
     751             : static inline void bio_integrity_init(void)
     752             : {
     753             :         return;
     754             : }
     755             : 
     756             : static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
     757             : {
     758             :         return false;
     759             : }
     760             : 
     761             : static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
     762             :                                                                 unsigned int nr)
     763             : {
     764             :         return ERR_PTR(-EINVAL);
     765             : }
     766             : 
     767             : static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
     768             :                                         unsigned int len, unsigned int offset)
     769             : {
     770             :         return 0;
     771             : }
     772             : 
     773             : #endif /* CONFIG_BLK_DEV_INTEGRITY */
     774             : 
     775             : /*
     776             :  * Mark a bio as polled. Note that for async polled IO, the caller must
     777             :  * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
     778             :  * We cannot block waiting for requests on polled IO, as those completions
     779             :  * must be found by the caller. This is different than IRQ driven IO, where
     780             :  * it's safe to wait for IO to complete.
     781             :  */
     782             : static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
     783             : {
     784             :         bio->bi_opf |= REQ_POLLED;
     785             :         if (!is_sync_kiocb(kiocb))
     786             :                 bio->bi_opf |= REQ_NOWAIT;
     787             : }
     788             : 
     789             : static inline void bio_clear_polled(struct bio *bio)
     790             : {
     791           0 :         bio->bi_opf &= ~REQ_POLLED;
     792             : }
     793             : 
     794             : struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
     795             :                 unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
     796             : 
     797             : #endif /* __LINUX_BIO_H */

Generated by: LCOV version 1.14