LCOV - code coverage report
Current view: top level - io_uring - kbuf.h (source / functions)
Test: coverage.info | Lines: 0 of 29 hit (0.0 %) | Functions: 0 of 2 hit (0.0 %)
Date: 2023-03-27 20:00:47

// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
        /*
         * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
         * then these are classic provided buffers and ->buf_list is used.
         */
        union {
                struct list_head buf_list;
                struct {
                        struct page **buf_pages;
                        struct io_uring_buf_ring *buf_ring;
                };
        };
        __u16 bgid;

        /* below is for ring provided buffers */
        __u16 buf_nr_pages;
        __u16 nr_entries;
        __u16 head;
        __u16 mask;
};
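
/*
 * Illustrative sketch, not part of this header: ->buf_nr_pages is the
 * union discriminant described in the comment above. A hypothetical
 * helper making that explicit could look like:
 *
 *      static inline bool io_buffer_list_is_ring(struct io_buffer_list *bl)
 *      {
 *              return bl->buf_nr_pages != 0;
 *      }
 *
 * When true, buf_pages/buf_ring are live; when false, buf_list holds
 * classic provided buffers.
 */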

struct io_buffer {
        struct list_head list;
        __u64 addr;
        __u32 len;
        __u16 bid;
        __u16 bgid;
};
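
/*
 * For context, a hedged userspace sketch (liburing) of how a classic
 * provided buffer with the fields above comes into existence; "buf",
 * "group" and "id" are illustrative names:
 *
 *      struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *      // one buffer of 4096 bytes in group "group" with buffer id "id"
 *      io_uring_prep_provide_buffers(sqe, buf, 4096, 1, group, id);
 *      io_uring_submit(&ring);
 *
 * The kernel side of this lands in io_provide_buffers_prep() and
 * io_provide_buffers() declared below, one struct io_buffer per buffer.
 */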

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
                              unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
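
/*
 * The two hooks above back the IORING_REGISTER_PBUF_RING and
 * IORING_UNREGISTER_PBUF_RING registration opcodes. A hedged liburing
 * sketch of the userspace side (error handling elided, names
 * illustrative):
 *
 *      struct io_uring_buf_reg reg = { };
 *      void *ring_mem;
 *
 *      posix_memalign(&ring_mem, 4096, 8 * sizeof(struct io_uring_buf));
 *      reg.ring_addr = (unsigned long) ring_mem;
 *      reg.ring_entries = 8;   // must be a power of two
 *      reg.bgid = 0;
 *      io_uring_register_buf_ring(&ring, &reg, 0);
 */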

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
        /*
         * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
         * the flag and hence ensure that bl->head doesn't get incremented.
         * If the tail has already been incremented, hang on to it.
         * The exception is partial io; in that case we should increment
         * bl->head to monopolize the buffer.
         */
        if (req->buf_list) {
                if (req->flags & REQ_F_PARTIAL_IO) {
                        /*
                         * If we end up here, then the io_uring_lock has
                         * been kept held since we retrieved the buffer.
                         * For the io-wq case, we already cleared
                         * req->buf_list when the buffer was retrieved,
                         * hence it cannot be set here for that case.
                         */
                        req->buf_list->head++;
                        req->buf_list = NULL;
                } else {
                        req->buf_index = req->buf_list->bgid;
                        req->flags &= ~REQ_F_BUFFER_RING;
                }
        }
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
        if (!(req->flags & REQ_F_BUFFER_SELECT))
                return false;
        return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
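
/*
 * Hedged sketch of the typical call-site pattern for the helper above,
 * simplified from a receive-style handler; the exact surrounding code
 * is an assumption:
 *
 *      if (io_do_buffer_select(req)) {
 *              void __user *buf;
 *
 *              buf = io_buffer_select(req, &len, issue_flags);
 *              if (!buf)
 *                      return -ENOBUFS;
 *              sr->buf = buf;  // hand the picked buffer to the op
 *      }
 *
 * The second check in the helper keeps a retried request from
 * selecting another buffer when one is already attached.
 */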

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
        if (req->flags & REQ_F_BUFFER_SELECTED)
                io_kbuf_recycle_legacy(req, issue_flags);
        if (req->flags & REQ_F_BUFFER_RING)
                io_kbuf_recycle_ring(req);
}
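
/*
 * Hedged caller sketch: when an op picked a buffer but must retry
 * without having posted a completion, it hands the buffer back here.
 * The exact call site below is an assumption, simplified:
 *
 *      ret = sock_recvmsg(sock, &msg, flags);
 *      if (ret == -EAGAIN) {
 *              io_kbuf_recycle(req, issue_flags);
 *              return -EAGAIN;
 *      }
 *
 * For ring buffers with REQ_F_PARTIAL_IO set, io_kbuf_recycle_ring()
 * above commits the entry instead of returning it, so a partially
 * filled buffer is not handed to another request.
 */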

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
                                              struct list_head *list)
{
        unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

        if (req->flags & REQ_F_BUFFER_RING) {
                if (req->buf_list) {
                        req->buf_index = req->buf_list->bgid;
                        req->buf_list->head++;
                }
                req->flags &= ~REQ_F_BUFFER_RING;
        } else {
                req->buf_index = req->kbuf->bgid;
                list_add(&req->kbuf->list, list);
                req->flags &= ~REQ_F_BUFFER_SELECTED;
        }

        return ret;
}
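
/*
 * The return value above is what userspace sees in cqe->flags. A hedged
 * decode sketch for the consumer side:
 *
 *      if (cqe->flags & IORING_CQE_F_BUFFER) {
 *              unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *
 *              // data for this completion is in buffer "bid" of the
 *              // group the request selected from
 *      }
 */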

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
        lockdep_assert_held(&req->ctx->completion_lock);

        if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
                return 0;
        return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
                                       unsigned issue_flags)
{
        if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
                return 0;
        return __io_put_kbuf(req, issue_flags);
}
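
/*
 * Hedged usage sketch: completion paths fold the returned flags into
 * the posted CQE, along the lines of (exact helper is an assumption):
 *
 *      io_req_set_res(req, ret, io_put_kbuf(req, issue_flags));
 */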
#endif

Generated by: LCOV version 1.14