LCOV - code coverage report
Current view: top level - io_uring - io_uring.h (source / functions)
Test:         coverage.info
Date:         2023-04-06 08:38:28

                  Hit    Total    Coverage
Lines:              0       86       0.0 %
Functions:          0        5       0.0 %

          Line data    Source code
       1             : #ifndef IOU_CORE_H
       2             : #define IOU_CORE_H
       3             : 
       4             : #include <linux/errno.h>
       5             : #include <linux/lockdep.h>
       6             : #include <linux/resume_user_mode.h>
       7             : #include <linux/kasan.h>
       8             : #include <linux/io_uring_types.h>
       9             : #include <uapi/linux/eventpoll.h>
      10             : #include "io-wq.h"
      11             : #include "slist.h"
      12             : #include "filetable.h"
      13             : 
      14             : #ifndef CREATE_TRACE_POINTS
      15             : #include <trace/events/io_uring.h>
      16             : #endif
      17             : 
      18             : enum {
      19             :         IOU_OK                  = 0,
      20             :         IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
      21             : 
      22             :         /*
       23             :          * Intended only when IO_URING_F_MULTISHOT is passed,
       24             :          * to indicate to the poll runner that multishot should be
       25             :          * removed and the result set in req->cqe.res.
      26             :          */
      27             :         IOU_STOP_MULTISHOT      = -ECANCELED,
      28             : };
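
A minimal illustration of the return convention above, as consumed by opcode ->issue() handlers; the handler name is hypothetical and the sketch is not part of this header. Roughly: IOU_OK tells the core to post a CQE from req->cqe and complete the request, IOU_ISSUE_SKIP_COMPLETE means completion happens elsewhere (e.g. later via task_work or io-wq), and IOU_STOP_MULTISHOT asks the poll runner to stop re-arming a multishot request.

static int example_nop_issue(struct io_kiocb *req, unsigned int issue_flags)
{
        /* result for the CQE; io_req_set_res() is defined later in this header */
        io_req_set_res(req, 0, 0);
        /* IOU_OK: the core posts req->cqe and completes the request */
        return IOU_OK;
}
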
      29             : 
      30             : struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
      31             : bool io_req_cqe_overflow(struct io_kiocb *req);
      32             : int io_run_task_work_sig(struct io_ring_ctx *ctx);
      33             : void io_req_defer_failed(struct io_kiocb *req, s32 res);
      34             : void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
      35             : bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
      36             : bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
      37             :                 bool allow_overflow);
      38             : void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
      39             : 
      40             : struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
      41             : 
      42             : struct file *io_file_get_normal(struct io_kiocb *req, int fd);
      43             : struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
      44             :                                unsigned issue_flags);
      45             : 
      46             : static inline bool io_req_ffs_set(struct io_kiocb *req)
      47             : {
      48           0 :         return req->flags & REQ_F_FIXED_FILE;
      49             : }
      50             : 
      51             : void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
      52             : bool io_is_uring_fops(struct file *file);
      53             : bool io_alloc_async_data(struct io_kiocb *req);
      54             : void io_req_task_queue(struct io_kiocb *req);
      55             : void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
      56             : void io_req_task_complete(struct io_kiocb *req, bool *locked);
      57             : void io_req_task_queue_fail(struct io_kiocb *req, int ret);
      58             : void io_req_task_submit(struct io_kiocb *req, bool *locked);
      59             : void tctx_task_work(struct callback_head *cb);
      60             : __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
      61             : int io_uring_alloc_task_context(struct task_struct *task,
      62             :                                 struct io_ring_ctx *ctx);
      63             : 
      64             : int io_poll_issue(struct io_kiocb *req, bool *locked);
      65             : int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
      66             : int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
      67             : void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
      68             : int io_req_prep_async(struct io_kiocb *req);
      69             : 
      70             : struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
      71             : void io_wq_submit_work(struct io_wq_work *work);
      72             : 
      73             : void io_free_req(struct io_kiocb *req);
      74             : void io_queue_next(struct io_kiocb *req);
      75             : void io_task_refs_refill(struct io_uring_task *tctx);
      76             : bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
      77             : 
      78             : bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
      79             :                         bool cancel_all);
      80             : 
      81             : #define io_lockdep_assert_cq_locked(ctx)                                \
      82             :         do {                                                            \
      83             :                 if (ctx->flags & IORING_SETUP_IOPOLL) {                  \
      84             :                         lockdep_assert_held(&ctx->uring_lock);           \
      85             :                 } else if (!ctx->task_complete) {                    \
      86             :                         lockdep_assert_held(&ctx->completion_lock);      \
      87             :                 } else if (ctx->submitter_task->flags & PF_EXITING) { \
      88             :                         lockdep_assert(current_work());                 \
      89             :                 } else {                                                \
      90             :                         lockdep_assert(current == ctx->submitter_task);      \
      91             :                 }                                                       \
      92             :         } while (0)
      93             : 
      94             : static inline void io_req_task_work_add(struct io_kiocb *req)
      95             : {
      96           0 :         __io_req_task_work_add(req, true);
      97             : }
      98             : 
      99             : #define io_for_each_link(pos, head) \
     100             :         for (pos = (head); pos; pos = pos->link)
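
A short usage sketch of the link-walking macro above; count_link_members() is a hypothetical helper, not part of io_uring, and simply counts the head request plus every request chained through ->link.

static inline int count_link_members(struct io_kiocb *head)
{
        struct io_kiocb *pos;
        int nr = 0;

        /* visits head first, then each request reached via pos->link */
        io_for_each_link(pos, head)
                nr++;
        return nr;
}
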
     101             : 
     102             : void io_cq_unlock_post(struct io_ring_ctx *ctx);
     103             : 
     104           0 : static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
     105             :                                                        bool overflow)
     106             : {
     107           0 :         io_lockdep_assert_cq_locked(ctx);
     108             : 
     109           0 :         if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
     110           0 :                 struct io_uring_cqe *cqe = ctx->cqe_cached;
     111             : 
     112           0 :                 ctx->cached_cq_tail++;
     113           0 :                 ctx->cqe_cached++;
     114           0 :                 if (ctx->flags & IORING_SETUP_CQE32)
     115           0 :                         ctx->cqe_cached++;
     116             :                 return cqe;
     117             :         }
     118             : 
     119           0 :         return __io_get_cqe(ctx, overflow);
     120             : }
     121             : 
     122             : static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
     123             : {
     124           0 :         return io_get_cqe_overflow(ctx, false);
     125             : }
     126             : 
     127           0 : static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
     128             :                                      struct io_kiocb *req)
     129             : {
     130             :         struct io_uring_cqe *cqe;
     131             : 
     132             :         /*
     133             :          * If we can't get a cq entry, userspace overflowed the
     134             :          * submission (by quite a lot). Increment the overflow count in
     135             :          * the ring.
     136             :          */
     137           0 :         cqe = io_get_cqe(ctx);
     138           0 :         if (unlikely(!cqe))
     139             :                 return false;
     140             : 
     141           0 :         trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
     142             :                                 req->cqe.res, req->cqe.flags,
     143             :                                 (req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
     144             :                                 (req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
     145             : 
     146           0 :         memcpy(cqe, &req->cqe, sizeof(*cqe));
     147             : 
     148           0 :         if (ctx->flags & IORING_SETUP_CQE32) {
     149           0 :                 u64 extra1 = 0, extra2 = 0;
     150             : 
     151           0 :                 if (req->flags & REQ_F_CQE32_INIT) {
     152           0 :                         extra1 = req->extra1;
     153           0 :                         extra2 = req->extra2;
     154             :                 }
     155             : 
     156           0 :                 WRITE_ONCE(cqe->big_cqe[0], extra1);
     157           0 :                 WRITE_ONCE(cqe->big_cqe[1], extra2);
     158             :         }
     159             :         return true;
     160             : }
     161             : 
     162           0 : static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
     163             :                                    struct io_kiocb *req)
     164             : {
     165           0 :         if (likely(__io_fill_cqe_req(ctx, req)))
     166             :                 return true;
     167           0 :         return io_req_cqe_overflow(req);
     168             : }
     169             : 
     170             : static inline void req_set_fail(struct io_kiocb *req)
     171             : {
     172           0 :         req->flags |= REQ_F_FAIL;
     173           0 :         if (req->flags & REQ_F_CQE_SKIP) {
     174             :                 req->flags &= ~REQ_F_CQE_SKIP;
     175           0 :                 req->flags |= REQ_F_SKIP_LINK_CQES;
     176             :         }
     177             : }
     178             : 
     179             : static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
     180             : {
     181           0 :         req->cqe.res = res;
     182           0 :         req->cqe.flags = cflags;
     183             : }
     184             : 
     185             : static inline bool req_has_async_data(struct io_kiocb *req)
     186             : {
     187           0 :         return req->flags & REQ_F_ASYNC_DATA;
     188             : }
     189             : 
     190             : static inline void io_put_file(struct file *file)
     191             : {
     192           0 :         if (file)
     193           0 :                 fput(file);
     194             : }
     195             : 
     196             : static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
     197             :                                          unsigned issue_flags)
     198             : {
     199             :         lockdep_assert_held(&ctx->uring_lock);
     200           0 :         if (issue_flags & IO_URING_F_UNLOCKED)
     201           0 :                 mutex_unlock(&ctx->uring_lock);
     202             : }
     203             : 
     204             : static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
     205             :                                        unsigned issue_flags)
     206             : {
     207             :         /*
     208             :          * "Normal" inline submissions always hold the uring_lock, since we
     209             :          * grab it from the system call. Same is true for the SQPOLL offload.
     210             :          * The only exception is when we've detached the request and issue it
      211             :          * from an async worker thread; grab the lock in that case.
     212             :          */
     213           0 :         if (issue_flags & IO_URING_F_UNLOCKED)
     214           0 :                 mutex_lock(&ctx->uring_lock);
     215             :         lockdep_assert_held(&ctx->uring_lock);
     216             : }
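
A usage sketch of the conditional locking pair above, with a hypothetical helper that touches state protected by ctx->uring_lock from an opcode handler. On the inline and SQPOLL paths IO_URING_F_UNLOCKED is clear and the lock is already held; from io-wq it is set and the helpers take and drop the mutex.

static void example_touch_ctx(struct io_ring_ctx *ctx, unsigned int issue_flags)
{
        io_ring_submit_lock(ctx, issue_flags);
        /* ... access state guarded by ctx->uring_lock ... */
        io_ring_submit_unlock(ctx, issue_flags);
}
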
     217             : 
     218             : static inline void io_commit_cqring(struct io_ring_ctx *ctx)
     219             : {
     220             :         /* order cqe stores with ring update */
     221           0 :         smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
     222             : }
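
A sketch of how the helpers above combine on the completion side, assuming the caller already satisfies io_lockdep_assert_cq_locked(); the function name is hypothetical, and a real path would additionally wake CQ waiters (see io_cqring_wake() below) or go through io_cq_unlock_post().

static void example_post_completion(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
        /* copy req->cqe into the CQ ring, or account an overflow entry */
        io_fill_cqe_req(ctx, req);
        /* publish the new tail; userspace reads cq.tail with acquire semantics */
        io_commit_cqring(ctx);
}
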
     223             : 
     224             : static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
     225             : {
     226           0 :         if (wq_has_sleeper(&ctx->poll_wq))
     227           0 :                 __wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
     228             :                                 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
     229             : }
     230             : 
      231             : /* requires smp_mb() prior, see wq_has_sleeper() */
     232             : static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
     233             : {
     234             :         /*
     235             :          * Trigger waitqueue handler on all waiters on our waitqueue. This
     236             :          * won't necessarily wake up all the tasks, io_should_wake() will make
     237             :          * that decision.
     238             :          *
     239             :          * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
      240             :          * is set in the mask so that if we recurse back into our own poll
      241             :          * waitqueue handlers, we know we have a dependency on eventfd or
     242             :          * epoll and should terminate multishot poll at that point.
     243             :          */
     244           0 :         if (waitqueue_active(&ctx->cq_wait))
     245           0 :                 __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
     246             :                                 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
     247             : }
     248             : 
     249             : static inline void io_cqring_wake(struct io_ring_ctx *ctx)
     250             : {
     251           0 :         smp_mb();
     252           0 :         __io_cqring_wake(ctx);
     253             : }
     254             : 
     255             : static inline bool io_sqring_full(struct io_ring_ctx *ctx)
     256             : {
     257           0 :         struct io_rings *r = ctx->rings;
     258             : 
     259           0 :         return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
     260             : }
     261             : 
     262             : static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
     263             : {
     264           0 :         struct io_rings *rings = ctx->rings;
     265             : 
     266             :         /* make sure SQ entry isn't read before tail */
     267           0 :         return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
     268             : }
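
A sketch of the submission-side pattern built on the helper above, assuming uring_lock is held as on the normal submission path; the wrapper name is hypothetical. The acquire load in io_sqring_entries() pairs with userspace's release store of sq.tail, so SQE contents are visible before they are consumed.

static int example_submit_ready(struct io_ring_ctx *ctx)
{
        unsigned int to_submit = io_sqring_entries(ctx);

        if (!to_submit)
                return 0;
        return io_submit_sqes(ctx, to_submit);
}
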
     269             : 
     270           0 : static inline int io_run_task_work(void)
     271             : {
     272             :         /*
     273             :          * Always check-and-clear the task_work notification signal. With how
     274             :          * signaling works for task_work, we can find it set with nothing to
     275             :          * run. We need to clear it for that case, like get_signal() does.
     276             :          */
     277           0 :         if (test_thread_flag(TIF_NOTIFY_SIGNAL))
     278             :                 clear_notify_signal();
     279             :         /*
     280             :          * PF_IO_WORKER never returns to userspace, so check here if we have
     281             :          * notify work that needs processing.
     282             :          */
     283           0 :         if (current->flags & PF_IO_WORKER &&
     284           0 :             test_thread_flag(TIF_NOTIFY_RESUME)) {
     285           0 :                 __set_current_state(TASK_RUNNING);
     286           0 :                 resume_user_mode_work(NULL);
     287             :         }
     288           0 :         if (task_work_pending(current)) {
     289           0 :                 __set_current_state(TASK_RUNNING);
     290           0 :                 task_work_run();
     291           0 :                 return 1;
     292             :         }
     293             : 
     294             :         return 0;
     295             : }
     296             : 
     297             : static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
     298             : {
     299           0 :         return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
     300             : }
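
An illustrative sketch of how a waiter might use the two helpers above before sleeping; the name is hypothetical. Note that io_task_work_pending() also reports work on the ring-local (DEFER_TASKRUN) list, which io_run_task_work() does not run, so the real wait path handles that list separately.

static bool example_flush_before_sleep(struct io_ring_ctx *ctx)
{
        if (!io_task_work_pending(ctx))
                return false;
        /* runs generic task_work only; ring-local work is handled elsewhere */
        io_run_task_work();
        return true;
}
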
     301             : 
     302             : static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
     303             : {
     304           0 :         if (!*locked) {
     305           0 :                 mutex_lock(&ctx->uring_lock);
     306           0 :                 *locked = true;
     307             :         }
     308             : }
     309             : 
     310             : /*
      311             :  * Don't complete immediately but use the deferred completion infrastructure.
     312             :  * Protected by ->uring_lock and can only be used either with
     313             :  * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
     314             :  */
     315             : static inline void io_req_complete_defer(struct io_kiocb *req)
     316             :         __must_hold(&req->ctx->uring_lock)
     317             : {
     318           0 :         struct io_submit_state *state = &req->ctx->submit_state;
     319             : 
     320             :         lockdep_assert_held(&req->ctx->uring_lock);
     321             : 
     322           0 :         wq_list_add_tail(&req->comp_list, &state->compl_reqs);
     323             : }
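
A sketch of the common completion pattern that chooses between the deferred path above and the locked post path, following the issue_flags convention described in the comments; the helper name is hypothetical.

static void example_complete(struct io_kiocb *req, unsigned int issue_flags)
{
        if (issue_flags & IO_URING_F_COMPLETE_DEFER)
                io_req_complete_defer(req);     /* uring_lock is held here */
        else
                io_req_complete_post(req, issue_flags);
}
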
     324             : 
     325             : static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
     326             : {
     327           0 :         if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
     328             :                      ctx->has_evfd || ctx->poll_activated))
     329           0 :                 __io_commit_cqring_flush(ctx);
     330             : }
     331             : 
     332           0 : static inline void io_get_task_refs(int nr)
     333             : {
     334           0 :         struct io_uring_task *tctx = current->io_uring;
     335             : 
     336           0 :         tctx->cached_refs -= nr;
     337           0 :         if (unlikely(tctx->cached_refs < 0))
     338           0 :                 io_task_refs_refill(tctx);
     339           0 : }
     340             : 
     341             : static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
     342             : {
     343           0 :         return !ctx->submit_state.free_list.next;
     344             : }
     345             : 
     346             : extern struct kmem_cache *req_cachep;
     347             : 
     348             : static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
     349             : {
     350             :         struct io_kiocb *req;
     351             : 
     352           0 :         req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
     353           0 :         kasan_unpoison_object_data(req_cachep, req);
     354           0 :         wq_stack_extract(&ctx->submit_state.free_list);
     355             :         return req;
     356             : }
     357             : 
     358             : static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
     359             : {
     360           0 :         if (unlikely(io_req_cache_empty(ctx))) {
     361           0 :                 if (!__io_alloc_req_refill(ctx))
     362             :                         return false;
     363             :         }
     364           0 :         *req = io_extract_req(ctx);
     365             :         return true;
     366             : }
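
A sketch of how the allocation helpers above are typically combined per SQE on the submission path, with uring_lock assumed held (the request free list is protected by it); the wrapper name is hypothetical, and a real caller takes task references in batches rather than one at a time.

static struct io_kiocb *example_prep_one(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        io_get_task_refs(1);            /* consume one cached task reference */
        if (unlikely(!io_alloc_req(ctx, &req)))
                return NULL;            /* cache refill failed */
        return req;
}
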
     367             : 
     368             : static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
     369             : {
     370           0 :         return likely(ctx->submitter_task == current);
     371             : }
     372             : 
     373             : static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
     374             : {
     375           0 :         return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
     376             :                       ctx->submitter_task == current);
     377             : }
     378             : 
     379             : static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
     380             : {
     381           0 :         io_req_set_res(req, res, 0);
     382           0 :         req->io_task_work.func = io_req_task_complete;
     383           0 :         io_req_task_work_add(req);
     384             : }
     385             : 
     386             : #endif

Generated by: LCOV version 1.14