LCOV - code coverage report
Current view: top level - io_uring - timeout.c (source / functions)
Test: coverage.info
Date: 2023-03-27 20:00:47
                 Hit    Total    Coverage
Lines:             0      306       0.0 %
Functions:         0       23       0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : #include <linux/kernel.h>
       3             : #include <linux/errno.h>
       4             : #include <linux/file.h>
       5             : #include <linux/io_uring.h>
       6             : 
       7             : #include <trace/events/io_uring.h>
       8             : 
       9             : #include <uapi/linux/io_uring.h>
      10             : 
      11             : #include "io_uring.h"
      12             : #include "refs.h"
      13             : #include "cancel.h"
      14             : #include "timeout.h"
      15             : 
      16             : struct io_timeout {
      17             :         struct file                     *file;
      18             :         u32                             off;
      19             :         u32                             target_seq;
      20             :         struct list_head                list;
      21             :         /* head of the link, used by linked timeouts only */
      22             :         struct io_kiocb                 *head;
      23             :         /* for linked completions */
      24             :         struct io_kiocb                 *prev;
      25             : };
      26             : 
      27             : struct io_timeout_rem {
      28             :         struct file                     *file;
      29             :         u64                             addr;
      30             : 
      31             :         /* timeout update */
      32             :         struct timespec64               ts;
      33             :         u32                             flags;
      34             :         bool                            ltimeout;
      35             : };
      36             : 
      37             : static inline bool io_is_timeout_noseq(struct io_kiocb *req)
      38             : {
      39           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
      40             : 
      41           0 :         return !timeout->off;
      42             : }
      43             : 
      44           0 : static inline void io_put_req(struct io_kiocb *req)
      45             : {
      46           0 :         if (req_ref_put_and_test(req)) {
      47           0 :                 io_queue_next(req);
      48           0 :                 io_free_req(req);
      49             :         }
      50           0 : }
      51             : 
      52           0 : static bool io_kill_timeout(struct io_kiocb *req, int status)
      53             :         __must_hold(&req->ctx->timeout_lock)
      54             : {
      55           0 :         struct io_timeout_data *io = req->async_data;
      56             : 
      57           0 :         if (hrtimer_try_to_cancel(&io->timer) != -1) {
      58           0 :                 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
      59             : 
      60           0 :                 if (status)
      61           0 :                         req_set_fail(req);
      62           0 :                 atomic_set(&req->ctx->cq_timeouts,
      63           0 :                         atomic_read(&req->ctx->cq_timeouts) + 1);
      64           0 :                 list_del_init(&timeout->list);
      65           0 :                 io_req_queue_tw_complete(req, status);
      66           0 :                 return true;
      67             :         }
      68             :         return false;
      69             : }
      70             : 
      71           0 : __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
      72             : {
      73             :         u32 seq;
      74             :         struct io_timeout *timeout, *tmp;
      75             : 
      76           0 :         spin_lock_irq(&ctx->timeout_lock);
      77           0 :         seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
      78             : 
      79           0 :         list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
      80           0 :                 struct io_kiocb *req = cmd_to_io_kiocb(timeout);
      81             :                 u32 events_needed, events_got;
      82             : 
      83           0 :                 if (io_is_timeout_noseq(req))
      84             :                         break;
      85             : 
      86             :                 /*
      87             :                  * Since seq can easily wrap around over time, subtract
      88             :                  * the last seq at which timeouts were flushed before comparing.
      89             :                  * Assuming not more than 2^31-1 events have happened since,
      90             :                  * these subtractions won't have wrapped, so we can check if
      91             :                  * target is in [last_seq, current_seq] by comparing the two.
      92             :                  */
      93           0 :                 events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
      94           0 :                 events_got = seq - ctx->cq_last_tm_flush;
      95           0 :                 if (events_got < events_needed)
      96             :                         break;
      97             : 
      98           0 :                 io_kill_timeout(req, 0);
      99             :         }
     100           0 :         ctx->cq_last_tm_flush = seq;
     101           0 :         spin_unlock_irq(&ctx->timeout_lock);
     102           0 : }
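
The flush loop above relies on unsigned wraparound being harmless as long as fewer
than 2^31-1 events separate the points being compared. A standalone illustration
(not part of timeout.c; every name below is invented for the example) of why
subtracting the last flush point first keeps the in-range check correct even when
the 32-bit sequence counter has wrapped:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t last_flush = 0xfffffff0u;  /* last seq at which timeouts were flushed */
	uint32_t target_seq = 0x00000005u;  /* target, wrapped past 2^32 */
	uint32_t cur_seq    = 0x00000010u;  /* current seq, also wrapped */

	uint32_t events_needed = target_seq - last_flush;  /* 0x15 */
	uint32_t events_got    = cur_seq - last_flush;     /* 0x20 */

	/* target lies in (last_flush, cur_seq], so the timeout would be killed */
	printf("fire: %d\n", events_got >= events_needed); /* prints "fire: 1" */
	return 0;
}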
     103             : 
     104           0 : static void io_req_tw_fail_links(struct io_kiocb *link, bool *locked)
     105             : {
     106           0 :         io_tw_lock(link->ctx, locked);
     107           0 :         while (link) {
     108           0 :                 struct io_kiocb *nxt = link->link;
     109           0 :                 long res = -ECANCELED;
     110             : 
     111           0 :                 if (link->flags & REQ_F_FAIL)
     112           0 :                         res = link->cqe.res;
     113           0 :                 link->link = NULL;
     114           0 :                 io_req_set_res(link, res, 0);
     115           0 :                 io_req_task_complete(link, locked);
     116           0 :                 link = nxt;
     117             :         }
     118           0 : }
     119             : 
     120           0 : static void io_fail_links(struct io_kiocb *req)
     121             :         __must_hold(&req->ctx->completion_lock)
     122             : {
     123           0 :         struct io_kiocb *link = req->link;
     124           0 :         bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
     125             : 
     126           0 :         if (!link)
     127             :                 return;
     128             : 
     129           0 :         while (link) {
     130           0 :                 if (ignore_cqes)
     131           0 :                         link->flags |= REQ_F_CQE_SKIP;
     132             :                 else
     133           0 :                         link->flags &= ~REQ_F_CQE_SKIP;
     134           0 :                 trace_io_uring_fail_link(req, link);
     135           0 :                 link = link->link;
     136             :         }
     137             : 
     138           0 :         link = req->link;
     139           0 :         link->io_task_work.func = io_req_tw_fail_links;
     140           0 :         io_req_task_work_add(link);
     141           0 :         req->link = NULL;
     142             : }
     143             : 
     144             : static inline void io_remove_next_linked(struct io_kiocb *req)
     145             : {
     146           0 :         struct io_kiocb *nxt = req->link;
     147             : 
     148           0 :         req->link = nxt->link;
     149           0 :         nxt->link = NULL;
     150             : }
     151             : 
     152           0 : void io_disarm_next(struct io_kiocb *req)
     153             :         __must_hold(&req->ctx->completion_lock)
     154             : {
     155           0 :         struct io_kiocb *link = NULL;
     156             : 
     157           0 :         if (req->flags & REQ_F_ARM_LTIMEOUT) {
     158           0 :                 link = req->link;
     159           0 :                 req->flags &= ~REQ_F_ARM_LTIMEOUT;
     160           0 :                 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
     161           0 :                         io_remove_next_linked(req);
     162             :                         io_req_queue_tw_complete(link, -ECANCELED);
     163             :                 }
     164           0 :         } else if (req->flags & REQ_F_LINK_TIMEOUT) {
     165           0 :                 struct io_ring_ctx *ctx = req->ctx;
     166             : 
     167           0 :                 spin_lock_irq(&ctx->timeout_lock);
     168           0 :                 link = io_disarm_linked_timeout(req);
     169           0 :                 spin_unlock_irq(&ctx->timeout_lock);
     170           0 :                 if (link)
     171             :                         io_req_queue_tw_complete(link, -ECANCELED);
     172             :         }
     173           0 :         if (unlikely((req->flags & REQ_F_FAIL) &&
     174             :                      !(req->flags & REQ_F_HARDLINK)))
     175           0 :                 io_fail_links(req);
     176           0 : }
     177             : 
     178           0 : struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
     179             :                                             struct io_kiocb *link)
     180             :         __must_hold(&req->ctx->completion_lock)
     181             :         __must_hold(&req->ctx->timeout_lock)
     182             : {
     183           0 :         struct io_timeout_data *io = link->async_data;
     184           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);
     185             : 
     186           0 :         io_remove_next_linked(req);
     187           0 :         timeout->head = NULL;
     188           0 :         if (hrtimer_try_to_cancel(&io->timer) != -1) {
     189           0 :                 list_del(&timeout->list);
     190           0 :                 return link;
     191             :         }
     192             : 
     193             :         return NULL;
     194             : }
     195             : 
     196           0 : static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
     197             : {
     198           0 :         struct io_timeout_data *data = container_of(timer,
     199             :                                                 struct io_timeout_data, timer);
     200           0 :         struct io_kiocb *req = data->req;
     201           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     202           0 :         struct io_ring_ctx *ctx = req->ctx;
     203             :         unsigned long flags;
     204             : 
     205           0 :         spin_lock_irqsave(&ctx->timeout_lock, flags);
     206           0 :         list_del_init(&timeout->list);
     207           0 :         atomic_set(&req->ctx->cq_timeouts,
     208           0 :                 atomic_read(&req->ctx->cq_timeouts) + 1);
     209           0 :         spin_unlock_irqrestore(&ctx->timeout_lock, flags);
     210             : 
     211           0 :         if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
     212           0 :                 req_set_fail(req);
     213             : 
     214           0 :         io_req_set_res(req, -ETIME, 0);
     215           0 :         req->io_task_work.func = io_req_task_complete;
     216           0 :         io_req_task_work_add(req);
     217           0 :         return HRTIMER_NORESTART;
     218             : }
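
When the timer expires, the timeout's CQE carries -ETIME rather than 0, and the
request is additionally marked failed unless IORING_TIMEOUT_ETIME_SUCCESS was set
(which mainly matters when the timeout sits in a link chain). A minimal
completion-side sketch in userspace (liburing), assuming 'ring' has already been
initialised and a timeout SQE was submitted earlier:

struct io_uring_cqe *cqe;

if (io_uring_wait_cqe(&ring, &cqe) == 0) {
	/* -ETIME: the timer fired, the normal outcome for a pure timeout.
	 * 0: a counted timeout was satisfied by enough completions. */
	if (cqe->res == -ETIME)
		puts("timeout expired");          /* needs <stdio.h> */
	io_uring_cqe_seen(&ring, cqe);
}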
     219             : 
     220           0 : static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
     221             :                                            struct io_cancel_data *cd)
     222             :         __must_hold(&ctx->timeout_lock)
     223             : {
     224             :         struct io_timeout *timeout;
     225             :         struct io_timeout_data *io;
     226           0 :         struct io_kiocb *req = NULL;
     227             : 
     228           0 :         list_for_each_entry(timeout, &ctx->timeout_list, list) {
     229           0 :                 struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);
     230             : 
     231           0 :                 if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
     232           0 :                     cd->data != tmp->cqe.user_data)
     233           0 :                         continue;
     234           0 :                 if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
     235           0 :                         if (cd->seq == tmp->work.cancel_seq)
     236           0 :                                 continue;
     237           0 :                         tmp->work.cancel_seq = cd->seq;
     238             :                 }
     239             :                 req = tmp;
     240             :                 break;
     241             :         }
     242           0 :         if (!req)
     243             :                 return ERR_PTR(-ENOENT);
     244             : 
     245           0 :         io = req->async_data;
     246           0 :         if (hrtimer_try_to_cancel(&io->timer) == -1)
     247             :                 return ERR_PTR(-EALREADY);
     248           0 :         timeout = io_kiocb_to_cmd(req, struct io_timeout);
     249           0 :         list_del_init(&timeout->list);
     250           0 :         return req;
     251             : }
     252             : 
     253           0 : int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
     254             :         __must_hold(&ctx->completion_lock)
     255             : {
     256             :         struct io_kiocb *req;
     257             : 
     258           0 :         spin_lock_irq(&ctx->timeout_lock);
     259           0 :         req = io_timeout_extract(ctx, cd);
     260           0 :         spin_unlock_irq(&ctx->timeout_lock);
     261             : 
     262           0 :         if (IS_ERR(req))
     263           0 :                 return PTR_ERR(req);
     264           0 :         io_req_task_queue_fail(req, -ECANCELED);
     265           0 :         return 0;
     266             : }
     267             : 
     268           0 : static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
     269             : {
     270           0 :         unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
     271           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     272           0 :         struct io_kiocb *prev = timeout->prev;
     273           0 :         int ret = -ENOENT;
     274             : 
     275           0 :         if (prev) {
     276           0 :                 if (!(req->task->flags & PF_EXITING)) {
     277           0 :                         struct io_cancel_data cd = {
     278           0 :                                 .ctx            = req->ctx,
     279           0 :                                 .data           = prev->cqe.user_data,
     280             :                         };
     281             : 
     282           0 :                         ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
     283             :                 }
     284           0 :                 io_req_set_res(req, ret ?: -ETIME, 0);
     285           0 :                 io_req_task_complete(req, locked);
     286           0 :                 io_put_req(prev);
     287             :         } else {
     288           0 :                 io_req_set_res(req, -ETIME, 0);
     289           0 :                 io_req_task_complete(req, locked);
     290             :         }
     291           0 : }
     292             : 
     293           0 : static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
     294             : {
     295           0 :         struct io_timeout_data *data = container_of(timer,
     296             :                                                 struct io_timeout_data, timer);
     297           0 :         struct io_kiocb *prev, *req = data->req;
     298           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     299           0 :         struct io_ring_ctx *ctx = req->ctx;
     300             :         unsigned long flags;
     301             : 
     302           0 :         spin_lock_irqsave(&ctx->timeout_lock, flags);
     303           0 :         prev = timeout->head;
     304           0 :         timeout->head = NULL;
     305             : 
     306             :         /*

      307             :          * We don't expect the head reference to be NULL; that only happens
      308             :          * if we race with the completion of the linked work.
     309             :          */
     310           0 :         if (prev) {
     311           0 :                 io_remove_next_linked(prev);
     312           0 :                 if (!req_ref_inc_not_zero(prev))
     313           0 :                         prev = NULL;
     314             :         }
     315           0 :         list_del(&timeout->list);
     316           0 :         timeout->prev = prev;
     317           0 :         spin_unlock_irqrestore(&ctx->timeout_lock, flags);
     318             : 
     319           0 :         req->io_task_work.func = io_req_task_link_timeout;
     320           0 :         io_req_task_work_add(req);
     321           0 :         return HRTIMER_NORESTART;
     322             : }
     323             : 
     324           0 : static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
     325             : {
     326           0 :         switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
     327             :         case IORING_TIMEOUT_BOOTTIME:
     328             :                 return CLOCK_BOOTTIME;
     329             :         case IORING_TIMEOUT_REALTIME:
     330             :                 return CLOCK_REALTIME;
     331             :         default:
     332             :                 /* can't happen, vetted at prep time */
     333           0 :                 WARN_ON_ONCE(1);
     334             :                 fallthrough;
     335             :         case 0:
     336             :                 return CLOCK_MONOTONIC;
     337             :         }
     338             : }
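
Which clock backs the hrtimer is chosen by flags at submission time; the default
is CLOCK_MONOTONIC. An illustrative userspace line (liburing; 'sqe' and 'ts' are
assumed to be set up as in the other sketches in this report):

/* measure the timeout against CLOCK_BOOTTIME so suspended time counts */
io_uring_prep_timeout(sqe, &ts, 0, IORING_TIMEOUT_BOOTTIME);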
     339             : 
     340           0 : static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
     341             :                                     struct timespec64 *ts, enum hrtimer_mode mode)
     342             :         __must_hold(&ctx->timeout_lock)
     343             : {
     344             :         struct io_timeout_data *io;
     345             :         struct io_timeout *timeout;
     346           0 :         struct io_kiocb *req = NULL;
     347             : 
     348           0 :         list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
     349           0 :                 struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);
     350             : 
     351           0 :                 if (user_data == tmp->cqe.user_data) {
     352             :                         req = tmp;
     353             :                         break;
     354             :                 }
     355             :         }
     356           0 :         if (!req)
     357             :                 return -ENOENT;
     358             : 
     359           0 :         io = req->async_data;
     360           0 :         if (hrtimer_try_to_cancel(&io->timer) == -1)
     361             :                 return -EALREADY;
     362           0 :         hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
     363           0 :         io->timer.function = io_link_timeout_fn;
     364           0 :         hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
     365           0 :         return 0;
     366             : }
     367             : 
     368           0 : static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
     369             :                              struct timespec64 *ts, enum hrtimer_mode mode)
     370             :         __must_hold(&ctx->timeout_lock)
     371             : {
     372           0 :         struct io_cancel_data cd = { .data = user_data, };
     373           0 :         struct io_kiocb *req = io_timeout_extract(ctx, &cd);
     374           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     375             :         struct io_timeout_data *data;
     376             : 
     377           0 :         if (IS_ERR(req))
     378           0 :                 return PTR_ERR(req);
     379             : 
     380           0 :         timeout->off = 0; /* noseq */
     381           0 :         data = req->async_data;
     382           0 :         list_add_tail(&timeout->list, &ctx->timeout_list);
     383           0 :         hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
     384           0 :         data->timer.function = io_timeout_fn;
     385           0 :         hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
     386           0 :         return 0;
     387             : }
     388             : 
     389           0 : int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
     390             : {
     391           0 :         struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
     392             : 
     393           0 :         if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
     394             :                 return -EINVAL;
     395           0 :         if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
     396             :                 return -EINVAL;
     397             : 
     398           0 :         tr->ltimeout = false;
     399           0 :         tr->addr = READ_ONCE(sqe->addr);
     400           0 :         tr->flags = READ_ONCE(sqe->timeout_flags);
     401           0 :         if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
     402           0 :                 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
     403             :                         return -EINVAL;
     404           0 :                 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
     405           0 :                         tr->ltimeout = true;
     406           0 :                 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
     407             :                         return -EINVAL;
     408           0 :                 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
     409             :                         return -EFAULT;
     410           0 :                 if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
     411             :                         return -EINVAL;
     412           0 :         } else if (tr->flags) {
     413             :                 /* timeout removal doesn't support flags */
     414             :                 return -EINVAL;
     415             :         }
     416             : 
     417             :         return 0;
     418             : }
     419             : 
     420             : static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
     421             : {
     422           0 :         return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
     423           0 :                                             : HRTIMER_MODE_REL;
     424             : }
     425             : 
     426             : /*
     427             :  * Remove or update an existing timeout command
     428             :  */
     429           0 : int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
     430             : {
     431           0 :         struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
     432           0 :         struct io_ring_ctx *ctx = req->ctx;
     433             :         int ret;
     434             : 
     435           0 :         if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
     436           0 :                 struct io_cancel_data cd = { .data = tr->addr, };
     437             : 
     438           0 :                 spin_lock(&ctx->completion_lock);
     439           0 :                 ret = io_timeout_cancel(ctx, &cd);
     440           0 :                 spin_unlock(&ctx->completion_lock);
     441             :         } else {
     442           0 :                 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
     443             : 
     444           0 :                 spin_lock_irq(&ctx->timeout_lock);
     445           0 :                 if (tr->ltimeout)
     446           0 :                         ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
     447             :                 else
     448           0 :                         ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
     449           0 :                 spin_unlock_irq(&ctx->timeout_lock);
     450             :         }
     451             : 
     452           0 :         if (ret < 0)
     453           0 :                 req_set_fail(req);
     454           0 :         io_req_set_res(req, ret, 0);
     455           0 :         return IOU_OK;
     456             : }
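
From userspace, an armed timeout is addressed by the user_data it was submitted
with. A hedged liburing sketch (assumes 'ring' is set up, 'new_ts' is a struct
__kernel_timespec, and an earlier timeout SQE had its user_data set to
TIMEOUT_TAG, e.g. via io_uring_sqe_set_data64(); TIMEOUT_TAG is a name invented
here):

struct io_uring_sqe *sqe;

/* Plain removal: the removed timeout itself completes with -ECANCELED. */
sqe = io_uring_get_sqe(&ring);
io_uring_prep_timeout_remove(sqe, TIMEOUT_TAG, 0);

/* Or re-arm it in place (IORING_TIMEOUT_UPDATE above): the expiry moves to
 * new_ts and no CQE is posted for the old timer. */
sqe = io_uring_get_sqe(&ring);
io_uring_prep_timeout_update(sqe, &new_ts, TIMEOUT_TAG, 0);

io_uring_submit(&ring);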
     457             : 
     458           0 : static int __io_timeout_prep(struct io_kiocb *req,
     459             :                              const struct io_uring_sqe *sqe,
     460             :                              bool is_timeout_link)
     461             : {
     462           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     463             :         struct io_timeout_data *data;
     464             :         unsigned flags;
     465           0 :         u32 off = READ_ONCE(sqe->off);
     466             : 
     467           0 :         if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
     468             :                 return -EINVAL;
     469           0 :         if (off && is_timeout_link)
     470             :                 return -EINVAL;
     471           0 :         flags = READ_ONCE(sqe->timeout_flags);
     472           0 :         if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
     473             :                       IORING_TIMEOUT_ETIME_SUCCESS))
     474             :                 return -EINVAL;
     475             :         /* more than one clock specified is invalid, obviously */
     476           0 :         if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
     477             :                 return -EINVAL;
     478             : 
     479           0 :         INIT_LIST_HEAD(&timeout->list);
     480           0 :         timeout->off = off;
     481           0 :         if (unlikely(off && !req->ctx->off_timeout_used))
     482           0 :                 req->ctx->off_timeout_used = true;
     483             : 
     484           0 :         if (WARN_ON_ONCE(req_has_async_data(req)))
     485             :                 return -EFAULT;
     486           0 :         if (io_alloc_async_data(req))
     487             :                 return -ENOMEM;
     488             : 
     489           0 :         data = req->async_data;
     490           0 :         data->req = req;
     491           0 :         data->flags = flags;
     492             : 
     493           0 :         if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
     494             :                 return -EFAULT;
     495             : 
     496           0 :         if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
     497             :                 return -EINVAL;
     498             : 
     499           0 :         INIT_LIST_HEAD(&timeout->list);
     500           0 :         data->mode = io_translate_timeout_mode(flags);
     501           0 :         hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
     502             : 
     503           0 :         if (is_timeout_link) {
     504           0 :                 struct io_submit_link *link = &req->ctx->submit_state.link;
     505             : 
     506           0 :                 if (!link->head)
     507             :                         return -EINVAL;
     508           0 :                 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
     509             :                         return -EINVAL;
     510           0 :                 timeout->head = link->last;
     511           0 :                 link->last->flags |= REQ_F_ARM_LTIMEOUT;
     512             :         }
     513             :         return 0;
     514             : }
     515             : 
     516           0 : int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
     517             : {
     518           0 :         return __io_timeout_prep(req, sqe, false);
     519             : }
     520             : 
     521           0 : int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
     522             : {
     523           0 :         return __io_timeout_prep(req, sqe, true);
     524             : }
     525             : 
     526           0 : int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
     527             : {
     528           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     529           0 :         struct io_ring_ctx *ctx = req->ctx;
     530           0 :         struct io_timeout_data *data = req->async_data;
     531             :         struct list_head *entry;
     532           0 :         u32 tail, off = timeout->off;
     533             : 
     534           0 :         spin_lock_irq(&ctx->timeout_lock);
     535             : 
     536             :         /*
      537             :          * sqe->off holds how many events need to occur for this
      538             :          * timeout event to be satisfied. If it isn't set, then this is
      539             :          * a pure timeout request and the sequence isn't used.
     540             :          */
     541           0 :         if (io_is_timeout_noseq(req)) {
     542           0 :                 entry = ctx->timeout_list.prev;
     543           0 :                 goto add;
     544             :         }
     545             : 
     546           0 :         tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
     547           0 :         timeout->target_seq = tail + off;
     548             : 
     549             :         /* Update the last seq here in case io_flush_timeouts() hasn't.
     550             :          * This is safe because ->completion_lock is held, and submissions
     551             :          * and completions are never mixed in the same ->completion_lock section.
     552             :          */
     553           0 :         ctx->cq_last_tm_flush = tail;
     554             : 
     555             :         /*
     556             :          * Insertion sort, ensuring the first entry in the list is always
     557             :          * the one we need first.
     558             :          */
     559           0 :         list_for_each_prev(entry, &ctx->timeout_list) {
     560           0 :                 struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
     561           0 :                 struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);
     562             : 
     563           0 :                 if (io_is_timeout_noseq(nxt))
     564           0 :                         continue;
     565             :                 /* nxt.seq is behind @tail, otherwise would've been completed */
     566           0 :                 if (off >= nextt->target_seq - tail)
     567             :                         break;
     568             :         }
     569             : add:
     570           0 :         list_add(&timeout->list, entry);
     571           0 :         data->timer.function = io_timeout_fn;
     572           0 :         hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
     573           0 :         spin_unlock_irq(&ctx->timeout_lock);
     574           0 :         return IOU_ISSUE_SKIP_COMPLETE;
     575             : }
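
A self-contained userspace counterpart (liburing, not from this file; error
handling trimmed): a pure 100 ms timeout. The third argument to
io_uring_prep_timeout() becomes sqe->off, so leaving it at 0 makes this a
"noseq" timeout that only the timer can complete; a non-zero count would let it
complete with res 0 once that many CQEs have been posted.

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	io_uring_queue_init(4, &ring, 0);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 0, 0);   /* count 0 -> pure timeout */

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	printf("timeout res: %d\n", cqe->res);   /* expect -ETIME */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}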
     576             : 
     577           0 : void io_queue_linked_timeout(struct io_kiocb *req)
     578             : {
     579           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     580           0 :         struct io_ring_ctx *ctx = req->ctx;
     581             : 
     582           0 :         spin_lock_irq(&ctx->timeout_lock);
     583             :         /*
     584             :          * If the back reference is NULL, then our linked request finished
      585             :          * before we got a chance to set up the timer.
     586             :          */
     587           0 :         if (timeout->head) {
     588           0 :                 struct io_timeout_data *data = req->async_data;
     589             : 
     590           0 :                 data->timer.function = io_link_timeout_fn;
     591           0 :                 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
     592             :                                 data->mode);
     593           0 :                 list_add_tail(&timeout->list, &ctx->ltimeout_list);
     594             :         }
     595           0 :         spin_unlock_irq(&ctx->timeout_lock);
     596             :         /* drop submission reference */
     597           0 :         io_put_req(req);
     598           0 : }
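
The usual way this path is exercised from userspace is a linked timeout bounding
another request: the I/O SQE carries IOSQE_IO_LINK and the very next SQE is
IORING_OP_LINK_TIMEOUT, which is the link->last pairing checked in
__io_timeout_prep(). A hedged liburing sketch ('ring', 'fd' and the 'buf' array
are assumed to exist; error handling omitted):

struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
struct io_uring_sqe *sqe;

sqe = io_uring_get_sqe(&ring);
io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);    /* the next SQE becomes its timeout */

sqe = io_uring_get_sqe(&ring);
io_uring_prep_link_timeout(sqe, &ts, 0);

io_uring_submit(&ring);
/* If the read finishes first, the link timeout completes with -ECANCELED;
 * if the timer fires first, the read is cancelled and the timeout posts -ETIME. */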
     599             : 
     600             : static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
     601             :                           bool cancel_all)
     602             :         __must_hold(&req->ctx->timeout_lock)
     603             : {
     604             :         struct io_kiocb *req;
     605             : 
     606           0 :         if (task && head->task != task)
     607             :                 return false;
     608           0 :         if (cancel_all)
     609             :                 return true;
     610             : 
     611           0 :         io_for_each_link(req, head) {
     612           0 :                 if (req->flags & REQ_F_INFLIGHT)
     613             :                         return true;
     614             :         }
     615             :         return false;
     616             : }
     617             : 
     618             : /* Returns true if we found and killed one or more timeouts */
     619           0 : __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
     620             :                              bool cancel_all)
     621             : {
     622             :         struct io_timeout *timeout, *tmp;
     623           0 :         int canceled = 0;
     624             : 
     625             :         /*
     626             :          * completion_lock is needed for io_match_task(). Take it before
      627             :          * timeout_lock first to keep the locking order.
     628             :          */
     629           0 :         spin_lock(&ctx->completion_lock);
     630           0 :         spin_lock_irq(&ctx->timeout_lock);
     631           0 :         list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
     632           0 :                 struct io_kiocb *req = cmd_to_io_kiocb(timeout);
     633             : 
     634           0 :                 if (io_match_task(req, tsk, cancel_all) &&
     635           0 :                     io_kill_timeout(req, -ECANCELED))
     636           0 :                         canceled++;
     637             :         }
     638           0 :         spin_unlock_irq(&ctx->timeout_lock);
     639           0 :         spin_unlock(&ctx->completion_lock);
     640           0 :         return canceled != 0;
     641             : }

Generated by: LCOV version 1.14