LCOV - code coverage report
Current view: top level - io_uring - timeout.c (source / functions)
Test:         coverage.info
Date:         2023-07-19 18:55:55
                    Hit    Total    Coverage
Lines:                0      326       0.0 %
Functions:            0       24       0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : #include <linux/kernel.h>
       3             : #include <linux/errno.h>
       4             : #include <linux/file.h>
       5             : #include <linux/io_uring.h>
       6             : 
       7             : #include <trace/events/io_uring.h>
       8             : 
       9             : #include <uapi/linux/io_uring.h>
      10             : 
      11             : #include "io_uring.h"
      12             : #include "refs.h"
      13             : #include "cancel.h"
      14             : #include "timeout.h"
      15             : 
      16             : struct io_timeout {
      17             :         struct file                     *file;
      18             :         u32                             off;
      19             :         u32                             target_seq;
      20             :         u32                             repeats;
      21             :         struct list_head                list;
      22             :         /* head of the link, used by linked timeouts only */
      23             :         struct io_kiocb                 *head;
      24             :         /* for linked completions */
      25             :         struct io_kiocb                 *prev;
      26             : };
      27             : 
      28             : struct io_timeout_rem {
      29             :         struct file                     *file;
      30             :         u64                             addr;
      31             : 
      32             :         /* timeout update */
      33             :         struct timespec64               ts;
      34             :         u32                             flags;
      35             :         bool                            ltimeout;
      36             : };
      37             : 
      38             : static inline bool io_is_timeout_noseq(struct io_kiocb *req)
      39             : {
      40           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
      41           0 :         struct io_timeout_data *data = req->async_data;
      42             : 
      43           0 :         return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
      44             : }
      45             : 
      46           0 : static inline void io_put_req(struct io_kiocb *req)
      47             : {
      48           0 :         if (req_ref_put_and_test(req)) {
      49           0 :                 io_queue_next(req);
      50           0 :                 io_free_req(req);
      51             :         }
      52           0 : }
      53             : 
      54             : static inline bool io_timeout_finish(struct io_timeout *timeout,
      55             :                                      struct io_timeout_data *data)
      56             : {
      57           0 :         if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
      58             :                 return true;
      59             : 
      60           0 :         if (!timeout->off || (timeout->repeats && --timeout->repeats))
      61             :                 return false;
      62             : 
      63             :         return true;
      64             : }
      65             : 
      66             : static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);
      67             : 
      68           0 : static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
      69             : {
      70           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
      71           0 :         struct io_timeout_data *data = req->async_data;
      72           0 :         struct io_ring_ctx *ctx = req->ctx;
      73             : 
      74           0 :         if (!io_timeout_finish(timeout, data)) {
      75             :                 bool filled;
      76           0 :                 filled = io_aux_cqe(ctx, ts->locked, req->cqe.user_data, -ETIME,
      77             :                                     IORING_CQE_F_MORE, false);
      78           0 :                 if (filled) {
      79             :                         /* re-arm timer */
      80           0 :                         spin_lock_irq(&ctx->timeout_lock);
      81           0 :                         list_add(&timeout->list, ctx->timeout_list.prev);
      82           0 :                         data->timer.function = io_timeout_fn;
      83           0 :                         hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
      84           0 :                         spin_unlock_irq(&ctx->timeout_lock);
      85             :                         return;
      86             :                 }
      87             :         }
      88             : 
      89           0 :         io_req_task_complete(req, ts);
      90             : }
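
A hedged user-space sketch of how this multishot re-arm path would be driven.
It assumes liburing and a uapi header recent enough to define
IORING_TIMEOUT_MULTISHOT; the helper name, ring setup and user_data value are
illustrative, not part of this file.

    #include <liburing.h>

    /* Arm a multishot timeout that fires every second, five times in total.
     * Per __io_timeout_prep() and io_timeout_complete() in this file,
     * sqe->off (the 'count' argument below) becomes timeout->repeats, and
     * every intermediate firing posts a CQE with res == -ETIME and
     * IORING_CQE_F_MORE set before the timer is re-armed. */
    static int arm_multishot_timeout(struct io_uring *ring)
    {
            struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            if (!sqe)
                    return -1;
            io_uring_prep_timeout(sqe, &ts, 5, IORING_TIMEOUT_MULTISHOT);
            sqe->user_data = 0xabcd;
            return io_uring_submit(ring);
    }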
      91             : 
      92           0 : static bool io_kill_timeout(struct io_kiocb *req, int status)
      93             :         __must_hold(&req->ctx->timeout_lock)
      94             : {
      95           0 :         struct io_timeout_data *io = req->async_data;
      96             : 
      97           0 :         if (hrtimer_try_to_cancel(&io->timer) != -1) {
      98           0 :                 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
      99             : 
     100           0 :                 if (status)
     101           0 :                         req_set_fail(req);
     102           0 :                 atomic_set(&req->ctx->cq_timeouts,
     103           0 :                         atomic_read(&req->ctx->cq_timeouts) + 1);
     104           0 :                 list_del_init(&timeout->list);
     105           0 :                 io_req_queue_tw_complete(req, status);
     106           0 :                 return true;
     107             :         }
     108             :         return false;
     109             : }
     110             : 
     111           0 : __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
     112             : {
     113             :         u32 seq;
     114             :         struct io_timeout *timeout, *tmp;
     115             : 
     116           0 :         spin_lock_irq(&ctx->timeout_lock);
     117           0 :         seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
     118             : 
     119           0 :         list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
     120           0 :                 struct io_kiocb *req = cmd_to_io_kiocb(timeout);
     121             :                 u32 events_needed, events_got;
     122             : 
     123           0 :                 if (io_is_timeout_noseq(req))
     124             :                         break;
     125             : 
     126             :                 /*
     127             :                  * Since seq can easily wrap around over time, subtract
     128             :                  * the last seq at which timeouts were flushed before comparing.
     129             :                  * Assuming not more than 2^31-1 events have happened since,
     130             :                  * these subtractions won't have wrapped, so we can check if
     131             :                  * target is in [last_seq, current_seq] by comparing the two.
     132             :                  */
     133           0 :                 events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
     134           0 :                 events_got = seq - ctx->cq_last_tm_flush;
     135           0 :                 if (events_got < events_needed)
     136             :                         break;
     137             : 
     138           0 :                 io_kill_timeout(req, 0);
     139             :         }
     140           0 :         ctx->cq_last_tm_flush = seq;
     141           0 :         spin_unlock_irq(&ctx->timeout_lock);
     142           0 : }
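
The unsigned-subtraction comparison above is easiest to check with concrete
numbers. A standalone sketch; the values are made up purely to illustrate a
wrap of the 32-bit sequence counter:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* the last flush happened just before the 32-bit counter wrapped */
            uint32_t cq_last_tm_flush = 0xfffffff0u;
            uint32_t target_seq       = 0x00000005u; /* 21 events after the flush */
            uint32_t seq              = 0x0000000au; /* 26 events have happened   */

            uint32_t events_needed = target_seq - cq_last_tm_flush; /* 21 */
            uint32_t events_got    = seq - cq_last_tm_flush;        /* 26 */

            /* despite the wrap, the timeout is correctly seen as satisfied */
            printf("needed=%u got=%u fire=%d\n",
                   events_needed, events_got, events_got >= events_needed);
            return 0;
    }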
     143             : 
     144           0 : static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
     145             : {
     146           0 :         io_tw_lock(link->ctx, ts);
     147           0 :         while (link) {
     148           0 :                 struct io_kiocb *nxt = link->link;
     149           0 :                 long res = -ECANCELED;
     150             : 
     151           0 :                 if (link->flags & REQ_F_FAIL)
     152           0 :                         res = link->cqe.res;
     153           0 :                 link->link = NULL;
     154           0 :                 io_req_set_res(link, res, 0);
     155           0 :                 io_req_task_complete(link, ts);
     156           0 :                 link = nxt;
     157             :         }
     158           0 : }
     159             : 
     160           0 : static void io_fail_links(struct io_kiocb *req)
     161             :         __must_hold(&req->ctx->completion_lock)
     162             : {
     163           0 :         struct io_kiocb *link = req->link;
     164           0 :         bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
     165             : 
     166           0 :         if (!link)
     167             :                 return;
     168             : 
     169           0 :         while (link) {
     170           0 :                 if (ignore_cqes)
     171           0 :                         link->flags |= REQ_F_CQE_SKIP;
     172             :                 else
     173           0 :                         link->flags &= ~REQ_F_CQE_SKIP;
     174           0 :                 trace_io_uring_fail_link(req, link);
     175           0 :                 link = link->link;
     176             :         }
     177             : 
     178           0 :         link = req->link;
     179           0 :         link->io_task_work.func = io_req_tw_fail_links;
     180           0 :         io_req_task_work_add(link);
     181           0 :         req->link = NULL;
     182             : }
     183             : 
     184             : static inline void io_remove_next_linked(struct io_kiocb *req)
     185             : {
     186           0 :         struct io_kiocb *nxt = req->link;
     187             : 
     188           0 :         req->link = nxt->link;
     189           0 :         nxt->link = NULL;
     190             : }
     191             : 
     192           0 : void io_disarm_next(struct io_kiocb *req)
     193             :         __must_hold(&req->ctx->completion_lock)
     194             : {
     195           0 :         struct io_kiocb *link = NULL;
     196             : 
     197           0 :         if (req->flags & REQ_F_ARM_LTIMEOUT) {
     198           0 :                 link = req->link;
     199           0 :                 req->flags &= ~REQ_F_ARM_LTIMEOUT;
     200           0 :                 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
     201           0 :                         io_remove_next_linked(req);
     202             :                         io_req_queue_tw_complete(link, -ECANCELED);
     203             :                 }
     204           0 :         } else if (req->flags & REQ_F_LINK_TIMEOUT) {
     205           0 :                 struct io_ring_ctx *ctx = req->ctx;
     206             : 
     207           0 :                 spin_lock_irq(&ctx->timeout_lock);
     208           0 :                 link = io_disarm_linked_timeout(req);
     209           0 :                 spin_unlock_irq(&ctx->timeout_lock);
     210           0 :                 if (link)
     211             :                         io_req_queue_tw_complete(link, -ECANCELED);
     212             :         }
     213           0 :         if (unlikely((req->flags & REQ_F_FAIL) &&
     214             :                      !(req->flags & REQ_F_HARDLINK)))
     215           0 :                 io_fail_links(req);
     216           0 : }
     217             : 
     218           0 : struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
     219             :                                             struct io_kiocb *link)
     220             :         __must_hold(&req->ctx->completion_lock)
     221             :         __must_hold(&req->ctx->timeout_lock)
     222             : {
     223           0 :         struct io_timeout_data *io = link->async_data;
     224           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);
     225             : 
     226           0 :         io_remove_next_linked(req);
     227           0 :         timeout->head = NULL;
     228           0 :         if (hrtimer_try_to_cancel(&io->timer) != -1) {
     229           0 :                 list_del(&timeout->list);
     230           0 :                 return link;
     231             :         }
     232             : 
     233             :         return NULL;
     234             : }
     235             : 
     236           0 : static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
     237             : {
     238           0 :         struct io_timeout_data *data = container_of(timer,
     239             :                                                 struct io_timeout_data, timer);
     240           0 :         struct io_kiocb *req = data->req;
     241           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     242           0 :         struct io_ring_ctx *ctx = req->ctx;
     243             :         unsigned long flags;
     244             : 
     245           0 :         spin_lock_irqsave(&ctx->timeout_lock, flags);
     246           0 :         list_del_init(&timeout->list);
     247           0 :         atomic_set(&req->ctx->cq_timeouts,
     248           0 :                 atomic_read(&req->ctx->cq_timeouts) + 1);
     249           0 :         spin_unlock_irqrestore(&ctx->timeout_lock, flags);
     250             : 
     251           0 :         if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
     252           0 :                 req_set_fail(req);
     253             : 
     254           0 :         io_req_set_res(req, -ETIME, 0);
     255           0 :         req->io_task_work.func = io_timeout_complete;
     256           0 :         io_req_task_work_add(req);
     257           0 :         return HRTIMER_NORESTART;
     258             : }
     259             : 
     260           0 : static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
     261             :                                            struct io_cancel_data *cd)
     262             :         __must_hold(&ctx->timeout_lock)
     263             : {
     264             :         struct io_timeout *timeout;
     265             :         struct io_timeout_data *io;
     266           0 :         struct io_kiocb *req = NULL;
     267             : 
     268           0 :         list_for_each_entry(timeout, &ctx->timeout_list, list) {
     269           0 :                 struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);
     270             : 
     271           0 :                 if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
     272           0 :                     cd->data != tmp->cqe.user_data)
     273           0 :                         continue;
     274           0 :                 if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
     275           0 :                         if (cd->seq == tmp->work.cancel_seq)
     276           0 :                                 continue;
     277           0 :                         tmp->work.cancel_seq = cd->seq;
     278             :                 }
     279             :                 req = tmp;
     280             :                 break;
     281             :         }
     282           0 :         if (!req)
     283             :                 return ERR_PTR(-ENOENT);
     284             : 
     285           0 :         io = req->async_data;
     286           0 :         if (hrtimer_try_to_cancel(&io->timer) == -1)
     287             :                 return ERR_PTR(-EALREADY);
     288           0 :         timeout = io_kiocb_to_cmd(req, struct io_timeout);
     289           0 :         list_del_init(&timeout->list);
     290           0 :         return req;
     291             : }
     292             : 
     293           0 : int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
     294             :         __must_hold(&ctx->completion_lock)
     295             : {
     296             :         struct io_kiocb *req;
     297             : 
     298           0 :         spin_lock_irq(&ctx->timeout_lock);
     299           0 :         req = io_timeout_extract(ctx, cd);
     300           0 :         spin_unlock_irq(&ctx->timeout_lock);
     301             : 
     302           0 :         if (IS_ERR(req))
     303           0 :                 return PTR_ERR(req);
     304           0 :         io_req_task_queue_fail(req, -ECANCELED);
     305           0 :         return 0;
     306             : }
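
io_timeout_cancel() is also reached from the generic cancellation path in
cancel.c. A hedged liburing sketch of cancelling a pending timeout by its
user_data from user space; the helper name and user_data values are
illustrative:

    #include <liburing.h>

    /* Ask the kernel to cancel the timeout previously tagged with
     * 'timeout_user_data'; per io_timeout_cancel() the cancelled timeout
     * then completes with -ECANCELED. */
    static int cancel_timeout(struct io_uring *ring, __u64 timeout_user_data)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            if (!sqe)
                    return -1;
            io_uring_prep_cancel64(sqe, timeout_user_data, 0);
            sqe->user_data = 0xcafe;
            return io_uring_submit(ring);
    }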
     307             : 
     308           0 : static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
     309             : {
     310           0 :         unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
     311           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     312           0 :         struct io_kiocb *prev = timeout->prev;
     313           0 :         int ret = -ENOENT;
     314             : 
     315           0 :         if (prev) {
     316           0 :                 if (!(req->task->flags & PF_EXITING)) {
     317           0 :                         struct io_cancel_data cd = {
     318           0 :                                 .ctx            = req->ctx,
     319           0 :                                 .data           = prev->cqe.user_data,
     320             :                         };
     321             : 
     322           0 :                         ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
     323             :                 }
     324           0 :                 io_req_set_res(req, ret ?: -ETIME, 0);
     325           0 :                 io_req_task_complete(req, ts);
     326           0 :                 io_put_req(prev);
     327             :         } else {
     328           0 :                 io_req_set_res(req, -ETIME, 0);
     329           0 :                 io_req_task_complete(req, ts);
     330             :         }
     331           0 : }
     332             : 
     333           0 : static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
     334             : {
     335           0 :         struct io_timeout_data *data = container_of(timer,
     336             :                                                 struct io_timeout_data, timer);
     337           0 :         struct io_kiocb *prev, *req = data->req;
     338           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     339           0 :         struct io_ring_ctx *ctx = req->ctx;
     340             :         unsigned long flags;
     341             : 
     342           0 :         spin_lock_irqsave(&ctx->timeout_lock, flags);
     343           0 :         prev = timeout->head;
     344           0 :         timeout->head = NULL;
     345             : 
     346             :         /*
      347             :          * We don't expect the list to be empty; that will only happen if we
     348             :          * race with the completion of the linked work.
     349             :          */
     350           0 :         if (prev) {
     351           0 :                 io_remove_next_linked(prev);
     352           0 :                 if (!req_ref_inc_not_zero(prev))
     353           0 :                         prev = NULL;
     354             :         }
     355           0 :         list_del(&timeout->list);
     356           0 :         timeout->prev = prev;
     357           0 :         spin_unlock_irqrestore(&ctx->timeout_lock, flags);
     358             : 
     359           0 :         req->io_task_work.func = io_req_task_link_timeout;
     360           0 :         io_req_task_work_add(req);
     361           0 :         return HRTIMER_NORESTART;
     362             : }
     363             : 
     364           0 : static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
     365             : {
     366           0 :         switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
     367             :         case IORING_TIMEOUT_BOOTTIME:
     368             :                 return CLOCK_BOOTTIME;
     369             :         case IORING_TIMEOUT_REALTIME:
     370             :                 return CLOCK_REALTIME;
     371             :         default:
     372             :                 /* can't happen, vetted at prep time */
     373           0 :                 WARN_ON_ONCE(1);
     374             :                 fallthrough;
     375             :         case 0:
     376             :                 return CLOCK_MONOTONIC;
     377             :         }
     378             : }
     379             : 
     380           0 : static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
     381             :                                     struct timespec64 *ts, enum hrtimer_mode mode)
     382             :         __must_hold(&ctx->timeout_lock)
     383             : {
     384             :         struct io_timeout_data *io;
     385             :         struct io_timeout *timeout;
     386           0 :         struct io_kiocb *req = NULL;
     387             : 
     388           0 :         list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
     389           0 :                 struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);
     390             : 
     391           0 :                 if (user_data == tmp->cqe.user_data) {
     392             :                         req = tmp;
     393             :                         break;
     394             :                 }
     395             :         }
     396           0 :         if (!req)
     397             :                 return -ENOENT;
     398             : 
     399           0 :         io = req->async_data;
     400           0 :         if (hrtimer_try_to_cancel(&io->timer) == -1)
     401             :                 return -EALREADY;
     402           0 :         hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
     403           0 :         io->timer.function = io_link_timeout_fn;
     404           0 :         hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
     405           0 :         return 0;
     406             : }
     407             : 
     408           0 : static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
     409             :                              struct timespec64 *ts, enum hrtimer_mode mode)
     410             :         __must_hold(&ctx->timeout_lock)
     411             : {
     412           0 :         struct io_cancel_data cd = { .data = user_data, };
     413           0 :         struct io_kiocb *req = io_timeout_extract(ctx, &cd);
     414           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     415             :         struct io_timeout_data *data;
     416             : 
     417           0 :         if (IS_ERR(req))
     418           0 :                 return PTR_ERR(req);
     419             : 
     420           0 :         timeout->off = 0; /* noseq */
     421           0 :         data = req->async_data;
     422           0 :         list_add_tail(&timeout->list, &ctx->timeout_list);
     423           0 :         hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
     424           0 :         data->timer.function = io_timeout_fn;
     425           0 :         hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
     426           0 :         return 0;
     427             : }
     428             : 
     429           0 : int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
     430             : {
     431           0 :         struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
     432             : 
     433           0 :         if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
     434             :                 return -EINVAL;
     435           0 :         if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
     436             :                 return -EINVAL;
     437             : 
     438           0 :         tr->ltimeout = false;
     439           0 :         tr->addr = READ_ONCE(sqe->addr);
     440           0 :         tr->flags = READ_ONCE(sqe->timeout_flags);
     441           0 :         if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
     442           0 :                 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
     443             :                         return -EINVAL;
     444           0 :                 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
     445           0 :                         tr->ltimeout = true;
     446           0 :                 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
     447             :                         return -EINVAL;
     448           0 :                 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
     449             :                         return -EFAULT;
     450           0 :                 if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
     451             :                         return -EINVAL;
     452           0 :         } else if (tr->flags) {
     453             :                 /* timeout removal doesn't support flags */
     454             :                 return -EINVAL;
     455             :         }
     456             : 
     457             :         return 0;
     458             : }
     459             : 
     460             : static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
     461             : {
     462           0 :         return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
     463           0 :                                             : HRTIMER_MODE_REL;
     464             : }
     465             : 
     466             : /*
     467             :  * Remove or update an existing timeout command
     468             :  */
     469           0 : int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
     470             : {
     471           0 :         struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
     472           0 :         struct io_ring_ctx *ctx = req->ctx;
     473             :         int ret;
     474             : 
     475           0 :         if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
     476           0 :                 struct io_cancel_data cd = { .data = tr->addr, };
     477             : 
     478           0 :                 spin_lock(&ctx->completion_lock);
     479           0 :                 ret = io_timeout_cancel(ctx, &cd);
     480           0 :                 spin_unlock(&ctx->completion_lock);
     481             :         } else {
     482           0 :                 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
     483             : 
     484           0 :                 spin_lock_irq(&ctx->timeout_lock);
     485           0 :                 if (tr->ltimeout)
     486           0 :                         ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
     487             :                 else
     488           0 :                         ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
     489           0 :                 spin_unlock_irq(&ctx->timeout_lock);
     490             :         }
     491             : 
     492           0 :         if (ret < 0)
     493           0 :                 req_set_fail(req);
     494           0 :         io_req_set_res(req, ret, 0);
     495           0 :         return IOU_OK;
     496             : }
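
Both branches of io_timeout_remove() correspond to IORING_OP_TIMEOUT_REMOVE
submissions from user space. A hedged liburing sketch, assuming a default ring
without IORING_SETUP_SQPOLL; the helper names, the timeout tag 0x1234 and the
other user_data values are illustrative:

    #include <liburing.h>

    /* Remove the timeout tagged 0x1234: the removed timeout completes with
     * -ECANCELED and this request completes with io_timeout_cancel()'s result. */
    static int remove_timeout(struct io_uring *ring)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            if (!sqe)
                    return -1;
            io_uring_prep_timeout_remove(sqe, 0x1234, 0);
            sqe->user_data = 0x1;
            return io_uring_submit(ring);
    }

    /* Re-arm the same timeout with a fresh 10s relative expiry instead; this
     * takes the IORING_TIMEOUT_UPDATE branch and lands in io_timeout_update().
     * The timespec is copied by the kernel during submit, so a stack variable
     * is fine here. */
    static int update_timeout(struct io_uring *ring)
    {
            struct __kernel_timespec ts = { .tv_sec = 10, .tv_nsec = 0 };
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            if (!sqe)
                    return -1;
            io_uring_prep_timeout_update(sqe, &ts, 0x1234, 0);
            sqe->user_data = 0x2;
            return io_uring_submit(ring);
    }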
     497             : 
     498           0 : static int __io_timeout_prep(struct io_kiocb *req,
     499             :                              const struct io_uring_sqe *sqe,
     500             :                              bool is_timeout_link)
     501             : {
     502           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     503             :         struct io_timeout_data *data;
     504             :         unsigned flags;
     505           0 :         u32 off = READ_ONCE(sqe->off);
     506             : 
     507           0 :         if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
     508             :                 return -EINVAL;
     509           0 :         if (off && is_timeout_link)
     510             :                 return -EINVAL;
     511           0 :         flags = READ_ONCE(sqe->timeout_flags);
     512           0 :         if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
     513             :                       IORING_TIMEOUT_ETIME_SUCCESS |
     514             :                       IORING_TIMEOUT_MULTISHOT))
     515             :                 return -EINVAL;
     516             :         /* more than one clock specified is invalid, obviously */
     517           0 :         if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
     518             :                 return -EINVAL;
     519             :         /* multishot requests only make sense with rel values */
     520           0 :         if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
     521             :                 return -EINVAL;
     522             : 
     523           0 :         INIT_LIST_HEAD(&timeout->list);
     524           0 :         timeout->off = off;
     525           0 :         if (unlikely(off && !req->ctx->off_timeout_used))
     526           0 :                 req->ctx->off_timeout_used = true;
     527             :         /*
      528             :          * for multishot requests with a fixed number of repeats, 'repeats'
      529             :          * tracks the remaining count
     530             :          */
     531           0 :         timeout->repeats = 0;
     532           0 :         if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
     533           0 :                 timeout->repeats = off;
     534             : 
     535           0 :         if (WARN_ON_ONCE(req_has_async_data(req)))
     536             :                 return -EFAULT;
     537           0 :         if (io_alloc_async_data(req))
     538             :                 return -ENOMEM;
     539             : 
     540           0 :         data = req->async_data;
     541           0 :         data->req = req;
     542           0 :         data->flags = flags;
     543             : 
     544           0 :         if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
     545             :                 return -EFAULT;
     546             : 
     547           0 :         if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
     548             :                 return -EINVAL;
     549             : 
     550           0 :         INIT_LIST_HEAD(&timeout->list);
     551           0 :         data->mode = io_translate_timeout_mode(flags);
     552           0 :         hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
     553             : 
     554           0 :         if (is_timeout_link) {
     555           0 :                 struct io_submit_link *link = &req->ctx->submit_state.link;
     556             : 
     557           0 :                 if (!link->head)
     558             :                         return -EINVAL;
     559           0 :                 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
     560             :                         return -EINVAL;
     561           0 :                 timeout->head = link->last;
     562           0 :                 link->last->flags |= REQ_F_ARM_LTIMEOUT;
     563             :         }
     564             :         return 0;
     565             : }
     566             : 
     567           0 : int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
     568             : {
     569           0 :         return __io_timeout_prep(req, sqe, false);
     570             : }
     571             : 
     572           0 : int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
     573             : {
     574           0 :         return __io_timeout_prep(req, sqe, true);
     575             : }
     576             : 
     577           0 : int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
     578             : {
     579           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     580           0 :         struct io_ring_ctx *ctx = req->ctx;
     581           0 :         struct io_timeout_data *data = req->async_data;
     582             :         struct list_head *entry;
     583           0 :         u32 tail, off = timeout->off;
     584             : 
     585           0 :         spin_lock_irq(&ctx->timeout_lock);
     586             : 
     587             :         /*
      588             :          * sqe->off holds how many events need to occur for this
     589             :          * timeout event to be satisfied. If it isn't set, then this is
     590             :          * a pure timeout request, sequence isn't used.
     591             :          */
     592           0 :         if (io_is_timeout_noseq(req)) {
     593           0 :                 entry = ctx->timeout_list.prev;
     594           0 :                 goto add;
     595             :         }
     596             : 
     597           0 :         tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
     598           0 :         timeout->target_seq = tail + off;
     599             : 
     600             :         /* Update the last seq here in case io_flush_timeouts() hasn't.
     601             :          * This is safe because ->completion_lock is held, and submissions
     602             :          * and completions are never mixed in the same ->completion_lock section.
     603             :          */
     604           0 :         ctx->cq_last_tm_flush = tail;
     605             : 
     606             :         /*
     607             :          * Insertion sort, ensuring the first entry in the list is always
     608             :          * the one we need first.
     609             :          */
     610           0 :         list_for_each_prev(entry, &ctx->timeout_list) {
     611           0 :                 struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
     612           0 :                 struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);
     613             : 
     614           0 :                 if (io_is_timeout_noseq(nxt))
     615           0 :                         continue;
     616             :                 /* nxt.seq is behind @tail, otherwise would've been completed */
     617           0 :                 if (off >= nextt->target_seq - tail)
     618             :                         break;
     619             :         }
     620             : add:
     621           0 :         list_add(&timeout->list, entry);
     622           0 :         data->timer.function = io_timeout_fn;
     623           0 :         hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
     624           0 :         spin_unlock_irq(&ctx->timeout_lock);
     625           0 :         return IOU_ISSUE_SKIP_COMPLETE;
     626             : }
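
For comparison with the linked and multishot cases, a plain counted timeout:
here sqe->off is an event count, so io_timeout() above inserts the request into
the sorted list with target_seq = tail + off, and io_flush_timeouts() reaps it
once enough completions have been posted. A hedged liburing sketch; the helper
name, count, expiry and user_data are illustrative:

    #include <liburing.h>

    /* Complete after 8 other CQEs have been posted or after 5 seconds,
     * whichever comes first: res is 0 when satisfied by the count and
     * -ETIME when the timer fires. */
    static int arm_counted_timeout(struct io_uring *ring)
    {
            struct __kernel_timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            if (!sqe)
                    return -1;
            io_uring_prep_timeout(sqe, &ts, 8, 0);
            sqe->user_data = 0x77;
            return io_uring_submit(ring);
    }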
     627             : 
     628           0 : void io_queue_linked_timeout(struct io_kiocb *req)
     629             : {
     630           0 :         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
     631           0 :         struct io_ring_ctx *ctx = req->ctx;
     632             : 
     633           0 :         spin_lock_irq(&ctx->timeout_lock);
     634             :         /*
     635             :          * If the back reference is NULL, then our linked request finished
      636             :          * before we got a chance to set up the timer
     637             :          */
     638           0 :         if (timeout->head) {
     639           0 :                 struct io_timeout_data *data = req->async_data;
     640             : 
     641           0 :                 data->timer.function = io_link_timeout_fn;
     642           0 :                 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
     643             :                                 data->mode);
     644           0 :                 list_add_tail(&timeout->list, &ctx->ltimeout_list);
     645             :         }
     646           0 :         spin_unlock_irq(&ctx->timeout_lock);
     647             :         /* drop submission reference */
     648           0 :         io_put_req(req);
     649           0 : }
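
io_queue_linked_timeout() is reached when a request submitted with
IOSQE_IO_LINK is followed by an IORING_OP_LINK_TIMEOUT SQE. A hedged liburing
sketch that caps a read at 500 ms; the fd, buffer and helper name are
illustrative:

    #include <liburing.h>

    /* Issue a read that is cancelled (typically completing with -ECANCELED)
     * if it has not finished within 500 ms. Per io_req_task_link_timeout()
     * the link timeout itself normally completes with -ETIME when it fires,
     * or with -ECANCELED if the read completed first and disarmed it. */
    static int read_with_deadline(struct io_uring *ring, int fd,
                                  void *buf, unsigned len)
    {
            struct __kernel_timespec ts = { .tv_sec = 0,
                                            .tv_nsec = 500 * 1000 * 1000 };
            struct io_uring_sqe *sqe;

            sqe = io_uring_get_sqe(ring);
            if (!sqe)
                    return -1;
            io_uring_prep_read(sqe, fd, buf, len, 0);
            sqe->flags |= IOSQE_IO_LINK;
            sqe->user_data = 1;

            sqe = io_uring_get_sqe(ring);
            if (!sqe)
                    return -1;
            io_uring_prep_link_timeout(sqe, &ts, 0);
            sqe->user_data = 2;

            return io_uring_submit(ring);
    }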
     650             : 
     651             : static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
     652             :                           bool cancel_all)
     653             :         __must_hold(&req->ctx->timeout_lock)
     654             : {
     655             :         struct io_kiocb *req;
     656             : 
     657           0 :         if (task && head->task != task)
     658             :                 return false;
     659           0 :         if (cancel_all)
     660             :                 return true;
     661             : 
     662           0 :         io_for_each_link(req, head) {
     663           0 :                 if (req->flags & REQ_F_INFLIGHT)
     664             :                         return true;
     665             :         }
     666             :         return false;
     667             : }
     668             : 
     669             : /* Returns true if we found and killed one or more timeouts */
     670           0 : __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
     671             :                              bool cancel_all)
     672             : {
     673             :         struct io_timeout *timeout, *tmp;
     674           0 :         int canceled = 0;
     675             : 
     676             :         /*
     677             :          * completion_lock is needed for io_match_task(). Take it before
      678             :          * timeout_lock first to keep the locking order.
     679             :          */
     680           0 :         spin_lock(&ctx->completion_lock);
     681           0 :         spin_lock_irq(&ctx->timeout_lock);
     682           0 :         list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
     683           0 :                 struct io_kiocb *req = cmd_to_io_kiocb(timeout);
     684             : 
     685           0 :                 if (io_match_task(req, tsk, cancel_all) &&
     686           0 :                     io_kill_timeout(req, -ECANCELED))
     687           0 :                         canceled++;
     688             :         }
     689           0 :         spin_unlock_irq(&ctx->timeout_lock);
     690           0 :         spin_unlock(&ctx->completion_lock);
     691           0 :         return canceled != 0;
     692             : }

Generated by: LCOV version 1.14