io_uring/cancel.c:
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

struct io_cancel {
        struct file                     *file;
        u64                             addr;
        u32                             flags;
        s32                             fd;
};

#define CANCEL_FLAGS    (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
                         IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED)

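/*
 * Match callback for io-wq cancelation: a work item matches if it belongs to
 * the same ring and, depending on the flags, is any request (CANCEL_ANY),
 * targets the given file (CANCEL_FD), or carries the given user_data.
 * For CANCEL_ALL/ANY, cancel_seq ensures each request is only matched once
 * per cancel operation.
 */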
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_cancel_data *cd = data;

        if (req->ctx != cd->ctx)
                return false;
        if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
                ;
        } else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
                if (req->file != cd->file)
                        return false;
        } else {
                if (req->cqe.user_data != cd->data)
                        return false;
        }
        if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
                if (cd->seq == req->work.cancel_seq)
                        return false;
                req->work.cancel_seq = cd->seq;
        }
        return true;
}

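/*
 * Try to cancel matching work on one task's io-wq. With CANCEL_ALL/ANY set,
 * every matching item is canceled, otherwise only the first match. The io-wq
 * result is mapped to 0 (canceled), -EALREADY (running) or -ENOENT (no match).
 */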
static int io_async_cancel_one(struct io_uring_task *tctx,
                               struct io_cancel_data *cd)
{
        enum io_wq_cancel cancel_ret;
        int ret = 0;
        bool all;

        if (!tctx || !tctx->io_wq)
                return -ENOENT;

        all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
        cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
        switch (cancel_ret) {
        case IO_WQ_CANCEL_OK:
                ret = 0;
                break;
        case IO_WQ_CANCEL_RUNNING:
                ret = -EALREADY;
                break;
        case IO_WQ_CANCEL_NOTFOUND:
                ret = -ENOENT;
                break;
        }

        return ret;
}

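/*
 * Attempt cancelation in order: the task's io-wq first, then armed poll
 * requests, and finally (unless matching by fd) pending timeouts.
 */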
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
                  unsigned issue_flags)
{
        struct io_ring_ctx *ctx = cd->ctx;
        int ret;

        WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

        ret = io_async_cancel_one(tctx, cd);
        /*
         * Fall through even for -EALREADY, as we may have a poll request
         * armed that needs unarming.
         */
        if (!ret)
                return 0;

        ret = io_poll_cancel(ctx, cd, issue_flags);
        if (ret != -ENOENT)
                return ret;

        spin_lock(&ctx->completion_lock);
        if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
                ret = io_timeout_cancel(ctx, cd);
        spin_unlock(&ctx->completion_lock);
        return ret;
}

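/*
 * Prepare IORING_OP_ASYNC_CANCEL: validate the unused SQE fields, then read
 * the target user_data, the cancel flags, and the fd when matching by file.
 */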
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

        if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;
        if (sqe->off || sqe->len || sqe->splice_fd_in)
                return -EINVAL;

        cancel->addr = READ_ONCE(sqe->addr);
        cancel->flags = READ_ONCE(sqe->cancel_flags);
        if (cancel->flags & ~CANCEL_FLAGS)
                return -EINVAL;
        if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
                if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
                        return -EINVAL;
                cancel->fd = READ_ONCE(sqe->fd);
        }

        return 0;
}

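/*
 * Core cancel loop: try the given task's context first (repeatedly for
 * CANCEL_ALL/ANY), then fall back to walking every task attached to the ring.
 * Returns the number of canceled requests for ALL/ANY, else the last result.
 */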
static int __io_async_cancel(struct io_cancel_data *cd,
                             struct io_uring_task *tctx,
                             unsigned int issue_flags)
{
        bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
        struct io_ring_ctx *ctx = cd->ctx;
        struct io_tctx_node *node;
        int ret, nr = 0;

        do {
                ret = io_try_cancel(tctx, cd, issue_flags);
                if (ret == -ENOENT)
                        break;
                if (!all)
                        return ret;
                nr++;
        } while (1);

        /* slow path, try all io-wq's */
        io_ring_submit_lock(ctx, issue_flags);
        ret = -ENOENT;
        list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
                struct io_uring_task *tctx = node->task->io_uring;

                ret = io_async_cancel_one(tctx, cd);
                if (ret != -ENOENT) {
                        if (!all)
                                break;
                        nr++;
                }
        }
        io_ring_submit_unlock(ctx, issue_flags);
        return all ? nr : ret;
}

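/*
 * Issue IORING_OP_ASYNC_CANCEL: resolve the target file when matching by fd
 * (fixed or normal), run the cancel loop and post the result as the CQE res.
 */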
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
        struct io_cancel_data cd = {
                .ctx    = req->ctx,
                .data   = cancel->addr,
                .flags  = cancel->flags,
                .seq    = atomic_inc_return(&req->ctx->cancel_seq),
        };
        struct io_uring_task *tctx = req->task->io_uring;
        int ret;

        if (cd.flags & IORING_ASYNC_CANCEL_FD) {
                if (req->flags & REQ_F_FIXED_FILE ||
                    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
                        req->flags |= REQ_F_FIXED_FILE;
                        req->file = io_file_get_fixed(req, cancel->fd,
                                                        issue_flags);
                } else {
                        req->file = io_file_get_normal(req, cancel->fd);
                }
                if (!req->file) {
                        ret = -EBADF;
                        goto done;
                }
                cd.file = req->file;
        }

        ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

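/* Initialize the per-bucket lock and list head of a cancelation hash table. */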
void init_hash_table(struct io_hash_table *table, unsigned size)
{
        unsigned int i;

        for (i = 0; i < size; i++) {
                spin_lock_init(&table->hbs[i].lock);
                INIT_HLIST_HEAD(&table->hbs[i].list);
        }
}

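/*
 * One attempt of a synchronous cancel. A fixed file must be re-resolved on
 * every attempt, since the uring_lock is dropped between attempts.
 */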
static int __io_sync_cancel(struct io_uring_task *tctx,
                            struct io_cancel_data *cd, int fd)
{
        struct io_ring_ctx *ctx = cd->ctx;

        /* fixed must be grabbed every time since we drop the uring_lock */
        if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
            (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
                if (unlikely(fd >= ctx->nr_user_files))
                        return -EBADF;
                fd = array_index_nospec(fd, ctx->nr_user_files);
                cd->file = io_file_from_index(&ctx->file_table, fd);
                if (!cd->file)
                        return -EBADF;
        }

        return __io_async_cancel(cd, tctx, 0);
}

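/*
 * Handle the synchronous cancel registration opcode: copy the request from
 * userspace, validate it, then retry cancelation while waiting on cq_wait
 * until nothing matches, a signal arrives, or the optional timeout expires.
 */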
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
        __must_hold(&ctx->uring_lock)
{
        struct io_cancel_data cd = {
                .ctx    = ctx,
                .seq    = atomic_inc_return(&ctx->cancel_seq),
        };
        ktime_t timeout = KTIME_MAX;
        struct io_uring_sync_cancel_reg sc;
        struct fd f = { };
        DEFINE_WAIT(wait);
        int ret;

        if (copy_from_user(&sc, arg, sizeof(sc)))
                return -EFAULT;
        if (sc.flags & ~CANCEL_FLAGS)
                return -EINVAL;
        if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
                return -EINVAL;

        cd.data = sc.addr;
        cd.flags = sc.flags;

        /* we can grab a normal file descriptor upfront */
        if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
           !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
                f = fdget(sc.fd);
                if (!f.file)
                        return -EBADF;
                cd.file = f.file;
        }

        ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

        /* found something, done! */
        if (ret != -EALREADY)
                goto out;

        if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
                struct timespec64 ts = {
                        .tv_sec         = sc.timeout.tv_sec,
                        .tv_nsec        = sc.timeout.tv_nsec
                };

                timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
        }

        /*
         * Keep looking until we get -ENOENT. We'll get woken every time a
         * request completes and will retry the cancelation.
         */
        do {
                cd.seq = atomic_inc_return(&ctx->cancel_seq);

                prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

                ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

                mutex_unlock(&ctx->uring_lock);
                if (ret != -EALREADY)
                        break;

                ret = io_run_task_work_sig(ctx);
                if (ret < 0)
                        break;
                ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
                if (!ret) {
                        ret = -ETIME;
                        break;
                }
                mutex_lock(&ctx->uring_lock);
        } while (1);

        finish_wait(&ctx->cq_wait, &wait);
        mutex_lock(&ctx->uring_lock);

        if (ret == -ENOENT || ret > 0)
                ret = 0;
out:
        fdput(f);
        return ret;
}
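
For context, a minimal userspace sketch of driving the IORING_OP_ASYNC_CANCEL path above through liburing. It assumes a liburing release that provides io_uring_prep_cancel64(); the helper name and its error handling are illustrative, not part of this file.

/* Hypothetical helper: cancel a previously submitted request by user_data. */
#include <liburing.h>

static int cancel_by_user_data(struct io_uring *ring, __u64 user_data)
{
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -EBUSY;
        /* Flags 0: match a single request by user_data, as in io_cancel_cb(). */
        io_uring_prep_cancel64(sqe, user_data, 0);
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        /* 0, -ENOENT or -EALREADY, as posted by io_req_set_res() above. */
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
}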
