// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

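/*
 * task_work handler: run the driver's deferred callback, telling it via
 * issue_flags whether the ring is currently locked.
 */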
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

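/*
 * Arrange for @task_work_cb to run from task_work context of the task
 * that owns the request, where completion-side work is safe.
 */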
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

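/*
 * Like __io_uring_cmd_do_in_task(), but with a lazy wakeup: the target
 * task is not eagerly woken for every queued item, allowing completions
 * to be batched.
 */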
void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_do_in_task_lazy);
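
/*
 * Illustrative sketch, not part of the upstream file: how a driver might
 * use io_uring_cmd_do_in_task_lazy() to bounce a completion that arrives
 * in hard-irq context over to task context, where posting a CQE is safe.
 * struct example_cmd, example_cmd_tw_cb() and example_hw_done() are
 * hypothetical names; io_uring_cmd_to_pdu() is assumed to be the pdu
 * accessor from <linux/io_uring.h>.
 */
struct example_cmd {
	ssize_t status;		/* hypothetical per-command state */
	ssize_t result;
};

static void example_cmd_tw_cb(struct io_uring_cmd *ioucmd,
			      unsigned issue_flags)
{
	struct example_cmd *ec = io_uring_cmd_to_pdu(ioucmd, struct example_cmd);

	/* now running in task context with the correct locking state */
	io_uring_cmd_done(ioucmd, ec->status, ec->result, issue_flags);
}

/* hypothetical: called from the driver's IRQ path when hardware finishes */
static void example_hw_done(struct io_uring_cmd *ioucmd)
{
	io_uring_cmd_do_in_task_lazy(ioucmd, example_cmd_tw_cb);
}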

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->extra1 = extra1;
	req->extra2 = extra2;
	req->flags |= REQ_F_CQE32_INIT;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
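
/*
 * Illustrative sketch, not part of the upstream file: the calling
 * convention above from a driver's point of view. A ->uring_cmd() hook
 * that cannot finish inline returns -EIOCBQUEUED and must later call
 * io_uring_cmd_done() exactly once. example_hw_submit() is a
 * hypothetical stub standing in for real hardware submission.
 */
static int example_hw_submit(struct io_uring_cmd *ioucmd)
{
	/* hypothetical: queue the command to hardware, 0 on success */
	return 0;
}

static int example_uring_cmd(struct io_uring_cmd *ioucmd,
			     unsigned int issue_flags)
{
	int ret = example_hw_submit(ioucmd);

	if (ret < 0)
		return ret;	/* failed inline: core posts the CQE */

	/* queued to hardware: completion path calls io_uring_cmd_done() */
	return -EIOCBQUEUED;
}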

/* copy the SQE into async_data so it outlives the shared SQ ring entry */
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

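/*
 * Prep at submission time: validate flags, resolve a fixed buffer if
 * IORING_URING_CMD_FIXED was requested, and record the SQE and opcode.
 */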
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->sqe = sqe;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

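/*
 * Issue path: hand the command to the file's ->uring_cmd() handler and
 * translate its return value into io_uring completion semantics.
 */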
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		WRITE_ONCE(ioucmd->cookie, NULL);
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

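/*
 * Helper for ->uring_cmd() implementations: build an iov_iter over the
 * fixed (registered) buffer that was selected at prep time.
 */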
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);