// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	io_req_task_work_add(req);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
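
/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * a driver that finishes work in IRQ or other atomic context might use
 * io_uring_cmd_complete_in_task() to defer the final completion to task
 * context, where io_uring_cmd_done() can then be called.  The pdu layout and
 * the my_*() names below are hypothetical.
 *
 *	struct my_cmd_pdu {
 *		int status;
 *	};
 *
 *	static void my_cmd_tw_cb(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		struct my_cmd_pdu *pdu = (struct my_cmd_pdu *)ioucmd->pdu;
 *
 *		io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
 *	}
 *
 *	static void my_irq_done(struct io_uring_cmd *ioucmd, int status)
 *	{
 *		struct my_cmd_pdu *pdu = (struct my_cmd_pdu *)ioucmd->pdu;
 *
 *		pdu->status = status;
 *		io_uring_cmd_complete_in_task(ioucmd, my_cmd_tw_cb);
 *	}
 */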

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->extra1 = extra1;
	req->extra2 = extra2;
	req->flags |= REQ_F_CQE32_INIT;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
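
/*
 * Illustrative only: the calling pattern implied by the comment and code
 * above.  A driver's ->uring_cmd() handler returns -EIOCBQUEUED to signal
 * "completion will arrive later", then calls io_uring_cmd_done() exactly once
 * when the operation finishes.  Note that res2 only reaches userspace (as the
 * first extra field of a big CQE) when the ring was created with
 * IORING_SETUP_CQE32; on a normal ring it is dropped.  my_handle_cmd() below
 * is a hypothetical helper.
 *
 *	static int my_uring_cmd(struct io_uring_cmd *ioucmd,
 *				unsigned int issue_flags)
 *	{
 *		int ret = my_handle_cmd(ioucmd, issue_flags);
 *
 *		if (ret == -EINPROGRESS)	// completes later via io_uring_cmd_done()
 *			return -EIOCBQUEUED;
 *		return ret;			// completed inline by the core
 *	}
 */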

int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->sqe = sqe;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		WRITE_ONCE(ioucmd->cookie, NULL);
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}
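
/*
 * Illustrative only: how a driver might wire up the hooks that io_uring_cmd()
 * dispatches to.  ->uring_cmd() is required for IORING_OP_URING_CMD to work
 * on the file; ->uring_cmd_iopoll() is additionally required once the ring
 * uses IORING_SETUP_IOPOLL (see the checks above).  Returning -EAGAIN from
 * the handler asks the core to retry later from a context where blocking is
 * allowed; the SQE is copied into async data first, so it stays valid for
 * that retry.  The my_*() names are hypothetical.
 *
 *	static const struct file_operations my_fops = {
 *		.owner		  = THIS_MODULE,
 *		.uring_cmd	  = my_uring_cmd,
 *		.uring_cmd_iopoll = my_uring_cmd_iopoll,
 *	};
 */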

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
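
/*
 * Illustrative only: when the submitter set IORING_URING_CMD_FIXED in
 * sqe->uring_cmd_flags, io_uring_cmd_prep() above has already resolved
 * sqe->buf_index to a registered buffer in req->imu, and a driver handler can
 * map a user address range from that buffer into an iov_iter as sketched
 * below.  The direction argument follows the usual READ/WRITE convention;
 * my_buf_addr and my_buf_len stand in for values the driver would pull out of
 * its command payload.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(my_buf_addr, my_buf_len, WRITE,
 *					&iter, ioucmd);
 *	if (ret)
 *		return ret;
 *	// ... hand &iter to the usual kernel I/O machinery ...
 */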