Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : #include <linux/kernel.h>
3 : #include <linux/errno.h>
4 : #include <linux/fs.h>
5 : #include <linux/file.h>
6 : #include <linux/blk-mq.h>
7 : #include <linux/mm.h>
8 : #include <linux/slab.h>
9 : #include <linux/fsnotify.h>
10 : #include <linux/poll.h>
11 : #include <linux/nospec.h>
12 : #include <linux/compat.h>
13 : #include <linux/io_uring.h>
14 :
15 : #include <uapi/linux/io_uring.h>
16 :
17 : #include "io_uring.h"
18 : #include "opdef.h"
19 : #include "kbuf.h"
20 : #include "rsrc.h"
21 : #include "rw.h"
22 :
23 : struct io_rw {
24 : /* NOTE: kiocb has the file as the first member, so don't do it here */
25 : struct kiocb kiocb;
26 : u64 addr;
27 : u32 len;
28 : rwf_t flags;
29 : };
30 :
31 : static inline bool io_file_supports_nowait(struct io_kiocb *req)
32 : {
33 0 : return req->flags & REQ_F_SUPPORT_NOWAIT;
34 : }
35 :
36 : #ifdef CONFIG_COMPAT
37 : static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
38 : {
39 : struct compat_iovec __user *uiov;
40 : compat_ssize_t clen;
41 :
42 : uiov = u64_to_user_ptr(rw->addr);
43 : if (!access_ok(uiov, sizeof(*uiov)))
44 : return -EFAULT;
45 : if (__get_user(clen, &uiov->iov_len))
46 : return -EFAULT;
47 : if (clen < 0)
48 : return -EINVAL;
49 :
50 : rw->len = clen;
51 : return 0;
52 : }
53 : #endif
54 :
55 0 : static int io_iov_buffer_select_prep(struct io_kiocb *req)
56 : {
57 : struct iovec __user *uiov;
58 : struct iovec iov;
59 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
60 :
61 0 : if (rw->len != 1)
62 : return -EINVAL;
63 :
64 : #ifdef CONFIG_COMPAT
65 : if (req->ctx->compat)
66 : return io_iov_compat_buffer_select_prep(rw);
67 : #endif
68 :
69 0 : uiov = u64_to_user_ptr(rw->addr);
70 0 : if (copy_from_user(&iov, uiov, sizeof(*uiov)))
71 : return -EFAULT;
72 0 : rw->len = iov.iov_len;
73 0 : return 0;
74 : }
75 :
76 0 : int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
77 : {
78 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
79 : unsigned ioprio;
80 : int ret;
81 :
82 0 : rw->kiocb.ki_pos = READ_ONCE(sqe->off);
83 : /* used for fixed read/write too - just read unconditionally */
84 0 : req->buf_index = READ_ONCE(sqe->buf_index);
85 :
86 0 : if (req->opcode == IORING_OP_READ_FIXED ||
87 : req->opcode == IORING_OP_WRITE_FIXED) {
88 0 : struct io_ring_ctx *ctx = req->ctx;
89 : u16 index;
90 :
91 0 : if (unlikely(req->buf_index >= ctx->nr_user_bufs))
92 : return -EFAULT;
93 0 : index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
94 0 : req->imu = ctx->user_bufs[index];
95 0 : io_req_set_rsrc_node(req, ctx, 0);
96 : }
97 :
98 0 : ioprio = READ_ONCE(sqe->ioprio);
99 0 : if (ioprio) {
100 0 : ret = ioprio_check_cap(ioprio);
101 0 : if (ret)
102 : return ret;
103 :
104 0 : rw->kiocb.ki_ioprio = ioprio;
105 : } else {
106 0 : rw->kiocb.ki_ioprio = get_current_ioprio();
107 : }
108 :
109 0 : rw->addr = READ_ONCE(sqe->addr);
110 0 : rw->len = READ_ONCE(sqe->len);
111 0 : rw->flags = READ_ONCE(sqe->rw_flags);
112 :
113 : /* Have to do this validation here, since by the time io_read() runs
114 : * rw->len might have changed due to buffer selection
115 : */
116 0 : if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
117 0 : ret = io_iov_buffer_select_prep(req);
118 0 : if (ret)
119 : return ret;
120 : }
121 :
122 : return 0;
123 : }
124 :
125 0 : void io_readv_writev_cleanup(struct io_kiocb *req)
126 : {
127 0 : struct io_async_rw *io = req->async_data;
128 :
129 0 : kfree(io->free_iovec);
130 0 : }
131 :
132 : static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
133 : {
134 0 : switch (ret) {
135 : case -EIOCBQUEUED:
136 : break;
137 : case -ERESTARTSYS:
138 : case -ERESTARTNOINTR:
139 : case -ERESTARTNOHAND:
140 : case -ERESTART_RESTARTBLOCK:
141 : /*
142 : * We can't just restart the syscall, since previously
143 : * submitted sqes may already be in progress. Just fail this
144 : * IO with EINTR.
145 : */
146 0 : ret = -EINTR;
147 : fallthrough;
148 : default:
149 0 : kiocb->ki_complete(kiocb, ret);
150 : }
151 : }
152 :
153 : static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
154 : {
155 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
156 :
157 0 : if (rw->kiocb.ki_pos != -1)
158 0 : return &rw->kiocb.ki_pos;
159 :
160 0 : if (!(req->file->f_mode & FMODE_STREAM)) {
161 0 : req->flags |= REQ_F_CUR_POS;
162 0 : rw->kiocb.ki_pos = req->file->f_pos;
163 0 : return &rw->kiocb.ki_pos;
164 : }
165 :
166 0 : rw->kiocb.ki_pos = 0;
167 : return NULL;
168 : }
169 :
170 : static void io_req_task_queue_reissue(struct io_kiocb *req)
171 : {
172 0 : req->io_task_work.func = io_queue_iowq;
173 0 : io_req_task_work_add(req);
174 : }
175 :
176 : #ifdef CONFIG_BLOCK
177 0 : static bool io_resubmit_prep(struct io_kiocb *req)
178 : {
179 0 : struct io_async_rw *io = req->async_data;
180 :
181 0 : if (!req_has_async_data(req))
182 0 : return !io_req_prep_async(req);
183 0 : iov_iter_restore(&io->s.iter, &io->s.iter_state);
184 0 : return true;
185 : }
186 :
187 0 : static bool io_rw_should_reissue(struct io_kiocb *req)
188 : {
189 0 : umode_t mode = file_inode(req->file)->i_mode;
190 0 : struct io_ring_ctx *ctx = req->ctx;
191 :
192 0 : if (!S_ISBLK(mode) && !S_ISREG(mode))
193 : return false;
194 0 : if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
195 0 : !(ctx->flags & IORING_SETUP_IOPOLL)))
196 : return false;
197 : /*
198 : * If ref is dying, we might be running poll reap from the exit work.
199 : * Don't attempt to reissue from that path, just let it fail with
200 : * -EAGAIN.
201 : */
202 0 : if (percpu_ref_is_dying(&ctx->refs))
203 : return false;
204 : /*
205 : * Play it safe and assume it's not safe to re-import and reissue if we're
206 : * not in the original thread group (or in task context).
207 : */
208 0 : if (!same_thread_group(req->task, current) || !in_task())
209 : return false;
210 : return true;
211 : }
212 : #else
213 : static bool io_resubmit_prep(struct io_kiocb *req)
214 : {
215 : return false;
216 : }
217 : static bool io_rw_should_reissue(struct io_kiocb *req)
218 : {
219 : return false;
220 : }
221 : #endif
222 :
223 : static void kiocb_end_write(struct io_kiocb *req)
224 : {
225 : /*
226 : * Tell lockdep we inherited freeze protection from submission
227 : * thread.
228 : */
229 0 : if (req->flags & REQ_F_ISREG) {
230 0 : struct super_block *sb = file_inode(req->file)->i_sb;
231 :
232 0 : __sb_writers_acquired(sb, SB_FREEZE_WRITE);
233 : sb_end_write(sb);
234 : }
235 : }
236 :
237 : /*
238 : * Trigger the notifications after having done some IO, and finish the write
239 : * accounting, if any.
240 : */
241 0 : static void io_req_io_end(struct io_kiocb *req)
242 : {
243 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
244 :
245 0 : if (rw->kiocb.ki_flags & IOCB_WRITE) {
246 0 : kiocb_end_write(req);
247 0 : fsnotify_modify(req->file);
248 : } else {
249 0 : fsnotify_access(req->file);
250 : }
251 0 : }
252 :
253 0 : static bool __io_complete_rw_common(struct io_kiocb *req, long res)
254 : {
255 0 : if (unlikely(res != req->cqe.res)) {
256 0 : if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
257 0 : io_rw_should_reissue(req)) {
258 : /*
259 : * Reissue will start accounting again, finish the
260 : * current cycle.
261 : */
262 0 : io_req_io_end(req);
263 0 : req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
264 0 : return true;
265 : }
266 0 : req_set_fail(req);
267 0 : req->cqe.res = res;
268 : }
269 : return false;
270 : }
271 :
272 : static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
273 : {
274 0 : struct io_async_rw *io = req->async_data;
275 :
276 : /* add previously done IO, if any */
277 0 : if (req_has_async_data(req) && io->bytes_done > 0) {
278 0 : if (res < 0)
279 0 : res = io->bytes_done;
280 : else
281 0 : res += io->bytes_done;
282 : }
283 0 : return res;
284 : }
285 :
286 0 : static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
287 : {
288 0 : io_req_io_end(req);
289 :
290 0 : if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
291 0 : unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
292 :
293 0 : req->cqe.flags |= io_put_kbuf(req, issue_flags);
294 : }
295 0 : io_req_task_complete(req, locked);
296 0 : }
297 :
298 0 : static void io_complete_rw(struct kiocb *kiocb, long res)
299 : {
300 0 : struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
301 0 : struct io_kiocb *req = cmd_to_io_kiocb(rw);
302 :
303 0 : if (__io_complete_rw_common(req, res))
304 : return;
305 0 : io_req_set_res(req, io_fixup_rw_res(req, res), 0);
306 0 : req->io_task_work.func = io_req_rw_complete;
307 : io_req_task_work_add(req);
308 : }
309 :
310 0 : static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
311 : {
312 0 : struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
313 0 : struct io_kiocb *req = cmd_to_io_kiocb(rw);
314 :
315 0 : if (kiocb->ki_flags & IOCB_WRITE)
316 0 : kiocb_end_write(req);
317 0 : if (unlikely(res != req->cqe.res)) {
318 0 : if (res == -EAGAIN && io_rw_should_reissue(req)) {
319 0 : req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
320 0 : return;
321 : }
322 0 : req->cqe.res = res;
323 : }
324 :
325 : /* order with io_iopoll_complete() checking ->iopoll_completed */
326 0 : smp_store_release(&req->iopoll_completed, 1);
327 : }
328 :
329 0 : static int kiocb_done(struct io_kiocb *req, ssize_t ret,
330 : unsigned int issue_flags)
331 : {
332 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
333 0 : unsigned final_ret = io_fixup_rw_res(req, ret);
334 :
335 0 : if (req->flags & REQ_F_CUR_POS)
336 0 : req->file->f_pos = rw->kiocb.ki_pos;
337 0 : if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
338 0 : if (!__io_complete_rw_common(req, ret)) {
339 : /*
340 : * Safe to call io_req_io_end() from here as we're inline
341 : * from the submission path.
342 : */
343 0 : io_req_io_end(req);
344 0 : io_req_set_res(req, final_ret,
345 : io_put_kbuf(req, issue_flags));
346 0 : return IOU_OK;
347 : }
348 : } else {
349 0 : io_rw_done(&rw->kiocb, ret);
350 : }
351 :
352 0 : if (req->flags & REQ_F_REISSUE) {
353 0 : req->flags &= ~REQ_F_REISSUE;
354 0 : if (io_resubmit_prep(req))
355 : io_req_task_queue_reissue(req);
356 : else
357 0 : io_req_task_queue_fail(req, final_ret);
358 : }
359 : return IOU_ISSUE_SKIP_COMPLETE;
360 : }
361 :
362 0 : static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
363 : struct io_rw_state *s,
364 : unsigned int issue_flags)
365 : {
366 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
367 0 : struct iov_iter *iter = &s->iter;
368 0 : u8 opcode = req->opcode;
369 : struct iovec *iovec;
370 : void __user *buf;
371 : size_t sqe_len;
372 : ssize_t ret;
373 :
374 0 : if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
375 0 : ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
376 0 : if (ret)
377 0 : return ERR_PTR(ret);
378 : return NULL;
379 : }
380 :
381 0 : buf = u64_to_user_ptr(rw->addr);
382 0 : sqe_len = rw->len;
383 :
384 0 : if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
385 0 : (req->flags & REQ_F_BUFFER_SELECT)) {
386 0 : if (io_do_buffer_select(req)) {
387 0 : buf = io_buffer_select(req, &sqe_len, issue_flags);
388 0 : if (!buf)
389 : return ERR_PTR(-ENOBUFS);
390 0 : rw->addr = (unsigned long) buf;
391 0 : rw->len = sqe_len;
392 : }
393 :
394 0 : ret = import_ubuf(ddir, buf, sqe_len, iter);
395 0 : if (ret)
396 0 : return ERR_PTR(ret);
397 : return NULL;
398 : }
399 :
400 0 : iovec = s->fast_iov;
401 0 : ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
402 0 : req->ctx->compat);
403 0 : if (unlikely(ret < 0))
404 0 : return ERR_PTR(ret);
405 0 : return iovec;
406 : }
407 :
408 : static inline int io_import_iovec(int rw, struct io_kiocb *req,
409 : struct iovec **iovec, struct io_rw_state *s,
410 : unsigned int issue_flags)
411 : {
412 0 : *iovec = __io_import_iovec(rw, req, s, issue_flags);
413 0 : if (IS_ERR(*iovec))
414 0 : return PTR_ERR(*iovec);
415 :
416 0 : iov_iter_save_state(&s->iter, &s->iter_state);
417 : return 0;
418 : }
419 :
420 : static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
421 : {
422 0 : return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
423 : }
424 :
425 : /*
426 : * For files that don't have ->read_iter() and ->write_iter(), handle them
427 : * by looping over ->read() or ->write() manually.
428 : */
429 0 : static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
430 : {
431 0 : struct kiocb *kiocb = &rw->kiocb;
432 0 : struct file *file = kiocb->ki_filp;
433 0 : ssize_t ret = 0;
434 : loff_t *ppos;
435 :
436 : /*
437 : * Don't support polled IO through this interface, and we can't
438 : * support non-blocking either. For the latter, this just causes
439 : * the kiocb to be handled from an async context.
440 : */
441 0 : if (kiocb->ki_flags & IOCB_HIPRI)
442 : return -EOPNOTSUPP;
443 0 : if ((kiocb->ki_flags & IOCB_NOWAIT) &&
444 0 : !(kiocb->ki_filp->f_flags & O_NONBLOCK))
445 : return -EAGAIN;
446 :
447 0 : ppos = io_kiocb_ppos(kiocb);
448 :
449 0 : while (iov_iter_count(iter)) {
450 : struct iovec iovec;
451 : ssize_t nr;
452 :
453 0 : if (iter_is_ubuf(iter)) {
454 0 : iovec.iov_base = iter->ubuf + iter->iov_offset;
455 0 : iovec.iov_len = iov_iter_count(iter);
456 0 : } else if (!iov_iter_is_bvec(iter)) {
457 : iovec = iov_iter_iovec(iter);
458 : } else {
459 0 : iovec.iov_base = u64_to_user_ptr(rw->addr);
460 0 : iovec.iov_len = rw->len;
461 : }
462 :
463 0 : if (ddir == READ) {
464 0 : nr = file->f_op->read(file, iovec.iov_base,
465 : iovec.iov_len, ppos);
466 : } else {
467 0 : nr = file->f_op->write(file, iovec.iov_base,
468 : iovec.iov_len, ppos);
469 : }
470 :
471 0 : if (nr < 0) {
472 0 : if (!ret)
473 0 : ret = nr;
474 : break;
475 : }
476 0 : ret += nr;
477 0 : if (!iov_iter_is_bvec(iter)) {
478 0 : iov_iter_advance(iter, nr);
479 : } else {
480 0 : rw->addr += nr;
481 0 : rw->len -= nr;
482 0 : if (!rw->len)
483 : break;
484 : }
485 0 : if (nr != iovec.iov_len)
486 : break;
487 : }
488 :
489 : return ret;
490 : }
491 :
492 0 : static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
493 : const struct iovec *fast_iov, struct iov_iter *iter)
494 : {
495 0 : struct io_async_rw *io = req->async_data;
496 :
497 0 : memcpy(&io->s.iter, iter, sizeof(*iter));
498 0 : io->free_iovec = iovec;
499 0 : io->bytes_done = 0;
500 : /* can only be fixed buffers, no need to do anything */
501 0 : if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
502 : return;
503 0 : if (!iovec) {
504 0 : unsigned iov_off = 0;
505 :
506 0 : io->s.iter.iov = io->s.fast_iov;
507 0 : if (iter->iov != fast_iov) {
508 0 : iov_off = iter->iov - fast_iov;
509 0 : io->s.iter.iov += iov_off;
510 : }
511 0 : if (io->s.fast_iov != fast_iov)
512 0 : memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
513 0 : sizeof(struct iovec) * iter->nr_segs);
514 : } else {
515 0 : req->flags |= REQ_F_NEED_CLEANUP;
516 : }
517 : }
518 :
519 0 : static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
520 : struct io_rw_state *s, bool force)
521 : {
522 0 : if (!force && !io_cold_defs[req->opcode].prep_async)
523 : return 0;
524 0 : if (!req_has_async_data(req)) {
525 : struct io_async_rw *iorw;
526 :
527 0 : if (io_alloc_async_data(req)) {
528 0 : kfree(iovec);
529 0 : return -ENOMEM;
530 : }
531 :
532 0 : io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
533 0 : iorw = req->async_data;
534 : /* we've copied and mapped the iter, ensure state is saved */
535 0 : iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
536 : }
537 : return 0;
538 : }
539 :
540 0 : static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
541 : {
542 0 : struct io_async_rw *iorw = req->async_data;
543 : struct iovec *iov;
544 : int ret;
545 :
546 : /* submission path, ->uring_lock should already be taken */
547 0 : ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
548 0 : if (unlikely(ret < 0))
549 : return ret;
550 :
551 0 : iorw->bytes_done = 0;
552 0 : iorw->free_iovec = iov;
553 0 : if (iov)
554 0 : req->flags |= REQ_F_NEED_CLEANUP;
555 : return 0;
556 : }
557 :
558 0 : int io_readv_prep_async(struct io_kiocb *req)
559 : {
560 0 : return io_rw_prep_async(req, ITER_DEST);
561 : }
562 :
563 0 : int io_writev_prep_async(struct io_kiocb *req)
564 : {
565 0 : return io_rw_prep_async(req, ITER_SOURCE);
566 : }
567 :
568 : /*
569 : * This is our waitqueue callback handler, registered through __folio_lock_async()
570 : * when we initially tried to do the IO with the iocb and armed our waitqueue.
571 : * This gets called when the page is unlocked, and we generally expect that to
572 : * happen when the page IO is completed and the page is now uptodate. This will
573 : * queue a task_work based retry of the operation, attempting to copy the data
574 : * again. If the latter fails because the page was NOT uptodate, then we will
575 : * do a thread based blocking retry of the operation. That's the unexpected
576 : * slow path.
577 : */
578 0 : static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
579 : int sync, void *arg)
580 : {
581 : struct wait_page_queue *wpq;
582 0 : struct io_kiocb *req = wait->private;
583 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
584 0 : struct wait_page_key *key = arg;
585 :
586 0 : wpq = container_of(wait, struct wait_page_queue, wait);
587 :
588 0 : if (!wake_page_match(wpq, key))
589 : return 0;
590 :
591 0 : rw->kiocb.ki_flags &= ~IOCB_WAITQ;
592 0 : list_del_init(&wait->entry);
593 0 : io_req_task_queue(req);
594 0 : return 1;
595 : }
596 :
597 : /*
598 : * This controls whether a given IO request should be armed for async page
599 : * based retry. If we return false here, the request is handed to the async
600 : * worker threads for retry. If we're doing buffered reads on a regular file,
601 : * we prepare a private wait_page_queue entry and retry the operation. This
602 : * will either succeed because the page is now uptodate and unlocked, or it
603 : * will register a callback when the page is unlocked at IO completion. Through
604 : * that callback, io_uring uses task_work to setup a retry of the operation.
605 : * That retry will attempt the buffered read again. The retry will generally
606 : * succeed, or in rare cases where it fails, we then fall back to using the
607 : * async worker threads for a blocking retry.
608 : */
609 0 : static bool io_rw_should_retry(struct io_kiocb *req)
610 : {
611 0 : struct io_async_rw *io = req->async_data;
612 0 : struct wait_page_queue *wait = &io->wpq;
613 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
614 0 : struct kiocb *kiocb = &rw->kiocb;
615 :
616 : /* never retry for NOWAIT, we just complete with -EAGAIN */
617 0 : if (req->flags & REQ_F_NOWAIT)
618 : return false;
619 :
620 : /* Only for buffered IO */
621 0 : if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
622 : return false;
623 :
624 : /*
625 : * just use poll if we can, and don't attempt if the fs doesn't
626 : * support callback based unlocks
627 : */
628 0 : if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
629 : return false;
630 :
631 0 : wait->wait.func = io_async_buf_func;
632 0 : wait->wait.private = req;
633 0 : wait->wait.flags = 0;
634 0 : INIT_LIST_HEAD(&wait->wait.entry);
635 0 : kiocb->ki_flags |= IOCB_WAITQ;
636 0 : kiocb->ki_flags &= ~IOCB_NOWAIT;
637 0 : kiocb->ki_waitq = wait;
638 0 : return true;
639 : }
640 :
641 0 : static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
642 : {
643 0 : struct file *file = rw->kiocb.ki_filp;
644 :
645 0 : if (likely(file->f_op->read_iter))
646 0 : return call_read_iter(file, &rw->kiocb, iter);
647 0 : else if (file->f_op->read)
648 0 : return loop_rw_iter(READ, rw, iter);
649 : else
650 : return -EINVAL;
651 : }
652 :
653 : static bool need_complete_io(struct io_kiocb *req)
654 : {
655 0 : return req->flags & REQ_F_ISREG ||
656 0 : S_ISBLK(file_inode(req->file)->i_mode);
657 : }
658 :
659 0 : static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
660 : {
661 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
662 0 : struct kiocb *kiocb = &rw->kiocb;
663 0 : struct io_ring_ctx *ctx = req->ctx;
664 0 : struct file *file = req->file;
665 : int ret;
666 :
667 0 : if (unlikely(!file || !(file->f_mode & mode)))
668 : return -EBADF;
669 :
670 0 : if (!io_req_ffs_set(req))
671 0 : req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
672 :
673 0 : kiocb->ki_flags = file->f_iocb_flags;
674 0 : ret = kiocb_set_rw_flags(kiocb, rw->flags);
675 0 : if (unlikely(ret))
676 : return ret;
677 0 : kiocb->ki_flags |= IOCB_ALLOC_CACHE;
678 :
679 : /*
680 : * If the file is marked O_NONBLOCK, still allow retry for it if it
681 : * supports async. Otherwise it's impossible to use O_NONBLOCK files
682 : * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
683 : */
684 0 : if ((kiocb->ki_flags & IOCB_NOWAIT) ||
685 0 : ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
686 0 : req->flags |= REQ_F_NOWAIT;
687 :
688 0 : if (ctx->flags & IORING_SETUP_IOPOLL) {
689 0 : if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
690 : return -EOPNOTSUPP;
691 :
692 0 : kiocb->private = NULL;
693 0 : kiocb->ki_flags |= IOCB_HIPRI;
694 0 : kiocb->ki_complete = io_complete_rw_iopoll;
695 0 : req->iopoll_completed = 0;
696 : } else {
697 0 : if (kiocb->ki_flags & IOCB_HIPRI)
698 : return -EINVAL;
699 0 : kiocb->ki_complete = io_complete_rw;
700 : }
701 :
702 : return 0;
703 : }
704 :
705 0 : int io_read(struct io_kiocb *req, unsigned int issue_flags)
706 : {
707 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
708 0 : struct io_rw_state __s, *s = &__s;
709 : struct iovec *iovec;
710 0 : struct kiocb *kiocb = &rw->kiocb;
711 0 : bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
712 : struct io_async_rw *io;
713 : ssize_t ret, ret2;
714 : loff_t *ppos;
715 :
716 0 : if (!req_has_async_data(req)) {
717 0 : ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
718 0 : if (unlikely(ret < 0))
719 : return ret;
720 : } else {
721 0 : io = req->async_data;
722 0 : s = &io->s;
723 :
724 : /*
725 : * Safe and required to re-import if we're using provided
726 : * buffers, as we dropped the selected one before retry.
727 : */
728 0 : if (io_do_buffer_select(req)) {
729 0 : ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
730 0 : if (unlikely(ret < 0))
731 : return ret;
732 : }
733 :
734 : /*
735 : * We come here from an earlier attempt, restore our state to
736 : * match in case it doesn't. It's cheap enough that we don't
737 : * need to make this conditional.
738 : */
739 0 : iov_iter_restore(&s->iter, &s->iter_state);
740 0 : iovec = NULL;
741 : }
742 0 : ret = io_rw_init_file(req, FMODE_READ);
743 0 : if (unlikely(ret)) {
744 0 : kfree(iovec);
745 0 : return ret;
746 : }
747 0 : req->cqe.res = iov_iter_count(&s->iter);
748 :
749 0 : if (force_nonblock) {
750 : /* If the file doesn't support async, just async punt */
751 0 : if (unlikely(!io_file_supports_nowait(req))) {
752 0 : ret = io_setup_async_rw(req, iovec, s, true);
753 0 : return ret ?: -EAGAIN;
754 : }
755 0 : kiocb->ki_flags |= IOCB_NOWAIT;
756 : } else {
757 : /* Ensure we clear previously set non-block flag */
758 0 : kiocb->ki_flags &= ~IOCB_NOWAIT;
759 : }
760 :
761 0 : ppos = io_kiocb_update_pos(req);
762 :
763 0 : ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
764 0 : if (unlikely(ret)) {
765 0 : kfree(iovec);
766 0 : return ret;
767 : }
768 :
769 0 : ret = io_iter_do_read(rw, &s->iter);
770 :
771 0 : if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
772 0 : req->flags &= ~REQ_F_REISSUE;
773 : /* if we can poll, just do that */
774 0 : if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
775 : return -EAGAIN;
776 : /* IOPOLL retry should happen for io-wq threads */
777 0 : if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
778 : goto done;
779 : /* no retry on NONBLOCK nor RWF_NOWAIT */
780 0 : if (req->flags & REQ_F_NOWAIT)
781 : goto done;
782 : ret = 0;
783 0 : } else if (ret == -EIOCBQUEUED) {
784 0 : if (iovec)
785 0 : kfree(iovec);
786 : return IOU_ISSUE_SKIP_COMPLETE;
787 0 : } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
788 0 : (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
789 : /* read all, failed, already did sync or don't want to retry */
790 : goto done;
791 : }
792 :
793 : /*
794 : * Don't depend on the iter state matching what was consumed, or being
795 : * untouched in case of error. Restore it and we'll advance it
796 : * manually if we need to.
797 : */
798 0 : iov_iter_restore(&s->iter, &s->iter_state);
799 :
800 0 : ret2 = io_setup_async_rw(req, iovec, s, true);
801 0 : iovec = NULL;
802 0 : if (ret2) {
803 0 : ret = ret > 0 ? ret : ret2;
804 : goto done;
805 : }
806 :
807 0 : io = req->async_data;
808 0 : s = &io->s;
809 : /*
810 : * Now use our persistent iterator and state, if we aren't already.
811 : * We've restored and mapped the iter to match.
812 : */
813 :
814 : do {
815 : /*
816 : * We end up here because of a partial read, either from
817 : * above or inside this loop. Advance the iter by the bytes
818 : * that were consumed.
819 : */
820 0 : iov_iter_advance(&s->iter, ret);
821 0 : if (!iov_iter_count(&s->iter))
822 : break;
823 0 : io->bytes_done += ret;
824 0 : iov_iter_save_state(&s->iter, &s->iter_state);
825 :
826 : /* if we can retry, do so with the callbacks armed */
827 0 : if (!io_rw_should_retry(req)) {
828 0 : kiocb->ki_flags &= ~IOCB_WAITQ;
829 0 : return -EAGAIN;
830 : }
831 :
832 0 : req->cqe.res = iov_iter_count(&s->iter);
833 : /*
834 : * Now retry read with the IOCB_WAITQ parts set in the iocb. If
835 : * we get -EIOCBQUEUED, then we'll get a notification when the
836 : * desired page gets unlocked. We can also get a partial read
837 : * here, and if we do, then just retry at the new offset.
838 : */
839 0 : ret = io_iter_do_read(rw, &s->iter);
840 0 : if (ret == -EIOCBQUEUED)
841 : return IOU_ISSUE_SKIP_COMPLETE;
842 : /* we got some bytes, but not all. retry. */
843 0 : kiocb->ki_flags &= ~IOCB_WAITQ;
844 0 : iov_iter_restore(&s->iter, &s->iter_state);
845 0 : } while (ret > 0);
846 : done:
847 : /* it's faster to check here than delegate to kfree */
848 0 : if (iovec)
849 0 : kfree(iovec);
850 0 : return kiocb_done(req, ret, issue_flags);
851 : }
852 :
853 0 : int io_write(struct io_kiocb *req, unsigned int issue_flags)
854 : {
855 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
856 0 : struct io_rw_state __s, *s = &__s;
857 : struct iovec *iovec;
858 0 : struct kiocb *kiocb = &rw->kiocb;
859 0 : bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
860 : ssize_t ret, ret2;
861 : loff_t *ppos;
862 :
863 0 : if (!req_has_async_data(req)) {
864 0 : ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
865 0 : if (unlikely(ret < 0))
866 : return ret;
867 : } else {
868 0 : struct io_async_rw *io = req->async_data;
869 :
870 0 : s = &io->s;
871 0 : iov_iter_restore(&s->iter, &s->iter_state);
872 0 : iovec = NULL;
873 : }
874 0 : ret = io_rw_init_file(req, FMODE_WRITE);
875 0 : if (unlikely(ret)) {
876 0 : kfree(iovec);
877 0 : return ret;
878 : }
879 0 : req->cqe.res = iov_iter_count(&s->iter);
880 :
881 0 : if (force_nonblock) {
882 : /* If the file doesn't support async, just async punt */
883 0 : if (unlikely(!io_file_supports_nowait(req)))
884 : goto copy_iov;
885 :
886 : /* The file path supports NOWAIT for buffered (non-direct) IO only on block devices. */
887 0 : if (!(kiocb->ki_flags & IOCB_DIRECT) &&
888 0 : !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
889 : (req->flags & REQ_F_ISREG))
890 : goto copy_iov;
891 :
892 0 : kiocb->ki_flags |= IOCB_NOWAIT;
893 : } else {
894 : /* Ensure we clear previously set non-block flag */
895 0 : kiocb->ki_flags &= ~IOCB_NOWAIT;
896 : }
897 :
898 0 : ppos = io_kiocb_update_pos(req);
899 :
900 0 : ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
901 0 : if (unlikely(ret)) {
902 0 : kfree(iovec);
903 0 : return ret;
904 : }
905 :
906 : /*
907 : * Open-code file_start_write here to grab freeze protection,
908 : * which will be released by another thread in
909 : * io_complete_rw(). Fool lockdep by telling it the lock got
910 : * released so that it doesn't complain about the held lock when
911 : * we return to userspace.
912 : */
913 0 : if (req->flags & REQ_F_ISREG) {
914 0 : sb_start_write(file_inode(req->file)->i_sb);
915 : __sb_writers_release(file_inode(req->file)->i_sb,
916 : SB_FREEZE_WRITE);
917 : }
918 0 : kiocb->ki_flags |= IOCB_WRITE;
919 :
920 0 : if (likely(req->file->f_op->write_iter))
921 0 : ret2 = call_write_iter(req->file, kiocb, &s->iter);
922 0 : else if (req->file->f_op->write)
923 0 : ret2 = loop_rw_iter(WRITE, rw, &s->iter);
924 : else
925 : ret2 = -EINVAL;
926 :
927 0 : if (req->flags & REQ_F_REISSUE) {
928 0 : req->flags &= ~REQ_F_REISSUE;
929 0 : ret2 = -EAGAIN;
930 : }
931 :
932 : /*
933 : * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
934 : * retry them without IOCB_NOWAIT.
935 : */
936 0 : if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
937 0 : ret2 = -EAGAIN;
938 : /* no retry on NONBLOCK nor RWF_NOWAIT */
939 0 : if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
940 : goto done;
941 0 : if (!force_nonblock || ret2 != -EAGAIN) {
942 : /* IOPOLL retry should happen for io-wq threads */
943 0 : if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
944 : goto copy_iov;
945 :
946 0 : if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
947 : struct io_async_rw *io;
948 :
949 0 : trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
950 : req->cqe.res, ret2);
951 :
952 : /* This is a partial write. The file pos has already been
953 : * updated, set up the async struct to complete the request
954 : * in the worker. Also update bytes_done to account for
955 : * the bytes already written.
956 : */
957 0 : iov_iter_save_state(&s->iter, &s->iter_state);
958 0 : ret = io_setup_async_rw(req, iovec, s, true);
959 :
960 0 : io = req->async_data;
961 0 : if (io)
962 0 : io->bytes_done += ret2;
963 :
964 0 : if (kiocb->ki_flags & IOCB_WRITE)
965 0 : kiocb_end_write(req);
966 0 : return ret ? ret : -EAGAIN;
967 : }
968 : done:
969 0 : ret = kiocb_done(req, ret2, issue_flags);
970 : } else {
971 : copy_iov:
972 0 : iov_iter_restore(&s->iter, &s->iter_state);
973 0 : ret = io_setup_async_rw(req, iovec, s, false);
974 0 : if (!ret) {
975 0 : if (kiocb->ki_flags & IOCB_WRITE)
976 0 : kiocb_end_write(req);
977 : return -EAGAIN;
978 : }
979 : return ret;
980 : }
981 : /* it's reportedly faster than delegating the null check to kfree() */
982 0 : if (iovec)
983 0 : kfree(iovec);
984 : return ret;
985 : }
986 :
987 0 : static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
988 : {
989 0 : io_commit_cqring_flush(ctx);
990 0 : if (ctx->flags & IORING_SETUP_SQPOLL)
991 : io_cqring_wake(ctx);
992 0 : }
993 :
994 0 : void io_rw_fail(struct io_kiocb *req)
995 : {
996 : int res;
997 :
998 0 : res = io_fixup_rw_res(req, req->cqe.res);
999 0 : io_req_set_res(req, res, req->cqe.flags);
1000 0 : }
1001 :
1002 0 : int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1003 : {
1004 : struct io_wq_work_node *pos, *start, *prev;
1005 0 : unsigned int poll_flags = BLK_POLL_NOSLEEP;
1006 0 : DEFINE_IO_COMP_BATCH(iob);
1007 0 : int nr_events = 0;
1008 :
1009 : /*
1010 : * Only spin for completions if we don't have multiple devices hanging
1011 : * off our complete list.
1012 : */
1013 0 : if (ctx->poll_multi_queue || force_nonspin)
1014 0 : poll_flags |= BLK_POLL_ONESHOT;
1015 :
1016 0 : wq_list_for_each(pos, start, &ctx->iopoll_list) {
1017 0 : struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1018 0 : struct file *file = req->file;
1019 : int ret;
1020 :
1021 : /*
1022 : * Move completed and retryable entries to our local lists.
1023 : * If we find a request that requires polling, break out
1024 : * and complete those lists first, if we have entries there.
1025 : */
1026 0 : if (READ_ONCE(req->iopoll_completed))
1027 : break;
1028 :
1029 0 : if (req->opcode == IORING_OP_URING_CMD) {
1030 : struct io_uring_cmd *ioucmd;
1031 :
1032 0 : ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1033 0 : ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1034 : poll_flags);
1035 : } else {
1036 0 : struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1037 :
1038 0 : ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1039 : }
1040 0 : if (unlikely(ret < 0))
1041 : return ret;
1042 0 : else if (ret)
1043 0 : poll_flags |= BLK_POLL_ONESHOT;
1044 :
1045 : /* iopoll may have completed current req */
1046 0 : if (!rq_list_empty(iob.req_list) ||
1047 0 : READ_ONCE(req->iopoll_completed))
1048 : break;
1049 : }
1050 :
1051 0 : if (!rq_list_empty(iob.req_list))
1052 0 : iob.complete(&iob);
1053 0 : else if (!pos)
1054 : return 0;
1055 :
1056 : prev = start;
1057 0 : wq_list_for_each_resume(pos, prev) {
1058 0 : struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1059 :
1060 : /* order with io_complete_rw_iopoll(), e.g. ->result updates */
1061 0 : if (!smp_load_acquire(&req->iopoll_completed))
1062 : break;
1063 0 : nr_events++;
1064 0 : if (unlikely(req->flags & REQ_F_CQE_SKIP))
1065 0 : continue;
1066 :
1067 0 : req->cqe.flags = io_put_kbuf(req, 0);
1068 0 : if (unlikely(!__io_fill_cqe_req(ctx, req))) {
1069 0 : spin_lock(&ctx->completion_lock);
1070 0 : io_req_cqe_overflow(req);
1071 0 : spin_unlock(&ctx->completion_lock);
1072 : }
1073 : }
1074 :
1075 0 : if (unlikely(!nr_events))
1076 : return 0;
1077 :
1078 0 : io_commit_cqring(ctx);
1079 0 : io_cqring_ev_posted_iopoll(ctx);
1080 0 : pos = start ? start->next : ctx->iopoll_list.first;
1081 0 : wq_list_cut(&ctx->iopoll_list, prev, start);
1082 0 : io_free_batch_list(ctx, pos);
1083 0 : return nr_events;
1084 : }
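For context, a minimal userspace sketch of how the IORING_OP_READ_FIXED path handled above by io_prep_rw() and io_read() is typically driven. This is not part of rw.c; it assumes liburing is available, the file name "testfile" is hypothetical, and error handling is kept minimal.

/*
 * Illustrative sketch (not kernel code): register one fixed buffer and
 * submit a fixed-buffer read. On the kernel side, buf_index is validated
 * against ctx->nr_user_bufs in io_prep_rw() before the read is issued.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Register one fixed buffer; the kernel pins it into ctx->user_bufs. */
	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base || io_uring_register_buffers(&ring, &iov, 1) < 0)
		return 1;

	fd = open("testfile", O_RDONLY);	/* hypothetical input file */
	if (fd < 0)
		return 1;

	/* buf_index 0 refers to the buffer registered above. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret)
		printf("read returned %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}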