Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : #include <linux/kernel.h>
3 : #include <linux/errno.h>
4 : #include <linux/fs.h>
5 : #include <linux/file.h>
6 : #include <linux/mm.h>
7 : #include <linux/slab.h>
8 : #include <linux/poll.h>
9 : #include <linux/hashtable.h>
10 : #include <linux/io_uring.h>
11 :
12 : #include <trace/events/io_uring.h>
13 :
14 : #include <uapi/linux/io_uring.h>
15 :
16 : #include "io_uring.h"
17 : #include "refs.h"
18 : #include "opdef.h"
19 : #include "kbuf.h"
20 : #include "poll.h"
21 : #include "cancel.h"
22 :
23 : struct io_poll_update {
24 : struct file *file;
25 : u64 old_user_data;
26 : u64 new_user_data;
27 : __poll_t events;
28 : bool update_events;
29 : bool update_user_data;
30 : };
31 :
32 : struct io_poll_table {
33 : struct poll_table_struct pt;
34 : struct io_kiocb *req;
35 : int nr_entries;
36 : int error;
37 : bool owning;
38 : /* output value, set only if arm poll returns >0 */
39 : __poll_t result_mask;
40 : };
41 :
42 : #define IO_POLL_CANCEL_FLAG BIT(31)
43 : #define IO_POLL_RETRY_FLAG BIT(30)
44 : #define IO_POLL_REF_MASK GENMASK(29, 0)
45 :
46 : /*
47 : * We usually have 1-2 refs taken, 128 is more than enough and we want to
48 : * maximise the margin between this amount and the moment when it overflows.
49 : */
50 : #define IO_POLL_REF_BIAS 128
51 :
52 : #define IO_WQE_F_DOUBLE 1
53 :
54 : static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
55 : void *key);
56 :
57 : static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
58 : {
59 0 : unsigned long priv = (unsigned long)wqe->private;
60 :
61 0 : return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
62 : }
63 :
64 : static inline bool wqe_is_double(struct wait_queue_entry *wqe)
65 : {
66 0 : unsigned long priv = (unsigned long)wqe->private;
67 :
68 0 : return priv & IO_WQE_F_DOUBLE;
69 : }
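/*
 * The double poll marker lives in the low bit of wqe->private: an io_kiocb
 * pointer is at least word aligned, so bit 0 is free to reuse as
 * IO_WQE_F_DOUBLE. A simplified sketch of the packing done by
 * __io_queue_proc() further down (the real code keys off pt->nr_entries
 * rather than a dedicated flag):
 *
 *	unsigned long wqe_private = (unsigned long)req;
 *
 *	if (is_second_entry)
 *		wqe_private |= IO_WQE_F_DOUBLE;
 *	poll->wait.private = (void *)wqe_private;
 */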
70 :
71 : static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
72 : {
73 : int v;
74 :
75 : /*
76 : * poll_refs are already elevated and we don't have much hope for
77 : * grabbing the ownership. Instead of incrementing set a retry flag
78 : * to notify the loop that there might have been some change.
79 : */
80 0 : v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
81 0 : if (v & IO_POLL_REF_MASK)
82 : return false;
83 0 : return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
84 : }
85 :
86 : /*
87 : * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
88 : * bump it and acquire ownership. It's disallowed to modify a request while not
89 : * owning it, which prevents races when enqueueing task_work and between
90 : * arming poll and wakeups.
91 : */
92 0 : static inline bool io_poll_get_ownership(struct io_kiocb *req)
93 : {
94 0 : if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
95 : return io_poll_get_ownership_slowpath(req);
96 0 : return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
97 : }
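/*
 * Typical wake-side pattern (see io_poll_wake() below): only the context
 * that bumps the refcount from zero goes on to queue the poll task_work;
 * everyone else just leaves a reference behind for the current owner to
 * notice and act on:
 *
 *	if (io_poll_get_ownership(req))
 *		__io_poll_execute(req, mask);
 */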
98 :
99 : static void io_poll_mark_cancelled(struct io_kiocb *req)
100 : {
101 0 : atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
102 : }
103 :
104 : static struct io_poll *io_poll_get_double(struct io_kiocb *req)
105 : {
106 : /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
107 0 : if (req->opcode == IORING_OP_POLL_ADD)
108 0 : return req->async_data;
109 0 : return req->apoll->double_poll;
110 : }
111 :
112 : static struct io_poll *io_poll_get_single(struct io_kiocb *req)
113 : {
114 0 : if (req->opcode == IORING_OP_POLL_ADD)
115 0 : return io_kiocb_to_cmd(req, struct io_poll);
116 0 : return &req->apoll->poll;
117 : }
118 :
119 : static void io_poll_req_insert(struct io_kiocb *req)
120 : {
121 0 : struct io_hash_table *table = &req->ctx->cancel_table;
122 0 : u32 index = hash_long(req->cqe.user_data, table->hash_bits);
123 0 : struct io_hash_bucket *hb = &table->hbs[index];
124 :
125 0 : spin_lock(&hb->lock);
126 0 : hlist_add_head(&req->hash_node, &hb->list);
127 0 : spin_unlock(&hb->lock);
128 : }
129 :
130 : static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
131 : {
132 0 : struct io_hash_table *table = &req->ctx->cancel_table;
133 0 : u32 index = hash_long(req->cqe.user_data, table->hash_bits);
134 0 : spinlock_t *lock = &table->hbs[index].lock;
135 :
136 0 : spin_lock(lock);
137 0 : hash_del(&req->hash_node);
138 0 : spin_unlock(lock);
139 : }
140 :
141 : static void io_poll_req_insert_locked(struct io_kiocb *req)
142 : {
143 0 : struct io_hash_table *table = &req->ctx->cancel_table_locked;
144 0 : u32 index = hash_long(req->cqe.user_data, table->hash_bits);
145 :
146 : lockdep_assert_held(&req->ctx->uring_lock);
147 :
148 0 : hlist_add_head(&req->hash_node, &table->hbs[index].list);
149 : }
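/*
 * Both cancel tables hash by cqe.user_data, so cancelation by user_data
 * (the common IORING_OP_ASYNC_CANCEL and poll update case) only has to walk
 * a single bucket, while cancelation by file or "any" has to scan every
 * bucket, see io_poll_file_find() below.
 */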
150 :
151 0 : static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
152 : {
153 0 : struct io_ring_ctx *ctx = req->ctx;
154 :
155 0 : if (req->flags & REQ_F_HASH_LOCKED) {
156 : /*
157 : * ->cancel_table_locked is protected by ->uring_lock in
158 : * contrast to per bucket spinlocks. Likely, tctx_task_work()
159 : * already grabbed the mutex for us, but there is a chance it
160 : * failed.
161 : */
162 0 : io_tw_lock(ctx, ts);
163 0 : hash_del(&req->hash_node);
164 0 : req->flags &= ~REQ_F_HASH_LOCKED;
165 : } else {
166 0 : io_poll_req_delete(req, ctx);
167 : }
168 0 : }
169 :
170 : static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
171 : {
172 0 : poll->head = NULL;
173 : #define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
174 : /* mask in events that we always want/need */
175 0 : poll->events = events | IO_POLL_UNMASK;
176 0 : INIT_LIST_HEAD(&poll->wait.entry);
177 0 : init_waitqueue_func_entry(&poll->wait, io_poll_wake);
178 : }
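/*
 * IO_POLL_UNMASK above means a poll request also completes on error and
 * hangup conditions even if the user only asked for e.g. EPOLLIN, so it
 * can't be left hanging on a file that will never become readable.
 */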
179 :
180 0 : static inline void io_poll_remove_entry(struct io_poll *poll)
181 : {
182 0 : struct wait_queue_head *head = smp_load_acquire(&poll->head);
183 :
184 0 : if (head) {
185 0 : spin_lock_irq(&head->lock);
186 0 : list_del_init(&poll->wait.entry);
187 0 : poll->head = NULL;
188 0 : spin_unlock_irq(&head->lock);
189 : }
190 0 : }
191 :
192 0 : static void io_poll_remove_entries(struct io_kiocb *req)
193 : {
194 : /*
195 : * Nothing to do if neither of those flags are set. Avoid dipping
196 : * into the poll/apoll/double cachelines if we can.
197 : */
198 0 : if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
199 : return;
200 :
201 : /*
202 : * While we hold the waitqueue lock and the waitqueue is nonempty,
203 : * wake_up_pollfree() will wait for us. However, taking the waitqueue
204 : * lock in the first place can race with the waitqueue being freed.
205 : *
206 : * We solve this as eventpoll does: by taking advantage of the fact that
207 : * all users of wake_up_pollfree() will RCU-delay the actual free. If
208 : * we enter rcu_read_lock() and see that the pointer to the queue is
209 : * non-NULL, we can then lock it without the memory being freed out from
210 : * under us.
211 : *
212 : * Keep holding rcu_read_lock() as long as we hold the queue lock, in
213 : * case the caller deletes the entry from the queue, leaving it empty.
214 : * In that case, only RCU prevents the queue memory from being freed.
215 : */
216 : rcu_read_lock();
217 0 : if (req->flags & REQ_F_SINGLE_POLL)
218 0 : io_poll_remove_entry(io_poll_get_single(req));
219 0 : if (req->flags & REQ_F_DOUBLE_POLL)
220 0 : io_poll_remove_entry(io_poll_get_double(req));
221 : rcu_read_unlock();
222 : }
223 :
224 : enum {
225 : IOU_POLL_DONE = 0,
226 : IOU_POLL_NO_ACTION = 1,
227 : IOU_POLL_REMOVE_POLL_USE_RES = 2,
228 : IOU_POLL_REISSUE = 3,
229 : };
230 :
231 : /*
232 : * All poll tw should go through this. Checks for poll events, manages
233 : * references, does rewait, etc.
234 : *
235 : * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
236 : * required, which means either a spurious wakeup or a multishot CQE was served.
237 : * IOU_POLL_DONE when it's done with the request, then the mask is stored in
238 : * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
239 : * poll and that the result is stored in req->cqe.
240 : */
241 0 : static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
242 : {
243 : int v;
244 :
245 : /* req->task == current here, checking PF_EXITING is safe */
246 0 : if (unlikely(req->task->flags & PF_EXITING))
247 : return -ECANCELED;
248 :
249 : do {
250 0 : v = atomic_read(&req->poll_refs);
251 :
252 0 : if (unlikely(v != 1)) {
253 : /* tw should be the owner and so have some refs */
254 0 : if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
255 : return IOU_POLL_NO_ACTION;
256 0 : if (v & IO_POLL_CANCEL_FLAG)
257 : return -ECANCELED;
258 : /*
259 : * cqe.res contains only events of the first wake up
260 : * and all others are to be lost. Redo vfs_poll() to get
261 : * up to date state.
262 : */
263 0 : if ((v & IO_POLL_REF_MASK) != 1)
264 0 : req->cqe.res = 0;
265 :
266 0 : if (v & IO_POLL_RETRY_FLAG) {
267 0 : req->cqe.res = 0;
268 : /*
269 : * We won't find new events that came in between
270 : * vfs_poll and the ref put unless we clear the
271 : * flag in advance.
272 : */
273 0 : atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
274 0 : v &= ~IO_POLL_RETRY_FLAG;
275 : }
276 : }
277 :
278 : /* the mask was stashed in __io_poll_execute */
279 0 : if (!req->cqe.res) {
280 0 : struct poll_table_struct pt = { ._key = req->apoll_events };
281 0 : req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
282 : /*
283 : * We got woken with a mask, but someone else got to
284 : * it first. The above vfs_poll() doesn't add us back
285 : * to the waitqueue, so if we get nothing back, we
286 : * should be safe and attempt a reissue.
287 : */
288 0 : if (unlikely(!req->cqe.res)) {
289 : /* An armed multishot need not reissue */
290 0 : if (!(req->apoll_events & EPOLLONESHOT))
291 0 : continue;
292 0 : return IOU_POLL_REISSUE;
293 : }
294 : }
295 0 : if (req->apoll_events & EPOLLONESHOT)
296 : return IOU_POLL_DONE;
297 :
298 : /* multishot, just fill a CQE and proceed */
299 0 : if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
300 0 : __poll_t mask = mangle_poll(req->cqe.res &
301 : req->apoll_events);
302 :
303 0 : if (!io_aux_cqe(req, ts->locked, mask,
304 : IORING_CQE_F_MORE, false)) {
305 0 : io_req_set_res(req, mask, 0);
306 0 : return IOU_POLL_REMOVE_POLL_USE_RES;
307 : }
308 : } else {
309 0 : int ret = io_poll_issue(req, ts);
310 0 : if (ret == IOU_STOP_MULTISHOT)
311 : return IOU_POLL_REMOVE_POLL_USE_RES;
312 0 : if (ret < 0)
313 : return ret;
314 : }
315 :
316 : /* force the next iteration to vfs_poll() */
317 0 : req->cqe.res = 0;
318 :
319 : /*
320 : * Release all references, retry if someone tried to restart
321 : * task_work while we were executing it.
322 : */
323 0 : } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
324 0 : IO_POLL_REF_MASK);
325 :
326 : return IOU_POLL_NO_ACTION;
327 : }
328 :
329 0 : void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
330 : {
331 : int ret;
332 :
333 0 : ret = io_poll_check_events(req, ts);
334 0 : if (ret == IOU_POLL_NO_ACTION)
335 : return;
336 0 : io_poll_remove_entries(req);
337 0 : io_poll_tw_hash_eject(req, ts);
338 :
339 0 : if (req->opcode == IORING_OP_POLL_ADD) {
340 0 : if (ret == IOU_POLL_DONE) {
341 : struct io_poll *poll;
342 :
343 0 : poll = io_kiocb_to_cmd(req, struct io_poll);
344 0 : req->cqe.res = mangle_poll(req->cqe.res & poll->events);
345 0 : } else if (ret == IOU_POLL_REISSUE) {
346 0 : io_req_task_submit(req, ts);
347 0 : return;
348 0 : } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
349 0 : req->cqe.res = ret;
350 0 : req_set_fail(req);
351 : }
352 :
353 0 : io_req_set_res(req, req->cqe.res, 0);
354 0 : io_req_task_complete(req, ts);
355 : } else {
356 0 : io_tw_lock(req->ctx, ts);
357 :
358 0 : if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
359 0 : io_req_task_complete(req, ts);
360 0 : else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
361 0 : io_req_task_submit(req, ts);
362 : else
363 0 : io_req_defer_failed(req, ret);
364 : }
365 : }
366 :
367 : static void __io_poll_execute(struct io_kiocb *req, int mask)
368 : {
369 0 : io_req_set_res(req, mask, 0);
370 0 : req->io_task_work.func = io_poll_task_func;
371 :
372 0 : trace_io_uring_task_add(req, mask);
373 0 : io_req_task_work_add(req);
374 : }
375 :
376 0 : static inline void io_poll_execute(struct io_kiocb *req, int res)
377 : {
378 0 : if (io_poll_get_ownership(req))
379 : __io_poll_execute(req, res);
380 0 : }
381 :
382 : static void io_poll_cancel_req(struct io_kiocb *req)
383 : {
384 0 : io_poll_mark_cancelled(req);
385 : /* kick tw, which should complete the request */
386 0 : io_poll_execute(req, 0);
387 : }
388 :
389 : #define IO_ASYNC_POLL_COMMON (EPOLLONESHOT | EPOLLPRI)
390 :
391 0 : static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
392 : {
393 0 : io_poll_mark_cancelled(req);
394 : /* we have to kick tw in case it's not already */
395 0 : io_poll_execute(req, 0);
396 :
397 : /*
398 : * If the waitqueue is being freed early but someone else already
399 : * holds ownership over it, we have to tear down the request as
400 : * best we can. That means immediately removing the request from
401 : * its waitqueue and preventing all further accesses to the
402 : * waitqueue via the request.
403 : */
404 0 : list_del_init(&poll->wait.entry);
405 :
406 : /*
407 : * Careful: this *must* be the last step, since as soon
408 : * as poll->head is NULL'ed out, the request can be
409 : * completed and freed, since the poll task_work handler
410 : * will no longer need to take the waitqueue lock.
411 : */
412 0 : smp_store_release(&poll->head, NULL);
413 0 : return 1;
414 : }
415 :
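/*
 * Waitqueue wake callback. The return value matters for exclusive waiters
 * (EPOLLEXCLUSIVE): returning 0 on an event mismatch lets __wake_up_common()
 * keep walking the queue and wake another waiter instead, while a non-zero
 * return consumes the exclusive wakeup.
 */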
416 0 : static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
417 : void *key)
418 : {
419 0 : struct io_kiocb *req = wqe_to_req(wait);
420 0 : struct io_poll *poll = container_of(wait, struct io_poll, wait);
421 0 : __poll_t mask = key_to_poll(key);
422 :
423 0 : if (unlikely(mask & POLLFREE))
424 0 : return io_pollfree_wake(req, poll);
425 :
426 : /* for instances that support it check for an event match first */
427 0 : if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
428 : return 0;
429 :
430 0 : if (io_poll_get_ownership(req)) {
431 : /*
432 : * If we trigger a multishot poll off our own wakeup path,
433 : * disable multishot as there is a circular dependency between
434 : * CQ posting and triggering the event.
435 : */
436 0 : if (mask & EPOLL_URING_WAKE)
437 0 : poll->events |= EPOLLONESHOT;
438 :
439 : /* optional, saves extra locking for removal in tw handler */
440 0 : if (mask && poll->events & EPOLLONESHOT) {
441 0 : list_del_init(&poll->wait.entry);
442 0 : poll->head = NULL;
443 0 : if (wqe_is_double(wait))
444 0 : req->flags &= ~REQ_F_DOUBLE_POLL;
445 : else
446 0 : req->flags &= ~REQ_F_SINGLE_POLL;
447 : }
448 0 : __io_poll_execute(req, mask);
449 : }
450 : return 1;
451 : }
452 :
453 : /* fails only when polling is already being completed by the first entry */
454 0 : static bool io_poll_double_prepare(struct io_kiocb *req)
455 : {
456 : struct wait_queue_head *head;
457 0 : struct io_poll *poll = io_poll_get_single(req);
458 :
459 : /* head is RCU protected, see io_poll_remove_entries() comments */
460 : rcu_read_lock();
461 0 : head = smp_load_acquire(&poll->head);
462 : /*
463 : * poll arm might not hold ownership and so can race for req->flags with
464 : * io_poll_wake(). There is only one poll entry queued, so serialise with
465 : * it by taking its head lock. As we're still arming, the tw handler
466 : * is not going to be run, so there are no races with it.
467 : */
468 0 : if (head) {
469 0 : spin_lock_irq(&head->lock);
470 0 : req->flags |= REQ_F_DOUBLE_POLL;
471 0 : if (req->opcode == IORING_OP_POLL_ADD)
472 0 : req->flags |= REQ_F_ASYNC_DATA;
473 0 : spin_unlock_irq(&head->lock);
474 : }
475 : rcu_read_unlock();
476 0 : return !!head;
477 : }
478 :
479 0 : static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
480 : struct wait_queue_head *head,
481 : struct io_poll **poll_ptr)
482 : {
483 0 : struct io_kiocb *req = pt->req;
484 0 : unsigned long wqe_private = (unsigned long) req;
485 :
486 : /*
487 : * The file being polled uses multiple waitqueues for poll handling
488 : * (e.g. one for read, one for write). Set up a separate io_poll
489 : * if this happens.
490 : */
491 0 : if (unlikely(pt->nr_entries)) {
492 0 : struct io_poll *first = poll;
493 :
494 : /* double add on the same waitqueue head, ignore */
495 0 : if (first->head == head)
496 : return;
497 : /* already have a 2nd entry, fail a third attempt */
498 0 : if (*poll_ptr) {
499 0 : if ((*poll_ptr)->head == head)
500 : return;
501 0 : pt->error = -EINVAL;
502 0 : return;
503 : }
504 :
505 0 : poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
506 0 : if (!poll) {
507 0 : pt->error = -ENOMEM;
508 0 : return;
509 : }
510 :
511 : /* mark as double wq entry */
512 0 : wqe_private |= IO_WQE_F_DOUBLE;
513 0 : io_init_poll_iocb(poll, first->events);
514 0 : if (!io_poll_double_prepare(req)) {
515 : /* the request is completing, just back off */
516 0 : kfree(poll);
517 0 : return;
518 : }
519 0 : *poll_ptr = poll;
520 : } else {
521 : /* fine to modify, there is no poll queued to race with us */
522 0 : req->flags |= REQ_F_SINGLE_POLL;
523 : }
524 :
525 0 : pt->nr_entries++;
526 0 : poll->head = head;
527 0 : poll->wait.private = (void *) wqe_private;
528 :
529 0 : if (poll->events & EPOLLEXCLUSIVE)
530 0 : add_wait_queue_exclusive(head, &poll->wait);
531 : else
532 0 : add_wait_queue(head, &poll->wait);
533 : }
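/*
 * Some files drive poll through more than one waitqueue (e.g. separate wait
 * heads for read and write readiness), in which case vfs_poll() invokes the
 * queue proc once per head. The first head uses the io_poll embedded in the
 * request (or apoll->poll), the second one gets a GFP_ATOMIC allocation
 * stashed in ->async_data / apoll->double_poll, and a third distinct head
 * fails the arming with -EINVAL.
 */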
534 :
535 0 : static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
536 : struct poll_table_struct *p)
537 : {
538 0 : struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
539 0 : struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
540 :
541 0 : __io_queue_proc(poll, pt, head,
542 0 : (struct io_poll **) &pt->req->async_data);
543 0 : }
544 :
545 : static bool io_poll_can_finish_inline(struct io_kiocb *req,
546 : struct io_poll_table *pt)
547 : {
548 0 : return pt->owning || io_poll_get_ownership(req);
549 : }
550 :
551 0 : static void io_poll_add_hash(struct io_kiocb *req)
552 : {
553 0 : if (req->flags & REQ_F_HASH_LOCKED)
554 : io_poll_req_insert_locked(req);
555 : else
556 : io_poll_req_insert(req);
557 0 : }
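/*
 * REQ_F_HASH_LOCKED selects between the two cancel tables: requests whose
 * completion path is known to hold ->uring_lock (apoll by default, plus
 * SQPOLL/single-issuer rings, see io_poll_add()) go into
 * ->cancel_table_locked and skip the per-bucket spinlocks, while everything
 * else, io-wq issued requests in particular, uses the spinlock protected
 * ->cancel_table.
 */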
558 :
559 : /*
560 : * Returns 0 when it's handed over for polling. The caller owns the request if
561 : * it returns non-zero, but otherwise should not touch it. Negative values
562 : * contain an error code. When the result is >0, the polling has completed
563 : * inline and ipt.result_mask is set to the mask.
564 : */
565 0 : static int __io_arm_poll_handler(struct io_kiocb *req,
566 : struct io_poll *poll,
567 : struct io_poll_table *ipt, __poll_t mask,
568 : unsigned issue_flags)
569 : {
570 0 : struct io_ring_ctx *ctx = req->ctx;
571 :
572 0 : INIT_HLIST_NODE(&req->hash_node);
573 0 : req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
574 0 : io_init_poll_iocb(poll, mask);
575 0 : poll->file = req->file;
576 0 : req->apoll_events = poll->events;
577 :
578 0 : ipt->pt._key = mask;
579 0 : ipt->req = req;
580 0 : ipt->error = 0;
581 0 : ipt->nr_entries = 0;
582 : /*
583 : * Polling is either completed here or via task_work, so if we're in the
584 : * task context we're naturally serialised with tw by merit of running
585 : * the same task. When it's io-wq, take the ownership to prevent tw
586 : * from running. However, when we're in the task context, skip taking
587 : * it as an optimisation.
588 : *
589 : * Note: even though the request won't be completed/freed, without
590 : * ownership we still can race with io_poll_wake().
591 : * io_poll_can_finish_inline() tries to deal with that.
592 : */
593 0 : ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
594 0 : atomic_set(&req->poll_refs, (int)ipt->owning);
595 :
596 : /* io-wq doesn't hold uring_lock */
597 0 : if (issue_flags & IO_URING_F_UNLOCKED)
598 0 : req->flags &= ~REQ_F_HASH_LOCKED;
599 :
600 0 : mask = vfs_poll(req->file, &ipt->pt) & poll->events;
601 :
602 0 : if (unlikely(ipt->error || !ipt->nr_entries)) {
603 0 : io_poll_remove_entries(req);
604 :
605 0 : if (!io_poll_can_finish_inline(req, ipt)) {
606 0 : io_poll_mark_cancelled(req);
607 0 : return 0;
608 0 : } else if (mask && (poll->events & EPOLLET)) {
609 0 : ipt->result_mask = mask;
610 0 : return 1;
611 : }
612 0 : return ipt->error ?: -EINVAL;
613 : }
614 :
615 0 : if (mask &&
616 0 : ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
617 0 : if (!io_poll_can_finish_inline(req, ipt)) {
618 0 : io_poll_add_hash(req);
619 0 : return 0;
620 : }
621 0 : io_poll_remove_entries(req);
622 0 : ipt->result_mask = mask;
623 : /* no one else has access to the req, forget about the ref */
624 0 : return 1;
625 : }
626 :
627 0 : io_poll_add_hash(req);
628 :
629 0 : if (mask && (poll->events & EPOLLET) &&
630 0 : io_poll_can_finish_inline(req, ipt)) {
631 0 : __io_poll_execute(req, mask);
632 0 : return 0;
633 : }
634 :
635 0 : if (ipt->owning) {
636 : /*
637 : * Try to release ownership. If we see a change of state, e.g.
638 : * poll was woken up, queue up a tw and it'll deal with it.
639 : */
640 0 : if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
641 : __io_poll_execute(req, 0);
642 : }
643 : return 0;
644 : }
645 :
646 0 : static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
647 : struct poll_table_struct *p)
648 : {
649 0 : struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
650 0 : struct async_poll *apoll = pt->req->apoll;
651 :
652 0 : __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
653 0 : }
654 :
655 : /*
656 : * We can't reliably detect loops where a poll keeps triggering and the
657 : * subsequent issue keeps failing. Rather than failing these immediately,
658 : * allow a certain number of retries before we give up. Given that this condition
659 : * should _rarely_ trigger even once, we should be fine with a larger value.
660 : */
661 : #define APOLL_MAX_RETRY 128
662 :
663 0 : static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
664 : unsigned issue_flags)
665 : {
666 0 : struct io_ring_ctx *ctx = req->ctx;
667 : struct io_cache_entry *entry;
668 : struct async_poll *apoll;
669 :
670 0 : if (req->flags & REQ_F_POLLED) {
671 0 : apoll = req->apoll;
672 0 : kfree(apoll->double_poll);
673 0 : } else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
674 0 : entry = io_alloc_cache_get(&ctx->apoll_cache);
675 0 : if (entry == NULL)
676 : goto alloc_apoll;
677 0 : apoll = container_of(entry, struct async_poll, cache);
678 0 : apoll->poll.retries = APOLL_MAX_RETRY;
679 : } else {
680 : alloc_apoll:
681 0 : apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
682 0 : if (unlikely(!apoll))
683 : return NULL;
684 0 : apoll->poll.retries = APOLL_MAX_RETRY;
685 : }
686 0 : apoll->double_poll = NULL;
687 0 : req->apoll = apoll;
688 0 : if (unlikely(!--apoll->poll.retries))
689 : return NULL;
690 0 : return apoll;
691 : }
692 :
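/*
 * Arm poll-driven retry for a request whose file wasn't ready at issue time.
 * IO_APOLL_OK means the request is parked on the file's waitqueue and will be
 * re-issued from task_work once readiness is signalled, IO_APOLL_READY means
 * the file is already ready and the caller should retry the issue inline, and
 * IO_APOLL_ABORTED means poll couldn't be armed and the caller has to fall
 * back to another method, typically an io-wq punt.
 */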
693 0 : int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
694 : {
695 0 : const struct io_issue_def *def = &io_issue_defs[req->opcode];
696 : struct async_poll *apoll;
697 : struct io_poll_table ipt;
698 0 : __poll_t mask = POLLPRI | POLLERR | EPOLLET;
699 : int ret;
700 :
701 : /*
702 : * apoll requests already grab the mutex to complete in the tw handler,
703 : * so removal from the mutex-backed hash is free, use it by default.
704 : */
705 0 : req->flags |= REQ_F_HASH_LOCKED;
706 :
707 0 : if (!def->pollin && !def->pollout)
708 : return IO_APOLL_ABORTED;
709 0 : if (!file_can_poll(req->file))
710 : return IO_APOLL_ABORTED;
711 0 : if (!(req->flags & REQ_F_APOLL_MULTISHOT))
712 0 : mask |= EPOLLONESHOT;
713 :
714 0 : if (def->pollin) {
715 0 : mask |= EPOLLIN | EPOLLRDNORM;
716 :
717 : /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
718 0 : if (req->flags & REQ_F_CLEAR_POLLIN)
719 0 : mask &= ~EPOLLIN;
720 : } else {
721 0 : mask |= EPOLLOUT | EPOLLWRNORM;
722 : }
723 0 : if (def->poll_exclusive)
724 0 : mask |= EPOLLEXCLUSIVE;
725 :
726 0 : apoll = io_req_alloc_apoll(req, issue_flags);
727 0 : if (!apoll)
728 : return IO_APOLL_ABORTED;
729 0 : req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
730 0 : req->flags |= REQ_F_POLLED;
731 0 : ipt.pt._qproc = io_async_queue_proc;
732 :
733 0 : io_kbuf_recycle(req, issue_flags);
734 :
735 0 : ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
736 0 : if (ret)
737 0 : return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
738 : trace_io_uring_poll_arm(req, mask, apoll->poll.events);
739 : return IO_APOLL_OK;
740 : }
741 :
742 0 : static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
743 : struct io_hash_table *table,
744 : bool cancel_all)
745 : {
746 0 : unsigned nr_buckets = 1U << table->hash_bits;
747 : struct hlist_node *tmp;
748 : struct io_kiocb *req;
749 0 : bool found = false;
750 : int i;
751 :
752 0 : for (i = 0; i < nr_buckets; i++) {
753 0 : struct io_hash_bucket *hb = &table->hbs[i];
754 :
755 0 : spin_lock(&hb->lock);
756 0 : hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
757 0 : if (io_match_task_safe(req, tsk, cancel_all)) {
758 0 : hlist_del_init(&req->hash_node);
759 0 : io_poll_cancel_req(req);
760 0 : found = true;
761 : }
762 : }
763 0 : spin_unlock(&hb->lock);
764 : }
765 0 : return found;
766 : }
767 :
768 : /*
769 : * Returns true if we found and killed one or more poll requests
770 : */
771 0 : __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
772 : bool cancel_all)
773 : __must_hold(&ctx->uring_lock)
774 : {
775 : bool ret;
776 :
777 0 : ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
778 0 : ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
779 0 : return ret;
780 : }
781 :
782 0 : static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
783 : struct io_cancel_data *cd,
784 : struct io_hash_table *table,
785 : struct io_hash_bucket **out_bucket)
786 : {
787 : struct io_kiocb *req;
788 0 : u32 index = hash_long(cd->data, table->hash_bits);
789 0 : struct io_hash_bucket *hb = &table->hbs[index];
790 :
791 0 : *out_bucket = NULL;
792 :
793 0 : spin_lock(&hb->lock);
794 0 : hlist_for_each_entry(req, &hb->list, hash_node) {
795 0 : if (cd->data != req->cqe.user_data)
796 0 : continue;
797 0 : if (poll_only && req->opcode != IORING_OP_POLL_ADD)
798 0 : continue;
799 0 : if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
800 0 : if (cd->seq == req->work.cancel_seq)
801 0 : continue;
802 0 : req->work.cancel_seq = cd->seq;
803 : }
804 0 : *out_bucket = hb;
805 0 : return req;
806 : }
807 0 : spin_unlock(&hb->lock);
808 0 : return NULL;
809 : }
810 :
811 0 : static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
812 : struct io_cancel_data *cd,
813 : struct io_hash_table *table,
814 : struct io_hash_bucket **out_bucket)
815 : {
816 0 : unsigned nr_buckets = 1U << table->hash_bits;
817 : struct io_kiocb *req;
818 : int i;
819 :
820 0 : *out_bucket = NULL;
821 :
822 0 : for (i = 0; i < nr_buckets; i++) {
823 0 : struct io_hash_bucket *hb = &table->hbs[i];
824 :
825 0 : spin_lock(&hb->lock);
826 0 : hlist_for_each_entry(req, &hb->list, hash_node) {
827 0 : if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
828 0 : req->file != cd->file)
829 0 : continue;
830 0 : if (cd->seq == req->work.cancel_seq)
831 0 : continue;
832 0 : req->work.cancel_seq = cd->seq;
833 0 : *out_bucket = hb;
834 : return req;
835 : }
836 0 : spin_unlock(&hb->lock);
837 : }
838 : return NULL;
839 : }
840 :
841 0 : static int io_poll_disarm(struct io_kiocb *req)
842 : {
843 0 : if (!req)
844 : return -ENOENT;
845 0 : if (!io_poll_get_ownership(req))
846 : return -EALREADY;
847 0 : io_poll_remove_entries(req);
848 0 : hash_del(&req->hash_node);
849 : return 0;
850 : }
851 :
852 0 : static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
853 : struct io_hash_table *table)
854 : {
855 : struct io_hash_bucket *bucket;
856 : struct io_kiocb *req;
857 :
858 0 : if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
859 0 : req = io_poll_file_find(ctx, cd, table, &bucket);
860 : else
861 0 : req = io_poll_find(ctx, false, cd, table, &bucket);
862 :
863 0 : if (req)
864 : io_poll_cancel_req(req);
865 0 : if (bucket)
866 0 : spin_unlock(&bucket->lock);
867 0 : return req ? 0 : -ENOENT;
868 : }
869 :
870 0 : int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
871 : unsigned issue_flags)
872 : {
873 : int ret;
874 :
875 0 : ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
876 0 : if (ret != -ENOENT)
877 : return ret;
878 :
879 0 : io_ring_submit_lock(ctx, issue_flags);
880 0 : ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
881 : io_ring_submit_unlock(ctx, issue_flags);
882 : return ret;
883 : }
884 :
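/*
 * sqe->poll32_events carries the userspace POLL* encoding;
 * demangle_poll()/mangle_poll() convert between that and the kernel's EPOLL*
 * bit layout, which differ on a few architectures. The swahw32() on
 * big-endian is there because the 32-bit field overlays the original 16-bit
 * poll_events member, so the halfwords must be swapped to preserve the old
 * ABI.
 */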
885 : static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
886 : unsigned int flags)
887 : {
888 : u32 events;
889 :
890 0 : events = READ_ONCE(sqe->poll32_events);
891 : #ifdef __BIG_ENDIAN
892 : events = swahw32(events);
893 : #endif
894 0 : if (!(flags & IORING_POLL_ADD_MULTI))
895 0 : events |= EPOLLONESHOT;
896 0 : if (!(flags & IORING_POLL_ADD_LEVEL))
897 0 : events |= EPOLLET;
898 0 : return demangle_poll(events) |
899 0 : (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
900 : }
901 :
902 0 : int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
903 : {
904 0 : struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
905 : u32 flags;
906 :
907 0 : if (sqe->buf_index || sqe->splice_fd_in)
908 : return -EINVAL;
909 0 : flags = READ_ONCE(sqe->len);
910 0 : if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
911 : IORING_POLL_ADD_MULTI))
912 : return -EINVAL;
913 : /* meaningless without update */
914 0 : if (flags == IORING_POLL_ADD_MULTI)
915 : return -EINVAL;
916 :
917 0 : upd->old_user_data = READ_ONCE(sqe->addr);
918 0 : upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
919 0 : upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
920 :
921 0 : upd->new_user_data = READ_ONCE(sqe->off);
922 0 : if (!upd->update_user_data && upd->new_user_data)
923 : return -EINVAL;
924 0 : if (upd->update_events)
925 0 : upd->events = io_poll_parse_events(sqe, flags);
926 0 : else if (sqe->poll32_events)
927 : return -EINVAL;
928 :
929 : return 0;
930 : }
931 :
932 0 : int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
933 : {
934 0 : struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
935 : u32 flags;
936 :
937 0 : if (sqe->buf_index || sqe->off || sqe->addr)
938 : return -EINVAL;
939 0 : flags = READ_ONCE(sqe->len);
940 0 : if (flags & ~IORING_POLL_ADD_MULTI)
941 : return -EINVAL;
942 0 : if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
943 : return -EINVAL;
944 :
945 0 : poll->events = io_poll_parse_events(sqe, flags);
946 0 : return 0;
947 : }
948 :
949 0 : int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
950 : {
951 0 : struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
952 : struct io_poll_table ipt;
953 : int ret;
954 :
955 0 : ipt.pt._qproc = io_poll_queue_proc;
956 :
957 : /*
958 : * If sqpoll or single issuer, there is no contention for ->uring_lock
959 : * and we'll end up holding it in tw handlers anyway.
960 : */
961 0 : if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
962 0 : req->flags |= REQ_F_HASH_LOCKED;
963 :
964 0 : ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
965 0 : if (ret > 0) {
966 0 : io_req_set_res(req, ipt.result_mask, 0);
967 0 : return IOU_OK;
968 : }
969 0 : return ret ?: IOU_ISSUE_SKIP_COMPLETE;
970 : }
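/*
 * As seen from userspace, a minimal sketch using liburing's poll helpers: a
 * multishot poll keeps posting CQEs flagged with IORING_CQE_F_MORE, and once
 * the flag is absent the poll has terminated and must be re-armed if still
 * wanted.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data64(sqe, tag);
 *	io_uring_submit(&ring);
 *
 *	io_uring_wait_cqe(&ring, &cqe);
 *	if (!(cqe->flags & IORING_CQE_F_MORE))
 *		rearm_needed = true;
 *	io_uring_cqe_seen(&ring, cqe);
 */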
971 :
972 0 : int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
973 : {
974 0 : struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
975 0 : struct io_cancel_data cd = { .data = poll_update->old_user_data, };
976 0 : struct io_ring_ctx *ctx = req->ctx;
977 : struct io_hash_bucket *bucket;
978 : struct io_kiocb *preq;
979 0 : int ret2, ret = 0;
980 0 : struct io_tw_state ts = { .locked = true };
981 :
982 0 : io_ring_submit_lock(ctx, issue_flags);
983 0 : preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
984 0 : ret2 = io_poll_disarm(preq);
985 0 : if (bucket)
986 0 : spin_unlock(&bucket->lock);
987 0 : if (!ret2)
988 : goto found;
989 0 : if (ret2 != -ENOENT) {
990 : ret = ret2;
991 : goto out;
992 : }
993 :
994 0 : preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
995 0 : ret2 = io_poll_disarm(preq);
996 0 : if (bucket)
997 0 : spin_unlock(&bucket->lock);
998 0 : if (ret2) {
999 : ret = ret2;
1000 : goto out;
1001 : }
1002 :
1003 : found:
1004 0 : if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
1005 : ret = -EFAULT;
1006 : goto out;
1007 : }
1008 :
1009 0 : if (poll_update->update_events || poll_update->update_user_data) {
1010 : /* only update the event mask, keep the behavior flags */
1011 0 : if (poll_update->update_events) {
1012 0 : struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
1013 :
1014 0 : poll->events &= ~0xffff;
1015 0 : poll->events |= poll_update->events & 0xffff;
1016 0 : poll->events |= IO_POLL_UNMASK;
1017 : }
1018 0 : if (poll_update->update_user_data)
1019 0 : preq->cqe.user_data = poll_update->new_user_data;
1020 :
1021 0 : ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
1022 : /* successfully updated, don't complete poll request */
1023 0 : if (!ret2 || ret2 == -EIOCBQUEUED)
1024 : goto out;
1025 : }
1026 :
1027 0 : req_set_fail(preq);
1028 0 : io_req_set_res(preq, -ECANCELED, 0);
1029 0 : io_req_task_complete(preq, &ts);
1030 : out:
1031 0 : io_ring_submit_unlock(ctx, issue_flags);
1032 0 : if (ret < 0) {
1033 0 : req_set_fail(req);
1034 : return ret;
1035 : }
1036 : /* complete update request, we're done with it */
1037 0 : io_req_set_res(req, ret, 0);
1038 0 : return IOU_OK;
1039 : }
1040 :
1041 0 : void io_apoll_cache_free(struct io_cache_entry *entry)
1042 : {
1043 0 : kfree(container_of(entry, struct async_poll, cache));
1044 0 : }