// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

#define BGID_ARRAY	64

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

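/*
 * Look up the buffer list for a given buffer group ID. Low group IDs
 * (below BGID_ARRAY) live in a flat array that is allocated on first use;
 * anything above that is tracked in the io_bl_xa xarray. This keeps the
 * common, low-numbered groups on a fast path.
 */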
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	if (ctx->io_bl && bgid < BGID_ARRAY)
		return &ctx->io_bl[bgid];

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	bl->bgid = bgid;
	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
}

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}

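/*
 * Legacy provided buffers: pop the first buffer off the group's list and
 * attach it to the request. A *len of 0 means "take the buffer's full
 * size"; otherwise the transfer is capped to the buffer's length.
 */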
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

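/*
 * Ring-mapped provided buffers: the head index is private to the kernel,
 * while userspace advances the tail as it adds buffers. The acquire load
 * of the tail pairs with the application's store-release when it publishes
 * new entries, so an empty ring (tail == head) is detected safely.
 */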
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	/* mmapped buffers are always contiguous */
	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_mapped)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

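/*
 * For reference, the userspace side of buffer selection. This is an
 * illustrative sketch using liburing (not part of this file); names like
 * `ring`, `fd` and `bgid` are placeholders:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv(sqe, fd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 *	io_uring_submit(&ring);
 *
 *	// on completion, the selected buffer ID rides in cqe->flags
 *	struct io_uring_cqe *cqe;
 *	io_uring_wait_cqe(&ring, &cqe);
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// data landed in the buffer registered under bid
 *	}
 */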
static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
{
	int i;

	ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
				GFP_KERNEL);
	if (!ctx->io_bl)
		return -ENOMEM;

	for (i = 0; i < BGID_ARRAY; i++) {
		INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
		ctx->io_bl[i].bgid = i;
	}

	return 0;
}

static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_mapped) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->is_mmap) {
			struct page *page;

			page = virt_to_head_page(bl->buf_ring);
			if (put_page_testzero(page))
				free_compound_page(page);
			bl->buf_ring = NULL;
			bl->is_mmap = 0;
		} else if (bl->buf_nr_pages) {
			int j;

			for (j = 0; j < bl->buf_nr_pages; j++)
				unpin_user_page(bl->buf_pages[j]);
			kvfree(bl->buf_pages);
			bl->buf_pages = NULL;
			bl->buf_nr_pages = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_mapped = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

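/*
 * Ring teardown: release every buffer group, both the flat BGID_ARRAY
 * entries and the xarray ones, then free the backing pages that
 * io_refill_buffer_cache() allocated for legacy buffer entries.
 */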
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;
	int i;

	if (ctx->io_bl) {
		for (i = 0; i < BGID_ARRAY; i++)
			__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
	}

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		__io_remove_buffers(ctx, bl, -1U);
		kfree(bl);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}

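/*
 * IORING_OP_REMOVE_BUFFERS reuses SQE fields rather than adding new ones:
 * sqe->fd carries the number of buffers to remove and sqe->buf_group the
 * group to remove them from. All other fields must be zero.
 */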
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_mapped)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

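/*
 * IORING_OP_PROVIDE_BUFFERS packs its arguments into existing SQE fields:
 * addr/len describe one contiguous region of nbufs buffers of len bytes
 * each, sqe->fd holds nbufs, and sqe->off the starting buffer ID. A sketch
 * of the userspace side via liburing (illustrative only; `ring`, `mem` and
 * `BGID` are placeholders):
 *
 *	char *mem = malloc(128 * 4096);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// 128 buffers of 4K each, group BGID, buffer IDs starting at 0
 *	io_uring_prep_provide_buffers(sqe, mem, 4096, 128, BGID, 0);
 *	io_uring_submit(&ring);
 */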
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	/* size now holds len * nbufs, as computed by check_mul_overflow() */
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs >= USHRT_MAX)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
					 &ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}

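/*
 * Carve nbufs io_buffer entries off the cache (refilling it as needed) and
 * link them into the group. Buffer IDs are assigned sequentially from
 * pbuf->bid, and each entry points len bytes further into the single user
 * region starting at pbuf->addr.
 */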
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			goto err;
	}

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_mapped) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

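/*
 * Two ways to set up a ring-mapped buffer group: the application allocates
 * the ring itself and the kernel pins those pages (below), or with
 * IOU_PBUF_RING_MMAP the kernel allocates the memory and the application
 * maps it afterwards (io_alloc_pbuf_ring()).
 */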
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br;
	struct page **pages;
	int nr_pages;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	br = page_address(pages[0]);
#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
		int i;

		for (i = 0; i < nr_pages; i++)
			unpin_user_page(pages[i]);
		return -EINVAL;
	}
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_mapped = 1;
	bl->is_mmap = 0;
	return 0;
}

static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
	size_t ring_size;
	void *ptr;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
	ptr = (void *) __get_free_pages(gfp, get_order(ring_size));
	if (!ptr)
		return -ENOMEM;

	bl->buf_ring = ptr;
	bl->is_mapped = 1;
	bl->is_mmap = 1;
	return 0;
}

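/*
 * Userspace counterpart, sketched with liburing (illustrative only;
 * `ring`, `bufs`, `BUF_SIZE`, `ENTRIES` and `BGID` are placeholders). The
 * application registers the ring once, then hands buffers to the kernel by
 * writing descriptors and publishing the new tail with a store-release:
 *
 *	int err;
 *	struct io_uring_buf_ring *br;
 *
 *	br = io_uring_setup_buf_ring(&ring, ENTRIES, BGID, 0, &err);
 *
 *	for (int i = 0; i < ENTRIES; i++)
 *		io_uring_buf_ring_add(br, bufs[i], BUF_SIZE, i,
 *				      io_uring_buf_ring_mask(ENTRIES), i);
 *	io_uring_buf_ring_advance(br, ENTRIES);	// store-release of tail
 */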
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			return ret;
	}

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_mapped || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(&reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree(free_bl);
	return ret;
}

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_mapped)
		return -EINVAL;

	__io_remove_buffers(ctx, bl, -1U);
	if (bl->bgid >= BGID_ARRAY) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		kfree(bl);
	}
	return 0;
}

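/*
 * For IOU_PBUF_RING_MMAP rings the kernel owns the memory; this returns
 * the address backing a given group so the mmap() path can map it into
 * the application. Pinned (user-allocated) rings return NULL.
 */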
void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
{
	struct io_buffer_list *bl;

	bl = io_buffer_get_list(ctx, bgid);
	if (!bl || !bl->is_mmap)
		return NULL;

	return bl->buf_ring;
}