// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

#ifdef CONFIG_BLK_ICQ
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

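/*
 * icqs are freed through an RCU grace period: ioc_release_fn() and
 * ioc_clear_queue() below peek at possibly-destroyed icqs under
 * rcu_read_lock(), relying on the memory staying valid until the
 * callback below runs.
 */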
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

static void ioc_exit_icqs(struct io_context *ioc)
{
	struct io_cq *icq;

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
		ioc_exit_icq(icq);
	spin_unlock_irq(&ioc->lock);
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to @icq and clearing it from @icq
	 * are done under queue_lock.  If it's not pointing to @icq now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
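/*
 * Lock ordering note: queue_lock is the outer lock and ioc->lock the
 * inner one (see ioc_create_icq()), but this worker enters holding only
 * ioc->lock.  It therefore trylocks queue_lock and, on failure, drops
 * ioc->lock and reacquires both in the correct order, rechecking
 * ICQ_DESTROYED since the icq may have been torn down in the meantime.
 */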
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/*
 * Releasing icqs requires reverse order double locking and we may already be
 * holding a queue_lock.  Do it asynchronously from a workqueue.
 */
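/*
 * Returns true if freeing was handed off to the workqueue (the worker
 * frees @ioc once all icqs are unlinked), false if the caller should
 * free @ioc itself.
 */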
static bool ioc_delay_free(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	if (!hlist_empty(&ioc->icq_list)) {
		queue_work(system_power_efficient_wq, &ioc->release_work);
		spin_unlock_irqrestore(&ioc->lock, flags);
		return true;
	}
	spin_unlock_irqrestore(&ioc->lock, flags);
	return false;
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and destroy all io_cq's associated with @q.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

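	/*
	 * queue_lock has been dropped, so entries on @icq_list may be
	 * destroyed concurrently.  RCU keeps each icq's memory valid long
	 * enough to take ioc->lock and recheck ICQ_DESTROYED.
	 */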
	rcu_read_lock();
	while (!list_empty(&icq_list)) {
		struct io_cq *icq =
			list_entry(icq_list.next, struct io_cq, q_node);

		spin_lock_irq(&icq->ioc->lock);
		if (!(icq->flags & ICQ_DESTROYED))
			ioc_destroy_icq(icq);
		spin_unlock_irq(&icq->ioc->lock);
	}
	rcu_read_unlock();
}
#else /* CONFIG_BLK_ICQ */
static inline void ioc_exit_icqs(struct io_context *ioc)
{
}
static inline bool ioc_delay_free(struct io_context *ioc)
{
	return false;
}
#endif /* CONFIG_BLK_ICQ */

/**
 * put_io_context - put a reference to an io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);

/* Called by the exiting task */
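/*
 * active_ref counts tasks actively issuing I/O against the context
 * (CLONE_IO sharers), while refcount counts all remaining references.
 * The last active user runs ioc_exit_icqs() to notify the elevators
 * before dropping its refcount reference.
 */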
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->active_ref)) {
		ioc_exit_icqs(ioc);
		put_io_context(ioc);
	}
}

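/*
 * Allocate a zeroed io_context with refcount and active_ref both set to
 * one, on behalf of the task that will own it.
 */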
static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->active_ref, 1);
#ifdef CONFIG_BLK_ICQ
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
#endif
	ioc->ioprio = IOPRIO_DEFAULT;

	return ioc;
}

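/*
 * Set @task's I/O priority.  The caller must either own @task (target
 * uid matching the caller's uid or euid) or have CAP_SYS_NICE, and the
 * LSM hook security_task_setioprio() may veto the change.
 */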
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	if (unlikely(!task->io_context)) {
		struct io_context *ioc;

		task_unlock(task);

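		/*
		 * Allocate outside task_lock and revalidate below: the task
		 * may have started exiting or gained an io_context while the
		 * lock was dropped.
		 */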
		ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE);
		if (!ioc)
			return -ENOMEM;

		task_lock(task);
		if (task->flags & PF_EXITING) {
			kmem_cache_free(iocontext_cachep, ioc);
			goto out;
		}
		if (task->io_context)
			kmem_cache_free(iocontext_cachep, ioc);
		else
			task->io_context = ioc;
	}
	task->io_context->ioprio = ioprio;
out:
	task_unlock(task);
	return 0;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);

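/*
 * Called at fork time via copy_io() (see <linux/iocontext.h>), and only
 * when the parent task already has an io_context.
 */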
int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;

	/*
	 * Share the io context with the parent if CLONE_IO is set;
	 * otherwise give the child its own context, inheriting only a
	 * non-default I/O priority.
	 */
	if (clone_flags & CLONE_IO) {
		atomic_inc(&ioc->active_ref);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
		if (!tsk->io_context)
			return -ENOMEM;
		tsk->io_context->ioprio = ioc->ioprio;
	}

	return 0;
}

#ifdef CONFIG_BLK_ICQ
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the current task's io_context and
 * @q.  Must be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from the io_context using a radix tree and a
	 * hint pointer, both of which are protected with RCU.  All removals
	 * are done holding both q and ioc locks, and we're holding q lock -
	 * if we find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure an io_cq linking the current task's io_context and @q exists.
 * If the icq doesn't exist, it is created and linked.
 *
 * The caller is responsible for ensuring the io_context won't go away and
 * that @q is alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

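	/*
	 * With a non-blocking mask, radix_tree_maybe_preload() cannot
	 * preallocate nodes, so radix_tree_insert() below may still fail;
	 * the fallback branch there either finds the icq that won the
	 * race or reports the failure.
	 */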
	if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

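/*
 * Find or create the io_cq linking the current task's io_context and @q,
 * taking a reference on the io_context; returns NULL on allocation
 * failure.  A rough elevator-side usage sketch (illustrative only, not
 * taken from this file):
 *
 *	struct io_cq *icq = ioc_find_get_icq(q);
 *
 *	if (icq) {
 *		...use per-(task, queue) scheduler state hung off icq...
 *		put_io_context(icq->ioc);
 *	}
 */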
struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq = NULL;

	if (unlikely(!ioc)) {
		ioc = alloc_io_context(GFP_ATOMIC, q->node);
		if (!ioc)
			return NULL;

		task_lock(current);
		if (current->io_context) {
			kmem_cache_free(iocontext_cachep, ioc);
			ioc = current->io_context;
		} else {
			current->io_context = ioc;
		}

		get_io_context(ioc);
		task_unlock(current);
	} else {
		get_io_context(ioc);

		spin_lock_irq(&q->queue_lock);
		icq = ioc_lookup_icq(q);
		spin_unlock_irq(&q->queue_lock);
	}

	if (!icq) {
		icq = ioc_create_icq(q);
		if (!icq) {
			put_io_context(ioc);
			return NULL;
		}
	}
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);
#endif /* CONFIG_BLK_ICQ */

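/*
 * SLAB_PANIC makes kmem_cache_create() panic on failure, so the return
 * value needs no checking here.
 */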
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);
