LCOV - code coverage report
Current view:  top level - kernel/sched - completion.c (source / functions)
Test:          coverage.info
Date:          2023-07-19 18:55:55
                      Hit     Total   Coverage
Lines:                43      78      55.1 %
Functions:            7       16      43.8 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : 
       3             : /*
       4             :  * Generic wait-for-completion handler;
       5             :  *
       6             :  * It differs from semaphores in that the default case is the opposite:
       7             :  * wait_for_completion() blocks by default, whereas semaphores do not. The
       8             :  * interface also makes it easy to 'complete' multiple waiting threads,
       9             :  * something which isn't entirely natural for semaphores.
      10             :  *
      11             :  * But more importantly, the primitive documents the usage. Semaphores would
      12             :  * typically be used for exclusion which gives rise to priority inversion.
      13             :  * Waiting for completion is typically a sync point, but not an exclusion point.
      14             :  */
      15             : 
      16             : /**
      17             :  * complete: - signals a single thread waiting on this completion
      18             :  * @x:  holds the state of this particular completion
      19             :  *
      20             :  * This will wake up a single thread waiting on this completion. Threads will be
      21             :  * awakened in the same order in which they were queued.
      22             :  *
      23             :  * See also complete_all(), wait_for_completion() and related routines.
      24             :  *
      25             :  * If this function wakes up a task, it executes a full memory barrier before
      26             :  * accessing the task state.
      27             :  */
      28        1140 : void complete(struct completion *x)
      29             : {
      30             :         unsigned long flags;
      31             : 
      32        1140 :         raw_spin_lock_irqsave(&x->wait.lock, flags);
      33             : 
      34        1140 :         if (x->done != UINT_MAX)
      35        1140 :                 x->done++;
      36        1140 :         swake_up_locked(&x->wait);
      37        2280 :         raw_spin_unlock_irqrestore(&x->wait.lock, flags);
      38        1140 : }
      39             : EXPORT_SYMBOL(complete);
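
The pairing described above is the typical use of this interface: one context
initializes a completion, another calls complete() once the awaited event has
happened, and the first blocks in wait_for_completion() until then. A minimal
sketch, assuming a hypothetical driver with a one-time setup worker (my_dev,
my_worker and my_driver_probe are illustrative names, not part of this file):

        #include <linux/completion.h>
        #include <linux/kthread.h>
        #include <linux/err.h>

        struct my_dev {
                struct completion setup_done;   /* signalled once by the worker */
                struct task_struct *worker;
        };

        static int my_worker(void *arg)
        {
                struct my_dev *dev = arg;

                /* ... one-time hardware setup ... */
                complete(&dev->setup_done);     /* wakes exactly one waiter */
                return 0;
        }

        static int my_driver_probe(struct my_dev *dev)
        {
                init_completion(&dev->setup_done);

                dev->worker = kthread_run(my_worker, dev, "my_worker");
                if (IS_ERR(dev->worker))
                        return PTR_ERR(dev->worker);

                /* Sleeps uninterruptibly until my_worker() calls complete(). */
                wait_for_completion(&dev->setup_done);
                return 0;
        }
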
      40             : 
      41             : /**
      42             :  * complete_all: - signals all threads waiting on this completion
      43             :  * @x:  holds the state of this particular completion
      44             :  *
      45             :  * This will wake up all threads waiting on this particular completion event.
      46             :  *
      47             :  * If this function wakes up a task, it executes a full memory barrier before
      48             :  * accessing the task state.
      49             :  *
      50             :  * Since complete_all() sets the completion of @x permanently to done
      51             :  * to allow multiple waiters to finish, a call to reinit_completion()
      52             :  * must be used on @x if @x is to be used again. The code must make
      53             :  * sure that all waiters have woken and finished before reinitializing
      54             :  * @x. Also note that the function completion_done() cannot be used
      55             :  * to know if there are still waiters after complete_all() has been called.
      56             :  */
      57         603 : void complete_all(struct completion *x)
      58             : {
      59             :         unsigned long flags;
      60             : 
      61             :         lockdep_assert_RT_in_threaded_ctx();
      62             : 
      63         603 :         raw_spin_lock_irqsave(&x->wait.lock, flags);
      64         603 :         x->done = UINT_MAX;
      65        1206 :         swake_up_all_locked(&x->wait);
      66        1206 :         raw_spin_unlock_irqrestore(&x->wait.lock, flags);
      67         603 : }
      68             : EXPORT_SYMBOL(complete_all);
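
The reuse rule spelled out in the comment above can be sketched as follows;
my_broadcast_and_reuse is an illustrative name, and the synchronization that
guarantees all waiters have finished is assumed rather than shown:

        /* Sketch only: broadcast an event, then prepare the completion for reuse. */
        static void my_broadcast_and_reuse(struct completion *event)
        {
                complete_all(event);            /* ->done becomes UINT_MAX */

                /*
                 * The caller must ensure, by its own synchronization (not shown
                 * here), that every waiter has woken and finished with @event
                 * before this point; completion_done() cannot be used for that
                 * check once complete_all() has run.
                 */
                reinit_completion(event);       /* ->done back to 0 for reuse */
        }
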
      69             : 
      70             : static inline long __sched
      71         773 : do_wait_for_common(struct completion *x,
      72             :                    long (*action)(long), long timeout, int state)
      73             : {
      74         773 :         if (!x->done) {
      75         773 :                 DECLARE_SWAITQUEUE(wait);
      76             : 
      77             :                 do {
      78         773 :                         if (signal_pending_state(state, current)) {
      79             :                                 timeout = -ERESTARTSYS;
      80             :                                 break;
      81             :                         }
      82        1546 :                         __prepare_to_swait(&x->wait, &wait);
      83         773 :                         __set_current_state(state);
      84         773 :                         raw_spin_unlock_irq(&x->wait.lock);
      85         773 :                         timeout = action(timeout);
      86         773 :                         raw_spin_lock_irq(&x->wait.lock);
      87         773 :                 } while (!x->done && timeout);
      88        1546 :                 __finish_swait(&x->wait, &wait);
      89         773 :                 if (!x->done)
      90           0 :                         return timeout;
      91             :         }
      92         773 :         if (x->done != UINT_MAX)
      93         773 :                 x->done--;
      94         773 :         return timeout ?: 1;
      95             : }
      96             : 
      97             : static inline long __sched
      98             : __wait_for_common(struct completion *x,
      99             :                   long (*action)(long), long timeout, int state)
     100             : {
     101             :         might_sleep();
     102             : 
     103         773 :         complete_acquire(x);
     104             : 
     105         773 :         raw_spin_lock_irq(&x->wait.lock);
     106         773 :         timeout = do_wait_for_common(x, action, timeout, state);
     107         773 :         raw_spin_unlock_irq(&x->wait.lock);
     108             : 
     109         773 :         complete_release(x);
     110             : 
     111             :         return timeout;
     112             : }
     113             : 
     114             : static long __sched
     115          26 : wait_for_common(struct completion *x, long timeout, int state)
     116             : {
     117         773 :         return __wait_for_common(x, schedule_timeout, timeout, state);
     118             : }
     119             : 
     120             : static long __sched
     121           0 : wait_for_common_io(struct completion *x, long timeout, int state)
     122             : {
     123           0 :         return __wait_for_common(x, io_schedule_timeout, timeout, state);
     124             : }
     125             : 
     126             : /**
     127             :  * wait_for_completion: - waits for completion of a task
     128             :  * @x:  holds the state of this particular completion
     129             :  *
     130             :  * This waits to be signaled for completion of a specific task. It is NOT
     131             :  * interruptible and there is no timeout.
     132             :  *
     133             :  * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
     134             :  * and interrupt capability. Also see complete().
     135             :  */
     136          26 : void __sched wait_for_completion(struct completion *x)
     137             : {
     138          26 :         wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
     139          26 : }
     140             : EXPORT_SYMBOL(wait_for_completion);
     141             : 
     142             : /**
     143             :  * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
     144             :  * @x:  holds the state of this particular completion
     145             :  * @timeout:  timeout value in jiffies
     146             :  *
     147             :  * This waits for either a completion of a specific task to be signaled or for a
     148             :  * specified timeout to expire. The timeout is in jiffies. It is not
     149             :  * interruptible.
     150             :  *
     151             :  * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
     152             :  * till timeout) if completed.
     153             :  */
     154             : unsigned long __sched
     155         367 : wait_for_completion_timeout(struct completion *x, unsigned long timeout)
     156             : {
     157         734 :         return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
     158             : }
     159             : EXPORT_SYMBOL(wait_for_completion_timeout);
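
The return convention documented above (0 on timeout, otherwise at least 1,
i.e. the number of jiffies left) is usually handled like this; the function
name and the 100 ms budget are illustrative assumptions:

        #include <linux/completion.h>
        #include <linux/jiffies.h>
        #include <linux/errno.h>

        static int my_wait_for_done(struct completion *done)
        {
                unsigned long left;

                left = wait_for_completion_timeout(done, msecs_to_jiffies(100));
                if (!left)
                        return -ETIMEDOUT;      /* 0: the timeout expired first */

                /* Positive: completed with 'left' jiffies (>= 1) to spare. */
                return 0;
        }
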
     160             : 
     161             : /**
     162             :  * wait_for_completion_io: - waits for completion of a task
     163             :  * @x:  holds the state of this particular completion
     164             :  *
     165             :  * This waits to be signaled for completion of a specific task. It is NOT
     166             :  * interruptible and there is no timeout. The caller is accounted as waiting
     167             :  * for IO (which traditionally means blkio only).
     168             :  */
     169           0 : void __sched wait_for_completion_io(struct completion *x)
     170             : {
     171           0 :         wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
     172           0 : }
     173             : EXPORT_SYMBOL(wait_for_completion_io);
     174             : 
     175             : /**
     176             :  * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
     177             :  * @x:  holds the state of this particular completion
     178             :  * @timeout:  timeout value in jiffies
     179             :  *
     180             :  * This waits for either a completion of a specific task to be signaled or for a
     181             :  * specified timeout to expire. The timeout is in jiffies. It is not
     182             :  * interruptible. The caller is accounted as waiting for IO (which traditionally
     183             :  * means blkio only).
     184             :  *
     185             :  * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
     186             :  * till timeout) if completed.
     187             :  */
     188             : unsigned long __sched
     189           0 : wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
     190             : {
     191           0 :         return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
     192             : }
     193             : EXPORT_SYMBOL(wait_for_completion_io_timeout);
     194             : 
     195             : /**
     196             :  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
     197             :  * @x:  holds the state of this particular completion
     198             :  *
     199             :  * This waits for completion of a specific task to be signaled. It is
     200             :  * interruptible.
     201             :  *
     202             :  * Return: -ERESTARTSYS if interrupted, 0 if completed.
     203             :  */
     204           0 : int __sched wait_for_completion_interruptible(struct completion *x)
     205             : {
     206           0 :         long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
     207             : 
     208           0 :         if (t == -ERESTARTSYS)
     209           0 :                 return t;
     210             :         return 0;
     211             : }
     212             : EXPORT_SYMBOL(wait_for_completion_interruptible);
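
Callers of the interruptible variant normally propagate -ERESTARTSYS so that
an arriving signal is not silently swallowed. A minimal sketch (the wrapper
name is illustrative):

        static int my_wait_interruptible(struct completion *event)
        {
                int ret;

                ret = wait_for_completion_interruptible(event);
                if (ret)                /* -ERESTARTSYS: a signal ended the wait */
                        return ret;

                /* ret == 0: the completion was signalled normally. */
                return 0;
        }
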
     213             : 
     214             : /**
     215             :  * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
     216             :  * @x:  holds the state of this particular completion
     217             :  * @timeout:  timeout value in jiffies
     218             :  *
     219             :  * This waits for either a completion of a specific task to be signaled or for a
     220             :  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
     221             :  *
     222             :  * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
     223             :  * or number of jiffies left till timeout) if completed.
     224             :  */
     225             : long __sched
     226           0 : wait_for_completion_interruptible_timeout(struct completion *x,
     227             :                                           unsigned long timeout)
     228             : {
     229           0 :         return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
     230             : }
     231             : EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
     232             : 
     233             : /**
     234             :  * wait_for_completion_killable: - waits for completion of a task (killable)
     235             :  * @x:  holds the state of this particular completion
     236             :  *
     237             :  * This waits to be signaled for completion of a specific task. It can be
     238             :  * interrupted by a kill signal.
     239             :  *
     240             :  * Return: -ERESTARTSYS if interrupted, 0 if completed.
     241             :  */
     242         380 : int __sched wait_for_completion_killable(struct completion *x)
     243             : {
     244         380 :         long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
     245             : 
     246         380 :         if (t == -ERESTARTSYS)
     247           0 :                 return t;
     248             :         return 0;
     249             : }
     250             : EXPORT_SYMBOL(wait_for_completion_killable);
     251             : 
     252           0 : int __sched wait_for_completion_state(struct completion *x, unsigned int state)
     253             : {
     254           0 :         long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, state);
     255             : 
     256           0 :         if (t == -ERESTARTSYS)
     257           0 :                 return t;
     258             :         return 0;
     259             : }
     260             : EXPORT_SYMBOL(wait_for_completion_state);
     261             : 
     262             : /**
     263             :  * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
     264             :  * @x:  holds the state of this particular completion
     265             :  * @timeout:  timeout value in jiffies
     266             :  *
     267             :  * This waits for either a completion of a specific task to be
     268             :  * signaled or for a specified timeout to expire. It can be
     269             :  * interrupted by a kill signal. The timeout is in jiffies.
     270             :  *
     271             :  * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
     272             :  * or number of jiffies left till timeout) if completed.
     273             :  */
     274             : long __sched
     275           0 : wait_for_completion_killable_timeout(struct completion *x,
     276             :                                      unsigned long timeout)
     277             : {
     278           0 :         return wait_for_common(x, timeout, TASK_KILLABLE);
     279             : }
     280             : EXPORT_SYMBOL(wait_for_completion_killable_timeout);
     281             : 
     282             : /**
     283             :  *      try_wait_for_completion - try to decrement a completion without blocking
     284             :  *      @x:     completion structure
     285             :  *
     286             :  *      Return: 0 if a decrement cannot be done without blocking
     287             :  *               1 if a decrement succeeded.
     288             :  *
     289             :  *      If a completion is being used as a counting completion,
     290             :  *      attempt to decrement the counter without blocking. This
     291             :  *      enables us to avoid waiting if the resource the completion
     292             :  *      is protecting is not available.
     293             :  */
     294           0 : bool try_wait_for_completion(struct completion *x)
     295             : {
     296             :         unsigned long flags;
     297           0 :         bool ret = true;
     298             : 
     299             :         /*
     300             :          * Since x->done will need to be locked only
     301             :          * in the non-blocking case, we check x->done
     302             :          * first without taking the lock so we can
     303             :          * return early in the blocking case.
     304             :          */
     305           0 :         if (!READ_ONCE(x->done))
     306             :                 return false;
     307             : 
     308           0 :         raw_spin_lock_irqsave(&x->wait.lock, flags);
     309           0 :         if (!x->done)
     310             :                 ret = false;
     311           0 :         else if (x->done != UINT_MAX)
     312           0 :                 x->done--;
     313           0 :         raw_spin_unlock_irqrestore(&x->wait.lock, flags);
     314           0 :         return ret;
     315             : }
     316             : EXPORT_SYMBOL(try_wait_for_completion);
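
For the counting-completion use mentioned above, a caller that must not sleep
can attempt the decrement opportunistically; my_try_get_token is an
illustrative name for such a non-blocking helper:

        static bool my_try_get_token(struct completion *tokens)
        {
                if (!try_wait_for_completion(tokens))
                        return false;   /* would block; caller may retry later */

                /* One count of @tokens was consumed (->done was decremented). */
                return true;
        }
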
     317             : 
     318             : /**
     319             :  *      completion_done - Test to see if a completion has any waiters
     320             :  *      @x:     completion structure
     321             :  *
     322             :  *      Return: 0 if there are waiters (wait_for_completion() in progress)
     323             :  *               1 if there are no waiters.
     324             :  *
     325             :  *      Note: this will always return true if complete_all() was called on @x.
     326             :  */
     327           0 : bool completion_done(struct completion *x)
     328             : {
     329             :         unsigned long flags;
     330             : 
     331           0 :         if (!READ_ONCE(x->done))
     332             :                 return false;
     333             : 
     334             :         /*
     335             :          * If ->done, we need to wait for complete() to release ->wait.lock
     336             :          * otherwise we can end up freeing the completion before complete()
     337             :          * is done referencing it.
     338             :          */
     339           0 :         raw_spin_lock_irqsave(&x->wait.lock, flags);
     340           0 :         raw_spin_unlock_irqrestore(&x->wait.lock, flags);
     341           0 :         return true;
     342             : }
     343             : EXPORT_SYMBOL(completion_done);
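
A sketch of how completion_done() is typically consulted, keeping in mind the
caveat above that it always returns true after complete_all(); the wrapper
name is illustrative:

        static bool my_event_signalled_and_unwaited(struct completion *event)
        {
                /*
                 * false: not signalled yet, or a wait_for_completion() is
                 *        still in progress.
                 * true:  signalled and no waiters remain, so it is safe to
                 *        tear the containing object down.
                 */
                return completion_done(event);
        }
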

Generated by: LCOV version 1.14