LCOV - code coverage report
Current view: top level - kernel/locking - mutex.c (source / functions)
Test: coverage.info
Date: 2023-04-06 08:38:28

              Hit    Total    Coverage
Lines:         35      174      20.1 %
Functions:      9       23      39.1 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * kernel/locking/mutex.c
       4             :  *
       5             :  * Mutexes: blocking mutual exclusion locks
       6             :  *
       7             :  * Started by Ingo Molnar:
       8             :  *
       9             :  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
      10             :  *
      11             :  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
      12             :  * David Howells for suggestions and improvements.
      13             :  *
      14             :  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
      15             :  *    from the -rt tree, where it was originally implemented for rtmutexes
      16             :  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
      17             :  *    and Sven Dietrich.
      18             :  *
      19             :  * Also see Documentation/locking/mutex-design.rst.
      20             :  */
      21             : #include <linux/mutex.h>
      22             : #include <linux/ww_mutex.h>
      23             : #include <linux/sched/signal.h>
      24             : #include <linux/sched/rt.h>
      25             : #include <linux/sched/wake_q.h>
      26             : #include <linux/sched/debug.h>
      27             : #include <linux/export.h>
      28             : #include <linux/spinlock.h>
      29             : #include <linux/interrupt.h>
      30             : #include <linux/debug_locks.h>
      31             : #include <linux/osq_lock.h>
      32             : 
      33             : #define CREATE_TRACE_POINTS
      34             : #include <trace/events/lock.h>
      35             : 
      36             : #ifndef CONFIG_PREEMPT_RT
      37             : #include "mutex.h"
      38             : 
      39             : #ifdef CONFIG_DEBUG_MUTEXES
      40             : # define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
      41             : #else
      42             : # define MUTEX_WARN_ON(cond)
      43             : #endif
      44             : 
      45             : void
      46        1758 : __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
      47             : {
      48        3516 :         atomic_long_set(&lock->owner, 0);
      49             :         raw_spin_lock_init(&lock->wait_lock);
      50        3516 :         INIT_LIST_HEAD(&lock->wait_list);
      51             : #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
      52             :         osq_lock_init(&lock->osq);
      53             : #endif
      54             : 
      55             :         debug_mutex_init(lock, name, key);
      56        1758 : }
      57             : EXPORT_SYMBOL(__mutex_init);
      58             : 
      59             : /*
      60             :  * @owner: contains a 'struct task_struct *' pointing to the current lock
      61             :  * owner; NULL means not owned. Since task_struct pointers are aligned to
      62             :  * at least L1_CACHE_BYTES, we have low bits to store extra state.
      63             :  *
      64             :  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
      65             :  * Bit1 indicates unlock needs to hand the lock to the top-waiter.
      66             :  * Bit2 indicates handoff has been done and we're waiting for pickup.
      67             :  */
      68             : #define MUTEX_FLAG_WAITERS      0x01
      69             : #define MUTEX_FLAG_HANDOFF      0x02
      70             : #define MUTEX_FLAG_PICKUP       0x04
      71             : 
      72             : #define MUTEX_FLAGS             0x07
      73             : 
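
A minimal, self-contained sketch of the owner-word encoding described above: the aligned task_struct pointer occupies the high bits and the three flag bits live in the low bits, mirroring __mutex_owner() and __owner_flags() below. This is an illustrative userspace program, not part of the measured source; the example address is made up.

#include <stdint.h>
#include <stdio.h>

#define MUTEX_FLAG_WAITERS 0x01UL
#define MUTEX_FLAG_HANDOFF 0x02UL
#define MUTEX_FLAG_PICKUP  0x04UL
#define MUTEX_FLAGS        0x07UL

int main(void)
{
	/* Stand-in for the (aligned) address of the owning task_struct. */
	uintptr_t task = 0xffff888012345600UL;
	/* Owned, with a non-empty wait list: unlock must issue a wakeup. */
	uintptr_t owner = task | MUTEX_FLAG_WAITERS;

	printf("owner task: %#lx\n", (unsigned long)(owner & ~MUTEX_FLAGS));
	printf("flags:      %#lx\n", (unsigned long)(owner & MUTEX_FLAGS));
	return 0;
}
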
      74             : /*
      75             :  * Internal helper function; C doesn't allow us to hide it :/
      76             :  *
      77             :  * DO NOT USE (outside of mutex code).
      78             :  */
      79             : static inline struct task_struct *__mutex_owner(struct mutex *lock)
      80             : {
      81         320 :         return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
      82             : }
      83             : 
      84             : static inline struct task_struct *__owner_task(unsigned long owner)
      85             : {
      86           0 :         return (struct task_struct *)(owner & ~MUTEX_FLAGS);
      87             : }
      88             : 
      89         160 : bool mutex_is_locked(struct mutex *lock)
      90             : {
      91         160 :         return __mutex_owner(lock) != NULL;
      92             : }
      93             : EXPORT_SYMBOL(mutex_is_locked);
      94             : 
      95             : static inline unsigned long __owner_flags(unsigned long owner)
      96             : {
      97           0 :         return owner & MUTEX_FLAGS;
      98             : }
      99             : 
     100             : /*
     101             :  * Returns: __mutex_owner(lock) on failure or NULL on success.
     102             :  */
     103           0 : static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
     104             : {
     105           0 :         unsigned long owner, curr = (unsigned long)current;
     106             : 
     107           0 :         owner = atomic_long_read(&lock->owner);
     108             :         for (;;) { /* must loop, can race against a flag */
     109           0 :                 unsigned long flags = __owner_flags(owner);
     110           0 :                 unsigned long task = owner & ~MUTEX_FLAGS;
     111             : 
     112           0 :                 if (task) {
     113           0 :                         if (flags & MUTEX_FLAG_PICKUP) {
     114           0 :                                 if (task != curr)
     115             :                                         break;
     116           0 :                                 flags &= ~MUTEX_FLAG_PICKUP;
     117           0 :                         } else if (handoff) {
     118           0 :                                 if (flags & MUTEX_FLAG_HANDOFF)
     119             :                                         break;
     120           0 :                                 flags |= MUTEX_FLAG_HANDOFF;
     121             :                         } else {
     122             :                                 break;
     123             :                         }
     124             :                 } else {
     125             :                         MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
     126             :                         task = curr;
     127             :                 }
     128             : 
     129           0 :                 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
     130           0 :                         if (task == curr)
     131             :                                 return NULL;
     132             :                         break;
     133             :                 }
     134             :         }
     135             : 
     136           0 :         return __owner_task(owner);
     137             : }
     138             : 
     139             : /*
     140             :  * Trylock or set HANDOFF
     141             :  */
     142             : static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
     143             : {
     144           0 :         return !__mutex_trylock_common(lock, handoff);
     145             : }
     146             : 
     147             : /*
     148             :  * Actual trylock that will work on any unlocked state.
     149             :  */
     150             : static inline bool __mutex_trylock(struct mutex *lock)
     151             : {
     152           0 :         return !__mutex_trylock_common(lock, false);
     153             : }
     154             : 
     155             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
     156             : /*
     157             :  * Lockdep annotations are confined to the slow paths for simplicity.
     158             :  * There is nothing that would stop spreading the lockdep annotations outwards
     159             :  * except more code.
     160             :  */
     161             : 
     162             : /*
     163             :  * Optimistic trylock that only works in the uncontended case. Make sure to
     164             :  * follow with a __mutex_trylock() before failing.
     165             :  */
     166             : static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
     167             : {
     168        5393 :         unsigned long curr = (unsigned long)current;
     169        5393 :         unsigned long zero = 0UL;
     170             : 
     171       10786 :         if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
     172             :                 return true;
     173             : 
     174             :         return false;
     175             : }
     176             : 
     177             : static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
     178             : {
     179        5388 :         unsigned long curr = (unsigned long)current;
     180             : 
     181       10776 :         return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
     182             : }
     183             : #endif
     184             : 
     185             : static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
     186             : {
     187           0 :         atomic_long_or(flag, &lock->owner);
     188             : }
     189             : 
     190             : static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
     191             : {
     192           0 :         atomic_long_andnot(flag, &lock->owner);
     193             : }
     194             : 
     195             : static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
     196             : {
     197           0 :         return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
     198             : }
     199             : 
     200             : /*
     201             :  * Add @waiter to a given location in the lock wait_list and set the
     202             :  * FLAG_WAITERS flag if it's the first waiter.
     203             :  */
     204             : static void
     205             : __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
     206             :                    struct list_head *list)
     207             : {
     208             :         debug_mutex_add_waiter(lock, waiter, current);
     209             : 
     210           0 :         list_add_tail(&waiter->list, list);
     211           0 :         if (__mutex_waiter_is_first(lock, waiter))
     212             :                 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
     213             : }
     214             : 
     215             : static void
     216           0 : __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
     217             : {
     218           0 :         list_del(&waiter->list);
     219           0 :         if (likely(list_empty(&lock->wait_list)))
     220             :                 __mutex_clear_flag(lock, MUTEX_FLAGS);
     221             : 
     222             :         debug_mutex_remove_waiter(lock, waiter, current);
     223           0 : }
     224             : 
     225             : /*
     226             :  * Give up ownership to a specific task; when @task = NULL, this is equivalent
     227             :  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF and preserves
     228             :  * WAITERS. Provides RELEASE semantics like a regular unlock; the
     229             :  * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
     230             :  */
     231             : static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
     232             : {
     233           0 :         unsigned long owner = atomic_long_read(&lock->owner);
     234             : 
     235             :         for (;;) {
     236             :                 unsigned long new;
     237             : 
     238             :                 MUTEX_WARN_ON(__owner_task(owner) != current);
     239             :                 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
     240             : 
     241           0 :                 new = (owner & MUTEX_FLAG_WAITERS);
     242           0 :                 new |= (unsigned long)task;
     243           0 :                 if (task)
     244           0 :                         new |= MUTEX_FLAG_PICKUP;
     245             : 
     246           0 :                 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
     247             :                         break;
     248             :         }
     249             : }
     250             : 
     251             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
     252             : /*
     253             :  * We split the mutex lock/unlock logic into separate fastpath and
     254             :  * slowpath functions, to reduce the register pressure on the fastpath.
     255             :  * We also put the fastpath first in the kernel image, to make sure the
     256             :  * branch is predicted by the CPU as default-untaken.
     257             :  */
     258             : static void __sched __mutex_lock_slowpath(struct mutex *lock);
     259             : 
     260             : /**
     261             :  * mutex_lock - acquire the mutex
     262             :  * @lock: the mutex to be acquired
     263             :  *
     264             :  * Lock the mutex exclusively for this task. If the mutex is not
     265             :  * available right now, it will sleep until it can get it.
     266             :  *
     267             :  * The mutex must later on be released by the same task that
     268             :  * acquired it. Recursive locking is not allowed. The task
     269             :  * may not exit without first unlocking the mutex. Also, kernel
     270             :  * memory where the mutex resides must not be freed with
     271             :  * the mutex still locked. The mutex must first be initialized
     272             :  * (or statically defined) before it can be locked. memset()-ing
     273             :  * the mutex to 0 is not allowed.
     274             :  *
     275             :  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
     276             :  * checks that will enforce the restrictions and will also do
     277             :  * deadlock debugging)
     278             :  *
     279             :  * This function is similar to (but not equivalent to) down().
     280             :  */
     281        5106 : void __sched mutex_lock(struct mutex *lock)
     282             : {
     283             :         might_sleep();
     284             : 
     285        5106 :         if (!__mutex_trylock_fast(lock))
     286           0 :                 __mutex_lock_slowpath(lock);
     287        5106 : }
     288             : EXPORT_SYMBOL(mutex_lock);
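
A minimal usage sketch of the API documented above, for a hypothetical driver serializing updates to shared state (the names are illustrative, not from this file):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* illustrative name */
static int example_count;

/* Process context only: mutex_lock() may sleep. */
void example_update(void)
{
	mutex_lock(&example_lock);
	example_count++;
	mutex_unlock(&example_lock);	/* released by the same task that locked it */
}
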
     289             : #endif
     290             : 
     291             : #include "ww_mutex.h"
     292             : 
     293             : #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
     294             : 
     295             : /*
     296             :  * Trylock variant that returns the owning task on failure.
     297             :  */
     298             : static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
     299             : {
     300             :         return __mutex_trylock_common(lock, false);
     301             : }
     302             : 
     303             : static inline
     304             : bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
     305             :                             struct mutex_waiter *waiter)
     306             : {
     307             :         struct ww_mutex *ww;
     308             : 
     309             :         ww = container_of(lock, struct ww_mutex, base);
     310             : 
     311             :         /*
     312             :          * If ww->ctx is set, its contents are undefined; only
     313             :          * by acquiring wait_lock is there a guarantee that
     314             :          * they are valid when read.
     315             :          *
     316             :          * As such, when deadlock detection needs to be
     317             :          * performed the optimistic spinning cannot be done.
     318             :          *
     319             :          * Check this in every inner iteration because we may
     320             :          * be racing against another thread's ww_mutex_lock.
     321             :          */
     322             :         if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
     323             :                 return false;
     324             : 
     325             :         /*
     326             :          * If we aren't on the wait list yet, cancel the spin
     327             :          * if there are waiters. We want to avoid stealing the
     328             :          * lock from a waiter with an earlier stamp, since the
     329             :          * other thread may already own a lock that we also
     330             :          * need.
     331             :          */
     332             :         if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
     333             :                 return false;
     334             : 
     335             :         /*
     336             :          * Similarly, stop spinning if we are no longer the
     337             :          * first waiter.
     338             :          */
     339             :         if (waiter && !__mutex_waiter_is_first(lock, waiter))
     340             :                 return false;
     341             : 
     342             :         return true;
     343             : }
     344             : 
     345             : /*
     346             :  * Look out! "owner" is an entirely speculative pointer access and not
     347             :  * reliable.
     348             :  *
     349             :  * "noinline" so that this function shows up on perf profiles.
     350             :  */
     351             : static noinline
     352             : bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
     353             :                          struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
     354             : {
     355             :         bool ret = true;
     356             : 
     357             :         lockdep_assert_preemption_disabled();
     358             : 
     359             :         while (__mutex_owner(lock) == owner) {
     360             :                 /*
     361             :                  * Ensure we emit the owner->on_cpu dereference _after_
     362             :                  * checking that lock->owner still matches owner. We have
     363             :                  * already disabled preemption, which is equivalent to an RCU
     364             :                  * read-side critical section in the optimistic spinning code,
     365             :                  * so the task_struct won't go away during the spinning
     366             :                  * period.
     367             :                  */
     368             :                 barrier();
     369             : 
     370             :                 /*
     371             :                  * Use vcpu_is_preempted to detect lock holder preemption issue.
     372             :                  */
     373             :                 if (!owner_on_cpu(owner) || need_resched()) {
     374             :                         ret = false;
     375             :                         break;
     376             :                 }
     377             : 
     378             :                 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
     379             :                         ret = false;
     380             :                         break;
     381             :                 }
     382             : 
     383             :                 cpu_relax();
     384             :         }
     385             : 
     386             :         return ret;
     387             : }
     388             : 
     389             : /*
     390             :  * Initial check for entering the mutex spinning loop
     391             :  */
     392             : static inline int mutex_can_spin_on_owner(struct mutex *lock)
     393             : {
     394             :         struct task_struct *owner;
     395             :         int retval = 1;
     396             : 
     397             :         lockdep_assert_preemption_disabled();
     398             : 
     399             :         if (need_resched())
     400             :                 return 0;
     401             : 
     402             :         /*
     403             :          * We have already disabled preemption, which is equivalent to an RCU
     404             :          * read-side critical section in the optimistic spinning code, so the
     405             :          * task_struct won't go away during the spinning period.
     406             :          */
     407             :         owner = __mutex_owner(lock);
     408             :         if (owner)
     409             :                 retval = owner_on_cpu(owner);
     410             : 
     411             :         /*
     412             :          * If lock->owner is not set, the mutex has been released. Return true
     413             :          * such that we'll trylock in the spin path, which is a faster option
     414             :          * than the blocking slow path.
     415             :          */
     416             :         return retval;
     417             : }
     418             : 
     419             : /*
     420             :  * Optimistic spinning.
     421             :  *
     422             :  * We try to spin for acquisition when we find that the lock owner
     423             :  * is currently running on a (different) CPU and while we don't
     424             :  * need to reschedule. The rationale is that if the lock owner is
     425             :  * running, it is likely to release the lock soon.
     426             :  *
     427             :  * The mutex spinners are queued up using an MCS lock so that only one
     428             :  * spinner can compete for the mutex. However, if mutex spinning isn't
     429             :  * going to happen, there is no point in going through the lock/unlock
     430             :  * overhead.
     431             :  *
     432             :  * Returns true when the lock was taken, otherwise false, indicating
     433             :  * that we need to jump to the slowpath and sleep.
     434             :  *
     435             :  * @waiter is non-NULL if the spinner is already a waiter in the wait
     436             :  * queue. The waiter-spinner will spin on the lock directly and concurrently
     437             :  * with the spinner at the head of the OSQ, if present, until the owner is
     438             :  * changed to itself.
     439             :  */
     440             : static __always_inline bool
     441             : mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
     442             :                       struct mutex_waiter *waiter)
     443             : {
     444             :         if (!waiter) {
     445             :                 /*
     446             :                  * The purpose of the mutex_can_spin_on_owner() function is
     447             :                  * to eliminate the overhead of osq_lock() and osq_unlock()
     448             :                  * in case spinning isn't possible. As a waiter-spinner
     449             :                  * is not going to take OSQ lock anyway, there is no need
     450             :                  * to call mutex_can_spin_on_owner().
     451             :                  */
     452             :                 if (!mutex_can_spin_on_owner(lock))
     453             :                         goto fail;
     454             : 
     455             :                 /*
     456             :                  * In order to avoid a stampede of mutex spinners trying to
     457             :                  * acquire the mutex all at once, the spinners need to take a
     458             :                  * MCS (queued) lock first before spinning on the owner field.
     459             :                  */
     460             :                 if (!osq_lock(&lock->osq))
     461             :                         goto fail;
     462             :         }
     463             : 
     464             :         for (;;) {
     465             :                 struct task_struct *owner;
     466             : 
     467             :                 /* Try to acquire the mutex... */
     468             :                 owner = __mutex_trylock_or_owner(lock);
     469             :                 if (!owner)
     470             :                         break;
     471             : 
     472             :                 /*
     473             :                  * There's an owner, wait for it to either
     474             :                  * release the lock or go to sleep.
     475             :                  */
     476             :                 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
     477             :                         goto fail_unlock;
     478             : 
     479             :                 /*
     480             :                  * The cpu_relax() call is a compiler barrier which forces
     481             :                  * everything in this loop to be re-loaded. We don't need
     482             :                  * memory barriers as we'll eventually observe the right
     483             :                  * values at the cost of a few extra spins.
     484             :                  */
     485             :                 cpu_relax();
     486             :         }
     487             : 
     488             :         if (!waiter)
     489             :                 osq_unlock(&lock->osq);
     490             : 
     491             :         return true;
     492             : 
     493             : 
     494             : fail_unlock:
     495             :         if (!waiter)
     496             :                 osq_unlock(&lock->osq);
     497             : 
     498             : fail:
     499             :         /*
     500             :          * If we fell out of the spin path because of need_resched(),
     501             :          * reschedule now, before we try-lock the mutex. This avoids getting
     502             :          * scheduled out right after we obtain the mutex.
     503             :          */
     504             :         if (need_resched()) {
     505             :                 /*
     506             :                  * We _should_ have TASK_RUNNING here, but just in case
     507             :                  * we do not, make it so, otherwise we might get stuck.
     508             :                  */
     509             :                 __set_current_state(TASK_RUNNING);
     510             :                 schedule_preempt_disabled();
     511             :         }
     512             : 
     513             :         return false;
     514             : }
     515             : #else
     516             : static __always_inline bool
     517             : mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
     518             :                       struct mutex_waiter *waiter)
     519             : {
     520             :         return false;
     521             : }
     522             : #endif
     523             : 
     524             : static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
     525             : 
     526             : /**
     527             :  * mutex_unlock - release the mutex
     528             :  * @lock: the mutex to be released
     529             :  *
     530             :  * Unlock a mutex that has been locked by this task previously.
     531             :  *
     532             :  * This function must not be used in interrupt context. Unlocking
     533             :  * of a mutex that is not locked is not allowed.
     534             :  *
     535             :  * This function is similar to (but not equivalent to) up().
     536             :  */
     537        5383 : void __sched mutex_unlock(struct mutex *lock)
     538             : {
     539             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
     540        5388 :         if (__mutex_unlock_fast(lock))
     541             :                 return;
     542             : #endif
     543           0 :         __mutex_unlock_slowpath(lock, _RET_IP_);
     544             : }
     545             : EXPORT_SYMBOL(mutex_unlock);
     546             : 
     547             : /**
     548             :  * ww_mutex_unlock - release the w/w mutex
     549             :  * @lock: the mutex to be released
     550             :  *
     551             :  * Unlock a mutex that has been locked by this task previously with any of the
     552             :  * ww_mutex_lock* functions (with or without an acquire context). It is
     553             :  * forbidden to release the locks after releasing the acquire context.
     554             :  *
     555             :  * This function must not be used in interrupt context. Unlocking
     556             :  * of an unlocked mutex is not allowed.
     557             :  */
     558           5 : void __sched ww_mutex_unlock(struct ww_mutex *lock)
     559             : {
     560           5 :         __ww_mutex_unlock(lock);
     561          10 :         mutex_unlock(&lock->base);
     562           5 : }
     563             : EXPORT_SYMBOL(ww_mutex_unlock);
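
A minimal w/w usage sketch matching the rule above (release the lock before finalizing the acquire context). The names are illustrative and ww_mutex_init(&example_ww_lock, &example_ww_class) is assumed to have run elsewhere:

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);	/* illustrative */
static struct ww_mutex example_ww_lock;		/* assumed ww_mutex_init()ed elsewhere */

static int example_ww_use(void)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);
	ret = ww_mutex_lock(&example_ww_lock, &ctx);
	if (!ret) {
		ww_acquire_done(&ctx);		/* no further locks will be added */
		/* ... touch the protected data ... */
		ww_mutex_unlock(&example_ww_lock);	/* before ww_acquire_fini() */
	}
	ww_acquire_fini(&ctx);
	return ret;
}
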
     564             : 
     565             : /*
     566             :  * Lock a mutex (possibly interruptible), slowpath:
     567             :  */
     568             : static __always_inline int __sched
     569             : __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
     570             :                     struct lockdep_map *nest_lock, unsigned long ip,
     571             :                     struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
     572             : {
     573             :         struct mutex_waiter waiter;
     574             :         struct ww_mutex *ww;
     575             :         int ret;
     576             : 
     577             :         if (!use_ww_ctx)
     578           0 :                 ww_ctx = NULL;
     579             : 
     580             :         might_sleep();
     581             : 
     582             :         MUTEX_WARN_ON(lock->magic != lock);
     583             : 
     584           5 :         ww = container_of(lock, struct ww_mutex, base);
     585           5 :         if (ww_ctx) {
     586           5 :                 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
     587             :                         return -EALREADY;
     588             : 
     589             :                 /*
     590             :                  * Reset the wounded flag after a kill. No other process can
     591             :                  * race and wound us here since they can't have a valid owner
     592             :                  * pointer if we don't have any locks held.
     593             :                  */
     594           0 :                 if (ww_ctx->acquired == 0)
     595           0 :                         ww_ctx->wounded = 0;
     596             : 
     597             : #ifdef CONFIG_DEBUG_LOCK_ALLOC
     598             :                 nest_lock = &ww_ctx->dep_map;
     599             : #endif
     600             :         }
     601             : 
     602           0 :         preempt_disable();
     603             :         mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
     604             : 
     605           0 :         trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
     606           0 :         if (__mutex_trylock(lock) ||
     607           0 :             mutex_optimistic_spin(lock, ww_ctx, NULL)) {
     608             :                 /* got the lock, yay! */
     609             :                 lock_acquired(&lock->dep_map, ip);
     610           0 :                 if (ww_ctx)
     611             :                         ww_mutex_set_context_fastpath(ww, ww_ctx);
     612           0 :                 trace_contention_end(lock, 0);
     613           0 :                 preempt_enable();
     614             :                 return 0;
     615             :         }
     616             : 
     617           0 :         raw_spin_lock(&lock->wait_lock);
     618             :         /*
     619             :          * After waiting to acquire the wait_lock, try again.
     620             :          */
     621           0 :         if (__mutex_trylock(lock)) {
     622           0 :                 if (ww_ctx)
     623           0 :                         __ww_mutex_check_waiters(lock, ww_ctx);
     624             : 
     625             :                 goto skip_wait;
     626             :         }
     627             : 
     628             :         debug_mutex_lock_common(lock, &waiter);
     629           0 :         waiter.task = current;
     630             :         if (use_ww_ctx)
     631           0 :                 waiter.ww_ctx = ww_ctx;
     632             : 
     633             :         lock_contended(&lock->dep_map, ip);
     634             : 
     635             :         if (!use_ww_ctx) {
     636             :                 /* add waiting tasks to the end of the waitqueue (FIFO): */
     637           0 :                 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
     638             :         } else {
     639             :                 /*
     640             :                  * Add in stamp order, waking up waiters that must kill
     641             :                  * themselves.
     642             :                  */
     643           0 :                 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
     644           0 :                 if (ret)
     645             :                         goto err_early_kill;
     646             :         }
     647             : 
     648           0 :         set_current_state(state);
     649           0 :         trace_contention_begin(lock, LCB_F_MUTEX);
     650             :         for (;;) {
     651             :                 bool first;
     652             : 
     653             :                 /*
     654             :                  * Once we hold wait_lock, we're serialized against
     655             :                  * mutex_unlock() handing the lock off to us; do a trylock
     656             :                  * before testing the error conditions to make sure we pick up
     657             :                  * the handoff.
     658             :                  */
     659           0 :                 if (__mutex_trylock(lock))
     660             :                         goto acquired;
     661             : 
     662             :                 /*
     663             :                  * Check for signals and kill conditions while holding
     664             :                  * wait_lock. This ensures the lock cancellation is ordered
     665             :                  * against mutex_unlock() and wake-ups do not go missing.
     666             :                  */
     667           0 :                 if (signal_pending_state(state, current)) {
     668             :                         ret = -EINTR;
     669             :                         goto err;
     670             :                 }
     671             : 
     672           0 :                 if (ww_ctx) {
     673           0 :                         ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
     674           0 :                         if (ret)
     675             :                                 goto err;
     676             :                 }
     677             : 
     678           0 :                 raw_spin_unlock(&lock->wait_lock);
     679           0 :                 schedule_preempt_disabled();
     680             : 
     681           0 :                 first = __mutex_waiter_is_first(lock, &waiter);
     682             : 
     683           0 :                 set_current_state(state);
     684             :                 /*
     685             :                  * Here we order against unlock; we must either see it change
     686             :                  * state back to RUNNING and fall through the next schedule(),
     687             :                  * or we must see its unlock and acquire.
     688             :                  */
     689           0 :                 if (__mutex_trylock_or_handoff(lock, first))
     690             :                         break;
     691             : 
     692             :                 if (first) {
     693             :                         trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
     694             :                         if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
     695             :                                 break;
     696             :                         trace_contention_begin(lock, LCB_F_MUTEX);
     697             :                 }
     698             : 
     699           0 :                 raw_spin_lock(&lock->wait_lock);
     700             :         }
     701           0 :         raw_spin_lock(&lock->wait_lock);
     702             : acquired:
     703           0 :         __set_current_state(TASK_RUNNING);
     704             : 
     705           0 :         if (ww_ctx) {
     706             :                 /*
     707             :                  * Wound-Wait: we stole the lock (!first_waiter); check the
     708             :                  * waiters, as anyone might want to wound us.
     709             :                  */
     710           0 :                 if (!ww_ctx->is_wait_die &&
     711           0 :                     !__mutex_waiter_is_first(lock, &waiter))
     712           0 :                         __ww_mutex_check_waiters(lock, ww_ctx);
     713             :         }
     714             : 
     715           0 :         __mutex_remove_waiter(lock, &waiter);
     716             : 
     717             :         debug_mutex_free_waiter(&waiter);
     718             : 
     719             : skip_wait:
     720             :         /* got the lock - cleanup and rejoice! */
     721             :         lock_acquired(&lock->dep_map, ip);
     722           0 :         trace_contention_end(lock, 0);
     723             : 
     724           0 :         if (ww_ctx)
     725             :                 ww_mutex_lock_acquired(ww, ww_ctx);
     726             : 
     727           0 :         raw_spin_unlock(&lock->wait_lock);
     728           0 :         preempt_enable();
     729             :         return 0;
     730             : 
     731             : err:
     732           0 :         __set_current_state(TASK_RUNNING);
     733           0 :         __mutex_remove_waiter(lock, &waiter);
     734             : err_early_kill:
     735           0 :         trace_contention_end(lock, ret);
     736           0 :         raw_spin_unlock(&lock->wait_lock);
     737             :         debug_mutex_free_waiter(&waiter);
     738             :         mutex_release(&lock->dep_map, ip);
     739           0 :         preempt_enable();
     740             :         return ret;
     741             : }
     742             : 
     743             : static int __sched
     744           0 : __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
     745             :              struct lockdep_map *nest_lock, unsigned long ip)
     746             : {
     747           0 :         return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
     748             : }
     749             : 
     750             : static int __sched
     751           5 : __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
     752             :                 unsigned long ip, struct ww_acquire_ctx *ww_ctx)
     753             : {
     754           5 :         return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
     755             : }
     756             : 
     757             : /**
     758             :  * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
     759             :  * @ww: mutex to lock
     760             :  * @ww_ctx: optional w/w acquire context
     761             :  *
     762             :  * Trylocks a mutex with the optional acquire context; no deadlock detection is
     763             :  * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
     764             :  *
     765             :  * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx is
     766             :  * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
     767             :  *
     768             :  * A mutex acquired with this function must be released with ww_mutex_unlock.
     769             :  */
     770           0 : int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
     771             : {
     772           0 :         if (!ww_ctx)
     773           0 :                 return mutex_trylock(&ww->base);
     774             : 
     775             :         MUTEX_WARN_ON(ww->base.magic != &ww->base);
     776             : 
     777             :         /*
     778             :          * Reset the wounded flag after a kill. No other process can
     779             :          * race and wound us here, since they can't have a valid owner
     780             :          * pointer if we don't have any locks held.
     781             :          */
     782           0 :         if (ww_ctx->acquired == 0)
     783           0 :                 ww_ctx->wounded = 0;
     784             : 
     785           0 :         if (__mutex_trylock(&ww->base)) {
     786             :                 ww_mutex_set_context_fastpath(ww, ww_ctx);
     787             :                 mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
     788             :                 return 1;
     789             :         }
     790             : 
     791             :         return 0;
     792             : }
     793             : EXPORT_SYMBOL(ww_mutex_trylock);
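
A minimal sketch of the trylock documented above, used without an acquire context on a hypothetical object (illustrative names; the ww_mutex is assumed to be initialized elsewhere):

#include <linux/types.h>
#include <linux/ww_mutex.h>

struct example_ww_obj {
	struct ww_mutex lock;	/* assumed ww_mutex_init()ed elsewhere */
	int dirty;
};

/* Returns true if the object was cleaned, false if it is currently held. */
static bool example_try_clean(struct example_ww_obj *obj)
{
	if (!ww_mutex_trylock(&obj->lock, NULL))	/* no deadlock handling */
		return false;
	obj->dirty = 0;
	ww_mutex_unlock(&obj->lock);
	return true;
}
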
     794             : 
     795             : #ifdef CONFIG_DEBUG_LOCK_ALLOC
     796             : void __sched
     797             : mutex_lock_nested(struct mutex *lock, unsigned int subclass)
     798             : {
     799             :         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
     800             : }
     801             : 
     802             : EXPORT_SYMBOL_GPL(mutex_lock_nested);
     803             : 
     804             : void __sched
     805             : _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
     806             : {
     807             :         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
     808             : }
     809             : EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
     810             : 
     811             : int __sched
     812             : mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
     813             : {
     814             :         return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
     815             : }
     816             : EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
     817             : 
     818             : int __sched
     819             : mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
     820             : {
     821             :         return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
     822             : }
     823             : EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
     824             : 
     825             : void __sched
     826             : mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
     827             : {
     828             :         int token;
     829             : 
     830             :         might_sleep();
     831             : 
     832             :         token = io_schedule_prepare();
     833             :         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
     834             :                             subclass, NULL, _RET_IP_, NULL, 0);
     835             :         io_schedule_finish(token);
     836             : }
     837             : EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
     838             : 
     839             : static inline int
     840             : ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     841             : {
     842             : #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
     843             :         unsigned tmp;
     844             : 
     845             :         if (ctx->deadlock_inject_countdown-- == 0) {
     846             :                 tmp = ctx->deadlock_inject_interval;
     847             :                 if (tmp > UINT_MAX/4)
     848             :                         tmp = UINT_MAX;
     849             :                 else
     850             :                         tmp = tmp*2 + tmp + tmp/2;
     851             : 
     852             :                 ctx->deadlock_inject_interval = tmp;
     853             :                 ctx->deadlock_inject_countdown = tmp;
     854             :                 ctx->contending_lock = lock;
     855             : 
     856             :                 ww_mutex_unlock(lock);
     857             : 
     858             :                 return -EDEADLK;
     859             :         }
     860             : #endif
     861             : 
     862             :         return 0;
     863             : }
     864             : 
     865             : int __sched
     866             : ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     867             : {
     868             :         int ret;
     869             : 
     870             :         might_sleep();
     871             :         ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
     872             :                                0, _RET_IP_, ctx);
     873             :         if (!ret && ctx && ctx->acquired > 1)
     874             :                 return ww_mutex_deadlock_injection(lock, ctx);
     875             : 
     876             :         return ret;
     877             : }
     878             : EXPORT_SYMBOL_GPL(ww_mutex_lock);
     879             : 
     880             : int __sched
     881             : ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     882             : {
     883             :         int ret;
     884             : 
     885             :         might_sleep();
     886             :         ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
     887             :                               0, _RET_IP_, ctx);
     888             : 
     889             :         if (!ret && ctx && ctx->acquired > 1)
     890             :                 return ww_mutex_deadlock_injection(lock, ctx);
     891             : 
     892             :         return ret;
     893             : }
     894             : EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
     895             : 
     896             : #endif
     897             : 
     898             : /*
     899             :  * Release the lock, slowpath:
     900             :  */
     901           0 : static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
     902             : {
     903           0 :         struct task_struct *next = NULL;
     904           0 :         DEFINE_WAKE_Q(wake_q);
     905             :         unsigned long owner;
     906             : 
     907             :         mutex_release(&lock->dep_map, ip);
     908             : 
     909             :         /*
     910             :          * Release the lock before (potentially) taking the spinlock such that
     911             :          * other contenders can get on with things ASAP.
     912             :          *
     913             :          * Except when HANDOFF; in that case we must not clear the owner field,
     914             :          * but instead set it to the top waiter.
     915             :          */
     916           0 :         owner = atomic_long_read(&lock->owner);
     917             :         for (;;) {
     918             :                 MUTEX_WARN_ON(__owner_task(owner) != current);
     919             :                 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
     920             : 
     921           0 :                 if (owner & MUTEX_FLAG_HANDOFF)
     922             :                         break;
     923             : 
     924           0 :                 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
     925           0 :                         if (owner & MUTEX_FLAG_WAITERS)
     926             :                                 break;
     927             : 
     928           0 :                         return;
     929             :                 }
     930             :         }
     931             : 
     932           0 :         raw_spin_lock(&lock->wait_lock);
     933             :         debug_mutex_unlock(lock);
     934           0 :         if (!list_empty(&lock->wait_list)) {
     935             :                 /* get the first entry from the wait-list: */
     936           0 :                 struct mutex_waiter *waiter =
     937           0 :                         list_first_entry(&lock->wait_list,
     938             :                                          struct mutex_waiter, list);
     939             : 
     940           0 :                 next = waiter->task;
     941             : 
     942             :                 debug_mutex_wake_waiter(lock, waiter);
     943           0 :                 wake_q_add(&wake_q, next);
     944             :         }
     945             : 
     946           0 :         if (owner & MUTEX_FLAG_HANDOFF)
     947             :                 __mutex_handoff(lock, next);
     948             : 
     949           0 :         raw_spin_unlock(&lock->wait_lock);
     950             : 
     951           0 :         wake_up_q(&wake_q);
     952             : }
     953             : 
     954             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
     955             : /*
     956             :  * Here come the less common (and hence less performance-critical) APIs:
     957             :  * mutex_lock_interruptible() and mutex_trylock().
     958             :  */
     959             : static noinline int __sched
     960             : __mutex_lock_killable_slowpath(struct mutex *lock);
     961             : 
     962             : static noinline int __sched
     963             : __mutex_lock_interruptible_slowpath(struct mutex *lock);
     964             : 
     965             : /**
     966             :  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
     967             :  * @lock: The mutex to be acquired.
     968             :  *
     969             :  * Lock the mutex like mutex_lock().  If a signal is delivered while the
     970             :  * process is sleeping, this function will return without acquiring the
     971             :  * mutex.
     972             :  *
     973             :  * Context: Process context.
     974             :  * Return: 0 if the lock was successfully acquired or %-EINTR if a
     975             :  * signal arrived.
     976             :  */
     977           0 : int __sched mutex_lock_interruptible(struct mutex *lock)
     978             : {
     979             :         might_sleep();
     980             : 
     981           0 :         if (__mutex_trylock_fast(lock))
     982             :                 return 0;
     983             : 
     984           0 :         return __mutex_lock_interruptible_slowpath(lock);
     985             : }
     986             : 
     987             : EXPORT_SYMBOL(mutex_lock_interruptible);
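
A minimal sketch of the interruptible variant in a hypothetical ioctl-style path, propagating the signal case back to the caller (names are illustrative):

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_dev_lock);	/* illustrative */

static long example_dev_op(void)
{
	if (mutex_lock_interruptible(&example_dev_lock))
		return -ERESTARTSYS;	/* a signal arrived before we got the lock */

	/* ... do the work that needs the lock ... */

	mutex_unlock(&example_dev_lock);
	return 0;
}
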
     988             : 
     989             : /**
     990             :  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
     991             :  * @lock: The mutex to be acquired.
     992             :  *
     993             :  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
     994             :  * the current process is delivered while the process is sleeping, this
     995             :  * function will return without acquiring the mutex.
     996             :  *
     997             :  * Context: Process context.
     998             :  * Return: 0 if the lock was successfully acquired or %-EINTR if a
     999             :  * fatal signal arrived.
    1000             :  */
    1001         277 : int __sched mutex_lock_killable(struct mutex *lock)
    1002             : {
    1003             :         might_sleep();
    1004             : 
    1005         277 :         if (__mutex_trylock_fast(lock))
    1006             :                 return 0;
    1007             : 
    1008           0 :         return __mutex_lock_killable_slowpath(lock);
    1009             : }
    1010             : EXPORT_SYMBOL(mutex_lock_killable);
    1011             : 
    1012             : /**
    1013             :  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
    1014             :  * @lock: The mutex to be acquired.
    1015             :  *
    1016             :  * Lock the mutex like mutex_lock().  While the task is waiting for this
    1017             :  * mutex, it will be accounted as being in the IO wait state by the
    1018             :  * scheduler.
    1019             :  *
    1020             :  * Context: Process context.
    1021             :  */
    1022           0 : void __sched mutex_lock_io(struct mutex *lock)
    1023             : {
    1024             :         int token;
    1025             : 
    1026           0 :         token = io_schedule_prepare();
    1027           0 :         mutex_lock(lock);
    1028           0 :         io_schedule_finish(token);
    1029           0 : }
    1030             : EXPORT_SYMBOL_GPL(mutex_lock_io);
    1031             : 
    1032             : static noinline void __sched
    1033           0 : __mutex_lock_slowpath(struct mutex *lock)
    1034             : {
    1035           0 :         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
    1036           0 : }
    1037             : 
    1038             : static noinline int __sched
    1039           0 : __mutex_lock_killable_slowpath(struct mutex *lock)
    1040             : {
    1041           0 :         return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
    1042             : }
    1043             : 
    1044             : static noinline int __sched
    1045           0 : __mutex_lock_interruptible_slowpath(struct mutex *lock)
    1046             : {
    1047           0 :         return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
    1048             : }
    1049             : 
    1050             : static noinline int __sched
    1051           5 : __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    1052             : {
    1053           5 :         return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
    1054           5 :                                _RET_IP_, ctx);
    1055             : }
    1056             : 
    1057             : static noinline int __sched
    1058           0 : __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
    1059             :                                             struct ww_acquire_ctx *ctx)
    1060             : {
    1061           0 :         return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
    1062           0 :                                _RET_IP_, ctx);
    1063             : }
    1064             : 
    1065             : #endif
    1066             : 
    1067             : /**
    1068             :  * mutex_trylock - try to acquire the mutex, without waiting
    1069             :  * @lock: the mutex to be acquired
    1070             :  *
    1071             :  * Try to acquire the mutex atomically. Returns 1 if the mutex
    1072             :  * has been acquired successfully, and 0 on contention.
    1073             :  *
    1074             :  * NOTE: this function follows the spin_trylock() convention, so
    1075             :  * it is negated from the down_trylock() return values! Be careful
    1076             :  * about this when converting semaphore users to mutexes.
    1077             :  *
    1078             :  * This function must not be used in interrupt context. The
    1079             :  * mutex must be released by the same task that acquired it.
    1080             :  */
    1081           0 : int __sched mutex_trylock(struct mutex *lock)
    1082             : {
    1083             :         bool locked;
    1084             : 
    1085             :         MUTEX_WARN_ON(lock->magic != lock);
    1086             : 
    1087           0 :         locked = __mutex_trylock(lock);
    1088             :         if (locked)
    1089             :                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
    1090             : 
    1091           0 :         return locked;
    1092             : }
    1093             : EXPORT_SYMBOL(mutex_trylock);
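
A minimal sketch of the trylock convention noted above (1 means acquired, the opposite of down_trylock()): a hypothetical best-effort statistics update that skips the work under contention rather than sleeping. Names are illustrative.

#include <linux/mutex.h>

static DEFINE_MUTEX(example_stats_lock);	/* illustrative */
static unsigned long example_hits;

static void example_count_hit(void)
{
	if (mutex_trylock(&example_stats_lock)) {	/* 1 == acquired */
		example_hits++;
		mutex_unlock(&example_stats_lock);
	}
	/* on contention: drop the update instead of blocking */
}
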
    1094             : 
    1095             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
    1096             : int __sched
    1097          10 : ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    1098             : {
    1099             :         might_sleep();
    1100             : 
    1101          20 :         if (__mutex_trylock_fast(&lock->base)) {
    1102           5 :                 if (ctx)
    1103             :                         ww_mutex_set_context_fastpath(lock, ctx);
    1104             :                 return 0;
    1105             :         }
    1106             : 
    1107           5 :         return __ww_mutex_lock_slowpath(lock, ctx);
    1108             : }
    1109             : EXPORT_SYMBOL(ww_mutex_lock);
    1110             : 
    1111             : int __sched
    1112           0 : ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    1113             : {
    1114             :         might_sleep();
    1115             : 
    1116           0 :         if (__mutex_trylock_fast(&lock->base)) {
    1117           0 :                 if (ctx)
    1118             :                         ww_mutex_set_context_fastpath(lock, ctx);
    1119             :                 return 0;
    1120             :         }
    1121             : 
    1122           0 :         return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
    1123             : }
    1124             : EXPORT_SYMBOL(ww_mutex_lock_interruptible);
    1125             : 
    1126             : #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
    1127             : #endif /* !CONFIG_PREEMPT_RT */
    1128             : 
    1129             : /**
    1130             :  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
    1131             :  * @cnt: the atomic to decrement
    1132             :  * @lock: the mutex to return holding if we dec to 0
    1133             :  *
    1134             :  * Returns 1, with @lock held, if the decrement hit 0; returns 0 otherwise.
    1135             :  */
    1136           0 : int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
    1137             : {
    1138             :         /* dec if we can't possibly hit 0 */
    1139           0 :         if (atomic_add_unless(cnt, -1, 1))
    1140             :                 return 0;
    1141             :         /* we might hit 0, so take the lock */
    1142           0 :         mutex_lock(lock);
    1143           0 :         if (!atomic_dec_and_test(cnt)) {
    1144             :                 /* when we actually did the dec, we didn't hit 0 */
    1145             :                 mutex_unlock(lock);
    1146             :                 return 0;
    1147             :         }
    1148             :         /* we hit 0, and we hold the lock */
    1149             :         return 1;
    1150             : }
    1151             : EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
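
A minimal sketch of the helper documented above in a hypothetical refcounted-object teardown: only the final put takes the list lock and frees the object (all names are illustrative):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_MUTEX(example_list_lock);	/* protects the list the objects live on */

static void example_put(struct example_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_list_lock))
		return;				/* not the last reference */

	/* The count hit 0 and example_list_lock is now held. */
	list_del(&obj->node);
	mutex_unlock(&example_list_lock);
	kfree(obj);
}
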

Generated by: LCOV version 1.14