LCOV - code coverage report
Current view: top level - include/linux - mmap_lock.h (source / functions) Hit Total Coverage
Test: coverage.info Lines: 0 28 0.0 %
Date: 2023-08-24 13:40:31 Functions: 0 0 -

          Line data    Source code
       1             : #ifndef _LINUX_MMAP_LOCK_H
       2             : #define _LINUX_MMAP_LOCK_H
       3             : 
       4             : #include <linux/lockdep.h>
       5             : #include <linux/mm_types.h>
       6             : #include <linux/mmdebug.h>
       7             : #include <linux/rwsem.h>
       8             : #include <linux/tracepoint-defs.h>
       9             : #include <linux/types.h>
      10             : 
/*
 * Static initializer for the mmap_lock member of an mm_struct, for use in
 * a designated-initializer list of a statically allocated mm_struct.
 */
#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
      13             : 
/*
 * Tracepoints covering the mmap_lock life cycle: attempt to lock, result of
 * the attempt, and release. Declared here so the inline wrappers below can
 * test them cheaply with tracepoint_enabled() before calling the
 * out-of-line emitters.
 */
DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);
      17             : 
      18             : #ifdef CONFIG_TRACING
      19             : 
/*
 * Out-of-line tracepoint emitters; not defined in this header (presumably
 * in mm/mmap_lock.c — confirm in the tree). Only reached when the
 * corresponding tracepoint is enabled.
 */
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
      24             : 
      25             : static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
      26             :                                                    bool write)
      27             : {
      28             :         if (tracepoint_enabled(mmap_lock_start_locking))
      29             :                 __mmap_lock_do_trace_start_locking(mm, write);
      30             : }
      31             : 
      32             : static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
      33             :                                                       bool write, bool success)
      34             : {
      35             :         if (tracepoint_enabled(mmap_lock_acquire_returned))
      36             :                 __mmap_lock_do_trace_acquire_returned(mm, write, success);
      37             : }
      38             : 
      39             : static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
      40             : {
      41             :         if (tracepoint_enabled(mmap_lock_released))
      42             :                 __mmap_lock_do_trace_released(mm, write);
      43             : }
      44             : 
      45             : #else /* !CONFIG_TRACING */
      46             : 
/* CONFIG_TRACING=n stub: compiles away to nothing. */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
}
      51             : 
/* CONFIG_TRACING=n stub: compiles away to nothing. */
static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
}
      56             : 
/* CONFIG_TRACING=n stub: compiles away to nothing. */
static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}
      60             : 
      61             : #endif /* CONFIG_TRACING */
      62             : 
/*
 * Assert that mmap_lock is held (in either read or write mode) by the
 * current context. Two complementary debug checks: the lockdep assertion
 * tracks the acquiring context, while VM_BUG_ON_MM() inspects the raw
 * rwsem state for builds without lockdep.
 */
static inline void mmap_assert_locked(struct mm_struct *mm)
{
	lockdep_assert_held(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}
      68             : 
/*
 * Assert that mmap_lock is held for write by the current context.
 * Note the rwsem fallback check only proves the lock is held at all, not
 * that it is write-held — full write-mode verification needs lockdep.
 */
static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}
      74             : 
#ifdef CONFIG_PER_VMA_LOCK
/*
 * Advance mm->mm_lock_seq at the end of an mmap_lock write section,
 * which (presumably) retires any per-VMA write locks taken under it —
 * confirm against the per-VMA lock readers of mm_lock_seq. Must be
 * called with mmap_lock held for write; that exclusivity is what makes
 * the plain read-increment safe, and WRITE_ONCE() publishes the new
 * value to lockless readers.
 */
static inline void vma_end_write_all(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);
	/* No races during update due to exclusive mmap_lock being held */
	WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
#else
/* Without CONFIG_PER_VMA_LOCK there is no sequence count to advance. */
static inline void vma_end_write_all(struct mm_struct *mm) {}
#endif
      85             : 
/* Initialize a freshly created mm's mmap_lock (unlocked state). */
static inline void mmap_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_lock);
}
      90             : 
/*
 * Acquire mmap_lock for write (exclusive), sleeping uninterruptibly if
 * contended. Trace ordering is fixed: start-locking before the attempt,
 * acquire-returned (always success here) after it.
 */
static inline void mmap_write_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}
      97             : 
/*
 * As mmap_write_lock(), but pass @subclass to lockdep so that a
 * deliberately nested acquisition of two mms' mmap_locks is not flagged
 * as a self-deadlock.
 */
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write_nested(&mm->mmap_lock, subclass);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}
     104             : 
/*
 * Acquire mmap_lock for write, aborting if a fatal signal arrives while
 * sleeping. Returns 0 on success or the negative errno propagated from
 * down_write_killable(); the acquire-returned trace records whether the
 * lock was actually obtained (ret == 0).
 */
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, true);
	ret = down_write_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
	return ret;
}
     114             : 
     115             : static inline bool mmap_write_trylock(struct mm_struct *mm)
     116             : {
     117             :         bool ret;
     118             : 
     119             :         __mmap_lock_trace_start_locking(mm, true);
     120             :         ret = down_write_trylock(&mm->mmap_lock) != 0;
     121             :         __mmap_lock_trace_acquire_returned(mm, true, ret);
     122             :         return ret;
     123             : }
     124             : 
/*
 * Release a write-held mmap_lock. Ordering matters: vma_end_write_all()
 * asserts (and relies on) the write lock still being held, so it must
 * run before up_write().
 */
static inline void mmap_write_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, true);
	vma_end_write_all(mm);
	up_write(&mm->mmap_lock);
}
     131             : 
/*
 * Atomically convert a write-held mmap_lock to read-held. Traced as a
 * (successful) read acquisition, write=false. vma_end_write_all() runs
 * while the write lock is still held, before downgrade_write() drops
 * exclusivity.
 */
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	__mmap_lock_trace_acquire_returned(mm, false, true);
	vma_end_write_all(mm);
	downgrade_write(&mm->mmap_lock);
}
     138             : 
/*
 * Acquire mmap_lock for read (shared), sleeping uninterruptibly if a
 * writer holds or is waiting for the lock.
 */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, false);
	down_read(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, true);
}
     145             : 
/*
 * Acquire mmap_lock for read, aborting if a fatal signal arrives while
 * sleeping. Returns 0 on success or the negative errno propagated from
 * down_read_killable().
 */
static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, false);
	ret = down_read_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, ret == 0);
	return ret;
}
     155             : 
     156             : static inline bool mmap_read_trylock(struct mm_struct *mm)
     157             : {
     158             :         bool ret;
     159             : 
     160           0 :         __mmap_lock_trace_start_locking(mm, false);
     161           0 :         ret = down_read_trylock(&mm->mmap_lock) != 0;
     162           0 :         __mmap_lock_trace_acquire_returned(mm, false, ret);
     163             :         return ret;
     164             : }
     165             : 
/* Release a read-held mmap_lock, tracing the release first. */
static inline void mmap_read_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read(&mm->mmap_lock);
}
     171             : 
/*
 * Release a read-held mmap_lock from a context other than the one that
 * acquired it; up_read_non_owner() skips the owner bookkeeping that a
 * plain up_read() would trip over in that case.
 */
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read_non_owner(&mm->mmap_lock);
}
     177             : 
/*
 * Return nonzero when other tasks are waiting on mmap_lock, so a
 * long-running holder can decide to drop and reacquire it.
 */
static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
	return rwsem_is_contended(&mm->mmap_lock);
}
     182             : 
     183             : #endif /* _LINUX_MMAP_LOCK_H */

Generated by: LCOV version 1.14