/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
#include <linux/ioasid.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on; mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
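
/*
 * Illustrative sketch (not part of the original header): keeping an
 * mm_struct alive across a sleep. mmgrab() pins only the mm_struct
 * itself (mm_count); the address space may still be torn down, so a
 * later user needs mmget_not_zero() before touching any mappings.
 *
 *	mmgrab(mm);		// mm_count reference, cheap and unbounded
 *	...block, queue work, etc...
 *	mmdrop(mm);		// release; frees the mm_struct if last
 */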

#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
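
/*
 * Illustrative sketch (not part of the original header): upgrading an
 * mm_count pin to a usable address space. mmget_not_zero() fails once
 * mm_users has dropped to zero, i.e. the address space is being torn
 * down, so the result must be checked.
 *
 *	// mm was kept alive with mmgrab() earlier
 *	if (mmget_not_zero(mm)) {
 *		...safe to operate on the address space here...
 *		mmput(mm);	// pairs with the successful mmget_not_zero()
 *	}
 */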

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* same as above, but performs the slow path from an async context. Can
 * be called from atomic context as well
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);
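
/*
 * Illustrative sketch (not part of the original header): get_task_mm()
 * takes the reference under task_lock() and returns NULL if the task
 * has no mm or is a kernel thread.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...inspect the address space...
 *		mmput(mm);	// drop the reference taken by get_task_mm()
 *	}
 */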

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);

unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		/*
		 * NOIO implies both NOIO and NOFS, and it is the weaker
		 * context, so it always takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}
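
/*
 * Illustrative sketch (not part of the original header): inside a
 * memalloc_nofs_save() scope, current_gfp_context() strips __GFP_FS,
 * so a nominally GFP_KERNEL allocation is treated as GFP_NOFS:
 *
 *	unsigned int flags = memalloc_nofs_save();
 *
 *	WARN_ON(current_gfp_context(GFP_KERNEL) & __GFP_FS);
 *	memalloc_nofs_restore(flags);
 */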

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/*
 * Any memory-allocation retry loop should use memalloc_retry_wait(),
 * and pass the flags for the most constrained allocation attempt that
 * might have failed. This provides useful documentation of where loops
 * are, and a central place to fine-tune the waiting as the MM
 * implementation changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
	/*
	 * We use io_schedule_timeout() because waiting for memory
	 * typically includes waiting for dirty pages to be written
	 * out, which requires IO.
	 */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	gfp_flags = current_gfp_context(gfp_flags);
	if (gfpflags_allow_blocking(gfp_flags) &&
	    !(gfp_flags & __GFP_NORETRY))
		/* Probably waited already, no need for much more */
		io_schedule_timeout(1);
	else
		/*
		 * Probably didn't wait, and has now released a lock,
		 * so now is a good time to wait.
		 */
		io_schedule_timeout(HZ/50);
}
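
/*
 * Illustrative sketch (not part of the original header): a typical
 * allocation retry loop using memalloc_retry_wait(), passing the most
 * constrained mask that may have failed.
 *
 *	struct page *page;
 *
 *	while (!(page = alloc_page(GFP_NOFS)))
 *		memalloc_retry_wait(GFP_NOFS);
 */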

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in functions
 * that might allocate, but often don't. Compiles to nothing without
 * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows
 * blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
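
/*
 * Illustrative sketch (not part of the original header): annotate a
 * function that only allocates on a rarely-taken slow path, so lockdep
 * checks every caller, not just the ones that happen to hit that path.
 * (struct cache and the cache_* helpers below are hypothetical.)
 *
 *	void *lookup_or_create(struct cache *c, unsigned long key, gfp_t gfp)
 *	{
 *		might_alloc(gfp);	// checked on every call
 *
 *		if (likely(cache_has(c, key)))
 *			return cache_get(c, key);
 *		return cache_insert(c, key, gfp);	// actual allocation
 *	}
 */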

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that the given @flags value is the return value from
 * the pairing memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
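
/*
 * Illustrative sketch (not part of the original header): a block-layer
 * path that must not recurse into its own I/O. The save/restore pair
 * nests safely because save returns the previous flag state.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	buf = kmalloc(len, GFP_KERNEL);	// effectively GFP_NOIO here
 *	...
 *	memalloc_noio_restore(noio_flags);
 */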

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that the given @flags value is the return value from
 * the pairing memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
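
/*
 * Illustrative sketch (not part of the original header): a filesystem
 * holding a transaction open marks the section NOFS so that any nested
 * allocation cannot re-enter the filesystem via direct reclaim.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	...start transaction, allocate with plain GFP_KERNEL...
 *	memalloc_nofs_restore(nofs_flags);
 */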
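
/*
 * Mark an implicit __GFP_MEMALLOC scope: PF_MEMALLOC allows access to
 * memory reserves and keeps the task from recursing into direct
 * reclaim. Use sparingly; pair with memalloc_noreclaim_restore().
 */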
static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

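/*
 * Mark an implicit ~__GFP_MOVABLE scope (see current_gfp_context()):
 * allocations must not land in movable pages while they may be pinned
 * long-term. Pair with memalloc_pin_restore().
 */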
static inline unsigned int memalloc_pin_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_PIN;

	current->flags |= PF_MEMALLOC_PIN;
	return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
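
/*
 * Illustrative sketch (not part of the original header): charging an
 * allocation made on behalf of another cgroup, e.g. from a workqueue.
 * The scope nests because the previous value is saved and restored.
 *
 *	struct mem_cgroup *old_memcg = set_active_memcg(memcg);
 *
 *	buf = kmalloc(len, GFP_KERNEL | __GFP_ACCOUNT);	// charged to memcg
 *	set_active_memcg(old_memcg);
 */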

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#ifdef CONFIG_IOMMU_SVA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	mm->pasid = INVALID_IOASID;
}

/* Associate a PASID with an mm_struct: */
static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid)
{
	mm->pasid = pasid;
}

static inline void mm_pasid_drop(struct mm_struct *mm)
{
	if (pasid_valid(mm->pasid)) {
		ioasid_free(mm->pasid);
		mm->pasid = INVALID_IOASID;
	}
}
#else
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) {}
static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif

#endif /* _LINUX_SCHED_MM_H */
