/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
        MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
        MEMCG_SOCK,
        MEMCG_PERCPU_B,
        MEMCG_VMALLOC,
        MEMCG_KMEM,
        MEMCG_ZSWAP_B,
        MEMCG_ZSWAPPED,
        MEMCG_NR_STAT,
};

enum memcg_memory_event {
        MEMCG_LOW,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_OOM_KILL,
        MEMCG_OOM_GROUP_KILL,
        MEMCG_SWAP_HIGH,
        MEMCG_SWAP_MAX,
        MEMCG_SWAP_FAIL,
        MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
        pg_data_t *pgdat;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT     16
#define MEM_CGROUP_ID_MAX       USHRT_MAX

struct mem_cgroup_id {
        int id;
        refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP, it is incremented by the number of pages. The counter is used to
 * trigger periodic events, which is simpler and more robust than using
 * jiffies etc. for that purpose.
 */
enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
        MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu;
struct memcg_vmstats;

struct mem_cgroup_reclaim_iter {
        struct mem_cgroup *position;
        /* scan generation, increased every round-trip */
        unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
        struct rcu_head rcu;
        atomic_long_t *nr_deferred;
        unsigned long *map;
};

struct lruvec_stats_percpu {
        /* Local (CPU and cgroup) state */
        long state[NR_VM_NODE_STAT_ITEMS];

        /* Delta calculation for lockless upward propagation */
        long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
        /* Aggregated (CPU and subtree) state */
        long state[NR_VM_NODE_STAT_ITEMS];

        /* Pending child counts during tree propagation */
        long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
        struct lruvec           lruvec;

        struct lruvec_stats_percpu __percpu     *lruvec_stats_percpu;
        struct lruvec_stats                     lruvec_stats;

        unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

        struct mem_cgroup_reclaim_iter  iter;

        struct shrinker_info __rcu      *shrinker_info;

        struct rb_node          tree_node;      /* RB tree node */
        unsigned long           usage_in_excess;/* Set to the value by which */
                                                /* the soft limit is exceeded*/
        bool                    on_tree;
        struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
                                                /* use container_of        */
};

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below or equal to usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT      4

struct memcg_cgwb_frn {
        u64 bdi_id;                     /* bdi->id of the foreign inode */
        int memcg_id;                   /* memcg->css.id of foreign inode */
        u64 at;                         /* jiffies_64 at the time of dirtying */
        struct wb_completion done;      /* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
        struct percpu_ref refcnt;
        struct mem_cgroup *memcg;
        atomic_t nr_charged_bytes;
        union {
                struct list_head list; /* protected by objcg_lock */
                struct rcu_head rcu;
        };
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;

        /* Private memcg ID. Used to ID objects that outlive the cgroup */
        struct mem_cgroup_id id;

        /* Accounted resources */
        struct page_counter memory;             /* Both v1 & v2 */

        union {
                struct page_counter swap;       /* v2 only */
                struct page_counter memsw;      /* v1 only */
        };

        /* Legacy consumer-oriented counters */
        struct page_counter kmem;               /* v1 only */
        struct page_counter tcpmem;             /* v1 only */

        /* Range enforcement for interrupt charges */
        struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
        unsigned long zswap_max;
#endif

        unsigned long soft_limit;

        /* vmpressure notifications */
        struct vmpressure vmpressure;

        /*
         * Should the OOM killer kill all tasks belonging to this cgroup
         * once it decides to kill one of them?
         */
        bool oom_group;

        /* protected by memcg_oom_lock */
        bool            oom_lock;
        int             under_oom;

        int     swappiness;
        /* OOM-Killer disable */
        int             oom_kill_disable;

        /* memory.events and memory.events.local */
        struct cgroup_file events_file;
        struct cgroup_file events_local_file;

        /* handle for "memory.swap.events" */
        struct cgroup_file swap_events_file;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when the task is moved into
         * this mem_cgroup? And what type of charges should we move?
         */
        unsigned long move_charge_at_immigrate;
        /* taken only while moving_account > 0 */
        spinlock_t              move_lock;
        unsigned long           move_lock_flags;

        CACHELINE_PADDING(_pad1_);

        /* memory.stat */
        struct memcg_vmstats    *vmstats;

        /* memory.events */
        atomic_long_t           memory_events[MEMCG_NR_MEMORY_EVENTS];
        atomic_long_t           memory_events_local[MEMCG_NR_MEMORY_EVENTS];

        unsigned long           socket_pressure;

        /* Legacy tcp memory accounting */
        bool                    tcpmem_active;
        int                     tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
        int kmemcg_id;
        struct obj_cgroup __rcu *objcg;
        /* list of inherited objcgs, protected by objcg_lock */
        struct list_head objcg_list;
#endif

        CACHELINE_PADDING(_pad2_);

        /*
         * set > 0 if pages under this cgroup are moving to another cgroup.
         */
        atomic_t                moving_account;
        struct task_struct      *move_lock_task;

        struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
        struct wb_domain cgwb_domain;
        struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN
        /* per-memcg mm_struct list */
        struct lru_gen_mm_list mm_list;
#endif

        struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * Size of the first charge trial.
 * TODO: it may be necessary to use larger batches on big-iron systems,
 * or to size the batch dynamically based on the workload.
 */
#define MEMCG_CHARGE_BATCH 64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
        /* page->memcg_data is a pointer to an objcgs vector */
        MEMCG_DATA_OBJCGS = (1UL << 0),
        /* page has been accounted as a non-slab kernel page */
        MEMCG_DATA_KMEM = (1UL << 1),
        /* the next bit after the last actual flag */
        __NR_MEMCG_DATA_FLAGS  = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

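/*
 * Illustrative sketch of how the tagged pointer composes (not part of
 * the API): struct mem_cgroup and the objcgs vector are at least
 * word-aligned, so the two low bits of page->memcg_data are free to
 * carry the MEMCG_DATA_* flags above:
 *
 *      unsigned long memcg_data = READ_ONCE(folio->memcg_data);
 *      bool is_objcgs = memcg_data & MEMCG_DATA_OBJCGS;
 *      bool is_kmem = memcg_data & MEMCG_DATA_KMEM;
 *      void *ptr = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 */
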
static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
        return READ_ONCE(objcg->memcg);
}

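/*
 * Illustrative usage sketch (not an API): per the comment above, the
 * returned memcg is only guaranteed to stay alive while the caller
 * holds the RCU read lock (or css_set_lock):
 *
 *      rcu_read_lock();
 *      memcg = obj_cgroup_memcg(objcg);
 *      ... use memcg; reparenting may swap objcg->memcg meanwhile ...
 *      rcu_read_unlock();
 */
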
/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
        unsigned long memcg_data = folio->memcg_data;

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
        unsigned long memcg_data = folio->memcg_data;

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
        VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

        return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the
 * memcg associated with the folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
        if (folio_memcg_kmem(folio))
                return obj_cgroup_memcg(__folio_objcg(folio));
        return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
        return folio_memcg(page_folio(page));
}

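/*
 * Illustrative call-site sketch for folio_memcg(), assuming the folio
 * lock as the chosen stabilizer from the list above:
 *
 *      folio_lock(folio);
 *      memcg = folio_memcg(folio);     // binding is stable here
 *      if (memcg)
 *              ... account against memcg ...
 *      folio_unlock(folio);
 */
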
/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
        unsigned long memcg_data = READ_ONCE(folio->memcg_data);

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (memcg_data & MEMCG_DATA_KMEM) {
                struct obj_cgroup *objcg;

                objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
                return obj_cgroup_memcg(objcg);
        }

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

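/*
 * Illustrative sketch: the lockless variant trades the stabilizers
 * above for an RCU read-side critical section, so the result may only
 * be used inside it:
 *
 *      rcu_read_lock();
 *      memcg = folio_memcg_rcu(folio);
 *      if (memcg)
 *              ... valid only until rcu_read_unlock() ...
 *      rcu_read_unlock();
 */
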
/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. Unlike folio_memcg(), this function can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer, an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the
 * memcg associated with the folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
        /*
         * Because folio->memcg_data might be changed asynchronously
         * for slabs, READ_ONCE() should be used here.
         */
        unsigned long memcg_data = READ_ONCE(folio->memcg_data);

        if (memcg_data & MEMCG_DATA_OBJCGS)
                return NULL;

        if (memcg_data & MEMCG_DATA_KMEM) {
                struct obj_cgroup *objcg;

                objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
                return obj_cgroup_memcg(objcg);
        }

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
        if (PageTail(page))
                return NULL;
        return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
retry:
        memcg = obj_cgroup_memcg(objcg);
        if (unlikely(!css_tryget(&memcg->css)))
                goto retry;
        rcu_read_unlock();

        return memcg;
}

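/*
 * Design note on the retry loop above: css_tryget() fails once the
 * memcg's reference count has dropped to zero during offlining. Since
 * objcg->memcg is reparented before that point, re-reading it in the
 * same RCU section eventually yields an ancestor whose reference can
 * be taken, so the loop terminates.
 */
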
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
        VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
        VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
        return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
        return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
        return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
                                         struct mem_cgroup *memcg,
                                         unsigned long *min,
                                         unsigned long *low)
{
        *min = *low = 0;

        if (mem_cgroup_disabled())
                return;

        /*
         * No reclaim protection is applied to targeted reclaim.
         * We special-case it here because the mem_cgroup_protected
         * calculation is not robust enough to keep the protection
         * invariant for effective values calculated by parallel
         * reclaimers with different reclaim targets. This is especially
         * a problem for tail memcgs (as they have pages on LRU)
         * which would want effective values of 0 for targeted reclaim
         * but a different value for external reclaim.
         *
         * Example
         * Let's have global and A's reclaim in parallel:
         *  |
         *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
         *  |\
         *  | C (low = 1G, usage = 2.5G)
         *  B (low = 1G, usage = 0.5G)
         *
         * For the global reclaim
         * A.elow = A.low
         * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
         * C.elow = min(C.usage, C.low)
         *
         * With the effective values resetting we have A reclaim
         * A.elow = 0
         * B.elow = B.low
         * C.elow = C.low
         *
         * If the global reclaim races with A's reclaim then
         * B.elow = C.elow = 0 (because children_low_usage > A.elow)
         * is possible and reclaiming B would violate the protection.
         */
        if (root == memcg)
                return;

        *min = READ_ONCE(memcg->memory.emin);
        *low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                                     struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
                                          struct mem_cgroup *memcg)
{
        /*
         * The root memcg doesn't account charges, and doesn't support
         * protection. The target memcg's protection is ignored, see
         * mem_cgroup_calculate_protection() and mem_cgroup_protection()
         */
        return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
                memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
                                        struct mem_cgroup *memcg)
{
        if (mem_cgroup_unprotected(target, memcg))
                return false;

        return READ_ONCE(memcg->memory.elow) >=
                page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
                                        struct mem_cgroup *memcg)
{
        if (mem_cgroup_unprotected(target, memcg))
                return false;

        return READ_ONCE(memcg->memory.emin) >=
                page_counter_read(&memcg->memory);
}

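/*
 * Illustrative sketch of how a reclaimer is expected to consume the
 * protection helpers above (simplified; the real logic lives in
 * mm/vmscan.c):
 *
 *      mem_cgroup_calculate_protection(target, memcg);
 *      if (mem_cgroup_below_min(target, memcg)) {
 *              ... hard protection: skip this memcg entirely ...
 *      } else if (mem_cgroup_below_low(target, memcg)) {
 *              ... soft protection: skip unless reclaim is desperate ...
 *      }
 */
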
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary.  If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
                                    gfp_t gfp)
{
        if (mem_cgroup_disabled())
                return 0;
        return __mem_cgroup_charge(folio, mm, gfp);
}

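/*
 * Illustrative charge/uncharge pairing for a freshly allocated folio
 * (error handling abbreviated; folio_alloc()/folio_put() are the
 * standard folio allocation helpers):
 *
 *      folio = folio_alloc(gfp, order);
 *      if (folio && mem_cgroup_charge(folio, mm, gfp)) {
 *              folio_put(folio);       // charge failed, free the folio
 *              folio = NULL;
 *      }
 */
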
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
                                  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = &pgdat->__lruvec;
                goto out;
        }

        if (!memcg)
                memcg = root_mem_cgroup;

        mz = memcg->nodeinfo[pgdat->node_id];
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then reonlined, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}

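/*
 * Illustrative sketch: resolving the lruvec for a memcg on a given
 * node; the fallbacks for a disabled controller or a NULL memcg are
 * handled inside the helper:
 *
 *      struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 */
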
/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on the folio's memcg binding (folio->memcg_data)
 * being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
        struct mem_cgroup *memcg = folio_memcg(folio);

        VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
        return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
                                                unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
        return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
        percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
                                       unsigned long nr)
{
        percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
        percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
        return !memcg || css_tryget(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
        if (memcg)
                css_put(&memcg->css);
}

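/*
 * Illustrative reference pairing: a reference obtained via
 * mem_cgroup_tryget() or get_mem_cgroup_from_mm() must be balanced by
 * mem_cgroup_put() once the pointer is no longer used:
 *
 *      memcg = get_mem_cgroup_from_mm(mm);
 *      ... use memcg ...
 *      mem_cgroup_put(memcg);
 */
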
#define mem_cgroup_from_counter(counter, member)        \
        container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
                          int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return 0;

        return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
        return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        struct mem_cgroup_per_node *mz;

        if (mem_cgroup_disabled())
                return NULL;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                              struct mem_cgroup *root)
{
        if (root == memcg)
                return true;
        return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return true;
        return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                enum lru_list lru, int zone_idx)
{
        struct mem_cgroup_per_node *mz;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
                                struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
        WARN_ON(current->in_user_fault);
        current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
        WARN_ON(!current->in_user_fault);
        current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
                                            struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
        rcu_read_lock();

        if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
                return true;

        rcu_read_unlock();
        return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
        rcu_read_unlock();
}

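/*
 * Illustrative sketch: the trylock above returns true with the RCU
 * read lock held (folio_memcg() results are stable), or false with it
 * already released, so only the success path is unlocked:
 *
 *      if (mem_cgroup_trylock_pages(memcg)) {
 *              ... folio_memcg() is stable here ...
 *              mem_cgroup_unlock_pages();
 *      }
 */
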
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_state(memcg, idx, val);
        local_irq_restore(flags);
}

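/*
 * Note on the convention used throughout these accounting helpers: the
 * double-underscore variants (__mod_memcg_state() etc.) expect the
 * caller to have interrupts disabled already, while the plain wrappers
 * are irq-safe, e.g.:
 *
 *      __mod_memcg_state(memcg, MEMCG_SOCK, 1);        // irqs already off
 *      mod_memcg_state(memcg, MEMCG_SOCK, 1);          // disables irqs itself
 */
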
     984             : static inline void mod_memcg_page_state(struct page *page,
     985             :                                         int idx, int val)
     986             : {
     987             :         struct mem_cgroup *memcg;
     988             : 
     989             :         if (mem_cgroup_disabled())
     990             :                 return;
     991             : 
     992             :         rcu_read_lock();
     993             :         memcg = page_memcg(page);
     994             :         if (memcg)
     995             :                 mod_memcg_state(memcg, idx, val);
     996             :         rcu_read_unlock();
     997             : }
     998             : 
     999             : unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
    1000             : 
    1001             : static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
    1002             :                                               enum node_stat_item idx)
    1003             : {
    1004             :         struct mem_cgroup_per_node *pn;
    1005             :         long x;
    1006             : 
    1007             :         if (mem_cgroup_disabled())
    1008             :                 return node_page_state(lruvec_pgdat(lruvec), idx);
    1009             : 
    1010             :         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
    1011             :         x = READ_ONCE(pn->lruvec_stats.state[idx]);
    1012             : #ifdef CONFIG_SMP
    1013             :         if (x < 0)
    1014             :                 x = 0;
    1015             : #endif
    1016             :         return x;
    1017             : }
    1018             : 
    1019             : static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
    1020             :                                                     enum node_stat_item idx)
    1021             : {
    1022             :         struct mem_cgroup_per_node *pn;
    1023             :         long x = 0;
    1024             :         int cpu;
    1025             : 
    1026             :         if (mem_cgroup_disabled())
    1027             :                 return node_page_state(lruvec_pgdat(lruvec), idx);
    1028             : 
    1029             :         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
    1030             :         for_each_possible_cpu(cpu)
    1031             :                 x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
    1032             : #ifdef CONFIG_SMP
    1033             :         if (x < 0)
    1034             :                 x = 0;
    1035             : #endif
    1036             :         return x;
    1037             : }
    1038             : 
    1039             : void mem_cgroup_flush_stats(void);
    1040             : void mem_cgroup_flush_stats_delayed(void);
    1041             : 
    1042             : void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
    1043             :                               int val);
    1044             : void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
    1045             : 
    1046             : static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1047             :                                          int val)
    1048             : {
    1049             :         unsigned long flags;
    1050             : 
    1051             :         local_irq_save(flags);
    1052             :         __mod_lruvec_kmem_state(p, idx, val);
    1053             :         local_irq_restore(flags);
    1054             : }
    1055             : 
    1056             : static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
    1057             :                                           enum node_stat_item idx, int val)
    1058             : {
    1059             :         unsigned long flags;
    1060             : 
    1061             :         local_irq_save(flags);
    1062             :         __mod_memcg_lruvec_state(lruvec, idx, val);
    1063             :         local_irq_restore(flags);
    1064             : }
    1065             : 
    1066             : void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
    1067             :                           unsigned long count);
    1068             : 
    1069             : static inline void count_memcg_events(struct mem_cgroup *memcg,
    1070             :                                       enum vm_event_item idx,
    1071             :                                       unsigned long count)
    1072             : {
    1073             :         unsigned long flags;
    1074             : 
    1075             :         local_irq_save(flags);
    1076             :         __count_memcg_events(memcg, idx, count);
    1077             :         local_irq_restore(flags);
    1078             : }
    1079             : 
    1080             : static inline void count_memcg_page_event(struct page *page,
    1081             :                                           enum vm_event_item idx)
    1082             : {
    1083             :         struct mem_cgroup *memcg = page_memcg(page);
    1084             : 
    1085             :         if (memcg)
    1086             :                 count_memcg_events(memcg, idx, 1);
    1087             : }
    1088             : 
    1089             : static inline void count_memcg_folio_events(struct folio *folio,
    1090             :                 enum vm_event_item idx, unsigned long nr)
    1091             : {
    1092             :         struct mem_cgroup *memcg = folio_memcg(folio);
    1093             : 
    1094             :         if (memcg)
    1095             :                 count_memcg_events(memcg, idx, nr);
    1096             : }
    1097             : 
    1098             : static inline void count_memcg_event_mm(struct mm_struct *mm,
    1099             :                                         enum vm_event_item idx)
    1100             : {
    1101             :         struct mem_cgroup *memcg;
    1102             : 
    1103             :         if (mem_cgroup_disabled())
    1104             :                 return;
    1105             : 
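                      :         /* mm->owner is RCU-managed; resolve it to a memcg under the lock. */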
    1106             :         rcu_read_lock();
    1107             :         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
    1108             :         if (likely(memcg))
    1109             :                 count_memcg_events(memcg, idx, 1);
    1110             :         rcu_read_unlock();
    1111             : }
    1112             : 
    1113             : static inline void memcg_memory_event(struct mem_cgroup *memcg,
    1114             :                                       enum memcg_memory_event event)
    1115             : {
    1116             :         bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
    1117             :                           event == MEMCG_SWAP_FAIL;
    1118             : 
    1119             :         atomic_long_inc(&memcg->memory_events_local[event]);
    1120             :         if (!swap_event)
    1121             :                 cgroup_file_notify(&memcg->events_local_file);
    1122             : 
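                      :         /*
                      :          * Also count the event in each ancestor below the root,
                      :          * unless on cgroup v1 or on v2 mounted with memory_localevents.
                      :          */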
    1123             :         do {
    1124             :                 atomic_long_inc(&memcg->memory_events[event]);
    1125             :                 if (swap_event)
    1126             :                         cgroup_file_notify(&memcg->swap_events_file);
    1127             :                 else
    1128             :                         cgroup_file_notify(&memcg->events_file);
    1129             : 
    1130             :                 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
    1131             :                         break;
    1132             :                 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
    1133             :                         break;
    1134             :         } while ((memcg = parent_mem_cgroup(memcg)) &&
    1135             :                  !mem_cgroup_is_root(memcg));
    1136             : }
    1137             : 
    1138             : static inline void memcg_memory_event_mm(struct mm_struct *mm,
    1139             :                                          enum memcg_memory_event event)
    1140             : {
    1141             :         struct mem_cgroup *memcg;
    1142             : 
    1143             :         if (mem_cgroup_disabled())
    1144             :                 return;
    1145             : 
    1146             :         rcu_read_lock();
    1147             :         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
    1148             :         if (likely(memcg))
    1149             :                 memcg_memory_event(memcg, event);
    1150             :         rcu_read_unlock();
    1151             : }
    1152             : 
    1153             : void split_page_memcg(struct page *head, unsigned int nr);
    1154             : 
    1155             : unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
    1156             :                                                 gfp_t gfp_mask,
    1157             :                                                 unsigned long *total_scanned);
    1158             : 
    1159             : #else /* CONFIG_MEMCG */
    1160             : 
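                      : /*
                      :  * CONFIG_MEMCG disabled: no-op stubs so callers build without
                      :  * #ifdefs and the compiler can discard the dead branches.
                      :  */
                      : 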
    1161             : #define MEM_CGROUP_ID_SHIFT     0
    1162             : #define MEM_CGROUP_ID_MAX       0
    1163             : 
    1164             : static inline struct mem_cgroup *folio_memcg(struct folio *folio)
    1165             : {
    1166             :         return NULL;
    1167             : }
    1168             : 
    1169             : static inline struct mem_cgroup *page_memcg(struct page *page)
    1170             : {
    1171             :         return NULL;
    1172             : }
    1173             : 
    1174             : static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
    1175             : {
    1176           0 :         WARN_ON_ONCE(!rcu_read_lock_held());
    1177             :         return NULL;
    1178             : }
    1179             : 
    1180             : static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
    1181             : {
    1182             :         return NULL;
    1183             : }
    1184             : 
    1185             : static inline struct mem_cgroup *page_memcg_check(struct page *page)
    1186             : {
    1187             :         return NULL;
    1188             : }
    1189             : 
    1190             : static inline bool folio_memcg_kmem(struct folio *folio)
    1191             : {
    1192             :         return false;
    1193             : }
    1194             : 
    1195             : static inline bool PageMemcgKmem(struct page *page)
    1196             : {
    1197             :         return false;
    1198             : }
    1199             : 
    1200             : static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
    1201             : {
    1202             :         return true;
    1203             : }
    1204             : 
    1205             : static inline bool mem_cgroup_disabled(void)
    1206             : {
    1207             :         return true;
    1208             : }
    1209             : 
    1210             : static inline void memcg_memory_event(struct mem_cgroup *memcg,
    1211             :                                       enum memcg_memory_event event)
    1212             : {
    1213             : }
    1214             : 
    1215             : static inline void memcg_memory_event_mm(struct mm_struct *mm,
    1216             :                                          enum memcg_memory_event event)
    1217             : {
    1218             : }
    1219             : 
    1220             : static inline void mem_cgroup_protection(struct mem_cgroup *root,
    1221             :                                          struct mem_cgroup *memcg,
    1222             :                                          unsigned long *min,
    1223             :                                          unsigned long *low)
    1224             : {
    1225           0 :         *min = *low = 0;
    1226             : }
    1227             : 
    1228             : static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
    1229             :                                                    struct mem_cgroup *memcg)
    1230             : {
    1231             : }
    1232             : 
    1233             : static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
    1234             :                                           struct mem_cgroup *memcg)
    1235             : {
    1236             :         return true;
     1237             : }
                      : 
     1238             : static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
    1239             :                                         struct mem_cgroup *memcg)
    1240             : {
    1241             :         return false;
    1242             : }
    1243             : 
    1244             : static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
    1245             :                                         struct mem_cgroup *memcg)
    1246             : {
    1247             :         return false;
    1248             : }
    1249             : 
    1250             : static inline int mem_cgroup_charge(struct folio *folio,
    1251             :                 struct mm_struct *mm, gfp_t gfp)
    1252             : {
    1253             :         return 0;
    1254             : }
    1255             : 
    1256             : static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
    1257             :                         struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
    1258             : {
    1259             :         return 0;
    1260             : }
    1261             : 
    1262             : static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
    1263             : {
    1264             : }
    1265             : 
    1266             : static inline void mem_cgroup_uncharge(struct folio *folio)
    1267             : {
    1268             : }
    1269             : 
    1270             : static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
    1271             : {
    1272             : }
    1273             : 
    1274             : static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
    1275             : {
    1276             : }
    1277             : 
    1278             : static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
    1279             :                                                struct pglist_data *pgdat)
    1280             : {
    1281           0 :         return &pgdat->__lruvec;
    1282             : }
    1283             : 
    1284             : static inline struct lruvec *folio_lruvec(struct folio *folio)
    1285             : {
    1286           0 :         struct pglist_data *pgdat = folio_pgdat(folio);
    1287             :         return &pgdat->__lruvec;
    1288             : }
    1289             : 
    1290             : static inline
    1291             : void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
    1292             : {
    1293             : }
    1294             : 
    1295             : static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
    1296             : {
    1297             :         return NULL;
    1298             : }
    1299             : 
    1300             : static inline bool mm_match_cgroup(struct mm_struct *mm,
    1301             :                 struct mem_cgroup *memcg)
    1302             : {
    1303             :         return true;
    1304             : }
    1305             : 
    1306             : static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
    1307             : {
    1308             :         return NULL;
    1309             : }
    1310             : 
    1311             : static inline
    1312             : struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
    1313             : {
    1314             :         return NULL;
    1315             : }
    1316             : 
    1317             : static inline void obj_cgroup_put(struct obj_cgroup *objcg)
    1318             : {
    1319             : }
    1320             : 
    1321             : static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
    1322             : {
    1323             :         return true;
    1324             : }
    1325             : 
    1326             : static inline void mem_cgroup_put(struct mem_cgroup *memcg)
    1327             : {
    1328             : }
    1329             : 
    1330             : static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
    1331             : {
    1332             :         struct pglist_data *pgdat = folio_pgdat(folio);
    1333             : 
    1334             :         spin_lock(&pgdat->__lruvec.lru_lock);
    1335             :         return &pgdat->__lruvec;
    1336             : }
    1337             : 
    1338             : static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
    1339             : {
    1340           0 :         struct pglist_data *pgdat = folio_pgdat(folio);
    1341             : 
    1342           0 :         spin_lock_irq(&pgdat->__lruvec.lru_lock);
    1343             :         return &pgdat->__lruvec;
    1344             : }
    1345             : 
    1346             : static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
    1347             :                 unsigned long *flagsp)
    1348             : {
    1349           0 :         struct pglist_data *pgdat = folio_pgdat(folio);
    1350             : 
    1351           0 :         spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
    1352             :         return &pgdat->__lruvec;
    1353             : }
    1354             : 
    1355             : static inline struct mem_cgroup *
    1356             : mem_cgroup_iter(struct mem_cgroup *root,
    1357             :                 struct mem_cgroup *prev,
    1358             :                 struct mem_cgroup_reclaim_cookie *reclaim)
    1359             : {
    1360             :         return NULL;
    1361             : }
    1362             : 
    1363             : static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
    1364             :                                          struct mem_cgroup *prev)
    1365             : {
    1366             : }
    1367             : 
    1368             : static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
    1369             :                 int (*fn)(struct task_struct *, void *), void *arg)
    1370             : {
    1371             :         return 0;
    1372             : }
    1373             : 
    1374             : static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
    1375             : {
    1376             :         return 0;
    1377             : }
    1378             : 
    1379           0 : static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
    1380             : {
    1381           0 :         WARN_ON_ONCE(id);
    1382             :         /* XXX: This should always return root_mem_cgroup */
    1383           0 :         return NULL;
    1384             : }
    1385             : 
    1386             : #ifdef CONFIG_SHRINKER_DEBUG
    1387             : static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
    1388             : {
    1389             :         return 0;
    1390             : }
    1391             : 
    1392             : static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
    1393             : {
    1394             :         return NULL;
    1395             : }
    1396             : #endif
    1397             : 
    1398             : static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
    1399             : {
    1400             :         return NULL;
    1401             : }
    1402             : 
    1403             : static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
    1404             : {
    1405             :         return NULL;
    1406             : }
    1407             : 
    1408             : static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
    1409             : {
    1410             :         return true;
    1411             : }
    1412             : 
    1413             : static inline
    1414             : unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
    1415             :                 enum lru_list lru, int zone_idx)
    1416             : {
    1417             :         return 0;
    1418             : }
    1419             : 
    1420             : static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
    1421             : {
    1422             :         return 0;
    1423             : }
    1424             : 
    1425             : static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
    1426             : {
    1427             :         return 0;
    1428             : }
    1429             : 
    1430             : static inline void
    1431             : mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
    1432             : {
    1433             : }
    1434             : 
    1435             : static inline void
    1436             : mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
    1437             : {
    1438             : }
    1439             : 
    1440             : static inline void lock_page_memcg(struct page *page)
    1441             : {
    1442             : }
    1443             : 
    1444             : static inline void unlock_page_memcg(struct page *page)
    1445             : {
    1446             : }
    1447             : 
    1448             : static inline void folio_memcg_lock(struct folio *folio)
    1449             : {
    1450             : }
    1451             : 
    1452             : static inline void folio_memcg_unlock(struct folio *folio)
    1453             : {
    1454             : }
    1455             : 
    1456             : static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
    1457             : {
    1458             :         /* to match folio_memcg_rcu() */
    1459             :         rcu_read_lock();
    1460             :         return true;
    1461             : }
    1462             : 
    1463             : static inline void mem_cgroup_unlock_pages(void)
    1464             : {
    1465             :         rcu_read_unlock();
    1466             : }
    1467             : 
    1468             : static inline void mem_cgroup_handle_over_high(void)
    1469             : {
    1470             : }
    1471             : 
    1472             : static inline void mem_cgroup_enter_user_fault(void)
    1473             : {
    1474             : }
    1475             : 
    1476             : static inline void mem_cgroup_exit_user_fault(void)
    1477             : {
    1478             : }
    1479             : 
    1480             : static inline bool task_in_memcg_oom(struct task_struct *p)
    1481             : {
    1482             :         return false;
    1483             : }
    1484             : 
    1485             : static inline bool mem_cgroup_oom_synchronize(bool wait)
    1486             : {
    1487             :         return false;
    1488             : }
    1489             : 
    1490             : static inline struct mem_cgroup *mem_cgroup_get_oom_group(
    1491             :         struct task_struct *victim, struct mem_cgroup *oom_domain)
    1492             : {
    1493             :         return NULL;
    1494             : }
    1495             : 
    1496             : static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
    1497             : {
    1498             : }
    1499             : 
    1500             : static inline void __mod_memcg_state(struct mem_cgroup *memcg,
    1501             :                                      int idx,
    1502             :                                      int nr)
    1503             : {
    1504             : }
    1505             : 
    1506             : static inline void mod_memcg_state(struct mem_cgroup *memcg,
    1507             :                                    int idx,
    1508             :                                    int nr)
    1509             : {
    1510             : }
    1511             : 
    1512             : static inline void mod_memcg_page_state(struct page *page,
    1513             :                                         int idx, int val)
    1514             : {
    1515             : }
    1516             : 
    1517             : static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
    1518             : {
    1519             :         return 0;
    1520             : }
    1521             : 
    1522             : static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
    1523             :                                               enum node_stat_item idx)
    1524             : {
    1525           0 :         return node_page_state(lruvec_pgdat(lruvec), idx);
    1526             : }
    1527             : 
    1528             : static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
    1529             :                                                     enum node_stat_item idx)
    1530             : {
    1531             :         return node_page_state(lruvec_pgdat(lruvec), idx);
    1532             : }
    1533             : 
    1534             : static inline void mem_cgroup_flush_stats(void)
    1535             : {
    1536             : }
    1537             : 
    1538             : static inline void mem_cgroup_flush_stats_delayed(void)
    1539             : {
    1540             : }
    1541             : 
    1542             : static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
    1543             :                                             enum node_stat_item idx, int val)
    1544             : {
    1545             : }
    1546             : 
    1547             : static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1548             :                                            int val)
    1549             : {
    1550           0 :         struct page *page = virt_to_head_page(p);
    1551             : 
    1552           0 :         __mod_node_page_state(page_pgdat(page), idx, val);
    1553             : }
    1554             : 
    1555             : static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1556             :                                          int val)
    1557             : {
    1558             :         struct page *page = virt_to_head_page(p);
    1559             : 
    1560             :         mod_node_page_state(page_pgdat(page), idx, val);
    1561             : }
    1562             : 
    1563             : static inline void count_memcg_events(struct mem_cgroup *memcg,
    1564             :                                       enum vm_event_item idx,
    1565             :                                       unsigned long count)
    1566             : {
    1567             : }
    1568             : 
    1569             : static inline void __count_memcg_events(struct mem_cgroup *memcg,
    1570             :                                         enum vm_event_item idx,
    1571             :                                         unsigned long count)
    1572             : {
    1573             : }
    1574             : 
     1575             : static inline void count_memcg_page_event(struct page *page,
     1576             :                                           enum vm_event_item idx)
    1577             : {
    1578             : }
    1579             : 
    1580             : static inline void count_memcg_folio_events(struct folio *folio,
    1581             :                 enum vm_event_item idx, unsigned long nr)
    1582             : {
    1583             : }
    1584             : 
    1585             : static inline
    1586             : void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
    1587             : {
    1588             : }
    1589             : 
    1590             : static inline void split_page_memcg(struct page *head, unsigned int nr)
    1591             : {
    1592             : }
    1593             : 
    1594             : static inline
    1595             : unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
    1596             :                                             gfp_t gfp_mask,
    1597             :                                             unsigned long *total_scanned)
    1598             : {
    1599             :         return 0;
    1600             : }
    1601             : #endif /* CONFIG_MEMCG */
    1602             : 
    1603             : static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
    1604             : {
    1605           0 :         __mod_lruvec_kmem_state(p, idx, 1);
    1606             : }
    1607             : 
    1608             : static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
    1609             : {
    1610           0 :         __mod_lruvec_kmem_state(p, idx, -1);
    1611             : }
    1612             : 
    1613             : static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
    1614             : {
    1615             :         struct mem_cgroup *memcg;
    1616             : 
    1617           0 :         memcg = lruvec_memcg(lruvec);
    1618             :         if (!memcg)
    1619             :                 return NULL;
    1620             :         memcg = parent_mem_cgroup(memcg);
    1621             :         if (!memcg)
    1622             :                 return NULL;
    1623             :         return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
    1624             : }
    1625             : 
    1626             : static inline void unlock_page_lruvec(struct lruvec *lruvec)
    1627             : {
    1628             :         spin_unlock(&lruvec->lru_lock);
    1629             : }
    1630             : 
    1631             : static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
    1632             : {
    1633           0 :         spin_unlock_irq(&lruvec->lru_lock);
    1634             : }
    1635             : 
    1636             : static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
    1637             :                 unsigned long flags)
    1638             : {
    1639           0 :         spin_unlock_irqrestore(&lruvec->lru_lock, flags);
    1640             : }
    1641             : 
    1642             : /* Test requires a stable page->memcg binding, see page_memcg() */
    1643             : static inline bool folio_matches_lruvec(struct folio *folio,
    1644             :                 struct lruvec *lruvec)
    1645             : {
    1646           0 :         return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
    1647             :                lruvec_memcg(lruvec) == folio_memcg(folio);
    1648             : }
    1649             : 
    1650             : /* Don't lock again iff page's lruvec locked */
    1651           0 : static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
    1652             :                 struct lruvec *locked_lruvec)
    1653             : {
    1654           0 :         if (locked_lruvec) {
    1655           0 :                 if (folio_matches_lruvec(folio, locked_lruvec))
    1656             :                         return locked_lruvec;
    1657             : 
    1658           0 :                 unlock_page_lruvec_irq(locked_lruvec);
    1659             :         }
    1660             : 
    1661           0 :         return folio_lruvec_lock_irq(folio);
    1662             : }
    1663             : 
    1664             : /* Don't lock again iff page's lruvec locked */
    1665           0 : static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
    1666             :                 struct lruvec *locked_lruvec, unsigned long *flags)
    1667             : {
    1668           0 :         if (locked_lruvec) {
    1669           0 :                 if (folio_matches_lruvec(folio, locked_lruvec))
    1670             :                         return locked_lruvec;
    1671             : 
    1672           0 :                 unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
    1673             :         }
    1674             : 
    1675           0 :         return folio_lruvec_lock_irqsave(folio, flags);
    1676             : }
    1677             : 
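                      : /*
                      :  * Illustrative sketch (not part of this header): the relock helpers
                      :  * let a caller batch LRU work across folios, taking the lock once
                      :  * per lruvec instead of once per folio. "list" is hypothetical.
                      :  *
                      :  *      struct lruvec *lruvec = NULL;
                      :  *      unsigned long flags;
                      :  *      struct folio *folio;
                      :  *
                      :  *      list_for_each_entry(folio, &list, lru) {
                      :  *              lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
                      :  *                                                   &flags);
                      :  *              ... operate on the folio's LRU state ...
                      :  *      }
                      :  *      if (lruvec)
                      :  *              unlock_page_lruvec_irqrestore(lruvec, flags);
                      :  */
                      : 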
    1678             : #ifdef CONFIG_CGROUP_WRITEBACK
    1679             : 
    1680             : struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
    1681             : void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
    1682             :                          unsigned long *pheadroom, unsigned long *pdirty,
    1683             :                          unsigned long *pwriteback);
    1684             : 
    1685             : void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
    1686             :                                              struct bdi_writeback *wb);
    1687             : 
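                      : /*
                      :  * A dirtied folio is "foreign" when its memcg differs from the
                      :  * memcg owning the writeback context; record such events so
                      :  * writeback can switch the inode to the matching wb.
                      :  */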
    1688             : static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
    1689             :                                                   struct bdi_writeback *wb)
    1690             : {
    1691             :         struct mem_cgroup *memcg;
    1692             : 
    1693             :         if (mem_cgroup_disabled())
    1694             :                 return;
    1695             : 
    1696             :         memcg = folio_memcg(folio);
    1697             :         if (unlikely(memcg && &memcg->css != wb->memcg_css))
    1698             :                 mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
    1699             : }
    1700             : 
    1701             : void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
    1702             : 
    1703             : #else   /* CONFIG_CGROUP_WRITEBACK */
    1704             : 
    1705             : static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
    1706             : {
    1707             :         return NULL;
    1708             : }
    1709             : 
    1710             : static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
    1711             :                                        unsigned long *pfilepages,
    1712             :                                        unsigned long *pheadroom,
    1713             :                                        unsigned long *pdirty,
    1714             :                                        unsigned long *pwriteback)
    1715             : {
    1716             : }
    1717             : 
    1718             : static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
    1719             :                                                   struct bdi_writeback *wb)
    1720             : {
    1721             : }
    1722             : 
    1723             : static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
    1724             : {
    1725             : }
    1726             : 
    1727             : #endif  /* CONFIG_CGROUP_WRITEBACK */
    1728             : 
    1729             : struct sock;
    1730             : bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
    1731             :                              gfp_t gfp_mask);
    1732             : void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
    1733             : #ifdef CONFIG_MEMCG
    1734             : extern struct static_key_false memcg_sockets_enabled_key;
    1735             : #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
    1736             : void mem_cgroup_sk_alloc(struct sock *sk);
    1737             : void mem_cgroup_sk_free(struct sock *sk);
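                      : /*
                      :  * True if this memcg or any ancestor is under socket memory
                      :  * pressure: legacy tcpmem pressure on v1, or a vmpressure window
                      :  * (a jiffies deadline in ->socket_pressure) that has not expired.
                      :  */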
    1738             : static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
    1739             : {
    1740             :         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
    1741             :                 return true;
    1742             :         do {
    1743             :                 if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
    1744             :                         return true;
    1745             :         } while ((memcg = parent_mem_cgroup(memcg)));
    1746             :         return false;
    1747             : }
    1748             : 
    1749             : int alloc_shrinker_info(struct mem_cgroup *memcg);
    1750             : void free_shrinker_info(struct mem_cgroup *memcg);
    1751             : void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
    1752             : void reparent_shrinker_deferred(struct mem_cgroup *memcg);
    1753             : #else
    1754             : #define mem_cgroup_sockets_enabled 0
     1755             : static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
     1756             : static inline void mem_cgroup_sk_free(struct sock *sk) { }
    1757             : static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
    1758             : {
    1759             :         return false;
    1760             : }
    1761             : 
    1762             : static inline void set_shrinker_bit(struct mem_cgroup *memcg,
    1763             :                                     int nid, int shrinker_id)
    1764             : {
    1765             : }
    1766             : #endif
    1767             : 
    1768             : #ifdef CONFIG_MEMCG_KMEM
    1769             : bool mem_cgroup_kmem_disabled(void);
    1770             : int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
    1771             : void __memcg_kmem_uncharge_page(struct page *page, int order);
    1772             : 
    1773             : struct obj_cgroup *get_obj_cgroup_from_current(void);
    1774             : struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
    1775             : 
    1776             : int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
    1777             : void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
    1778             : 
    1779             : extern struct static_key_false memcg_bpf_enabled_key;
    1780             : static inline bool memcg_bpf_enabled(void)
    1781             : {
    1782             :         return static_branch_likely(&memcg_bpf_enabled_key);
    1783             : }
    1784             : 
    1785             : extern struct static_key_false memcg_kmem_online_key;
    1786             : 
    1787             : static inline bool memcg_kmem_online(void)
    1788             : {
    1789             :         return static_branch_likely(&memcg_kmem_online_key);
    1790             : }
    1791             : 
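                      : /*
                      :  * Static-key guarded wrappers: if kmem accounting never came
                      :  * online, the branch is patched out and no charging is done.
                      :  */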
    1792             : static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1793             :                                          int order)
    1794             : {
    1795             :         if (memcg_kmem_online())
    1796             :                 return __memcg_kmem_charge_page(page, gfp, order);
    1797             :         return 0;
    1798             : }
    1799             : 
    1800             : static inline void memcg_kmem_uncharge_page(struct page *page, int order)
    1801             : {
    1802             :         if (memcg_kmem_online())
    1803             :                 __memcg_kmem_uncharge_page(page, order);
    1804             : }
    1805             : 
    1806             : /*
    1807             :  * A helper for accessing memcg's kmem_id, used for getting
    1808             :  * corresponding LRU lists.
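                      :  * (list_lru, for instance, indexes its per-memcg lists by this
                      :  * id; -1 selects the global list.)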
    1809             :  */
    1810             : static inline int memcg_kmem_id(struct mem_cgroup *memcg)
    1811             : {
    1812             :         return memcg ? memcg->kmemcg_id : -1;
    1813             : }
    1814             : 
    1815             : struct mem_cgroup *mem_cgroup_from_obj(void *p);
    1816             : struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
    1817             : 
    1818             : static inline void count_objcg_event(struct obj_cgroup *objcg,
    1819             :                                      enum vm_event_item idx)
    1820             : {
    1821             :         struct mem_cgroup *memcg;
    1822             : 
    1823             :         if (!memcg_kmem_online())
    1824             :                 return;
    1825             : 
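                      :         /* RCU pins the memcg returned by obj_cgroup_memcg(). */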
    1826             :         rcu_read_lock();
    1827             :         memcg = obj_cgroup_memcg(objcg);
    1828             :         count_memcg_events(memcg, idx, 1);
    1829             :         rcu_read_unlock();
    1830             : }
    1831             : 
    1832             : #else
    1833             : static inline bool mem_cgroup_kmem_disabled(void)
    1834             : {
    1835             :         return true;
    1836             : }
    1837             : 
    1838             : static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1839             :                                          int order)
    1840             : {
    1841             :         return 0;
    1842             : }
    1843             : 
    1844             : static inline void memcg_kmem_uncharge_page(struct page *page, int order)
    1845             : {
    1846             : }
    1847             : 
    1848             : static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1849             :                                            int order)
    1850             : {
    1851             :         return 0;
    1852             : }
    1853             : 
    1854             : static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
    1855             : {
    1856             : }
    1857             : 
    1858             : static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
    1859             : {
    1860             :         return NULL;
    1861             : }
    1862             : 
    1863             : static inline bool memcg_bpf_enabled(void)
    1864             : {
    1865             :         return false;
    1866             : }
    1867             : 
    1868             : static inline bool memcg_kmem_online(void)
    1869             : {
    1870             :         return false;
    1871             : }
    1872             : 
    1873             : static inline int memcg_kmem_id(struct mem_cgroup *memcg)
    1874             : {
    1875             :         return -1;
    1876             : }
    1877             : 
    1878             : static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
    1879             : {
    1880             :         return NULL;
    1881             : }
    1882             : 
    1883             : static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
    1884             : {
    1885             :         return NULL;
    1886             : }
    1887             : 
    1888             : static inline void count_objcg_event(struct obj_cgroup *objcg,
    1889             :                                      enum vm_event_item idx)
    1890             : {
    1891             : }
    1892             : 
    1893             : #endif /* CONFIG_MEMCG_KMEM */
    1894             : 
    1895             : #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
    1896             : bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
    1897             : void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
    1898             : void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
    1899             : #else
    1900             : static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
    1901             : {
    1902             :         return true;
     1903             : }
                      : 
     1904             : static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
     1905             :                                            size_t size)
     1906             : {
     1907             : }
                      : 
     1908             : static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
    1909             :                                              size_t size)
    1910             : {
    1911             : }
    1912             : #endif
    1913             : 
    1914             : #endif /* _LINUX_MEMCONTROL_H */

Generated by: LCOV version 1.14