LCOV - code coverage report
Current view: top level - include/linux - memcontrol.h (source / functions)
Test: coverage.info
Date: 2023-07-19 18:55:55
                 Hit    Total    Coverage
  Lines:           0       30      0.0 %
  Functions:       0        4      0.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : /* memcontrol.h - Memory Controller
       3             :  *
       4             :  * Copyright IBM Corporation, 2007
       5             :  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
       6             :  *
       7             :  * Copyright 2007 OpenVZ SWsoft Inc
       8             :  * Author: Pavel Emelianov <xemul@openvz.org>
       9             :  */
      10             : 
      11             : #ifndef _LINUX_MEMCONTROL_H
      12             : #define _LINUX_MEMCONTROL_H
      13             : #include <linux/cgroup.h>
      14             : #include <linux/vm_event_item.h>
      15             : #include <linux/hardirq.h>
      16             : #include <linux/jump_label.h>
      17             : #include <linux/page_counter.h>
      18             : #include <linux/vmpressure.h>
      19             : #include <linux/eventfd.h>
      20             : #include <linux/mm.h>
      21             : #include <linux/vmstat.h>
      22             : #include <linux/writeback.h>
      23             : #include <linux/page-flags.h>
      24             : 
      25             : struct mem_cgroup;
      26             : struct obj_cgroup;
      27             : struct page;
      28             : struct mm_struct;
      29             : struct kmem_cache;
      30             : 
      31             : /* Cgroup-specific page state, on top of universal node page state */
      32             : enum memcg_stat_item {
      33             :         MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
      34             :         MEMCG_SOCK,
      35             :         MEMCG_PERCPU_B,
      36             :         MEMCG_VMALLOC,
      37             :         MEMCG_KMEM,
      38             :         MEMCG_ZSWAP_B,
      39             :         MEMCG_ZSWAPPED,
      40             :         MEMCG_NR_STAT,
      41             : };
      42             : 
      43             : enum memcg_memory_event {
      44             :         MEMCG_LOW,
      45             :         MEMCG_HIGH,
      46             :         MEMCG_MAX,
      47             :         MEMCG_OOM,
      48             :         MEMCG_OOM_KILL,
      49             :         MEMCG_OOM_GROUP_KILL,
      50             :         MEMCG_SWAP_HIGH,
      51             :         MEMCG_SWAP_MAX,
      52             :         MEMCG_SWAP_FAIL,
      53             :         MEMCG_NR_MEMORY_EVENTS,
      54             : };
      55             : 
      56             : struct mem_cgroup_reclaim_cookie {
      57             :         pg_data_t *pgdat;
      58             :         unsigned int generation;
      59             : };
      60             : 
      61             : #ifdef CONFIG_MEMCG
      62             : 
      63             : #define MEM_CGROUP_ID_SHIFT     16
      64             : #define MEM_CGROUP_ID_MAX       USHRT_MAX
      65             : 
      66             : struct mem_cgroup_id {
      67             :         int id;
      68             :         refcount_t ref;
      69             : };
      70             : 
      71             : /*
       72             :  * The per-memcg event counter is incremented on every pagein/pageout. With
       73             :  * THP, it is incremented by the number of pages. This counter is used to
       74             :  * trigger some periodic events. This is straightforward and better than
       75             :  * using jiffies etc. to handle periodic memcg events.
      76             :  */
      77             : enum mem_cgroup_events_target {
      78             :         MEM_CGROUP_TARGET_THRESH,
      79             :         MEM_CGROUP_TARGET_SOFTLIMIT,
      80             :         MEM_CGROUP_NTARGETS,
      81             : };
      82             : 
      83             : struct memcg_vmstats_percpu;
      84             : struct memcg_vmstats;
      85             : 
      86             : struct mem_cgroup_reclaim_iter {
      87             :         struct mem_cgroup *position;
      88             :         /* scan generation, increased every round-trip */
      89             :         unsigned int generation;
      90             : };
      91             : 
      92             : /*
       93             :  * Per-shrinker bitmap and deferred-work counters, indexed by shrinker::id,
       94             :  * for the memcg-aware shrinkers that have elements charged to this memcg.
      95             :  */
      96             : struct shrinker_info {
      97             :         struct rcu_head rcu;
      98             :         atomic_long_t *nr_deferred;
      99             :         unsigned long *map;
     100             :         int map_nr_max;
     101             : };
     102             : 
     103             : struct lruvec_stats_percpu {
     104             :         /* Local (CPU and cgroup) state */
     105             :         long state[NR_VM_NODE_STAT_ITEMS];
     106             : 
     107             :         /* Delta calculation for lockless upward propagation */
     108             :         long state_prev[NR_VM_NODE_STAT_ITEMS];
     109             : };
     110             : 
     111             : struct lruvec_stats {
     112             :         /* Aggregated (CPU and subtree) state */
     113             :         long state[NR_VM_NODE_STAT_ITEMS];
     114             : 
     115             :         /* Pending child counts during tree propagation */
     116             :         long state_pending[NR_VM_NODE_STAT_ITEMS];
     117             : };
     118             : 
     119             : /*
     120             :  * per-node information in memory controller.
     121             :  */
     122             : struct mem_cgroup_per_node {
     123             :         struct lruvec           lruvec;
     124             : 
     125             :         struct lruvec_stats_percpu __percpu     *lruvec_stats_percpu;
     126             :         struct lruvec_stats                     lruvec_stats;
     127             : 
     128             :         unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
     129             : 
     130             :         struct mem_cgroup_reclaim_iter  iter;
     131             : 
     132             :         struct shrinker_info __rcu      *shrinker_info;
     133             : 
     134             :         struct rb_node          tree_node;      /* RB tree node */
     135             :         unsigned long           usage_in_excess;/* Set to the value by which */
     136             :                                                 /* the soft limit is exceeded*/
     137             :         bool                    on_tree;
     138             :         struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
     139             :                                                 /* use container_of        */
     140             : };
     141             : 
     142             : struct mem_cgroup_threshold {
     143             :         struct eventfd_ctx *eventfd;
     144             :         unsigned long threshold;
     145             : };
     146             : 
     147             : /* For threshold */
     148             : struct mem_cgroup_threshold_ary {
      149             :         /* Index of the threshold entry just below or equal to usage. */
     150             :         int current_threshold;
     151             :         /* Size of entries[] */
     152             :         unsigned int size;
     153             :         /* Array of thresholds */
     154             :         struct mem_cgroup_threshold entries[];
     155             : };
     156             : 
     157             : struct mem_cgroup_thresholds {
     158             :         /* Primary thresholds array */
     159             :         struct mem_cgroup_threshold_ary *primary;
     160             :         /*
     161             :          * Spare threshold array.
     162             :          * This is needed to make mem_cgroup_unregister_event() "never fail".
     163             :          * It must be able to store at least primary->size - 1 entries.
     164             :          */
     165             :         struct mem_cgroup_threshold_ary *spare;
     166             : };
     167             : 
     168             : /*
      169             :  * Remember the four most recent foreign writebacks with dirty pages in this
     170             :  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
     171             :  * one in a given round, we're likely to catch it later if it keeps
     172             :  * foreign-dirtying, so a fairly low count should be enough.
     173             :  *
     174             :  * See mem_cgroup_track_foreign_dirty_slowpath() for details.
     175             :  */
     176             : #define MEMCG_CGWB_FRN_CNT      4
     177             : 
     178             : struct memcg_cgwb_frn {
     179             :         u64 bdi_id;                     /* bdi->id of the foreign inode */
     180             :         int memcg_id;                   /* memcg->css.id of foreign inode */
     181             :         u64 at;                         /* jiffies_64 at the time of dirtying */
     182             :         struct wb_completion done;      /* tracks in-flight foreign writebacks */
     183             : };
     184             : 
     185             : /*
     186             :  * Bucket for arbitrarily byte-sized objects charged to a memory
     187             :  * cgroup. The bucket can be reparented in one piece when the cgroup
     188             :  * is destroyed, without having to round up the individual references
     189             :  * of all live memory objects in the wild.
     190             :  */
     191             : struct obj_cgroup {
     192             :         struct percpu_ref refcnt;
     193             :         struct mem_cgroup *memcg;
     194             :         atomic_t nr_charged_bytes;
     195             :         union {
     196             :                 struct list_head list; /* protected by objcg_lock */
     197             :                 struct rcu_head rcu;
     198             :         };
     199             : };
     200             : 
     201             : /*
     202             :  * The memory controller data structure. The memory controller controls both
     203             :  * page cache and RSS per cgroup. We would eventually like to provide
     204             :  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
     205             :  * to help the administrator determine what knobs to tune.
     206             :  */
     207             : struct mem_cgroup {
     208             :         struct cgroup_subsys_state css;
     209             : 
     210             :         /* Private memcg ID. Used to ID objects that outlive the cgroup */
     211             :         struct mem_cgroup_id id;
     212             : 
     213             :         /* Accounted resources */
     214             :         struct page_counter memory;             /* Both v1 & v2 */
     215             : 
     216             :         union {
     217             :                 struct page_counter swap;       /* v2 only */
     218             :                 struct page_counter memsw;      /* v1 only */
     219             :         };
     220             : 
     221             :         /* Legacy consumer-oriented counters */
     222             :         struct page_counter kmem;               /* v1 only */
     223             :         struct page_counter tcpmem;             /* v1 only */
     224             : 
     225             :         /* Range enforcement for interrupt charges */
     226             :         struct work_struct high_work;
     227             : 
     228             : #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
     229             :         unsigned long zswap_max;
     230             : #endif
     231             : 
     232             :         unsigned long soft_limit;
     233             : 
     234             :         /* vmpressure notifications */
     235             :         struct vmpressure vmpressure;
     236             : 
     237             :         /*
      238             :          * Should the OOM killer kill all of this cgroup's tasks if it has to kill one?
     239             :          */
     240             :         bool oom_group;
     241             : 
     242             :         /* protected by memcg_oom_lock */
     243             :         bool            oom_lock;
     244             :         int             under_oom;
     245             : 
     246             :         int     swappiness;
     247             :         /* OOM-Killer disable */
     248             :         int             oom_kill_disable;
     249             : 
     250             :         /* memory.events and memory.events.local */
     251             :         struct cgroup_file events_file;
     252             :         struct cgroup_file events_local_file;
     253             : 
     254             :         /* handle for "memory.swap.events" */
     255             :         struct cgroup_file swap_events_file;
     256             : 
     257             :         /* protect arrays of thresholds */
     258             :         struct mutex thresholds_lock;
     259             : 
     260             :         /* thresholds for memory usage. RCU-protected */
     261             :         struct mem_cgroup_thresholds thresholds;
     262             : 
     263             :         /* thresholds for mem+swap usage. RCU-protected */
     264             :         struct mem_cgroup_thresholds memsw_thresholds;
     265             : 
     266             :         /* For oom notifier event fd */
     267             :         struct list_head oom_notify;
     268             : 
     269             :         /*
     270             :          * Should we move charges of a task when a task is moved into this
     271             :          * mem_cgroup ? And what type of charges should we move ?
     272             :          */
     273             :         unsigned long move_charge_at_immigrate;
     274             :         /* taken only while moving_account > 0 */
     275             :         spinlock_t              move_lock;
     276             :         unsigned long           move_lock_flags;
     277             : 
     278             :         CACHELINE_PADDING(_pad1_);
     279             : 
     280             :         /* memory.stat */
     281             :         struct memcg_vmstats    *vmstats;
     282             : 
     283             :         /* memory.events */
     284             :         atomic_long_t           memory_events[MEMCG_NR_MEMORY_EVENTS];
     285             :         atomic_long_t           memory_events_local[MEMCG_NR_MEMORY_EVENTS];
     286             : 
     287             :         unsigned long           socket_pressure;
     288             : 
     289             :         /* Legacy tcp memory accounting */
     290             :         bool                    tcpmem_active;
     291             :         int                     tcpmem_pressure;
     292             : 
     293             : #ifdef CONFIG_MEMCG_KMEM
     294             :         int kmemcg_id;
     295             :         struct obj_cgroup __rcu *objcg;
     296             :         /* list of inherited objcgs, protected by objcg_lock */
     297             :         struct list_head objcg_list;
     298             : #endif
     299             : 
     300             :         CACHELINE_PADDING(_pad2_);
     301             : 
     302             :         /*
      303             :          * Set > 0 if pages under this cgroup are being moved to another cgroup.
     304             :          */
     305             :         atomic_t                moving_account;
     306             :         struct task_struct      *move_lock_task;
     307             : 
     308             :         struct memcg_vmstats_percpu __percpu *vmstats_percpu;
     309             : 
     310             : #ifdef CONFIG_CGROUP_WRITEBACK
     311             :         struct list_head cgwb_list;
     312             :         struct wb_domain cgwb_domain;
     313             :         struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
     314             : #endif
     315             : 
      316             :         /* List of events which userspace wants to receive */
     317             :         struct list_head event_list;
     318             :         spinlock_t event_list_lock;
     319             : 
     320             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     321             :         struct deferred_split deferred_split_queue;
     322             : #endif
     323             : 
     324             : #ifdef CONFIG_LRU_GEN
     325             :         /* per-memcg mm_struct list */
     326             :         struct lru_gen_mm_list mm_list;
     327             : #endif
     328             : 
     329             :         struct mem_cgroup_per_node *nodeinfo[];
     330             : };
     331             : 
     332             : /*
      333             :  * Size of the first charge trial.
      334             :  * TODO: it may be necessary to use bigger batches on big iron, or to size
      335             :  * this dynamically based on the workload.
     336             :  */
     337             : #define MEMCG_CHARGE_BATCH 64U
     338             : 
     339             : extern struct mem_cgroup *root_mem_cgroup;
     340             : 
     341             : enum page_memcg_data_flags {
     342             :         /* page->memcg_data is a pointer to an objcgs vector */
     343             :         MEMCG_DATA_OBJCGS = (1UL << 0),
     344             :         /* page has been accounted as a non-slab kernel page */
     345             :         MEMCG_DATA_KMEM = (1UL << 1),
     346             :         /* the next bit after the last actual flag */
     347             :         __NR_MEMCG_DATA_FLAGS  = (1UL << 2),
     348             : };
     349             : 
     350             : #define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
     351             : 
     352             : static inline bool folio_memcg_kmem(struct folio *folio);
     353             : 
     354             : /*
      355             :  * After initialization, objcg->memcg always points at a valid memcg, but it
      356             :  * can be atomically swapped to the parent memcg.
     357             :  *
     358             :  * The caller must ensure that the returned memcg won't be released:
     359             :  * e.g. acquire the rcu_read_lock or css_set_lock.
     360             :  */
     361             : static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
     362             : {
     363             :         return READ_ONCE(objcg->memcg);
     364             : }
     365             : 
     366             : /*
     367             :  * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
     368             :  * @folio: Pointer to the folio.
     369             :  *
     370             :  * Returns a pointer to the memory cgroup associated with the folio,
     371             :  * or NULL. This function assumes that the folio is known to have a
     372             :  * proper memory cgroup pointer. It's not safe to call this function
      373             :  * against some types of folios, e.g. slab folios, ex-slab folios or
     374             :  * kmem folios.
     375             :  */
     376             : static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
     377             : {
     378             :         unsigned long memcg_data = folio->memcg_data;
     379             : 
     380             :         VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
     381             :         VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
     382             :         VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
     383             : 
     384             :         return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     385             : }
     386             : 
     387             : /*
     388             :  * __folio_objcg - get the object cgroup associated with a kmem folio.
     389             :  * @folio: Pointer to the folio.
     390             :  *
     391             :  * Returns a pointer to the object cgroup associated with the folio,
     392             :  * or NULL. This function assumes that the folio is known to have a
     393             :  * proper object cgroup pointer. It's not safe to call this function
      394             :  * against some types of folios, e.g. slab folios, ex-slab folios or
     395             :  * LRU folios.
     396             :  */
     397             : static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
     398             : {
     399             :         unsigned long memcg_data = folio->memcg_data;
     400             : 
     401             :         VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
     402             :         VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
     403             :         VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
     404             : 
     405             :         return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     406             : }
     407             : 
     408             : /*
     409             :  * folio_memcg - Get the memory cgroup associated with a folio.
     410             :  * @folio: Pointer to the folio.
     411             :  *
     412             :  * Returns a pointer to the memory cgroup associated with the folio,
     413             :  * or NULL. This function assumes that the folio is known to have a
     414             :  * proper memory cgroup pointer. It's not safe to call this function
      415             :  * against some types of folios, e.g. slab folios or ex-slab folios.
     416             :  *
     417             :  * For a non-kmem folio any of the following ensures folio and memcg binding
     418             :  * stability:
     419             :  *
     420             :  * - the folio lock
     421             :  * - LRU isolation
     422             :  * - lock_page_memcg()
     423             :  * - exclusive reference
     424             :  * - mem_cgroup_trylock_pages()
     425             :  *
      426             :  * For a kmem folio the caller should hold an RCU read lock to keep the
      427             :  * associated memcg from being released.
     428             :  */
     429             : static inline struct mem_cgroup *folio_memcg(struct folio *folio)
     430             : {
     431             :         if (folio_memcg_kmem(folio))
     432             :                 return obj_cgroup_memcg(__folio_objcg(folio));
     433             :         return __folio_memcg(folio);
     434             : }
     435             : 
     436             : static inline struct mem_cgroup *page_memcg(struct page *page)
     437             : {
     438             :         return folio_memcg(page_folio(page));
     439             : }
     440             : 
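The binding-stability rules above are easiest to see in use. A hedged sketch
(not part of this header): the helper name is made up, folio_lock() and
folio_unlock() are assumed from linux/pagemap.h, and css_get()/css_put() come
from linux/cgroup.h, which is already included.

        /* Illustrative only: pin the owning memcg of a folio we can lock. */
        static inline struct mem_cgroup *example_get_folio_memcg(struct folio *folio)
        {
                struct mem_cgroup *memcg;

                folio_lock(folio);              /* one of the stabilizers listed above */
                memcg = folio_memcg(folio);
                if (memcg)
                        css_get(&memcg->css);   /* keep it alive past folio_unlock() */
                folio_unlock(folio);

                return memcg;                   /* caller drops it with css_put() */
        }
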
     441             : /**
     442             :  * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
     443             :  * @folio: Pointer to the folio.
     444             :  *
     445             :  * This function assumes that the folio is known to have a
     446             :  * proper memory cgroup pointer. It's not safe to call this function
      447             :  * against some types of folios, e.g. slab folios or ex-slab folios.
     448             :  *
     449             :  * Return: A pointer to the memory cgroup associated with the folio,
     450             :  * or NULL.
     451             :  */
     452             : static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
     453             : {
     454             :         unsigned long memcg_data = READ_ONCE(folio->memcg_data);
     455             : 
     456             :         VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
     457             :         WARN_ON_ONCE(!rcu_read_lock_held());
     458             : 
     459             :         if (memcg_data & MEMCG_DATA_KMEM) {
     460             :                 struct obj_cgroup *objcg;
     461             : 
     462             :                 objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     463             :                 return obj_cgroup_memcg(objcg);
     464             :         }
     465             : 
     466             :         return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     467             : }
     468             : 
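For contrast with folio_memcg(), a hedged sketch of using the lockless
variant; the helper name is hypothetical, and the point is only the
surrounding RCU read-side critical section that folio_memcg_rcu() requires.

        static inline bool example_folio_in_memcg(struct folio *folio,
                                                  struct mem_cgroup *memcg)
        {
                bool match;

                rcu_read_lock();        /* folio_memcg_rcu() warns without this */
                match = folio_memcg_rcu(folio) == memcg;
                rcu_read_unlock();

                return match;
        }
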
     469             : /*
     470             :  * folio_memcg_check - Get the memory cgroup associated with a folio.
     471             :  * @folio: Pointer to the folio.
     472             :  *
     473             :  * Returns a pointer to the memory cgroup associated with the folio,
      474             :  * or NULL. Unlike folio_memcg(), this function can take any folio
      475             :  * as an argument. It has to be used when it is not known whether a folio
     476             :  * has an associated memory cgroup pointer or an object cgroups vector or
     477             :  * an object cgroup.
     478             :  *
     479             :  * For a non-kmem folio any of the following ensures folio and memcg binding
     480             :  * stability:
     481             :  *
     482             :  * - the folio lock
     483             :  * - LRU isolation
      484             :  * - folio_memcg_lock()
     485             :  * - exclusive reference
     486             :  * - mem_cgroup_trylock_pages()
     487             :  *
      488             :  * For a kmem folio the caller should hold an RCU read lock to keep the
      489             :  * associated memcg from being released.
     490             :  */
     491             : static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
     492             : {
     493             :         /*
     494             :          * Because folio->memcg_data might be changed asynchronously
     495             :          * for slabs, READ_ONCE() should be used here.
     496             :          */
     497             :         unsigned long memcg_data = READ_ONCE(folio->memcg_data);
     498             : 
     499             :         if (memcg_data & MEMCG_DATA_OBJCGS)
     500             :                 return NULL;
     501             : 
     502             :         if (memcg_data & MEMCG_DATA_KMEM) {
     503             :                 struct obj_cgroup *objcg;
     504             : 
     505             :                 objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     506             :                 return obj_cgroup_memcg(objcg);
     507             :         }
     508             : 
     509             :         return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     510             : }
     511             : 
     512             : static inline struct mem_cgroup *page_memcg_check(struct page *page)
     513             : {
     514             :         if (PageTail(page))
     515             :                 return NULL;
     516             :         return folio_memcg_check((struct folio *)page);
     517             : }
     518             : 
     519             : static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
     520             : {
     521             :         struct mem_cgroup *memcg;
     522             : 
     523             :         rcu_read_lock();
     524             : retry:
     525             :         memcg = obj_cgroup_memcg(objcg);
     526             :         if (unlikely(!css_tryget(&memcg->css)))
     527             :                 goto retry;
     528             :         rcu_read_unlock();
     529             : 
     530             :         return memcg;
     531             : }
     532             : 
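get_mem_cgroup_from_objcg() returns with the memcg's css reference elevated
(the css_tryget() above), so every successful call needs a matching put. A
minimal, hypothetical pairing could look like this:

        static inline void example_with_objcg_memcg(struct obj_cgroup *objcg)
        {
                struct mem_cgroup *memcg = get_mem_cgroup_from_objcg(objcg);

                /* ... charge to or inspect memcg here ... */

                css_put(&memcg->css);   /* balance the css_tryget() taken above */
        }
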
     533             : #ifdef CONFIG_MEMCG_KMEM
     534             : /*
     535             :  * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
     536             :  * @folio: Pointer to the folio.
     537             :  *
      538             :  * Checks if the folio has the MemcgKmem flag set. The caller must ensure
     539             :  * that the folio has an associated memory cgroup. It's not safe to call
     540             :  * this function against some types of folios, e.g. slab folios.
     541             :  */
     542             : static inline bool folio_memcg_kmem(struct folio *folio)
     543             : {
     544             :         VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
     545             :         VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
     546             :         return folio->memcg_data & MEMCG_DATA_KMEM;
     547             : }
     548             : 
     549             : 
     550             : #else
     551             : static inline bool folio_memcg_kmem(struct folio *folio)
     552             : {
     553             :         return false;
     554             : }
     555             : 
     556             : #endif
     557             : 
     558             : static inline bool PageMemcgKmem(struct page *page)
     559             : {
     560             :         return folio_memcg_kmem(page_folio(page));
     561             : }
     562             : 
     563             : static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
     564             : {
     565             :         return (memcg == root_mem_cgroup);
     566             : }
     567             : 
     568             : static inline bool mem_cgroup_disabled(void)
     569             : {
     570             :         return !cgroup_subsys_enabled(memory_cgrp_subsys);
     571             : }
     572             : 
     573             : static inline void mem_cgroup_protection(struct mem_cgroup *root,
     574             :                                          struct mem_cgroup *memcg,
     575             :                                          unsigned long *min,
     576             :                                          unsigned long *low)
     577             : {
     578             :         *min = *low = 0;
     579             : 
     580             :         if (mem_cgroup_disabled())
     581             :                 return;
     582             : 
     583             :         /*
      584             :  * There is no reclaim protection applied to targeted (cgroup-internal)
      585             :  * reclaim. We special-case it here because the effective-protection
      586             :  * calculation is not robust enough to keep the protection invariant
      587             :  * for the calculated effective values when parallel reclaimers have
      588             :  * different reclaim targets. This is especially a problem for tail
      589             :  * memcgs (as they have pages on the LRU), which would want effective
      590             :  * values of 0 for targeted reclaim but different values for external
      591             :  * reclaim.
     592             :          *
     593             :          * Example
     594             :          * Let's have global and A's reclaim in parallel:
     595             :          *  |
     596             :          *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
     597             :          *  |\
     598             :          *  | C (low = 1G, usage = 2.5G)
     599             :          *  B (low = 1G, usage = 0.5G)
     600             :          *
     601             :          * For the global reclaim
     602             :          * A.elow = A.low
     603             :          * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
     604             :          * C.elow = min(C.usage, C.low)
     605             :          *
     606             :          * With the effective values resetting we have A reclaim
     607             :          * A.elow = 0
     608             :          * B.elow = B.low
     609             :          * C.elow = C.low
     610             :          *
      611             :  * If the global reclaim races with A's reclaim, then
      612             :  * B.elow = C.elow = 0 (because children_low_usage > A.elow)
      613             :  * is possible, and reclaiming B would violate the protection.
     614             :          *
     615             :          */
     616             :         if (root == memcg)
     617             :                 return;
     618             : 
     619             :         *min = READ_ONCE(memcg->memory.emin);
     620             :         *low = READ_ONCE(memcg->memory.elow);
     621             : }
     622             : 
     623             : void mem_cgroup_calculate_protection(struct mem_cgroup *root,
     624             :                                      struct mem_cgroup *memcg);
     625             : 
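A hedged sketch of how the two calls are typically combined by a reclaimer:
the effective values are (re)computed for the current reclaim root, then read
back. The helper name and the max() policy are illustrative assumptions, not
the kernel's reclaim logic.

        static inline unsigned long example_protection_floor(struct mem_cgroup *root,
                                                              struct mem_cgroup *memcg)
        {
                unsigned long min, low;

                mem_cgroup_calculate_protection(root, memcg);
                mem_cgroup_protection(root, memcg, &min, &low);

                /* memory.min is the hard floor; memory.low is best-effort */
                return max(min, low);
        }
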
     626             : static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
     627             :                                           struct mem_cgroup *memcg)
     628             : {
     629             :         /*
     630             :          * The root memcg doesn't account charges, and doesn't support
     631             :          * protection. The target memcg's protection is ignored, see
     632             :          * mem_cgroup_calculate_protection() and mem_cgroup_protection()
     633             :          */
     634             :         return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
     635             :                 memcg == target;
     636             : }
     637             : 
     638             : static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
     639             :                                         struct mem_cgroup *memcg)
     640             : {
     641             :         if (mem_cgroup_unprotected(target, memcg))
     642             :                 return false;
     643             : 
     644             :         return READ_ONCE(memcg->memory.elow) >=
     645             :                 page_counter_read(&memcg->memory);
     646             : }
     647             : 
     648             : static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
     649             :                                         struct mem_cgroup *memcg)
     650             : {
     651             :         if (mem_cgroup_unprotected(target, memcg))
     652             :                 return false;
     653             : 
     654             :         return READ_ONCE(memcg->memory.emin) >=
     655             :                 page_counter_read(&memcg->memory);
     656             : }
     657             : 
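The two predicates are normally consulted together when deciding whether to
scan a cgroup at all. A rough, editorial sketch, with 'low_skipped' standing
in for the reclaim-state bookkeeping:

        static inline bool example_skip_protected(struct mem_cgroup *target,
                                                  struct mem_cgroup *memcg,
                                                  bool *low_skipped)
        {
                if (mem_cgroup_below_min(target, memcg))
                        return true;            /* hard protection: do not reclaim */

                if (mem_cgroup_below_low(target, memcg)) {
                        *low_skipped = true;    /* may be revisited if reclaim fails */
                        return true;
                }

                return false;
        }
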
     658             : int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
     659             : 
     660             : /**
     661             :  * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
     662             :  * @folio: Folio to charge.
     663             :  * @mm: mm context of the allocating task.
     664             :  * @gfp: Reclaim mode.
     665             :  *
     666             :  * Try to charge @folio to the memcg that @mm belongs to, reclaiming
     667             :  * pages according to @gfp if necessary.  If @mm is NULL, try to
     668             :  * charge to the active memcg.
     669             :  *
     670             :  * Do not use this for folios allocated for swapin.
     671             :  *
     672             :  * Return: 0 on success. Otherwise, an error code is returned.
     673             :  */
     674             : static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
     675             :                                     gfp_t gfp)
     676             : {
     677             :         if (mem_cgroup_disabled())
     678             :                 return 0;
     679             :         return __mem_cgroup_charge(folio, mm, gfp);
     680             : }
     681             : 
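A hedged sketch of the usual charge/uncharge pairing for a newly allocated
folio; the insertion step in the middle is elided and the helper name is
hypothetical.

        static inline int example_charge_new_folio(struct folio *folio,
                                                   struct mm_struct *mm, gfp_t gfp)
        {
                int err = mem_cgroup_charge(folio, mm, gfp);

                if (err)
                        return err;

                /*
                 * ... add the folio to the page cache or map it ...
                 * If that later step fails, the charge is dropped again with
                 * mem_cgroup_uncharge(folio) (defined below).
                 */
                return 0;
        }
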
     682             : int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
     683             :                                   gfp_t gfp, swp_entry_t entry);
     684             : void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
     685             : 
     686             : void __mem_cgroup_uncharge(struct folio *folio);
     687             : 
     688             : /**
     689             :  * mem_cgroup_uncharge - Uncharge a folio.
     690             :  * @folio: Folio to uncharge.
     691             :  *
     692             :  * Uncharge a folio previously charged with mem_cgroup_charge().
     693             :  */
     694             : static inline void mem_cgroup_uncharge(struct folio *folio)
     695             : {
     696             :         if (mem_cgroup_disabled())
     697             :                 return;
     698             :         __mem_cgroup_uncharge(folio);
     699             : }
     700             : 
     701             : void __mem_cgroup_uncharge_list(struct list_head *page_list);
     702             : static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
     703             : {
     704             :         if (mem_cgroup_disabled())
     705             :                 return;
     706             :         __mem_cgroup_uncharge_list(page_list);
     707             : }
     708             : 
     709             : void mem_cgroup_migrate(struct folio *old, struct folio *new);
     710             : 
     711             : /**
     712             :  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
     713             :  * @memcg: memcg of the wanted lruvec
     714             :  * @pgdat: pglist_data
     715             :  *
     716             :  * Returns the lru list vector holding pages for a given @memcg &
     717             :  * @pgdat combination. This can be the node lruvec, if the memory
     718             :  * controller is disabled.
     719             :  */
     720             : static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
     721             :                                                struct pglist_data *pgdat)
     722             : {
     723             :         struct mem_cgroup_per_node *mz;
     724             :         struct lruvec *lruvec;
     725             : 
     726             :         if (mem_cgroup_disabled()) {
     727             :                 lruvec = &pgdat->__lruvec;
     728             :                 goto out;
     729             :         }
     730             : 
     731             :         if (!memcg)
     732             :                 memcg = root_mem_cgroup;
     733             : 
     734             :         mz = memcg->nodeinfo[pgdat->node_id];
     735             :         lruvec = &mz->lruvec;
     736             : out:
     737             :         /*
     738             :          * Since a node can be onlined after the mem_cgroup was created,
     739             :          * we have to be prepared to initialize lruvec->pgdat here;
     740             :          * and if offlined then reonlined, we need to reinitialize it.
     741             :          */
     742             :         if (unlikely(lruvec->pgdat != pgdat))
     743             :                 lruvec->pgdat = pgdat;
     744             :         return lruvec;
     745             : }
     746             : 
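A small sketch of resolving the per-node lruvec for a memcg; NODE_DATA() is
assumed from the mmzone headers and the helper name is made up.

        static inline struct lruvec *example_node_lruvec(struct mem_cgroup *memcg,
                                                         int nid)
        {
                /* falls back to the node's own lruvec if the controller is disabled */
                return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
        }
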
     747             : /**
     748             :  * folio_lruvec - return lruvec for isolating/putting an LRU folio
     749             :  * @folio: Pointer to the folio.
     750             :  *
     751             :  * This function relies on folio->mem_cgroup being stable.
     752             :  */
     753             : static inline struct lruvec *folio_lruvec(struct folio *folio)
     754             : {
     755             :         struct mem_cgroup *memcg = folio_memcg(folio);
     756             : 
     757             :         VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
     758             :         return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
     759             : }
     760             : 
     761             : struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
     762             : 
     763             : struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
     764             : 
     765             : struct lruvec *folio_lruvec_lock(struct folio *folio);
     766             : struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
     767             : struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
     768             :                                                 unsigned long *flags);
     769             : 
     770             : #ifdef CONFIG_DEBUG_VM
     771             : void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
     772             : #else
     773             : static inline
     774             : void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
     775             : {
     776             : }
     777             : #endif
     778             : 
     779             : static inline
     780             : struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
     781             :         return css ? container_of(css, struct mem_cgroup, css) : NULL;
     782             : }
     783             : 
     784             : static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
     785             : {
     786             :         return percpu_ref_tryget(&objcg->refcnt);
     787             : }
     788             : 
     789             : static inline void obj_cgroup_get(struct obj_cgroup *objcg)
     790             : {
     791             :         percpu_ref_get(&objcg->refcnt);
     792             : }
     793             : 
     794             : static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
     795             :                                        unsigned long nr)
     796             : {
     797             :         percpu_ref_get_many(&objcg->refcnt, nr);
     798             : }
     799             : 
     800             : static inline void obj_cgroup_put(struct obj_cgroup *objcg)
     801             : {
     802             :         percpu_ref_put(&objcg->refcnt);
     803             : }
     804             : 
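These wrappers follow the usual percpu_ref pattern. A hedged sketch of pinning
an objcg found under RCU; the helper is hypothetical and the caller is assumed
to already hold rcu_read_lock().

        static inline struct obj_cgroup *example_pin_objcg(struct obj_cgroup *objcg)
        {
                /* pin the objcg before leaving the RCU read-side section */
                if (objcg && !obj_cgroup_tryget(objcg))
                        objcg = NULL;

                return objcg;   /* balanced later with obj_cgroup_put() */
        }
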
     805             : static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
     806             : {
     807             :         return !memcg || css_tryget(&memcg->css);
     808             : }
     809             : 
     810             : static inline void mem_cgroup_put(struct mem_cgroup *memcg)
     811             : {
     812             :         if (memcg)
     813             :                 css_put(&memcg->css);
     814             : }
     815             : 
     816             : #define mem_cgroup_from_counter(counter, member)        \
     817             :         container_of(counter, struct mem_cgroup, member)
     818             : 
     819             : struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
     820             :                                    struct mem_cgroup *,
     821             :                                    struct mem_cgroup_reclaim_cookie *);
     822             : void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
     823             : int mem_cgroup_scan_tasks(struct mem_cgroup *,
     824             :                           int (*)(struct task_struct *, void *), void *);
     825             : 
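mem_cgroup_iter() walks the hierarchy below its first argument and takes css
references internally, so breaking out of the loop early has to go through
mem_cgroup_iter_break(). A sketch of the conventional loop; the skip-the-root
filter is only an example.

        static inline void example_walk_memcg_subtree(struct mem_cgroup *root)
        {
                struct mem_cgroup *memcg;

                for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
                     memcg = mem_cgroup_iter(root, memcg, NULL)) {
                        if (mem_cgroup_is_root(memcg))
                                continue;       /* example filter only */
                        /*
                         * ... per-memcg work; to stop early, call
                         * mem_cgroup_iter_break(root, memcg) and break.
                         */
                }
        }
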
     826             : static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
     827             : {
     828             :         if (mem_cgroup_disabled())
     829             :                 return 0;
     830             : 
     831             :         return memcg->id.id;
     832             : }
     833             : struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
     834             : 
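IDs let objects that can outlive their cgroup (swap entries, for instance)
refer back to it compactly. A hedged sketch of the id-to-memcg direction: the
lookup is only valid under RCU and the result has to be pinned before leaving
the section (helper name hypothetical).

        static inline struct mem_cgroup *example_pin_memcg_by_id(unsigned short id)
        {
                struct mem_cgroup *memcg;

                rcu_read_lock();
                memcg = mem_cgroup_from_id(id);
                if (memcg && !css_tryget(&memcg->css))
                        memcg = NULL;           /* cgroup is going away */
                rcu_read_unlock();

                return memcg;                   /* drop with mem_cgroup_put() */
        }
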
     835             : #ifdef CONFIG_SHRINKER_DEBUG
     836             : static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
     837             : {
     838             :         return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
     839             : }
     840             : 
     841             : struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
     842             : #endif
     843             : 
     844             : static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
     845             : {
     846             :         return mem_cgroup_from_css(seq_css(m));
     847             : }
     848             : 
     849             : static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
     850             : {
     851             :         struct mem_cgroup_per_node *mz;
     852             : 
     853             :         if (mem_cgroup_disabled())
     854             :                 return NULL;
     855             : 
     856             :         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     857             :         return mz->memcg;
     858             : }
     859             : 
     860             : /**
     861             :  * parent_mem_cgroup - find the accounting parent of a memcg
     862             :  * @memcg: memcg whose parent to find
     863             :  *
     864             :  * Returns the parent memcg, or NULL if this is the root or the memory
     865             :  * controller is in legacy no-hierarchy mode.
     866             :  */
     867             : static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
     868             : {
     869             :         return mem_cgroup_from_css(memcg->css.parent);
     870             : }
     871             : 
     872             : static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
     873             :                               struct mem_cgroup *root)
     874             : {
     875             :         if (root == memcg)
     876             :                 return true;
     877             :         return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
     878             : }
     879             : 
     880             : static inline bool mm_match_cgroup(struct mm_struct *mm,
     881             :                                    struct mem_cgroup *memcg)
     882             : {
     883             :         struct mem_cgroup *task_memcg;
     884             :         bool match = false;
     885             : 
     886             :         rcu_read_lock();
     887             :         task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
     888             :         if (task_memcg)
     889             :                 match = mem_cgroup_is_descendant(task_memcg, memcg);
     890             :         rcu_read_unlock();
     891             :         return match;
     892             : }
     893             : 
     894             : struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
     895             : ino_t page_cgroup_ino(struct page *page);
     896             : 
     897             : static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
     898             : {
     899             :         if (mem_cgroup_disabled())
     900             :                 return true;
     901             :         return !!(memcg->css.flags & CSS_ONLINE);
     902             : }
     903             : 
     904             : void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
     905             :                 int zid, int nr_pages);
     906             : 
     907             : static inline
     908             : unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
     909             :                 enum lru_list lru, int zone_idx)
     910             : {
     911             :         struct mem_cgroup_per_node *mz;
     912             : 
     913             :         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     914             :         return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
     915             : }
     916             : 
     917             : void mem_cgroup_handle_over_high(void);
     918             : 
     919             : unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
     920             : 
     921             : unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
     922             : 
     923             : void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
     924             :                                 struct task_struct *p);
     925             : 
     926             : void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
     927             : 
     928             : static inline void mem_cgroup_enter_user_fault(void)
     929             : {
     930             :         WARN_ON(current->in_user_fault);
     931             :         current->in_user_fault = 1;
     932             : }
     933             : 
     934             : static inline void mem_cgroup_exit_user_fault(void)
     935             : {
     936             :         WARN_ON(!current->in_user_fault);
     937             :         current->in_user_fault = 0;
     938             : }
     939             : 
     940             : static inline bool task_in_memcg_oom(struct task_struct *p)
     941             : {
     942             :         return p->memcg_in_oom;
     943             : }
     944             : 
     945             : bool mem_cgroup_oom_synchronize(bool wait);
     946             : struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
     947             :                                             struct mem_cgroup *oom_domain);
     948             : void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
     949             : 
     950             : void folio_memcg_lock(struct folio *folio);
     951             : void folio_memcg_unlock(struct folio *folio);
     952             : void lock_page_memcg(struct page *page);
     953             : void unlock_page_memcg(struct page *page);
     954             : 
     955             : void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
     956             : 
      957             : /* Try to stabilize folio_memcg() for all the pages in a memcg */
     958             : static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
     959             : {
     960             :         rcu_read_lock();
     961             : 
     962             :         if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
     963             :                 return true;
     964             : 
     965             :         rcu_read_unlock();
     966             :         return false;
     967             : }
     968             : 
     969             : static inline void mem_cgroup_unlock_pages(void)
     970             : {
     971             :         rcu_read_unlock();
     972             : }
     973             : 
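As the implementation above shows, a successful trylock returns with the RCU
read lock still held, so it must be paired with mem_cgroup_unlock_pages(); on
failure the RCU lock has already been dropped. An editorial usage sketch:

        static inline void example_scan_with_stable_memcg(struct mem_cgroup *memcg)
        {
                if (!mem_cgroup_trylock_pages(memcg))
                        return;         /* charges are being moved; retry later */

                /* ... folio_memcg() is stable for this memcg's folios here ... */

                mem_cgroup_unlock_pages();
        }
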
     974             : /* idx can be of type enum memcg_stat_item or node_stat_item */
     975             : static inline void mod_memcg_state(struct mem_cgroup *memcg,
     976             :                                    int idx, int val)
     977             : {
     978             :         unsigned long flags;
     979             : 
     980             :         local_irq_save(flags);
     981             :         __mod_memcg_state(memcg, idx, val);
     982             :         local_irq_restore(flags);
     983             : }
     984             : 
     985             : static inline void mod_memcg_page_state(struct page *page,
     986             :                                         int idx, int val)
     987             : {
     988             :         struct mem_cgroup *memcg;
     989             : 
     990             :         if (mem_cgroup_disabled())
     991             :                 return;
     992             : 
     993             :         rcu_read_lock();
     994             :         memcg = page_memcg(page);
     995             :         if (memcg)
     996             :                 mod_memcg_state(memcg, idx, val);
     997             :         rcu_read_unlock();
     998             : }
     999             : 
    1000             : unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
    1001             : 
    1002             : static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
    1003             :                                               enum node_stat_item idx)
    1004             : {
    1005             :         struct mem_cgroup_per_node *pn;
    1006             :         long x;
    1007             : 
    1008             :         if (mem_cgroup_disabled())
    1009             :                 return node_page_state(lruvec_pgdat(lruvec), idx);
    1010             : 
    1011             :         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
    1012             :         x = READ_ONCE(pn->lruvec_stats.state[idx]);
    1013             : #ifdef CONFIG_SMP
    1014             :         if (x < 0)
    1015             :                 x = 0;
    1016             : #endif
    1017             :         return x;
    1018             : }
    1019             : 
    1020             : static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
    1021             :                                                     enum node_stat_item idx)
    1022             : {
    1023             :         struct mem_cgroup_per_node *pn;
    1024             :         long x = 0;
    1025             :         int cpu;
    1026             : 
    1027             :         if (mem_cgroup_disabled())
    1028             :                 return node_page_state(lruvec_pgdat(lruvec), idx);
    1029             : 
    1030             :         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
    1031             :         for_each_possible_cpu(cpu)
    1032             :                 x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
    1033             : #ifdef CONFIG_SMP
    1034             :         if (x < 0)
    1035             :                 x = 0;
    1036             : #endif
    1037             :         return x;
    1038             : }
    1039             : 
    1040             : void mem_cgroup_flush_stats(void);
    1041             : void mem_cgroup_flush_stats_atomic(void);
    1042             : void mem_cgroup_flush_stats_ratelimited(void);
    1043             : 
    1044             : void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
    1045             :                               int val);
    1046             : void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
    1047             : 
    1048             : static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1049             :                                          int val)
    1050             : {
    1051             :         unsigned long flags;
    1052             : 
    1053             :         local_irq_save(flags);
    1054             :         __mod_lruvec_kmem_state(p, idx, val);
    1055             :         local_irq_restore(flags);
    1056             : }
    1057             : 
    1058             : static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
    1059             :                                           enum node_stat_item idx, int val)
    1060             : {
    1061             :         unsigned long flags;
    1062             : 
    1063             :         local_irq_save(flags);
    1064             :         __mod_memcg_lruvec_state(lruvec, idx, val);
    1065             :         local_irq_restore(flags);
    1066             : }
    1067             : 
    1068             : void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
    1069             :                           unsigned long count);
    1070             : 
    1071             : static inline void count_memcg_events(struct mem_cgroup *memcg,
    1072             :                                       enum vm_event_item idx,
    1073             :                                       unsigned long count)
    1074             : {
    1075             :         unsigned long flags;
    1076             : 
    1077             :         local_irq_save(flags);
    1078             :         __count_memcg_events(memcg, idx, count);
    1079             :         local_irq_restore(flags);
    1080             : }
    1081             : 
    1082             : static inline void count_memcg_page_event(struct page *page,
    1083             :                                           enum vm_event_item idx)
    1084             : {
    1085             :         struct mem_cgroup *memcg = page_memcg(page);
    1086             : 
    1087             :         if (memcg)
    1088             :                 count_memcg_events(memcg, idx, 1);
    1089             : }
    1090             : 
    1091             : static inline void count_memcg_folio_events(struct folio *folio,
    1092             :                 enum vm_event_item idx, unsigned long nr)
    1093             : {
    1094             :         struct mem_cgroup *memcg = folio_memcg(folio);
    1095             : 
    1096             :         if (memcg)
    1097             :                 count_memcg_events(memcg, idx, nr);
    1098             : }
    1099             : 
    1100             : static inline void count_memcg_event_mm(struct mm_struct *mm,
    1101             :                                         enum vm_event_item idx)
    1102             : {
    1103             :         struct mem_cgroup *memcg;
    1104             : 
    1105             :         if (mem_cgroup_disabled())
    1106             :                 return;
    1107             : 
    1108             :         rcu_read_lock();
    1109             :         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
    1110             :         if (likely(memcg))
    1111             :                 count_memcg_events(memcg, idx, 1);
    1112             :         rcu_read_unlock();
    1113             : }
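
Editor's sketch (hypothetical call site, not from this header): count_memcg_event_mm() looks up the memcg owning an mm under RCU and counts a single VM event against it. PGMAJFAULT below is an ordinary vm_event_item:

        /* Hypothetical call site: attribute a major fault to the mm's memcg. */
        static void example_note_major_fault(struct mm_struct *mm)
        {
                count_memcg_event_mm(mm, PGMAJFAULT);
        }
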
    1114             : 
    1115             : static inline void memcg_memory_event(struct mem_cgroup *memcg,
    1116             :                                       enum memcg_memory_event event)
    1117             : {
    1118             :         bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
    1119             :                           event == MEMCG_SWAP_FAIL;
    1120             : 
    1121             :         atomic_long_inc(&memcg->memory_events_local[event]);
    1122             :         if (!swap_event)
    1123             :                 cgroup_file_notify(&memcg->events_local_file);
    1124             : 
    1125             :         do {
    1126             :                 atomic_long_inc(&memcg->memory_events[event]);
    1127             :                 if (swap_event)
    1128             :                         cgroup_file_notify(&memcg->swap_events_file);
    1129             :                 else
    1130             :                         cgroup_file_notify(&memcg->events_file);
    1131             : 
    1132             :                 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
    1133             :                         break;
    1134             :                 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
    1135             :                         break;
    1136             :         } while ((memcg = parent_mem_cgroup(memcg)) &&
    1137             :                  !mem_cgroup_is_root(memcg));
    1138             : }
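
Editor's note (sketch only): memcg_memory_event() bumps the cgroup's local counter, then walks toward the root incrementing memory_events and notifying the matching events file at each level, stopping early on the legacy hierarchy or when CGRP_ROOT_MEMORY_LOCAL_EVENTS is set. A minimal hypothetical caller:

        /* Hypothetical call site: record an OOM kill against a victim's memcg. */
        static void example_note_oom_kill(struct mem_cgroup *victim_memcg)
        {
                memcg_memory_event(victim_memcg, MEMCG_OOM_KILL);
        }
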
    1139             : 
    1140             : static inline void memcg_memory_event_mm(struct mm_struct *mm,
    1141             :                                          enum memcg_memory_event event)
    1142             : {
    1143             :         struct mem_cgroup *memcg;
    1144             : 
    1145             :         if (mem_cgroup_disabled())
    1146             :                 return;
    1147             : 
    1148             :         rcu_read_lock();
    1149             :         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
    1150             :         if (likely(memcg))
    1151             :                 memcg_memory_event(memcg, event);
    1152             :         rcu_read_unlock();
    1153             : }
    1154             : 
    1155             : void split_page_memcg(struct page *head, unsigned int nr);
    1156             : 
    1157             : unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
    1158             :                                                 gfp_t gfp_mask,
    1159             :                                                 unsigned long *total_scanned);
    1160             : 
    1161             : #else /* CONFIG_MEMCG */
    1162             : 
    1163             : #define MEM_CGROUP_ID_SHIFT     0
    1164             : #define MEM_CGROUP_ID_MAX       0
    1165             : 
    1166             : static inline struct mem_cgroup *folio_memcg(struct folio *folio)
    1167             : {
    1168             :         return NULL;
    1169             : }
    1170             : 
    1171             : static inline struct mem_cgroup *page_memcg(struct page *page)
    1172             : {
    1173             :         return NULL;
    1174             : }
    1175             : 
    1176             : static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
    1177             : {
    1178           0 :         WARN_ON_ONCE(!rcu_read_lock_held());
    1179             :         return NULL;
    1180             : }
    1181             : 
    1182             : static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
    1183             : {
    1184             :         return NULL;
    1185             : }
    1186             : 
    1187             : static inline struct mem_cgroup *page_memcg_check(struct page *page)
    1188             : {
    1189             :         return NULL;
    1190             : }
    1191             : 
    1192             : static inline bool folio_memcg_kmem(struct folio *folio)
    1193             : {
    1194             :         return false;
    1195             : }
    1196             : 
    1197             : static inline bool PageMemcgKmem(struct page *page)
    1198             : {
    1199             :         return false;
    1200             : }
    1201             : 
    1202             : static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
    1203             : {
    1204             :         return true;
    1205             : }
    1206             : 
    1207             : static inline bool mem_cgroup_disabled(void)
    1208             : {
    1209             :         return true;
    1210             : }
    1211             : 
    1212             : static inline void memcg_memory_event(struct mem_cgroup *memcg,
    1213             :                                       enum memcg_memory_event event)
    1214             : {
    1215             : }
    1216             : 
    1217             : static inline void memcg_memory_event_mm(struct mm_struct *mm,
    1218             :                                          enum memcg_memory_event event)
    1219             : {
    1220             : }
    1221             : 
    1222             : static inline void mem_cgroup_protection(struct mem_cgroup *root,
    1223             :                                          struct mem_cgroup *memcg,
    1224             :                                          unsigned long *min,
    1225             :                                          unsigned long *low)
    1226             : {
    1227           0 :         *min = *low = 0;
    1228             : }
    1229             : 
    1230             : static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
    1231             :                                                    struct mem_cgroup *memcg)
    1232             : {
    1233             : }
    1234             : 
    1235             : static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
    1236             :                                           struct mem_cgroup *memcg)
    1237             : {
    1238             :         return true;
    1239             : }
    1240             : static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
    1241             :                                         struct mem_cgroup *memcg)
    1242             : {
    1243             :         return false;
    1244             : }
    1245             : 
    1246             : static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
    1247             :                                         struct mem_cgroup *memcg)
    1248             : {
    1249             :         return false;
    1250             : }
    1251             : 
    1252             : static inline int mem_cgroup_charge(struct folio *folio,
    1253             :                 struct mm_struct *mm, gfp_t gfp)
    1254             : {
    1255             :         return 0;
    1256             : }
    1257             : 
    1258             : static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
    1259             :                         struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
    1260             : {
    1261             :         return 0;
    1262             : }
    1263             : 
    1264             : static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
    1265             : {
    1266             : }
    1267             : 
    1268             : static inline void mem_cgroup_uncharge(struct folio *folio)
    1269             : {
    1270             : }
    1271             : 
    1272             : static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
    1273             : {
    1274             : }
    1275             : 
    1276             : static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
    1277             : {
    1278             : }
    1279             : 
    1280             : static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
    1281             :                                                struct pglist_data *pgdat)
    1282             : {
    1283           0 :         return &pgdat->__lruvec;
    1284             : }
    1285             : 
    1286             : static inline struct lruvec *folio_lruvec(struct folio *folio)
    1287             : {
    1288           0 :         struct pglist_data *pgdat = folio_pgdat(folio);
    1289             :         return &pgdat->__lruvec;
    1290             : }
    1291             : 
    1292             : static inline
    1293             : void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
    1294             : {
    1295             : }
    1296             : 
    1297             : static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
    1298             : {
    1299             :         return NULL;
    1300             : }
    1301             : 
    1302             : static inline bool mm_match_cgroup(struct mm_struct *mm,
    1303             :                 struct mem_cgroup *memcg)
    1304             : {
    1305             :         return true;
    1306             : }
    1307             : 
    1308             : static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
    1309             : {
    1310             :         return NULL;
    1311             : }
    1312             : 
    1313             : static inline
    1314             : struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
    1315             : {
    1316             :         return NULL;
    1317             : }
    1318             : 
    1319             : static inline void obj_cgroup_put(struct obj_cgroup *objcg)
    1320             : {
    1321             : }
    1322             : 
    1323             : static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
    1324             : {
    1325             :         return true;
    1326             : }
    1327             : 
    1328             : static inline void mem_cgroup_put(struct mem_cgroup *memcg)
    1329             : {
    1330             : }
    1331             : 
    1332             : static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
    1333             : {
    1334             :         struct pglist_data *pgdat = folio_pgdat(folio);
    1335             : 
    1336             :         spin_lock(&pgdat->__lruvec.lru_lock);
    1337             :         return &pgdat->__lruvec;
    1338             : }
    1339             : 
    1340             : static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
    1341             : {
    1342           0 :         struct pglist_data *pgdat = folio_pgdat(folio);
    1343             : 
    1344           0 :         spin_lock_irq(&pgdat->__lruvec.lru_lock);
    1345             :         return &pgdat->__lruvec;
    1346             : }
    1347             : 
    1348             : static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
    1349             :                 unsigned long *flagsp)
    1350             : {
    1351           0 :         struct pglist_data *pgdat = folio_pgdat(folio);
    1352             : 
    1353           0 :         spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
    1354             :         return &pgdat->__lruvec;
    1355             : }
    1356             : 
    1357             : static inline struct mem_cgroup *
    1358             : mem_cgroup_iter(struct mem_cgroup *root,
    1359             :                 struct mem_cgroup *prev,
    1360             :                 struct mem_cgroup_reclaim_cookie *reclaim)
    1361             : {
    1362             :         return NULL;
    1363             : }
    1364             : 
    1365             : static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
    1366             :                                          struct mem_cgroup *prev)
    1367             : {
    1368             : }
    1369             : 
    1370             : static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
    1371             :                 int (*fn)(struct task_struct *, void *), void *arg)
    1372             : {
    1373             :         return 0;
    1374             : }
    1375             : 
    1376             : static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
    1377             : {
    1378             :         return 0;
    1379             : }
    1380             : 
    1381           0 : static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
    1382             : {
    1383           0 :         WARN_ON_ONCE(id);
    1384             :         /* XXX: This should always return root_mem_cgroup */
    1385           0 :         return NULL;
    1386             : }
    1387             : 
    1388             : #ifdef CONFIG_SHRINKER_DEBUG
    1389             : static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
    1390             : {
    1391             :         return 0;
    1392             : }
    1393             : 
    1394             : static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
    1395             : {
    1396             :         return NULL;
    1397             : }
    1398             : #endif
    1399             : 
    1400             : static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
    1401             : {
    1402             :         return NULL;
    1403             : }
    1404             : 
    1405             : static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
    1406             : {
    1407             :         return NULL;
    1408             : }
    1409             : 
    1410             : static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
    1411             : {
    1412             :         return true;
    1413             : }
    1414             : 
    1415             : static inline
    1416             : unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
    1417             :                 enum lru_list lru, int zone_idx)
    1418             : {
    1419             :         return 0;
    1420             : }
    1421             : 
    1422             : static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
    1423             : {
    1424             :         return 0;
    1425             : }
    1426             : 
    1427             : static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
    1428             : {
    1429             :         return 0;
    1430             : }
    1431             : 
    1432             : static inline void
    1433             : mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
    1434             : {
    1435             : }
    1436             : 
    1437             : static inline void
    1438             : mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
    1439             : {
    1440             : }
    1441             : 
    1442             : static inline void lock_page_memcg(struct page *page)
    1443             : {
    1444             : }
    1445             : 
    1446             : static inline void unlock_page_memcg(struct page *page)
    1447             : {
    1448             : }
    1449             : 
    1450             : static inline void folio_memcg_lock(struct folio *folio)
    1451             : {
    1452             : }
    1453             : 
    1454             : static inline void folio_memcg_unlock(struct folio *folio)
    1455             : {
    1456             : }
    1457             : 
    1458             : static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
    1459             : {
    1460             :         /* to match folio_memcg_rcu() */
    1461             :         rcu_read_lock();
    1462             :         return true;
    1463             : }
    1464             : 
    1465             : static inline void mem_cgroup_unlock_pages(void)
    1466             : {
    1467             :         rcu_read_unlock();
    1468             : }
    1469             : 
    1470             : static inline void mem_cgroup_handle_over_high(void)
    1471             : {
    1472             : }
    1473             : 
    1474             : static inline void mem_cgroup_enter_user_fault(void)
    1475             : {
    1476             : }
    1477             : 
    1478             : static inline void mem_cgroup_exit_user_fault(void)
    1479             : {
    1480             : }
    1481             : 
    1482             : static inline bool task_in_memcg_oom(struct task_struct *p)
    1483             : {
    1484             :         return false;
    1485             : }
    1486             : 
    1487             : static inline bool mem_cgroup_oom_synchronize(bool wait)
    1488             : {
    1489             :         return false;
    1490             : }
    1491             : 
    1492             : static inline struct mem_cgroup *mem_cgroup_get_oom_group(
    1493             :         struct task_struct *victim, struct mem_cgroup *oom_domain)
    1494             : {
    1495             :         return NULL;
    1496             : }
    1497             : 
    1498             : static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
    1499             : {
    1500             : }
    1501             : 
    1502             : static inline void __mod_memcg_state(struct mem_cgroup *memcg,
    1503             :                                      int idx,
    1504             :                                      int nr)
    1505             : {
    1506             : }
    1507             : 
    1508             : static inline void mod_memcg_state(struct mem_cgroup *memcg,
    1509             :                                    int idx,
    1510             :                                    int nr)
    1511             : {
    1512             : }
    1513             : 
    1514             : static inline void mod_memcg_page_state(struct page *page,
    1515             :                                         int idx, int val)
    1516             : {
    1517             : }
    1518             : 
    1519             : static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
    1520             : {
    1521             :         return 0;
    1522             : }
    1523             : 
    1524             : static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
    1525             :                                               enum node_stat_item idx)
    1526             : {
    1527           0 :         return node_page_state(lruvec_pgdat(lruvec), idx);
    1528             : }
    1529             : 
    1530             : static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
    1531             :                                                     enum node_stat_item idx)
    1532             : {
    1533             :         return node_page_state(lruvec_pgdat(lruvec), idx);
    1534             : }
    1535             : 
    1536             : static inline void mem_cgroup_flush_stats(void)
    1537             : {
    1538             : }
    1539             : 
    1540             : static inline void mem_cgroup_flush_stats_atomic(void)
    1541             : {
    1542             : }
    1543             : 
    1544             : static inline void mem_cgroup_flush_stats_ratelimited(void)
    1545             : {
    1546             : }
    1547             : 
    1548             : static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
    1549             :                                             enum node_stat_item idx, int val)
    1550             : {
    1551             : }
    1552             : 
    1553             : static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1554             :                                            int val)
    1555             : {
    1556           0 :         struct page *page = virt_to_head_page(p);
    1557             : 
    1558           0 :         __mod_node_page_state(page_pgdat(page), idx, val);
    1559             : }
    1560             : 
    1561             : static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1562             :                                          int val)
    1563             : {
    1564             :         struct page *page = virt_to_head_page(p);
    1565             : 
    1566             :         mod_node_page_state(page_pgdat(page), idx, val);
    1567             : }
    1568             : 
    1569             : static inline void count_memcg_events(struct mem_cgroup *memcg,
    1570             :                                       enum vm_event_item idx,
    1571             :                                       unsigned long count)
    1572             : {
    1573             : }
    1574             : 
    1575             : static inline void __count_memcg_events(struct mem_cgroup *memcg,
    1576             :                                         enum vm_event_item idx,
    1577             :                                         unsigned long count)
    1578             : {
    1579             : }
    1580             : 
    1581             : static inline void count_memcg_page_event(struct page *page,
    1582             :                                           int idx)
    1583             : {
    1584             : }
    1585             : 
    1586             : static inline void count_memcg_folio_events(struct folio *folio,
    1587             :                 enum vm_event_item idx, unsigned long nr)
    1588             : {
    1589             : }
    1590             : 
    1591             : static inline
    1592             : void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
    1593             : {
    1594             : }
    1595             : 
    1596             : static inline void split_page_memcg(struct page *head, unsigned int nr)
    1597             : {
    1598             : }
    1599             : 
    1600             : static inline
    1601             : unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
    1602             :                                             gfp_t gfp_mask,
    1603             :                                             unsigned long *total_scanned)
    1604             : {
    1605             :         return 0;
    1606             : }
    1607             : #endif /* CONFIG_MEMCG */
    1608             : 
    1609             : static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
    1610             : {
    1611           0 :         __mod_lruvec_kmem_state(p, idx, 1);
    1612             : }
    1613             : 
    1614             : static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
    1615             : {
    1616           0 :         __mod_lruvec_kmem_state(p, idx, -1);
    1617             : }
    1618             : 
    1619             : static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
    1620             : {
    1621             :         struct mem_cgroup *memcg;
    1622             : 
    1623           0 :         memcg = lruvec_memcg(lruvec);
    1624             :         if (!memcg)
    1625             :                 return NULL;
    1626             :         memcg = parent_mem_cgroup(memcg);
    1627             :         if (!memcg)
    1628             :                 return NULL;
    1629             :         return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
    1630             : }
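
Editor's sketch (hypothetical helper): parent_lruvec() maps a lruvec to the lruvec of the parent memcg on the same node, returning NULL at the root or when memcg is disabled, so ancestor walks can be written as:

        /* Hypothetical walk: visit every ancestor lruvec on this node. */
        static void example_walk_ancestor_lruvecs(struct lruvec *lruvec)
        {
                struct lruvec *pos;

                for (pos = parent_lruvec(lruvec); pos; pos = parent_lruvec(pos)) {
                        /* e.g. propagate a per-node statistic upwards */
                }
        }
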
    1631             : 
    1632             : static inline void unlock_page_lruvec(struct lruvec *lruvec)
    1633             : {
    1634             :         spin_unlock(&lruvec->lru_lock);
    1635             : }
    1636             : 
    1637             : static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
    1638             : {
    1639           0 :         spin_unlock_irq(&lruvec->lru_lock);
    1640             : }
    1641             : 
    1642             : static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
    1643             :                 unsigned long flags)
    1644             : {
    1645           0 :         spin_unlock_irqrestore(&lruvec->lru_lock, flags);
    1646             : }
    1647             : 
    1648             : /* Test requires a stable page->memcg binding, see page_memcg() */
    1649             : static inline bool folio_matches_lruvec(struct folio *folio,
    1650             :                 struct lruvec *lruvec)
    1651             : {
    1652           0 :         return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
    1653             :                lruvec_memcg(lruvec) == folio_memcg(folio);
    1654             : }
    1655             : 
    1656             : /* Don't lock again iff page's lruvec locked */
    1657           0 : static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
    1658             :                 struct lruvec *locked_lruvec)
    1659             : {
    1660           0 :         if (locked_lruvec) {
    1661           0 :                 if (folio_matches_lruvec(folio, locked_lruvec))
    1662             :                         return locked_lruvec;
    1663             : 
    1664           0 :                 unlock_page_lruvec_irq(locked_lruvec);
    1665             :         }
    1666             : 
    1667           0 :         return folio_lruvec_lock_irq(folio);
    1668             : }
    1669             : 
    1670             : /* Don't lock again iff page's lruvec locked */
    1671           0 : static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
    1672             :                 struct lruvec *locked_lruvec, unsigned long *flags)
    1673             : {
    1674           0 :         if (locked_lruvec) {
    1675           0 :                 if (folio_matches_lruvec(folio, locked_lruvec))
    1676             :                         return locked_lruvec;
    1677             : 
    1678           0 :                 unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
    1679             :         }
    1680             : 
    1681           0 :         return folio_lruvec_lock_irqsave(folio, flags);
    1682             : }
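
Editor's sketch (hypothetical batching loop): the relock helpers keep the currently held lru_lock when the next folio maps to the same lruvec and only drop and re-take it on a change. Everything other than the memcg/lruvec API below is illustrative:

        /* Hypothetical batch: process a private list of folios under lru_lock. */
        static void example_process_folios(struct list_head *folios)
        {
                struct folio *folio, *next;
                struct lruvec *lruvec = NULL;
                unsigned long flags = 0;

                list_for_each_entry_safe(folio, next, folios, lru) {
                        /* Only re-locks when the folio's lruvec changes. */
                        lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
                        /* ... operate on folio while holding lruvec->lru_lock ... */
                }
                if (lruvec)
                        unlock_page_lruvec_irqrestore(lruvec, flags);
        }
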
    1683             : 
    1684             : #ifdef CONFIG_CGROUP_WRITEBACK
    1685             : 
    1686             : struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
    1687             : void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
    1688             :                          unsigned long *pheadroom, unsigned long *pdirty,
    1689             :                          unsigned long *pwriteback);
    1690             : 
    1691             : void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
    1692             :                                              struct bdi_writeback *wb);
    1693             : 
    1694             : static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
    1695             :                                                   struct bdi_writeback *wb)
    1696             : {
    1697             :         struct mem_cgroup *memcg;
    1698             : 
    1699             :         if (mem_cgroup_disabled())
    1700             :                 return;
    1701             : 
    1702             :         memcg = folio_memcg(folio);
    1703             :         if (unlikely(memcg && &memcg->css != wb->memcg_css))
    1704             :                 mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
    1705             : }
    1706             : 
    1707             : void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
    1708             : 
    1709             : #else   /* CONFIG_CGROUP_WRITEBACK */
    1710             : 
    1711             : static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
    1712             : {
    1713             :         return NULL;
    1714             : }
    1715             : 
    1716             : static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
    1717             :                                        unsigned long *pfilepages,
    1718             :                                        unsigned long *pheadroom,
    1719             :                                        unsigned long *pdirty,
    1720             :                                        unsigned long *pwriteback)
    1721             : {
    1722             : }
    1723             : 
    1724             : static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
    1725             :                                                   struct bdi_writeback *wb)
    1726             : {
    1727             : }
    1728             : 
    1729             : static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
    1730             : {
    1731             : }
    1732             : 
    1733             : #endif  /* CONFIG_CGROUP_WRITEBACK */
    1734             : 
    1735             : struct sock;
    1736             : bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
    1737             :                              gfp_t gfp_mask);
    1738             : void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
    1739             : #ifdef CONFIG_MEMCG
    1740             : extern struct static_key_false memcg_sockets_enabled_key;
    1741             : #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
    1742             : void mem_cgroup_sk_alloc(struct sock *sk);
    1743             : void mem_cgroup_sk_free(struct sock *sk);
    1744             : static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
    1745             : {
    1746             :         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
    1747             :                 return true;
    1748             :         do {
    1749             :                 if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
    1750             :                         return true;
    1751             :         } while ((memcg = parent_mem_cgroup(memcg)));
    1752             :         return false;
    1753             : }
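
Editor's sketch (hypothetical networking-side check): on the legacy hierarchy the helper reports the tcpmem pressure flag, and on the default hierarchy it reports true while any ancestor's socket_pressure window has not yet expired. The wrapper below is illustrative; mem_cgroup_sockets_enabled is the static-branch test defined just above:

        /* Hypothetical check: avoid growing buffers while under memcg pressure. */
        static bool example_may_grow_buffers(struct mem_cgroup *memcg)
        {
                if (mem_cgroup_sockets_enabled && memcg &&
                    mem_cgroup_under_socket_pressure(memcg))
                        return false;
                return true;
        }
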
    1754             : 
    1755             : int alloc_shrinker_info(struct mem_cgroup *memcg);
    1756             : void free_shrinker_info(struct mem_cgroup *memcg);
    1757             : void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
    1758             : void reparent_shrinker_deferred(struct mem_cgroup *memcg);
    1759             : #else
    1760             : #define mem_cgroup_sockets_enabled 0
    1761             : static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
    1762             : static inline void mem_cgroup_sk_free(struct sock *sk) { };
    1763             : static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
    1764             : {
    1765             :         return false;
    1766             : }
    1767             : 
    1768             : static inline void set_shrinker_bit(struct mem_cgroup *memcg,
    1769             :                                     int nid, int shrinker_id)
    1770             : {
    1771             : }
    1772             : #endif
    1773             : 
    1774             : #ifdef CONFIG_MEMCG_KMEM
    1775             : bool mem_cgroup_kmem_disabled(void);
    1776             : int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
    1777             : void __memcg_kmem_uncharge_page(struct page *page, int order);
    1778             : 
    1779             : struct obj_cgroup *get_obj_cgroup_from_current(void);
    1780             : struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
    1781             : 
    1782             : int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
    1783             : void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
    1784             : 
    1785             : extern struct static_key_false memcg_bpf_enabled_key;
    1786             : static inline bool memcg_bpf_enabled(void)
    1787             : {
    1788             :         return static_branch_likely(&memcg_bpf_enabled_key);
    1789             : }
    1790             : 
    1791             : extern struct static_key_false memcg_kmem_online_key;
    1792             : 
    1793             : static inline bool memcg_kmem_online(void)
    1794             : {
    1795             :         return static_branch_likely(&memcg_kmem_online_key);
    1796             : }
    1797             : 
    1798             : static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1799             :                                          int order)
    1800             : {
    1801             :         if (memcg_kmem_online())
    1802             :                 return __memcg_kmem_charge_page(page, gfp, order);
    1803             :         return 0;
    1804             : }
    1805             : 
    1806             : static inline void memcg_kmem_uncharge_page(struct page *page, int order)
    1807             : {
    1808             :         if (memcg_kmem_online())
    1809             :                 __memcg_kmem_uncharge_page(page, order);
    1810             : }
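
Editor's sketch (hypothetical helpers): the charge/uncharge wrappers are no-ops while kmem accounting is offline; a successfully charged page is marked so that PageMemcgKmem() and the uncharge path can find its objcg later. alloc_pages()/__free_pages() are the ordinary page allocator entry points; the example functions themselves are not from this header:

        /* Hypothetical: charge an order-0 allocation to the current memcg. */
        static struct page *example_alloc_accounted_page(gfp_t gfp)
        {
                struct page *page = alloc_pages(gfp, 0);

                if (page && memcg_kmem_charge_page(page, gfp, 0)) {
                        __free_pages(page, 0);
                        page = NULL;
                }
                return page;
        }

        /* Hypothetical: release such a page, dropping the charge first. */
        static void example_free_accounted_page(struct page *page)
        {
                memcg_kmem_uncharge_page(page, 0);
                __free_pages(page, 0);
        }
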
    1811             : 
    1812             : /*
    1813             :  * A helper for accessing memcg's kmem_id, used for getting
    1814             :  * corresponding LRU lists.
    1815             :  */
    1816             : static inline int memcg_kmem_id(struct mem_cgroup *memcg)
    1817             : {
    1818             :         return memcg ? memcg->kmemcg_id : -1;
    1819             : }
    1820             : 
    1821             : struct mem_cgroup *mem_cgroup_from_obj(void *p);
    1822             : struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
    1823             : 
    1824             : static inline void count_objcg_event(struct obj_cgroup *objcg,
    1825             :                                      enum vm_event_item idx)
    1826             : {
    1827             :         struct mem_cgroup *memcg;
    1828             : 
    1829             :         if (!memcg_kmem_online())
    1830             :                 return;
    1831             : 
    1832             :         rcu_read_lock();
    1833             :         memcg = obj_cgroup_memcg(objcg);
    1834             :         count_memcg_events(memcg, idx, 1);
    1835             :         rcu_read_unlock();
    1836             : }
    1837             : 
    1838             : #else
    1839             : static inline bool mem_cgroup_kmem_disabled(void)
    1840             : {
    1841             :         return true;
    1842             : }
    1843             : 
    1844             : static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1845             :                                          int order)
    1846             : {
    1847             :         return 0;
    1848             : }
    1849             : 
    1850             : static inline void memcg_kmem_uncharge_page(struct page *page, int order)
    1851             : {
    1852             : }
    1853             : 
    1854             : static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1855             :                                            int order)
    1856             : {
    1857             :         return 0;
    1858             : }
    1859             : 
    1860             : static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
    1861             : {
    1862             : }
    1863             : 
    1864             : static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
    1865             : {
    1866             :         return NULL;
    1867             : }
    1868             : 
    1869             : static inline bool memcg_bpf_enabled(void)
    1870             : {
    1871             :         return false;
    1872             : }
    1873             : 
    1874             : static inline bool memcg_kmem_online(void)
    1875             : {
    1876             :         return false;
    1877             : }
    1878             : 
    1879             : static inline int memcg_kmem_id(struct mem_cgroup *memcg)
    1880             : {
    1881             :         return -1;
    1882             : }
    1883             : 
    1884             : static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
    1885             : {
    1886             :         return NULL;
    1887             : }
    1888             : 
    1889             : static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
    1890             : {
    1891             :         return NULL;
    1892             : }
    1893             : 
    1894             : static inline void count_objcg_event(struct obj_cgroup *objcg,
    1895             :                                      enum vm_event_item idx)
    1896             : {
    1897             : }
    1898             : 
    1899             : #endif /* CONFIG_MEMCG_KMEM */
    1900             : 
    1901             : #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
    1902             : bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
    1903             : void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
    1904             : void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
    1905             : #else
    1906             : static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
    1907             : {
    1908             :         return true;
    1909             : }
    1910             : static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
    1911             :                                            size_t size)
    1912             : {
    1913             : }
    1914             : static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
    1915             :                                              size_t size)
    1916             : {
    1917             : }
    1918             : #endif
    1919             : 
    1920             : #endif /* _LINUX_MEMCONTROL_H */

Generated by: LCOV version 1.14