LCOV - code coverage report
Current view: top level - mm - slab.h (source / functions)
Test: coverage.info
Date: 2023-04-06 08:38:28
Coverage:   Lines: 41 of 61 (67.2 %)   Functions: 2 of 3 (66.7 %)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef MM_SLAB_H
       3             : #define MM_SLAB_H
       4             : /*
       5             :  * Internal slab definitions
       6             :  */
       7             : 
       8             : /* Reuses the bits in struct page */
       9             : struct slab {
      10             :         unsigned long __page_flags;
      11             : 
      12             : #if defined(CONFIG_SLAB)
      13             : 
      14             :         struct kmem_cache *slab_cache;
      15             :         union {
      16             :                 struct {
      17             :                         struct list_head slab_list;
      18             :                         void *freelist; /* array of free object indexes */
      19             :                         void *s_mem;    /* first object */
      20             :                 };
      21             :                 struct rcu_head rcu_head;
      22             :         };
      23             :         unsigned int active;
      24             : 
      25             : #elif defined(CONFIG_SLUB)
      26             : 
      27             :         struct kmem_cache *slab_cache;
      28             :         union {
      29             :                 struct {
      30             :                         union {
      31             :                                 struct list_head slab_list;
      32             : #ifdef CONFIG_SLUB_CPU_PARTIAL
      33             :                                 struct {
      34             :                                         struct slab *next;
      35             :                                         int slabs;      /* Nr of slabs left */
      36             :                                 };
      37             : #endif
      38             :                         };
      39             :                         /* Double-word boundary */
      40             :                         void *freelist;         /* first free object */
      41             :                         union {
      42             :                                 unsigned long counters;
      43             :                                 struct {
      44             :                                         unsigned inuse:16;
      45             :                                         unsigned objects:15;
      46             :                                         unsigned frozen:1;
      47             :                                 };
      48             :                         };
      49             :                 };
      50             :                 struct rcu_head rcu_head;
      51             :         };
      52             :         unsigned int __unused;
      53             : 
      54             : #elif defined(CONFIG_SLOB)
      55             : 
      56             :         struct list_head slab_list;
      57             :         void *__unused_1;
      58             :         void *freelist;         /* first free block */
      59             :         long units;
      60             :         unsigned int __unused_2;
      61             : 
      62             : #else
      63             : #error "Unexpected slab allocator configured"
      64             : #endif
      65             : 
      66             :         atomic_t __page_refcount;
      67             : #ifdef CONFIG_MEMCG
      68             :         unsigned long memcg_data;
      69             : #endif
      70             : };
      71             : 
      72             : #define SLAB_MATCH(pg, sl)                                              \
      73             :         static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
      74             : SLAB_MATCH(flags, __page_flags);
      75             : #ifndef CONFIG_SLOB
      76             : SLAB_MATCH(compound_head, slab_cache);  /* Ensure bit 0 is clear */
      77             : #else
      78             : SLAB_MATCH(compound_head, slab_list);   /* Ensure bit 0 is clear */
      79             : #endif
      80             : SLAB_MATCH(_refcount, __page_refcount);
      81             : #ifdef CONFIG_MEMCG
      82             : SLAB_MATCH(memcg_data, memcg_data);
      83             : #endif
      84             : #undef SLAB_MATCH
      85             : static_assert(sizeof(struct slab) <= sizeof(struct page));
      86             : #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
      87             : static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
      88             : #endif
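/*
 * Editorial note, not part of the original mm/slab.h: the SLAB_MATCH()
 * assertions above pin struct slab to the layout of struct page, because the
 * two structures alias the same memory. As an illustration, the invocation
 * SLAB_MATCH(_refcount, __page_refcount) expands to the check below
 * (restating it here is redundant but harmless).
 */
static_assert(offsetof(struct page, _refcount) ==
              offsetof(struct slab, __page_refcount));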
      89             : 
      90             : /**
      91             :  * folio_slab - Converts from folio to slab.
      92             :  * @folio: The folio.
      93             :  *
      94             :  * Currently struct slab is a different representation of a folio where
      95             :  * folio_test_slab() is true.
      96             :  *
      97             :  * Return: The slab which contains this folio.
      98             :  */
      99             : #define folio_slab(folio)       (_Generic((folio),                      \
     100             :         const struct folio *:   (const struct slab *)(folio),           \
     101             :         struct folio *:         (struct slab *)(folio)))
     102             : 
     103             : /**
     104             :  * slab_folio - The folio allocated for a slab
     105             :  * @slab: The slab.
     106             :  *
      107             :  * Slabs are allocated as folios that contain the individual objects and
      108             :  * use some fields in the first struct page of the folio - those fields are
     109             :  * now accessed by struct slab. It is occasionally necessary to convert back to
     110             :  * a folio in order to communicate with the rest of the mm.  Please use this
     111             :  * helper function instead of casting yourself, as the implementation may change
     112             :  * in the future.
     113             :  */
     114             : #define slab_folio(s)           (_Generic((s),                          \
     115             :         const struct slab *:    (const struct folio *)s,                \
     116             :         struct slab *:          (struct folio *)s))
     117             : 
     118             : /**
     119             :  * page_slab - Converts from first struct page to slab.
     120             :  * @p: The first (either head of compound or single) page of slab.
     121             :  *
     122             :  * A temporary wrapper to convert struct page to struct slab in situations where
      123             :  * we know the page is the compound head, or a single order-0 page.
     124             :  *
     125             :  * Long-term ideally everything would work with struct slab directly or go
     126             :  * through folio to struct slab.
     127             :  *
     128             :  * Return: The slab which contains this page
     129             :  */
     130             : #define page_slab(p)            (_Generic((p),                          \
     131             :         const struct page *:    (const struct slab *)(p),               \
     132             :         struct page *:          (struct slab *)(p)))
     133             : 
     134             : /**
     135             :  * slab_page - The first struct page allocated for a slab
     136             :  * @slab: The slab.
     137             :  *
     138             :  * A convenience wrapper for converting slab to the first struct page of the
     139             :  * underlying folio, to communicate with code not yet converted to folio or
     140             :  * struct slab.
     141             :  */
     142             : #define slab_page(s) folio_page(slab_folio(s), 0)
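/*
 * Editorial sketch, not part of the original mm/slab.h: how the four
 * converters above chain together. slab_conversions_are_identity() is a
 * hypothetical helper used purely for illustration; it relies only on the
 * macros defined above.
 */
static inline bool slab_conversions_are_identity(struct slab *slab)
{
        struct folio *folio = slab_folio(slab); /* slab -> folio */
        struct page *page = slab_page(slab);    /* slab -> head page */

        /* Converting back must land on the same object. */
        return folio_slab(folio) == slab && page_slab(page) == slab;
}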
     143             : 
     144             : /*
     145             :  * If network-based swap is enabled, sl*b must keep track of whether pages
     146             :  * were allocated from pfmemalloc reserves.
     147             :  */
     148             : static inline bool slab_test_pfmemalloc(const struct slab *slab)
     149             : {
     150       17501 :         return folio_test_active((struct folio *)slab_folio(slab));
     151             : }
     152             : 
     153             : static inline void slab_set_pfmemalloc(struct slab *slab)
     154             : {
     155           0 :         folio_set_active(slab_folio(slab));
     156             : }
     157             : 
     158             : static inline void slab_clear_pfmemalloc(struct slab *slab)
     159             : {
     160             :         folio_clear_active(slab_folio(slab));
     161             : }
     162             : 
     163             : static inline void __slab_clear_pfmemalloc(struct slab *slab)
     164             : {
     165        8160 :         __folio_clear_active(slab_folio(slab));
     166             : }
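/*
 * Editorial sketch, not part of the original mm/slab.h: how an allocator
 * records the pfmemalloc state right after allocating the backing folio.
 * This mirrors the pattern used in mm/slub.c; example_note_pfmemalloc() is a
 * hypothetical name used only for illustration.
 */
static inline void example_note_pfmemalloc(struct slab *slab,
                                           struct folio *folio)
{
        /* Remember that the backing pages came from the pfmemalloc reserves. */
        if (folio_is_pfmemalloc(folio))
                slab_set_pfmemalloc(slab);
}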
     167             : 
     168             : static inline void *slab_address(const struct slab *slab)
     169             : {
     170        8600 :         return folio_address(slab_folio(slab));
     171             : }
     172             : 
     173             : static inline int slab_nid(const struct slab *slab)
     174             : {
     175       33257 :         return folio_nid(slab_folio(slab));
     176             : }
     177             : 
     178             : static inline pg_data_t *slab_pgdat(const struct slab *slab)
     179             : {
     180       16760 :         return folio_pgdat(slab_folio(slab));
     181             : }
     182             : 
     183             : static inline struct slab *virt_to_slab(const void *addr)
     184             : {
     185      408338 :         struct folio *folio = virt_to_folio(addr);
     186             : 
     187      408338 :         if (!folio_test_slab(folio))
     188             :                 return NULL;
     189             : 
     190             :         return folio_slab(folio);
     191             : }
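/*
 * Editorial sketch, not part of the original mm/slab.h: a typical consumer of
 * virt_to_slab() combined with the accessors above. example_object_nid() is a
 * hypothetical helper; NUMA_NO_NODE comes from <linux/numa.h>.
 */
static inline int example_object_nid(const void *obj)
{
        struct slab *slab = virt_to_slab(obj);

        /* Not slab-backed memory (e.g. a large kmalloc folio). */
        if (!slab)
                return NUMA_NO_NODE;

        return slab_nid(slab);  /* NUMA node backing the object */
}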
     192             : 
     193             : static inline int slab_order(const struct slab *slab)
     194             : {
     195           0 :         return folio_order((struct folio *)slab_folio(slab));
     196             : }
     197             : 
     198             : static inline size_t slab_size(const struct slab *slab)
     199             : {
     200           0 :         return PAGE_SIZE << slab_order(slab);
     201             : }
     202             : 
     203             : #ifdef CONFIG_SLOB
     204             : /*
     205             :  * Common fields provided in kmem_cache by all slab allocators
      206             :  * This struct is either used directly by the allocator (SLOB),
      207             :  * or the allocator must include definitions for all fields
      208             :  * provided in kmem_cache_common in its own definition of kmem_cache.
      209             :  *
      210             :  * Once we can do anonymous structs (C11 standard) we could put an
      211             :  * anonymous struct definition in these allocators so that the
      212             :  * separate allocations in the kmem_cache structure of SLAB and
      213             :  * SLUB are no longer needed.
     214             :  */
     215             : struct kmem_cache {
     216             :         unsigned int object_size;/* The original size of the object */
     217             :         unsigned int size;      /* The aligned/padded/added on size  */
     218             :         unsigned int align;     /* Alignment as calculated */
     219             :         slab_flags_t flags;     /* Active flags on the slab */
     220             :         const char *name;       /* Slab name for sysfs */
     221             :         int refcount;           /* Use counter */
     222             :         void (*ctor)(void *);   /* Called on object slot creation */
     223             :         struct list_head list;  /* List of all slab caches on the system */
     224             : };
     225             : 
     226             : #endif /* CONFIG_SLOB */
     227             : 
     228             : #ifdef CONFIG_SLAB
     229             : #include <linux/slab_def.h>
     230             : #endif
     231             : 
     232             : #ifdef CONFIG_SLUB
     233             : #include <linux/slub_def.h>
     234             : #endif
     235             : 
     236             : #include <linux/memcontrol.h>
     237             : #include <linux/fault-inject.h>
     238             : #include <linux/kasan.h>
     239             : #include <linux/kmemleak.h>
     240             : #include <linux/random.h>
     241             : #include <linux/sched/mm.h>
     242             : #include <linux/list_lru.h>
     243             : 
     244             : /*
     245             :  * State of the slab allocator.
     246             :  *
     247             :  * This is used to describe the states of the allocator during bootup.
     248             :  * Allocators use this to gradually bootstrap themselves. Most allocators
     249             :  * have the problem that the structures used for managing slab caches are
     250             :  * allocated from slab caches themselves.
     251             :  */
     252             : enum slab_state {
     253             :         DOWN,                   /* No slab functionality yet */
     254             :         PARTIAL,                /* SLUB: kmem_cache_node available */
     255             :         PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
     256             :         UP,                     /* Slab caches usable but not all extras yet */
     257             :         FULL                    /* Everything is working */
     258             : };
     259             : 
     260             : extern enum slab_state slab_state;
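/*
 * Editorial note, not part of the original mm/slab.h: the bootstrap states
 * above are consumed through simple threshold checks; slab_is_available() in
 * mm/slab_common.c follows this pattern (restated here for illustration).
 */
static inline bool example_slab_is_available(void)
{
        return slab_state >= UP;        /* caches usable, extras may be missing */
}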
     261             : 
     262             : /* The slab cache mutex protects the management structures during changes */
     263             : extern struct mutex slab_mutex;
     264             : 
     265             : /* The list of all slab caches on the system */
     266             : extern struct list_head slab_caches;
     267             : 
     268             : /* The slab cache that manages slab cache information */
     269             : extern struct kmem_cache *kmem_cache;
     270             : 
     271             : /* A table of kmalloc cache names and sizes */
     272             : extern const struct kmalloc_info_struct {
     273             :         const char *name[NR_KMALLOC_TYPES];
     274             :         unsigned int size;
     275             : } kmalloc_info[];
     276             : 
     277             : #ifndef CONFIG_SLOB
     278             : /* Kmalloc array related functions */
     279             : void setup_kmalloc_cache_index_table(void);
     280             : void create_kmalloc_caches(slab_flags_t);
     281             : 
      282             : /* Find the kmalloc slab corresponding to a certain size */
     283             : struct kmem_cache *kmalloc_slab(size_t, gfp_t);
     284             : 
     285             : void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
     286             :                               int node, size_t orig_size,
     287             :                               unsigned long caller);
     288             : void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
     289             : #endif
     290             : 
     291             : gfp_t kmalloc_fix_flags(gfp_t flags);
     292             : 
     293             : /* Functions provided by the slab allocators */
     294             : int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
     295             : 
     296             : struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
     297             :                         slab_flags_t flags, unsigned int useroffset,
     298             :                         unsigned int usersize);
     299             : extern void create_boot_cache(struct kmem_cache *, const char *name,
     300             :                         unsigned int size, slab_flags_t flags,
     301             :                         unsigned int useroffset, unsigned int usersize);
     302             : 
     303             : int slab_unmergeable(struct kmem_cache *s);
     304             : struct kmem_cache *find_mergeable(unsigned size, unsigned align,
     305             :                 slab_flags_t flags, const char *name, void (*ctor)(void *));
     306             : #ifndef CONFIG_SLOB
     307             : struct kmem_cache *
     308             : __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
     309             :                    slab_flags_t flags, void (*ctor)(void *));
     310             : 
     311             : slab_flags_t kmem_cache_flags(unsigned int object_size,
     312             :         slab_flags_t flags, const char *name);
     313             : #else
     314             : static inline struct kmem_cache *
     315             : __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
     316             :                    slab_flags_t flags, void (*ctor)(void *))
     317             : { return NULL; }
     318             : 
     319             : static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
     320             :         slab_flags_t flags, const char *name)
     321             : {
     322             :         return flags;
     323             : }
     324             : #endif
     325             : 
     326             : static inline bool is_kmalloc_cache(struct kmem_cache *s)
     327             : {
     328             : #ifndef CONFIG_SLOB
     329             :         return (s->flags & SLAB_KMALLOC);
     330             : #else
     331             :         return false;
     332             : #endif
     333             : }
     334             : 
     335             : /* Legal flag mask for kmem_cache_create(), for various configurations */
     336             : #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
     337             :                          SLAB_CACHE_DMA32 | SLAB_PANIC | \
      338             :                          SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)
     339             : 
     340             : #if defined(CONFIG_DEBUG_SLAB)
     341             : #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
     342             : #elif defined(CONFIG_SLUB_DEBUG)
     343             : #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
     344             :                           SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
     345             : #else
     346             : #define SLAB_DEBUG_FLAGS (0)
     347             : #endif
     348             : 
     349             : #if defined(CONFIG_SLAB)
     350             : #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
     351             :                           SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
     352             :                           SLAB_ACCOUNT)
     353             : #elif defined(CONFIG_SLUB)
     354             : #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
     355             :                           SLAB_TEMPORARY | SLAB_ACCOUNT | \
     356             :                           SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
     357             : #else
     358             : #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
     359             : #endif
     360             : 
     361             : /* Common flags available with current configuration */
     362             : #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
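/*
 * Editorial sketch, not part of the original mm/slab.h: cache creation masks
 * caller-supplied flags with CACHE_CREATE_MASK so that flags unsupported by
 * the current configuration are dropped. This is an illustrative paraphrase
 * of what mm/slab_common.c does, not the exact in-tree code.
 */
static inline slab_flags_t example_sanitize_create_flags(slab_flags_t flags)
{
        return flags & CACHE_CREATE_MASK;
}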
     363             : 
     364             : /* Common flags permitted for kmem_cache_create */
     365             : #define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
     366             :                               SLAB_RED_ZONE | \
     367             :                               SLAB_POISON | \
     368             :                               SLAB_STORE_USER | \
     369             :                               SLAB_TRACE | \
     370             :                               SLAB_CONSISTENCY_CHECKS | \
     371             :                               SLAB_MEM_SPREAD | \
     372             :                               SLAB_NOLEAKTRACE | \
     373             :                               SLAB_RECLAIM_ACCOUNT | \
     374             :                               SLAB_TEMPORARY | \
     375             :                               SLAB_ACCOUNT | \
     376             :                               SLAB_KMALLOC | \
     377             :                               SLAB_NO_USER_FLAGS)
     378             : 
     379             : bool __kmem_cache_empty(struct kmem_cache *);
     380             : int __kmem_cache_shutdown(struct kmem_cache *);
     381             : void __kmem_cache_release(struct kmem_cache *);
     382             : int __kmem_cache_shrink(struct kmem_cache *);
     383             : void slab_kmem_cache_release(struct kmem_cache *);
     384             : 
     385             : struct seq_file;
     386             : struct file;
     387             : 
     388             : struct slabinfo {
     389             :         unsigned long active_objs;
     390             :         unsigned long num_objs;
     391             :         unsigned long active_slabs;
     392             :         unsigned long num_slabs;
     393             :         unsigned long shared_avail;
     394             :         unsigned int limit;
     395             :         unsigned int batchcount;
     396             :         unsigned int shared;
     397             :         unsigned int objects_per_slab;
     398             :         unsigned int cache_order;
     399             : };
     400             : 
     401             : void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
     402             : void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
     403             : ssize_t slabinfo_write(struct file *file, const char __user *buffer,
     404             :                        size_t count, loff_t *ppos);
     405             : 
     406             : static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
     407             : {
     408       16760 :         return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
     409       16760 :                 NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
     410             : }
     411             : 
     412             : #ifdef CONFIG_SLUB_DEBUG
     413             : #ifdef CONFIG_SLUB_DEBUG_ON
     414             : DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
     415             : #else
     416             : DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
     417             : #endif
     418             : extern void print_tracking(struct kmem_cache *s, void *object);
     419             : long validate_slab_cache(struct kmem_cache *s);
     420             : static inline bool __slub_debug_enabled(void)
     421             : {
     422     1639128 :         return static_branch_unlikely(&slub_debug_enabled);
     423             : }
     424             : #else
     425             : static inline void print_tracking(struct kmem_cache *s, void *object)
     426             : {
     427             : }
     428             : static inline bool __slub_debug_enabled(void)
     429             : {
     430             :         return false;
     431             : }
     432             : #endif
     433             : 
     434             : /*
     435             :  * Returns true if any of the specified slub_debug flags is enabled for the
     436             :  * cache. Use only for flags parsed by setup_slub_debug() as it also enables
     437             :  * the static key.
     438             :  */
     439             : static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
     440             : {
     441             :         if (IS_ENABLED(CONFIG_SLUB_DEBUG))
     442             :                 VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
     443     1639127 :         if (__slub_debug_enabled())
     444           0 :                 return s->flags & flags;
     445             :         return false;
     446             : }
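/*
 * Editorial sketch, not part of the original mm/slab.h: a typical caller of
 * kmem_cache_debug_flags(). Only flags covered by SLAB_DEBUG_FLAGS may be
 * passed, otherwise the VM_WARN_ON_ONCE() above fires. The helper name is
 * hypothetical; cache_from_obj() further down uses the same pattern with
 * SLAB_CONSISTENCY_CHECKS.
 */
static inline bool example_cache_has_red_zone(struct kmem_cache *s)
{
        return kmem_cache_debug_flags(s, SLAB_RED_ZONE);
}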
     447             : 
     448             : #ifdef CONFIG_MEMCG_KMEM
     449             : /*
     450             :  * slab_objcgs - get the object cgroups vector associated with a slab
     451             :  * @slab: a pointer to the slab struct
     452             :  *
     453             :  * Returns a pointer to the object cgroups vector associated with the slab,
     454             :  * or NULL if no such vector has been associated yet.
     455             :  */
     456             : static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
     457             : {
     458             :         unsigned long memcg_data = READ_ONCE(slab->memcg_data);
     459             : 
     460             :         VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
     461             :                                                         slab_page(slab));
     462             :         VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));
     463             : 
     464             :         return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     465             : }
     466             : 
     467             : int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
     468             :                                  gfp_t gfp, bool new_slab);
     469             : void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
     470             :                      enum node_stat_item idx, int nr);
     471             : 
     472             : static inline void memcg_free_slab_cgroups(struct slab *slab)
     473             : {
     474             :         kfree(slab_objcgs(slab));
     475             :         slab->memcg_data = 0;
     476             : }
     477             : 
     478             : static inline size_t obj_full_size(struct kmem_cache *s)
     479             : {
     480             :         /*
     481             :          * For each accounted object there is an extra space which is used
     482             :          * to store obj_cgroup membership. Charge it too.
     483             :          */
     484             :         return s->size + sizeof(struct obj_cgroup *);
     485             : }
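/*
 * Editorial note, not part of the original mm/slab.h: as a worked example, a
 * cache with s->size == 256 on a 64-bit kernel charges
 * 256 + sizeof(struct obj_cgroup *) == 264 bytes per accounted object.
 */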
     486             : 
     487             : /*
     488             :  * Returns false if the allocation should fail.
     489             :  */
     490             : static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
     491             :                                              struct list_lru *lru,
     492             :                                              struct obj_cgroup **objcgp,
     493             :                                              size_t objects, gfp_t flags)
     494             : {
     495             :         struct obj_cgroup *objcg;
     496             : 
     497             :         if (!memcg_kmem_online())
     498             :                 return true;
     499             : 
     500             :         if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
     501             :                 return true;
     502             : 
     503             :         objcg = get_obj_cgroup_from_current();
     504             :         if (!objcg)
     505             :                 return true;
     506             : 
     507             :         if (lru) {
     508             :                 int ret;
     509             :                 struct mem_cgroup *memcg;
     510             : 
     511             :                 memcg = get_mem_cgroup_from_objcg(objcg);
     512             :                 ret = memcg_list_lru_alloc(memcg, lru, flags);
     513             :                 css_put(&memcg->css);
     514             : 
     515             :                 if (ret)
     516             :                         goto out;
     517             :         }
     518             : 
     519             :         if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
     520             :                 goto out;
     521             : 
     522             :         *objcgp = objcg;
     523             :         return true;
     524             : out:
     525             :         obj_cgroup_put(objcg);
     526             :         return false;
     527             : }
     528             : 
     529             : static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
     530             :                                               struct obj_cgroup *objcg,
     531             :                                               gfp_t flags, size_t size,
     532             :                                               void **p)
     533             : {
     534             :         struct slab *slab;
     535             :         unsigned long off;
     536             :         size_t i;
     537             : 
     538             :         if (!memcg_kmem_online() || !objcg)
     539             :                 return;
     540             : 
     541             :         for (i = 0; i < size; i++) {
     542             :                 if (likely(p[i])) {
     543             :                         slab = virt_to_slab(p[i]);
     544             : 
     545             :                         if (!slab_objcgs(slab) &&
     546             :                             memcg_alloc_slab_cgroups(slab, s, flags,
     547             :                                                          false)) {
     548             :                                 obj_cgroup_uncharge(objcg, obj_full_size(s));
     549             :                                 continue;
     550             :                         }
     551             : 
     552             :                         off = obj_to_index(s, slab, p[i]);
     553             :                         obj_cgroup_get(objcg);
     554             :                         slab_objcgs(slab)[off] = objcg;
     555             :                         mod_objcg_state(objcg, slab_pgdat(slab),
     556             :                                         cache_vmstat_idx(s), obj_full_size(s));
     557             :                 } else {
     558             :                         obj_cgroup_uncharge(objcg, obj_full_size(s));
     559             :                 }
     560             :         }
     561             :         obj_cgroup_put(objcg);
     562             : }
     563             : 
     564             : static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
     565             :                                         void **p, int objects)
     566             : {
     567             :         struct obj_cgroup **objcgs;
     568             :         int i;
     569             : 
     570             :         if (!memcg_kmem_online())
     571             :                 return;
     572             : 
     573             :         objcgs = slab_objcgs(slab);
     574             :         if (!objcgs)
     575             :                 return;
     576             : 
     577             :         for (i = 0; i < objects; i++) {
     578             :                 struct obj_cgroup *objcg;
     579             :                 unsigned int off;
     580             : 
     581             :                 off = obj_to_index(s, slab, p[i]);
     582             :                 objcg = objcgs[off];
     583             :                 if (!objcg)
     584             :                         continue;
     585             : 
     586             :                 objcgs[off] = NULL;
     587             :                 obj_cgroup_uncharge(objcg, obj_full_size(s));
     588             :                 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
     589             :                                 -obj_full_size(s));
     590             :                 obj_cgroup_put(objcg);
     591             :         }
     592             : }
     593             : 
     594             : #else /* CONFIG_MEMCG_KMEM */
     595             : static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
     596             : {
     597             :         return NULL;
     598             : }
     599             : 
     600             : static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
     601             : {
     602             :         return NULL;
     603             : }
     604             : 
     605             : static inline int memcg_alloc_slab_cgroups(struct slab *slab,
     606             :                                                struct kmem_cache *s, gfp_t gfp,
     607             :                                                bool new_slab)
     608             : {
     609             :         return 0;
     610             : }
     611             : 
     612             : static inline void memcg_free_slab_cgroups(struct slab *slab)
     613             : {
     614             : }
     615             : 
     616             : static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
     617             :                                              struct list_lru *lru,
     618             :                                              struct obj_cgroup **objcgp,
     619             :                                              size_t objects, gfp_t flags)
     620             : {
     621             :         return true;
     622             : }
     623             : 
     624             : static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
     625             :                                               struct obj_cgroup *objcg,
     626             :                                               gfp_t flags, size_t size,
     627             :                                               void **p)
     628             : {
     629             : }
     630             : 
     631             : static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
     632             :                                         void **p, int objects)
     633             : {
     634             : }
     635             : #endif /* CONFIG_MEMCG_KMEM */
     636             : 
     637             : #ifndef CONFIG_SLOB
     638           0 : static inline struct kmem_cache *virt_to_cache(const void *obj)
     639             : {
     640             :         struct slab *slab;
     641             : 
     642           0 :         slab = virt_to_slab(obj);
     643           0 :         if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
     644             :                                         __func__))
     645             :                 return NULL;
     646           0 :         return slab->slab_cache;
     647             : }
     648             : 
     649             : static __always_inline void account_slab(struct slab *slab, int order,
     650             :                                          struct kmem_cache *s, gfp_t gfp)
     651             : {
     652             :         if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
     653             :                 memcg_alloc_slab_cgroups(slab, s, gfp, true);
     654             : 
     655       34400 :         mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
     656        8600 :                             PAGE_SIZE << order);
     657             : }
     658             : 
     659             : static __always_inline void unaccount_slab(struct slab *slab, int order,
     660             :                                            struct kmem_cache *s)
     661             : {
     662             :         if (memcg_kmem_online())
     663             :                 memcg_free_slab_cgroups(slab);
     664             : 
     665       32640 :         mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
     666        8160 :                             -(PAGE_SIZE << order));
     667             : }
     668             : 
     669      365486 : static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
     670             : {
     671             :         struct kmem_cache *cachep;
     672             : 
     673      365486 :         if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
     674      730972 :             !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
     675             :                 return s;
     676             : 
     677           0 :         cachep = virt_to_cache(x);
     678           0 :         if (WARN(cachep && cachep != s,
     679             :                   "%s: Wrong slab cache. %s but object is from %s\n",
     680             :                   __func__, s->name, cachep->name))
     681           0 :                 print_tracking(cachep, x);
     682             :         return cachep;
     683             : }
     684             : 
     685             : void free_large_kmalloc(struct folio *folio, void *object);
     686             : 
     687             : #endif /* CONFIG_SLOB */
     688             : 
     689             : size_t __ksize(const void *objp);
     690             : 
     691             : static inline size_t slab_ksize(const struct kmem_cache *s)
     692             : {
     693             : #ifndef CONFIG_SLUB
     694             :         return s->object_size;
     695             : 
     696             : #else /* CONFIG_SLUB */
     697             : # ifdef CONFIG_SLUB_DEBUG
     698             :         /*
     699             :          * Debugging requires use of the padding between object
     700             :          * and whatever may come after it.
     701             :          */
     702         113 :         if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
     703           0 :                 return s->object_size;
     704             : # endif
     705             :         if (s->flags & SLAB_KASAN)
     706             :                 return s->object_size;
     707             :         /*
     708             :          * If we have the need to store the freelist pointer
     709             :          * back there or track user information then we can
     710             :          * only use the space before that information.
     711             :          */
     712         113 :         if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
     713           0 :                 return s->inuse;
     714             :         /*
     715             :          * Else we can use all the padding etc for the allocation
     716             :          */
     717         113 :         return s->size;
     718             : #endif
     719             : }
     720             : 
     721             : static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
     722             :                                                      struct list_lru *lru,
     723             :                                                      struct obj_cgroup **objcgp,
     724             :                                                      size_t size, gfp_t flags)
     725             : {
     726      420882 :         flags &= gfp_allowed_mask;
     727             : 
     728      420882 :         might_alloc(flags);
     729             : 
     730      420882 :         if (should_failslab(s, flags))
     731             :                 return NULL;
     732             : 
     733      420882 :         if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
     734             :                 return NULL;
     735             : 
     736             :         return s;
     737             : }
     738             : 
     739      420882 : static inline void slab_post_alloc_hook(struct kmem_cache *s,
     740             :                                         struct obj_cgroup *objcg, gfp_t flags,
     741             :                                         size_t size, void **p, bool init,
     742             :                                         unsigned int orig_size)
     743             : {
     744      420882 :         unsigned int zero_size = s->object_size;
     745             :         size_t i;
     746             : 
     747      420882 :         flags &= gfp_allowed_mask;
     748             : 
     749             :         /*
      750             :          * For a kmalloc object, the allocated memory size (object_size) is likely
      751             :          * larger than the requested size (orig_size). If redzone checking is
      752             :          * enabled for the extra space, don't zero it, as it will be redzoned
      753             :          * soon. The redzone operation for this extra space can be seen as a
      754             :          * replacement for the current poisoning under certain debug options, and
     755             :          * won't break other sanity checks.
     756             :          */
     757      841764 :         if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
     758           0 :             (s->flags & SLAB_KMALLOC))
     759           0 :                 zero_size = orig_size;
     760             : 
     761             :         /*
     762             :          * As memory initialization might be integrated into KASAN,
     763             :          * kasan_slab_alloc and initialization memset must be
     764             :          * kept together to avoid discrepancies in behavior.
     765             :          *
     766             :          * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
     767             :          */
     768      841764 :         for (i = 0; i < size; i++) {
     769      420882 :                 p[i] = kasan_slab_alloc(s, p[i], flags, init);
     770      420882 :                 if (p[i] && init && !kasan_has_integrated_init())
     771      414777 :                         memset(p[i], 0, zero_size);
     772      420882 :                 kmemleak_alloc_recursive(p[i], s->object_size, 1,
     773             :                                          s->flags, flags);
     774      420882 :                 kmsan_slab_alloc(s, p[i], flags);
     775             :         }
     776             : 
     777      420882 :         memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
     778      420882 : }
     779             : 
     780             : #ifndef CONFIG_SLOB
     781             : /*
     782             :  * The slab lists for all objects.
     783             :  */
     784             : struct kmem_cache_node {
     785             : #ifdef CONFIG_SLAB
     786             :         raw_spinlock_t list_lock;
     787             :         struct list_head slabs_partial; /* partial list first, better asm code */
     788             :         struct list_head slabs_full;
     789             :         struct list_head slabs_free;
     790             :         unsigned long total_slabs;      /* length of all slab lists */
     791             :         unsigned long free_slabs;       /* length of free slab list only */
     792             :         unsigned long free_objects;
     793             :         unsigned int free_limit;
     794             :         unsigned int colour_next;       /* Per-node cache coloring */
     795             :         struct array_cache *shared;     /* shared per node */
     796             :         struct alien_cache **alien;     /* on other nodes */
     797             :         unsigned long next_reap;        /* updated without locking */
     798             :         int free_touched;               /* updated without locking */
     799             : #endif
     800             : 
     801             : #ifdef CONFIG_SLUB
     802             :         spinlock_t list_lock;
     803             :         unsigned long nr_partial;
     804             :         struct list_head partial;
     805             : #ifdef CONFIG_SLUB_DEBUG
     806             :         atomic_long_t nr_slabs;
     807             :         atomic_long_t total_objects;
     808             :         struct list_head full;
     809             : #endif
     810             : #endif
     811             : 
     812             : };
     813             : 
     814             : static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
     815             : {
     816       41969 :         return s->node[node];
     817             : }
     818             : 
     819             : /*
     820             :  * Iterator over all nodes. The body will be executed for each node that has
     821             :  * a kmem_cache_node structure allocated (which is true for all online nodes)
      822             :  * a kmem_cache_node structure allocated (which is true for all online nodes).
     823             : #define for_each_kmem_cache_node(__s, __node, __n) \
     824             :         for (__node = 0; __node < nr_node_ids; __node++) \
     825             :                  if ((__n = get_node(__s, __node)))
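/*
 * Editorial sketch, not part of the original mm/slab.h: typical use of
 * for_each_kmem_cache_node(). example_count_partial_slabs() is a hypothetical
 * helper; under CONFIG_SLUB, nr_partial is the per-node count of partially
 * filled slabs kept on the partial list.
 */
#ifdef CONFIG_SLUB
static inline unsigned long example_count_partial_slabs(struct kmem_cache *s)
{
        struct kmem_cache_node *n;
        unsigned long total = 0;
        int node;

        for_each_kmem_cache_node(s, node, n)
                total += n->nr_partial;

        return total;
}
#endif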
     826             : 
     827             : #endif
     828             : 
     829             : #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
     830             : void dump_unreclaimable_slab(void);
     831             : #else
     832             : static inline void dump_unreclaimable_slab(void)
     833             : {
     834             : }
     835             : #endif
     836             : 
     837             : void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
     838             : 
     839             : #ifdef CONFIG_SLAB_FREELIST_RANDOM
     840             : int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
     841             :                         gfp_t gfp);
     842             : void cache_random_seq_destroy(struct kmem_cache *cachep);
     843             : #else
     844             : static inline int cache_random_seq_create(struct kmem_cache *cachep,
     845             :                                         unsigned int count, gfp_t gfp)
     846             : {
     847             :         return 0;
     848             : }
     849             : static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
     850             : #endif /* CONFIG_SLAB_FREELIST_RANDOM */
     851             : 
     852             : static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
     853             : {
     854      420882 :         if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
     855             :                                 &init_on_alloc)) {
     856           0 :                 if (c->ctor)
     857             :                         return false;
     858           0 :                 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
     859           0 :                         return flags & __GFP_ZERO;
     860             :                 return true;
     861             :         }
     862      420882 :         return flags & __GFP_ZERO;
     863             : }
     864             : 
     865             : static inline bool slab_want_init_on_free(struct kmem_cache *c)
     866             : {
     867      829220 :         if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
     868             :                                 &init_on_free))
     869           0 :                 return !(c->ctor ||
     870           0 :                          (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
     871             :         return false;
     872             : }
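/*
 * Editorial sketch, not part of the original mm/slab.h: the result of
 * slab_want_init_on_alloc() is what the allocators pass as the "init"
 * argument of slab_post_alloc_hook() above. The helper name below is
 * hypothetical and for illustration only.
 */
static inline bool example_alloc_should_zero(struct kmem_cache *s, gfp_t flags)
{
        return slab_want_init_on_alloc(flags, s);
}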
     873             : 
     874             : #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
     875             : void debugfs_slab_release(struct kmem_cache *);
     876             : #else
     877             : static inline void debugfs_slab_release(struct kmem_cache *s) { }
     878             : #endif
     879             : 
     880             : #ifdef CONFIG_PRINTK
     881             : #define KS_ADDRS_COUNT 16
     882             : struct kmem_obj_info {
     883             :         void *kp_ptr;
     884             :         struct slab *kp_slab;
     885             :         void *kp_objp;
     886             :         unsigned long kp_data_offset;
     887             :         struct kmem_cache *kp_slab_cache;
     888             :         void *kp_ret;
     889             :         void *kp_stack[KS_ADDRS_COUNT];
     890             :         void *kp_free_stack[KS_ADDRS_COUNT];
     891             : };
     892             : void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
     893             : #endif
     894             : 
     895             : #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
     896             : void __check_heap_object(const void *ptr, unsigned long n,
     897             :                          const struct slab *slab, bool to_user);
     898             : #else
     899             : static inline
     900             : void __check_heap_object(const void *ptr, unsigned long n,
     901             :                          const struct slab *slab, bool to_user)
     902             : {
     903             : }
     904             : #endif
     905             : 
     906             : #ifdef CONFIG_SLUB_DEBUG
     907             : void skip_orig_size_check(struct kmem_cache *s, const void *object);
     908             : #endif
     909             : 
     910             : #endif /* MM_SLAB_H */

Generated by: LCOV version 1.14