LCOV - code coverage report
Current view: top level - mm - slab.h (source / functions)
Test: coverage.info
Date: 2023-07-19 18:55:55

                 Hit    Total    Coverage
Lines:            41       61      67.2 %
Functions:         2        3      66.7 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef MM_SLAB_H
       3             : #define MM_SLAB_H
       4             : /*
       5             :  * Internal slab definitions
       6             :  */
       7             : void __init kmem_cache_init(void);
       8             : 
       9             : /* Reuses the bits in struct page */
      10             : struct slab {
      11             :         unsigned long __page_flags;
      12             : 
      13             : #if defined(CONFIG_SLAB)
      14             : 
      15             :         struct kmem_cache *slab_cache;
      16             :         union {
      17             :                 struct {
      18             :                         struct list_head slab_list;
      19             :                         void *freelist; /* array of free object indexes */
      20             :                         void *s_mem;    /* first object */
      21             :                 };
      22             :                 struct rcu_head rcu_head;
      23             :         };
      24             :         unsigned int active;
      25             : 
      26             : #elif defined(CONFIG_SLUB)
      27             : 
      28             :         struct kmem_cache *slab_cache;
      29             :         union {
      30             :                 struct {
      31             :                         union {
      32             :                                 struct list_head slab_list;
      33             : #ifdef CONFIG_SLUB_CPU_PARTIAL
      34             :                                 struct {
      35             :                                         struct slab *next;
      36             :                                         int slabs;      /* Nr of slabs left */
      37             :                                 };
      38             : #endif
      39             :                         };
      40             :                         /* Double-word boundary */
      41             :                         void *freelist;         /* first free object */
      42             :                         union {
      43             :                                 unsigned long counters;
      44             :                                 struct {
      45             :                                         unsigned inuse:16;
      46             :                                         unsigned objects:15;
      47             :                                         unsigned frozen:1;
      48             :                                 };
      49             :                         };
      50             :                 };
      51             :                 struct rcu_head rcu_head;
      52             :         };
      53             :         unsigned int __unused;
      54             : 
      55             : #else
      56             : #error "Unexpected slab allocator configured"
      57             : #endif
      58             : 
      59             :         atomic_t __page_refcount;
      60             : #ifdef CONFIG_MEMCG
      61             :         unsigned long memcg_data;
      62             : #endif
      63             : };
      64             : 
      65             : #define SLAB_MATCH(pg, sl)                                              \
      66             :         static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
      67             : SLAB_MATCH(flags, __page_flags);
      68             : SLAB_MATCH(compound_head, slab_cache);  /* Ensure bit 0 is clear */
      69             : SLAB_MATCH(_refcount, __page_refcount);
      70             : #ifdef CONFIG_MEMCG
      71             : SLAB_MATCH(memcg_data, memcg_data);
      72             : #endif
      73             : #undef SLAB_MATCH
      74             : static_assert(sizeof(struct slab) <= sizeof(struct page));
      75             : #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
      76             : static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
      77             : #endif
      78             : 
      79             : /**
      80             :  * folio_slab - Converts from folio to slab.
      81             :  * @folio: The folio.
      82             :  *
      83             :  * Currently struct slab is a different representation of a folio where
      84             :  * folio_test_slab() is true.
      85             :  *
      86             :  * Return: The slab which contains this folio.
      87             :  */
      88             : #define folio_slab(folio)       (_Generic((folio),                      \
      89             :         const struct folio *:   (const struct slab *)(folio),           \
      90             :         struct folio *:         (struct slab *)(folio)))
      91             : 
      92             : /**
      93             :  * slab_folio - The folio allocated for a slab
      94             :  * @slab: The slab.
      95             :  *
      96             :  * Slabs are allocated as folios that contain the individual objects and are
      97             :  * using some fields in the first struct page of the folio - those fields are
      98             :  * now accessed by struct slab. It is occasionally necessary to convert back to
      99             :  * a folio in order to communicate with the rest of the mm.  Please use this
     100             :  * helper function instead of casting yourself, as the implementation may change
     101             :  * in the future.
     102             :  */
     103             : #define slab_folio(s)           (_Generic((s),                          \
     104             :         const struct slab *:    (const struct folio *)s,                \
     105             :         struct slab *:          (struct folio *)s))
     106             : 
     107             : /**
     108             :  * page_slab - Converts from first struct page to slab.
     109             :  * @p: The first (either head of compound or single) page of slab.
     110             :  *
     111             :  * A temporary wrapper to convert struct page to struct slab in situations where
     112             :  * we know the page is the compound head, or single order-0 page.
     113             :  *
     114             :  * Long-term ideally everything would work with struct slab directly or go
     115             :  * through folio to struct slab.
     116             :  *
     117             :  * Return: The slab which contains this page
     118             :  */
     119             : #define page_slab(p)            (_Generic((p),                          \
     120             :         const struct page *:    (const struct slab *)(p),               \
     121             :         struct page *:          (struct slab *)(p)))
     122             : 
     123             : /**
     124             :  * slab_page - The first struct page allocated for a slab
     125             :  * @slab: The slab.
     126             :  *
     127             :  * A convenience wrapper for converting slab to the first struct page of the
     128             :  * underlying folio, to communicate with code not yet converted to folio or
     129             :  * struct slab.
     130             :  */
     131             : #define slab_page(s) folio_page(slab_folio(s), 0)
     132             : 
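
/*
 * Editor's sketch, not part of mm/slab.h and not covered by this report:
 * how the conversion helpers above compose.  virt_to_folio() and
 * folio_test_slab() come from the core mm headers; the function name and
 * the pr_info() message are purely illustrative.
 */
static inline void example_print_slab_head(const void *obj)
{
        struct folio *folio = virt_to_folio(obj);

        if (folio_test_slab(folio)) {
                struct slab *slab = folio_slab(folio);  /* folio -> slab */
                struct page *head = slab_page(slab);    /* slab -> first page */

                pr_info("object %p sits on slab %p (head page %p)\n",
                        obj, slab, head);
        }
}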
     133             : /*
     134             :  * If network-based swap is enabled, sl*b must keep track of whether pages
     135             :  * were allocated from pfmemalloc reserves.
     136             :  */
     137             : static inline bool slab_test_pfmemalloc(const struct slab *slab)
     138             : {
     139        4602 :         return folio_test_active((struct folio *)slab_folio(slab));
     140             : }
     141             : 
     142             : static inline void slab_set_pfmemalloc(struct slab *slab)
     143             : {
     144           0 :         folio_set_active(slab_folio(slab));
     145             : }
     146             : 
     147             : static inline void slab_clear_pfmemalloc(struct slab *slab)
     148             : {
     149             :         folio_clear_active(slab_folio(slab));
     150             : }
     151             : 
     152             : static inline void __slab_clear_pfmemalloc(struct slab *slab)
     153             : {
     154        1753 :         __folio_clear_active(slab_folio(slab));
     155             : }
     156             : 
     157             : static inline void *slab_address(const struct slab *slab)
     158             : {
     159        2194 :         return folio_address(slab_folio(slab));
     160             : }
     161             : 
     162             : static inline int slab_nid(const struct slab *slab)
     163             : {
     164        7584 :         return folio_nid(slab_folio(slab));
     165             : }
     166             : 
     167             : static inline pg_data_t *slab_pgdat(const struct slab *slab)
     168             : {
     169        3947 :         return folio_pgdat(slab_folio(slab));
     170             : }
     171             : 
     172             : static inline struct slab *virt_to_slab(const void *addr)
     173             : {
     174       49059 :         struct folio *folio = virt_to_folio(addr);
     175             : 
     176       49059 :         if (!folio_test_slab(folio))
     177             :                 return NULL;
     178             : 
     179             :         return folio_slab(folio);
     180             : }
     181             : 
     182             : static inline int slab_order(const struct slab *slab)
     183             : {
     184           0 :         return folio_order((struct folio *)slab_folio(slab));
     185             : }
     186             : 
     187             : static inline size_t slab_size(const struct slab *slab)
     188             : {
     189           0 :         return PAGE_SIZE << slab_order(slab);
     190             : }
     191             : 
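
/*
 * Editor's sketch (illustrative only, not in the original header): the
 * wrappers above let callers reason about slab geometry without touching
 * struct page directly.  The helper name is hypothetical; the range check
 * simply combines slab_address() and slab_size().
 */
static inline bool example_obj_within_slab(const void *obj)
{
        struct slab *slab = virt_to_slab(obj);  /* NULL for non-slab memory */

        if (!slab)
                return false;
        return obj >= slab_address(slab) &&
               obj < slab_address(slab) + slab_size(slab);
}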
     192             : #ifdef CONFIG_SLAB
     193             : #include <linux/slab_def.h>
     194             : #endif
     195             : 
     196             : #ifdef CONFIG_SLUB
     197             : #include <linux/slub_def.h>
     198             : #endif
     199             : 
     200             : #include <linux/memcontrol.h>
     201             : #include <linux/fault-inject.h>
     202             : #include <linux/kasan.h>
     203             : #include <linux/kmemleak.h>
     204             : #include <linux/random.h>
     205             : #include <linux/sched/mm.h>
     206             : #include <linux/list_lru.h>
     207             : 
     208             : /*
     209             :  * State of the slab allocator.
     210             :  *
     211             :  * This is used to describe the states of the allocator during bootup.
     212             :  * Allocators use this to gradually bootstrap themselves. Most allocators
     213             :  * have the problem that the structures used for managing slab caches are
     214             :  * allocated from slab caches themselves.
     215             :  */
     216             : enum slab_state {
     217             :         DOWN,                   /* No slab functionality yet */
     218             :         PARTIAL,                /* SLUB: kmem_cache_node available */
     219             :         PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
     220             :         UP,                     /* Slab caches usable but not all extras yet */
     221             :         FULL                    /* Everything is working */
     222             : };
     223             : 
     224             : extern enum slab_state slab_state;
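
/*
 * Editor's sketch: because the enum values above are ordered by boot
 * progress, callers can gate work on a simple comparison (this mirrors
 * slab_is_available() in mm/slab_common.c; the name here is made up).
 */
static inline bool example_slab_usable(void)
{
        return slab_state >= UP;        /* caches can be created and used */
}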
     225             : 
     226             : /* The slab cache mutex protects the management structures during changes */
     227             : extern struct mutex slab_mutex;
     228             : 
     229             : /* The list of all slab caches on the system */
     230             : extern struct list_head slab_caches;
     231             : 
     232             : /* The slab cache that manages slab cache information */
     233             : extern struct kmem_cache *kmem_cache;
     234             : 
     235             : /* A table of kmalloc cache names and sizes */
     236             : extern const struct kmalloc_info_struct {
     237             :         const char *name[NR_KMALLOC_TYPES];
     238             :         unsigned int size;
     239             : } kmalloc_info[];
     240             : 
     241             : /* Kmalloc array related functions */
     242             : void setup_kmalloc_cache_index_table(void);
     243             : void create_kmalloc_caches(slab_flags_t);
     244             : 
     245             : /* Find the kmalloc slab corresponding for a certain size */
     246             : struct kmem_cache *kmalloc_slab(size_t, gfp_t);
     247             : 
     248             : void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
     249             :                               int node, size_t orig_size,
     250             :                               unsigned long caller);
     251             : void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
     252             : 
     253             : gfp_t kmalloc_fix_flags(gfp_t flags);
     254             : 
     255             : /* Functions provided by the slab allocators */
     256             : int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
     257             : 
     258             : struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
     259             :                         slab_flags_t flags, unsigned int useroffset,
     260             :                         unsigned int usersize);
     261             : extern void create_boot_cache(struct kmem_cache *, const char *name,
     262             :                         unsigned int size, slab_flags_t flags,
     263             :                         unsigned int useroffset, unsigned int usersize);
     264             : 
     265             : int slab_unmergeable(struct kmem_cache *s);
     266             : struct kmem_cache *find_mergeable(unsigned size, unsigned align,
     267             :                 slab_flags_t flags, const char *name, void (*ctor)(void *));
     268             : struct kmem_cache *
     269             : __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
     270             :                    slab_flags_t flags, void (*ctor)(void *));
     271             : 
     272             : slab_flags_t kmem_cache_flags(unsigned int object_size,
     273             :         slab_flags_t flags, const char *name);
     274             : 
     275             : static inline bool is_kmalloc_cache(struct kmem_cache *s)
     276             : {
     277             :         return (s->flags & SLAB_KMALLOC);
     278             : }
     279             : 
     280             : /* Legal flag mask for kmem_cache_create(), for various configurations */
     281             : #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
     282             :                          SLAB_CACHE_DMA32 | SLAB_PANIC | \
     283             :                          SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
     284             : 
     285             : #if defined(CONFIG_DEBUG_SLAB)
     286             : #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
     287             : #elif defined(CONFIG_SLUB_DEBUG)
     288             : #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
     289             :                           SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
     290             : #else
     291             : #define SLAB_DEBUG_FLAGS (0)
     292             : #endif
     293             : 
     294             : #if defined(CONFIG_SLAB)
     295             : #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
     296             :                           SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
     297             :                           SLAB_ACCOUNT)
     298             : #elif defined(CONFIG_SLUB)
     299             : #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
     300             :                           SLAB_TEMPORARY | SLAB_ACCOUNT | \
     301             :                           SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
     302             : #else
     303             : #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
     304             : #endif
     305             : 
     306             : /* Common flags available with current configuration */
     307             : #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
     308             : 
     309             : /* Common flags permitted for kmem_cache_create */
     310             : #define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
     311             :                               SLAB_RED_ZONE | \
     312             :                               SLAB_POISON | \
     313             :                               SLAB_STORE_USER | \
     314             :                               SLAB_TRACE | \
     315             :                               SLAB_CONSISTENCY_CHECKS | \
     316             :                               SLAB_MEM_SPREAD | \
     317             :                               SLAB_NOLEAKTRACE | \
     318             :                               SLAB_RECLAIM_ACCOUNT | \
     319             :                               SLAB_TEMPORARY | \
     320             :                               SLAB_ACCOUNT | \
     321             :                               SLAB_KMALLOC | \
     322             :                               SLAB_NO_USER_FLAGS)
     323             : 
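
/*
 * Editor's sketch (hypothetical helper): CACHE_CREATE_MASK above is the
 * set of flags valid for the current configuration, so a creation path
 * can reject anything outside it with a simple mask test.
 */
static inline bool example_flags_supported(slab_flags_t flags)
{
        return !(flags & ~CACHE_CREATE_MASK);
}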
     324             : bool __kmem_cache_empty(struct kmem_cache *);
     325             : int __kmem_cache_shutdown(struct kmem_cache *);
     326             : void __kmem_cache_release(struct kmem_cache *);
     327             : int __kmem_cache_shrink(struct kmem_cache *);
     328             : void slab_kmem_cache_release(struct kmem_cache *);
     329             : 
     330             : struct seq_file;
     331             : struct file;
     332             : 
     333             : struct slabinfo {
     334             :         unsigned long active_objs;
     335             :         unsigned long num_objs;
     336             :         unsigned long active_slabs;
     337             :         unsigned long num_slabs;
     338             :         unsigned long shared_avail;
     339             :         unsigned int limit;
     340             :         unsigned int batchcount;
     341             :         unsigned int shared;
     342             :         unsigned int objects_per_slab;
     343             :         unsigned int cache_order;
     344             : };
     345             : 
     346             : void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
     347             : void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
     348             : ssize_t slabinfo_write(struct file *file, const char __user *buffer,
     349             :                        size_t count, loff_t *ppos);
     350             : 
     351             : static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
     352             : {
     353        3947 :         return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
     354        3947 :                 NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
     355             : }
     356             : 
     357             : #ifdef CONFIG_SLUB_DEBUG
     358             : #ifdef CONFIG_SLUB_DEBUG_ON
     359             : DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
     360             : #else
     361             : DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
     362             : #endif
     363             : extern void print_tracking(struct kmem_cache *s, void *object);
     364             : long validate_slab_cache(struct kmem_cache *s);
     365             : static inline bool __slub_debug_enabled(void)
     366             : {
     367      169265 :         return static_branch_unlikely(&slub_debug_enabled);
     368             : }
     369             : #else
     370             : static inline void print_tracking(struct kmem_cache *s, void *object)
     371             : {
     372             : }
     373             : static inline bool __slub_debug_enabled(void)
     374             : {
     375             :         return false;
     376             : }
     377             : #endif
     378             : 
     379             : /*
     380             :  * Returns true if any of the specified slub_debug flags is enabled for the
     381             :  * cache. Use only for flags parsed by setup_slub_debug() as it also enables
     382             :  * the static key.
     383             :  */
     384             : static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
     385             : {
     386             :         if (IS_ENABLED(CONFIG_SLUB_DEBUG))
     387             :                 VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
     388      169264 :         if (__slub_debug_enabled())
     389           0 :                 return s->flags & flags;
     390             :         return false;
     391             : }
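
/*
 * Editor's sketch (hypothetical caller): a typical use of
 * kmem_cache_debug_flags() is to test one debug flag at a time, e.g.
 * whether alloc/free tracking is enabled for this cache.
 */
static inline bool example_wants_store_user(struct kmem_cache *s)
{
        return kmem_cache_debug_flags(s, SLAB_STORE_USER);
}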
     392             : 
     393             : #ifdef CONFIG_MEMCG_KMEM
     394             : /*
     395             :  * slab_objcgs - get the object cgroups vector associated with a slab
     396             :  * @slab: a pointer to the slab struct
     397             :  *
     398             :  * Returns a pointer to the object cgroups vector associated with the slab,
     399             :  * or NULL if no such vector has been associated yet.
     400             :  */
     401             : static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
     402             : {
     403             :         unsigned long memcg_data = READ_ONCE(slab->memcg_data);
     404             : 
     405             :         VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
     406             :                                                         slab_page(slab));
     407             :         VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));
     408             : 
     409             :         return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     410             : }
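
/*
 * Editor's note (illustrative): memcg_data holds the address of the
 * obj_cgroup vector with MEMCG_DATA_OBJCGS set in its low bits, which is
 * why the helper above masks MEMCG_DATA_FLAGS_MASK off before returning
 * the pointer.
 */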
     411             : 
     412             : int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
     413             :                                  gfp_t gfp, bool new_slab);
     414             : void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
     415             :                      enum node_stat_item idx, int nr);
     416             : 
     417             : static inline void memcg_free_slab_cgroups(struct slab *slab)
     418             : {
     419             :         kfree(slab_objcgs(slab));
     420             :         slab->memcg_data = 0;
     421             : }
     422             : 
     423             : static inline size_t obj_full_size(struct kmem_cache *s)
     424             : {
     425             :         /*
     426             :          * For each accounted object there is an extra space which is used
     427             :          * to store obj_cgroup membership. Charge it too.
     428             :          */
     429             :         return s->size + sizeof(struct obj_cgroup *);
     430             : }
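
/*
 * Editor's note (worked example, not from the report): on a 64-bit kernel
 * a cache with s->size == 64 is charged
 * 64 + sizeof(struct obj_cgroup *) = 72 bytes per accounted object.
 */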
     431             : 
     432             : /*
     433             :  * Returns false if the allocation should fail.
     434             :  */
     435             : static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
     436             :                                              struct list_lru *lru,
     437             :                                              struct obj_cgroup **objcgp,
     438             :                                              size_t objects, gfp_t flags)
     439             : {
     440             :         struct obj_cgroup *objcg;
     441             : 
     442             :         if (!memcg_kmem_online())
     443             :                 return true;
     444             : 
     445             :         if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
     446             :                 return true;
     447             : 
     448             :         objcg = get_obj_cgroup_from_current();
     449             :         if (!objcg)
     450             :                 return true;
     451             : 
     452             :         if (lru) {
     453             :                 int ret;
     454             :                 struct mem_cgroup *memcg;
     455             : 
     456             :                 memcg = get_mem_cgroup_from_objcg(objcg);
     457             :                 ret = memcg_list_lru_alloc(memcg, lru, flags);
     458             :                 css_put(&memcg->css);
     459             : 
     460             :                 if (ret)
     461             :                         goto out;
     462             :         }
     463             : 
     464             :         if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
     465             :                 goto out;
     466             : 
     467             :         *objcgp = objcg;
     468             :         return true;
     469             : out:
     470             :         obj_cgroup_put(objcg);
     471             :         return false;
     472             : }
     473             : 
     474             : static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
     475             :                                               struct obj_cgroup *objcg,
     476             :                                               gfp_t flags, size_t size,
     477             :                                               void **p)
     478             : {
     479             :         struct slab *slab;
     480             :         unsigned long off;
     481             :         size_t i;
     482             : 
     483             :         if (!memcg_kmem_online() || !objcg)
     484             :                 return;
     485             : 
     486             :         for (i = 0; i < size; i++) {
     487             :                 if (likely(p[i])) {
     488             :                         slab = virt_to_slab(p[i]);
     489             : 
     490             :                         if (!slab_objcgs(slab) &&
     491             :                             memcg_alloc_slab_cgroups(slab, s, flags,
     492             :                                                          false)) {
     493             :                                 obj_cgroup_uncharge(objcg, obj_full_size(s));
     494             :                                 continue;
     495             :                         }
     496             : 
     497             :                         off = obj_to_index(s, slab, p[i]);
     498             :                         obj_cgroup_get(objcg);
     499             :                         slab_objcgs(slab)[off] = objcg;
     500             :                         mod_objcg_state(objcg, slab_pgdat(slab),
     501             :                                         cache_vmstat_idx(s), obj_full_size(s));
     502             :                 } else {
     503             :                         obj_cgroup_uncharge(objcg, obj_full_size(s));
     504             :                 }
     505             :         }
     506             :         obj_cgroup_put(objcg);
     507             : }
     508             : 
     509             : static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
     510             :                                         void **p, int objects)
     511             : {
     512             :         struct obj_cgroup **objcgs;
     513             :         int i;
     514             : 
     515             :         if (!memcg_kmem_online())
     516             :                 return;
     517             : 
     518             :         objcgs = slab_objcgs(slab);
     519             :         if (!objcgs)
     520             :                 return;
     521             : 
     522             :         for (i = 0; i < objects; i++) {
     523             :                 struct obj_cgroup *objcg;
     524             :                 unsigned int off;
     525             : 
     526             :                 off = obj_to_index(s, slab, p[i]);
     527             :                 objcg = objcgs[off];
     528             :                 if (!objcg)
     529             :                         continue;
     530             : 
     531             :                 objcgs[off] = NULL;
     532             :                 obj_cgroup_uncharge(objcg, obj_full_size(s));
     533             :                 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
     534             :                                 -obj_full_size(s));
     535             :                 obj_cgroup_put(objcg);
     536             :         }
     537             : }
     538             : 
     539             : #else /* CONFIG_MEMCG_KMEM */
     540             : static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
     541             : {
     542             :         return NULL;
     543             : }
     544             : 
     545             : static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
     546             : {
     547             :         return NULL;
     548             : }
     549             : 
     550             : static inline int memcg_alloc_slab_cgroups(struct slab *slab,
     551             :                                                struct kmem_cache *s, gfp_t gfp,
     552             :                                                bool new_slab)
     553             : {
     554             :         return 0;
     555             : }
     556             : 
     557             : static inline void memcg_free_slab_cgroups(struct slab *slab)
     558             : {
     559             : }
     560             : 
     561             : static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
     562             :                                              struct list_lru *lru,
     563             :                                              struct obj_cgroup **objcgp,
     564             :                                              size_t objects, gfp_t flags)
     565             : {
     566             :         return true;
     567             : }
     568             : 
     569             : static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
     570             :                                               struct obj_cgroup *objcg,
     571             :                                               gfp_t flags, size_t size,
     572             :                                               void **p)
     573             : {
     574             : }
     575             : 
     576             : static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
     577             :                                         void **p, int objects)
     578             : {
     579             : }
     580             : #endif /* CONFIG_MEMCG_KMEM */
     581             : 
     582           0 : static inline struct kmem_cache *virt_to_cache(const void *obj)
     583             : {
     584             :         struct slab *slab;
     585             : 
     586           0 :         slab = virt_to_slab(obj);
     587           0 :         if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
     588             :                                         __func__))
     589             :                 return NULL;
     590           0 :         return slab->slab_cache;
     591             : }
     592             : 
     593             : static __always_inline void account_slab(struct slab *slab, int order,
     594             :                                          struct kmem_cache *s, gfp_t gfp)
     595             : {
     596             :         if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
     597             :                 memcg_alloc_slab_cgroups(slab, s, gfp, true);
     598             : 
     599        8776 :         mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
     600        2194 :                             PAGE_SIZE << order);
     601             : }
     602             : 
     603             : static __always_inline void unaccount_slab(struct slab *slab, int order,
     604             :                                            struct kmem_cache *s)
     605             : {
     606             :         if (memcg_kmem_online())
     607             :                 memcg_free_slab_cgroups(slab);
     608             : 
     609        7012 :         mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
     610        1753 :                             -(PAGE_SIZE << order));
     611             : }
     612             : 
     613        5553 : static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
     614             : {
     615             :         struct kmem_cache *cachep;
     616             : 
     617        5553 :         if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
     618       11106 :             !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
     619             :                 return s;
     620             : 
     621           0 :         cachep = virt_to_cache(x);
     622           0 :         if (WARN(cachep && cachep != s,
     623             :                   "%s: Wrong slab cache. %s but object is from %s\n",
     624             :                   __func__, s->name, cachep->name))
     625           0 :                 print_tracking(cachep, x);
     626             :         return cachep;
     627             : }
     628             : 
     629             : void free_large_kmalloc(struct folio *folio, void *object);
     630             : 
     631             : size_t __ksize(const void *objp);
     632             : 
     633             : static inline size_t slab_ksize(const struct kmem_cache *s)
     634             : {
     635             : #ifndef CONFIG_SLUB
     636             :         return s->object_size;
     637             : 
     638             : #else /* CONFIG_SLUB */
     639             : # ifdef CONFIG_SLUB_DEBUG
     640             :         /*
     641             :          * Debugging requires use of the padding between object
     642             :          * and whatever may come after it.
     643             :          */
     644         118 :         if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
     645           0 :                 return s->object_size;
     646             : # endif
     647             :         if (s->flags & SLAB_KASAN)
     648             :                 return s->object_size;
     649             :         /*
     650             :          * If we have the need to store the freelist pointer
     651             :          * back there or track user information then we can
     652             :          * only use the space before that information.
     653             :          */
     654         118 :         if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
     655           0 :                 return s->inuse;
     656             :         /*
     657             :          * Else we can use all the padding etc for the allocation
     658             :          */
     659         118 :         return s->size;
     660             : #endif
     661             : }
     662             : 
     663             : static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
     664             :                                                      struct list_lru *lru,
     665             :                                                      struct obj_cgroup **objcgp,
     666             :                                                      size_t size, gfp_t flags)
     667             : {
     668       61648 :         flags &= gfp_allowed_mask;
     669             : 
     670       61648 :         might_alloc(flags);
     671             : 
     672       61648 :         if (should_failslab(s, flags))
     673             :                 return NULL;
     674             : 
     675       61648 :         if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
     676             :                 return NULL;
     677             : 
     678             :         return s;
     679             : }
     680             : 
     681       61648 : static inline void slab_post_alloc_hook(struct kmem_cache *s,
     682             :                                         struct obj_cgroup *objcg, gfp_t flags,
     683             :                                         size_t size, void **p, bool init,
     684             :                                         unsigned int orig_size)
     685             : {
     686       61648 :         unsigned int zero_size = s->object_size;
     687             :         size_t i;
     688             : 
     689       61648 :         flags &= gfp_allowed_mask;
     690             : 
     691             :         /*
     692             :          * For kmalloc object, the allocated memory size(object_size) is likely
     693             :          * larger than the requested size(orig_size). If redzone check is
     694             :          * enabled for the extra space, don't zero it, as it will be redzoned
     695             :          * soon. The redzone operation for this extra space could be seen as a
     696             :          * replacement of current poisoning under certain debug option, and
     697             :          * won't break other sanity checks.
     698             :          */
     699      123296 :         if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
     700           0 :             (s->flags & SLAB_KMALLOC))
     701           0 :                 zero_size = orig_size;
     702             : 
     703             :         /*
     704             :          * As memory initialization might be integrated into KASAN,
     705             :          * kasan_slab_alloc and initialization memset must be
     706             :          * kept together to avoid discrepancies in behavior.
     707             :          *
     708             :          * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
     709             :          */
     710      123296 :         for (i = 0; i < size; i++) {
     711       61648 :                 p[i] = kasan_slab_alloc(s, p[i], flags, init);
     712       61648 :                 if (p[i] && init && !kasan_has_integrated_init())
     713       55179 :                         memset(p[i], 0, zero_size);
     714       61648 :                 kmemleak_alloc_recursive(p[i], s->object_size, 1,
     715             :                                          s->flags, flags);
     716       61648 :                 kmsan_slab_alloc(s, p[i], flags);
     717             :         }
     718             : 
     719       61648 :         memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
     720       61648 : }
     721             : 
     722             : /*
     723             :  * The slab lists for all objects.
     724             :  */
     725             : struct kmem_cache_node {
     726             : #ifdef CONFIG_SLAB
     727             :         raw_spinlock_t list_lock;
     728             :         struct list_head slabs_partial; /* partial list first, better asm code */
     729             :         struct list_head slabs_full;
     730             :         struct list_head slabs_free;
     731             :         unsigned long total_slabs;      /* length of all slab lists */
     732             :         unsigned long free_slabs;       /* length of free slab list only */
     733             :         unsigned long free_objects;
     734             :         unsigned int free_limit;
     735             :         unsigned int colour_next;       /* Per-node cache coloring */
     736             :         struct array_cache *shared;     /* shared per node */
     737             :         struct alien_cache **alien;     /* on other nodes */
     738             :         unsigned long next_reap;        /* updated without locking */
     739             :         int free_touched;               /* updated without locking */
     740             : #endif
     741             : 
     742             : #ifdef CONFIG_SLUB
     743             :         spinlock_t list_lock;
     744             :         unsigned long nr_partial;
     745             :         struct list_head partial;
     746             : #ifdef CONFIG_SLUB_DEBUG
     747             :         atomic_long_t nr_slabs;
     748             :         atomic_long_t total_objects;
     749             :         struct list_head full;
     750             : #endif
     751             : #endif
     752             : 
     753             : };
     754             : 
     755             : static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
     756             : {
     757        9861 :         return s->node[node];
     758             : }
     759             : 
     760             : /*
     761             :  * Iterator over all nodes. The body will be executed for each node that has
     762             :  * a kmem_cache_node structure allocated (which is true for all online nodes)
     763             :  */
     764             : #define for_each_kmem_cache_node(__s, __node, __n) \
     765             :         for (__node = 0; __node < nr_node_ids; __node++) \
     766             :                  if ((__n = get_node(__s, __node)))
     767             : 
     768             : 
     769             : #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
     770             : void dump_unreclaimable_slab(void);
     771             : #else
     772             : static inline void dump_unreclaimable_slab(void)
     773             : {
     774             : }
     775             : #endif
     776             : 
     777             : void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
     778             : 
     779             : #ifdef CONFIG_SLAB_FREELIST_RANDOM
     780             : int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
     781             :                         gfp_t gfp);
     782             : void cache_random_seq_destroy(struct kmem_cache *cachep);
     783             : #else
     784             : static inline int cache_random_seq_create(struct kmem_cache *cachep,
     785             :                                         unsigned int count, gfp_t gfp)
     786             : {
     787             :         return 0;
     788             : }
     789             : static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
     790             : #endif /* CONFIG_SLAB_FREELIST_RANDOM */
     791             : 
     792             : static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
     793             : {
     794       61648 :         if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
     795             :                                 &init_on_alloc)) {
     796           0 :                 if (c->ctor)
     797             :                         return false;
     798           0 :                 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
     799           0 :                         return flags & __GFP_ZERO;
     800             :                 return true;
     801             :         }
     802       61648 :         return flags & __GFP_ZERO;
     803             : }
     804             : 
     805             : static inline bool slab_want_init_on_free(struct kmem_cache *c)
     806             : {
     807      110707 :         if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
     808             :                                 &init_on_free))
     809           0 :                 return !(c->ctor ||
     810           0 :                          (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
     811             :         return false;
     812             : }
     813             : 
     814             : #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
     815             : void debugfs_slab_release(struct kmem_cache *);
     816             : #else
     817             : static inline void debugfs_slab_release(struct kmem_cache *s) { }
     818             : #endif
     819             : 
     820             : #ifdef CONFIG_PRINTK
     821             : #define KS_ADDRS_COUNT 16
     822             : struct kmem_obj_info {
     823             :         void *kp_ptr;
     824             :         struct slab *kp_slab;
     825             :         void *kp_objp;
     826             :         unsigned long kp_data_offset;
     827             :         struct kmem_cache *kp_slab_cache;
     828             :         void *kp_ret;
     829             :         void *kp_stack[KS_ADDRS_COUNT];
     830             :         void *kp_free_stack[KS_ADDRS_COUNT];
     831             : };
     832             : void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
     833             : #endif
     834             : 
     835             : #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
     836             : void __check_heap_object(const void *ptr, unsigned long n,
     837             :                          const struct slab *slab, bool to_user);
     838             : #else
     839             : static inline
     840             : void __check_heap_object(const void *ptr, unsigned long n,
     841             :                          const struct slab *slab, bool to_user)
     842             : {
     843             : }
     844             : #endif
     845             : 
     846             : #ifdef CONFIG_SLUB_DEBUG
     847             : void skip_orig_size_check(struct kmem_cache *s, const void *object);
     848             : #endif
     849             : 
     850             : #endif /* MM_SLAB_H */

Generated by: LCOV version 1.14