LCOV - code coverage report
Current view: top level - mm - slub.c (source / functions)
Test: coverage.info
Date: 2023-08-24 13:40:31

                 Hit    Total   Coverage
Lines:           604     1620     37.3 %
Functions:        46      144     31.9 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * SLUB: A slab allocator that limits cache line use instead of queuing
       4             :  * objects in per cpu and per node lists.
       5             :  *
       6             :  * The allocator synchronizes using per slab locks or atomic operations
       7             :  * and only uses a centralized lock to manage a pool of partial slabs.
       8             :  *
       9             :  * (C) 2007 SGI, Christoph Lameter
      10             :  * (C) 2011 Linux Foundation, Christoph Lameter
      11             :  */
      12             : 
      13             : #include <linux/mm.h>
      14             : #include <linux/swap.h> /* mm_account_reclaimed_pages() */
      15             : #include <linux/module.h>
      16             : #include <linux/bit_spinlock.h>
      17             : #include <linux/interrupt.h>
      18             : #include <linux/swab.h>
      19             : #include <linux/bitops.h>
      20             : #include <linux/slab.h>
      21             : #include "slab.h"
      22             : #include <linux/proc_fs.h>
      23             : #include <linux/seq_file.h>
      24             : #include <linux/kasan.h>
      25             : #include <linux/kmsan.h>
      26             : #include <linux/cpu.h>
      27             : #include <linux/cpuset.h>
      28             : #include <linux/mempolicy.h>
      29             : #include <linux/ctype.h>
      30             : #include <linux/stackdepot.h>
      31             : #include <linux/debugobjects.h>
      32             : #include <linux/kallsyms.h>
      33             : #include <linux/kfence.h>
      34             : #include <linux/memory.h>
      35             : #include <linux/math64.h>
      36             : #include <linux/fault-inject.h>
      37             : #include <linux/stacktrace.h>
      38             : #include <linux/prefetch.h>
      39             : #include <linux/memcontrol.h>
      40             : #include <linux/random.h>
      41             : #include <kunit/test.h>
      42             : #include <kunit/test-bug.h>
      43             : #include <linux/sort.h>
      44             : 
      45             : #include <linux/debugfs.h>
      46             : #include <trace/events/kmem.h>
      47             : 
      48             : #include "internal.h"
      49             : 
      50             : /*
      51             :  * Lock order:
      52             :  *   1. slab_mutex (Global Mutex)
      53             :  *   2. node->list_lock (Spinlock)
      54             :  *   3. kmem_cache->cpu_slab->lock (Local lock)
      55             :  *   4. slab_lock(slab) (Only on some arches)
      56             :  *   5. object_map_lock (Only for debugging)
      57             :  *
      58             :  *   slab_mutex
      59             :  *
      60             :  *   The role of the slab_mutex is to protect the list of all the slabs
      61             :  *   and to synchronize major metadata changes to slab cache structures.
      62             :  *   Also synchronizes memory hotplug callbacks.
      63             :  *
      64             :  *   slab_lock
      65             :  *
      66             :  *   The slab_lock is a wrapper around the page lock, thus it is a bit
      67             :  *   spinlock.
      68             :  *
      69             :  *   The slab_lock is only used on arches that do not have the ability
      70             :  *   to do a cmpxchg_double. It only protects:
      71             :  *
      72             :  *      A. slab->freelist    -> List of free objects in a slab
      73             :  *      B. slab->inuse               -> Number of objects in use
      74             :  *      C. slab->objects     -> Number of objects in slab
      75             :  *      D. slab->frozen              -> frozen state
      76             :  *
      77             :  *   Frozen slabs
      78             :  *
      79             :  *   If a slab is frozen then it is exempt from list management. It is not
      80             :  *   on any list except per cpu partial list. The processor that froze the
      81             :  *   slab is the one who can perform list operations on the slab. Other
      82             :  *   processors may put objects onto the freelist but the processor that
      83             :  *   froze the slab is the only one that can retrieve the objects from the
      84             :  *   slab's freelist.
      85             :  *
      86             :  *   list_lock
      87             :  *
      88             :  *   The list_lock protects the partial and full list on each node and
      89             :  *   the partial slab counter. If taken then no new slabs may be added or
       90             :  *   removed from the lists, nor may the number of partial slabs be modified.
      91             :  *   (Note that the total number of slabs is an atomic value that may be
      92             :  *   modified without taking the list lock).
      93             :  *
      94             :  *   The list_lock is a centralized lock and thus we avoid taking it as
      95             :  *   much as possible. As long as SLUB does not have to handle partial
      96             :  *   slabs, operations can continue without any centralized lock. F.e.
      97             :  *   allocating a long series of objects that fill up slabs does not require
      98             :  *   the list lock.
      99             :  *
     100             :  *   For debug caches, all allocations are forced to go through a list_lock
     101             :  *   protected region to serialize against concurrent validation.
     102             :  *
     103             :  *   cpu_slab->lock local lock
     104             :  *
      105             :  *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
     106             :  *   except the stat counters. This is a percpu structure manipulated only by
     107             :  *   the local cpu, so the lock protects against being preempted or interrupted
     108             :  *   by an irq. Fast path operations rely on lockless operations instead.
     109             :  *
     110             :  *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption
     111             :  *   which means the lockless fastpath cannot be used as it might interfere with
      112             :  *   an in-progress slow path operation. In this case the local lock is always
     113             :  *   taken but it still utilizes the freelist for the common operations.
     114             :  *
     115             :  *   lockless fastpaths
     116             :  *
     117             :  *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
     118             :  *   are fully lockless when satisfied from the percpu slab (and when
     119             :  *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
     120             :  *   They also don't disable preemption or migration or irqs. They rely on
     121             :  *   the transaction id (tid) field to detect being preempted or moved to
     122             :  *   another cpu.
     123             :  *
     124             :  *   irq, preemption, migration considerations
     125             :  *
     126             :  *   Interrupts are disabled as part of list_lock or local_lock operations, or
     127             :  *   around the slab_lock operation, in order to make the slab allocator safe
     128             :  *   to use in the context of an irq.
     129             :  *
     130             :  *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
     131             :  *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
     132             :  *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
     133             :  *   doesn't have to be revalidated in each section protected by the local lock.
     134             :  *
     135             :  * SLUB assigns one slab for allocation to each processor.
     136             :  * Allocations only occur from these slabs called cpu slabs.
     137             :  *
     138             :  * Slabs with free elements are kept on a partial list and during regular
     139             :  * operations no list for full slabs is used. If an object in a full slab is
     140             :  * freed then the slab will show up again on the partial lists.
     141             :  * We track full slabs for debugging purposes though because otherwise we
     142             :  * cannot scan all objects.
     143             :  *
     144             :  * Slabs are freed when they become empty. Teardown and setup is
     145             :  * minimal so we rely on the page allocators per cpu caches for
     146             :  * fast frees and allocs.
     147             :  *
     148             :  * slab->frozen              The slab is frozen and exempt from list processing.
     149             :  *                      This means that the slab is dedicated to a purpose
     150             :  *                      such as satisfying allocations for a specific
     151             :  *                      processor. Objects may be freed in the slab while
     152             :  *                      it is frozen but slab_free will then skip the usual
     153             :  *                      list operations. It is up to the processor holding
     154             :  *                      the slab to integrate the slab into the slab lists
     155             :  *                      when the slab is no longer needed.
     156             :  *
     157             :  *                      One use of this flag is to mark slabs that are
     158             :  *                      used for allocations. Then such a slab becomes a cpu
     159             :  *                      slab. The cpu slab may be equipped with an additional
     160             :  *                      freelist that allows lockless access to
     161             :  *                      free objects in addition to the regular freelist
     162             :  *                      that requires the slab lock.
     163             :  *
     164             :  * SLAB_DEBUG_FLAGS     Slab requires special handling due to debug
     165             :  *                      options set. This moves slab handling out of
     166             :  *                      the fast path and disables lockless freelists.
     167             :  */
     168             : 
     169             : /*
      170             :  * We could simply use migrate_disable()/enable(), but since that is a
      171             :  * function call even on !PREEMPT_RT, use the inline preempt_disable() there.
     172             :  */
     173             : #ifndef CONFIG_PREEMPT_RT
     174             : #define slub_get_cpu_ptr(var)           get_cpu_ptr(var)
     175             : #define slub_put_cpu_ptr(var)           put_cpu_ptr(var)
     176             : #define USE_LOCKLESS_FAST_PATH()        (true)
     177             : #else
     178             : #define slub_get_cpu_ptr(var)           \
     179             : ({                                      \
     180             :         migrate_disable();              \
     181             :         this_cpu_ptr(var);              \
     182             : })
     183             : #define slub_put_cpu_ptr(var)           \
     184             : do {                                    \
     185             :         (void)(var);                    \
     186             :         migrate_enable();               \
     187             : } while (0)
     188             : #define USE_LOCKLESS_FAST_PATH()        (false)
     189             : #endif
     190             : 
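/*
 * Illustrative sketch (editor's addition, not part of slub.c): how the
 * slub_get_cpu_ptr()/slub_put_cpu_ptr() pair above is meant to be used.
 * The function below is hypothetical; the real users are the allocation
 * and flushing slow paths later in this file. Assumes !CONFIG_SLUB_TINY,
 * where s->cpu_slab exists.
 */
static void example_with_cpu_slab(struct kmem_cache *s)
{
	struct kmem_cache_cpu *c;

	/* Pin this CPU: preempt_disable() on !PREEMPT_RT, migrate_disable() on RT */
	c = slub_get_cpu_ptr(s->cpu_slab);

	/* ... operate on the per-cpu slab state pointed to by c ... */
	(void)c;

	/* Undo the pinning done above */
	slub_put_cpu_ptr(s->cpu_slab);
}
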
     191             : #ifndef CONFIG_SLUB_TINY
     192             : #define __fastpath_inline __always_inline
     193             : #else
     194             : #define __fastpath_inline
     195             : #endif
     196             : 
     197             : #ifdef CONFIG_SLUB_DEBUG
     198             : #ifdef CONFIG_SLUB_DEBUG_ON
     199             : DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
     200             : #else
     201             : DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
     202             : #endif
     203             : #endif          /* CONFIG_SLUB_DEBUG */
     204             : 
     205             : /* Structure holding parameters for get_partial() call chain */
     206             : struct partial_context {
     207             :         struct slab **slab;
     208             :         gfp_t flags;
     209             :         unsigned int orig_size;
     210             : };
     211             : 
     212             : static inline bool kmem_cache_debug(struct kmem_cache *s)
     213             : {
     214        2304 :         return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
     215             : }
     216             : 
     217             : static inline bool slub_debug_orig_size(struct kmem_cache *s)
     218             : {
     219         320 :         return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
     220           0 :                         (s->flags & SLAB_KMALLOC));
     221             : }
     222             : 
     223           0 : void *fixup_red_left(struct kmem_cache *s, void *p)
     224             : {
     225         870 :         if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
     226           0 :                 p += s->red_left_pad;
     227             : 
     228           0 :         return p;
     229             : }
     230             : 
     231             : static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
     232             : {
     233             : #ifdef CONFIG_SLUB_CPU_PARTIAL
     234             :         return !kmem_cache_debug(s);
     235             : #else
     236             :         return false;
     237             : #endif
     238             : }
     239             : 
     240             : /*
     241             :  * Issues still to be resolved:
     242             :  *
     243             :  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
     244             :  *
     245             :  * - Variable sizing of the per node arrays
     246             :  */
     247             : 
     248             : /* Enable to log cmpxchg failures */
     249             : #undef SLUB_DEBUG_CMPXCHG
     250             : 
     251             : #ifndef CONFIG_SLUB_TINY
     252             : /*
     253             :  * Minimum number of partial slabs. These will be left on the partial
     254             :  * lists even if they are empty. kmem_cache_shrink may reclaim them.
     255             :  */
     256             : #define MIN_PARTIAL 5
     257             : 
     258             : /*
     259             :  * Maximum number of desirable partial slabs.
     260             :  * The existence of more partial slabs makes kmem_cache_shrink
     261             :  * sort the partial list by the number of objects in use.
     262             :  */
     263             : #define MAX_PARTIAL 10
     264             : #else
     265             : #define MIN_PARTIAL 0
     266             : #define MAX_PARTIAL 0
     267             : #endif
     268             : 
     269             : #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
     270             :                                 SLAB_POISON | SLAB_STORE_USER)
     271             : 
     272             : /*
     273             :  * These debug flags cannot use CMPXCHG because there might be consistency
     274             :  * issues when checking or reading debug information
     275             :  */
     276             : #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
     277             :                                 SLAB_TRACE)
     278             : 
     279             : 
     280             : /*
     281             :  * Debugging flags that require metadata to be stored in the slab.  These get
     282             :  * disabled when slub_debug=O is used and a cache's min order increases with
     283             :  * metadata.
     284             :  */
     285             : #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
     286             : 
     287             : #define OO_SHIFT        16
     288             : #define OO_MASK         ((1 << OO_SHIFT) - 1)
     289             : #define MAX_OBJS_PER_PAGE       32767 /* since slab.objects is u15 */
     290             : 
     291             : /* Internal SLUB flags */
     292             : /* Poison object */
     293             : #define __OBJECT_POISON         ((slab_flags_t __force)0x80000000U)
     294             : /* Use cmpxchg_double */
     295             : 
     296             : #ifdef system_has_freelist_aba
     297             : #define __CMPXCHG_DOUBLE        ((slab_flags_t __force)0x40000000U)
     298             : #else
     299             : #define __CMPXCHG_DOUBLE        ((slab_flags_t __force)0U)
     300             : #endif
     301             : 
     302             : /*
     303             :  * Tracking user of a slab.
     304             :  */
     305             : #define TRACK_ADDRS_COUNT 16
     306             : struct track {
     307             :         unsigned long addr;     /* Called from address */
     308             : #ifdef CONFIG_STACKDEPOT
     309             :         depot_stack_handle_t handle;
     310             : #endif
     311             :         int cpu;                /* Was running on cpu */
     312             :         int pid;                /* Pid context */
     313             :         unsigned long when;     /* When did the operation occur */
     314             : };
     315             : 
     316             : enum track_item { TRACK_ALLOC, TRACK_FREE };
     317             : 
     318             : #ifdef SLAB_SUPPORTS_SYSFS
     319             : static int sysfs_slab_add(struct kmem_cache *);
     320             : static int sysfs_slab_alias(struct kmem_cache *, const char *);
     321             : #else
     322             : static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
     323             : static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
     324             :                                                         { return 0; }
     325             : #endif
     326             : 
     327             : #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
     328             : static void debugfs_slab_add(struct kmem_cache *);
     329             : #else
     330             : static inline void debugfs_slab_add(struct kmem_cache *s) { }
     331             : #endif
     332             : 
     333             : static inline void stat(const struct kmem_cache *s, enum stat_item si)
     334             : {
     335             : #ifdef CONFIG_SLUB_STATS
     336             :         /*
     337             :          * The rmw is racy on a preemptible kernel but this is acceptable, so
     338             :          * avoid this_cpu_add()'s irq-disable overhead.
     339             :          */
     340             :         raw_cpu_inc(s->cpu_slab->stat[si]);
     341             : #endif
     342             : }
     343             : 
     344             : /*
     345             :  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
     346             :  * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
     347             :  * differ during memory hotplug/hotremove operations.
     348             :  * Protected by slab_mutex.
     349             :  */
     350             : static nodemask_t slab_nodes;
     351             : 
     352             : #ifndef CONFIG_SLUB_TINY
     353             : /*
     354             :  * Workqueue used for flush_cpu_slab().
     355             :  */
     356             : static struct workqueue_struct *flushwq;
     357             : #endif
     358             : 
     359             : /********************************************************************
     360             :  *                      Core slab cache functions
     361             :  *******************************************************************/
     362             : 
     363             : /*
     364             :  * Returns freelist pointer (ptr). With hardening, this is obfuscated
     365             :  * with an XOR of the address where the pointer is held and a per-cache
     366             :  * random number.
     367             :  */
     368             : static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
     369             :                                  unsigned long ptr_addr)
     370             : {
     371             : #ifdef CONFIG_SLAB_FREELIST_HARDENED
     372             :         /*
     373             :          * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
     374             :          * Normally, this doesn't cause any issues, as both set_freepointer()
     375             :          * and get_freepointer() are called with a pointer with the same tag.
     376             :          * However, there are some issues with CONFIG_SLUB_DEBUG code. For
     377             :          * example, when __free_slub() iterates over objects in a cache, it
     378             :          * passes untagged pointers to check_object(). check_object() in turns
      379             :          * passes untagged pointers to check_object(). check_object() in turn
     380             :          * freepointer to be restored incorrectly.
     381             :          */
     382             :         return (void *)((unsigned long)ptr ^ s->random ^
     383             :                         swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
     384             : #else
     385             :         return ptr;
     386             : #endif
     387             : }
     388             : 
     389             : /* Returns the freelist pointer recorded at location ptr_addr. */
     390             : static inline void *freelist_dereference(const struct kmem_cache *s,
     391             :                                          void *ptr_addr)
     392             : {
     393       21481 :         return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
     394             :                             (unsigned long)ptr_addr);
     395             : }
     396             : 
     397             : static inline void *get_freepointer(struct kmem_cache *s, void *object)
     398             : {
     399       21481 :         object = kasan_reset_tag(object);
     400       42962 :         return freelist_dereference(s, object + s->offset);
     401             : }
     402             : 
     403             : #ifndef CONFIG_SLUB_TINY
     404             : static void prefetch_freepointer(const struct kmem_cache *s, void *object)
     405             : {
     406       16488 :         prefetchw(object + s->offset);
     407             : }
     408             : #endif
     409             : 
     410             : /*
     411             :  * When running under KMSAN, get_freepointer_safe() may return an uninitialized
     412             :  * pointer value in the case the current thread loses the race for the next
     413             :  * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
     414             :  * slab_alloc_node() will fail, so the uninitialized value won't be used, but
     415             :  * KMSAN will still check all arguments of cmpxchg because of imperfect
     416             :  * handling of inline assembly.
     417             :  * To work around this problem, we apply __no_kmsan_checks to ensure that
     418             :  * get_freepointer_safe() returns initialized memory.
     419             :  */
     420             : __no_kmsan_checks
     421             : static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
     422             : {
     423             :         unsigned long freepointer_addr;
     424             :         void *p;
     425             : 
     426             :         if (!debug_pagealloc_enabled_static())
     427       32976 :                 return get_freepointer(s, object);
     428             : 
     429             :         object = kasan_reset_tag(object);
     430             :         freepointer_addr = (unsigned long)object + s->offset;
     431             :         copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
     432             :         return freelist_ptr(s, p, freepointer_addr);
     433             : }
     434             : 
     435             : static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
     436             : {
     437       23310 :         unsigned long freeptr_addr = (unsigned long)object + s->offset;
     438             : 
     439             : #ifdef CONFIG_SLAB_FREELIST_HARDENED
     440             :         BUG_ON(object == fp); /* naive detection of double free or corruption */
     441             : #endif
     442             : 
     443       23310 :         freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
     444       23310 :         *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
     445             : }
     446             : 
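/*
 * Illustrative sketch (editor's addition, not part of slub.c): the hardened
 * free pointer is just ptr XOR a per-cache key XOR the byte-swapped slot
 * address, so applying the same mix twice recovers the original pointer.
 * Stand-alone user-space demo with made-up values; assumes 64-bit pointers
 * and uses __builtin_bswap64() in place of the kernel's swab().
 */
#include <assert.h>
#include <stdint.h>

static uintptr_t mix(uintptr_t ptr, uintptr_t key, uintptr_t slot)
{
	return ptr ^ key ^ (uintptr_t)__builtin_bswap64(slot);	/* freelist_ptr() logic */
}

int main(void)
{
	uintptr_t next = 0xffff888012345678UL;	/* next free object */
	uintptr_t key  = 0x5aa5c33c0f0f1234UL;	/* like s->random */
	uintptr_t slot = 0xffff888087654320UL;	/* like object + s->offset */

	uintptr_t stored = mix(next, key, slot);	/* what set_freepointer() writes */
	assert(mix(stored, key, slot) == next);		/* what get_freepointer() reads back */
	return 0;
}
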
     447             : /* Loop over all objects in a slab */
     448             : #define for_each_object(__p, __s, __addr, __objects) \
     449             :         for (__p = fixup_red_left(__s, __addr); \
     450             :                 __p < (__addr) + (__objects) * (__s)->size; \
     451             :                 __p += (__s)->size)
     452             : 
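/*
 * Illustrative sketch (editor's addition, not part of slub.c): a typical
 * for_each_object() walk, in the style of the debug scanners later in this
 * file. The function name is hypothetical; slab_address() and slab->objects
 * are the real helper/field used as the loop bounds.
 */
static inline void example_walk_slab(struct kmem_cache *s, struct slab *slab)
{
	void *addr = slab_address(slab);
	void *p;

	for_each_object(p, s, addr, slab->objects) {
		/* p visits every object slot (allocated or free), s->size apart */
	}
}
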
     453             : static inline unsigned int order_objects(unsigned int order, unsigned int size)
     454             : {
     455         214 :         return ((unsigned int)PAGE_SIZE << order) / size;
     456             : }
     457             : 
     458             : static inline struct kmem_cache_order_objects oo_make(unsigned int order,
     459             :                 unsigned int size)
     460             : {
     461         106 :         struct kmem_cache_order_objects x = {
     462         212 :                 (order << OO_SHIFT) + order_objects(order, size)
     463             :         };
     464             : 
     465             :         return x;
     466             : }
     467             : 
     468             : static inline unsigned int oo_order(struct kmem_cache_order_objects x)
     469             : {
     470        1267 :         return x.x >> OO_SHIFT;
     471             : }
     472             : 
     473             : static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
     474             : {
     475          53 :         return x.x & OO_MASK;
     476             : }
     477             : 
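/*
 * Worked example (editor's addition, not part of slub.c), with made-up
 * numbers: 4 KiB pages, 256-byte objects, order-1 slabs.
 *
 *   order_objects(1, 256) = (4096 << 1) / 256      = 32
 *   oo_make(1, 256).x     = (1 << OO_SHIFT) + 32   = 0x10020
 *   oo_order(x)           = 0x10020 >> OO_SHIFT    = 1
 *   oo_objects(x)         = 0x10020 & OO_MASK      = 32
 */
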
     478             : #ifdef CONFIG_SLUB_CPU_PARTIAL
     479             : static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
     480             : {
     481             :         unsigned int nr_slabs;
     482             : 
     483             :         s->cpu_partial = nr_objects;
     484             : 
     485             :         /*
     486             :          * We take the number of objects but actually limit the number of
     487             :          * slabs on the per cpu partial list, in order to limit excessive
     488             :          * growth of the list. For simplicity we assume that the slabs will
     489             :          * be half-full.
     490             :          */
     491             :         nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
     492             :         s->cpu_partial_slabs = nr_slabs;
     493             : }
     494             : #else
     495             : static inline void
     496             : slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
     497             : {
     498             : }
     499             : #endif /* CONFIG_SLUB_CPU_PARTIAL */
     500             : 
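/*
 * Worked example (editor's addition, not part of slub.c), with made-up
 * numbers: if slub_set_cpu_partial(s, 30) is called for a cache whose slabs
 * hold oo_objects(s->oo) = 32 objects, then assuming half-full slabs:
 *
 *   nr_slabs = DIV_ROUND_UP(30 * 2, 32) = DIV_ROUND_UP(60, 32) = 2
 *
 * so at most two slabs are kept on each per-cpu partial list.
 */
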
     501             : /*
     502             :  * Per slab locking using the pagelock
     503             :  */
     504             : static __always_inline void slab_lock(struct slab *slab)
     505             : {
     506         690 :         struct page *page = slab_page(slab);
     507             : 
     508             :         VM_BUG_ON_PAGE(PageTail(page), page);
     509         690 :         bit_spin_lock(PG_locked, &page->flags);
     510             : }
     511             : 
     512             : static __always_inline void slab_unlock(struct slab *slab)
     513             : {
     514         690 :         struct page *page = slab_page(slab);
     515             : 
     516             :         VM_BUG_ON_PAGE(PageTail(page), page);
     517         690 :         __bit_spin_unlock(PG_locked, &page->flags);
     518             : }
     519             : 
     520             : static inline bool
     521             : __update_freelist_fast(struct slab *slab,
     522             :                       void *freelist_old, unsigned long counters_old,
     523             :                       void *freelist_new, unsigned long counters_new)
     524             : {
     525             : #ifdef system_has_freelist_aba
     526             :         freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
     527             :         freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
     528             : 
     529             :         return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
     530             : #else
     531             :         return false;
     532             : #endif
     533             : }
     534             : 
     535             : static inline bool
     536             : __update_freelist_slow(struct slab *slab,
     537             :                       void *freelist_old, unsigned long counters_old,
     538             :                       void *freelist_new, unsigned long counters_new)
     539             : {
     540         690 :         bool ret = false;
     541             : 
     542         690 :         slab_lock(slab);
     543        1380 :         if (slab->freelist == freelist_old &&
     544         690 :             slab->counters == counters_old) {
     545         690 :                 slab->freelist = freelist_new;
     546         690 :                 slab->counters = counters_new;
     547         690 :                 ret = true;
     548             :         }
     549         690 :         slab_unlock(slab);
     550             : 
     551             :         return ret;
     552             : }
     553             : 
     554             : /*
     555             :  * Interrupts must be disabled (for the fallback code to work right), typically
     556             :  * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
     557             :  * part of bit_spin_lock(), is sufficient because the policy is not to allow any
      558             :  * allocation/free operation in hardirq context. Therefore nothing can
     559             :  * interrupt the operation.
     560             :  */
     561             : static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
     562             :                 void *freelist_old, unsigned long counters_old,
     563             :                 void *freelist_new, unsigned long counters_new,
     564             :                 const char *n)
     565             : {
     566             :         bool ret;
     567             : 
     568             :         if (USE_LOCKLESS_FAST_PATH())
     569             :                 lockdep_assert_irqs_disabled();
     570             : 
     571             :         if (s->flags & __CMPXCHG_DOUBLE) {
     572             :                 ret = __update_freelist_fast(slab, freelist_old, counters_old,
     573             :                                             freelist_new, counters_new);
     574             :         } else {
     575         920 :                 ret = __update_freelist_slow(slab, freelist_old, counters_old,
     576             :                                             freelist_new, counters_new);
     577             :         }
     578         460 :         if (likely(ret))
     579             :                 return true;
     580             : 
     581             :         cpu_relax();
     582           0 :         stat(s, CMPXCHG_DOUBLE_FAIL);
     583             : 
     584             : #ifdef SLUB_DEBUG_CMPXCHG
     585             :         pr_info("%s %s: cmpxchg double redo ", n, s->name);
     586             : #endif
     587             : 
     588             :         return false;
     589             : }
     590             : 
     591         230 : static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
     592             :                 void *freelist_old, unsigned long counters_old,
     593             :                 void *freelist_new, unsigned long counters_new,
     594             :                 const char *n)
     595             : {
     596             :         bool ret;
     597             : 
     598             :         if (s->flags & __CMPXCHG_DOUBLE) {
     599             :                 ret = __update_freelist_fast(slab, freelist_old, counters_old,
     600             :                                             freelist_new, counters_new);
     601             :         } else {
     602             :                 unsigned long flags;
     603             : 
     604         230 :                 local_irq_save(flags);
     605         460 :                 ret = __update_freelist_slow(slab, freelist_old, counters_old,
     606             :                                             freelist_new, counters_new);
     607         460 :                 local_irq_restore(flags);
     608             :         }
     609         230 :         if (likely(ret))
     610             :                 return true;
     611             : 
     612             :         cpu_relax();
     613           0 :         stat(s, CMPXCHG_DOUBLE_FAIL);
     614             : 
     615             : #ifdef SLUB_DEBUG_CMPXCHG
     616             :         pr_info("%s %s: cmpxchg double redo ", n, s->name);
     617             : #endif
     618             : 
     619             :         return false;
     620             : }
     621             : 
     622             : #ifdef CONFIG_SLUB_DEBUG
     623             : static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
     624             : static DEFINE_SPINLOCK(object_map_lock);
     625             : 
     626           0 : static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
     627             :                        struct slab *slab)
     628             : {
     629           0 :         void *addr = slab_address(slab);
     630             :         void *p;
     631             : 
     632           0 :         bitmap_zero(obj_map, slab->objects);
     633             : 
     634           0 :         for (p = slab->freelist; p; p = get_freepointer(s, p))
     635           0 :                 set_bit(__obj_to_index(s, addr, p), obj_map);
     636           0 : }
     637             : 
     638             : #if IS_ENABLED(CONFIG_KUNIT)
     639           0 : static bool slab_add_kunit_errors(void)
     640             : {
     641             :         struct kunit_resource *resource;
     642             : 
     643           0 :         if (!kunit_get_current_test())
     644             :                 return false;
     645             : 
     646           0 :         resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
     647           0 :         if (!resource)
     648             :                 return false;
     649             : 
     650           0 :         (*(int *)resource->data)++;
     651           0 :         kunit_put_resource(resource);
     652           0 :         return true;
     653             : }
     654             : #else
     655             : static inline bool slab_add_kunit_errors(void) { return false; }
     656             : #endif
     657             : 
     658             : static inline unsigned int size_from_object(struct kmem_cache *s)
     659             : {
     660           0 :         if (s->flags & SLAB_RED_ZONE)
     661           0 :                 return s->size - s->red_left_pad;
     662             : 
     663             :         return s->size;
     664             : }
     665             : 
     666             : static inline void *restore_red_left(struct kmem_cache *s, void *p)
     667             : {
     668           0 :         if (s->flags & SLAB_RED_ZONE)
     669           0 :                 p -= s->red_left_pad;
     670             : 
     671             :         return p;
     672             : }
     673             : 
     674             : /*
     675             :  * Debug settings:
     676             :  */
     677             : #if defined(CONFIG_SLUB_DEBUG_ON)
     678             : static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
     679             : #else
     680             : static slab_flags_t slub_debug;
     681             : #endif
     682             : 
     683             : static char *slub_debug_string;
     684             : static int disable_higher_order_debug;
     685             : 
     686             : /*
     687             :  * slub is about to manipulate internal object metadata.  This memory lies
     688             :  * outside the range of the allocated object, so accessing it would normally
     689             :  * be reported by kasan as a bounds error.  metadata_access_enable() is used
     690             :  * to tell kasan that these accesses are OK.
     691             :  */
     692             : static inline void metadata_access_enable(void)
     693             : {
     694             :         kasan_disable_current();
     695             : }
     696             : 
     697             : static inline void metadata_access_disable(void)
     698             : {
     699             :         kasan_enable_current();
     700             : }
     701             : 
     702             : /*
     703             :  * Object debugging
     704             :  */
     705             : 
     706             : /* Verify that a pointer has an address that is valid within a slab page */
     707           0 : static inline int check_valid_pointer(struct kmem_cache *s,
     708             :                                 struct slab *slab, void *object)
     709             : {
     710             :         void *base;
     711             : 
     712           0 :         if (!object)
     713             :                 return 1;
     714             : 
     715           0 :         base = slab_address(slab);
     716           0 :         object = kasan_reset_tag(object);
     717           0 :         object = restore_red_left(s, object);
     718           0 :         if (object < base || object >= base + slab->objects * s->size ||
     719           0 :                 (object - base) % s->size) {
     720             :                 return 0;
     721             :         }
     722             : 
     723           0 :         return 1;
     724             : }
     725             : 
     726             : static void print_section(char *level, char *text, u8 *addr,
     727             :                           unsigned int length)
     728             : {
     729             :         metadata_access_enable();
     730           0 :         print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
     731           0 :                         16, 1, kasan_reset_tag((void *)addr), length, 1);
     732             :         metadata_access_disable();
     733             : }
     734             : 
     735             : /*
     736             :  * See comment in calculate_sizes().
     737             :  */
     738             : static inline bool freeptr_outside_object(struct kmem_cache *s)
     739             : {
     740             :         return s->offset >= s->inuse;
     741             : }
     742             : 
     743             : /*
      744             :  * Return the offset of the end of the info block, which is inuse plus the
      745             :  * free pointer if it does not overlap with the object.
     746             :  */
     747             : static inline unsigned int get_info_end(struct kmem_cache *s)
     748             : {
     749           0 :         if (freeptr_outside_object(s))
     750           0 :                 return s->inuse + sizeof(void *);
     751             :         else
     752             :                 return s->inuse;
     753             : }
     754             : 
     755             : static struct track *get_track(struct kmem_cache *s, void *object,
     756             :         enum track_item alloc)
     757             : {
     758             :         struct track *p;
     759             : 
     760           0 :         p = object + get_info_end(s);
     761             : 
     762           0 :         return kasan_reset_tag(p + alloc);
     763             : }
     764             : 
     765             : #ifdef CONFIG_STACKDEPOT
     766           0 : static noinline depot_stack_handle_t set_track_prepare(void)
     767             : {
     768             :         depot_stack_handle_t handle;
     769             :         unsigned long entries[TRACK_ADDRS_COUNT];
     770             :         unsigned int nr_entries;
     771             : 
     772           0 :         nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
     773           0 :         handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
     774             : 
     775           0 :         return handle;
     776             : }
     777             : #else
     778             : static inline depot_stack_handle_t set_track_prepare(void)
     779             : {
     780             :         return 0;
     781             : }
     782             : #endif
     783             : 
     784             : static void set_track_update(struct kmem_cache *s, void *object,
     785             :                              enum track_item alloc, unsigned long addr,
     786             :                              depot_stack_handle_t handle)
     787             : {
     788           0 :         struct track *p = get_track(s, object, alloc);
     789             : 
     790             : #ifdef CONFIG_STACKDEPOT
     791           0 :         p->handle = handle;
     792             : #endif
     793           0 :         p->addr = addr;
     794           0 :         p->cpu = smp_processor_id();
     795           0 :         p->pid = current->pid;
     796           0 :         p->when = jiffies;
     797             : }
     798             : 
     799             : static __always_inline void set_track(struct kmem_cache *s, void *object,
     800             :                                       enum track_item alloc, unsigned long addr)
     801             : {
     802           0 :         depot_stack_handle_t handle = set_track_prepare();
     803             : 
     804             :         set_track_update(s, object, alloc, addr, handle);
     805             : }
     806             : 
     807           1 : static void init_tracking(struct kmem_cache *s, void *object)
     808             : {
     809             :         struct track *p;
     810             : 
     811           1 :         if (!(s->flags & SLAB_STORE_USER))
     812             :                 return;
     813             : 
     814           0 :         p = get_track(s, object, TRACK_ALLOC);
     815           0 :         memset(p, 0, 2*sizeof(struct track));
     816             : }
     817             : 
     818           0 : static void print_track(const char *s, struct track *t, unsigned long pr_time)
     819             : {
     820             :         depot_stack_handle_t handle __maybe_unused;
     821             : 
     822           0 :         if (!t->addr)
     823             :                 return;
     824             : 
     825           0 :         pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
     826             :                s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
     827             : #ifdef CONFIG_STACKDEPOT
     828           0 :         handle = READ_ONCE(t->handle);
     829           0 :         if (handle)
     830           0 :                 stack_depot_print(handle);
     831             :         else
     832           0 :                 pr_err("object allocation/free stack trace missing\n");
     833             : #endif
     834             : }
     835             : 
     836           0 : void print_tracking(struct kmem_cache *s, void *object)
     837             : {
     838           0 :         unsigned long pr_time = jiffies;
     839           0 :         if (!(s->flags & SLAB_STORE_USER))
     840             :                 return;
     841             : 
     842           0 :         print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
     843           0 :         print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
     844             : }
     845             : 
     846             : static void print_slab_info(const struct slab *slab)
     847             : {
     848           0 :         struct folio *folio = (struct folio *)slab_folio(slab);
     849             : 
     850           0 :         pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
     851             :                slab, slab->objects, slab->inuse, slab->freelist,
     852             :                folio_flags(folio, 0));
     853             : }
     854             : 
     855             : /*
      856             :  * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API
      857             :  * family will round up the real request size to these fixed ones, so
      858             :  * there can be extra space beyond what was requested. Save the original
      859             :  * request size in the metadata area for better debugging and sanity checks.
     860             :  */
     861         107 : static inline void set_orig_size(struct kmem_cache *s,
     862             :                                 void *object, unsigned int orig_size)
     863             : {
     864         107 :         void *p = kasan_reset_tag(object);
     865             : 
     866         107 :         if (!slub_debug_orig_size(s))
     867             :                 return;
     868             : 
     869             : #ifdef CONFIG_KASAN_GENERIC
     870             :         /*
      871             :          * KASAN may save its free metadata in the object's data area at
      872             :          * offset 0. If that metadata is larger than 'orig_size', it will
      873             :          * overlap the data redzone in [orig_size+1, object_size], so
      874             :          * the check should be skipped.
     875             :          */
     876             :         if (kasan_metadata_size(s, true) > orig_size)
     877             :                 orig_size = s->object_size;
     878             : #endif
     879             : 
     880           0 :         p += get_info_end(s);
     881           0 :         p += sizeof(struct track) * 2;
     882             : 
     883           0 :         *(unsigned int *)p = orig_size;
     884             : }
     885             : 
     886           0 : static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
     887             : {
     888           0 :         void *p = kasan_reset_tag(object);
     889             : 
     890           0 :         if (!slub_debug_orig_size(s))
     891           0 :                 return s->object_size;
     892             : 
     893           0 :         p += get_info_end(s);
     894           0 :         p += sizeof(struct track) * 2;
     895             : 
     896           0 :         return *(unsigned int *)p;
     897             : }
     898             : 
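/*
 * Illustrative sketch (editor's addition, not part of slub.c): where
 * set_orig_size()/get_orig_size() put the saved request size, for a
 * SLAB_STORE_USER | SLAB_KMALLOC cache (offsets as used in the code above):
 *
 *   object + get_info_end(s)                                TRACK_ALLOC record
 *   object + get_info_end(s) + sizeof(struct track)         TRACK_FREE record
 *   object + get_info_end(s) + 2 * sizeof(struct track)     unsigned int orig_size
 */
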
     899         107 : void skip_orig_size_check(struct kmem_cache *s, const void *object)
     900             : {
     901         107 :         set_orig_size(s, (void *)object, s->object_size);
     902         107 : }
     903             : 
     904           0 : static void slab_bug(struct kmem_cache *s, char *fmt, ...)
     905             : {
     906             :         struct va_format vaf;
     907             :         va_list args;
     908             : 
     909           0 :         va_start(args, fmt);
     910           0 :         vaf.fmt = fmt;
     911           0 :         vaf.va = &args;
     912           0 :         pr_err("=============================================================================\n");
     913           0 :         pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
     914           0 :         pr_err("-----------------------------------------------------------------------------\n\n");
     915           0 :         va_end(args);
     916           0 : }
     917             : 
     918             : __printf(2, 3)
     919           0 : static void slab_fix(struct kmem_cache *s, char *fmt, ...)
     920             : {
     921             :         struct va_format vaf;
     922             :         va_list args;
     923             : 
     924           0 :         if (slab_add_kunit_errors())
     925           0 :                 return;
     926             : 
     927           0 :         va_start(args, fmt);
     928           0 :         vaf.fmt = fmt;
     929           0 :         vaf.va = &args;
     930           0 :         pr_err("FIX %s: %pV\n", s->name, &vaf);
     931           0 :         va_end(args);
     932             : }
     933             : 
     934           0 : static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
     935             : {
     936             :         unsigned int off;       /* Offset of last byte */
     937           0 :         u8 *addr = slab_address(slab);
     938             : 
     939           0 :         print_tracking(s, p);
     940             : 
     941           0 :         print_slab_info(slab);
     942             : 
     943           0 :         pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
     944             :                p, p - addr, get_freepointer(s, p));
     945             : 
     946           0 :         if (s->flags & SLAB_RED_ZONE)
     947           0 :                 print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
     948             :                               s->red_left_pad);
     949           0 :         else if (p > addr + 16)
     950           0 :                 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
     951             : 
     952           0 :         print_section(KERN_ERR,         "Object   ", p,
     953           0 :                       min_t(unsigned int, s->object_size, PAGE_SIZE));
     954           0 :         if (s->flags & SLAB_RED_ZONE)
     955           0 :                 print_section(KERN_ERR, "Redzone  ", p + s->object_size,
     956           0 :                         s->inuse - s->object_size);
     957             : 
     958           0 :         off = get_info_end(s);
     959             : 
     960           0 :         if (s->flags & SLAB_STORE_USER)
     961           0 :                 off += 2 * sizeof(struct track);
     962             : 
     963           0 :         if (slub_debug_orig_size(s))
     964           0 :                 off += sizeof(unsigned int);
     965             : 
     966           0 :         off += kasan_metadata_size(s, false);
     967             : 
     968           0 :         if (off != size_from_object(s))
     969             :                 /* Beginning of the filler is the free pointer */
     970           0 :                 print_section(KERN_ERR, "Padding  ", p + off,
     971           0 :                               size_from_object(s) - off);
     972             : 
     973           0 :         dump_stack();
     974           0 : }
     975             : 
     976           0 : static void object_err(struct kmem_cache *s, struct slab *slab,
     977             :                         u8 *object, char *reason)
     978             : {
     979           0 :         if (slab_add_kunit_errors())
     980             :                 return;
     981             : 
     982           0 :         slab_bug(s, "%s", reason);
     983           0 :         print_trailer(s, slab, object);
     984           0 :         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
     985             : }
     986             : 
     987          82 : static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
     988             :                                void **freelist, void *nextfree)
     989             : {
     990          82 :         if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
     991           0 :             !check_valid_pointer(s, slab, nextfree) && freelist) {
     992           0 :                 object_err(s, slab, *freelist, "Freechain corrupt");
     993           0 :                 *freelist = NULL;
     994           0 :                 slab_fix(s, "Isolate corrupted freechain");
     995           0 :                 return true;
     996             :         }
     997             : 
     998             :         return false;
     999             : }
    1000             : 
    1001           0 : static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
    1002             :                         const char *fmt, ...)
    1003             : {
    1004             :         va_list args;
    1005             :         char buf[100];
    1006             : 
    1007           0 :         if (slab_add_kunit_errors())
    1008           0 :                 return;
    1009             : 
    1010           0 :         va_start(args, fmt);
    1011           0 :         vsnprintf(buf, sizeof(buf), fmt, args);
    1012           0 :         va_end(args);
    1013           0 :         slab_bug(s, "%s", buf);
    1014           0 :         print_slab_info(slab);
    1015           0 :         dump_stack();
    1016           0 :         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
    1017             : }
    1018             : 
    1019           1 : static void init_object(struct kmem_cache *s, void *object, u8 val)
    1020             : {
    1021           1 :         u8 *p = kasan_reset_tag(object);
    1022           1 :         unsigned int poison_size = s->object_size;
    1023             : 
    1024           1 :         if (s->flags & SLAB_RED_ZONE) {
    1025           0 :                 memset(p - s->red_left_pad, val, s->red_left_pad);
    1026             : 
    1027           0 :                 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
    1028             :                         /*
    1029             :                          * Redzone the extra allocated space by kmalloc than
    1030             :                          * requested, and the poison size will be limited to
    1031             :                          * the original request size accordingly.
    1032             :                          */
    1033           0 :                         poison_size = get_orig_size(s, object);
    1034             :                 }
    1035             :         }
    1036             : 
    1037           1 :         if (s->flags & __OBJECT_POISON) {
    1038           0 :                 memset(p, POISON_FREE, poison_size - 1);
    1039           0 :                 p[poison_size - 1] = POISON_END;
    1040             :         }
    1041             : 
    1042           1 :         if (s->flags & SLAB_RED_ZONE)
    1043           0 :                 memset(p + poison_size, val, s->inuse - poison_size);
    1044           1 : }
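
/*
 * Minimal userspace sketch of the poison pattern init_object() writes
 * when __OBJECT_POISON is set: every byte of the object except the last
 * is filled with POISON_FREE (0x6b) and the last byte with POISON_END
 * (0xa5), as noted in the layout comment further below.  The buffer and
 * its size here are made up purely for illustration.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char object[32];
	size_t poison_size = sizeof(object);

	memset(object, 0x6b, poison_size - 1);	/* POISON_FREE */
	object[poison_size - 1] = 0xa5;		/* POISON_END */

	printf("first byte 0x%02x, last byte 0x%02x\n",
	       object[0], object[poison_size - 1]);
	return 0;
}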
    1045             : 
    1046           0 : static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
    1047             :                                                 void *from, void *to)
    1048             : {
    1049           0 :         slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
    1050           0 :         memset(from, data, to - from);
    1051           0 : }
    1052             : 
    1053           0 : static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
    1054             :                         u8 *object, char *what,
    1055             :                         u8 *start, unsigned int value, unsigned int bytes)
    1056             : {
    1057             :         u8 *fault;
    1058             :         u8 *end;
    1059           0 :         u8 *addr = slab_address(slab);
    1060             : 
    1061             :         metadata_access_enable();
    1062           0 :         fault = memchr_inv(kasan_reset_tag(start), value, bytes);
    1063             :         metadata_access_disable();
    1064           0 :         if (!fault)
    1065             :                 return 1;
    1066             : 
    1067           0 :         end = start + bytes;
    1068           0 :         while (end > fault && end[-1] == value)
    1069           0 :                 end--;
    1070             : 
    1071           0 :         if (slab_add_kunit_errors())
    1072             :                 goto skip_bug_print;
    1073             : 
    1074           0 :         slab_bug(s, "%s overwritten", what);
    1075           0 :         pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
    1076             :                                         fault, end - 1, fault - addr,
    1077             :                                         fault[0], value);
    1078           0 :         print_trailer(s, slab, object);
    1079           0 :         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
    1080             : 
    1081             : skip_bug_print:
    1082           0 :         restore_bytes(s, what, value, fault, end);
    1083           0 :         return 0;
    1084             : }
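
/*
 * Minimal userspace sketch of the scan performed above: find the first
 * and last bytes in a range that differ from an expected fill value.
 * first_mismatch() stands in for the kernel's memchr_inv(); the buffer,
 * the fill value and the injected corruption are illustrative only.
 */
#include <stdio.h>
#include <string.h>

static const unsigned char *first_mismatch(const unsigned char *p,
					   unsigned char value, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (p[i] != value)
			return p + i;
	return NULL;			/* whole range still holds 'value' */
}

int main(void)
{
	unsigned char pad[16];
	const unsigned char *fault, *end = pad + sizeof(pad);

	memset(pad, 0x5a, sizeof(pad));	/* POISON_INUSE-style fill */
	pad[5] = 0;			/* simulate an overwrite */
	pad[9] = 0;

	fault = first_mismatch(pad, 0x5a, sizeof(pad));
	if (fault) {
		while (end > fault && end[-1] == 0x5a)
			end--;		/* trim trailing intact bytes */
		printf("corruption at offsets %td..%td\n",
		       fault - pad, end - 1 - pad);
	}
	return 0;
}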
    1085             : 
    1086             : /*
    1087             :  * Object layout:
    1088             :  *
    1089             :  * object address
    1090             :  *      Bytes of the object to be managed.
    1091             :  *      If the freepointer may overlay the object then the free
     1092             :  *      pointer is in the middle of the object.
    1093             :  *
    1094             :  *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
    1095             :  *      0xa5 (POISON_END)
    1096             :  *
    1097             :  * object + s->object_size
    1098             :  *      Padding to reach word boundary. This is also used for Redzoning.
    1099             :  *      Padding is extended by another word if Redzoning is enabled and
    1100             :  *      object_size == inuse.
    1101             :  *
    1102             :  *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
    1103             :  *      0xcc (RED_ACTIVE) for objects in use.
    1104             :  *
    1105             :  * object + s->inuse
    1106             :  *      Meta data starts here.
    1107             :  *
    1108             :  *      A. Free pointer (if we cannot overwrite object on free)
    1109             :  *      B. Tracking data for SLAB_STORE_USER
    1110             :  *      C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
    1111             :  *      D. Padding to reach required alignment boundary or at minimum
    1112             :  *              one word if debugging is on to be able to detect writes
    1113             :  *              before the word boundary.
    1114             :  *
    1115             :  *      Padding is done using 0x5a (POISON_INUSE)
    1116             :  *
    1117             :  * object + s->size
    1118             :  *      Nothing is used beyond s->size.
    1119             :  *
    1120             :  * If slabcaches are merged then the object_size and inuse boundaries are mostly
     1121             :  * ignored, and therefore no slab options that rely on these boundaries
    1122             :  * may be used with merged slabcaches.
    1123             :  */
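
/*
 * Userspace sketch of the offsets described in the layout comment
 * above, expressed relative to the object address.  The numbers are an
 * invented example geometry rather than any real cache; only the order
 * of the regions mirrors what the comment describes.
 */
#include <stdio.h>

int main(void)
{
	unsigned int red_left_pad = 16;	/* left red zone (example value) */
	unsigned int object_size = 40;	/* caller-visible payload */
	unsigned int inuse = 48;	/* payload plus right red zone */
	unsigned int size = 96;		/* full per-object stride */

	printf("left red zone : [-%u, 0)\n", red_left_pad);
	printf("object        : [0, %u)\n", object_size);
	printf("right red zone: [%u, %u)\n", object_size, inuse);
	printf("metadata      : starts at %u\n", inuse);
	printf("nothing is used beyond object + %u\n", size);
	return 0;
}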
    1124             : 
    1125           0 : static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
    1126             : {
    1127           0 :         unsigned long off = get_info_end(s);    /* The end of info */
    1128             : 
    1129           0 :         if (s->flags & SLAB_STORE_USER) {
    1130             :                 /* We also have user information there */
    1131           0 :                 off += 2 * sizeof(struct track);
    1132             : 
    1133           0 :                 if (s->flags & SLAB_KMALLOC)
    1134           0 :                         off += sizeof(unsigned int);
    1135             :         }
    1136             : 
    1137           0 :         off += kasan_metadata_size(s, false);
    1138             : 
    1139           0 :         if (size_from_object(s) == off)
    1140             :                 return 1;
    1141             : 
    1142           0 :         return check_bytes_and_report(s, slab, p, "Object padding",
    1143           0 :                         p + off, POISON_INUSE, size_from_object(s) - off);
    1144             : }
    1145             : 
    1146             : /* Check the pad bytes at the end of a slab page */
    1147           0 : static void slab_pad_check(struct kmem_cache *s, struct slab *slab)
    1148             : {
    1149             :         u8 *start;
    1150             :         u8 *fault;
    1151             :         u8 *end;
    1152             :         u8 *pad;
    1153             :         int length;
    1154             :         int remainder;
    1155             : 
    1156           0 :         if (!(s->flags & SLAB_POISON))
    1157             :                 return;
    1158             : 
    1159           0 :         start = slab_address(slab);
    1160           0 :         length = slab_size(slab);
    1161           0 :         end = start + length;
    1162           0 :         remainder = length % s->size;
    1163           0 :         if (!remainder)
    1164             :                 return;
    1165             : 
    1166           0 :         pad = end - remainder;
    1167             :         metadata_access_enable();
    1168           0 :         fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
    1169             :         metadata_access_disable();
    1170           0 :         if (!fault)
    1171             :                 return;
    1172           0 :         while (end > fault && end[-1] == POISON_INUSE)
    1173           0 :                 end--;
    1174             : 
    1175           0 :         slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
    1176             :                         fault, end - 1, fault - start);
    1177           0 :         print_section(KERN_ERR, "Padding ", pad, remainder);
    1178             : 
    1179           0 :         restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
    1180             : }
    1181             : 
    1182           0 : static int check_object(struct kmem_cache *s, struct slab *slab,
    1183             :                                         void *object, u8 val)
    1184             : {
    1185           0 :         u8 *p = object;
    1186           0 :         u8 *endobject = object + s->object_size;
    1187             :         unsigned int orig_size;
    1188             : 
    1189           0 :         if (s->flags & SLAB_RED_ZONE) {
    1190           0 :                 if (!check_bytes_and_report(s, slab, object, "Left Redzone",
    1191           0 :                         object - s->red_left_pad, val, s->red_left_pad))
    1192             :                         return 0;
    1193             : 
    1194           0 :                 if (!check_bytes_and_report(s, slab, object, "Right Redzone",
    1195           0 :                         endobject, val, s->inuse - s->object_size))
    1196             :                         return 0;
    1197             : 
    1198           0 :                 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
    1199           0 :                         orig_size = get_orig_size(s, object);
    1200             : 
    1201           0 :                         if (s->object_size > orig_size  &&
    1202           0 :                                 !check_bytes_and_report(s, slab, object,
    1203             :                                         "kmalloc Redzone", p + orig_size,
    1204             :                                         val, s->object_size - orig_size)) {
    1205             :                                 return 0;
    1206             :                         }
    1207             :                 }
    1208             :         } else {
    1209           0 :                 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
    1210           0 :                         check_bytes_and_report(s, slab, p, "Alignment padding",
    1211             :                                 endobject, POISON_INUSE,
    1212             :                                 s->inuse - s->object_size);
    1213             :                 }
    1214             :         }
    1215             : 
    1216           0 :         if (s->flags & SLAB_POISON) {
    1217           0 :                 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
    1218           0 :                         (!check_bytes_and_report(s, slab, p, "Poison", p,
    1219           0 :                                         POISON_FREE, s->object_size - 1) ||
    1220           0 :                          !check_bytes_and_report(s, slab, p, "End Poison",
    1221           0 :                                 p + s->object_size - 1, POISON_END, 1)))
    1222             :                         return 0;
    1223             :                 /*
    1224             :                  * check_pad_bytes cleans up on its own.
    1225             :                  */
    1226           0 :                 check_pad_bytes(s, slab, p);
    1227             :         }
    1228             : 
    1229           0 :         if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
    1230             :                 /*
    1231             :                  * Object and freepointer overlap. Cannot check
    1232             :                  * freepointer while object is allocated.
    1233             :                  */
    1234             :                 return 1;
    1235             : 
    1236             :         /* Check free pointer validity */
    1237           0 :         if (!check_valid_pointer(s, slab, get_freepointer(s, p))) {
    1238           0 :                 object_err(s, slab, p, "Freepointer corrupt");
    1239             :                 /*
    1240             :                  * No choice but to zap it and thus lose the remainder
    1241             :                  * of the free objects in this slab. May cause
    1242             :                  * another error because the object count is now wrong.
    1243             :                  */
    1244           0 :                 set_freepointer(s, p, NULL);
    1245           0 :                 return 0;
    1246             :         }
    1247             :         return 1;
    1248             : }
    1249             : 
    1250           0 : static int check_slab(struct kmem_cache *s, struct slab *slab)
    1251             : {
    1252             :         int maxobj;
    1253             : 
    1254           0 :         if (!folio_test_slab(slab_folio(slab))) {
    1255           0 :                 slab_err(s, slab, "Not a valid slab page");
    1256           0 :                 return 0;
    1257             :         }
    1258             : 
    1259           0 :         maxobj = order_objects(slab_order(slab), s->size);
    1260           0 :         if (slab->objects > maxobj) {
    1261           0 :                 slab_err(s, slab, "objects %u > max %u",
    1262             :                         slab->objects, maxobj);
    1263           0 :                 return 0;
    1264             :         }
    1265           0 :         if (slab->inuse > slab->objects) {
    1266           0 :                 slab_err(s, slab, "inuse %u > max %u",
    1267             :                         slab->inuse, slab->objects);
    1268           0 :                 return 0;
    1269             :         }
     1270             :         /* slab_pad_check fixes things up after itself */
    1271           0 :         slab_pad_check(s, slab);
    1272           0 :         return 1;
    1273             : }
    1274             : 
    1275             : /*
    1276             :  * Determine if a certain object in a slab is on the freelist. Must hold the
    1277             :  * slab lock to guarantee that the chains are in a consistent state.
    1278             :  */
    1279           0 : static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
    1280             : {
    1281           0 :         int nr = 0;
    1282             :         void *fp;
    1283           0 :         void *object = NULL;
    1284             :         int max_objects;
    1285             : 
    1286           0 :         fp = slab->freelist;
    1287           0 :         while (fp && nr <= slab->objects) {
    1288           0 :                 if (fp == search)
    1289             :                         return 1;
    1290           0 :                 if (!check_valid_pointer(s, slab, fp)) {
    1291           0 :                         if (object) {
    1292           0 :                                 object_err(s, slab, object,
    1293             :                                         "Freechain corrupt");
    1294           0 :                                 set_freepointer(s, object, NULL);
    1295             :                         } else {
    1296           0 :                                 slab_err(s, slab, "Freepointer corrupt");
    1297           0 :                                 slab->freelist = NULL;
    1298           0 :                                 slab->inuse = slab->objects;
    1299           0 :                                 slab_fix(s, "Freelist cleared");
    1300           0 :                                 return 0;
    1301             :                         }
    1302             :                         break;
    1303             :                 }
    1304           0 :                 object = fp;
    1305           0 :                 fp = get_freepointer(s, object);
    1306           0 :                 nr++;
    1307             :         }
    1308             : 
    1309           0 :         max_objects = order_objects(slab_order(slab), s->size);
    1310           0 :         if (max_objects > MAX_OBJS_PER_PAGE)
    1311           0 :                 max_objects = MAX_OBJS_PER_PAGE;
    1312             : 
    1313           0 :         if (slab->objects != max_objects) {
    1314           0 :                 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
    1315             :                          slab->objects, max_objects);
    1316           0 :                 slab->objects = max_objects;
    1317           0 :                 slab_fix(s, "Number of objects adjusted");
    1318             :         }
    1319           0 :         if (slab->inuse != slab->objects - nr) {
    1320           0 :                 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
    1321             :                          slab->inuse, slab->objects - nr);
    1322           0 :                 slab->inuse = slab->objects - nr;
    1323           0 :                 slab_fix(s, "Object count adjusted");
    1324             :         }
    1325           0 :         return search == NULL;
    1326             : }
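
/*
 * Userspace sketch of the bounded walk used by on_freelist() above:
 * follow a singly linked free list but cap the number of steps at the
 * expected object count, so a corrupted (cyclic) chain cannot loop
 * forever.  The node type and the list built in main() are invented
 * for illustration.
 */
#include <stdio.h>

struct obj {
	struct obj *next;
};

static int count_free(struct obj *freelist, int max_objects)
{
	int nr = 0;

	for (struct obj *fp = freelist; fp && nr <= max_objects; fp = fp->next)
		nr++;
	return nr;
}

int main(void)
{
	struct obj o[3];

	o[0].next = &o[1];
	o[1].next = &o[2];
	o[2].next = &o[0];	/* corruption: the chain loops back */

	/* Without the cap this walk would never terminate. */
	printf("walked %d entries (3 objects in the slab)\n",
	       count_free(&o[0], 3));
	return 0;
}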
    1327             : 
    1328           0 : static void trace(struct kmem_cache *s, struct slab *slab, void *object,
    1329             :                                                                 int alloc)
    1330             : {
    1331           0 :         if (s->flags & SLAB_TRACE) {
    1332           0 :                 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
    1333             :                         s->name,
    1334             :                         alloc ? "alloc" : "free",
    1335             :                         object, slab->inuse,
    1336             :                         slab->freelist);
    1337             : 
    1338           0 :                 if (!alloc)
    1339           0 :                         print_section(KERN_INFO, "Object ", (void *)object,
    1340             :                                         s->object_size);
    1341             : 
    1342           0 :                 dump_stack();
    1343             :         }
    1344           0 : }
    1345             : 
    1346             : /*
    1347             :  * Tracking of fully allocated slabs for debugging purposes.
    1348             :  */
    1349             : static void add_full(struct kmem_cache *s,
    1350             :         struct kmem_cache_node *n, struct slab *slab)
    1351             : {
    1352           0 :         if (!(s->flags & SLAB_STORE_USER))
    1353             :                 return;
    1354             : 
    1355             :         lockdep_assert_held(&n->list_lock);
    1356           0 :         list_add(&slab->slab_list, &n->full);
    1357             : }
    1358             : 
    1359             : static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
    1360             : {
    1361          35 :         if (!(s->flags & SLAB_STORE_USER))
    1362             :                 return;
    1363             : 
    1364             :         lockdep_assert_held(&n->list_lock);
    1365           0 :         list_del(&slab->slab_list);
    1366             : }
    1367             : 
    1368             : static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
    1369             : {
    1370           0 :         return atomic_long_read(&n->nr_slabs);
    1371             : }
    1372             : 
    1373             : static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
    1374             : {
    1375         436 :         struct kmem_cache_node *n = get_node(s, node);
    1376             : 
    1377             :         /*
    1378             :          * May be called early in order to allocate a slab for the
    1379             :          * kmem_cache_node structure. Solve the chicken-egg
    1380             :          * dilemma by deferring the increment of the count during
    1381             :          * bootstrap (see early_kmem_cache_node_alloc).
    1382             :          */
    1383         436 :         if (likely(n)) {
    1384         870 :                 atomic_long_inc(&n->nr_slabs);
    1385         435 :                 atomic_long_add(objects, &n->total_objects);
    1386             :         }
    1387             : }
    1388             : static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
    1389             : {
    1390           0 :         struct kmem_cache_node *n = get_node(s, node);
    1391             : 
    1392           0 :         atomic_long_dec(&n->nr_slabs);
    1393           0 :         atomic_long_sub(objects, &n->total_objects);
    1394             : }
    1395             : 
    1396             : /* Object debug checks for alloc/free paths */
    1397       14462 : static void setup_object_debug(struct kmem_cache *s, void *object)
    1398             : {
    1399       28924 :         if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
    1400             :                 return;
    1401             : 
    1402           0 :         init_object(s, object, SLUB_RED_INACTIVE);
    1403           0 :         init_tracking(s, object);
    1404             : }
    1405             : 
    1406             : static
    1407         435 : void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
    1408             : {
    1409         870 :         if (!kmem_cache_debug_flags(s, SLAB_POISON))
    1410             :                 return;
    1411             : 
    1412           0 :         metadata_access_enable();
    1413           0 :         memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
    1414             :         metadata_access_disable();
    1415             : }
    1416             : 
    1417           0 : static inline int alloc_consistency_checks(struct kmem_cache *s,
    1418             :                                         struct slab *slab, void *object)
    1419             : {
    1420           0 :         if (!check_slab(s, slab))
    1421             :                 return 0;
    1422             : 
    1423           0 :         if (!check_valid_pointer(s, slab, object)) {
    1424           0 :                 object_err(s, slab, object, "Freelist Pointer check fails");
    1425           0 :                 return 0;
    1426             :         }
    1427             : 
    1428           0 :         if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
    1429             :                 return 0;
    1430             : 
    1431           0 :         return 1;
    1432             : }
    1433             : 
    1434           0 : static noinline bool alloc_debug_processing(struct kmem_cache *s,
    1435             :                         struct slab *slab, void *object, int orig_size)
    1436             : {
    1437           0 :         if (s->flags & SLAB_CONSISTENCY_CHECKS) {
    1438           0 :                 if (!alloc_consistency_checks(s, slab, object))
    1439             :                         goto bad;
    1440             :         }
    1441             : 
    1442             :         /* Success. Perform special debug activities for allocs */
    1443           0 :         trace(s, slab, object, 1);
    1444           0 :         set_orig_size(s, object, orig_size);
    1445           0 :         init_object(s, object, SLUB_RED_ACTIVE);
    1446           0 :         return true;
    1447             : 
    1448             : bad:
    1449           0 :         if (folio_test_slab(slab_folio(slab))) {
    1450             :                 /*
     1451             :                  * If this is a slab page then let's do the best we can
    1452             :                  * to avoid issues in the future. Marking all objects
    1453             :                  * as used avoids touching the remaining objects.
    1454             :                  */
    1455           0 :                 slab_fix(s, "Marking all objects used");
    1456           0 :                 slab->inuse = slab->objects;
    1457           0 :                 slab->freelist = NULL;
    1458             :         }
    1459             :         return false;
    1460             : }
    1461             : 
    1462           0 : static inline int free_consistency_checks(struct kmem_cache *s,
    1463             :                 struct slab *slab, void *object, unsigned long addr)
    1464             : {
    1465           0 :         if (!check_valid_pointer(s, slab, object)) {
    1466           0 :                 slab_err(s, slab, "Invalid object pointer 0x%p", object);
    1467             :                 return 0;
    1468             :         }
    1469             : 
    1470           0 :         if (on_freelist(s, slab, object)) {
    1471           0 :                 object_err(s, slab, object, "Object already free");
    1472             :                 return 0;
    1473             :         }
    1474             : 
    1475           0 :         if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
    1476             :                 return 0;
    1477             : 
    1478           0 :         if (unlikely(s != slab->slab_cache)) {
    1479           0 :                 if (!folio_test_slab(slab_folio(slab))) {
    1480           0 :                         slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
    1481             :                                  object);
    1482           0 :                 } else if (!slab->slab_cache) {
    1483           0 :                         pr_err("SLUB <none>: no slab for object 0x%p.\n",
    1484             :                                object);
    1485           0 :                         dump_stack();
    1486             :                 } else
    1487           0 :                         object_err(s, slab, object,
    1488             :                                         "page slab pointer corrupt.");
    1489             :                 return 0;
    1490             :         }
    1491             :         return 1;
    1492             : }
    1493             : 
    1494             : /*
    1495             :  * Parse a block of slub_debug options. Blocks are delimited by ';'
    1496             :  *
    1497             :  * @str:    start of block
    1498             :  * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
    1499             :  * @slabs:  return start of list of slabs, or NULL when there's no list
    1500             :  * @init:   assume this is initial parsing and not per-kmem-create parsing
    1501             :  *
    1502             :  * returns the start of next block if there's any, or NULL
    1503             :  */
    1504             : static char *
    1505           0 : parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
    1506             : {
    1507           0 :         bool higher_order_disable = false;
    1508             : 
    1509             :         /* Skip any completely empty blocks */
    1510           0 :         while (*str && *str == ';')
    1511           0 :                 str++;
    1512             : 
    1513           0 :         if (*str == ',') {
    1514             :                 /*
    1515             :                  * No options but restriction on slabs. This means full
    1516             :                  * debugging for slabs matching a pattern.
    1517             :                  */
    1518           0 :                 *flags = DEBUG_DEFAULT_FLAGS;
    1519           0 :                 goto check_slabs;
    1520             :         }
    1521           0 :         *flags = 0;
    1522             : 
    1523             :         /* Determine which debug features should be switched on */
    1524           0 :         for (; *str && *str != ',' && *str != ';'; str++) {
    1525           0 :                 switch (tolower(*str)) {
    1526             :                 case '-':
    1527           0 :                         *flags = 0;
    1528           0 :                         break;
    1529             :                 case 'f':
    1530           0 :                         *flags |= SLAB_CONSISTENCY_CHECKS;
    1531           0 :                         break;
    1532             :                 case 'z':
    1533           0 :                         *flags |= SLAB_RED_ZONE;
    1534           0 :                         break;
    1535             :                 case 'p':
    1536           0 :                         *flags |= SLAB_POISON;
    1537           0 :                         break;
    1538             :                 case 'u':
    1539           0 :                         *flags |= SLAB_STORE_USER;
    1540           0 :                         break;
    1541             :                 case 't':
    1542           0 :                         *flags |= SLAB_TRACE;
    1543           0 :                         break;
    1544             :                 case 'a':
    1545             :                         *flags |= SLAB_FAILSLAB;
    1546           0 :                         break;
    1547             :                 case 'o':
    1548             :                         /*
     1549             :                          * Avoid enabling debugging on caches if their minimum
    1550             :                          * order would increase as a result.
    1551             :                          */
    1552             :                         higher_order_disable = true;
    1553             :                         break;
    1554             :                 default:
    1555           0 :                         if (init)
    1556           0 :                                 pr_err("slub_debug option '%c' unknown. skipped\n", *str);
    1557             :                 }
    1558             :         }
    1559             : check_slabs:
    1560           0 :         if (*str == ',')
    1561           0 :                 *slabs = ++str;
    1562             :         else
    1563           0 :                 *slabs = NULL;
    1564             : 
    1565             :         /* Skip over the slab list */
    1566           0 :         while (*str && *str != ';')
    1567           0 :                 str++;
    1568             : 
    1569             :         /* Skip any completely empty blocks */
    1570           0 :         while (*str && *str == ';')
    1571           0 :                 str++;
    1572             : 
    1573           0 :         if (init && higher_order_disable)
    1574           0 :                 disable_higher_order_debug = 1;
    1575             : 
    1576           0 :         if (*str)
    1577             :                 return str;
    1578             :         else
    1579           0 :                 return NULL;
    1580             : }
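
/*
 * Userspace sketch of the block syntax documented above, e.g. a boot
 * string like slub_debug=FZ;P,kmalloc-64;U,dentry,inode_cache: blocks
 * are separated by ';', and within a block the flag letters are split
 * from an optional comma-delimited slab list at the first ','.  The
 * example string and the toy tokenizer are illustrative only, not the
 * kernel parser.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char str[] = "FZ;P,kmalloc-64;U,dentry,inode_cache";
	char *block = str;

	while (block && *block) {
		char *next = strchr(block, ';');
		char *slabs;

		if (next)
			*next++ = '\0';	/* terminate this block */

		slabs = strchr(block, ',');
		if (slabs)
			*slabs++ = '\0';

		printf("flags \"%s\" apply to %s\n",
		       *block ? block : "(default)",
		       slabs ? slabs : "all slabs");
		block = next;
	}
	return 0;
}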
    1581             : 
    1582           0 : static int __init setup_slub_debug(char *str)
    1583             : {
    1584             :         slab_flags_t flags;
    1585             :         slab_flags_t global_flags;
    1586             :         char *saved_str;
    1587             :         char *slab_list;
    1588           0 :         bool global_slub_debug_changed = false;
    1589           0 :         bool slab_list_specified = false;
    1590             : 
    1591           0 :         global_flags = DEBUG_DEFAULT_FLAGS;
    1592           0 :         if (*str++ != '=' || !*str)
    1593             :                 /*
    1594             :                  * No options specified. Switch on full debugging.
    1595             :                  */
    1596             :                 goto out;
    1597             : 
    1598             :         saved_str = str;
    1599           0 :         while (str) {
    1600           0 :                 str = parse_slub_debug_flags(str, &flags, &slab_list, true);
    1601             : 
    1602           0 :                 if (!slab_list) {
    1603           0 :                         global_flags = flags;
    1604           0 :                         global_slub_debug_changed = true;
    1605             :                 } else {
    1606           0 :                         slab_list_specified = true;
    1607           0 :                         if (flags & SLAB_STORE_USER)
    1608           0 :                                 stack_depot_request_early_init();
    1609             :                 }
    1610             :         }
    1611             : 
    1612             :         /*
     1613             :          * For backwards compatibility, a single list of flags with a list of
    1614             :          * slabs means debugging is only changed for those slabs, so the global
    1615             :          * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
     1616             :          * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
    1617             :          * long as there is no option specifying flags without a slab list.
    1618             :          */
    1619           0 :         if (slab_list_specified) {
    1620           0 :                 if (!global_slub_debug_changed)
    1621           0 :                         global_flags = slub_debug;
    1622           0 :                 slub_debug_string = saved_str;
    1623             :         }
    1624             : out:
    1625           0 :         slub_debug = global_flags;
    1626           0 :         if (slub_debug & SLAB_STORE_USER)
    1627           0 :                 stack_depot_request_early_init();
    1628           0 :         if (slub_debug != 0 || slub_debug_string)
    1629           0 :                 static_branch_enable(&slub_debug_enabled);
    1630             :         else
    1631           0 :                 static_branch_disable(&slub_debug_enabled);
    1632           0 :         if ((static_branch_unlikely(&init_on_alloc) ||
    1633           0 :              static_branch_unlikely(&init_on_free)) &&
    1634           0 :             (slub_debug & SLAB_POISON))
    1635           0 :                 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
    1636           0 :         return 1;
    1637             : }
    1638             : 
    1639             : __setup("slub_debug", setup_slub_debug);
    1640             : 
    1641             : /*
    1642             :  * kmem_cache_flags - apply debugging options to the cache
    1643             :  * @object_size:        the size of an object without meta data
    1644             :  * @flags:              flags to set
    1645             :  * @name:               name of the cache
    1646             :  *
    1647             :  * Debug option(s) are applied to @flags. In addition to the debug
    1648             :  * option(s), if a slab name (or multiple) is specified i.e.
    1649             :  * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
    1650             :  * then only the select slabs will receive the debug option(s).
    1651             :  */
    1652         103 : slab_flags_t kmem_cache_flags(unsigned int object_size,
    1653             :         slab_flags_t flags, const char *name)
    1654             : {
    1655             :         char *iter;
    1656             :         size_t len;
    1657             :         char *next_block;
    1658             :         slab_flags_t block_flags;
    1659         103 :         slab_flags_t slub_debug_local = slub_debug;
    1660             : 
    1661         103 :         if (flags & SLAB_NO_USER_FLAGS)
    1662             :                 return flags;
    1663             : 
    1664             :         /*
    1665             :          * If the slab cache is for debugging (e.g. kmemleak) then
    1666             :          * don't store user (stack trace) information by default,
    1667             :          * but let the user enable it via the command line below.
    1668             :          */
    1669         103 :         if (flags & SLAB_NOLEAKTRACE)
    1670           0 :                 slub_debug_local &= ~SLAB_STORE_USER;
    1671             : 
    1672         103 :         len = strlen(name);
    1673         103 :         next_block = slub_debug_string;
    1674             :         /* Go through all blocks of debug options, see if any matches our slab's name */
    1675         206 :         while (next_block) {
    1676           0 :                 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
    1677           0 :                 if (!iter)
    1678           0 :                         continue;
    1679             :                 /* Found a block that has a slab list, search it */
    1680           0 :                 while (*iter) {
    1681             :                         char *end, *glob;
    1682             :                         size_t cmplen;
    1683             : 
    1684           0 :                         end = strchrnul(iter, ',');
    1685           0 :                         if (next_block && next_block < end)
    1686           0 :                                 end = next_block - 1;
    1687             : 
    1688           0 :                         glob = strnchr(iter, end - iter, '*');
    1689           0 :                         if (glob)
    1690           0 :                                 cmplen = glob - iter;
    1691             :                         else
    1692           0 :                                 cmplen = max_t(size_t, len, (end - iter));
    1693             : 
    1694           0 :                         if (!strncmp(name, iter, cmplen)) {
    1695           0 :                                 flags |= block_flags;
    1696           0 :                                 return flags;
    1697             :                         }
    1698             : 
    1699           0 :                         if (!*end || *end == ';')
    1700             :                                 break;
    1701           0 :                         iter = end + 1;
    1702             :                 }
    1703             :         }
    1704             : 
    1705         103 :         return flags | slub_debug_local;
    1706             : }
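
/*
 * Userspace sketch of the name matching in kmem_cache_flags() above:
 * an entry in the slab list may end in '*', in which case only the
 * characters before the '*' are compared, so "kmalloc-*" matches every
 * kmalloc cache.  pattern_match() and the sample names are invented
 * for illustration and ignore the exact length handling of the kernel.
 */
#include <stdio.h>
#include <string.h>

static int pattern_match(const char *pattern, const char *name)
{
	const char *glob = strchr(pattern, '*');

	if (glob)
		return strncmp(name, pattern, (size_t)(glob - pattern)) == 0;
	return strcmp(name, pattern) == 0;
}

int main(void)
{
	printf("%d\n", pattern_match("kmalloc-*", "kmalloc-64"));	/* 1 */
	printf("%d\n", pattern_match("kmalloc-*", "dentry"));		/* 0 */
	printf("%d\n", pattern_match("dentry", "dentry"));		/* 1 */
	return 0;
}
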
    1707             : #else /* !CONFIG_SLUB_DEBUG */
    1708             : static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
    1709             : static inline
    1710             : void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
    1711             : 
    1712             : static inline bool alloc_debug_processing(struct kmem_cache *s,
    1713             :         struct slab *slab, void *object, int orig_size) { return true; }
    1714             : 
    1715             : static inline bool free_debug_processing(struct kmem_cache *s,
    1716             :         struct slab *slab, void *head, void *tail, int *bulk_cnt,
    1717             :         unsigned long addr, depot_stack_handle_t handle) { return true; }
    1718             : 
    1719             : static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
    1720             : static inline int check_object(struct kmem_cache *s, struct slab *slab,
    1721             :                         void *object, u8 val) { return 1; }
    1722             : static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
    1723             : static inline void set_track(struct kmem_cache *s, void *object,
    1724             :                              enum track_item alloc, unsigned long addr) {}
    1725             : static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
    1726             :                                         struct slab *slab) {}
    1727             : static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
    1728             :                                         struct slab *slab) {}
    1729             : slab_flags_t kmem_cache_flags(unsigned int object_size,
    1730             :         slab_flags_t flags, const char *name)
    1731             : {
    1732             :         return flags;
    1733             : }
    1734             : #define slub_debug 0
    1735             : 
    1736             : #define disable_higher_order_debug 0
    1737             : 
    1738             : static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
    1739             :                                                         { return 0; }
    1740             : static inline void inc_slabs_node(struct kmem_cache *s, int node,
    1741             :                                                         int objects) {}
    1742             : static inline void dec_slabs_node(struct kmem_cache *s, int node,
    1743             :                                                         int objects) {}
    1744             : 
    1745             : #ifndef CONFIG_SLUB_TINY
    1746             : static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
    1747             :                                void **freelist, void *nextfree)
    1748             : {
    1749             :         return false;
    1750             : }
    1751             : #endif
    1752             : #endif /* CONFIG_SLUB_DEBUG */
    1753             : 
    1754             : /*
    1755             :  * Hooks for other subsystems that check memory allocations. In a typical
    1756             :  * production configuration these hooks all should produce no code at all.
    1757             :  */
    1758             : static __always_inline bool slab_free_hook(struct kmem_cache *s,
    1759             :                                                 void *x, bool init)
    1760             : {
    1761        4423 :         kmemleak_free_recursive(x, s->flags);
    1762        4423 :         kmsan_slab_free(s, x);
    1763             : 
    1764        4423 :         debug_check_no_locks_freed(x, s->object_size);
    1765             : 
    1766             :         if (!(s->flags & SLAB_DEBUG_OBJECTS))
    1767        4423 :                 debug_check_no_obj_freed(x, s->object_size);
    1768             : 
    1769             :         /* Use KCSAN to help debug racy use-after-free. */
    1770             :         if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
    1771             :                 __kcsan_check_access(x, s->object_size,
    1772             :                                      KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
    1773             : 
    1774             :         /*
    1775             :          * As memory initialization might be integrated into KASAN,
     1776             :          * kasan_slab_free and initialization memsets must be
    1777             :          * kept together to avoid discrepancies in behavior.
    1778             :          *
     1779             :          * The initialization memsets clear the object and the metadata,
    1780             :          * but don't touch the SLAB redzone.
    1781             :          */
    1782        4423 :         if (init) {
    1783             :                 int rsize;
    1784             : 
    1785             :                 if (!kasan_has_integrated_init())
    1786           0 :                         memset(kasan_reset_tag(x), 0, s->object_size);
    1787           0 :                 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
    1788           0 :                 memset((char *)kasan_reset_tag(x) + s->inuse, 0,
    1789             :                        s->size - s->inuse - rsize);
    1790             :         }
    1791             :         /* KASAN might put x into memory quarantine, delaying its reuse. */
    1792        4423 :         return kasan_slab_free(s, x, init);
    1793             : }
    1794             : 
    1795        4423 : static inline bool slab_free_freelist_hook(struct kmem_cache *s,
    1796             :                                            void **head, void **tail,
    1797             :                                            int *cnt)
    1798             : {
    1799             : 
    1800             :         void *object;
    1801        4423 :         void *next = *head;
    1802        4423 :         void *old_tail = *tail ? *tail : *head;
    1803             : 
    1804        4423 :         if (is_kfence_address(next)) {
    1805             :                 slab_free_hook(s, next, false);
    1806             :                 return true;
    1807             :         }
    1808             : 
    1809             :         /* Head and tail of the reconstructed freelist */
    1810        4423 :         *head = NULL;
    1811        4423 :         *tail = NULL;
    1812             : 
    1813             :         do {
    1814        4423 :                 object = next;
    1815        8846 :                 next = get_freepointer(s, object);
    1816             : 
    1817             :                 /* If object's reuse doesn't have to be delayed */
    1818       13269 :                 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
    1819             :                         /* Move object to the new freelist */
    1820        8846 :                         set_freepointer(s, object, *head);
    1821        4423 :                         *head = object;
    1822        4423 :                         if (!*tail)
    1823        4423 :                                 *tail = object;
    1824             :                 } else {
    1825             :                         /*
    1826             :                          * Adjust the reconstructed freelist depth
    1827             :                          * accordingly if object's reuse is delayed.
    1828             :                          */
    1829             :                         --(*cnt);
    1830             :                 }
    1831        4423 :         } while (object != old_tail);
    1832             : 
    1833        4423 :         if (*head == *tail)
    1834        4423 :                 *tail = NULL;
    1835             : 
    1836        4423 :         return *head != NULL;
    1837             : }
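
/*
 * Userspace sketch of the rebuild done by slab_free_freelist_hook()
 * above: walk an existing singly linked chain and relink only the
 * elements that pass a filter, producing a new head and a corrected
 * count.  The node type, the even-value filter and the sample data are
 * invented for illustration.
 */
#include <stdio.h>
#include <stddef.h>

struct node {
	int val;
	struct node *next;
};

static struct node *rebuild(struct node *old_head, int *cnt)
{
	struct node *head = NULL;

	for (struct node *n = old_head, *next; n; n = next) {
		next = n->next;
		if (n->val % 2 == 0) {
			n->next = head;		/* push onto the new list */
			head = n;
		} else {
			--(*cnt);		/* dropped, adjust the count */
		}
	}
	return head;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	int cnt = 3;

	for (struct node *n = rebuild(&a, &cnt); n; n = n->next)
		printf("%d ", n->val);
	printf("(cnt=%d)\n", cnt);		/* prints: 2 (cnt=1) */
	return 0;
}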
    1838             : 
    1839             : static void *setup_object(struct kmem_cache *s, void *object)
    1840             : {
    1841       14462 :         setup_object_debug(s, object);
    1842       14462 :         object = kasan_init_slab_obj(s, object);
    1843       14462 :         if (unlikely(s->ctor)) {
    1844         260 :                 kasan_unpoison_object_data(s, object);
    1845         260 :                 s->ctor(object);
    1846         260 :                 kasan_poison_object_data(s, object);
    1847             :         }
    1848             :         return object;
    1849             : }
    1850             : 
    1851             : /*
    1852             :  * Slab allocation and freeing
    1853             :  */
    1854         435 : static inline struct slab *alloc_slab_page(gfp_t flags, int node,
    1855             :                 struct kmem_cache_order_objects oo)
    1856             : {
    1857             :         struct folio *folio;
    1858             :         struct slab *slab;
    1859         435 :         unsigned int order = oo_order(oo);
    1860             : 
    1861         435 :         if (node == NUMA_NO_NODE)
    1862         434 :                 folio = (struct folio *)alloc_pages(flags, order);
    1863             :         else
    1864           1 :                 folio = (struct folio *)__alloc_pages_node(node, flags, order);
    1865             : 
    1866         435 :         if (!folio)
    1867             :                 return NULL;
    1868             : 
    1869         435 :         slab = folio_slab(folio);
    1870         435 :         __folio_set_slab(folio);
    1871             :         /* Make the flag visible before any changes to folio->mapping */
    1872         435 :         smp_wmb();
    1873         870 :         if (folio_is_pfmemalloc(folio))
    1874             :                 slab_set_pfmemalloc(slab);
    1875             : 
    1876             :         return slab;
    1877             : }
    1878             : 
    1879             : #ifdef CONFIG_SLAB_FREELIST_RANDOM
    1880             : /* Pre-initialize the random sequence cache */
    1881             : static int init_cache_random_seq(struct kmem_cache *s)
    1882             : {
    1883             :         unsigned int count = oo_objects(s->oo);
    1884             :         int err;
    1885             : 
    1886             :         /* Bailout if already initialised */
    1887             :         if (s->random_seq)
    1888             :                 return 0;
    1889             : 
    1890             :         err = cache_random_seq_create(s, count, GFP_KERNEL);
    1891             :         if (err) {
    1892             :                 pr_err("SLUB: Unable to initialize free list for %s\n",
    1893             :                         s->name);
    1894             :                 return err;
    1895             :         }
    1896             : 
    1897             :         /* Transform to an offset on the set of pages */
    1898             :         if (s->random_seq) {
    1899             :                 unsigned int i;
    1900             : 
    1901             :                 for (i = 0; i < count; i++)
    1902             :                         s->random_seq[i] *= s->size;
    1903             :         }
    1904             :         return 0;
    1905             : }
    1906             : 
    1907             : /* Initialize each random sequence freelist per cache */
    1908             : static void __init init_freelist_randomization(void)
    1909             : {
    1910             :         struct kmem_cache *s;
    1911             : 
    1912             :         mutex_lock(&slab_mutex);
    1913             : 
    1914             :         list_for_each_entry(s, &slab_caches, list)
    1915             :                 init_cache_random_seq(s);
    1916             : 
    1917             :         mutex_unlock(&slab_mutex);
    1918             : }
    1919             : 
    1920             : /* Get the next entry on the pre-computed freelist randomized */
    1921             : static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab,
    1922             :                                 unsigned long *pos, void *start,
    1923             :                                 unsigned long page_limit,
    1924             :                                 unsigned long freelist_count)
    1925             : {
    1926             :         unsigned int idx;
    1927             : 
    1928             :         /*
    1929             :          * If the target page allocation failed, the number of objects on the
    1930             :          * page might be smaller than the usual size defined by the cache.
    1931             :          */
    1932             :         do {
    1933             :                 idx = s->random_seq[*pos];
    1934             :                 *pos += 1;
    1935             :                 if (*pos >= freelist_count)
    1936             :                         *pos = 0;
    1937             :         } while (unlikely(idx >= page_limit));
    1938             : 
    1939             :         return (char *)start + idx;
    1940             : }
    1941             : 
    1942             : /* Shuffle the single linked freelist based on a random pre-computed sequence */
    1943             : static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
    1944             : {
    1945             :         void *start;
    1946             :         void *cur;
    1947             :         void *next;
    1948             :         unsigned long idx, pos, page_limit, freelist_count;
    1949             : 
    1950             :         if (slab->objects < 2 || !s->random_seq)
    1951             :                 return false;
    1952             : 
    1953             :         freelist_count = oo_objects(s->oo);
    1954             :         pos = get_random_u32_below(freelist_count);
    1955             : 
    1956             :         page_limit = slab->objects * s->size;
    1957             :         start = fixup_red_left(s, slab_address(slab));
    1958             : 
    1959             :         /* First entry is used as the base of the freelist */
    1960             :         cur = next_freelist_entry(s, slab, &pos, start, page_limit,
    1961             :                                 freelist_count);
    1962             :         cur = setup_object(s, cur);
    1963             :         slab->freelist = cur;
    1964             : 
    1965             :         for (idx = 1; idx < slab->objects; idx++) {
    1966             :                 next = next_freelist_entry(s, slab, &pos, start, page_limit,
    1967             :                         freelist_count);
    1968             :                 next = setup_object(s, next);
    1969             :                 set_freepointer(s, cur, next);
    1970             :                 cur = next;
    1971             :         }
    1972             :         set_freepointer(s, cur, NULL);
    1973             : 
    1974             :         return true;
    1975             : }
    1976             : #else
    1977             : static inline int init_cache_random_seq(struct kmem_cache *s)
    1978             : {
    1979             :         return 0;
    1980             : }
    1981             : static inline void init_freelist_randomization(void) { }
    1982             : static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
    1983             : {
    1984             :         return false;
    1985             : }
    1986             : #endif /* CONFIG_SLAB_FREELIST_RANDOM */
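
/*
 * Userspace sketch of the shuffling idea above: given a pre-computed
 * sequence of object offsets, link a slab's objects in that order
 * instead of address order.  The fixed sequence and sizes below are
 * invented for illustration; the kernel derives the sequence from a
 * random permutation scaled by s->size.
 */
#include <stdio.h>
#include <string.h>

#define OBJ_SIZE   16
#define NR_OBJECTS 4

int main(void)
{
	static unsigned char slab[OBJ_SIZE * NR_OBJECTS];
	/* pre-computed order, already scaled to byte offsets */
	static const unsigned int seq[NR_OBJECTS] = { 32, 0, 48, 16 };
	void *freelist = slab + seq[0];
	void *cur = freelist;

	for (int i = 1; i < NR_OBJECTS; i++) {
		void *next = slab + seq[i];

		memcpy(cur, &next, sizeof(next));	/* store free pointer */
		cur = next;
	}
	memset(cur, 0, sizeof(void *));			/* terminate the list */

	for (void *p = freelist; p; memcpy(&p, p, sizeof(p)))
		printf("object at offset %td\n",
		       (unsigned char *)p - slab);
	return 0;
}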
    1987             : 
    1988         435 : static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
    1989             : {
    1990             :         struct slab *slab;
    1991         435 :         struct kmem_cache_order_objects oo = s->oo;
    1992             :         gfp_t alloc_gfp;
    1993             :         void *start, *p, *next;
    1994             :         int idx;
    1995             :         bool shuffle;
    1996             : 
    1997         435 :         flags &= gfp_allowed_mask;
    1998             : 
    1999         435 :         flags |= s->allocflags;
    2000             : 
    2001             :         /*
    2002             :          * Let the initial higher-order allocation fail under memory pressure
     2003             :          * so we fall back to the minimum order allocation.
    2004             :          */
    2005         435 :         alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
    2006         832 :         if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
    2007          76 :                 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
    2008             : 
    2009         435 :         slab = alloc_slab_page(alloc_gfp, node, oo);
    2010         435 :         if (unlikely(!slab)) {
    2011           0 :                 oo = s->min;
    2012           0 :                 alloc_gfp = flags;
    2013             :                 /*
    2014             :                  * Allocation may have failed due to fragmentation.
    2015             :                  * Try a lower order alloc if possible
    2016             :                  */
    2017           0 :                 slab = alloc_slab_page(alloc_gfp, node, oo);
    2018           0 :                 if (unlikely(!slab))
    2019             :                         return NULL;
    2020             :                 stat(s, ORDER_FALLBACK);
    2021             :         }
    2022             : 
    2023         435 :         slab->objects = oo_objects(oo);
    2024         435 :         slab->inuse = 0;
    2025         435 :         slab->frozen = 0;
    2026             : 
    2027         870 :         account_slab(slab, oo_order(oo), s, flags);
    2028             : 
    2029         435 :         slab->slab_cache = s;
    2030             : 
    2031         435 :         kasan_poison_slab(slab);
    2032             : 
    2033         435 :         start = slab_address(slab);
    2034             : 
    2035         435 :         setup_slab_debug(s, slab, start);
    2036             : 
    2037         435 :         shuffle = shuffle_freelist(s, slab);
    2038             : 
    2039             :         if (!shuffle) {
    2040         435 :                 start = fixup_red_left(s, start);
    2041         435 :                 start = setup_object(s, start);
    2042         435 :                 slab->freelist = start;
    2043       14462 :                 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
    2044       14027 :                         next = p + s->size;
    2045       14027 :                         next = setup_object(s, next);
    2046       28054 :                         set_freepointer(s, p, next);
    2047       14027 :                         p = next;
    2048             :                 }
    2049         435 :                 set_freepointer(s, p, NULL);
    2050             :         }
    2051             : 
    2052         435 :         return slab;
    2053             : }
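The gfp juggling at the top of allocate_slab() is easier to see in isolation: the speculative higher-order attempt adds __GFP_NOWARN | __GFP_NORETRY, drops __GFP_NOFAIL, and avoids direct reclaim, while the fallback retries at s->min with the caller's original flags. A minimal userspace sketch of the same pattern, where the F_* bits and relax_for_high_order() are made-up stand-ins rather than real gfp flags:

#include <stdio.h>

/* Hypothetical stand-ins for the gfp bits manipulated in allocate_slab(). */
#define F_NOWARN     (1u << 0)
#define F_NORETRY    (1u << 1)
#define F_NOFAIL     (1u << 2)
#define F_RECLAIM    (1u << 3)
#define F_NOMEMALLOC (1u << 4)

static unsigned int relax_for_high_order(unsigned int flags, int order, int min_order)
{
	/* Let the first, larger attempt fail quickly and quietly. */
	unsigned int alloc_flags = (flags | F_NOWARN | F_NORETRY) & ~F_NOFAIL;

	/*
	 * If the caller could reclaim and we are above the minimum order,
	 * do not reclaim or dip into reserves for the speculative try;
	 * the fallback path retries with the original flags instead.
	 */
	if ((alloc_flags & F_RECLAIM) && order > min_order)
		alloc_flags = (alloc_flags | F_NOMEMALLOC) & ~F_RECLAIM;

	return alloc_flags;
}

int main(void)
{
	unsigned int flags = F_RECLAIM | F_NOFAIL;

	printf("relaxed flags: %#x\n", relax_for_high_order(flags, 3, 1));
	return 0;
}

If the relaxed attempt fails, the real code retries at the minimum order with the original flags and counts the ORDER_FALLBACK stat.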
    2054             : 
    2055         435 : static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
    2056             : {
    2057         435 :         if (unlikely(flags & GFP_SLAB_BUG_MASK))
    2058           0 :                 flags = kmalloc_fix_flags(flags);
    2059             : 
    2060         435 :         WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
    2061             : 
    2062         435 :         return allocate_slab(s,
    2063             :                 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
    2064             : }
    2065             : 
    2066           0 : static void __free_slab(struct kmem_cache *s, struct slab *slab)
    2067             : {
    2068           0 :         struct folio *folio = slab_folio(slab);
    2069           0 :         int order = folio_order(folio);
    2070           0 :         int pages = 1 << order;
    2071             : 
    2072           0 :         __slab_clear_pfmemalloc(slab);
    2073           0 :         folio->mapping = NULL;
    2074             :         /* Make the mapping reset visible before clearing the flag */
    2075           0 :         smp_wmb();
    2076           0 :         __folio_clear_slab(folio);
    2077           0 :         mm_account_reclaimed_pages(pages);
    2078           0 :         unaccount_slab(slab, order, s);
    2079           0 :         __free_pages(&folio->page, order);
    2080           0 : }
    2081             : 
    2082           0 : static void rcu_free_slab(struct rcu_head *h)
    2083             : {
    2084           0 :         struct slab *slab = container_of(h, struct slab, rcu_head);
    2085             : 
    2086           0 :         __free_slab(slab->slab_cache, slab);
    2087           0 : }
    2088             : 
    2089           0 : static void free_slab(struct kmem_cache *s, struct slab *slab)
    2090             : {
    2091           0 :         if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
    2092             :                 void *p;
    2093             : 
    2094           0 :                 slab_pad_check(s, slab);
    2095           0 :                 for_each_object(p, s, slab_address(slab), slab->objects)
    2096           0 :                         check_object(s, slab, p, SLUB_RED_INACTIVE);
    2097             :         }
    2098             : 
    2099           0 :         if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
    2100           0 :                 call_rcu(&slab->rcu_head, rcu_free_slab);
    2101             :         else
    2102           0 :                 __free_slab(s, slab);
    2103           0 : }
    2104             : 
    2105             : static void discard_slab(struct kmem_cache *s, struct slab *slab)
    2106             : {
    2107           0 :         dec_slabs_node(s, slab_nid(slab), slab->objects);
    2108           0 :         free_slab(s, slab);
    2109             : }
    2110             : 
    2111             : /*
    2112             :  * Management of partially allocated slabs.
    2113             :  */
    2114             : static inline void
    2115             : __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
    2116             : {
    2117          38 :         n->nr_partial++;
    2118           2 :         if (tail == DEACTIVATE_TO_TAIL)
    2119          35 :                 list_add_tail(&slab->slab_list, &n->partial);
    2120             :         else
    2121           3 :                 list_add(&slab->slab_list, &n->partial);
    2122             : }
    2123             : 
    2124             : static inline void add_partial(struct kmem_cache_node *n,
    2125             :                                 struct slab *slab, int tail)
    2126             : {
    2127             :         lockdep_assert_held(&n->list_lock);
    2128           2 :         __add_partial(n, slab, tail);
    2129             : }
    2130             : 
    2131             : static inline void remove_partial(struct kmem_cache_node *n,
    2132             :                                         struct slab *slab)
    2133             : {
    2134             :         lockdep_assert_held(&n->list_lock);
    2135          56 :         list_del(&slab->slab_list);
    2136          28 :         n->nr_partial--;
    2137             : }
    2138             : 
    2139             : /*
    2140             :  * Called only for kmem_cache_debug() caches instead of acquire_slab(), with a
    2141             :  * slab from the n->partial list. Remove only a single object from the slab, do
    2142             :  * the alloc_debug_processing() checks and leave the slab on the list, or move
    2143             :  * it to the full list if it was the last free object.
    2144             :  */
    2145           0 : static void *alloc_single_from_partial(struct kmem_cache *s,
    2146             :                 struct kmem_cache_node *n, struct slab *slab, int orig_size)
    2147             : {
    2148             :         void *object;
    2149             : 
    2150             :         lockdep_assert_held(&n->list_lock);
    2151             : 
    2152           0 :         object = slab->freelist;
    2153           0 :         slab->freelist = get_freepointer(s, object);
    2154           0 :         slab->inuse++;
    2155             : 
    2156           0 :         if (!alloc_debug_processing(s, slab, object, orig_size)) {
    2157           0 :                 remove_partial(n, slab);
    2158           0 :                 return NULL;
    2159             :         }
    2160             : 
    2161           0 :         if (slab->inuse == slab->objects) {
    2162           0 :                 remove_partial(n, slab);
    2163           0 :                 add_full(s, n, slab);
    2164             :         }
    2165             : 
    2166             :         return object;
    2167             : }
    2168             : 
    2169             : /*
    2170             :  * Called only for kmem_cache_debug() caches to allocate from a freshly
    2171             :  * allocated slab. Allocate a single object instead of the whole freelist
    2172             :  * and put the slab on the partial (or full) list.
    2173             :  */
    2174           0 : static void *alloc_single_from_new_slab(struct kmem_cache *s,
    2175             :                                         struct slab *slab, int orig_size)
    2176             : {
    2177           0 :         int nid = slab_nid(slab);
    2178           0 :         struct kmem_cache_node *n = get_node(s, nid);
    2179             :         unsigned long flags;
    2180             :         void *object;
    2181             : 
    2182             : 
    2183           0 :         object = slab->freelist;
    2184           0 :         slab->freelist = get_freepointer(s, object);
    2185           0 :         slab->inuse = 1;
    2186             : 
    2187           0 :         if (!alloc_debug_processing(s, slab, object, orig_size))
    2188             :                 /*
    2189             :                  * It's not really expected that this would fail on a
    2190             :                  * freshly allocated slab, but a concurrent memory
    2191             :                  * corruption in theory could cause that.
    2192             :                  */
    2193             :                 return NULL;
    2194             : 
    2195           0 :         spin_lock_irqsave(&n->list_lock, flags);
    2196             : 
    2197           0 :         if (slab->inuse == slab->objects)
    2198           0 :                 add_full(s, n, slab);
    2199             :         else
    2200             :                 add_partial(n, slab, DEACTIVATE_TO_HEAD);
    2201             : 
    2202           0 :         inc_slabs_node(s, nid, slab->objects);
    2203           0 :         spin_unlock_irqrestore(&n->list_lock, flags);
    2204             : 
    2205           0 :         return object;
    2206             : }
    2207             : 
    2208             : /*
    2209             :  * Remove slab from the partial list, freeze it and
    2210             :  * return the pointer to the freelist.
    2211             :  *
    2212             :  * Returns a list of objects or NULL if it fails.
    2213             :  */
    2214          28 : static inline void *acquire_slab(struct kmem_cache *s,
    2215             :                 struct kmem_cache_node *n, struct slab *slab,
    2216             :                 int mode)
    2217             : {
    2218             :         void *freelist;
    2219             :         unsigned long counters;
    2220             :         struct slab new;
    2221             : 
    2222             :         lockdep_assert_held(&n->list_lock);
    2223             : 
    2224             :         /*
    2225             :          * Zap the freelist and set the frozen bit.
    2226             :          * The old freelist is the list of objects for the
    2227             :          * per cpu allocation list.
    2228             :          */
    2229          28 :         freelist = slab->freelist;
    2230          28 :         counters = slab->counters;
    2231          28 :         new.counters = counters;
    2232          28 :         if (mode) {
    2233          28 :                 new.inuse = slab->objects;
    2234          28 :                 new.freelist = NULL;
    2235             :         } else {
    2236             :                 new.freelist = freelist;
    2237             :         }
    2238             : 
    2239             :         VM_BUG_ON(new.frozen);
    2240          28 :         new.frozen = 1;
    2241             : 
    2242          56 :         if (!__slab_update_freelist(s, slab,
    2243             :                         freelist, counters,
    2244             :                         new.freelist, new.counters,
    2245             :                         "acquire_slab"))
    2246             :                 return NULL;
    2247             : 
    2248          56 :         remove_partial(n, slab);
    2249          28 :         WARN_ON(!freelist);
    2250             :         return freelist;
    2251             : }
    2252             : 
    2253             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    2254             : static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
    2255             : #else
    2256             : static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
    2257             :                                    int drain) { }
    2258             : #endif
    2259             : static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
    2260             : 
    2261             : /*
    2262             :  * Try to allocate a partial slab from a specific node.
    2263             :  */
    2264         462 : static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
    2265             :                               struct partial_context *pc)
    2266             : {
    2267             :         struct slab *slab, *slab2;
    2268         462 :         void *object = NULL;
    2269             :         unsigned long flags;
    2270         462 :         unsigned int partial_slabs = 0;
    2271             : 
    2272             :         /*
    2273             :          * Racy check. If we mistakenly see no partial slabs then we
    2274             :          * just allocate an empty slab. If we mistakenly try to get a
    2275             :          * partial slab and there is none available then get_partial()
    2276             :          * will return NULL.
    2277             :          */
    2278         462 :         if (!n || !n->nr_partial)
    2279             :                 return NULL;
    2280             : 
    2281          28 :         spin_lock_irqsave(&n->list_lock, flags);
    2282          28 :         list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
    2283             :                 void *t;
    2284             : 
    2285          56 :                 if (!pfmemalloc_match(slab, pc->flags))
    2286           0 :                         continue;
    2287             : 
    2288          28 :                 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
    2289           0 :                         object = alloc_single_from_partial(s, n, slab,
    2290           0 :                                                         pc->orig_size);
    2291           0 :                         if (object)
    2292             :                                 break;
    2293           0 :                         continue;
    2294             :                 }
    2295             : 
    2296          28 :                 t = acquire_slab(s, n, slab, object == NULL);
    2297          28 :                 if (!t)
    2298             :                         break;
    2299             : 
    2300          28 :                 if (!object) {
    2301          28 :                         *pc->slab = slab;
    2302          28 :                         stat(s, ALLOC_FROM_PARTIAL);
    2303          28 :                         object = t;
    2304             :                 } else {
    2305             :                         put_cpu_partial(s, slab, 0);
    2306             :                         stat(s, CPU_PARTIAL_NODE);
    2307             :                         partial_slabs++;
    2308             :                 }
    2309             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    2310             :                 if (!kmem_cache_has_cpu_partial(s)
    2311             :                         || partial_slabs > s->cpu_partial_slabs / 2)
    2312             :                         break;
    2313             : #else
    2314             :                 break;
    2315             : #endif
    2316             : 
    2317             :         }
    2318          56 :         spin_unlock_irqrestore(&n->list_lock, flags);
    2319          28 :         return object;
    2320             : }
    2321             : 
    2322             : /*
    2323             :  * Get a slab from somewhere. Search in increasing NUMA distances.
    2324             :  */
    2325             : static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
    2326             : {
    2327             : #ifdef CONFIG_NUMA
    2328             :         struct zonelist *zonelist;
    2329             :         struct zoneref *z;
    2330             :         struct zone *zone;
    2331             :         enum zone_type highest_zoneidx = gfp_zone(pc->flags);
    2332             :         void *object;
    2333             :         unsigned int cpuset_mems_cookie;
    2334             : 
    2335             :         /*
    2336             :          * The defrag ratio allows configuration of the tradeoff between
    2337             :          * inter-node defragmentation and node-local allocations. A lower
    2338             :          * defrag_ratio increases the tendency to do local allocations
    2339             :          * instead of attempting to obtain partial slabs from other nodes.
    2340             :          *
    2341             :          * If the defrag_ratio is set to 0 then kmalloc() always
    2342             :          * returns node local objects. If the ratio is higher then kmalloc()
    2343             :          * may return off node objects because partial slabs are obtained
    2344             :          * from other nodes and filled up.
    2345             :          *
    2346             :          * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
    2347             :          * (which makes defrag_ratio = 1000) then every (well almost)
    2348             :          * allocation will first attempt to defrag slab caches on other nodes.
    2349             :          * This means scanning over all nodes to look for partial slabs which
    2350             :          * may be expensive if we do it every time we are trying to find a slab
    2351             :          * with available objects.
    2352             :          */
    2353             :         if (!s->remote_node_defrag_ratio ||
    2354             :                         get_cycles() % 1024 > s->remote_node_defrag_ratio)
    2355             :                 return NULL;
    2356             : 
    2357             :         do {
    2358             :                 cpuset_mems_cookie = read_mems_allowed_begin();
    2359             :                 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
    2360             :                 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
    2361             :                         struct kmem_cache_node *n;
    2362             : 
    2363             :                         n = get_node(s, zone_to_nid(zone));
    2364             : 
    2365             :                         if (n && cpuset_zone_allowed(zone, pc->flags) &&
    2366             :                                         n->nr_partial > s->min_partial) {
    2367             :                                 object = get_partial_node(s, n, pc);
    2368             :                                 if (object) {
    2369             :                                         /*
    2370             :                                          * Don't check read_mems_allowed_retry()
    2371             :                                          * here - if mems_allowed was updated in
    2372             :                                          * parallel, that was a harmless race
    2373             :                                          * between allocation and the cpuset
    2374             :                                          * update
    2375             :                                          */
    2376             :                                         return object;
    2377             :                                 }
    2378             :                         }
    2379             :                 }
    2380             :         } while (read_mems_allowed_retry(cpuset_mems_cookie));
    2381             : #endif  /* CONFIG_NUMA */
    2382             :         return NULL;
    2383             : }
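To illustrate the gate at the top of get_any_partial(): remote_node_defrag_ratio is stored as the sysfs percentage multiplied by 10, and a remote search only proceeds when a cheap pseudo-random value in [0, 1023] does not exceed it. A hedged userspace sketch, with rand() standing in for get_cycles() and should_search_remote() an illustrative name:

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the probabilistic remote-defrag gate; not kernel code. */
static int should_search_remote(unsigned int defrag_ratio)
{
	if (!defrag_ratio)
		return 0;
	/* rand() stands in for get_cycles() as a cheap source of jitter. */
	return (rand() % 1024) <= defrag_ratio;
}

int main(void)
{
	unsigned int ratio = 100 * 10;	/* sysfs value 100 -> ratio 1000 */
	int hits = 0;

	for (int i = 0; i < 10000; i++)
		hits += should_search_remote(ratio);

	printf("remote search attempted in %d of 10000 tries\n", hits);
	return 0;
}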
    2384             : 
    2385             : /*
    2386             :  * Get a partial slab, lock it and return it.
    2387             :  */
    2388         462 : static void *get_partial(struct kmem_cache *s, int node, struct partial_context *pc)
    2389             : {
    2390             :         void *object;
    2391         462 :         int searchnode = node;
    2392             : 
    2393         462 :         if (node == NUMA_NO_NODE)
    2394         460 :                 searchnode = numa_mem_id();
    2395             : 
    2396         462 :         object = get_partial_node(s, get_node(s, searchnode), pc);
    2397         462 :         if (object || node != NUMA_NO_NODE)
    2398             :                 return object;
    2399             : 
    2400         434 :         return get_any_partial(s, pc);
    2401             : }
    2402             : 
    2403             : #ifndef CONFIG_SLUB_TINY
    2404             : 
    2405             : #ifdef CONFIG_PREEMPTION
    2406             : /*
    2407             :  * Calculate the next globally unique transaction for disambiguation
    2408             :  * during cmpxchg. The transactions start with the cpu number and are then
    2409             :  * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
    2410             :  */
    2411             : #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
    2412             : #else
    2413             : /*
    2414             :  * No preemption is supported, so there is also no need to check for
    2415             :  * different cpus.
    2416             :  */
    2417             : #define TID_STEP 1
    2418             : #endif /* CONFIG_PREEMPTION */
    2419             : 
    2420             : static inline unsigned long next_tid(unsigned long tid)
    2421             : {
    2422       21587 :         return tid + TID_STEP;
    2423             : }
    2424             : 
    2425             : #ifdef SLUB_DEBUG_CMPXCHG
    2426             : static inline unsigned int tid_to_cpu(unsigned long tid)
    2427             : {
    2428             :         return tid % TID_STEP;
    2429             : }
    2430             : 
    2431             : static inline unsigned long tid_to_event(unsigned long tid)
    2432             : {
    2433             :         return tid / TID_STEP;
    2434             : }
    2435             : #endif
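To make the tid arithmetic concrete: with, say, 6 possible CPUs, TID_STEP rounds up to 8, so the low bits of a tid always identify the CPU and the upper bits count transactions on that CPU. A small standalone sketch (values chosen for illustration only):

#include <stdio.h>

#define NR_CPUS  6
#define TID_STEP 8	/* roundup_pow_of_two(NR_CPUS) */

static unsigned long next_tid(unsigned long tid)     { return tid + TID_STEP; }
static unsigned int tid_to_cpu(unsigned long tid)    { return tid % TID_STEP; }
static unsigned long tid_to_event(unsigned long tid) { return tid / TID_STEP; }

int main(void)
{
	unsigned long tid = 3;	/* init_tid() for cpu 3 */

	for (int i = 0; i < 4; i++) {
		printf("tid=%lu cpu=%u event=%lu\n",
		       tid, tid_to_cpu(tid), tid_to_event(tid));
		tid = next_tid(tid);
	}
	return 0;
}

Two tids are equal only if they belong to the same CPU and the same transaction count, which is what lets the fast path detect migration or concurrent activity with a single comparison.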
    2436             : 
    2437             : static inline unsigned int init_tid(int cpu)
    2438             : {
    2439          53 :         return cpu;
    2440             : }
    2441             : 
    2442             : static inline void note_cmpxchg_failure(const char *n,
    2443             :                 const struct kmem_cache *s, unsigned long tid)
    2444             : {
    2445             : #ifdef SLUB_DEBUG_CMPXCHG
    2446             :         unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
    2447             : 
    2448             :         pr_info("%s %s: cmpxchg redo ", n, s->name);
    2449             : 
    2450             : #ifdef CONFIG_PREEMPTION
    2451             :         if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
    2452             :                 pr_warn("due to cpu change %d -> %d\n",
    2453             :                         tid_to_cpu(tid), tid_to_cpu(actual_tid));
    2454             :         else
    2455             : #endif
    2456             :         if (tid_to_event(tid) != tid_to_event(actual_tid))
    2457             :                 pr_warn("due to cpu running other code. Event %ld->%ld\n",
    2458             :                         tid_to_event(tid), tid_to_event(actual_tid));
    2459             :         else
    2460             :                 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
    2461             :                         actual_tid, tid, next_tid(tid));
    2462             : #endif
    2463             :         stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
    2464             : }
    2465             : 
    2466             : static void init_kmem_cache_cpus(struct kmem_cache *s)
    2467             : {
    2468             :         int cpu;
    2469             :         struct kmem_cache_cpu *c;
    2470             : 
    2471          53 :         for_each_possible_cpu(cpu) {
    2472          53 :                 c = per_cpu_ptr(s->cpu_slab, cpu);
    2473          53 :                 local_lock_init(&c->lock);
    2474          53 :                 c->tid = init_tid(cpu);
    2475             :         }
    2476             : }
    2477             : 
    2478             : /*
    2479             :  * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
    2480             :  * unfreezes the slab and puts it on the proper list.
    2481             :  * Assumes the slab has already been safely taken away from kmem_cache_cpu
    2482             :  * by the caller.
    2483             :  */
    2484           2 : static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
    2485             :                             void *freelist)
    2486             : {
    2487             :         enum slab_modes { M_NONE, M_PARTIAL, M_FREE, M_FULL_NOLIST };
    2488           6 :         struct kmem_cache_node *n = get_node(s, slab_nid(slab));
    2489           2 :         int free_delta = 0;
    2490           2 :         enum slab_modes mode = M_NONE;
    2491             :         void *nextfree, *freelist_iter, *freelist_tail;
    2492           2 :         int tail = DEACTIVATE_TO_HEAD;
    2493           2 :         unsigned long flags = 0;
    2494             :         struct slab new;
    2495             :         struct slab old;
    2496             : 
    2497           2 :         if (slab->freelist) {
    2498           0 :                 stat(s, DEACTIVATE_REMOTE_FREES);
    2499           0 :                 tail = DEACTIVATE_TO_TAIL;
    2500             :         }
    2501             : 
    2502             :         /*
    2503             :          * Stage one: Count the objects on cpu's freelist as free_delta and
    2504             :          * remember the last object in freelist_tail for later splicing.
    2505             :          */
    2506           2 :         freelist_tail = NULL;
    2507           2 :         freelist_iter = freelist;
    2508          86 :         while (freelist_iter) {
    2509         164 :                 nextfree = get_freepointer(s, freelist_iter);
    2510             : 
    2511             :                 /*
    2512             :                  * If 'nextfree' is invalid, it is possible that the object at
    2513             :                  * 'freelist_iter' is already corrupted.  So isolate all objects
    2514             :                  * starting at 'freelist_iter' by skipping them.
    2515             :                  */
    2516          82 :                 if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
    2517             :                         break;
    2518             : 
    2519          82 :                 freelist_tail = freelist_iter;
    2520          82 :                 free_delta++;
    2521             : 
    2522          82 :                 freelist_iter = nextfree;
    2523             :         }
    2524             : 
    2525             :         /*
    2526             :          * Stage two: Unfreeze the slab while splicing the per-cpu
    2527             :          * freelist to the head of slab's freelist.
    2528             :          *
    2529             :          * Ensure that the slab is unfrozen while the list presence
    2530             :          * reflects the actual number of objects during unfreeze.
    2531             :          *
    2532             :          * We first perform the cmpxchg while holding the lock and insert the
    2533             :          * slab into the list when it succeeds. If there is a mismatch then the
    2534             :          * slab is not unfrozen and the number of objects in the slab may have
    2535             :          * changed. Then release the lock and retry the cmpxchg again.
    2536             :          */
    2537             : redo:
    2538             : 
    2539           2 :         old.freelist = READ_ONCE(slab->freelist);
    2540           2 :         old.counters = READ_ONCE(slab->counters);
    2541             :         VM_BUG_ON(!old.frozen);
    2542             : 
    2543             :         /* Determine target state of the slab */
    2544           2 :         new.counters = old.counters;
    2545           2 :         if (freelist_tail) {
    2546           2 :                 new.inuse -= free_delta;
    2547           4 :                 set_freepointer(s, freelist_tail, old.freelist);
    2548           2 :                 new.freelist = freelist;
    2549             :         } else
    2550             :                 new.freelist = old.freelist;
    2551             : 
    2552           2 :         new.frozen = 0;
    2553             : 
    2554           2 :         if (!new.inuse && n->nr_partial >= s->min_partial) {
    2555             :                 mode = M_FREE;
    2556           2 :         } else if (new.freelist) {
    2557           2 :                 mode = M_PARTIAL;
    2558             :                 /*
    2559             :                  * Taking the spinlock removes the possibility that
    2560             :                  * acquire_slab() will see a slab that is frozen
    2561             :                  */
    2562           2 :                 spin_lock_irqsave(&n->list_lock, flags);
    2563             :         } else {
    2564             :                 mode = M_FULL_NOLIST;
    2565             :         }
    2566             : 
    2567             : 
    2568           2 :         if (!slab_update_freelist(s, slab,
    2569             :                                 old.freelist, old.counters,
    2570             :                                 new.freelist, new.counters,
    2571             :                                 "unfreezing slab")) {
    2572           0 :                 if (mode == M_PARTIAL)
    2573           0 :                         spin_unlock_irqrestore(&n->list_lock, flags);
    2574             :                 goto redo;
    2575             :         }
    2576             : 
    2577             : 
    2578           2 :         if (mode == M_PARTIAL) {
    2579           2 :                 add_partial(n, slab, tail);
    2580           4 :                 spin_unlock_irqrestore(&n->list_lock, flags);
    2581           2 :                 stat(s, tail);
    2582           0 :         } else if (mode == M_FREE) {
    2583           0 :                 stat(s, DEACTIVATE_EMPTY);
    2584             :                 discard_slab(s, slab);
    2585             :                 stat(s, FREE_SLAB);
    2586             :         } else if (mode == M_FULL_NOLIST) {
    2587             :                 stat(s, DEACTIVATE_FULL);
    2588             :         }
    2589           2 : }
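The two stages of deactivate_slab() amount to a linked-list splice: walk the per-cpu freelist to count it and find its tail, then point the tail at the slab's old freelist so the per-cpu objects end up at the head. A minimal sketch with an explicit struct obj node (hypothetical; SLUB keeps the free pointer inside the object itself) and no cmpxchg retry loop:

#include <stdio.h>
#include <stddef.h>

struct obj {
	struct obj *next;
};

/* Stage one: find the tail and count; stage two: splice at the head. */
static struct obj *splice_freelists(struct obj *cpu_list, struct obj *slab_list,
				    int *free_delta)
{
	struct obj *tail = NULL;

	*free_delta = 0;
	for (struct obj *it = cpu_list; it; it = it->next) {
		tail = it;
		(*free_delta)++;
	}

	if (!tail)			/* empty per-cpu list: keep the slab list */
		return slab_list;

	tail->next = slab_list;		/* per-cpu objects end up at the head */
	return cpu_list;
}

int main(void)
{
	struct obj a = { NULL }, b = { &a };	/* per-cpu list: b -> a */
	struct obj c = { NULL }, d = { &c };	/* slab list:    d -> c */
	int delta;
	struct obj *head = splice_freelists(&b, &d, &delta);

	printf("spliced %d per-cpu objects, new head %s the per-cpu head\n",
	       delta, head == &b ? "is" : "is not");
	return 0;
}

In the kernel the splice is only committed by slab_update_freelist(), which also clears the frozen bit and updates inuse atomically.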
    2590             : 
    2591             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    2592             : static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
    2593             : {
    2594             :         struct kmem_cache_node *n = NULL, *n2 = NULL;
    2595             :         struct slab *slab, *slab_to_discard = NULL;
    2596             :         unsigned long flags = 0;
    2597             : 
    2598             :         while (partial_slab) {
    2599             :                 struct slab new;
    2600             :                 struct slab old;
    2601             : 
    2602             :                 slab = partial_slab;
    2603             :                 partial_slab = slab->next;
    2604             : 
    2605             :                 n2 = get_node(s, slab_nid(slab));
    2606             :                 if (n != n2) {
    2607             :                         if (n)
    2608             :                                 spin_unlock_irqrestore(&n->list_lock, flags);
    2609             : 
    2610             :                         n = n2;
    2611             :                         spin_lock_irqsave(&n->list_lock, flags);
    2612             :                 }
    2613             : 
    2614             :                 do {
    2615             : 
    2616             :                         old.freelist = slab->freelist;
    2617             :                         old.counters = slab->counters;
    2618             :                         VM_BUG_ON(!old.frozen);
    2619             : 
    2620             :                         new.counters = old.counters;
    2621             :                         new.freelist = old.freelist;
    2622             : 
    2623             :                         new.frozen = 0;
    2624             : 
    2625             :                 } while (!__slab_update_freelist(s, slab,
    2626             :                                 old.freelist, old.counters,
    2627             :                                 new.freelist, new.counters,
    2628             :                                 "unfreezing slab"));
    2629             : 
    2630             :                 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
    2631             :                         slab->next = slab_to_discard;
    2632             :                         slab_to_discard = slab;
    2633             :                 } else {
    2634             :                         add_partial(n, slab, DEACTIVATE_TO_TAIL);
    2635             :                         stat(s, FREE_ADD_PARTIAL);
    2636             :                 }
    2637             :         }
    2638             : 
    2639             :         if (n)
    2640             :                 spin_unlock_irqrestore(&n->list_lock, flags);
    2641             : 
    2642             :         while (slab_to_discard) {
    2643             :                 slab = slab_to_discard;
    2644             :                 slab_to_discard = slab_to_discard->next;
    2645             : 
    2646             :                 stat(s, DEACTIVATE_EMPTY);
    2647             :                 discard_slab(s, slab);
    2648             :                 stat(s, FREE_SLAB);
    2649             :         }
    2650             : }
    2651             : 
    2652             : /*
    2653             :  * Unfreeze all the cpu partial slabs.
    2654             :  */
    2655             : static void unfreeze_partials(struct kmem_cache *s)
    2656             : {
    2657             :         struct slab *partial_slab;
    2658             :         unsigned long flags;
    2659             : 
    2660             :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    2661             :         partial_slab = this_cpu_read(s->cpu_slab->partial);
    2662             :         this_cpu_write(s->cpu_slab->partial, NULL);
    2663             :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2664             : 
    2665             :         if (partial_slab)
    2666             :                 __unfreeze_partials(s, partial_slab);
    2667             : }
    2668             : 
    2669             : static void unfreeze_partials_cpu(struct kmem_cache *s,
    2670             :                                   struct kmem_cache_cpu *c)
    2671             : {
    2672             :         struct slab *partial_slab;
    2673             : 
    2674             :         partial_slab = slub_percpu_partial(c);
    2675             :         c->partial = NULL;
    2676             : 
    2677             :         if (partial_slab)
    2678             :                 __unfreeze_partials(s, partial_slab);
    2679             : }
    2680             : 
    2681             : /*
    2682             :  * Put a slab that was just frozen (in __slab_free|get_partial_node) into a
    2683             :  * partial slab slot if available.
    2684             :  *
    2685             :  * If we did not find a slot then simply move all the partials to the
    2686             :  * per node partial list.
    2687             :  */
    2688             : static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
    2689             : {
    2690             :         struct slab *oldslab;
    2691             :         struct slab *slab_to_unfreeze = NULL;
    2692             :         unsigned long flags;
    2693             :         int slabs = 0;
    2694             : 
    2695             :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    2696             : 
    2697             :         oldslab = this_cpu_read(s->cpu_slab->partial);
    2698             : 
    2699             :         if (oldslab) {
    2700             :                 if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
    2701             :                         /*
    2702             :                          * Partial array is full. Move the existing set to the
    2703             :                          * per node partial list. Postpone the actual unfreezing
    2704             :                          * outside of the critical section.
    2705             :                          */
    2706             :                         slab_to_unfreeze = oldslab;
    2707             :                         oldslab = NULL;
    2708             :                 } else {
    2709             :                         slabs = oldslab->slabs;
    2710             :                 }
    2711             :         }
    2712             : 
    2713             :         slabs++;
    2714             : 
    2715             :         slab->slabs = slabs;
    2716             :         slab->next = oldslab;
    2717             : 
    2718             :         this_cpu_write(s->cpu_slab->partial, slab);
    2719             : 
    2720             :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2721             : 
    2722             :         if (slab_to_unfreeze) {
    2723             :                 __unfreeze_partials(s, slab_to_unfreeze);
    2724             :                 stat(s, CPU_PARTIAL_DRAIN);
    2725             :         }
    2726             : }
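A simplified model of the drain policy in put_cpu_partial(): each frozen slab is pushed onto a per-cpu stack whose depth is cached in the head element, and when the existing stack already holds the configured number of slabs it is handed off wholesale before the new slab starts a fresh stack. The sketch below uses hypothetical names (fake_slab, push_partial, CPU_PARTIAL_SLABS) and leaves out the drain flag and the local_lock that the real function holds:

#include <stdio.h>
#include <stddef.h>

#define CPU_PARTIAL_SLABS 2	/* stands in for s->cpu_partial_slabs */

struct fake_slab {
	struct fake_slab *next;
	int slabs;		/* stack depth, kept in the head element */
};

static struct fake_slab *cpu_partial;	/* stands in for c->partial */

/* Returns how many slabs were handed off to the node list on this push. */
static int push_partial(struct fake_slab *slab)
{
	int drained = 0;

	if (cpu_partial && cpu_partial->slabs >= CPU_PARTIAL_SLABS) {
		for (struct fake_slab *p = cpu_partial; p; p = p->next)
			drained++;
		cpu_partial = NULL;	/* whole stack moved to the node list */
	}

	slab->slabs = (cpu_partial ? cpu_partial->slabs : 0) + 1;
	slab->next = cpu_partial;
	cpu_partial = slab;
	return drained;
}

int main(void)
{
	struct fake_slab s[4] = { { NULL, 0 } };

	for (int i = 0; i < 4; i++)
		printf("push %d -> drained %d\n", i, push_partial(&s[i]));
	return 0;
}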
    2727             : 
    2728             : #else   /* CONFIG_SLUB_CPU_PARTIAL */
    2729             : 
    2730             : static inline void unfreeze_partials(struct kmem_cache *s) { }
    2731             : static inline void unfreeze_partials_cpu(struct kmem_cache *s,
    2732             :                                   struct kmem_cache_cpu *c) { }
    2733             : 
    2734             : #endif  /* CONFIG_SLUB_CPU_PARTIAL */
    2735             : 
    2736           0 : static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
    2737             : {
    2738             :         unsigned long flags;
    2739             :         struct slab *slab;
    2740             :         void *freelist;
    2741             : 
    2742           0 :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    2743             : 
    2744           0 :         slab = c->slab;
    2745           0 :         freelist = c->freelist;
    2746             : 
    2747           0 :         c->slab = NULL;
    2748           0 :         c->freelist = NULL;
    2749           0 :         c->tid = next_tid(c->tid);
    2750             : 
    2751           0 :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    2752             : 
    2753           0 :         if (slab) {
    2754           0 :                 deactivate_slab(s, slab, freelist);
    2755           0 :                 stat(s, CPUSLAB_FLUSH);
    2756             :         }
    2757           0 : }
    2758             : 
    2759           2 : static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
    2760             : {
    2761           2 :         struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
    2762           2 :         void *freelist = c->freelist;
    2763           2 :         struct slab *slab = c->slab;
    2764             : 
    2765           2 :         c->slab = NULL;
    2766           2 :         c->freelist = NULL;
    2767           4 :         c->tid = next_tid(c->tid);
    2768             : 
    2769           2 :         if (slab) {
    2770           2 :                 deactivate_slab(s, slab, freelist);
    2771           2 :                 stat(s, CPUSLAB_FLUSH);
    2772             :         }
    2773             : 
    2774           2 :         unfreeze_partials_cpu(s, c);
    2775           2 : }
    2776             : 
    2777             : struct slub_flush_work {
    2778             :         struct work_struct work;
    2779             :         struct kmem_cache *s;
    2780             :         bool skip;
    2781             : };
    2782             : 
    2783             : /*
    2784             :  * Flush cpu slab.
    2785             :  *
    2786             :  * Called from CPU work handler with migration disabled.
    2787             :  */
    2788           0 : static void flush_cpu_slab(struct work_struct *w)
    2789             : {
    2790             :         struct kmem_cache *s;
    2791             :         struct kmem_cache_cpu *c;
    2792             :         struct slub_flush_work *sfw;
    2793             : 
    2794           0 :         sfw = container_of(w, struct slub_flush_work, work);
    2795             : 
    2796           0 :         s = sfw->s;
    2797           0 :         c = this_cpu_ptr(s->cpu_slab);
    2798             : 
    2799           0 :         if (c->slab)
    2800           0 :                 flush_slab(s, c);
    2801             : 
    2802           0 :         unfreeze_partials(s);
    2803           0 : }
    2804             : 
    2805             : static bool has_cpu_slab(int cpu, struct kmem_cache *s)
    2806             : {
    2807           0 :         struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
    2808             : 
    2809           0 :         return c->slab || slub_percpu_partial(c);
    2810             : }
    2811             : 
    2812             : static DEFINE_MUTEX(flush_lock);
    2813             : static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
    2814             : 
    2815           0 : static void flush_all_cpus_locked(struct kmem_cache *s)
    2816             : {
    2817             :         struct slub_flush_work *sfw;
    2818             :         unsigned int cpu;
    2819             : 
    2820             :         lockdep_assert_cpus_held();
    2821           0 :         mutex_lock(&flush_lock);
    2822             : 
    2823           0 :         for_each_online_cpu(cpu) {
    2824           0 :                 sfw = &per_cpu(slub_flush, cpu);
    2825           0 :                 if (!has_cpu_slab(cpu, s)) {
    2826           0 :                         sfw->skip = true;
    2827           0 :                         continue;
    2828             :                 }
    2829           0 :                 INIT_WORK(&sfw->work, flush_cpu_slab);
    2830           0 :                 sfw->skip = false;
    2831           0 :                 sfw->s = s;
    2832           0 :                 queue_work_on(cpu, flushwq, &sfw->work);
    2833             :         }
    2834             : 
    2835           0 :         for_each_online_cpu(cpu) {
    2836           0 :                 sfw = &per_cpu(slub_flush, cpu);
    2837           0 :                 if (sfw->skip)
    2838           0 :                         continue;
    2839           0 :                 flush_work(&sfw->work);
    2840             :         }
    2841             : 
    2842           0 :         mutex_unlock(&flush_lock);
    2843           0 : }
    2844             : 
    2845             : static void flush_all(struct kmem_cache *s)
    2846             : {
    2847             :         cpus_read_lock();
    2848           0 :         flush_all_cpus_locked(s);
    2849             :         cpus_read_unlock();
    2850             : }
    2851             : 
    2852             : /*
    2853             :  * Use the cpu notifier to ensure that the cpu slabs are flushed when
    2854             :  * necessary.
    2855             :  */
    2856           0 : static int slub_cpu_dead(unsigned int cpu)
    2857             : {
    2858             :         struct kmem_cache *s;
    2859             : 
    2860           0 :         mutex_lock(&slab_mutex);
    2861           0 :         list_for_each_entry(s, &slab_caches, list)
    2862           0 :                 __flush_cpu_slab(s, cpu);
    2863           0 :         mutex_unlock(&slab_mutex);
    2864           0 :         return 0;
    2865             : }
    2866             : 
    2867             : #else /* CONFIG_SLUB_TINY */
    2868             : static inline void flush_all_cpus_locked(struct kmem_cache *s) { }
    2869             : static inline void flush_all(struct kmem_cache *s) { }
    2870             : static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
    2871             : static inline int slub_cpu_dead(unsigned int cpu) { return 0; }
    2872             : #endif /* CONFIG_SLUB_TINY */
    2873             : 
    2874             : /*
    2875             :  * Check if the objects in a per cpu structure fit NUMA
    2876             :  * locality expectations.
    2877             :  */
    2878             : static inline int node_match(struct slab *slab, int node)
    2879             : {
    2880             : #ifdef CONFIG_NUMA
    2881             :         if (node != NUMA_NO_NODE && slab_nid(slab) != node)
    2882             :                 return 0;
    2883             : #endif
    2884             :         return 1;
    2885             : }
    2886             : 
    2887             : #ifdef CONFIG_SLUB_DEBUG
    2888           0 : static int count_free(struct slab *slab)
    2889             : {
    2890           0 :         return slab->objects - slab->inuse;
    2891             : }
    2892             : 
    2893             : static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
    2894             : {
    2895           0 :         return atomic_long_read(&n->total_objects);
    2896             : }
    2897             : 
    2898             : /* Supports checking bulk free of a constructed freelist */
    2899           0 : static inline bool free_debug_processing(struct kmem_cache *s,
    2900             :         struct slab *slab, void *head, void *tail, int *bulk_cnt,
    2901             :         unsigned long addr, depot_stack_handle_t handle)
    2902             : {
    2903           0 :         bool checks_ok = false;
    2904           0 :         void *object = head;
    2905           0 :         int cnt = 0;
    2906             : 
    2907           0 :         if (s->flags & SLAB_CONSISTENCY_CHECKS) {
    2908           0 :                 if (!check_slab(s, slab))
    2909             :                         goto out;
    2910             :         }
    2911             : 
    2912           0 :         if (slab->inuse < *bulk_cnt) {
    2913           0 :                 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
    2914             :                          slab->inuse, *bulk_cnt);
    2915           0 :                 goto out;
    2916             :         }
    2917             : 
    2918             : next_object:
    2919             : 
    2920           0 :         if (++cnt > *bulk_cnt)
    2921             :                 goto out_cnt;
    2922             : 
    2923           0 :         if (s->flags & SLAB_CONSISTENCY_CHECKS) {
    2924           0 :                 if (!free_consistency_checks(s, slab, object, addr))
    2925             :                         goto out;
    2926             :         }
    2927             : 
    2928           0 :         if (s->flags & SLAB_STORE_USER)
    2929             :                 set_track_update(s, object, TRACK_FREE, addr, handle);
    2930           0 :         trace(s, slab, object, 0);
    2931             :         /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
    2932           0 :         init_object(s, object, SLUB_RED_INACTIVE);
    2933             : 
    2934             :         /* Reached end of constructed freelist yet? */
    2935           0 :         if (object != tail) {
    2936           0 :                 object = get_freepointer(s, object);
    2937           0 :                 goto next_object;
    2938             :         }
    2939             :         checks_ok = true;
    2940             : 
    2941             : out_cnt:
    2942           0 :         if (cnt != *bulk_cnt) {
    2943           0 :                 slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
    2944             :                          *bulk_cnt, cnt);
    2945           0 :                 *bulk_cnt = cnt;
    2946             :         }
    2947             : 
    2948             : out:
    2949             : 
    2950           0 :         if (!checks_ok)
    2951           0 :                 slab_fix(s, "Object at 0x%p not freed", object);
    2952             : 
    2953           0 :         return checks_ok;
    2954             : }
    2955             : #endif /* CONFIG_SLUB_DEBUG */
    2956             : 
    2957             : #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
    2958           0 : static unsigned long count_partial(struct kmem_cache_node *n,
    2959             :                                         int (*get_count)(struct slab *))
    2960             : {
    2961             :         unsigned long flags;
    2962           0 :         unsigned long x = 0;
    2963             :         struct slab *slab;
    2964             : 
    2965           0 :         spin_lock_irqsave(&n->list_lock, flags);
    2966           0 :         list_for_each_entry(slab, &n->partial, slab_list)
    2967           0 :                 x += get_count(slab);
    2968           0 :         spin_unlock_irqrestore(&n->list_lock, flags);
    2969           0 :         return x;
    2970             : }
    2971             : #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
    2972             : 
    2973             : #ifdef CONFIG_SLUB_DEBUG
    2974             : static noinline void
    2975           0 : slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
    2976             : {
    2977             :         static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
    2978             :                                       DEFAULT_RATELIMIT_BURST);
    2979             :         int node;
    2980             :         struct kmem_cache_node *n;
    2981             : 
    2982           0 :         if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
    2983             :                 return;
    2984             : 
    2985           0 :         pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
    2986             :                 nid, gfpflags, &gfpflags);
    2987           0 :         pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
    2988             :                 s->name, s->object_size, s->size, oo_order(s->oo),
    2989             :                 oo_order(s->min));
    2990             : 
    2991           0 :         if (oo_order(s->min) > get_order(s->object_size))
    2992           0 :                 pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
    2993             :                         s->name);
    2994             : 
    2995           0 :         for_each_kmem_cache_node(s, node, n) {
    2996             :                 unsigned long nr_slabs;
    2997             :                 unsigned long nr_objs;
    2998             :                 unsigned long nr_free;
    2999             : 
    3000           0 :                 nr_free  = count_partial(n, count_free);
    3001           0 :                 nr_slabs = node_nr_slabs(n);
    3002           0 :                 nr_objs  = node_nr_objs(n);
    3003             : 
    3004           0 :                 pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
    3005             :                         node, nr_slabs, nr_objs, nr_free);
    3006             :         }
    3007             : }
    3008             : #else /* CONFIG_SLUB_DEBUG */
    3009             : static inline void
    3010             : slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
    3011             : #endif
    3012             : 
    3013             : static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
    3014             : {
    3015         922 :         if (unlikely(slab_test_pfmemalloc(slab)))
    3016           0 :                 return gfp_pfmemalloc_allowed(gfpflags);
    3017             : 
    3018             :         return true;
    3019             : }
    3020             : 
    3021             : #ifndef CONFIG_SLUB_TINY
    3022             : static inline bool
    3023       20683 : __update_cpu_freelist_fast(struct kmem_cache *s,
    3024             :                            void *freelist_old, void *freelist_new,
    3025             :                            unsigned long tid)
    3026             : {
    3027       20683 :         freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
    3028       41366 :         freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
    3029             : 
    3030       41366 :         return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
    3031             :                                              &old.full, new.full);
    3032             : }
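The fast path above works because the freelist head and the tid are packed into a single word, so one compare-exchange detects both a changed head and an intervening transaction (the ABA case). A hedged userspace sketch at half the width, with a 32-bit slot index standing in for the object pointer; pack() and update_freelist_fast() are illustrative names, not the kernel helpers:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct freelist_tid {
	_Atomic uint64_t full;
};

static uint64_t pack(uint32_t head, uint32_t tid)
{
	return ((uint64_t)tid << 32) | head;
}

static int update_freelist_fast(struct freelist_tid *ft,
				uint32_t old_head, uint32_t new_head,
				uint32_t tid)
{
	uint64_t expected = pack(old_head, tid);
	uint64_t desired  = pack(new_head, tid + 1);	/* like next_tid() */

	/* Succeeds only if both the head and the tid are unchanged. */
	return atomic_compare_exchange_strong(&ft->full, &expected, desired);
}

int main(void)
{
	struct freelist_tid ft;

	atomic_init(&ft.full, pack(7, 100));	/* head = slot 7, tid = 100 */

	printf("matching head and tid: %d\n", update_freelist_fast(&ft, 7, 8, 100));
	printf("stale tid rejected:    %d\n", update_freelist_fast(&ft, 8, 9, 100));
	return 0;
}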
    3033             : 
    3034             : /*
    3035             :  * Check the slab->freelist and either transfer the freelist to the
    3036             :  * per cpu freelist or deactivate the slab.
    3037             :  *
    3038             :  * The slab is still frozen if the return value is not NULL.
    3039             :  *
    3040             :  * If this function returns NULL then the slab has been unfrozen.
    3041             :  */
    3042         432 : static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
    3043             : {
    3044             :         struct slab new;
    3045             :         unsigned long counters;
    3046             :         void *freelist;
    3047             : 
    3048             :         lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
    3049             : 
    3050             :         do {
    3051         432 :                 freelist = slab->freelist;
    3052         432 :                 counters = slab->counters;
    3053             : 
    3054         432 :                 new.counters = counters;
    3055             :                 VM_BUG_ON(!new.frozen);
    3056             : 
    3057         432 :                 new.inuse = slab->objects;
    3058         432 :                 new.frozen = freelist != NULL;
    3059             : 
    3060         864 :         } while (!__slab_update_freelist(s, slab,
    3061             :                 freelist, counters,
    3062             :                 NULL, new.counters,
    3063         432 :                 "get_freelist"));
    3064             : 
    3065         432 :         return freelist;
    3066             : }
    3067             : 
    3068             : /*
    3069             :  * Slow path. The lockless freelist is empty or we need to perform
    3070             :  * debugging duties.
    3071             :  *
    3072             :  * Processing is still very fast if new objects have been freed to the
    3073             :  * regular freelist. In that case we simply take over the regular freelist
    3074             :  * as the lockless freelist and zap the regular freelist.
    3075             :  *
    3076             :  * If that is not working then we fall back to the partial lists. We take the
    3077             :  * first element of the freelist as the object to allocate now and move the
    3078             :  * rest of the freelist to the lockless freelist.
    3079             :  *
    3080             :  * And if we were unable to get a new slab from the partial slab lists then
    3081             :  * we need to allocate a new slab. This is the slowest path since it involves
    3082             :  * a call to the page allocator and the setup of a new slab.
    3083             :  *
    3084             :  * Version of __slab_alloc to use when we know that preemption is
    3085             :  * already disabled (which is the case for bulk allocation).
    3086             :  */
    3087         462 : static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
    3088             :                           unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
    3089             : {
    3090             :         void *freelist;
    3091             :         struct slab *slab;
    3092             :         unsigned long flags;
    3093             :         struct partial_context pc;
    3094             : 
    3095         462 :         stat(s, ALLOC_SLOWPATH);
    3096             : 
    3097             : reread_slab:
    3098             : 
    3099         462 :         slab = READ_ONCE(c->slab);
    3100         462 :         if (!slab) {
    3101             :                 /*
    3102             :                  * if the node is not online or has no normal memory, just
    3103             :                  * ignore the node constraint
    3104             :                  */
    3105          32 :                 if (unlikely(node != NUMA_NO_NODE &&
    3106             :                              !node_isset(node, slab_nodes)))
    3107           0 :                         node = NUMA_NO_NODE;
    3108             :                 goto new_slab;
    3109             :         }
    3110             : redo:
    3111             : 
    3112         432 :         if (unlikely(!node_match(slab, node))) {
    3113             :                 /*
    3114             :                  * same as above but node_match() being false already
    3115             :                  * implies node != NUMA_NO_NODE
    3116             :                  */
    3117             :                 if (!node_isset(node, slab_nodes)) {
    3118             :                         node = NUMA_NO_NODE;
    3119             :                 } else {
    3120             :                         stat(s, ALLOC_NODE_MISMATCH);
    3121             :                         goto deactivate_slab;
    3122             :                 }
    3123             :         }
    3124             : 
    3125             :         /*
    3126             :          * By rights, we should be searching for a slab page that was
    3127             :          * PFMEMALLOC but right now, we are losing the pfmemalloc
    3128             :          * information when the page leaves the per-cpu allocator
    3129             :          */
    3130         864 :         if (unlikely(!pfmemalloc_match(slab, gfpflags)))
    3131             :                 goto deactivate_slab;
    3132             : 
    3133             :         /* must check again c->slab in case we got preempted and it changed */
    3134         432 :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    3135         432 :         if (unlikely(slab != c->slab)) {
    3136           0 :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3137             :                 goto reread_slab;
    3138             :         }
    3139         432 :         freelist = c->freelist;
    3140         432 :         if (freelist)
    3141             :                 goto load_freelist;
    3142             : 
    3143         432 :         freelist = get_freelist(s, slab);
    3144             : 
    3145         432 :         if (!freelist) {
    3146         432 :                 c->slab = NULL;
    3147         864 :                 c->tid = next_tid(c->tid);
    3148         432 :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3149             :                 stat(s, DEACTIVATE_BYPASS);
    3150             :                 goto new_slab;
    3151             :         }
    3152             : 
    3153             :         stat(s, ALLOC_REFILL);
    3154             : 
    3155             : load_freelist:
    3156             : 
    3157         462 :         lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
    3158             : 
    3159             :         /*
    3160             :          * freelist is pointing to the list of objects to be used.
    3161             :          * slab is pointing to the slab from which the objects are obtained.
    3162             :          * That slab must be frozen for per cpu allocations to work.
    3163             :          */
    3164             :         VM_BUG_ON(!c->slab->frozen);
    3165         924 :         c->freelist = get_freepointer(s, freelist);
    3166         924 :         c->tid = next_tid(c->tid);
    3167         924 :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3168         462 :         return freelist;
    3169             : 
    3170             : deactivate_slab:
    3171             : 
    3172           0 :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    3173           0 :         if (slab != c->slab) {
    3174           0 :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3175             :                 goto reread_slab;
    3176             :         }
    3177           0 :         freelist = c->freelist;
    3178           0 :         c->slab = NULL;
    3179           0 :         c->freelist = NULL;
    3180           0 :         c->tid = next_tid(c->tid);
    3181           0 :         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3182           0 :         deactivate_slab(s, slab, freelist);
    3183             : 
    3184             : new_slab:
    3185             : 
    3186             :         if (slub_percpu_partial(c)) {
    3187             :                 local_lock_irqsave(&s->cpu_slab->lock, flags);
    3188             :                 if (unlikely(c->slab)) {
    3189             :                         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3190             :                         goto reread_slab;
    3191             :                 }
    3192             :                 if (unlikely(!slub_percpu_partial(c))) {
    3193             :                         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3194             :                         /* we were preempted and the partial list became empty */
    3195             :                         goto new_objects;
    3196             :                 }
    3197             : 
    3198             :                 slab = c->slab = slub_percpu_partial(c);
    3199             :                 slub_set_percpu_partial(c, slab);
    3200             :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3201             :                 stat(s, CPU_PARTIAL_ALLOC);
    3202             :                 goto redo;
    3203             :         }
    3204             : 
    3205             : new_objects:
    3206             : 
    3207         462 :         pc.flags = gfpflags;
    3208         462 :         pc.slab = &slab;
    3209         462 :         pc.orig_size = orig_size;
    3210         462 :         freelist = get_partial(s, node, &pc);
    3211         462 :         if (freelist)
    3212             :                 goto check_new_slab;
    3213             : 
    3214         434 :         slub_put_cpu_ptr(s->cpu_slab);
    3215         434 :         slab = new_slab(s, gfpflags, node);
    3216         434 :         c = slub_get_cpu_ptr(s->cpu_slab);
    3217             : 
    3218         434 :         if (unlikely(!slab)) {
    3219           0 :                 slab_out_of_memory(s, gfpflags, node);
    3220           0 :                 return NULL;
    3221             :         }
    3222             : 
    3223         434 :         stat(s, ALLOC_SLAB);
    3224             : 
    3225         434 :         if (kmem_cache_debug(s)) {
    3226           0 :                 freelist = alloc_single_from_new_slab(s, slab, orig_size);
    3227             : 
    3228           0 :                 if (unlikely(!freelist))
    3229             :                         goto new_objects;
    3230             : 
    3231           0 :                 if (s->flags & SLAB_STORE_USER)
    3232             :                         set_track(s, freelist, TRACK_ALLOC, addr);
    3233             : 
    3234             :                 return freelist;
    3235             :         }
    3236             : 
    3237             :         /*
    3238             :          * No other reference to the slab yet so we can
    3239             :          * muck around with it freely without cmpxchg
    3240             :          */
    3241         434 :         freelist = slab->freelist;
    3242         434 :         slab->freelist = NULL;
    3243         434 :         slab->inuse = slab->objects;
    3244         434 :         slab->frozen = 1;
    3245             : 
    3246         868 :         inc_slabs_node(s, slab_nid(slab), slab->objects);
    3247             : 
    3248             : check_new_slab:
    3249             : 
    3250         462 :         if (kmem_cache_debug(s)) {
    3251             :                 /*
    3252             :                  * For debug caches here we had to go through
    3253             :                  * alloc_single_from_partial() so just store the tracking info
    3254             :                  * and return the object
    3255             :                  */
    3256           0 :                 if (s->flags & SLAB_STORE_USER)
    3257             :                         set_track(s, freelist, TRACK_ALLOC, addr);
    3258             : 
    3259             :                 return freelist;
    3260             :         }
    3261             : 
    3262         924 :         if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
    3263             :                 /*
    3264             :                  * For !pfmemalloc_match() case we don't load freelist so that
    3265             :                  * we don't make further mismatched allocations easier.
    3266             :                  */
    3267           0 :                 deactivate_slab(s, slab, get_freepointer(s, freelist));
    3268           0 :                 return freelist;
    3269             :         }
    3270             : 
    3271             : retry_load_slab:
    3272             : 
    3273         462 :         local_lock_irqsave(&s->cpu_slab->lock, flags);
    3274         462 :         if (unlikely(c->slab)) {
    3275           0 :                 void *flush_freelist = c->freelist;
    3276           0 :                 struct slab *flush_slab = c->slab;
    3277             : 
    3278           0 :                 c->slab = NULL;
    3279           0 :                 c->freelist = NULL;
    3280           0 :                 c->tid = next_tid(c->tid);
    3281             : 
    3282           0 :                 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
    3283             : 
    3284           0 :                 deactivate_slab(s, flush_slab, flush_freelist);
    3285             : 
    3286           0 :                 stat(s, CPUSLAB_FLUSH);
    3287             : 
    3288             :                 goto retry_load_slab;
    3289             :         }
    3290         462 :         c->slab = slab;
    3291             : 
    3292         462 :         goto load_freelist;
    3293             : }
    3294             : 
    3295             : /*
    3296             :  * A wrapper for ___slab_alloc() for contexts where preemption is not yet
    3297             :  * disabled. Compensates for possible cpu changes by refetching the per cpu area
    3298             :  * pointer.
    3299             :  */
    3300             : static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
    3301             :                           unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
    3302             : {
    3303             :         void *p;
    3304             : 
    3305             : #ifdef CONFIG_PREEMPT_COUNT
    3306             :         /*
    3307             :          * We may have been preempted and rescheduled on a different
    3308             :          * cpu before disabling preemption. Need to reload cpu area
    3309             :          * pointer.
    3310             :          */
    3311             :         c = slub_get_cpu_ptr(s->cpu_slab);
    3312             : #endif
    3313             : 
    3314         461 :         p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
    3315             : #ifdef CONFIG_PREEMPT_COUNT
    3316             :         slub_put_cpu_ptr(s->cpu_slab);
    3317             : #endif
    3318             :         return p;
    3319             : }
    3320             : 
    3321             : static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
    3322             :                 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
    3323             : {
    3324             :         struct kmem_cache_cpu *c;
    3325             :         struct slab *slab;
    3326             :         unsigned long tid;
    3327             :         void *object;
    3328             : 
    3329             : redo:
    3330             :         /*
    3331             :          * Must read kmem_cache cpu data via this cpu ptr. Preemption is
    3332             :          * enabled. We may switch back and forth between cpus while
    3333             :          * reading from one cpu area. That does not matter as long
    3334             :          * as we end up on the original cpu again when doing the cmpxchg.
    3335             :          *
    3336             :          * We must guarantee that tid and kmem_cache_cpu are retrieved on the
    3337             :          * same cpu. We read first the kmem_cache_cpu pointer and use it to read
    3338             :          * the tid. If we are preempted and switched to another cpu between the
    3339             :          * two reads, it's OK as the two are still associated with the same cpu
    3340             :          * and cmpxchg later will validate the cpu.
    3341             :          */
    3342       16949 :         c = raw_cpu_ptr(s->cpu_slab);
    3343       16949 :         tid = READ_ONCE(c->tid);
    3344             : 
    3345             :         /*
    3346             :          * The irqless object alloc/free algorithm used here depends on the
    3347             :          * sequence in which cpu_slab's data is fetched. tid must be fetched
    3348             :          * before anything else on c so that the object and slab associated
    3349             :          * with the previous tid won't be used with the current tid. If tid
    3350             :          * were fetched first, they could be the ones associated with the next
    3351             :          * tid and the alloc/free request would simply fail and be retried.
    3352             :          */
    3353       16949 :         barrier();
    3354             : 
    3355             :         /*
    3356             :          * The transaction ids are globally unique per cpu and per operation on
    3357             :          * a per cpu queue. Thus they guarantee that the cmpxchg_double
    3358             :          * occurs on the right processor and that there was no operation on the
    3359             :          * linked list in between.
    3360             :          */
    3361             : 
    3362       16949 :         object = c->freelist;
    3363       16949 :         slab = c->slab;
    3364             : 
    3365       16949 :         if (!USE_LOCKLESS_FAST_PATH() ||
    3366       33437 :             unlikely(!object || !slab || !node_match(slab, node))) {
    3367         622 :                 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
    3368             :         } else {
    3369       16488 :                 void *next_object = get_freepointer_safe(s, object);
    3370             : 
    3371             :                 /*
    3372             :                  * The cmpxchg will only match if there was no additional
    3373             :                  * operation and if we are on the right processor.
    3374             :                  *
    3375             :                  * The cmpxchg does the following atomically (without lock
    3376             :                  * semantics!)
    3377             :                  * 1. Relocate first pointer to the current per cpu area.
    3378             :                  * 2. Verify that tid and freelist have not been changed
    3379             :                  * 3. If they were not changed replace tid and freelist
    3380             :                  *
    3381             :                  * Since this is without lock semantics the protection is only
    3382             :                  * against code executing on this cpu *not* from access by
    3383             :                  * other cpus.
    3384             :                  */
    3385       16488 :                 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) {
    3386             :                         note_cmpxchg_failure("slab_alloc", s, tid);
    3387             :                         goto redo;
    3388             :                 }
    3389       16488 :                 prefetch_freepointer(s, next_object);
    3390             :                 stat(s, ALLOC_FASTPATH);
    3391             :         }
    3392             : 
    3393             :         return object;
    3394             : }
    3395             : #else /* CONFIG_SLUB_TINY */
    3396             : static void *__slab_alloc_node(struct kmem_cache *s,
    3397             :                 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
    3398             : {
    3399             :         struct partial_context pc;
    3400             :         struct slab *slab;
    3401             :         void *object;
    3402             : 
    3403             :         pc.flags = gfpflags;
    3404             :         pc.slab = &slab;
    3405             :         pc.orig_size = orig_size;
    3406             :         object = get_partial(s, node, &pc);
    3407             : 
    3408             :         if (object)
    3409             :                 return object;
    3410             : 
    3411             :         slab = new_slab(s, gfpflags, node);
    3412             :         if (unlikely(!slab)) {
    3413             :                 slab_out_of_memory(s, gfpflags, node);
    3414             :                 return NULL;
    3415             :         }
    3416             : 
    3417             :         object = alloc_single_from_new_slab(s, slab, orig_size);
    3418             : 
    3419             :         return object;
    3420             : }
    3421             : #endif /* CONFIG_SLUB_TINY */
    3422             : 
    3423             : /*
    3424             :  * If the object has been wiped upon free, make sure it's fully initialized by
    3425             :  * zeroing out the freelist pointer.
    3426             :  */
    3427             : static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
    3428             :                                                    void *obj)
    3429             : {
    3430       17001 :         if (unlikely(slab_want_init_on_free(s)) && obj)
    3431           0 :                 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
    3432             :                         0, sizeof(void *));
    3433             : }
    3434             : 
    3435             : /*
    3436             :  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
    3437             :  * have the fastpath folded into their functions. So no function call
    3438             :  * overhead for requests that can be satisfied on the fastpath.
    3439             :  *
    3440             :  * The fastpath works by first checking if the lockless freelist can be used.
    3441             :  * If not then __slab_alloc is called for slow processing.
    3442             :  *
    3443             :  * Otherwise we can simply pick the next object from the lockless free list.
    3444             :  */
    3445             : static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
    3446             :                 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
    3447             : {
    3448             :         void *object;
    3449       16949 :         struct obj_cgroup *objcg = NULL;
    3450       16949 :         bool init = false;
    3451             : 
    3452       33898 :         s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
    3453       16949 :         if (!s)
    3454             :                 return NULL;
    3455             : 
    3456       16949 :         object = kfence_alloc(s, orig_size, gfpflags);
    3457             :         if (unlikely(object))
    3458             :                 goto out;
    3459             : 
    3460       16949 :         object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
    3461             : 
    3462       33898 :         maybe_wipe_obj_freeptr(s, object);
    3463       33898 :         init = slab_want_init_on_alloc(gfpflags, s);
    3464             : 
    3465             : out:
    3466             :         /*
    3467             :          * When init equals 'true', like for kzalloc() family, only
    3468             :          * @orig_size bytes might be zeroed instead of s->object_size
    3469             :          */
    3470       16949 :         slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
    3471             : 
    3472       16949 :         return object;
    3473             : }
    3474             : 
    3475             : static __fastpath_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
    3476             :                 gfp_t gfpflags, unsigned long addr, size_t orig_size)
    3477             : {
    3478        9849 :         return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
    3479             : }
    3480             : 
    3481             : static __fastpath_inline
    3482             : void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
    3483             :                              gfp_t gfpflags)
    3484             : {
    3485       19698 :         void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
    3486             : 
    3487        9849 :         trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
    3488             : 
    3489             :         return ret;
    3490             : }
    3491             : 
    3492        9800 : void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
    3493             : {
    3494        9800 :         return __kmem_cache_alloc_lru(s, NULL, gfpflags);
    3495             : }
    3496             : EXPORT_SYMBOL(kmem_cache_alloc);
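
/*
 * Illustrative sketch, not part of slub.c: the usual caller-side lifecycle
 * of a dedicated cache built on the interfaces above.  The cache name, the
 * demo_item structure and the error handling are hypothetical; the
 * kmem_cache_* calls are the standard API.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_item {
	struct list_head list;
	u64 key;
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
	/* One cache of fixed-size, cacheline-aligned demo_item objects. */
	demo_cachep = kmem_cache_create("demo_item", sizeof(struct demo_item),
					0, SLAB_HWCACHE_ALIGN, NULL);
	return demo_cachep ? 0 : -ENOMEM;
}

static struct demo_item *demo_item_new(u64 key)
{
	struct demo_item *item = kmem_cache_alloc(demo_cachep, GFP_KERNEL);

	if (!item)
		return NULL;
	INIT_LIST_HEAD(&item->list);
	item->key = key;
	return item;
}

static void demo_item_free(struct demo_item *item)
{
	/* Objects go back to the cache they were allocated from. */
	kmem_cache_free(demo_cachep, item);
}

static void __exit demo_cache_exit(void)
{
	/* All objects must have been freed before the cache is destroyed. */
	kmem_cache_destroy(demo_cachep);
}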
    3497             : 
    3498          49 : void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
    3499             :                            gfp_t gfpflags)
    3500             : {
    3501          49 :         return __kmem_cache_alloc_lru(s, lru, gfpflags);
    3502             : }
    3503             : EXPORT_SYMBOL(kmem_cache_alloc_lru);
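
/*
 * Illustrative sketch, not part of slub.c: the _lru variant is for objects
 * that will later be placed on a list_lru, so the matching (per-memcg) lru
 * structures can be prepared as part of the allocation.  The cache, lru
 * and structure names are hypothetical.
 */
#include <linux/list_lru.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head lru_entry;	/* linked onto demo_lru once populated */
};

static struct list_lru demo_lru;	/* set up elsewhere with list_lru_init() */

static struct demo_node *demo_node_alloc(struct kmem_cache *cachep)
{
	return kmem_cache_alloc_lru(cachep, &demo_lru, GFP_KERNEL);
}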
    3504             : 
    3505        6838 : void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
    3506             :                               int node, size_t orig_size,
    3507             :                               unsigned long caller)
    3508             : {
    3509        6838 :         return slab_alloc_node(s, NULL, gfpflags, node,
    3510             :                                caller, orig_size);
    3511             : }
    3512             : 
    3513         262 : void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
    3514             : {
    3515         524 :         void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
    3516             : 
    3517         262 :         trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
    3518             : 
    3519         262 :         return ret;
    3520             : }
    3521             : EXPORT_SYMBOL(kmem_cache_alloc_node);
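
/*
 * Illustrative sketch, not part of slub.c: node-aware allocation for
 * per-node control structures, so each node's state lives in that node's
 * memory.  The cache pointer and demo_node_state structure are
 * hypothetical; kmem_cache_alloc_node() and for_each_online_node() are
 * the real interfaces.
 */
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node_state {
	spinlock_t lock;
	unsigned long nr_items;
};

static struct demo_node_state *demo_states[MAX_NUMNODES];

static int demo_init_node_states(struct kmem_cache *cachep)
{
	int nid;

	for_each_online_node(nid) {
		struct demo_node_state *state =
			kmem_cache_alloc_node(cachep, GFP_KERNEL | __GFP_ZERO, nid);

		if (!state)
			return -ENOMEM;
		spin_lock_init(&state->lock);
		demo_states[nid] = state;
	}
	return 0;
}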
    3522             : 
    3523           0 : static noinline void free_to_partial_list(
    3524             :         struct kmem_cache *s, struct slab *slab,
    3525             :         void *head, void *tail, int bulk_cnt,
    3526             :         unsigned long addr)
    3527             : {
    3528           0 :         struct kmem_cache_node *n = get_node(s, slab_nid(slab));
    3529           0 :         struct slab *slab_free = NULL;
    3530           0 :         int cnt = bulk_cnt;
    3531             :         unsigned long flags;
    3532           0 :         depot_stack_handle_t handle = 0;
    3533             : 
    3534           0 :         if (s->flags & SLAB_STORE_USER)
    3535           0 :                 handle = set_track_prepare();
    3536             : 
    3537           0 :         spin_lock_irqsave(&n->list_lock, flags);
    3538             : 
    3539           0 :         if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
    3540           0 :                 void *prior = slab->freelist;
    3541             : 
    3542             :                 /* Perform the actual freeing while we still hold the locks */
    3543           0 :                 slab->inuse -= cnt;
    3544           0 :                 set_freepointer(s, tail, prior);
    3545           0 :                 slab->freelist = head;
    3546             : 
    3547             :                 /*
    3548             :                  * If the slab is empty and the node's partial list is full,
    3549             :                  * it should be discarded regardless of whether it is on the
    3550             :                  * full or the partial list.
    3551             :                  */
    3552           0 :                 if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
    3553           0 :                         slab_free = slab;
    3554             : 
    3555           0 :                 if (!prior) {
    3556             :                         /* was on full list */
    3557           0 :                         remove_full(s, n, slab);
    3558           0 :                         if (!slab_free) {
    3559             :                                 add_partial(n, slab, DEACTIVATE_TO_TAIL);
    3560             :                                 stat(s, FREE_ADD_PARTIAL);
    3561             :                         }
    3562           0 :                 } else if (slab_free) {
    3563           0 :                         remove_partial(n, slab);
    3564             :                         stat(s, FREE_REMOVE_PARTIAL);
    3565             :                 }
    3566             :         }
    3567             : 
    3568           0 :         if (slab_free) {
    3569             :                 /*
    3570             :                  * Update the counters while still holding n->list_lock to
    3571             :                  * prevent spurious validation warnings
    3572             :                  */
    3573           0 :                 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
    3574             :         }
    3575             : 
    3576           0 :         spin_unlock_irqrestore(&n->list_lock, flags);
    3577             : 
    3578           0 :         if (slab_free) {
    3579           0 :                 stat(s, FREE_SLAB);
    3580           0 :                 free_slab(s, slab_free);
    3581             :         }
    3582           0 : }
    3583             : 
    3584             : /*
    3585             :  * Slow path handling. This may still be called frequently since objects
    3586             :  * have a longer lifetime than the cpu slabs in most processing loads.
    3587             :  *
    3588             :  * So we still attempt to reduce cache line usage. Just take the slab
    3589             :  * lock and free the item. If there is no additional partial slab
    3590             :  * handling required then we can return immediately.
    3591             :  */
    3592         228 : static void __slab_free(struct kmem_cache *s, struct slab *slab,
    3593             :                         void *head, void *tail, int cnt,
    3594             :                         unsigned long addr)
    3595             : 
    3596             : {
    3597             :         void *prior;
    3598             :         int was_frozen;
    3599             :         struct slab new;
    3600             :         unsigned long counters;
    3601         228 :         struct kmem_cache_node *n = NULL;
    3602             :         unsigned long flags;
    3603             : 
    3604         228 :         stat(s, FREE_SLOWPATH);
    3605             : 
    3606         228 :         if (kfence_free(head))
    3607         228 :                 return;
    3608             : 
    3609         228 :         if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
    3610           0 :                 free_to_partial_list(s, slab, head, tail, cnt, addr);
    3611           0 :                 return;
    3612             :         }
    3613             : 
    3614             :         do {
    3615         228 :                 if (unlikely(n)) {
    3616           0 :                         spin_unlock_irqrestore(&n->list_lock, flags);
    3617           0 :                         n = NULL;
    3618             :                 }
    3619         228 :                 prior = slab->freelist;
    3620         228 :                 counters = slab->counters;
    3621         456 :                 set_freepointer(s, tail, prior);
    3622         228 :                 new.counters = counters;
    3623         228 :                 was_frozen = new.frozen;
    3624         228 :                 new.inuse -= cnt;
    3625         228 :                 if ((!new.inuse || !prior) && !was_frozen) {
    3626             : 
    3627          37 :                         if (kmem_cache_has_cpu_partial(s) && !prior) {
    3628             : 
    3629             :                                 /*
    3630             :                                  * Slab was on no list before and will be
    3631             :                                  * partially empty.
    3632             :                                  * We can defer the list move and instead
    3633             :                                  * freeze it.
    3634             :                                  */
    3635             :                                 new.frozen = 1;
    3636             : 
    3637             :                         } else { /* Needs to be taken off a list */
    3638             : 
    3639         111 :                                 n = get_node(s, slab_nid(slab));
    3640             :                                 /*
    3641             :                                  * Speculatively acquire the list_lock.
    3642             :                                  * If the cmpxchg does not succeed then we may
    3643             :                                  * drop the list_lock without any processing.
    3644             :                                  *
    3645             :                                  * Otherwise the list_lock will synchronize with
    3646             :                                  * other processors updating the list of slabs.
    3647             :                                  */
    3648          37 :                                 spin_lock_irqsave(&n->list_lock, flags);
    3649             : 
    3650             :                         }
    3651             :                 }
    3652             : 
    3653         228 :         } while (!slab_update_freelist(s, slab,
    3654             :                 prior, counters,
    3655             :                 head, new.counters,
    3656         228 :                 "__slab_free"));
    3657             : 
    3658         228 :         if (likely(!n)) {
    3659             : 
    3660             :                 if (likely(was_frozen)) {
    3661             :                         /*
    3662             :                          * The list lock was not taken therefore no list
    3663             :                          * activity can be necessary.
    3664             :                          */
    3665             :                         stat(s, FREE_FROZEN);
    3666             :                 } else if (new.frozen) {
    3667             :                         /*
    3668             :                          * If we just froze the slab then put it onto the
    3669             :                          * per cpu partial list.
    3670             :                          */
    3671             :                         put_cpu_partial(s, slab, 1);
    3672             :                         stat(s, CPU_PARTIAL_FREE);
    3673             :                 }
    3674             : 
    3675             :                 return;
    3676             :         }
    3677             : 
    3678          37 :         if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
    3679             :                 goto slab_empty;
    3680             : 
    3681             :         /*
    3682             :          * Objects left in the slab. If it was not on the partial list before
    3683             :          * then add it.
    3684             :          */
    3685          37 :         if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
    3686          70 :                 remove_full(s, n, slab);
    3687             :                 add_partial(n, slab, DEACTIVATE_TO_TAIL);
    3688             :                 stat(s, FREE_ADD_PARTIAL);
    3689             :         }
    3690          37 :         spin_unlock_irqrestore(&n->list_lock, flags);
    3691             :         return;
    3692             : 
    3693             : slab_empty:
    3694           0 :         if (prior) {
    3695             :                 /*
    3696             :                  * Slab on the partial list.
    3697             :                  */
    3698           0 :                 remove_partial(n, slab);
    3699             :                 stat(s, FREE_REMOVE_PARTIAL);
    3700             :         } else {
    3701             :                 /* Slab must be on the full list */
    3702           0 :                 remove_full(s, n, slab);
    3703             :         }
    3704             : 
    3705           0 :         spin_unlock_irqrestore(&n->list_lock, flags);
    3706           0 :         stat(s, FREE_SLAB);
    3707           0 :         discard_slab(s, slab);
    3708             : }
    3709             : 
    3710             : #ifndef CONFIG_SLUB_TINY
    3711             : /*
    3712             :  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
    3713             :  * can perform fastpath freeing without additional function calls.
    3714             :  *
    3715             :  * The fastpath is only possible if we are freeing to the current cpu slab
    3716             :  * of this processor. This is typically the case if we have just allocated
    3717             :  * the item before.
    3718             :  *
    3719             :  * If the fastpath is not possible then fall back to __slab_free where we deal
    3720             :  * with all sorts of special processing.
    3721             :  *
    3722             :  * Bulk free of a freelist with several objects (all pointing to the
    3723             :  * same slab) is possible by specifying head and tail pointers plus the
    3724             :  * object count (cnt). A bulk free is indicated by the tail pointer being set.
    3725             :  */
    3726             : static __always_inline void do_slab_free(struct kmem_cache *s,
    3727             :                                 struct slab *slab, void *head, void *tail,
    3728             :                                 int cnt, unsigned long addr)
    3729             : {
    3730        4423 :         void *tail_obj = tail ? : head;
    3731             :         struct kmem_cache_cpu *c;
    3732             :         unsigned long tid;
    3733             :         void **freelist;
    3734             : 
    3735             : redo:
    3736             :         /*
    3737             :          * Determine the current cpu's per cpu slab.
    3738             :          * The cpu may change afterward. However that does not matter since
    3739             :          * data is retrieved via this pointer. If we are on the same cpu
    3740             :          * during the cmpxchg then the free will succeed.
    3741             :          */
    3742        4423 :         c = raw_cpu_ptr(s->cpu_slab);
    3743        4423 :         tid = READ_ONCE(c->tid);
    3744             : 
    3745             :         /* Same with comment on barrier() in slab_alloc_node() */
    3746        4423 :         barrier();
    3747             : 
    3748        4423 :         if (unlikely(slab != c->slab)) {
    3749         228 :                 __slab_free(s, slab, head, tail_obj, cnt, addr);
    3750             :                 return;
    3751             :         }
    3752             : 
    3753             :         if (USE_LOCKLESS_FAST_PATH()) {
    3754        4195 :                 freelist = READ_ONCE(c->freelist);
    3755             : 
    3756        8390 :                 set_freepointer(s, tail_obj, freelist);
    3757             : 
    3758        4195 :                 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
    3759             :                         note_cmpxchg_failure("slab_free", s, tid);
    3760             :                         goto redo;
    3761             :                 }
    3762             :         } else {
    3763             :                 /* Update the free list under the local lock */
    3764             :                 local_lock(&s->cpu_slab->lock);
    3765             :                 c = this_cpu_ptr(s->cpu_slab);
    3766             :                 if (unlikely(slab != c->slab)) {
    3767             :                         local_unlock(&s->cpu_slab->lock);
    3768             :                         goto redo;
    3769             :                 }
    3770             :                 tid = c->tid;
    3771             :                 freelist = c->freelist;
    3772             : 
    3773             :                 set_freepointer(s, tail_obj, freelist);
    3774             :                 c->freelist = head;
    3775             :                 c->tid = next_tid(tid);
    3776             : 
    3777             :                 local_unlock(&s->cpu_slab->lock);
    3778             :         }
    3779             :         stat(s, FREE_FASTPATH);
    3780             : }
    3781             : #else /* CONFIG_SLUB_TINY */
    3782             : static void do_slab_free(struct kmem_cache *s,
    3783             :                                 struct slab *slab, void *head, void *tail,
    3784             :                                 int cnt, unsigned long addr)
    3785             : {
    3786             :         void *tail_obj = tail ? : head;
    3787             : 
    3788             :         __slab_free(s, slab, head, tail_obj, cnt, addr);
    3789             : }
    3790             : #endif /* CONFIG_SLUB_TINY */
    3791             : 
    3792             : static __fastpath_inline void slab_free(struct kmem_cache *s, struct slab *slab,
    3793             :                                       void *head, void *tail, void **p, int cnt,
    3794             :                                       unsigned long addr)
    3795             : {
    3796        4423 :         memcg_slab_free_hook(s, slab, p, cnt);
    3797             :         /*
    3798             :          * With KASAN enabled slab_free_freelist_hook modifies the freelist
    3799             :          * to remove objects, whose reuse must be delayed.
    3800             :          */
    3801        4423 :         if (slab_free_freelist_hook(s, &head, &tail, &cnt))
    3802        4423 :                 do_slab_free(s, slab, head, tail, cnt, addr);
    3803             : }
    3804             : 
    3805             : #ifdef CONFIG_KASAN_GENERIC
    3806             : void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
    3807             : {
    3808             :         do_slab_free(cache, virt_to_slab(x), x, NULL, 1, addr);
    3809             : }
    3810             : #endif
    3811             : 
    3812        3127 : void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
    3813             : {
    3814        9381 :         slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
    3815        3127 : }
    3816             : 
    3817        1296 : void kmem_cache_free(struct kmem_cache *s, void *x)
    3818             : {
    3819        1296 :         s = cache_from_obj(s, x);
    3820        1296 :         if (!s)
    3821             :                 return;
    3822        1296 :         trace_kmem_cache_free(_RET_IP_, x, s);
    3823        2592 :         slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
    3824             : }
    3825             : EXPORT_SYMBOL(kmem_cache_free);
    3826             : 
    3827             : struct detached_freelist {
    3828             :         struct slab *slab;
    3829             :         void *tail;
    3830             :         void *freelist;
    3831             :         int cnt;
    3832             :         struct kmem_cache *s;
    3833             : };
    3834             : 
    3835             : /*
    3836             :  * This function progressively scans the array of objects to free (with
    3837             :  * a limited look ahead) and extracts the objects belonging to the same
    3838             :  * slab.  It builds a detached freelist directly within the given
    3839             :  * slab/objects.  This can happen without any need for
    3840             :  * synchronization, because the objects are owned by the running process.
    3841             :  * The freelist is built up as a singly linked list in the objects.
    3842             :  * The idea is that this detached freelist can then be bulk
    3843             :  * transferred to the real freelist(s), requiring only a single
    3844             :  * synchronization primitive.  Look ahead in the array is limited for
    3845             :  * performance reasons.
    3846             :  */
    3847             : static inline
    3848           0 : int build_detached_freelist(struct kmem_cache *s, size_t size,
    3849             :                             void **p, struct detached_freelist *df)
    3850             : {
    3851           0 :         int lookahead = 3;
    3852             :         void *object;
    3853             :         struct folio *folio;
    3854             :         size_t same;
    3855             : 
    3856           0 :         object = p[--size];
    3857           0 :         folio = virt_to_folio(object);
    3858           0 :         if (!s) {
    3859             :                 /* Handle kmalloc'ed objects */
    3860           0 :                 if (unlikely(!folio_test_slab(folio))) {
    3861           0 :                         free_large_kmalloc(folio, object);
    3862           0 :                         df->slab = NULL;
    3863           0 :                         return size;
    3864             :                 }
    3865             :                 /* Derive kmem_cache from object */
    3866           0 :                 df->slab = folio_slab(folio);
    3867           0 :                 df->s = df->slab->slab_cache;
    3868             :         } else {
    3869           0 :                 df->slab = folio_slab(folio);
    3870           0 :                 df->s = cache_from_obj(s, object); /* Support for memcg */
    3871             :         }
    3872             : 
    3873             :         /* Start new detached freelist */
    3874           0 :         df->tail = object;
    3875           0 :         df->freelist = object;
    3876           0 :         df->cnt = 1;
    3877             : 
    3878           0 :         if (is_kfence_address(object))
    3879             :                 return size;
    3880             : 
    3881           0 :         set_freepointer(df->s, object, NULL);
    3882             : 
    3883           0 :         same = size;
    3884           0 :         while (size) {
    3885           0 :                 object = p[--size];
    3886             :                 /* df->slab is always set at this point */
    3887           0 :                 if (df->slab == virt_to_slab(object)) {
    3888             :                         /* Opportunistically build the freelist */
    3889           0 :                         set_freepointer(df->s, object, df->freelist);
    3890           0 :                         df->freelist = object;
    3891           0 :                         df->cnt++;
    3892           0 :                         same--;
    3893           0 :                         if (size != same)
    3894           0 :                                 swap(p[size], p[same]);
    3895           0 :                         continue;
    3896             :                 }
    3897             : 
    3898             :                 /* Limit look ahead search */
    3899           0 :                 if (!--lookahead)
    3900             :                         break;
    3901             :         }
    3902             : 
    3903           0 :         return same;
    3904             : }
    3905             : 
    3906             : /* Note that interrupts must be enabled when calling this function. */
    3907           0 : void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
    3908             : {
    3909           0 :         if (!size)
    3910             :                 return;
    3911             : 
    3912             :         do {
    3913             :                 struct detached_freelist df;
    3914             : 
    3915           0 :                 size = build_detached_freelist(s, size, p, &df);
    3916           0 :                 if (!df.slab)
    3917           0 :                         continue;
    3918             : 
    3919           0 :                 slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
    3920           0 :                           _RET_IP_);
    3921           0 :         } while (likely(size));
    3922             : }
    3923             : EXPORT_SYMBOL(kmem_cache_free_bulk);
    3924             : 
    3925             : #ifndef CONFIG_SLUB_TINY
    3926           7 : static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
    3927             :                         size_t size, void **p, struct obj_cgroup *objcg)
    3928             : {
    3929             :         struct kmem_cache_cpu *c;
    3930             :         unsigned long irqflags;
    3931             :         int i;
    3932             : 
    3933             :         /*
    3934             :          * Drain objects in the per cpu slab, while disabling local
    3935             :          * IRQs, which protects against PREEMPT and interrupt
    3936             :          * handlers invoking the normal fastpath.
    3937             :          */
    3938           7 :         c = slub_get_cpu_ptr(s->cpu_slab);
    3939           7 :         local_lock_irqsave(&s->cpu_slab->lock, irqflags);
    3940             : 
    3941          33 :         for (i = 0; i < size; i++) {
    3942          26 :                 void *object = kfence_alloc(s, s->object_size, flags);
    3943             : 
    3944             :                 if (unlikely(object)) {
    3945             :                         p[i] = object;
    3946             :                         continue;
    3947             :                 }
    3948             : 
    3949          26 :                 object = c->freelist;
    3950          26 :                 if (unlikely(!object)) {
    3951             :                         /*
    3952             :                          * We may have removed an object from c->freelist using
    3953             :                          * the fastpath in the previous iteration; in that case,
    3954             :                          * c->tid has not been bumped yet.
    3955             :                          * Since ___slab_alloc() may reenable interrupts while
    3956             :                          * allocating memory, we should bump c->tid now.
    3957             :                          */
    3958           2 :                         c->tid = next_tid(c->tid);
    3959             : 
    3960           2 :                         local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
    3961             : 
    3962             :                         /*
    3963             :                          * Invoking the slow path likely has the side effect
    3964             :                          * of re-populating the per CPU c->freelist.
    3965             :                          */
    3966           2 :                         p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
    3967           1 :                                             _RET_IP_, c, s->object_size);
    3968           1 :                         if (unlikely(!p[i]))
    3969             :                                 goto error;
    3970             : 
    3971           1 :                         c = this_cpu_ptr(s->cpu_slab);
    3972           2 :                         maybe_wipe_obj_freeptr(s, p[i]);
    3973             : 
    3974           1 :                         local_lock_irqsave(&s->cpu_slab->lock, irqflags);
    3975             : 
    3976           1 :                         continue; /* goto for-loop */
    3977             :                 }
    3978          50 :                 c->freelist = get_freepointer(s, object);
    3979          25 :                 p[i] = object;
    3980          25 :                 maybe_wipe_obj_freeptr(s, p[i]);
    3981             :         }
    3982          14 :         c->tid = next_tid(c->tid);
    3983          14 :         local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
    3984           7 :         slub_put_cpu_ptr(s->cpu_slab);
    3985             : 
    3986           7 :         return i;
    3987             : 
    3988             : error:
    3989           0 :         slub_put_cpu_ptr(s->cpu_slab);
    3990           0 :         slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
    3991           0 :         kmem_cache_free_bulk(s, i, p);
    3992           0 :         return 0;
    3993             : 
    3994             : }
    3995             : #else /* CONFIG_SLUB_TINY */
    3996             : static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
    3997             :                         size_t size, void **p, struct obj_cgroup *objcg)
    3998             : {
    3999             :         int i;
    4000             : 
    4001             :         for (i = 0; i < size; i++) {
    4002             :                 void *object = kfence_alloc(s, s->object_size, flags);
    4003             : 
    4004             :                 if (unlikely(object)) {
    4005             :                         p[i] = object;
    4006             :                         continue;
    4007             :                 }
    4008             : 
    4009             :                 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
    4010             :                                          _RET_IP_, s->object_size);
    4011             :                 if (unlikely(!p[i]))
    4012             :                         goto error;
    4013             : 
    4014             :                 maybe_wipe_obj_freeptr(s, p[i]);
    4015             :         }
    4016             : 
    4017             :         return i;
    4018             : 
    4019             : error:
    4020             :         slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
    4021             :         kmem_cache_free_bulk(s, i, p);
    4022             :         return 0;
    4023             : }
    4024             : #endif /* CONFIG_SLUB_TINY */
    4025             : 
    4026             : /* Note that interrupts must be enabled when calling this function. */
    4027           7 : int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
    4028             :                           void **p)
    4029             : {
    4030             :         int i;
    4031           7 :         struct obj_cgroup *objcg = NULL;
    4032             : 
    4033           7 :         if (!size)
    4034             :                 return 0;
    4035             : 
    4036             :         /* memcg and kmem_cache debug support */
    4037          14 :         s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
    4038           7 :         if (unlikely(!s))
    4039             :                 return 0;
    4040             : 
    4041           7 :         i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
    4042             : 
    4043             :         /*
    4044             :          * memcg and kmem_cache debug support and memory initialization.
    4045             :          * Done outside of the IRQ disabled fastpath loop.
    4046             :          */
    4047           7 :         if (i != 0)
    4048          14 :                 slab_post_alloc_hook(s, objcg, flags, size, p,
    4049          14 :                         slab_want_init_on_alloc(flags, s), s->object_size);
    4050             :         return i;
    4051             : }
    4052             : EXPORT_SYMBOL(kmem_cache_alloc_bulk);
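
/*
 * Illustrative sketch, not part of slub.c: batched allocation and free of
 * several objects from one cache, amortizing the per-call overhead.  In the
 * implementation above the bulk allocation either fills the whole array or
 * returns 0, so only the objects actually received are used and freed.
 * The cache pointer and batch size are hypothetical.
 */
#include <linux/slab.h>

#define DEMO_BATCH	16

static int demo_run_batch(struct kmem_cache *cachep)
{
	void *objs[DEMO_BATCH];
	int got, i;

	got = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, DEMO_BATCH, objs);
	if (!got)
		return -ENOMEM;

	for (i = 0; i < got; i++) {
		/* ... initialize and use objs[i] ... */
	}

	/* Hand the whole batch back in a single call. */
	kmem_cache_free_bulk(cachep, got, objs);
	return 0;
}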
    4053             : 
    4054             : 
    4055             : /*
    4056             :  * Object placement in a slab is made very easy because we always start at
    4057             :  * offset 0. If we tune the size of the object to the alignment then we can
    4058             :  * get the required alignment by putting one properly sized object after
    4059             :  * another.
    4060             :  *
    4061             :  * Notice that the allocation order determines the sizes of the per cpu
    4062             :  * caches. Each processor always has one slab available for allocations.
    4063             :  * Increasing the allocation order reduces the number of times that slabs
    4064             :  * must be moved on and off the partial lists and is therefore a factor in
    4065             :  * locking overhead.
    4066             :  */
    4067             : 
    4068             : /*
    4069             :  * Minimum / Maximum order of slab pages. This influences locking overhead
    4070             :  * and slab fragmentation. A higher order reduces the number of partial slabs
    4071             :  * and increases the number of allocations possible without having to
    4072             :  * take the list_lock.
    4073             :  */
    4074             : static unsigned int slub_min_order;
    4075             : static unsigned int slub_max_order =
    4076             :         IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
    4077             : static unsigned int slub_min_objects;
    4078             : 
    4079             : /*
    4080             :  * Calculate the order of allocation given a slab object size.
    4081             :  *
    4082             :  * The order of allocation has significant impact on performance and other
    4083             :  * system components. Generally order 0 allocations should be preferred since
    4084             :  * order 0 does not cause fragmentation in the page allocator. Larger objects
    4085             :  * can be problematic to put into order 0 slabs because there may be too much
    4086             :  * unused space left. We go to a higher order if more than 1/16th of the slab
    4087             :  * would be wasted.
    4088             :  *
    4089             :  * In order to reach satisfactory performance we must ensure that a minimum
    4090             :  * number of objects is in one slab. Otherwise we may generate too much
    4091             :  * activity on the partial lists, which requires taking the list_lock. This is
    4092             :  * less of a concern for large slabs, though, as they are rarely used.
    4093             :  *
    4094             :  * slub_max_order specifies the order at which we stop considering the
    4095             :  * number of objects in a slab as critical. Once we reach slub_max_order we
    4096             :  * try to keep the page order as low as possible, so we accept more waste
    4097             :  * of space in favor of a small page order.
    4098             :  *
    4099             :  * Higher order allocations also allow the placement of more objects in a
    4100             :  * slab and thereby reduce object handling overhead. If the user has
    4101             :  * requested a higher minimum order then we start with that one instead of
    4102             :  * the smallest order which will fit the object.
    4103             :  */
    4104          55 : static inline unsigned int calc_slab_order(unsigned int size,
    4105             :                 unsigned int min_objects, unsigned int max_order,
    4106             :                 unsigned int fract_leftover)
    4107             : {
    4108          55 :         unsigned int min_order = slub_min_order;
    4109             :         unsigned int order;
    4110             : 
    4111          55 :         if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
    4112           0 :                 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
    4113             : 
    4114         168 :         for (order = max(min_order, (unsigned int)get_order(min_objects * size));
    4115           3 :                         order <= max_order; order++) {
    4116             : 
    4117          56 :                 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
    4118             :                 unsigned int rem;
    4119             : 
    4120          56 :                 rem = slab_size % size;
    4121             : 
    4122          56 :                 if (rem <= slab_size / fract_leftover)
    4123             :                         break;
    4124             :         }
    4125             : 
    4126             :         return order;
    4127             : }
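
The waste-fraction rule in calc_slab_order() can be illustrated with a standalone userspace sketch (this is not the kernel function and it only models the leftover check, not the min_objects/min_order handling; the 4096-byte page size, 700-byte object size and order limit are assumptions chosen for the example):

#include <stdio.h>

#define EX_PAGE_SIZE 4096u

static unsigned int example_order(unsigned int size, unsigned int max_order,
                                  unsigned int fract_leftover)
{
        unsigned int order;

        for (order = 0; order <= max_order; order++) {
                unsigned int slab_size = EX_PAGE_SIZE << order;
                unsigned int rem = slab_size % size;

                /* accept this order once the leftover is small enough */
                if (rem <= slab_size / fract_leftover)
                        break;
        }
        return order;
}

int main(void)
{
        /*
         * 700-byte objects: order 0 wastes 596 of 4096 bytes (more than
         * 1/16th), order 1 wastes 492 of 8192 bytes (within 1/16th),
         * so the loop settles on order 1.
         */
        printf("order = %u\n", example_order(700, 3, 16));
        return 0;
}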
    4128             : 
    4129          53 : static inline int calculate_order(unsigned int size)
    4130             : {
    4131             :         unsigned int order;
    4132             :         unsigned int min_objects;
    4133             :         unsigned int max_objects;
    4134             :         unsigned int nr_cpus;
    4135             : 
    4136             :         /*
    4137             :          * Attempt to find best configuration for a slab. This
    4138             :          * works by first attempting to generate a layout with
    4139             :          * the best configuration and backing off gradually.
    4140             :          *
    4141             :          * First we increase the acceptable waste in a slab. Then
    4142             :          * we reduce the minimum objects required in a slab.
    4143             :          */
    4144          53 :         min_objects = slub_min_objects;
    4145          53 :         if (!min_objects) {
    4146             :                 /*
    4147             :                  * Some architectures will only update present cpus when
    4148             :                  * onlining them, so don't trust the number if it's just 1. But
    4149             :                  * we also don't want to use nr_cpu_ids always, as on some other
    4150             :                  * architectures, there can be many possible cpus, but never
    4151             :                  * onlined. Here we compromise between trying to avoid too high
    4152             :                  * order on systems that appear larger than they are, and too
    4153             :                  * low order on systems that appear smaller than they are.
    4154             :                  */
    4155          53 :                 nr_cpus = num_present_cpus();
    4156             :                 if (nr_cpus <= 1)
    4157          53 :                         nr_cpus = nr_cpu_ids;
    4158          53 :                 min_objects = 4 * (fls(nr_cpus) + 1);
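                /* e.g. fls(16) == 5, so a 16-CPU system defaults to 4 * (5 + 1) = 24 */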
    4159             :         }
    4160         106 :         max_objects = order_objects(slub_max_order, size);
    4161          53 :         min_objects = min(min_objects, max_objects);
    4162             : 
    4163         106 :         while (min_objects > 1) {
    4164             :                 unsigned int fraction;
    4165             : 
    4166             :                 fraction = 16;
    4167          55 :                 while (fraction >= 4) {
    4168          55 :                         order = calc_slab_order(size, min_objects,
    4169             :                                         slub_max_order, fraction);
    4170          55 :                         if (order <= slub_max_order)
    4171          53 :                                 return order;
    4172           2 :                         fraction /= 2;
    4173             :                 }
    4174           0 :                 min_objects--;
    4175             :         }
    4176             : 
    4177             :         /*
    4178             :          * We were unable to place multiple objects in a slab. Now
    4179             :  * let's see if we can place a single object there.
    4180             :          */
    4181           0 :         order = calc_slab_order(size, 1, slub_max_order, 1);
    4182           0 :         if (order <= slub_max_order)
    4183           0 :                 return order;
    4184             : 
    4185             :         /*
    4186             :          * Doh this slab cannot be placed using slub_max_order.
    4187             :          */
    4188           0 :         order = calc_slab_order(size, 1, MAX_ORDER, 1);
    4189           0 :         if (order <= MAX_ORDER)
    4190           0 :                 return order;
    4191             :         return -ENOSYS;
    4192             : }
    4193             : 
    4194             : static void
    4195             : init_kmem_cache_node(struct kmem_cache_node *n)
    4196             : {
    4197          53 :         n->nr_partial = 0;
    4198          53 :         spin_lock_init(&n->list_lock);
    4199         106 :         INIT_LIST_HEAD(&n->partial);
    4200             : #ifdef CONFIG_SLUB_DEBUG
    4201         106 :         atomic_long_set(&n->nr_slabs, 0);
    4202         106 :         atomic_long_set(&n->total_objects, 0);
    4203         106 :         INIT_LIST_HEAD(&n->full);
    4204             : #endif
    4205             : }
    4206             : 
    4207             : #ifndef CONFIG_SLUB_TINY
    4208          53 : static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
    4209             : {
    4210             :         BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
    4211             :                         NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
    4212             :                         sizeof(struct kmem_cache_cpu));
    4213             : 
    4214             :         /*
    4215             :          * Must align to double word boundary for the double cmpxchg
    4216             :          * instructions to work; see __pcpu_double_call_return_bool().
    4217             :          */
    4218          53 :         s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
    4219             :                                      2 * sizeof(void *));
    4220             : 
    4221          53 :         if (!s->cpu_slab)
    4222             :                 return 0;
    4223             : 
    4224             :         init_kmem_cache_cpus(s);
    4225             : 
    4226             :         return 1;
    4227             : }
    4228             : #else
    4229             : static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
    4230             : {
    4231             :         return 1;
    4232             : }
    4233             : #endif /* CONFIG_SLUB_TINY */
    4234             : 
    4235             : static struct kmem_cache *kmem_cache_node;
    4236             : 
    4237             : /*
    4238             :  * No kmalloc_node yet so do it by hand. We know that this is the first
    4239             :  * slab on the node for this slabcache. There are no concurrent accesses
    4240             :  * possible.
    4241             :  *
    4242             :  * Note that this function only works on the kmem_cache_node
    4243             :  * when allocating for the kmem_cache_node. This is used for bootstrapping
    4244             :  * memory on a fresh node that has no slab structures yet.
    4245             :  */
    4246           1 : static void early_kmem_cache_node_alloc(int node)
    4247             : {
    4248             :         struct slab *slab;
    4249             :         struct kmem_cache_node *n;
    4250             : 
    4251           1 :         BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
    4252             : 
    4253           1 :         slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
    4254             : 
    4255           1 :         BUG_ON(!slab);
    4256           3 :         inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects);
    4257           2 :         if (slab_nid(slab) != node) {
    4258           0 :                 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
    4259           0 :                 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
    4260             :         }
    4261             : 
    4262           1 :         n = slab->freelist;
    4263           1 :         BUG_ON(!n);
    4264             : #ifdef CONFIG_SLUB_DEBUG
    4265           1 :         init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
    4266           1 :         init_tracking(kmem_cache_node, n);
    4267             : #endif
    4268           1 :         n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
    4269           2 :         slab->freelist = get_freepointer(kmem_cache_node, n);
    4270           1 :         slab->inuse = 1;
    4271           1 :         kmem_cache_node->node[node] = n;
    4272           1 :         init_kmem_cache_node(n);
    4273           2 :         inc_slabs_node(kmem_cache_node, node, slab->objects);
    4274             : 
    4275             :         /*
    4276             :          * No locks need to be taken here as it has just been
    4277             :          * initialized and there is no concurrent access.
    4278             :          */
    4279           1 :         __add_partial(n, slab, DEACTIVATE_TO_HEAD);
    4280           1 : }
    4281             : 
    4282           0 : static void free_kmem_cache_nodes(struct kmem_cache *s)
    4283             : {
    4284             :         int node;
    4285             :         struct kmem_cache_node *n;
    4286             : 
    4287           0 :         for_each_kmem_cache_node(s, node, n) {
    4288           0 :                 s->node[node] = NULL;
    4289           0 :                 kmem_cache_free(kmem_cache_node, n);
    4290             :         }
    4291           0 : }
    4292             : 
    4293           0 : void __kmem_cache_release(struct kmem_cache *s)
    4294             : {
    4295           0 :         cache_random_seq_destroy(s);
    4296             : #ifndef CONFIG_SLUB_TINY
    4297           0 :         free_percpu(s->cpu_slab);
    4298             : #endif
    4299           0 :         free_kmem_cache_nodes(s);
    4300           0 : }
    4301             : 
    4302          53 : static int init_kmem_cache_nodes(struct kmem_cache *s)
    4303             : {
    4304             :         int node;
    4305             : 
    4306         159 :         for_each_node_mask(node, slab_nodes) {
    4307             :                 struct kmem_cache_node *n;
    4308             : 
    4309          53 :                 if (slab_state == DOWN) {
    4310           1 :                         early_kmem_cache_node_alloc(node);
    4311           1 :                         continue;
    4312             :                 }
    4313          52 :                 n = kmem_cache_alloc_node(kmem_cache_node,
    4314             :                                                 GFP_KERNEL, node);
    4315             : 
    4316          52 :                 if (!n) {
    4317           0 :                         free_kmem_cache_nodes(s);
    4318           0 :                         return 0;
    4319             :                 }
    4320             : 
    4321          52 :                 init_kmem_cache_node(n);
    4322          52 :                 s->node[node] = n;
    4323             :         }
    4324             :         return 1;
    4325             : }
    4326             : 
    4327             : static void set_cpu_partial(struct kmem_cache *s)
    4328             : {
    4329             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    4330             :         unsigned int nr_objects;
    4331             : 
    4332             :         /*
    4333             :          * cpu_partial determines the maximum number of objects kept in the
    4334             :          * per cpu partial lists of a processor.
    4335             :          *
    4336             :          * Per cpu partial lists mainly contain slabs that just have one
    4337             :          * object freed. If they are used for allocation then they can be
    4338             :          * filled up again with minimal effort. The slab will never hit the
    4339             :          * per node partial lists and therefore no locking will be required.
    4340             :          *
    4341             :          * For backwards compatibility reasons, this is determined as a number
    4342             :          * of objects, even though we now limit the maximum number of pages; see
    4343             :          * slub_set_cpu_partial().
    4344             :          */
    4345             :         if (!kmem_cache_has_cpu_partial(s))
    4346             :                 nr_objects = 0;
    4347             :         else if (s->size >= PAGE_SIZE)
    4348             :                 nr_objects = 6;
    4349             :         else if (s->size >= 1024)
    4350             :                 nr_objects = 24;
    4351             :         else if (s->size >= 256)
    4352             :                 nr_objects = 52;
    4353             :         else
    4354             :                 nr_objects = 120;
    4355             : 
    4356             :         slub_set_cpu_partial(s, nr_objects);
    4357             : #endif
    4358             : }
    4359             : 
    4360             : /*
    4361             :  * calculate_sizes() determines the order and the distribution of data within
    4362             :  * a slab object.
    4363             :  */
    4364          53 : static int calculate_sizes(struct kmem_cache *s)
    4365             : {
    4366          53 :         slab_flags_t flags = s->flags;
    4367          53 :         unsigned int size = s->object_size;
    4368             :         unsigned int order;
    4369             : 
    4370             :         /*
    4371             :          * Round up object size to the next word boundary. We can only
    4372             :          * place the free pointer at word boundaries and this determines
    4373             :          * the possible location of the free pointer.
    4374             :          */
    4375          53 :         size = ALIGN(size, sizeof(void *));
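        /* e.g. a 13-byte object rounds up to ALIGN(13, 8) == 16 on a 64-bit build */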
    4376             : 
    4377             : #ifdef CONFIG_SLUB_DEBUG
    4378             :         /*
    4379             :          * Determine if we can poison the object itself. If the user of
    4380             :          * the slab may touch the object after free or before allocation
    4381             :          * then we should never poison the object itself.
    4382             :          */
    4383          53 :         if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
    4384           0 :                         !s->ctor)
    4385           0 :                 s->flags |= __OBJECT_POISON;
    4386             :         else
    4387          53 :                 s->flags &= ~__OBJECT_POISON;
    4388             : 
    4389             : 
    4390             :         /*
    4391             :          * If we are Redzoning then check if there is some space between the
    4392             :          * end of the object and the free pointer. If not then add an
    4393             :          * additional word to have some bytes to store Redzone information.
    4394             :          */
    4395          53 :         if ((flags & SLAB_RED_ZONE) && size == s->object_size)
    4396           0 :                 size += sizeof(void *);
    4397             : #endif
    4398             : 
    4399             :         /*
    4400             :          * With that we have determined the number of bytes in actual use
    4401             :          * by the object and redzoning.
    4402             :          */
    4403          53 :         s->inuse = size;
    4404             : 
    4405          53 :         if (slub_debug_orig_size(s) ||
    4406          48 :             (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
    4407          48 :             ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
    4408          48 :             s->ctor) {
    4409             :                 /*
    4410             :                  * Relocate free pointer after the object if it is not
    4411             :                  * permitted to overwrite the first word of the object on
    4412             :                  * kmem_cache_free.
    4413             :                  *
    4414             :                  * This is the case if we do RCU, have a constructor or
    4415             :                  * destructor, are poisoning the objects, or are
    4416             :                  * redzoning an object smaller than sizeof(void *).
    4417             :                  *
    4418             :                  * The assumption that s->offset >= s->inuse means free
    4419             :                  * pointer is outside of the object is used in the
    4420             :                  * freeptr_outside_object() function. If that is no
    4421             :                  * longer true, the function needs to be modified.
    4422             :                  */
    4423          10 :                 s->offset = size;
    4424          10 :                 size += sizeof(void *);
    4425             :         } else {
    4426             :                 /*
    4427             :                  * Store freelist pointer near middle of object to keep
    4428             :                  * it away from the edges of the object to avoid small
    4429             :                  * sized over/underflows from neighboring allocations.
    4430             :                  */
    4431          43 :                 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
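                /*
                 * For example, a 192-byte object on a 64-bit build ends up
                 * with its free pointer at ALIGN_DOWN(192 / 2, 8) == 96.
                 */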
    4432             :         }
    4433             : 
    4434             : #ifdef CONFIG_SLUB_DEBUG
    4435          53 :         if (flags & SLAB_STORE_USER) {
    4436             :                 /*
    4437             :                  * Need to store information about allocs and frees after
    4438             :                  * the object.
    4439             :                  */
    4440           0 :                 size += 2 * sizeof(struct track);
    4441             : 
    4442             :                 /* Save the original kmalloc request size */
    4443           0 :                 if (flags & SLAB_KMALLOC)
    4444           0 :                         size += sizeof(unsigned int);
    4445             :         }
    4446             : #endif
    4447             : 
    4448          53 :         kasan_cache_create(s, &size, &s->flags);
    4449             : #ifdef CONFIG_SLUB_DEBUG
    4450          53 :         if (flags & SLAB_RED_ZONE) {
    4451             :                 /*
    4452             :                  * Add some empty padding so that we can catch
    4453             :                  * overwrites from earlier objects rather than let
    4454             :                  * tracking information or the free pointer be
    4455             :                  * corrupted if a user writes before the start
    4456             :                  * of the object.
    4457             :                  */
    4458           0 :                 size += sizeof(void *);
    4459             : 
    4460             :                 s->red_left_pad = sizeof(void *);
    4461           0 :                 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
    4462           0 :                 size += s->red_left_pad;
    4463             :         }
    4464             : #endif
    4465             : 
    4466             :         /*
    4467             :          * SLUB stores one object immediately after another beginning from
    4468             :          * offset 0. In order to align the objects we have to simply size
    4469             :          * each object to conform to the alignment.
    4470             :          */
    4471          53 :         size = ALIGN(size, s->align);
    4472          53 :         s->size = size;
    4473          53 :         s->reciprocal_size = reciprocal_value(size);
    4474          53 :         order = calculate_order(size);
    4475             : 
    4476          53 :         if ((int)order < 0)
    4477             :                 return 0;
    4478             : 
    4479          53 :         s->allocflags = 0;
    4480          53 :         if (order)
    4481          18 :                 s->allocflags |= __GFP_COMP;
    4482             : 
    4483          53 :         if (s->flags & SLAB_CACHE_DMA)
    4484           0 :                 s->allocflags |= GFP_DMA;
    4485             : 
    4486          53 :         if (s->flags & SLAB_CACHE_DMA32)
    4487           0 :                 s->allocflags |= GFP_DMA32;
    4488             : 
    4489          53 :         if (s->flags & SLAB_RECLAIM_ACCOUNT)
    4490          18 :                 s->allocflags |= __GFP_RECLAIMABLE;
    4491             : 
    4492             :         /*
    4493             :          * Determine the number of objects per slab
    4494             :          */
    4495         106 :         s->oo = oo_make(order, size);
    4496         159 :         s->min = oo_make(get_order(size), size);
    4497             : 
    4498          53 :         return !!oo_objects(s->oo);
    4499             : }
    4500             : 
    4501          53 : static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
    4502             : {
    4503          53 :         s->flags = kmem_cache_flags(s->size, flags, s->name);
    4504             : #ifdef CONFIG_SLAB_FREELIST_HARDENED
    4505             :         s->random = get_random_long();
    4506             : #endif
    4507             : 
    4508          53 :         if (!calculate_sizes(s))
    4509             :                 goto error;
    4510          53 :         if (disable_higher_order_debug) {
    4511             :                 /*
    4512             :                  * Disable debugging flags that store metadata if the min slab
    4513             :                  * order increased.
    4514             :                  */
    4515           0 :                 if (get_order(s->size) > get_order(s->object_size)) {
    4516           0 :                         s->flags &= ~DEBUG_METADATA_FLAGS;
    4517           0 :                         s->offset = 0;
    4518           0 :                         if (!calculate_sizes(s))
    4519             :                                 goto error;
    4520             :                 }
    4521             :         }
    4522             : 
    4523             : #ifdef system_has_freelist_aba
    4524             :         if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
    4525             :                 /* Enable fast mode */
    4526             :                 s->flags |= __CMPXCHG_DOUBLE;
    4527             :         }
    4528             : #endif
    4529             : 
    4530             :         /*
    4531             :          * The larger the object size is, the more slabs we want on the partial
    4532             :          * list to avoid pounding the page allocator excessively.
    4533             :          */
    4534         106 :         s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
    4535          53 :         s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
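        /*
         * For example, a 4096-byte object gives ilog2(4096) / 2 == 6, which
         * is then clamped into the [MIN_PARTIAL, MAX_PARTIAL] range.
         */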
    4536             : 
    4537          53 :         set_cpu_partial(s);
    4538             : 
    4539             : #ifdef CONFIG_NUMA
    4540             :         s->remote_node_defrag_ratio = 1000;
    4541             : #endif
    4542             : 
    4543             :         /* Initialize the pre-computed randomized freelist if slab is up */
    4544             :         if (slab_state >= UP) {
    4545             :                 if (init_cache_random_seq(s))
    4546             :                         goto error;
    4547             :         }
    4548             : 
    4549          53 :         if (!init_kmem_cache_nodes(s))
    4550             :                 goto error;
    4551             : 
    4552          53 :         if (alloc_kmem_cache_cpus(s))
    4553             :                 return 0;
    4554             : 
    4555             : error:
    4556           0 :         __kmem_cache_release(s);
    4557           0 :         return -EINVAL;
    4558             : }
    4559             : 
    4560           0 : static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
    4561             :                               const char *text)
    4562             : {
    4563             : #ifdef CONFIG_SLUB_DEBUG
    4564           0 :         void *addr = slab_address(slab);
    4565             :         void *p;
    4566             : 
    4567           0 :         slab_err(s, slab, text, s->name);
    4568             : 
    4569           0 :         spin_lock(&object_map_lock);
    4570           0 :         __fill_map(object_map, s, slab);
    4571             : 
    4572           0 :         for_each_object(p, s, addr, slab->objects) {
    4573             : 
    4574           0 :                 if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
    4575           0 :                         pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
    4576           0 :                         print_tracking(s, p);
    4577             :                 }
    4578             :         }
    4579           0 :         spin_unlock(&object_map_lock);
    4580             : #endif
    4581           0 : }
    4582             : 
    4583             : /*
    4584             :  * Attempt to free all partial slabs on a node.
    4585             :  * This is called from __kmem_cache_shutdown(). We must take list_lock
    4586             :  * because a sysfs file might still access the partial list after the shutdown.
    4587             :  */
    4588           0 : static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
    4589             : {
    4590           0 :         LIST_HEAD(discard);
    4591             :         struct slab *slab, *h;
    4592             : 
    4593           0 :         BUG_ON(irqs_disabled());
    4594           0 :         spin_lock_irq(&n->list_lock);
    4595           0 :         list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
    4596           0 :                 if (!slab->inuse) {
    4597           0 :                         remove_partial(n, slab);
    4598           0 :                         list_add(&slab->slab_list, &discard);
    4599             :                 } else {
    4600           0 :                         list_slab_objects(s, slab,
    4601             :                           "Objects remaining in %s on __kmem_cache_shutdown()");
    4602             :                 }
    4603             :         }
    4604           0 :         spin_unlock_irq(&n->list_lock);
    4605             : 
    4606           0 :         list_for_each_entry_safe(slab, h, &discard, slab_list)
    4607           0 :                 discard_slab(s, slab);
    4608           0 : }
    4609             : 
    4610           0 : bool __kmem_cache_empty(struct kmem_cache *s)
    4611             : {
    4612             :         int node;
    4613             :         struct kmem_cache_node *n;
    4614             : 
    4615           0 :         for_each_kmem_cache_node(s, node, n)
    4616           0 :                 if (n->nr_partial || node_nr_slabs(n))
    4617             :                         return false;
    4618             :         return true;
    4619             : }
    4620             : 
    4621             : /*
    4622             :  * Release all resources used by a slab cache.
    4623             :  */
    4624           0 : int __kmem_cache_shutdown(struct kmem_cache *s)
    4625             : {
    4626             :         int node;
    4627             :         struct kmem_cache_node *n;
    4628             : 
    4629           0 :         flush_all_cpus_locked(s);
    4630             :         /* Attempt to free all objects */
    4631           0 :         for_each_kmem_cache_node(s, node, n) {
    4632           0 :                 free_partial(s, n);
    4633           0 :                 if (n->nr_partial || node_nr_slabs(n))
    4634             :                         return 1;
    4635             :         }
    4636             :         return 0;
    4637             : }
    4638             : 
    4639             : #ifdef CONFIG_PRINTK
    4640           0 : void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
    4641             : {
    4642             :         void *base;
    4643             :         int __maybe_unused i;
    4644             :         unsigned int objnr;
    4645             :         void *objp;
    4646             :         void *objp0;
    4647           0 :         struct kmem_cache *s = slab->slab_cache;
    4648             :         struct track __maybe_unused *trackp;
    4649             : 
    4650           0 :         kpp->kp_ptr = object;
    4651           0 :         kpp->kp_slab = slab;
    4652           0 :         kpp->kp_slab_cache = s;
    4653           0 :         base = slab_address(slab);
    4654           0 :         objp0 = kasan_reset_tag(object);
    4655             : #ifdef CONFIG_SLUB_DEBUG
    4656           0 :         objp = restore_red_left(s, objp0);
    4657             : #else
    4658             :         objp = objp0;
    4659             : #endif
    4660           0 :         objnr = obj_to_index(s, slab, objp);
    4661           0 :         kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
    4662           0 :         objp = base + s->size * objnr;
    4663           0 :         kpp->kp_objp = objp;
    4664           0 :         if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
    4665           0 :                          || (objp - base) % s->size) ||
    4666           0 :             !(s->flags & SLAB_STORE_USER))
    4667             :                 return;
    4668             : #ifdef CONFIG_SLUB_DEBUG
    4669           0 :         objp = fixup_red_left(s, objp);
    4670           0 :         trackp = get_track(s, objp, TRACK_ALLOC);
    4671           0 :         kpp->kp_ret = (void *)trackp->addr;
    4672             : #ifdef CONFIG_STACKDEPOT
    4673             :         {
    4674             :                 depot_stack_handle_t handle;
    4675             :                 unsigned long *entries;
    4676             :                 unsigned int nr_entries;
    4677             : 
    4678           0 :                 handle = READ_ONCE(trackp->handle);
    4679           0 :                 if (handle) {
    4680           0 :                         nr_entries = stack_depot_fetch(handle, &entries);
    4681           0 :                         for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
    4682           0 :                                 kpp->kp_stack[i] = (void *)entries[i];
    4683             :                 }
    4684             : 
    4685           0 :                 trackp = get_track(s, objp, TRACK_FREE);
    4686           0 :                 handle = READ_ONCE(trackp->handle);
    4687           0 :                 if (handle) {
    4688           0 :                         nr_entries = stack_depot_fetch(handle, &entries);
    4689           0 :                         for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
    4690           0 :                                 kpp->kp_free_stack[i] = (void *)entries[i];
    4691             :                 }
    4692             :         }
    4693             : #endif
    4694             : #endif
    4695             : }
    4696             : #endif
    4697             : 
    4698             : /********************************************************************
    4699             :  *              Kmalloc subsystem
    4700             :  *******************************************************************/
    4701             : 
    4702           0 : static int __init setup_slub_min_order(char *str)
    4703             : {
    4704           0 :         get_option(&str, (int *)&slub_min_order);
    4705             : 
    4706           0 :         return 1;
    4707             : }
    4708             : 
    4709             : __setup("slub_min_order=", setup_slub_min_order);
    4710             : 
    4711           0 : static int __init setup_slub_max_order(char *str)
    4712             : {
    4713           0 :         get_option(&str, (int *)&slub_max_order);
    4714           0 :         slub_max_order = min_t(unsigned int, slub_max_order, MAX_ORDER);
    4715             : 
    4716           0 :         return 1;
    4717             : }
    4718             : 
    4719             : __setup("slub_max_order=", setup_slub_max_order);
    4720             : 
    4721           0 : static int __init setup_slub_min_objects(char *str)
    4722             : {
    4723           0 :         get_option(&str, (int *)&slub_min_objects);
    4724             : 
    4725           0 :         return 1;
    4726             : }
    4727             : 
    4728             : __setup("slub_min_objects=", setup_slub_min_objects);
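/*
 * The three __setup() handlers above register the slub_min_order=,
 * slub_max_order= and slub_min_objects= kernel command-line parameters;
 * booting with e.g. "slub_max_order=2" asks SLUB to prefer slab pages of
 * at most order 2.
 */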
    4729             : 
    4730             : #ifdef CONFIG_HARDENED_USERCOPY
    4731             : /*
    4732             :  * Rejects incorrectly sized objects and objects that are to be copied
    4733             :  * to/from userspace but do not fall entirely within the containing slab
    4734             :  * cache's usercopy region.
    4735             :  *
    4736             :  * Returns normally if the check passes; otherwise reports the offending
    4737             :  * cache via usercopy_abort(), which does not return.
    4738             :  */
    4739             : void __check_heap_object(const void *ptr, unsigned long n,
    4740             :                          const struct slab *slab, bool to_user)
    4741             : {
    4742             :         struct kmem_cache *s;
    4743             :         unsigned int offset;
    4744             :         bool is_kfence = is_kfence_address(ptr);
    4745             : 
    4746             :         ptr = kasan_reset_tag(ptr);
    4747             : 
    4748             :         /* Find object and usable object size. */
    4749             :         s = slab->slab_cache;
    4750             : 
    4751             :         /* Reject impossible pointers. */
    4752             :         if (ptr < slab_address(slab))
    4753             :                 usercopy_abort("SLUB object not in SLUB page?!", NULL,
    4754             :                                to_user, 0, n);
    4755             : 
    4756             :         /* Find offset within object. */
    4757             :         if (is_kfence)
    4758             :                 offset = ptr - kfence_object_start(ptr);
    4759             :         else
    4760             :                 offset = (ptr - slab_address(slab)) % s->size;
    4761             : 
    4762             :         /* Adjust for redzone and reject if within the redzone. */
    4763             :         if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
    4764             :                 if (offset < s->red_left_pad)
    4765             :                         usercopy_abort("SLUB object in left red zone",
    4766             :                                        s->name, to_user, offset, n);
    4767             :                 offset -= s->red_left_pad;
    4768             :         }
    4769             : 
    4770             :         /* Allow address range falling entirely within usercopy region. */
    4771             :         if (offset >= s->useroffset &&
    4772             :             offset - s->useroffset <= s->usersize &&
    4773             :             n <= s->useroffset - offset + s->usersize)
    4774             :                 return;
    4775             : 
    4776             :         usercopy_abort("SLUB object", s->name, to_user, offset, n);
    4777             : }
    4778             : #endif /* CONFIG_HARDENED_USERCOPY */
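/*
 * Worked example for the usercopy check above (hypothetical numbers): with
 * useroffset == 16 and usersize == 32, copying n == 24 bytes at offset == 20
 * passes all three tests (20 >= 16, 20 - 16 <= 32, 24 <= 16 - 20 + 32),
 * whereas n == 30 at the same offset would trip usercopy_abort().
 */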
    4779             : 
    4780             : #define SHRINK_PROMOTE_MAX 32
    4781             : 
    4782             : /*
    4783             :  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
    4784             :  * up most to the head of the partial lists. New allocations will then
    4785             :  * fill those up and thus they can be removed from the partial lists.
    4786             :  *
    4787             :  * The slabs with the least items are placed last. This results in them
    4788             :  * being allocated from last, increasing the chance that their remaining
    4789             :  * objects are freed and the slabs can then be discarded.
    4790             :  */
    4791           0 : static int __kmem_cache_do_shrink(struct kmem_cache *s)
    4792             : {
    4793             :         int node;
    4794             :         int i;
    4795             :         struct kmem_cache_node *n;
    4796             :         struct slab *slab;
    4797             :         struct slab *t;
    4798             :         struct list_head discard;
    4799             :         struct list_head promote[SHRINK_PROMOTE_MAX];
    4800             :         unsigned long flags;
    4801           0 :         int ret = 0;
    4802             : 
    4803           0 :         for_each_kmem_cache_node(s, node, n) {
    4804           0 :                 INIT_LIST_HEAD(&discard);
    4805           0 :                 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
    4806           0 :                         INIT_LIST_HEAD(promote + i);
    4807             : 
    4808           0 :                 spin_lock_irqsave(&n->list_lock, flags);
    4809             : 
    4810             :                 /*
    4811             :                  * Build lists of slabs to discard or promote.
    4812             :                  *
    4813             :                  * Note that concurrent frees may occur while we hold the
    4814             :                  * list_lock. slab->inuse here is the upper limit.
    4815             :                  */
    4816           0 :                 list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
    4817           0 :                         int free = slab->objects - slab->inuse;
    4818             : 
    4819             :                         /* Do not reread slab->inuse */
    4820           0 :                         barrier();
    4821             : 
    4822             :                         /* We do not keep full slabs on the list */
    4823           0 :                         BUG_ON(free <= 0);
    4824             : 
    4825           0 :                         if (free == slab->objects) {
    4826           0 :                                 list_move(&slab->slab_list, &discard);
    4827           0 :                                 n->nr_partial--;
    4828           0 :                                 dec_slabs_node(s, node, slab->objects);
    4829           0 :                         } else if (free <= SHRINK_PROMOTE_MAX)
    4830           0 :                                 list_move(&slab->slab_list, promote + free - 1);
    4831             :                 }
    4832             : 
    4833             :                 /*
    4834             :                  * Promote the slabs filled up most to the head of the
    4835             :                  * partial list.
    4836             :                  */
    4837           0 :                 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
    4838           0 :                         list_splice(promote + i, &n->partial);
    4839             : 
    4840           0 :                 spin_unlock_irqrestore(&n->list_lock, flags);
    4841             : 
    4842             :                 /* Release empty slabs */
    4843           0 :                 list_for_each_entry_safe(slab, t, &discard, slab_list)
    4844           0 :                         free_slab(s, slab);
    4845             : 
    4846           0 :                 if (node_nr_slabs(n))
    4847           0 :                         ret = 1;
    4848             :         }
    4849             : 
    4850           0 :         return ret;
    4851             : }
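/*
 * To illustrate the promotion buckets in __kmem_cache_do_shrink() above: a
 * slab with slab->objects == 32 and slab->inuse == 30 has free == 2 and is
 * moved to promote[1]; splicing promote[SHRINK_PROMOTE_MAX - 1] down to
 * promote[0] back onto the partial list then leaves the fullest slabs at
 * the head.
 */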
    4852             : 
    4853           0 : int __kmem_cache_shrink(struct kmem_cache *s)
    4854             : {
    4855           0 :         flush_all(s);
    4856           0 :         return __kmem_cache_do_shrink(s);
    4857             : }
    4858             : 
    4859             : static int slab_mem_going_offline_callback(void *arg)
    4860             : {
    4861             :         struct kmem_cache *s;
    4862             : 
    4863             :         mutex_lock(&slab_mutex);
    4864             :         list_for_each_entry(s, &slab_caches, list) {
    4865             :                 flush_all_cpus_locked(s);
    4866             :                 __kmem_cache_do_shrink(s);
    4867             :         }
    4868             :         mutex_unlock(&slab_mutex);
    4869             : 
    4870             :         return 0;
    4871             : }
    4872             : 
    4873             : static void slab_mem_offline_callback(void *arg)
    4874             : {
    4875             :         struct memory_notify *marg = arg;
    4876             :         int offline_node;
    4877             : 
    4878             :         offline_node = marg->status_change_nid_normal;
    4879             : 
    4880             :         /*
    4881             :          * If the node still has available memory, we still need its
    4882             :          * kmem_cache_node structure, so there is nothing to do here.
    4883             :          */
    4884             :         if (offline_node < 0)
    4885             :                 return;
    4886             : 
    4887             :         mutex_lock(&slab_mutex);
    4888             :         node_clear(offline_node, slab_nodes);
    4889             :         /*
    4890             :          * We no longer free kmem_cache_node structures here, as it would be
    4891             :          * racy with all get_node() users, and infeasible to protect them with
    4892             :          * slab_mutex.
    4893             :          */
    4894             :         mutex_unlock(&slab_mutex);
    4895             : }
    4896             : 
    4897             : static int slab_mem_going_online_callback(void *arg)
    4898             : {
    4899             :         struct kmem_cache_node *n;
    4900             :         struct kmem_cache *s;
    4901             :         struct memory_notify *marg = arg;
    4902             :         int nid = marg->status_change_nid_normal;
    4903             :         int ret = 0;
    4904             : 
    4905             :         /*
    4906             :          * If the node's memory is already available, then kmem_cache_node is
    4907             :          * already created. Nothing to do.
    4908             :          */
    4909             :         if (nid < 0)
    4910             :                 return 0;
    4911             : 
    4912             :         /*
    4913             :          * We are bringing a node online. No memory is available yet. We must
    4914             :          * allocate a kmem_cache_node structure in order to bring the node
    4915             :          * online.
    4916             :          */
    4917             :         mutex_lock(&slab_mutex);
    4918             :         list_for_each_entry(s, &slab_caches, list) {
    4919             :                 /*
    4920             :                  * The structure may already exist if the node was previously
    4921             :                  * onlined and offlined.
    4922             :                  */
    4923             :                 if (get_node(s, nid))
    4924             :                         continue;
    4925             :                 /*
    4926             :                  * XXX: kmem_cache_alloc_node will fall back to other nodes
    4927             :                  *      since memory is not yet available from the node that
    4928             :                  *      is brought up.
    4929             :                  */
    4930             :                 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
    4931             :                 if (!n) {
    4932             :                         ret = -ENOMEM;
    4933             :                         goto out;
    4934             :                 }
    4935             :                 init_kmem_cache_node(n);
    4936             :                 s->node[nid] = n;
    4937             :         }
    4938             :         /*
    4939             :          * Any cache created after this point will also have kmem_cache_node
    4940             :          * initialized for the new node.
    4941             :          */
    4942             :         node_set(nid, slab_nodes);
    4943             : out:
    4944             :         mutex_unlock(&slab_mutex);
    4945             :         return ret;
    4946             : }
    4947             : 
    4948             : static int slab_memory_callback(struct notifier_block *self,
    4949             :                                 unsigned long action, void *arg)
    4950             : {
    4951             :         int ret = 0;
    4952             : 
    4953             :         switch (action) {
    4954             :         case MEM_GOING_ONLINE:
    4955             :                 ret = slab_mem_going_online_callback(arg);
    4956             :                 break;
    4957             :         case MEM_GOING_OFFLINE:
    4958             :                 ret = slab_mem_going_offline_callback(arg);
    4959             :                 break;
    4960             :         case MEM_OFFLINE:
    4961             :         case MEM_CANCEL_ONLINE:
    4962             :                 slab_mem_offline_callback(arg);
    4963             :                 break;
    4964             :         case MEM_ONLINE:
    4965             :         case MEM_CANCEL_OFFLINE:
    4966             :                 break;
    4967             :         }
    4968             :         if (ret)
    4969             :                 ret = notifier_from_errno(ret);
    4970             :         else
    4971             :                 ret = NOTIFY_OK;
    4972             :         return ret;
    4973             : }
    4974             : 
    4975             : /********************************************************************
    4976             :  *                      Basic setup of slabs
    4977             :  *******************************************************************/
    4978             : 
    4979             : /*
    4980             :  * Used for early kmem_cache structures that were allocated using
    4981             :  * the page allocator. Allocate them properly then fix up the pointers
    4982             :  * that may be pointing to the wrong kmem_cache structure.
    4983             :  */
    4984             : 
    4985           2 : static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
    4986             : {
    4987             :         int node;
    4988           4 :         struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
    4989             :         struct kmem_cache_node *n;
    4990             : 
    4991           4 :         memcpy(s, static_cache, kmem_cache->object_size);
    4992             : 
    4993             :         /*
    4994             :          * This runs very early, and only the boot processor is supposed to be
    4995             :          * up.  Even if it weren't true, IRQs are not up so we couldn't fire
    4996             :          * IPIs around.
    4997             :          */
    4998           2 :         __flush_cpu_slab(s, smp_processor_id());
    4999           6 :         for_each_kmem_cache_node(s, node, n) {
    5000             :                 struct slab *p;
    5001             : 
    5002           4 :                 list_for_each_entry(p, &n->partial, slab_list)
    5003           2 :                         p->slab_cache = s;
    5004             : 
    5005             : #ifdef CONFIG_SLUB_DEBUG
    5006           2 :                 list_for_each_entry(p, &n->full, slab_list)
    5007           0 :                         p->slab_cache = s;
    5008             : #endif
    5009             :         }
    5010           4 :         list_add(&s->list, &slab_caches);
    5011           2 :         return s;
    5012             : }
    5013             : 
    5014           1 : void __init kmem_cache_init(void)
    5015             : {
    5016             :         static __initdata struct kmem_cache boot_kmem_cache,
    5017             :                 boot_kmem_cache_node;
    5018             :         int node;
    5019             : 
    5020             :         if (debug_guardpage_minorder())
    5021             :                 slub_max_order = 0;
    5022             : 
    5023             :         /* Print slub debugging pointers without hashing */
    5024           1 :         if (__slub_debug_enabled())
    5025           0 :                 no_hash_pointers_enable(NULL);
    5026             : 
    5027           1 :         kmem_cache_node = &boot_kmem_cache_node;
    5028           1 :         kmem_cache = &boot_kmem_cache;
    5029             : 
    5030             :         /*
    5031             :          * Initialize the nodemask for which we will allocate per node
    5032             :          * structures. Here we don't need to take the slab_mutex yet.
    5033             :          */
    5034           3 :         for_each_node_state(node, N_NORMAL_MEMORY)
    5035             :                 node_set(node, slab_nodes);
    5036             : 
    5037           1 :         create_boot_cache(kmem_cache_node, "kmem_cache_node",
    5038             :                 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
    5039             : 
    5040           1 :         hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
    5041             : 
    5042             :         /* Able to allocate the per node structures */
    5043           1 :         slab_state = PARTIAL;
    5044             : 
    5045           1 :         create_boot_cache(kmem_cache, "kmem_cache",
    5046             :                         offsetof(struct kmem_cache, node) +
    5047             :                                 nr_node_ids * sizeof(struct kmem_cache_node *),
    5048             :                        SLAB_HWCACHE_ALIGN, 0, 0);
    5049             : 
    5050           1 :         kmem_cache = bootstrap(&boot_kmem_cache);
    5051           1 :         kmem_cache_node = bootstrap(&boot_kmem_cache_node);
    5052             : 
    5053             :         /* Now we can use the kmem_cache to allocate kmalloc slabs */
    5054           1 :         setup_kmalloc_cache_index_table();
    5055           1 :         create_kmalloc_caches(0);
    5056             : 
    5057             :         /* Setup random freelists for each cache */
    5058           1 :         init_freelist_randomization();
    5059             : 
    5060           1 :         cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
    5061             :                                   slub_cpu_dead);
    5062             : 
    5063           1 :         pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
    5064             :                 cache_line_size(),
    5065             :                 slub_min_order, slub_max_order, slub_min_objects,
    5066             :                 nr_cpu_ids, nr_node_ids);
    5067           1 : }
    5068             : 
    5069           1 : void __init kmem_cache_init_late(void)
    5070             : {
    5071             : #ifndef CONFIG_SLUB_TINY
    5072           1 :         flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
    5073           1 :         WARN_ON(!flushwq);
    5074             : #endif
    5075           1 : }
    5076             : 
    5077             : struct kmem_cache *
    5078          57 : __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
    5079             :                    slab_flags_t flags, void (*ctor)(void *))
    5080             : {
    5081             :         struct kmem_cache *s;
    5082             : 
    5083          57 :         s = find_mergeable(size, align, flags, name, ctor);
    5084          57 :         if (s) {
    5085          32 :                 if (sysfs_slab_alias(s, name))
    5086             :                         return NULL;
    5087             : 
    5088          32 :                 s->refcount++;
    5089             : 
    5090             :                 /*
    5091             :                  * Adjust the object sizes so that we clear
    5092             :                  * the complete object on kzalloc.
    5093             :                  */
    5094          32 :                 s->object_size = max(s->object_size, size);
    5095          32 :                 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
    5096             :         }
    5097             : 
    5098             :         return s;
    5099             : }
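
For illustration, assuming a 64-bit build and a hypothetical 44-byte cache request
that find_mergeable() matches against an existing cache whose object_size is 48:
the alias path above keeps object_size at max(48, 44) = 48 and raises inuse to at
least ALIGN(44, sizeof(void *)) = 48, so a kzalloc() through the alias still clears
the complete underlying object.
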
    5100             : 
    5101          53 : int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
    5102             : {
    5103             :         int err;
    5104             : 
    5105          53 :         err = kmem_cache_open(s, flags);
    5106          53 :         if (err)
    5107             :                 return err;
    5108             : 
    5109             :         /* Mutex is not taken during early boot */
    5110          53 :         if (slab_state <= UP)
    5111             :                 return 0;
    5112             : 
    5113           0 :         err = sysfs_slab_add(s);
    5114           0 :         if (err) {
    5115           0 :                 __kmem_cache_release(s);
    5116           0 :                 return err;
    5117             :         }
    5118             : 
    5119             :         if (s->flags & SLAB_STORE_USER)
    5120             :                 debugfs_slab_add(s);
    5121             : 
    5122             :         return 0;
    5123             : }
    5124             : 
    5125             : #ifdef SLAB_SUPPORTS_SYSFS
    5126           0 : static int count_inuse(struct slab *slab)
    5127             : {
    5128           0 :         return slab->inuse;
    5129             : }
    5130             : 
    5131           0 : static int count_total(struct slab *slab)
    5132             : {
    5133           0 :         return slab->objects;
    5134             : }
    5135             : #endif
    5136             : 
    5137             : #ifdef CONFIG_SLUB_DEBUG
    5138           0 : static void validate_slab(struct kmem_cache *s, struct slab *slab,
    5139             :                           unsigned long *obj_map)
    5140             : {
    5141             :         void *p;
    5142           0 :         void *addr = slab_address(slab);
    5143             : 
    5144           0 :         if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
    5145             :                 return;
    5146             : 
    5147             :         /* Now we know that a valid freelist exists */
    5148           0 :         __fill_map(obj_map, s, slab);
    5149           0 :         for_each_object(p, s, addr, slab->objects) {
    5150           0 :                 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
    5151             :                          SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
    5152             : 
    5153           0 :                 if (!check_object(s, slab, p, val))
    5154             :                         break;
    5155             :         }
    5156             : }
    5157             : 
    5158           0 : static int validate_slab_node(struct kmem_cache *s,
    5159             :                 struct kmem_cache_node *n, unsigned long *obj_map)
    5160             : {
    5161           0 :         unsigned long count = 0;
    5162             :         struct slab *slab;
    5163             :         unsigned long flags;
    5164             : 
    5165           0 :         spin_lock_irqsave(&n->list_lock, flags);
    5166             : 
    5167           0 :         list_for_each_entry(slab, &n->partial, slab_list) {
    5168           0 :                 validate_slab(s, slab, obj_map);
    5169           0 :                 count++;
    5170             :         }
    5171           0 :         if (count != n->nr_partial) {
    5172           0 :                 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
    5173             :                        s->name, count, n->nr_partial);
    5174           0 :                 slab_add_kunit_errors();
    5175             :         }
    5176             : 
    5177           0 :         if (!(s->flags & SLAB_STORE_USER))
    5178             :                 goto out;
    5179             : 
    5180           0 :         list_for_each_entry(slab, &n->full, slab_list) {
    5181           0 :                 validate_slab(s, slab, obj_map);
    5182           0 :                 count++;
    5183             :         }
    5184           0 :         if (count != node_nr_slabs(n)) {
    5185           0 :                 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
    5186             :                        s->name, count, node_nr_slabs(n));
    5187           0 :                 slab_add_kunit_errors();
    5188             :         }
    5189             : 
    5190             : out:
    5191           0 :         spin_unlock_irqrestore(&n->list_lock, flags);
    5192           0 :         return count;
    5193             : }
    5194             : 
    5195           0 : long validate_slab_cache(struct kmem_cache *s)
    5196             : {
    5197             :         int node;
    5198           0 :         unsigned long count = 0;
    5199             :         struct kmem_cache_node *n;
    5200             :         unsigned long *obj_map;
    5201             : 
    5202           0 :         obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
    5203           0 :         if (!obj_map)
    5204             :                 return -ENOMEM;
    5205             : 
    5206           0 :         flush_all(s);
    5207           0 :         for_each_kmem_cache_node(s, node, n)
    5208           0 :                 count += validate_slab_node(s, n, obj_map);
    5209             : 
    5210           0 :         bitmap_free(obj_map);
    5211             : 
    5212           0 :         return count;
    5213             : }
    5214             : EXPORT_SYMBOL(validate_slab_cache);
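
validate_slab_cache() is exported, so debug-aware in-tree callers (the SLUB KUnit
self-test, for example) can trigger a full consistency walk directly. A minimal
sketch, assuming the caller can see the SLUB-internal declaration (it is not part of
the public slab API); the cache name and flag choice are purely illustrative:

        #include <linux/module.h>
        #include <linux/slab.h>

        static int __init validate_demo_init(void)
        {
                struct kmem_cache *s;
                long n;

                /* Debug flags so the consistency checks have metadata to verify. */
                s = kmem_cache_create("validate_demo", 64, 0,
                                      SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER,
                                      NULL);
                if (!s)
                        return -ENOMEM;

                n = validate_slab_cache(s);     /* slabs walked, or -ENOMEM */
                pr_info("validate_demo: walked %ld slabs\n", n);

                kmem_cache_destroy(s);
                return 0;
        }
        module_init(validate_demo_init);
        MODULE_LICENSE("GPL");
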
    5215             : 
    5216             : #ifdef CONFIG_DEBUG_FS
    5217             : /*
    5218             :  * Generate lists of code addresses where slabcache objects are allocated
    5219             :  * and freed.
    5220             :  */
    5221             : 
    5222             : struct location {
    5223             :         depot_stack_handle_t handle;
    5224             :         unsigned long count;
    5225             :         unsigned long addr;
    5226             :         unsigned long waste;
    5227             :         long long sum_time;
    5228             :         long min_time;
    5229             :         long max_time;
    5230             :         long min_pid;
    5231             :         long max_pid;
    5232             :         DECLARE_BITMAP(cpus, NR_CPUS);
    5233             :         nodemask_t nodes;
    5234             : };
    5235             : 
    5236             : struct loc_track {
    5237             :         unsigned long max;
    5238             :         unsigned long count;
    5239             :         struct location *loc;
    5240             :         loff_t idx;
    5241             : };
    5242             : 
    5243             : static struct dentry *slab_debugfs_root;
    5244             : 
    5245             : static void free_loc_track(struct loc_track *t)
    5246             : {
    5247             :         if (t->max)
    5248             :                 free_pages((unsigned long)t->loc,
    5249             :                         get_order(sizeof(struct location) * t->max));
    5250             : }
    5251             : 
    5252             : static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
    5253             : {
    5254             :         struct location *l;
    5255             :         int order;
    5256             : 
    5257             :         order = get_order(sizeof(struct location) * max);
    5258             : 
    5259             :         l = (void *)__get_free_pages(flags, order);
    5260             :         if (!l)
    5261             :                 return 0;
    5262             : 
    5263             :         if (t->count) {
    5264             :                 memcpy(l, t->loc, sizeof(struct location) * t->count);
    5265             :                 free_loc_track(t);
    5266             :         }
    5267             :         t->max = max;
    5268             :         t->loc = l;
    5269             :         return 1;
    5270             : }
    5271             : 
    5272             : static int add_location(struct loc_track *t, struct kmem_cache *s,
    5273             :                                 const struct track *track,
    5274             :                                 unsigned int orig_size)
    5275             : {
    5276             :         long start, end, pos;
    5277             :         struct location *l;
    5278             :         unsigned long caddr, chandle, cwaste;
    5279             :         unsigned long age = jiffies - track->when;
    5280             :         depot_stack_handle_t handle = 0;
    5281             :         unsigned int waste = s->object_size - orig_size;
    5282             : 
    5283             : #ifdef CONFIG_STACKDEPOT
    5284             :         handle = READ_ONCE(track->handle);
    5285             : #endif
    5286             :         start = -1;
    5287             :         end = t->count;
    5288             : 
    5289             :         for ( ; ; ) {
    5290             :                 pos = start + (end - start + 1) / 2;
    5291             : 
    5292             :                 /*
    5293             :                  * There is nothing at "end". If we end up there,
    5294             :                  * we need to insert before "end".
    5295             :                  */
    5296             :                 if (pos == end)
    5297             :                         break;
    5298             : 
    5299             :                 l = &t->loc[pos];
    5300             :                 caddr = l->addr;
    5301             :                 chandle = l->handle;
    5302             :                 cwaste = l->waste;
    5303             :                 if ((track->addr == caddr) && (handle == chandle) &&
    5304             :                         (waste == cwaste)) {
    5305             : 
    5306             :                         l->count++;
    5307             :                         if (track->when) {
    5308             :                                 l->sum_time += age;
    5309             :                                 if (age < l->min_time)
    5310             :                                         l->min_time = age;
    5311             :                                 if (age > l->max_time)
    5312             :                                         l->max_time = age;
    5313             : 
    5314             :                                 if (track->pid < l->min_pid)
    5315             :                                         l->min_pid = track->pid;
    5316             :                                 if (track->pid > l->max_pid)
    5317             :                                         l->max_pid = track->pid;
    5318             : 
    5319             :                                 cpumask_set_cpu(track->cpu,
    5320             :                                                 to_cpumask(l->cpus));
    5321             :                         }
    5322             :                         node_set(page_to_nid(virt_to_page(track)), l->nodes);
    5323             :                         return 1;
    5324             :                 }
    5325             : 
    5326             :                 if (track->addr < caddr)
    5327             :                         end = pos;
    5328             :                 else if (track->addr == caddr && handle < chandle)
    5329             :                         end = pos;
    5330             :                 else if (track->addr == caddr && handle == chandle &&
    5331             :                                 waste < cwaste)
    5332             :                         end = pos;
    5333             :                 else
    5334             :                         start = pos;
    5335             :         }
    5336             : 
    5337             :         /*
    5338             :          * Not found. Insert new tracking element.
    5339             :          */
    5340             :         if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
    5341             :                 return 0;
    5342             : 
    5343             :         l = t->loc + pos;
    5344             :         if (pos < t->count)
    5345             :                 memmove(l + 1, l,
    5346             :                         (t->count - pos) * sizeof(struct location));
    5347             :         t->count++;
    5348             :         l->count = 1;
    5349             :         l->addr = track->addr;
    5350             :         l->sum_time = age;
    5351             :         l->min_time = age;
    5352             :         l->max_time = age;
    5353             :         l->min_pid = track->pid;
    5354             :         l->max_pid = track->pid;
    5355             :         l->handle = handle;
    5356             :         l->waste = waste;
    5357             :         cpumask_clear(to_cpumask(l->cpus));
    5358             :         cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
    5359             :         nodes_clear(l->nodes);
    5360             :         node_set(page_to_nid(virt_to_page(track)), l->nodes);
    5361             :         return 1;
    5362             : }
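
The loop above is a binary search over an array kept sorted by (addr, handle, waste):
a hit bumps the existing location's counters, while a miss leaves 'pos' at the
insertion slot and the tail of the array is shifted up. A simplified, userspace-style
sketch of the same ordered-insert pattern, keyed only by address (all names here are
hypothetical):

        #include <string.h>

        struct loc { unsigned long addr; unsigned long count; };

        /* Returns 1 on success, 0 if the table is full. */
        static int add_addr(struct loc *loc, long *nr, long max, unsigned long addr)
        {
                long start = -1, end = *nr, pos;

                for (;;) {
                        pos = start + (end - start + 1) / 2;
                        if (pos == end)                 /* nothing at 'end': insert here */
                                break;
                        if (loc[pos].addr == addr) {    /* already tracked */
                                loc[pos].count++;
                                return 1;
                        }
                        if (addr < loc[pos].addr)
                                end = pos;
                        else
                                start = pos;
                }

                if (*nr >= max)
                        return 0;
                memmove(&loc[pos + 1], &loc[pos], (*nr - pos) * sizeof(*loc));
                loc[pos].addr = addr;
                loc[pos].count = 1;
                (*nr)++;
                return 1;
        }
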
    5363             : 
    5364             : static void process_slab(struct loc_track *t, struct kmem_cache *s,
    5365             :                 struct slab *slab, enum track_item alloc,
    5366             :                 unsigned long *obj_map)
    5367             : {
    5368             :         void *addr = slab_address(slab);
    5369             :         bool is_alloc = (alloc == TRACK_ALLOC);
    5370             :         void *p;
    5371             : 
    5372             :         __fill_map(obj_map, s, slab);
    5373             : 
    5374             :         for_each_object(p, s, addr, slab->objects)
    5375             :                 if (!test_bit(__obj_to_index(s, addr, p), obj_map))
    5376             :                         add_location(t, s, get_track(s, p, alloc),
    5377             :                                      is_alloc ? get_orig_size(s, p) :
    5378             :                                                 s->object_size);
    5379             : }
    5380             : #endif  /* CONFIG_DEBUG_FS   */
    5381             : #endif  /* CONFIG_SLUB_DEBUG */
    5382             : 
    5383             : #ifdef SLAB_SUPPORTS_SYSFS
    5384             : enum slab_stat_type {
    5385             :         SL_ALL,                 /* All slabs */
    5386             :         SL_PARTIAL,             /* Only partially allocated slabs */
    5387             :         SL_CPU,                 /* Only slabs used for cpu caches */
    5388             :         SL_OBJECTS,             /* Determine allocated objects not slabs */
    5389             :         SL_TOTAL                /* Determine object capacity not slabs */
    5390             : };
    5391             : 
    5392             : #define SO_ALL          (1 << SL_ALL)
    5393             : #define SO_PARTIAL      (1 << SL_PARTIAL)
    5394             : #define SO_CPU          (1 << SL_CPU)
    5395             : #define SO_OBJECTS      (1 << SL_OBJECTS)
    5396             : #define SO_TOTAL        (1 << SL_TOTAL)
    5397             : 
    5398           0 : static ssize_t show_slab_objects(struct kmem_cache *s,
    5399             :                                  char *buf, unsigned long flags)
    5400             : {
    5401           0 :         unsigned long total = 0;
    5402             :         int node;
    5403             :         int x;
    5404             :         unsigned long *nodes;
    5405           0 :         int len = 0;
    5406             : 
    5407           0 :         nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
    5408           0 :         if (!nodes)
    5409             :                 return -ENOMEM;
    5410             : 
    5411           0 :         if (flags & SO_CPU) {
    5412             :                 int cpu;
    5413             : 
    5414           0 :                 for_each_possible_cpu(cpu) {
    5415           0 :                         struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
    5416             :                                                                cpu);
    5417             :                         int node;
    5418             :                         struct slab *slab;
    5419             : 
    5420           0 :                         slab = READ_ONCE(c->slab);
    5421           0 :                         if (!slab)
    5422           0 :                                 continue;
    5423             : 
    5424           0 :                         node = slab_nid(slab);
    5425           0 :                         if (flags & SO_TOTAL)
    5426           0 :                                 x = slab->objects;
    5427           0 :                         else if (flags & SO_OBJECTS)
    5428           0 :                                 x = slab->inuse;
    5429             :                         else
    5430             :                                 x = 1;
    5431             : 
    5432           0 :                         total += x;
    5433           0 :                         nodes[node] += x;
    5434             : 
    5435             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    5436             :                         slab = slub_percpu_partial_read_once(c);
    5437             :                         if (slab) {
    5438             :                                 node = slab_nid(slab);
    5439             :                                 if (flags & SO_TOTAL)
    5440             :                                         WARN_ON_ONCE(1);
    5441             :                                 else if (flags & SO_OBJECTS)
    5442             :                                         WARN_ON_ONCE(1);
    5443             :                                 else
    5444             :                                         x = slab->slabs;
    5445             :                                 total += x;
    5446             :                                 nodes[node] += x;
    5447             :                         }
    5448             : #endif
    5449             :                 }
    5450             :         }
    5451             : 
    5452             :         /*
    5453             :          * We cannot take "mem_hotplug_lock" here with "kernfs_mutex" already
    5454             :          * held, because that would conflict with the existing lock order:
    5455             :          *
    5456             :          * mem_hotplug_lock->slab_mutex->kernfs_mutex
    5457             :          *
    5458             :          * We don't really need mem_hotplug_lock (to hold off
    5459             :          * slab_mem_going_offline_callback) here because slab's memory hot
    5460             :          * unplug code doesn't destroy the kmem_cache->node[] data.
    5461             :          */
    5462             : 
    5463             : #ifdef CONFIG_SLUB_DEBUG
    5464           0 :         if (flags & SO_ALL) {
    5465             :                 struct kmem_cache_node *n;
    5466             : 
    5467           0 :                 for_each_kmem_cache_node(s, node, n) {
    5468             : 
    5469           0 :                         if (flags & SO_TOTAL)
    5470           0 :                                 x = node_nr_objs(n);
    5471           0 :                         else if (flags & SO_OBJECTS)
    5472           0 :                                 x = node_nr_objs(n) - count_partial(n, count_free);
    5473             :                         else
    5474           0 :                                 x = node_nr_slabs(n);
    5475           0 :                         total += x;
    5476           0 :                         nodes[node] += x;
    5477             :                 }
    5478             : 
    5479             :         } else
    5480             : #endif
    5481           0 :         if (flags & SO_PARTIAL) {
    5482             :                 struct kmem_cache_node *n;
    5483             : 
    5484           0 :                 for_each_kmem_cache_node(s, node, n) {
    5485           0 :                         if (flags & SO_TOTAL)
    5486           0 :                                 x = count_partial(n, count_total);
    5487           0 :                         else if (flags & SO_OBJECTS)
    5488           0 :                                 x = count_partial(n, count_inuse);
    5489             :                         else
    5490           0 :                                 x = n->nr_partial;
    5491           0 :                         total += x;
    5492           0 :                         nodes[node] += x;
    5493             :                 }
    5494             :         }
    5495             : 
    5496           0 :         len += sysfs_emit_at(buf, len, "%lu", total);
    5497             : #ifdef CONFIG_NUMA
    5498             :         for (node = 0; node < nr_node_ids; node++) {
    5499             :                 if (nodes[node])
    5500             :                         len += sysfs_emit_at(buf, len, " N%d=%lu",
    5501             :                                              node, nodes[node]);
    5502             :         }
    5503             : #endif
    5504           0 :         len += sysfs_emit_at(buf, len, "\n");
    5505           0 :         kfree(nodes);
    5506             : 
    5507           0 :         return len;
    5508             : }
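
For illustration, on a hypothetical two-node NUMA build a read of an attribute backed
by show_slab_objects() emits the grand total first, followed by one " N<node>=<count>"
pair for each node with a non-zero count, e.g.:

        8192 N0=4096 N1=4096
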
    5509             : 
    5510             : #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
    5511             : #define to_slab(n) container_of(n, struct kmem_cache, kobj)
    5512             : 
    5513             : struct slab_attribute {
    5514             :         struct attribute attr;
    5515             :         ssize_t (*show)(struct kmem_cache *s, char *buf);
    5516             :         ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
    5517             : };
    5518             : 
    5519             : #define SLAB_ATTR_RO(_name) \
    5520             :         static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
    5521             : 
    5522             : #define SLAB_ATTR(_name) \
    5523             :         static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
    5524             : 
    5525           0 : static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
    5526             : {
    5527           0 :         return sysfs_emit(buf, "%u\n", s->size);
    5528             : }
    5529             : SLAB_ATTR_RO(slab_size);
    5530             : 
    5531           0 : static ssize_t align_show(struct kmem_cache *s, char *buf)
    5532             : {
    5533           0 :         return sysfs_emit(buf, "%u\n", s->align);
    5534             : }
    5535             : SLAB_ATTR_RO(align);
    5536             : 
    5537           0 : static ssize_t object_size_show(struct kmem_cache *s, char *buf)
    5538             : {
    5539           0 :         return sysfs_emit(buf, "%u\n", s->object_size);
    5540             : }
    5541             : SLAB_ATTR_RO(object_size);
    5542             : 
    5543           0 : static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
    5544             : {
    5545           0 :         return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
    5546             : }
    5547             : SLAB_ATTR_RO(objs_per_slab);
    5548             : 
    5549           0 : static ssize_t order_show(struct kmem_cache *s, char *buf)
    5550             : {
    5551           0 :         return sysfs_emit(buf, "%u\n", oo_order(s->oo));
    5552             : }
    5553             : SLAB_ATTR_RO(order);
    5554             : 
    5555           0 : static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
    5556             : {
    5557           0 :         return sysfs_emit(buf, "%lu\n", s->min_partial);
    5558             : }
    5559             : 
    5560           0 : static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
    5561             :                                  size_t length)
    5562             : {
    5563             :         unsigned long min;
    5564             :         int err;
    5565             : 
    5566           0 :         err = kstrtoul(buf, 10, &min);
    5567           0 :         if (err)
    5568           0 :                 return err;
    5569             : 
    5570           0 :         s->min_partial = min;
    5571           0 :         return length;
    5572             : }
    5573             : SLAB_ATTR(min_partial);
    5574             : 
    5575           0 : static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
    5576             : {
    5577           0 :         unsigned int nr_partial = 0;
    5578             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    5579             :         nr_partial = s->cpu_partial;
    5580             : #endif
    5581             : 
    5582           0 :         return sysfs_emit(buf, "%u\n", nr_partial);
    5583             : }
    5584             : 
    5585           0 : static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
    5586             :                                  size_t length)
    5587             : {
    5588             :         unsigned int objects;
    5589             :         int err;
    5590             : 
    5591           0 :         err = kstrtouint(buf, 10, &objects);
    5592           0 :         if (err)
    5593           0 :                 return err;
    5594           0 :         if (objects && !kmem_cache_has_cpu_partial(s))
    5595             :                 return -EINVAL;
    5596             : 
    5597           0 :         slub_set_cpu_partial(s, objects);
    5598           0 :         flush_all(s);
    5599           0 :         return length;
    5600             : }
    5601             : SLAB_ATTR(cpu_partial);
    5602             : 
    5603           0 : static ssize_t ctor_show(struct kmem_cache *s, char *buf)
    5604             : {
    5605           0 :         if (!s->ctor)
    5606             :                 return 0;
    5607           0 :         return sysfs_emit(buf, "%pS\n", s->ctor);
    5608             : }
    5609             : SLAB_ATTR_RO(ctor);
    5610             : 
    5611           0 : static ssize_t aliases_show(struct kmem_cache *s, char *buf)
    5612             : {
    5613           0 :         return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
    5614             : }
    5615             : SLAB_ATTR_RO(aliases);
    5616             : 
    5617           0 : static ssize_t partial_show(struct kmem_cache *s, char *buf)
    5618             : {
    5619           0 :         return show_slab_objects(s, buf, SO_PARTIAL);
    5620             : }
    5621             : SLAB_ATTR_RO(partial);
    5622             : 
    5623           0 : static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
    5624             : {
    5625           0 :         return show_slab_objects(s, buf, SO_CPU);
    5626             : }
    5627             : SLAB_ATTR_RO(cpu_slabs);
    5628             : 
    5629           0 : static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
    5630             : {
    5631           0 :         return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
    5632             : }
    5633             : SLAB_ATTR_RO(objects_partial);
    5634             : 
    5635           0 : static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
    5636             : {
    5637           0 :         int objects = 0;
    5638           0 :         int slabs = 0;
    5639             :         int cpu __maybe_unused;
    5640           0 :         int len = 0;
    5641             : 
    5642             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    5643             :         for_each_online_cpu(cpu) {
    5644             :                 struct slab *slab;
    5645             : 
    5646             :                 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
    5647             : 
    5648             :                 if (slab)
    5649             :                         slabs += slab->slabs;
    5650             :         }
    5651             : #endif
    5652             : 
    5653             :         /* Approximate half-full slabs, see slub_set_cpu_partial() */
    5654           0 :         objects = (slabs * oo_objects(s->oo)) / 2;
    5655           0 :         len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
    5656             : 
    5657             : #ifdef CONFIG_SLUB_CPU_PARTIAL
    5658             :         for_each_online_cpu(cpu) {
    5659             :                 struct slab *slab;
    5660             : 
    5661             :                 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
    5662             :                 if (slab) {
    5663             :                         slabs = READ_ONCE(slab->slabs);
    5664             :                         objects = (slabs * oo_objects(s->oo)) / 2;
    5665             :                         len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
    5666             :                                              cpu, objects, slabs);
    5667             :                 }
    5668             :         }
    5669             : #endif
    5670           0 :         len += sysfs_emit_at(buf, len, "\n");
    5671             : 
    5672           0 :         return len;
    5673             : }
    5674             : SLAB_ATTR_RO(slabs_cpu_partial);
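
A worked example of the half-full approximation above, with hypothetical numbers:
if oo_objects(s->oo) is 32 and two CPUs each hold 2 slabs on their per-CPU partial
lists, the summary is (4 * 32) / 2 = 64 objects and the emitted line reads:

        64(4) C0=32(2) C1=32(2)
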
    5675             : 
    5676           0 : static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
    5677             : {
    5678           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
    5679             : }
    5680             : SLAB_ATTR_RO(reclaim_account);
    5681             : 
    5682           0 : static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
    5683             : {
    5684           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
    5685             : }
    5686             : SLAB_ATTR_RO(hwcache_align);
    5687             : 
    5688             : #ifdef CONFIG_ZONE_DMA
    5689             : static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
    5690             : {
    5691             :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
    5692             : }
    5693             : SLAB_ATTR_RO(cache_dma);
    5694             : #endif
    5695             : 
    5696             : #ifdef CONFIG_HARDENED_USERCOPY
    5697             : static ssize_t usersize_show(struct kmem_cache *s, char *buf)
    5698             : {
    5699             :         return sysfs_emit(buf, "%u\n", s->usersize);
    5700             : }
    5701             : SLAB_ATTR_RO(usersize);
    5702             : #endif
    5703             : 
    5704           0 : static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
    5705             : {
    5706           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
    5707             : }
    5708             : SLAB_ATTR_RO(destroy_by_rcu);
    5709             : 
    5710             : #ifdef CONFIG_SLUB_DEBUG
    5711           0 : static ssize_t slabs_show(struct kmem_cache *s, char *buf)
    5712             : {
    5713           0 :         return show_slab_objects(s, buf, SO_ALL);
    5714             : }
    5715             : SLAB_ATTR_RO(slabs);
    5716             : 
    5717           0 : static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
    5718             : {
    5719           0 :         return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
    5720             : }
    5721             : SLAB_ATTR_RO(total_objects);
    5722             : 
    5723           0 : static ssize_t objects_show(struct kmem_cache *s, char *buf)
    5724             : {
    5725           0 :         return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
    5726             : }
    5727             : SLAB_ATTR_RO(objects);
    5728             : 
    5729           0 : static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
    5730             : {
    5731           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
    5732             : }
    5733             : SLAB_ATTR_RO(sanity_checks);
    5734             : 
    5735           0 : static ssize_t trace_show(struct kmem_cache *s, char *buf)
    5736             : {
    5737           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
    5738             : }
    5739             : SLAB_ATTR_RO(trace);
    5740             : 
    5741           0 : static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
    5742             : {
    5743           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
    5744             : }
    5745             : 
    5746             : SLAB_ATTR_RO(red_zone);
    5747             : 
    5748           0 : static ssize_t poison_show(struct kmem_cache *s, char *buf)
    5749             : {
    5750           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
    5751             : }
    5752             : 
    5753             : SLAB_ATTR_RO(poison);
    5754             : 
    5755           0 : static ssize_t store_user_show(struct kmem_cache *s, char *buf)
    5756             : {
    5757           0 :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
    5758             : }
    5759             : 
    5760             : SLAB_ATTR_RO(store_user);
    5761             : 
    5762           0 : static ssize_t validate_show(struct kmem_cache *s, char *buf)
    5763             : {
    5764           0 :         return 0;
    5765             : }
    5766             : 
    5767           0 : static ssize_t validate_store(struct kmem_cache *s,
    5768             :                         const char *buf, size_t length)
    5769             : {
    5770           0 :         int ret = -EINVAL;
    5771             : 
    5772           0 :         if (buf[0] == '1' && kmem_cache_debug(s)) {
    5773           0 :                 ret = validate_slab_cache(s);
    5774           0 :                 if (ret >= 0)
    5775           0 :                         ret = length;
    5776             :         }
    5777           0 :         return ret;
    5778             : }
    5779             : SLAB_ATTR(validate);
    5780             : 
    5781             : #endif /* CONFIG_SLUB_DEBUG */
    5782             : 
    5783             : #ifdef CONFIG_FAILSLAB
    5784             : static ssize_t failslab_show(struct kmem_cache *s, char *buf)
    5785             : {
    5786             :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
    5787             : }
    5788             : 
    5789             : static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
    5790             :                                 size_t length)
    5791             : {
    5792             :         if (s->refcount > 1)
    5793             :                 return -EINVAL;
    5794             : 
    5795             :         if (buf[0] == '1')
    5796             :                 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
    5797             :         else
    5798             :                 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
    5799             : 
    5800             :         return length;
    5801             : }
    5802             : SLAB_ATTR(failslab);
    5803             : #endif
    5804             : 
    5805           0 : static ssize_t shrink_show(struct kmem_cache *s, char *buf)
    5806             : {
    5807           0 :         return 0;
    5808             : }
    5809             : 
    5810           0 : static ssize_t shrink_store(struct kmem_cache *s,
    5811             :                         const char *buf, size_t length)
    5812             : {
    5813           0 :         if (buf[0] == '1')
    5814           0 :                 kmem_cache_shrink(s);
    5815             :         else
    5816             :                 return -EINVAL;
    5817           0 :         return length;
    5818             : }
    5819             : SLAB_ATTR(shrink);
    5820             : 
    5821             : #ifdef CONFIG_NUMA
    5822             : static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
    5823             : {
    5824             :         return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
    5825             : }
    5826             : 
    5827             : static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
    5828             :                                 const char *buf, size_t length)
    5829             : {
    5830             :         unsigned int ratio;
    5831             :         int err;
    5832             : 
    5833             :         err = kstrtouint(buf, 10, &ratio);
    5834             :         if (err)
    5835             :                 return err;
    5836             :         if (ratio > 100)
    5837             :                 return -ERANGE;
    5838             : 
    5839             :         s->remote_node_defrag_ratio = ratio * 10;
    5840             : 
    5841             :         return length;
    5842             : }
    5843             : SLAB_ATTR(remote_node_defrag_ratio);
    5844             : #endif
    5845             : 
    5846             : #ifdef CONFIG_SLUB_STATS
    5847             : static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
    5848             : {
    5849             :         unsigned long sum  = 0;
    5850             :         int cpu;
    5851             :         int len = 0;
    5852             :         int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
    5853             : 
    5854             :         if (!data)
    5855             :                 return -ENOMEM;
    5856             : 
    5857             :         for_each_online_cpu(cpu) {
    5858             :                 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
    5859             : 
    5860             :                 data[cpu] = x;
    5861             :                 sum += x;
    5862             :         }
    5863             : 
    5864             :         len += sysfs_emit_at(buf, len, "%lu", sum);
    5865             : 
    5866             : #ifdef CONFIG_SMP
    5867             :         for_each_online_cpu(cpu) {
    5868             :                 if (data[cpu])
    5869             :                         len += sysfs_emit_at(buf, len, " C%d=%u",
    5870             :                                              cpu, data[cpu]);
    5871             :         }
    5872             : #endif
    5873             :         kfree(data);
    5874             :         len += sysfs_emit_at(buf, len, "\n");
    5875             : 
    5876             :         return len;
    5877             : }
    5878             : 
    5879             : static void clear_stat(struct kmem_cache *s, enum stat_item si)
    5880             : {
    5881             :         int cpu;
    5882             : 
    5883             :         for_each_online_cpu(cpu)
    5884             :                 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
    5885             : }
    5886             : 
    5887             : #define STAT_ATTR(si, text)                                     \
    5888             : static ssize_t text##_show(struct kmem_cache *s, char *buf)     \
    5889             : {                                                               \
    5890             :         return show_stat(s, buf, si);                           \
    5891             : }                                                               \
    5892             : static ssize_t text##_store(struct kmem_cache *s,               \
    5893             :                                 const char *buf, size_t length) \
    5894             : {                                                               \
    5895             :         if (buf[0] != '0')                                      \
    5896             :                 return -EINVAL;                                 \
    5897             :         clear_stat(s, si);                                      \
    5898             :         return length;                                          \
    5899             : }                                                               \
    5900             : SLAB_ATTR(text);                                                \
    5901             : 
    5902             : STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
    5903             : STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
    5904             : STAT_ATTR(FREE_FASTPATH, free_fastpath);
    5905             : STAT_ATTR(FREE_SLOWPATH, free_slowpath);
    5906             : STAT_ATTR(FREE_FROZEN, free_frozen);
    5907             : STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
    5908             : STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
    5909             : STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
    5910             : STAT_ATTR(ALLOC_SLAB, alloc_slab);
    5911             : STAT_ATTR(ALLOC_REFILL, alloc_refill);
    5912             : STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
    5913             : STAT_ATTR(FREE_SLAB, free_slab);
    5914             : STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
    5915             : STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
    5916             : STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
    5917             : STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
    5918             : STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
    5919             : STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
    5920             : STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
    5921             : STAT_ATTR(ORDER_FALLBACK, order_fallback);
    5922             : STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
    5923             : STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
    5924             : STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
    5925             : STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
    5926             : STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
    5927             : STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
    5928             : #endif  /* CONFIG_SLUB_STATS */
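
Each STAT_ATTR() line above stamps out a show/store pair plus the sysfs attribute
object. For illustration, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) expands to
roughly:

        static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
        {
                return show_stat(s, buf, ALLOC_FASTPATH);
        }
        static ssize_t alloc_fastpath_store(struct kmem_cache *s,
                                        const char *buf, size_t length)
        {
                if (buf[0] != '0')
                        return -EINVAL;
                clear_stat(s, ALLOC_FASTPATH);
                return length;
        }
        /* ... and, via SLAB_ATTR(alloc_fastpath): */
        static struct slab_attribute alloc_fastpath_attr =
                __ATTR_RW_MODE(alloc_fastpath, 0600);
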
    5929             : 
    5930             : #ifdef CONFIG_KFENCE
    5931             : static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
    5932             : {
    5933             :         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
    5934             : }
    5935             : 
    5936             : static ssize_t skip_kfence_store(struct kmem_cache *s,
    5937             :                         const char *buf, size_t length)
    5938             : {
    5939             :         int ret = length;
    5940             : 
    5941             :         if (buf[0] == '0')
    5942             :                 s->flags &= ~SLAB_SKIP_KFENCE;
    5943             :         else if (buf[0] == '1')
    5944             :                 s->flags |= SLAB_SKIP_KFENCE;
    5945             :         else
    5946             :                 ret = -EINVAL;
    5947             : 
    5948             :         return ret;
    5949             : }
    5950             : SLAB_ATTR(skip_kfence);
    5951             : #endif
    5952             : 
    5953             : static struct attribute *slab_attrs[] = {
    5954             :         &slab_size_attr.attr,
    5955             :         &object_size_attr.attr,
    5956             :         &objs_per_slab_attr.attr,
    5957             :         &order_attr.attr,
    5958             :         &min_partial_attr.attr,
    5959             :         &cpu_partial_attr.attr,
    5960             :         &objects_partial_attr.attr,
    5961             :         &partial_attr.attr,
    5962             :         &cpu_slabs_attr.attr,
    5963             :         &ctor_attr.attr,
    5964             :         &aliases_attr.attr,
    5965             :         &align_attr.attr,
    5966             :         &hwcache_align_attr.attr,
    5967             :         &reclaim_account_attr.attr,
    5968             :         &destroy_by_rcu_attr.attr,
    5969             :         &shrink_attr.attr,
    5970             :         &slabs_cpu_partial_attr.attr,
    5971             : #ifdef CONFIG_SLUB_DEBUG
    5972             :         &total_objects_attr.attr,
    5973             :         &objects_attr.attr,
    5974             :         &slabs_attr.attr,
    5975             :         &sanity_checks_attr.attr,
    5976             :         &trace_attr.attr,
    5977             :         &red_zone_attr.attr,
    5978             :         &poison_attr.attr,
    5979             :         &store_user_attr.attr,
    5980             :         &validate_attr.attr,
    5981             : #endif
    5982             : #ifdef CONFIG_ZONE_DMA
    5983             :         &cache_dma_attr.attr,
    5984             : #endif
    5985             : #ifdef CONFIG_NUMA
    5986             :         &remote_node_defrag_ratio_attr.attr,
    5987             : #endif
    5988             : #ifdef CONFIG_SLUB_STATS
    5989             :         &alloc_fastpath_attr.attr,
    5990             :         &alloc_slowpath_attr.attr,
    5991             :         &free_fastpath_attr.attr,
    5992             :         &free_slowpath_attr.attr,
    5993             :         &free_frozen_attr.attr,
    5994             :         &free_add_partial_attr.attr,
    5995             :         &free_remove_partial_attr.attr,
    5996             :         &alloc_from_partial_attr.attr,
    5997             :         &alloc_slab_attr.attr,
    5998             :         &alloc_refill_attr.attr,
    5999             :         &alloc_node_mismatch_attr.attr,
    6000             :         &free_slab_attr.attr,
    6001             :         &cpuslab_flush_attr.attr,
    6002             :         &deactivate_full_attr.attr,
    6003             :         &deactivate_empty_attr.attr,
    6004             :         &deactivate_to_head_attr.attr,
    6005             :         &deactivate_to_tail_attr.attr,
    6006             :         &deactivate_remote_frees_attr.attr,
    6007             :         &deactivate_bypass_attr.attr,
    6008             :         &order_fallback_attr.attr,
    6009             :         &cmpxchg_double_fail_attr.attr,
    6010             :         &cmpxchg_double_cpu_fail_attr.attr,
    6011             :         &cpu_partial_alloc_attr.attr,
    6012             :         &cpu_partial_free_attr.attr,
    6013             :         &cpu_partial_node_attr.attr,
    6014             :         &cpu_partial_drain_attr.attr,
    6015             : #endif
    6016             : #ifdef CONFIG_FAILSLAB
    6017             :         &failslab_attr.attr,
    6018             : #endif
    6019             : #ifdef CONFIG_HARDENED_USERCOPY
    6020             :         &usersize_attr.attr,
    6021             : #endif
    6022             : #ifdef CONFIG_KFENCE
    6023             :         &skip_kfence_attr.attr,
    6024             : #endif
    6025             : 
    6026             :         NULL
    6027             : };
    6028             : 
    6029             : static const struct attribute_group slab_attr_group = {
    6030             :         .attrs = slab_attrs,
    6031             : };
    6032             : 
    6033           0 : static ssize_t slab_attr_show(struct kobject *kobj,
    6034             :                                 struct attribute *attr,
    6035             :                                 char *buf)
    6036             : {
    6037             :         struct slab_attribute *attribute;
    6038             :         struct kmem_cache *s;
    6039             : 
    6040           0 :         attribute = to_slab_attr(attr);
    6041           0 :         s = to_slab(kobj);
    6042             : 
    6043           0 :         if (!attribute->show)
    6044             :                 return -EIO;
    6045             : 
    6046           0 :         return attribute->show(s, buf);
    6047             : }
    6048             : 
    6049           0 : static ssize_t slab_attr_store(struct kobject *kobj,
    6050             :                                 struct attribute *attr,
    6051             :                                 const char *buf, size_t len)
    6052             : {
    6053             :         struct slab_attribute *attribute;
    6054             :         struct kmem_cache *s;
    6055             : 
    6056           0 :         attribute = to_slab_attr(attr);
    6057           0 :         s = to_slab(kobj);
    6058             : 
    6059           0 :         if (!attribute->store)
    6060             :                 return -EIO;
    6061             : 
    6062           0 :         return attribute->store(s, buf, len);
    6063             : }
    6064             : 
    6065           0 : static void kmem_cache_release(struct kobject *k)
    6066             : {
    6067           0 :         slab_kmem_cache_release(to_slab(k));
    6068           0 : }
    6069             : 
    6070             : static const struct sysfs_ops slab_sysfs_ops = {
    6071             :         .show = slab_attr_show,
    6072             :         .store = slab_attr_store,
    6073             : };
    6074             : 
    6075             : static const struct kobj_type slab_ktype = {
    6076             :         .sysfs_ops = &slab_sysfs_ops,
    6077             :         .release = kmem_cache_release,
    6078             : };
    6079             : 
    6080             : static struct kset *slab_kset;
    6081             : 
    6082             : static inline struct kset *cache_kset(struct kmem_cache *s)
    6083             : {
    6084          53 :         return slab_kset;
    6085             : }
    6086             : 
    6087             : #define ID_STR_LENGTH 32
    6088             : 
    6089             : /* Create a unique string id for a slab cache:
    6090             :  *
    6091             :  * Format       :[flags-]size
    6092             :  */
    6093          41 : static char *create_unique_id(struct kmem_cache *s)
    6094             : {
    6095          41 :         char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
    6096          41 :         char *p = name;
    6097             : 
    6098          41 :         if (!name)
    6099             :                 return ERR_PTR(-ENOMEM);
    6100             : 
    6101          41 :         *p++ = ':';
    6102             :         /*
    6103             :          * First come the flags affecting slabcache operations. We will only
    6104             :          * get here for aliasable slabs so we do not need to support
    6105             :          * too many flags. The flags here must cover all flags that
    6106             :          * are matched during merging to guarantee that the id is
    6107             :          * unique.
    6108             :          */
    6109          41 :         if (s->flags & SLAB_CACHE_DMA)
    6110           0 :                 *p++ = 'd';
    6111          41 :         if (s->flags & SLAB_CACHE_DMA32)
    6112           0 :                 *p++ = 'D';
    6113          41 :         if (s->flags & SLAB_RECLAIM_ACCOUNT)
    6114          14 :                 *p++ = 'a';
    6115          41 :         if (s->flags & SLAB_CONSISTENCY_CHECKS)
    6116           0 :                 *p++ = 'F';
    6117             :         if (s->flags & SLAB_ACCOUNT)
    6118             :                 *p++ = 'A';
    6119          41 :         if (p != name + 1)
    6120          14 :                 *p++ = '-';
    6121          41 :         p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
    6122             : 
    6123          41 :         if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
    6124           0 :                 kfree(name);
    6125           0 :                 return ERR_PTR(-EINVAL);
    6126             :         }
    6127             :         kmsan_unpoison_memory(name, p - name);
    6128             :         return name;
    6129             : }
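
For illustration: a mergeable cache of size 256 with SLAB_RECLAIM_ACCOUNT set would
get the id ":a-0000256", while one with none of the flags above would get ":0000256"
(the '-' separator is only written when at least one flag character was emitted).
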
    6130             : 
    6131          53 : static int sysfs_slab_add(struct kmem_cache *s)
    6132             : {
    6133             :         int err;
    6134             :         const char *name;
    6135         106 :         struct kset *kset = cache_kset(s);
    6136          53 :         int unmergeable = slab_unmergeable(s);
    6137             : 
    6138          53 :         if (!unmergeable && disable_higher_order_debug &&
    6139           0 :                         (slub_debug & DEBUG_METADATA_FLAGS))
    6140           0 :                 unmergeable = 1;
    6141             : 
    6142          53 :         if (unmergeable) {
    6143             :                 /*
    6144             :                  * Slabcache can never be merged so we can use the name proper.
    6145             :                  * This is typically the case for debug situations. In that
    6146             :                  * case we can catch duplicate names easily.
    6147             :                  */
    6148          12 :                 sysfs_remove_link(&slab_kset->kobj, s->name);
    6149          12 :                 name = s->name;
    6150             :         } else {
    6151             :                 /*
    6152             :                  * Create a unique name for the slab as a target
    6153             :                  * for the symlinks.
    6154             :                  */
    6155          41 :                 name = create_unique_id(s);
    6156          41 :                 if (IS_ERR(name))
    6157           0 :                         return PTR_ERR(name);
    6158             :         }
    6159             : 
    6160          53 :         s->kobj.kset = kset;
    6161          53 :         err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
    6162          53 :         if (err)
    6163             :                 goto out;
    6164             : 
    6165          53 :         err = sysfs_create_group(&s->kobj, &slab_attr_group);
    6166          53 :         if (err)
    6167             :                 goto out_del_kobj;
    6168             : 
    6169          53 :         if (!unmergeable) {
    6170             :                 /* Setup first alias */
    6171          41 :                 sysfs_slab_alias(s, s->name);
    6172             :         }
    6173             : out:
    6174          53 :         if (!unmergeable)
    6175          41 :                 kfree(name);
    6176             :         return err;
    6177             : out_del_kobj:
    6178           0 :         kobject_del(&s->kobj);
    6179           0 :         goto out;
    6180             : }
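
Editor's note, as a usage illustration (cache names and sizes invented): on a
system with sysfs, a mergeable cache registered by sysfs_slab_add() appears
under the unique id generated above, and sysfs_slab_alias() adds one symlink
per cache name that was merged into it, roughly like:

        /sys/kernel/slab/:A-0000192                     kobject added by sysfs_slab_add()
        /sys/kernel/slab/cred_jar -> :A-0000192         symlink from sysfs_slab_alias()
        /sys/kernel/slab/pid -> :A-0000192

Unmergeable caches (typically debug configurations) keep their own name as the
directory instead.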
    6181             : 
    6182           0 : void sysfs_slab_unlink(struct kmem_cache *s)
    6183             : {
    6184           0 :         if (slab_state >= FULL)
    6185           0 :                 kobject_del(&s->kobj);
    6186           0 : }
    6187             : 
    6188           0 : void sysfs_slab_release(struct kmem_cache *s)
    6189             : {
    6190           0 :         if (slab_state >= FULL)
    6191           0 :                 kobject_put(&s->kobj);
    6192           0 : }
    6193             : 
    6194             : /*
    6195             :  * Need to buffer aliases during bootup until sysfs becomes
    6196             :  * available lest we lose that information.
    6197             :  */
    6198             : struct saved_alias {
    6199             :         struct kmem_cache *s;
    6200             :         const char *name;
    6201             :         struct saved_alias *next;
    6202             : };
    6203             : 
    6204             : static struct saved_alias *alias_list;
    6205             : 
    6206         105 : static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
    6207             : {
    6208             :         struct saved_alias *al;
    6209             : 
    6210         105 :         if (slab_state == FULL) {
    6211             :                 /*
    6212             :                  * If we have a leftover link then remove it.
    6213             :                  */
    6214          73 :                 sysfs_remove_link(&slab_kset->kobj, name);
    6215          73 :                 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
    6216             :         }
    6217             : 
    6218          32 :         al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
    6219          32 :         if (!al)
    6220             :                 return -ENOMEM;
    6221             : 
    6222          32 :         al->s = s;
    6223          32 :         al->name = name;
    6224          32 :         al->next = alias_list;
    6225          32 :         alias_list = al;
    6226          32 :         kmsan_unpoison_memory(al, sizeof(*al));
    6227          32 :         return 0;
    6228             : }
    6229             : 
    6230           1 : static int __init slab_sysfs_init(void)
    6231             : {
    6232             :         struct kmem_cache *s;
    6233             :         int err;
    6234             : 
    6235           1 :         mutex_lock(&slab_mutex);
    6236             : 
    6237           1 :         slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
    6238           1 :         if (!slab_kset) {
    6239           0 :                 mutex_unlock(&slab_mutex);
    6240           0 :                 pr_err("Cannot register slab subsystem.\n");
    6241           0 :                 return -ENOMEM;
    6242             :         }
    6243             : 
    6244           1 :         slab_state = FULL;
    6245             : 
    6246          54 :         list_for_each_entry(s, &slab_caches, list) {
    6247          53 :                 err = sysfs_slab_add(s);
    6248          53 :                 if (err)
    6249           0 :                         pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
    6250             :                                s->name);
    6251             :         }
    6252             : 
    6253          33 :         while (alias_list) {
    6254          32 :                 struct saved_alias *al = alias_list;
    6255             : 
    6256          32 :                 alias_list = alias_list->next;
    6257          32 :                 err = sysfs_slab_alias(al->s, al->name);
    6258          32 :                 if (err)
    6259           0 :                         pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
    6260             :                                al->name);
    6261          32 :                 kfree(al);
    6262             :         }
    6263             : 
    6264           1 :         mutex_unlock(&slab_mutex);
    6265           1 :         return 0;
    6266             : }
    6267             : late_initcall(slab_sysfs_init);
    6268             : #endif /* SLAB_SUPPORTS_SYSFS */
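
Editor's note: the saved_alias machinery above is a small "queue until ready,
then replay" pattern. sysfs_slab_alias() queues names on alias_list while
slab_state is below FULL, and slab_sysfs_init() drains the list once the
"slab" kset exists. A hedged, user-space distillation of that pattern follows
(illustrative names only, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct pending_alias {
        const char *name;
        struct pending_alias *next;
};

static struct pending_alias *pending_list;
static bool registry_ready;

static int register_alias(const char *name)
{
        struct pending_alias *p;

        if (registry_ready) {
                /* stands in for sysfs_create_link() */
                printf("link created: %s\n", name);
                return 0;
        }
        /* Registry not up yet: remember the request for later. */
        p = malloc(sizeof(*p));
        if (!p)
                return -1;
        p->name = name;
        p->next = pending_list;
        pending_list = p;
        return 0;
}

static void registry_init(void)         /* plays the role of slab_sysfs_init() */
{
        registry_ready = true;
        while (pending_list) {
                struct pending_alias *p = pending_list;

                pending_list = p->next;
                register_alias(p->name);
                free(p);
        }
}

int main(void)
{
        register_alias("early-boot-cache");     /* queued: registry not up yet */
        registry_init();                        /* drains the queue */
        register_alias("late-cache");           /* handled immediately */
        return 0;
}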
    6269             : 
    6270             : #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
    6271             : static int slab_debugfs_show(struct seq_file *seq, void *v)
    6272             : {
    6273             :         struct loc_track *t = seq->private;
    6274             :         struct location *l;
    6275             :         unsigned long idx;
    6276             : 
    6277             :         idx = (unsigned long) t->idx;
    6278             :         if (idx < t->count) {
    6279             :                 l = &t->loc[idx];
    6280             : 
    6281             :                 seq_printf(seq, "%7ld ", l->count);
    6282             : 
    6283             :                 if (l->addr)
    6284             :                         seq_printf(seq, "%pS", (void *)l->addr);
    6285             :                 else
    6286             :                         seq_puts(seq, "<not-available>");
    6287             : 
    6288             :                 if (l->waste)
    6289             :                         seq_printf(seq, " waste=%lu/%lu",
    6290             :                                 l->count * l->waste, l->waste);
    6291             : 
    6292             :                 if (l->sum_time != l->min_time) {
    6293             :                         seq_printf(seq, " age=%ld/%llu/%ld",
    6294             :                                 l->min_time, div_u64(l->sum_time, l->count),
    6295             :                                 l->max_time);
    6296             :                 } else
    6297             :                         seq_printf(seq, " age=%ld", l->min_time);
    6298             : 
    6299             :                 if (l->min_pid != l->max_pid)
    6300             :                         seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
    6301             :                 else
    6302             :                         seq_printf(seq, " pid=%ld",
    6303             :                                 l->min_pid);
    6304             : 
    6305             :                 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
    6306             :                         seq_printf(seq, " cpus=%*pbl",
    6307             :                                  cpumask_pr_args(to_cpumask(l->cpus)));
    6308             : 
    6309             :                 if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
    6310             :                         seq_printf(seq, " nodes=%*pbl",
    6311             :                                  nodemask_pr_args(&l->nodes));
    6312             : 
    6313             : #ifdef CONFIG_STACKDEPOT
    6314             :                 {
    6315             :                         depot_stack_handle_t handle;
    6316             :                         unsigned long *entries;
    6317             :                         unsigned int nr_entries, j;
    6318             : 
    6319             :                         handle = READ_ONCE(l->handle);
    6320             :                         if (handle) {
    6321             :                                 nr_entries = stack_depot_fetch(handle, &entries);
    6322             :                                 seq_puts(seq, "\n");
    6323             :                                 for (j = 0; j < nr_entries; j++)
    6324             :                                         seq_printf(seq, "        %pS\n", (void *)entries[j]);
    6325             :                         }
    6326             :                 }
    6327             : #endif
    6328             :                 seq_puts(seq, "\n");
    6329             :         }
    6330             : 
    6331             :         if (!idx && !t->count)
    6332             :                 seq_puts(seq, "No data\n");
    6333             : 
    6334             :         return 0;
    6335             : }
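
Editor's note: reading the seq_printf() calls above together, one record of
the alloc_traces/free_traces output comes out roughly like the following (all
numbers and symbol names are invented for illustration; the indented lines are
the stack trace emitted when CONFIG_STACKDEPOT is enabled):

           4321 alloc_worker+0x34/0xa0 waste=8642/2 age=3/1450/9120 pid=1-2345 cpus=0-7 nodes=0-1
                kmem_cache_alloc_node+0x11c/0x1e0
                alloc_worker+0x34/0xa0
                kthread+0x118/0x140

The leading field is l->count, the number of objects recorded against that
location, and "No data" is printed when no locations were collected at all.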
    6336             : 
    6337             : static void slab_debugfs_stop(struct seq_file *seq, void *v)
    6338             : {
    6339             : }
    6340             : 
    6341             : static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
    6342             : {
    6343             :         struct loc_track *t = seq->private;
    6344             : 
    6345             :         t->idx = ++(*ppos);
    6346             :         if (*ppos <= t->count)
    6347             :                 return ppos;
    6348             : 
    6349             :         return NULL;
    6350             : }
    6351             : 
    6352             : static int cmp_loc_by_count(const void *a, const void *b, const void *data)
    6353             : {
    6354             :         struct location *loc1 = (struct location *)a;
    6355             :         struct location *loc2 = (struct location *)b;
    6356             : 
    6357             :         if (loc1->count > loc2->count)
    6358             :                 return -1;
    6359             :         else
    6360             :                 return 1;
    6361             : }
    6362             : 
    6363             : static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
    6364             : {
    6365             :         struct loc_track *t = seq->private;
    6366             : 
    6367             :         t->idx = *ppos;
    6368             :         return ppos;
    6369             : }
    6370             : 
    6371             : static const struct seq_operations slab_debugfs_sops = {
    6372             :         .start  = slab_debugfs_start,
    6373             :         .next   = slab_debugfs_next,
    6374             :         .stop   = slab_debugfs_stop,
    6375             :         .show   = slab_debugfs_show,
    6376             : };
    6377             : 
    6378             : static int slab_debug_trace_open(struct inode *inode, struct file *filep)
    6379             : {
    6380             : 
    6381             :         struct kmem_cache_node *n;
    6382             :         enum track_item alloc;
    6383             :         int node;
    6384             :         struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
    6385             :                                                 sizeof(struct loc_track));
    6386             :         struct kmem_cache *s = file_inode(filep)->i_private;
    6387             :         unsigned long *obj_map;
    6388             : 
    6389             :         if (!t)
    6390             :                 return -ENOMEM;
    6391             : 
    6392             :         obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
    6393             :         if (!obj_map) {
    6394             :                 seq_release_private(inode, filep);
    6395             :                 return -ENOMEM;
    6396             :         }
    6397             : 
    6398             :         if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
    6399             :                 alloc = TRACK_ALLOC;
    6400             :         else
    6401             :                 alloc = TRACK_FREE;
    6402             : 
    6403             :         if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
    6404             :                 bitmap_free(obj_map);
    6405             :                 seq_release_private(inode, filep);
    6406             :                 return -ENOMEM;
    6407             :         }
    6408             : 
    6409             :         for_each_kmem_cache_node(s, node, n) {
    6410             :                 unsigned long flags;
    6411             :                 struct slab *slab;
    6412             : 
    6413             :                 if (!node_nr_slabs(n))
    6414             :                         continue;
    6415             : 
    6416             :                 spin_lock_irqsave(&n->list_lock, flags);
    6417             :                 list_for_each_entry(slab, &n->partial, slab_list)
    6418             :                         process_slab(t, s, slab, alloc, obj_map);
    6419             :                 list_for_each_entry(slab, &n->full, slab_list)
    6420             :                         process_slab(t, s, slab, alloc, obj_map);
    6421             :                 spin_unlock_irqrestore(&n->list_lock, flags);
    6422             :         }
    6423             : 
    6424             :         /* Sort locations by count */
    6425             :         sort_r(t->loc, t->count, sizeof(struct location),
    6426             :                 cmp_loc_by_count, NULL, NULL);
    6427             : 
    6428             :         bitmap_free(obj_map);
    6429             :         return 0;
    6430             : }
    6431             : 
    6432             : static int slab_debug_trace_release(struct inode *inode, struct file *file)
    6433             : {
    6434             :         struct seq_file *seq = file->private_data;
    6435             :         struct loc_track *t = seq->private;
    6436             : 
    6437             :         free_loc_track(t);
    6438             :         return seq_release_private(inode, file);
    6439             : }
    6440             : 
    6441             : static const struct file_operations slab_debugfs_fops = {
    6442             :         .open    = slab_debug_trace_open,
    6443             :         .read    = seq_read,
    6444             :         .llseek  = seq_lseek,
    6445             :         .release = slab_debug_trace_release,
    6446             : };
    6447             : 
    6448             : static void debugfs_slab_add(struct kmem_cache *s)
    6449             : {
    6450             :         struct dentry *slab_cache_dir;
    6451             : 
    6452             :         if (unlikely(!slab_debugfs_root))
    6453             :                 return;
    6454             : 
    6455             :         slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
    6456             : 
    6457             :         debugfs_create_file("alloc_traces", 0400,
    6458             :                 slab_cache_dir, s, &slab_debugfs_fops);
    6459             : 
    6460             :         debugfs_create_file("free_traces", 0400,
    6461             :                 slab_cache_dir, s, &slab_debugfs_fops);
    6462             : }
    6463             : 
    6464             : void debugfs_slab_release(struct kmem_cache *s)
    6465             : {
    6466             :         debugfs_lookup_and_remove(s->name, slab_debugfs_root);
    6467             : }
    6468             : 
    6469             : static int __init slab_debugfs_init(void)
    6470             : {
    6471             :         struct kmem_cache *s;
    6472             : 
    6473             :         slab_debugfs_root = debugfs_create_dir("slab", NULL);
    6474             : 
    6475             :         list_for_each_entry(s, &slab_caches, list)
    6476             :                 if (s->flags & SLAB_STORE_USER)
    6477             :                         debugfs_slab_add(s);
    6478             : 
    6479             :         return 0;
    6480             : 
    6481             : }
    6482             : __initcall(slab_debugfs_init);
    6483             : #endif
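
Editor's note: a hedged user-space sketch of consuming the files created by
debugfs_slab_add() above (not kernel code). It assumes debugfs is mounted at
/sys/kernel/debug and that the cache was created with SLAB_STORE_USER (for
example via slub_debug=U on the kernel command line); the cache name
"kmalloc-64" is only a placeholder.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/slab/kmalloc-64/alloc_traces";
        char line[512];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        /* Each record is formatted by slab_debugfs_show() as shown above. */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}
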
    6484             : /*
    6485             :  * The /proc/slabinfo ABI
    6486             :  */
    6487             : #ifdef CONFIG_SLUB_DEBUG
    6488           0 : void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
    6489             : {
    6490           0 :         unsigned long nr_slabs = 0;
    6491           0 :         unsigned long nr_objs = 0;
    6492           0 :         unsigned long nr_free = 0;
    6493             :         int node;
    6494             :         struct kmem_cache_node *n;
    6495             : 
    6496           0 :         for_each_kmem_cache_node(s, node, n) {
    6497           0 :                 nr_slabs += node_nr_slabs(n);
    6498           0 :                 nr_objs += node_nr_objs(n);
    6499           0 :                 nr_free += count_partial(n, count_free);
    6500             :         }
    6501             : 
    6502           0 :         sinfo->active_objs = nr_objs - nr_free;
    6503           0 :         sinfo->num_objs = nr_objs;
    6504           0 :         sinfo->active_slabs = nr_slabs;
    6505           0 :         sinfo->num_slabs = nr_slabs;
    6506           0 :         sinfo->objects_per_slab = oo_objects(s->oo);
    6507           0 :         sinfo->cache_order = oo_order(s->oo);
    6508           0 : }
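
Editor's note, a quick worked example of the arithmetic in get_slabinfo()
(numbers invented): if the per-node counters sum to nr_slabs = 10 and
nr_objs = 320, and the partial lists hold nr_free = 50 free objects, then
/proc/slabinfo reports active_objs = 320 - 50 = 270, num_objs = 320, and both
active_slabs and num_slabs as 10, with objects_per_slab and cache_order taken
straight from the cache's oo encoding.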
    6509             : 
    6510           0 : void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
    6511             : {
    6512           0 : }
    6513             : 
    6514           0 : ssize_t slabinfo_write(struct file *file, const char __user *buffer,
    6515             :                        size_t count, loff_t *ppos)
    6516             : {
    6517           0 :         return -EIO;
    6518             : }
    6519             : #endif /* CONFIG_SLUB_DEBUG */

Generated by: LCOV version 1.14