Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Slab allocator functions that are independent of the allocator strategy
4 : *
5 : * (C) 2012 Christoph Lameter <cl@linux.com>
6 : */
7 : #include <linux/slab.h>
8 :
9 : #include <linux/mm.h>
10 : #include <linux/poison.h>
11 : #include <linux/interrupt.h>
12 : #include <linux/memory.h>
13 : #include <linux/cache.h>
14 : #include <linux/compiler.h>
15 : #include <linux/kfence.h>
16 : #include <linux/module.h>
17 : #include <linux/cpu.h>
18 : #include <linux/uaccess.h>
19 : #include <linux/seq_file.h>
20 : #include <linux/proc_fs.h>
21 : #include <linux/debugfs.h>
22 : #include <linux/kasan.h>
23 : #include <asm/cacheflush.h>
24 : #include <asm/tlbflush.h>
25 : #include <asm/page.h>
26 : #include <linux/memcontrol.h>
27 : #include <linux/stackdepot.h>
28 :
29 : #include "internal.h"
30 : #include "slab.h"
31 :
32 : #define CREATE_TRACE_POINTS
33 : #include <trace/events/kmem.h>
34 :
35 : enum slab_state slab_state;
36 : LIST_HEAD(slab_caches);
37 : DEFINE_MUTEX(slab_mutex);
38 : struct kmem_cache *kmem_cache;
39 :
40 : static LIST_HEAD(slab_caches_to_rcu_destroy);
41 : static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
42 : static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
43 : slab_caches_to_rcu_destroy_workfn);
44 :
45 : /*
46 : * Set of flags that will prevent slab merging
47 : */
48 : #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
49 : SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
50 : SLAB_FAILSLAB | kasan_never_merge())
51 :
52 : #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
53 : SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
54 :
55 : /*
56 : * Merge control. If this is set then no merging of slab caches will occur.
57 : */
58 : static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
59 :
60 0 : static int __init setup_slab_nomerge(char *str)
61 : {
62 0 : slab_nomerge = true;
63 0 : return 1;
64 : }
65 :
66 0 : static int __init setup_slab_merge(char *str)
67 : {
68 0 : slab_nomerge = false;
69 0 : return 1;
70 : }
71 :
72 : #ifdef CONFIG_SLUB
73 : __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
74 : __setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
75 : #endif
76 :
77 : __setup("slab_nomerge", setup_slab_nomerge);
78 : __setup("slab_merge", setup_slab_merge);
79 :
80 : /*
81 : * Determine the size of a slab object
82 : */
83 0 : unsigned int kmem_cache_size(struct kmem_cache *s)
84 : {
85 0 : return s->object_size;
86 : }
87 : EXPORT_SYMBOL(kmem_cache_size);
88 :
89 : #ifdef CONFIG_DEBUG_VM
90 : static int kmem_cache_sanity_check(const char *name, unsigned int size)
91 : {
92 : if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
93 : pr_err("kmem_cache_create(%s) integrity check failed\n", name);
94 : return -EINVAL;
95 : }
96 :
97 : WARN_ON(strchr(name, ' ')); /* It confuses parsers */
98 : return 0;
99 : }
100 : #else
101 : static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
102 : {
103 : return 0;
104 : }
105 : #endif
106 :
107 : /*
108 : * Figure out what the alignment of the objects will be given a set of
109 : * flags, a user specified alignment and the size of the objects.
110 : */
111 : static unsigned int calculate_alignment(slab_flags_t flags,
112 : unsigned int align, unsigned int size)
113 : {
114 : /*
115 : * If the user wants hardware cache aligned objects then follow that
116 : * suggestion if the object is sufficiently large.
117 : *
118 : * The hardware cache alignment cannot override the specified
119 : * alignment though. If that is greater, then use it.
120 : */
121 103 : if (flags & SLAB_HWCACHE_ALIGN) {
122 : unsigned int ralign;
123 :
124 30 : ralign = cache_line_size();
125 30 : while (size <= ralign / 2)
126 : ralign /= 2;
127 30 : align = max(align, ralign);
128 : }
129 :
130 103 : align = max(align, arch_slab_minalign());
131 :
132 103 : return ALIGN(align, sizeof(void *));
133 : }
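/*
 * Worked example (editorial illustration, not part of the kernel source):
 * with SLAB_HWCACHE_ALIGN set, cache_line_size() == 64 and a 24-byte
 * object, the loop halves ralign once (24 <= 32, but not <= 16), so
 * align becomes max(align, 32); ALIGN(32, sizeof(void *)) then leaves it
 * at 32 on a 64-bit architecture.
 */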
134 :
135 : /*
136 : * Find a mergeable slab cache
137 : */
138 53 : int slab_unmergeable(struct kmem_cache *s)
139 : {
140 1110 : if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
141 : return 1;
142 :
143 1072 : if (s->ctor)
144 : return 1;
145 :
146 : #ifdef CONFIG_HARDENED_USERCOPY
147 : if (s->usersize)
148 : return 1;
149 : #endif
150 :
151 : /*
152 : * We may have set a slab to be unmergeable during bootstrap.
153 : */
154 1015 : if (s->refcount < 0)
155 : return 1;
156 :
157 41 : return 0;
158 : }
159 :
160 57 : struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
161 : slab_flags_t flags, const char *name, void (*ctor)(void *))
162 : {
163 : struct kmem_cache *s;
164 :
165 57 : if (slab_nomerge)
166 : return NULL;
167 :
168 57 : if (ctor)
169 : return NULL;
170 :
171 50 : size = ALIGN(size, sizeof(void *));
172 50 : align = calculate_alignment(flags, align, size);
173 50 : size = ALIGN(size, align);
174 50 : flags = kmem_cache_flags(size, flags, name);
175 :
176 50 : if (flags & SLAB_NEVER_MERGE)
177 : return NULL;
178 :
179 1072 : list_for_each_entry_reverse(s, &slab_caches, list) {
180 1057 : if (slab_unmergeable(s))
181 179 : continue;
182 :
183 878 : if (size > s->size)
184 450 : continue;
185 :
186 428 : if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
187 171 : continue;
188 : /*
189 : * Check if alignment is compatible.
190 : * Courtesy of Adrian Drzewiecki
191 : */
192 257 : if ((s->size & ~(align - 1)) != s->size)
193 2 : continue;
194 :
195 255 : if (s->size - size >= sizeof(void *))
196 223 : continue;
197 :
198 : if (IS_ENABLED(CONFIG_SLAB) && align &&
199 : (align > s->align || s->align % align))
200 : continue;
201 :
202 : return s;
203 : }
204 : return NULL;
205 : }
206 :
207 25 : static struct kmem_cache *create_cache(const char *name,
208 : unsigned int object_size, unsigned int align,
209 : slab_flags_t flags, unsigned int useroffset,
210 : unsigned int usersize, void (*ctor)(void *),
211 : struct kmem_cache *root_cache)
212 : {
213 : struct kmem_cache *s;
214 : int err;
215 :
216 25 : if (WARN_ON(useroffset + usersize > object_size))
217 : useroffset = usersize = 0;
218 :
219 25 : err = -ENOMEM;
220 50 : s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
221 25 : if (!s)
222 : goto out;
223 :
224 25 : s->name = name;
225 25 : s->size = s->object_size = object_size;
226 25 : s->align = align;
227 25 : s->ctor = ctor;
228 : #ifdef CONFIG_HARDENED_USERCOPY
229 : s->useroffset = useroffset;
230 : s->usersize = usersize;
231 : #endif
232 :
233 25 : err = __kmem_cache_create(s, flags);
234 25 : if (err)
235 : goto out_free_cache;
236 :
237 25 : s->refcount = 1;
238 25 : list_add(&s->list, &slab_caches);
239 : out:
240 25 : if (err)
241 0 : return ERR_PTR(err);
242 : return s;
243 :
244 : out_free_cache:
245 0 : kmem_cache_free(kmem_cache, s);
246 : goto out;
247 : }
248 :
249 : /**
250 : * kmem_cache_create_usercopy - Create a cache with a region suitable
251 : * for copying to userspace
252 : * @name: A string which is used in /proc/slabinfo to identify this cache.
253 : * @size: The size of objects to be created in this cache.
254 : * @align: The required alignment for the objects.
255 : * @flags: SLAB flags
256 : * @useroffset: Usercopy region offset
257 : * @usersize: Usercopy region size
258 : * @ctor: A constructor for the objects.
259 : *
260 : * Cannot be called within an interrupt, but can be interrupted.
261 : * The @ctor is run when new pages are allocated by the cache.
262 : *
263 : * The flags are
264 : *
265 : * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
266 : * to catch references to uninitialised memory.
267 : *
268 : * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
269 : * for buffer overruns.
270 : *
271 : * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
272 : * cacheline. This can be beneficial if you're counting cycles as closely
273 : * as davem.
274 : *
275 : * Return: a pointer to the cache on success, NULL on failure.
276 : */
277 : struct kmem_cache *
278 57 : kmem_cache_create_usercopy(const char *name,
279 : unsigned int size, unsigned int align,
280 : slab_flags_t flags,
281 : unsigned int useroffset, unsigned int usersize,
282 : void (*ctor)(void *))
283 : {
284 57 : struct kmem_cache *s = NULL;
285 : const char *cache_name;
286 : int err;
287 :
288 : #ifdef CONFIG_SLUB_DEBUG
289 : /*
290 : * If no slub_debug was enabled globally, the static key is not yet
291 : * enabled by setup_slub_debug(). Enable it if the cache is being
292 : * created with any of the debugging flags passed explicitly.
293 : * It's also possible that this is the first cache created with
294 : * SLAB_STORE_USER and we should init stack_depot for it.
295 : */
296 57 : if (flags & SLAB_DEBUG_FLAGS)
297 0 : static_branch_enable(&slub_debug_enabled);
298 57 : if (flags & SLAB_STORE_USER)
299 0 : stack_depot_init();
300 : #endif
301 :
302 57 : mutex_lock(&slab_mutex);
303 :
304 57 : err = kmem_cache_sanity_check(name, size);
305 : if (err) {
306 : goto out_unlock;
307 : }
308 :
309 : /* Refuse requests with allocator specific flags */
310 57 : if (flags & ~SLAB_FLAGS_PERMITTED) {
311 : err = -EINVAL;
312 : goto out_unlock;
313 : }
314 :
315 : /*
316 : * Some allocators will constrain the set of valid flags to a subset
317 : * of all flags. We expect them to define CACHE_CREATE_MASK in this
318 : * case, and we'll just provide them with a sanitized version of the
319 : * passed flags.
320 : */
321 57 : flags &= CACHE_CREATE_MASK;
322 :
323 : /* Fail closed on bad usersize or useroffset values. */
324 : if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
325 : WARN_ON(!usersize && useroffset) ||
326 : WARN_ON(size < usersize || size - usersize < useroffset))
327 57 : usersize = useroffset = 0;
328 :
329 : if (!usersize)
330 57 : s = __kmem_cache_alias(name, size, align, flags, ctor);
331 57 : if (s)
332 : goto out_unlock;
333 :
334 25 : cache_name = kstrdup_const(name, GFP_KERNEL);
335 25 : if (!cache_name) {
336 : err = -ENOMEM;
337 : goto out_unlock;
338 : }
339 :
340 25 : s = create_cache(cache_name, size,
341 : calculate_alignment(flags, align, size),
342 : flags, useroffset, usersize, ctor, NULL);
343 25 : if (IS_ERR(s)) {
344 0 : err = PTR_ERR(s);
345 0 : kfree_const(cache_name);
346 : }
347 :
348 : out_unlock:
349 57 : mutex_unlock(&slab_mutex);
350 :
351 57 : if (err) {
352 0 : if (flags & SLAB_PANIC)
353 0 : panic("%s: Failed to create slab '%s'. Error %d\n",
354 : __func__, name, err);
355 : else {
356 0 : pr_warn("%s(%s) failed with error %d\n",
357 : __func__, name, err);
358 0 : dump_stack();
359 : }
360 0 : return NULL;
361 : }
362 : return s;
363 : }
364 : EXPORT_SYMBOL(kmem_cache_create_usercopy);
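/*
 * Illustrative usage, assuming a hypothetical struct foo with an embedded
 * buffer that is copied to/from user space (names are not from this file):
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char buf[64];
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, buf),
 *				sizeof_field(struct foo, buf), NULL);
 *
 * With CONFIG_HARDENED_USERCOPY, only the declared buf region of such an
 * object is accepted by the hardened usercopy checks.
 */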
365 :
366 : /**
367 : * kmem_cache_create - Create a cache.
368 : * @name: A string which is used in /proc/slabinfo to identify this cache.
369 : * @size: The size of objects to be created in this cache.
370 : * @align: The required alignment for the objects.
371 : * @flags: SLAB flags
372 : * @ctor: A constructor for the objects.
373 : *
374 : * Cannot be called within an interrupt, but can be interrupted.
375 : * The @ctor is run when new pages are allocated by the cache.
376 : *
377 : * The flags are
378 : *
379 : * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
380 : * to catch references to uninitialised memory.
381 : *
382 : * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
383 : * for buffer overruns.
384 : *
385 : * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
386 : * cacheline. This can be beneficial if you're counting cycles as closely
387 : * as davem.
388 : *
389 : * Return: a pointer to the cache on success, NULL on failure.
390 : */
391 : struct kmem_cache *
392 51 : kmem_cache_create(const char *name, unsigned int size, unsigned int align,
393 : slab_flags_t flags, void (*ctor)(void *))
394 : {
395 51 : return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
396 : ctor);
397 : }
398 : EXPORT_SYMBOL(kmem_cache_create);
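/*
 * Illustrative lifecycle of a cache created with kmem_cache_create()
 * (hypothetical names, sketch only):
 *
 *	static struct kmem_cache *bar_cachep;
 *
 *	bar_cachep = kmem_cache_create("bar", sizeof(struct bar), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!bar_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(bar_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(bar_cachep, obj);
 *	kmem_cache_destroy(bar_cachep);
 */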
399 :
400 : #ifdef SLAB_SUPPORTS_SYSFS
401 : /*
402 : * For a given kmem_cache, kmem_cache_destroy() should only be called
403 : * once or there will be a use-after-free problem. The actual deletion
404 : * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
405 : * protection. So they are now done without holding those locks.
406 : *
407 : * Note that there will be a slight delay in the deletion of sysfs files
408 : * if kmem_cache_release() is called indirectly from a work function.
409 : */
410 : static void kmem_cache_release(struct kmem_cache *s)
411 : {
412 0 : sysfs_slab_unlink(s);
413 0 : sysfs_slab_release(s);
414 : }
415 : #else
416 : static void kmem_cache_release(struct kmem_cache *s)
417 : {
418 : slab_kmem_cache_release(s);
419 : }
420 : #endif
421 :
422 0 : static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
423 : {
424 0 : LIST_HEAD(to_destroy);
425 : struct kmem_cache *s, *s2;
426 :
427 : /*
428 : * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
429 : * @slab_caches_to_rcu_destroy list. The slab pages are freed
430 : * through RCU and the associated kmem_cache is dereferenced
431 : * while freeing the pages, so the kmem_caches should be freed only
432 : * after the pending RCU operations are finished. As rcu_barrier()
433 : * is a pretty slow operation, we batch all pending destructions
434 : * asynchronously.
435 : */
436 0 : mutex_lock(&slab_mutex);
437 0 : list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
438 0 : mutex_unlock(&slab_mutex);
439 :
440 0 : if (list_empty(&to_destroy))
441 0 : return;
442 :
443 0 : rcu_barrier();
444 :
445 0 : list_for_each_entry_safe(s, s2, &to_destroy, list) {
446 0 : debugfs_slab_release(s);
447 0 : kfence_shutdown_cache(s);
448 0 : kmem_cache_release(s);
449 : }
450 : }
451 :
452 0 : static int shutdown_cache(struct kmem_cache *s)
453 : {
454 : /* free asan quarantined objects */
455 0 : kasan_cache_shutdown(s);
456 :
457 0 : if (__kmem_cache_shutdown(s) != 0)
458 : return -EBUSY;
459 :
460 0 : list_del(&s->list);
461 :
462 0 : if (s->flags & SLAB_TYPESAFE_BY_RCU) {
463 0 : list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
464 : schedule_work(&slab_caches_to_rcu_destroy_work);
465 : } else {
466 : kfence_shutdown_cache(s);
467 : debugfs_slab_release(s);
468 : }
469 :
470 : return 0;
471 : }
472 :
473 0 : void slab_kmem_cache_release(struct kmem_cache *s)
474 : {
475 0 : __kmem_cache_release(s);
476 0 : kfree_const(s->name);
477 0 : kmem_cache_free(kmem_cache, s);
478 0 : }
479 :
480 0 : void kmem_cache_destroy(struct kmem_cache *s)
481 : {
482 : int refcnt;
483 : bool rcu_set;
484 :
485 0 : if (unlikely(!s) || !kasan_check_byte(s))
486 : return;
487 :
488 : cpus_read_lock();
489 0 : mutex_lock(&slab_mutex);
490 :
491 0 : rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
492 :
493 0 : refcnt = --s->refcount;
494 0 : if (refcnt)
495 : goto out_unlock;
496 :
497 0 : WARN(shutdown_cache(s),
498 : "%s %s: Slab cache still has objects when called from %pS",
499 : __func__, s->name, (void *)_RET_IP_);
500 : out_unlock:
501 0 : mutex_unlock(&slab_mutex);
502 : cpus_read_unlock();
503 0 : if (!refcnt && !rcu_set)
504 : kmem_cache_release(s);
505 : }
506 : EXPORT_SYMBOL(kmem_cache_destroy);
507 :
508 : /**
509 : * kmem_cache_shrink - Shrink a cache.
510 : * @cachep: The cache to shrink.
511 : *
512 : * Releases as many slabs as possible for a cache.
513 : * To help debugging, a zero exit status indicates all slabs were released.
514 : *
515 : * Return: %0 if all slabs were released, non-zero otherwise
516 : */
517 0 : int kmem_cache_shrink(struct kmem_cache *cachep)
518 : {
519 0 : kasan_cache_shrink(cachep);
520 :
521 0 : return __kmem_cache_shrink(cachep);
522 : }
523 : EXPORT_SYMBOL(kmem_cache_shrink);
524 :
525 22 : bool slab_is_available(void)
526 : {
527 22 : return slab_state >= UP;
528 : }
529 :
530 : #ifdef CONFIG_PRINTK
531 : /**
532 : * kmem_valid_obj - does the pointer reference a valid slab object?
533 : * @object: pointer to query.
534 : *
535 : * Return: %true if the pointer is to a not-yet-freed object from
536 : * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
537 : * is to an already-freed object, and %false otherwise.
538 : */
539 0 : bool kmem_valid_obj(void *object)
540 : {
541 : struct folio *folio;
542 :
543 : /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
544 0 : if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
545 : return false;
546 0 : folio = virt_to_folio(object);
547 0 : return folio_test_slab(folio);
548 : }
549 : EXPORT_SYMBOL_GPL(kmem_valid_obj);
550 :
551 : static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
552 : {
553 0 : if (__kfence_obj_info(kpp, object, slab))
554 : return;
555 0 : __kmem_obj_info(kpp, object, slab);
556 : }
557 :
558 : /**
559 : * kmem_dump_obj - Print available slab provenance information
560 : * @object: slab object for which to find provenance information.
561 : *
562 : * This function uses pr_cont(), so that the caller is expected to have
563 : * printed out whatever preamble is appropriate. The provenance information
564 : * depends on the type of object and on how much debugging is enabled.
565 : * For a slab-cache object, the fact that it is a slab object is printed,
566 : * and, if available, the slab name, return address, and stack trace from
567 : * the allocation and last free path of that object.
568 : *
569 : * This function will splat if passed a pointer to a non-slab object.
570 : * If you are not sure what type of object you have, you should instead
571 : * use mem_dump_obj().
572 : */
573 0 : void kmem_dump_obj(void *object)
574 : {
575 0 : char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
576 : int i;
577 : struct slab *slab;
578 : unsigned long ptroffset;
579 0 : struct kmem_obj_info kp = { };
580 :
581 0 : if (WARN_ON_ONCE(!virt_addr_valid(object)))
582 0 : return;
583 0 : slab = virt_to_slab(object);
584 0 : if (WARN_ON_ONCE(!slab)) {
585 0 : pr_cont(" non-slab memory.\n");
586 0 : return;
587 : }
588 0 : kmem_obj_info(&kp, object, slab);
589 0 : if (kp.kp_slab_cache)
590 0 : pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
591 : else
592 0 : pr_cont(" slab%s", cp);
593 0 : if (is_kfence_address(object))
594 : pr_cont(" (kfence)");
595 0 : if (kp.kp_objp)
596 0 : pr_cont(" start %px", kp.kp_objp);
597 0 : if (kp.kp_data_offset)
598 0 : pr_cont(" data offset %lu", kp.kp_data_offset);
599 0 : if (kp.kp_objp) {
600 0 : ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
601 0 : pr_cont(" pointer offset %lu", ptroffset);
602 : }
603 0 : if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
604 0 : pr_cont(" size %u", kp.kp_slab_cache->object_size);
605 0 : if (kp.kp_ret)
606 0 : pr_cont(" allocated at %pS\n", kp.kp_ret);
607 : else
608 0 : pr_cont("\n");
609 0 : for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
610 0 : if (!kp.kp_stack[i])
611 : break;
612 0 : pr_info(" %pS\n", kp.kp_stack[i]);
613 : }
614 :
615 0 : if (kp.kp_free_stack[0])
616 0 : pr_cont(" Free path:\n");
617 :
618 0 : for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
619 0 : if (!kp.kp_free_stack[i])
620 : break;
621 0 : pr_info(" %pS\n", kp.kp_free_stack[i]);
622 : }
623 :
624 : }
625 : EXPORT_SYMBOL_GPL(kmem_dump_obj);
626 : #endif
627 :
628 : /* Create a cache during boot when no slab services are available yet */
629 28 : void __init create_boot_cache(struct kmem_cache *s, const char *name,
630 : unsigned int size, slab_flags_t flags,
631 : unsigned int useroffset, unsigned int usersize)
632 : {
633 : int err;
634 28 : unsigned int align = ARCH_KMALLOC_MINALIGN;
635 :
636 28 : s->name = name;
637 28 : s->size = s->object_size = size;
638 :
639 : /*
640 : * For power of two sizes, guarantee natural alignment for kmalloc
641 : * caches, regardless of SL*B debugging options.
642 : */
643 56 : if (is_power_of_2(size))
644 22 : align = max(align, size);
645 28 : s->align = calculate_alignment(flags, align, size);
646 :
647 : #ifdef CONFIG_HARDENED_USERCOPY
648 : s->useroffset = useroffset;
649 : s->usersize = usersize;
650 : #endif
651 :
652 28 : err = __kmem_cache_create(s, flags);
653 :
654 28 : if (err)
655 0 : panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
656 : name, size, err);
657 :
658 28 : s->refcount = -1; /* Exempt from merging for now */
659 28 : }
660 :
661 26 : struct kmem_cache *__init create_kmalloc_cache(const char *name,
662 : unsigned int size, slab_flags_t flags,
663 : unsigned int useroffset, unsigned int usersize)
664 : {
665 52 : struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
666 :
667 26 : if (!s)
668 0 : panic("Out of memory when creating slab %s\n", name);
669 :
670 26 : create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset,
671 : usersize);
672 52 : list_add(&s->list, &slab_caches);
673 26 : s->refcount = 1;
674 26 : return s;
675 : }
676 :
677 : struct kmem_cache *
678 : kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
679 : { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
680 : EXPORT_SYMBOL(kmalloc_caches);
681 :
682 : /*
683 : * Conversion table for small slab sizes / 8 to the index in the
684 : * kmalloc array. This is necessary for slabs < 192 since we have non power
685 : * of two cache sizes there. The size of larger slabs can be determined using
686 : * fls.
687 : */
688 : static u8 size_index[24] __ro_after_init = {
689 : 3, /* 8 */
690 : 4, /* 16 */
691 : 5, /* 24 */
692 : 5, /* 32 */
693 : 6, /* 40 */
694 : 6, /* 48 */
695 : 6, /* 56 */
696 : 6, /* 64 */
697 : 1, /* 72 */
698 : 1, /* 80 */
699 : 1, /* 88 */
700 : 1, /* 96 */
701 : 7, /* 104 */
702 : 7, /* 112 */
703 : 7, /* 120 */
704 : 7, /* 128 */
705 : 2, /* 136 */
706 : 2, /* 144 */
707 : 2, /* 152 */
708 : 2, /* 160 */
709 : 2, /* 168 */
710 : 2, /* 176 */
711 : 2, /* 184 */
712 : 2 /* 192 */
713 : };
714 :
715 : static inline unsigned int size_index_elem(unsigned int bytes)
716 : {
717 4607 : return (bytes - 1) / 8;
718 : }
719 :
720 : /*
721 : * Find the kmem_cache structure that serves a given size of
722 : * allocation
723 : */
724 5326 : struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
725 : {
726 : unsigned int index;
727 :
728 5326 : if (size <= 192) {
729 4611 : if (!size)
730 : return ZERO_SIZE_PTR;
731 :
732 9214 : index = size_index[size_index_elem(size)];
733 : } else {
734 715 : if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
735 : return NULL;
736 1430 : index = fls(size - 1);
737 : }
738 :
739 5322 : return kmalloc_caches[kmalloc_type(flags)][index];
740 : }
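/*
 * Worked example (editorial illustration, assuming the default unpatched
 * size_index table): a 100-byte request takes the small-size path,
 * size_index[(100 - 1) / 8] == size_index[12] == 7, i.e. kmalloc-128;
 * a 300-byte request takes the fls() path, fls(299) == 9, i.e. kmalloc-512.
 */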
741 :
742 25 : size_t kmalloc_size_roundup(size_t size)
743 : {
744 : struct kmem_cache *c;
745 :
746 : /* Short-circuit the 0 size case. */
747 25 : if (unlikely(size == 0))
748 : return 0;
749 : /* Short-circuit saturated "too-large" case. */
750 25 : if (unlikely(size == SIZE_MAX))
751 : return SIZE_MAX;
752 : /* Above the smaller buckets, size is a multiple of page size. */
753 25 : if (size > KMALLOC_MAX_CACHE_SIZE)
754 0 : return PAGE_SIZE << get_order(size);
755 :
756 : /* The flags don't matter since size_index is common to all. */
757 25 : c = kmalloc_slab(size, GFP_KERNEL);
758 25 : return c ? c->object_size : 0;
759 : }
760 : EXPORT_SYMBOL(kmalloc_size_roundup);
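/*
 * Illustrative usage (hypothetical caller): round up first and allocate
 * the rounded size so the slack may be used without tripping KASAN,
 * UBSAN_BOUNDS or FORTIFY_SOURCE:
 *
 *	alloc_size = kmalloc_size_roundup(needed);
 *	buf = kmalloc(alloc_size, GFP_KERNEL);
 *
 * With the default caches, needed == 100 yields alloc_size == 128.
 */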
761 :
762 : #ifdef CONFIG_ZONE_DMA
763 : #define KMALLOC_DMA_NAME(sz) .name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
764 : #else
765 : #define KMALLOC_DMA_NAME(sz)
766 : #endif
767 :
768 : #ifdef CONFIG_MEMCG_KMEM
769 : #define KMALLOC_CGROUP_NAME(sz) .name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
770 : #else
771 : #define KMALLOC_CGROUP_NAME(sz)
772 : #endif
773 :
774 : #ifndef CONFIG_SLUB_TINY
775 : #define KMALLOC_RCL_NAME(sz) .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
776 : #else
777 : #define KMALLOC_RCL_NAME(sz)
778 : #endif
779 :
780 : #define INIT_KMALLOC_INFO(__size, __short_size) \
781 : { \
782 : .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \
783 : KMALLOC_RCL_NAME(__short_size) \
784 : KMALLOC_CGROUP_NAME(__short_size) \
785 : KMALLOC_DMA_NAME(__short_size) \
786 : .size = __size, \
787 : }
788 :
789 : /*
790 : * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
791 : * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
792 : * kmalloc-2M.
793 : */
794 : const struct kmalloc_info_struct kmalloc_info[] __initconst = {
795 : INIT_KMALLOC_INFO(0, 0),
796 : INIT_KMALLOC_INFO(96, 96),
797 : INIT_KMALLOC_INFO(192, 192),
798 : INIT_KMALLOC_INFO(8, 8),
799 : INIT_KMALLOC_INFO(16, 16),
800 : INIT_KMALLOC_INFO(32, 32),
801 : INIT_KMALLOC_INFO(64, 64),
802 : INIT_KMALLOC_INFO(128, 128),
803 : INIT_KMALLOC_INFO(256, 256),
804 : INIT_KMALLOC_INFO(512, 512),
805 : INIT_KMALLOC_INFO(1024, 1k),
806 : INIT_KMALLOC_INFO(2048, 2k),
807 : INIT_KMALLOC_INFO(4096, 4k),
808 : INIT_KMALLOC_INFO(8192, 8k),
809 : INIT_KMALLOC_INFO(16384, 16k),
810 : INIT_KMALLOC_INFO(32768, 32k),
811 : INIT_KMALLOC_INFO(65536, 64k),
812 : INIT_KMALLOC_INFO(131072, 128k),
813 : INIT_KMALLOC_INFO(262144, 256k),
814 : INIT_KMALLOC_INFO(524288, 512k),
815 : INIT_KMALLOC_INFO(1048576, 1M),
816 : INIT_KMALLOC_INFO(2097152, 2M)
817 : };
818 :
819 : /*
820 : * Patch up the size_index table if we have strange large alignment
821 : * requirements for the kmalloc array. This is only the case for
822 : * MIPS it seems. The standard arches will not generate any code here.
823 : *
824 : * Largest permitted alignment is 256 bytes due to the way we
825 : * handle the index determination for the smaller caches.
826 : *
827 : * Make sure that nothing crazy happens if someone starts tinkering
828 : * around with ARCH_KMALLOC_MINALIGN
829 : */
830 1 : void __init setup_kmalloc_cache_index_table(void)
831 : {
832 : unsigned int i;
833 :
834 1 : BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
835 : !is_power_of_2(KMALLOC_MIN_SIZE));
836 :
837 1 : for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
838 : unsigned int elem = size_index_elem(i);
839 :
840 : if (elem >= ARRAY_SIZE(size_index))
841 : break;
842 : size_index[elem] = KMALLOC_SHIFT_LOW;
843 : }
844 :
845 : if (KMALLOC_MIN_SIZE >= 64) {
846 : /*
847 : * The 96 byte sized cache is not used if the alignment
848 : * is 64 bytes.
849 : */
850 : for (i = 64 + 8; i <= 96; i += 8)
851 : size_index[size_index_elem(i)] = 7;
852 :
853 : }
854 :
855 : if (KMALLOC_MIN_SIZE >= 128) {
856 : /*
857 : * The 192 byte sized cache is not used if the alignment
858 : * is 128 bytes. Redirect kmalloc to use the 256 byte cache
859 : * instead.
860 : */
861 : for (i = 128 + 8; i <= 192; i += 8)
862 : size_index[size_index_elem(i)] = 8;
863 : }
864 1 : }
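/*
 * Worked example (editorial illustration): if KMALLOC_MIN_SIZE were 64,
 * the first loop would point every entry below 64 bytes at
 * KMALLOC_SHIFT_LOW (the 64-byte cache), and the second loop would point
 * the 72..96 byte entries at index 7 (the 128-byte cache), since no
 * 96-byte cache exists in that configuration.
 */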
865 :
866 : static void __init
867 26 : new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
868 : {
869 26 : if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
870 13 : flags |= SLAB_RECLAIM_ACCOUNT;
871 : } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
872 : if (mem_cgroup_kmem_disabled()) {
873 : kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
874 : return;
875 : }
876 : flags |= SLAB_ACCOUNT;
877 : } else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
878 : flags |= SLAB_CACHE_DMA;
879 : }
880 :
881 26 : kmalloc_caches[type][idx] = create_kmalloc_cache(
882 : kmalloc_info[idx].name[type],
883 : kmalloc_info[idx].size, flags, 0,
884 : kmalloc_info[idx].size);
885 :
886 : /*
887 : * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
888 : * KMALLOC_NORMAL caches.
889 : */
890 : if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
891 : kmalloc_caches[type][idx]->refcount = -1;
892 : }
893 :
894 : /*
895 : * Create the kmalloc array. Some of the regular kmalloc arrays
896 : * may already have been created because they were needed to
897 : * enable allocations for slab creation.
898 : */
899 1 : void __init create_kmalloc_caches(slab_flags_t flags)
900 : {
901 : int i;
902 : enum kmalloc_cache_type type;
903 :
904 : /*
905 : * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM is defined
906 : */
907 3 : for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
908 22 : for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
909 22 : if (!kmalloc_caches[type][i])
910 22 : new_kmalloc_cache(i, type, flags);
911 :
912 : /*
913 : * Caches that are not power-of-two sized have to be
914 : * created immediately after the earlier power-of-two
915 : * caches.
916 : */
917 24 : if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
918 2 : !kmalloc_caches[type][1])
919 2 : new_kmalloc_cache(1, type, flags);
920 24 : if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
921 2 : !kmalloc_caches[type][2])
922 2 : new_kmalloc_cache(2, type, flags);
923 : }
924 : }
925 :
926 : /* Kmalloc array is now usable */
927 1 : slab_state = UP;
928 1 : }
929 :
930 6 : void free_large_kmalloc(struct folio *folio, void *object)
931 : {
932 6 : unsigned int order = folio_order(folio);
933 :
934 6 : if (WARN_ON_ONCE(order == 0))
935 0 : pr_warn_once("object pointer: 0x%p\n", object);
936 :
937 6 : kmemleak_free(object);
938 6 : kasan_kfree_large(object);
939 6 : kmsan_kfree_large(object);
940 :
941 12 : mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
942 6 : -(PAGE_SIZE << order));
943 6 : __free_pages(folio_page(folio, 0), order);
944 6 : }
945 :
946 : static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
947 : static __always_inline
948 : void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
949 : {
950 : struct kmem_cache *s;
951 : void *ret;
952 :
953 5307 : if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
954 6 : ret = __kmalloc_large_node(size, flags, node);
955 : trace_kmalloc(caller, ret, size,
956 : PAGE_SIZE << get_order(size), flags, node);
957 : return ret;
958 : }
959 :
960 5301 : s = kmalloc_slab(size, flags);
961 :
962 5301 : if (unlikely(ZERO_OR_NULL_PTR(s)))
963 : return s;
964 :
965 5297 : ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
966 5297 : ret = kasan_kmalloc(s, ret, size, flags);
967 5297 : trace_kmalloc(caller, ret, size, s->size, flags, node);
968 : return ret;
969 : }
970 :
971 277 : void *__kmalloc_node(size_t size, gfp_t flags, int node)
972 : {
973 554 : return __do_kmalloc_node(size, flags, node, _RET_IP_);
974 : }
975 : EXPORT_SYMBOL(__kmalloc_node);
976 :
977 2003 : void *__kmalloc(size_t size, gfp_t flags)
978 : {
979 4006 : return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
980 : }
981 : EXPORT_SYMBOL(__kmalloc);
982 :
983 3027 : void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
984 : int node, unsigned long caller)
985 : {
986 3027 : return __do_kmalloc_node(size, flags, node, caller);
987 : }
988 : EXPORT_SYMBOL(__kmalloc_node_track_caller);
989 :
990 : /**
991 : * kfree - free previously allocated memory
992 : * @object: pointer returned by kmalloc() or kmem_cache_alloc()
993 : *
994 : * If @object is NULL, no operation is performed.
995 : */
996 45334 : void kfree(const void *object)
997 : {
998 : struct folio *folio;
999 : struct slab *slab;
1000 : struct kmem_cache *s;
1001 :
1002 45334 : trace_kfree(_RET_IP_, object);
1003 :
1004 45334 : if (unlikely(ZERO_OR_NULL_PTR(object)))
1005 : return;
1006 :
1007 43512 : folio = virt_to_folio(object);
1008 43512 : if (unlikely(!folio_test_slab(folio))) {
1009 6 : free_large_kmalloc(folio, (void *)object);
1010 6 : return;
1011 : }
1012 :
1013 43506 : slab = folio_slab(folio);
1014 43506 : s = slab->slab_cache;
1015 43506 : __kmem_cache_free(s, (void *)object, _RET_IP_);
1016 : }
1017 : EXPORT_SYMBOL(kfree);
1018 :
1019 : /**
1020 : * __ksize -- Report full size of underlying allocation
1021 : * @object: pointer to the object
1022 : *
1023 : * This should only be used internally to query the true size of allocations.
1024 : * It is not meant to be a way to discover the usable size of an allocation
1025 : * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
1026 : * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
1027 : * and/or FORTIFY_SOURCE.
1028 : *
1029 : * Return: size of the actual memory used by @object in bytes
1030 : */
1031 118 : size_t __ksize(const void *object)
1032 : {
1033 : struct folio *folio;
1034 :
1035 118 : if (unlikely(object == ZERO_SIZE_PTR))
1036 : return 0;
1037 :
1038 118 : folio = virt_to_folio(object);
1039 :
1040 118 : if (unlikely(!folio_test_slab(folio))) {
1041 0 : if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
1042 : return 0;
1043 0 : if (WARN_ON(object != folio_address(folio)))
1044 : return 0;
1045 0 : return folio_size(folio);
1046 : }
1047 :
1048 : #ifdef CONFIG_SLUB_DEBUG
1049 118 : skip_orig_size_check(folio_slab(folio)->slab_cache, object);
1050 : #endif
1051 :
1052 118 : return slab_ksize(folio_slab(folio)->slab_cache);
1053 : }
1054 :
1055 41669 : void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
1056 : {
1057 41669 : void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
1058 41669 : size, _RET_IP_);
1059 :
1060 41669 : trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
1061 :
1062 41669 : ret = kasan_kmalloc(s, ret, size, gfpflags);
1063 41669 : return ret;
1064 : }
1065 : EXPORT_SYMBOL(kmalloc_trace);
1066 :
1067 285 : void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
1068 : int node, size_t size)
1069 : {
1070 285 : void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
1071 :
1072 285 : trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
1073 :
1074 285 : ret = kasan_kmalloc(s, ret, size, gfpflags);
1075 285 : return ret;
1076 : }
1077 : EXPORT_SYMBOL(kmalloc_node_trace);
1078 :
1079 0 : gfp_t kmalloc_fix_flags(gfp_t flags)
1080 : {
1081 0 : gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
1082 :
1083 0 : flags &= ~GFP_SLAB_BUG_MASK;
1084 0 : pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1085 : invalid_mask, &invalid_mask, flags, &flags);
1086 0 : dump_stack();
1087 :
1088 0 : return flags;
1089 : }
1090 :
1091 : /*
1092 : * To avoid unnecessary overhead, we pass through large allocation requests
1093 : * directly to the page allocator. We use __GFP_COMP, because we will need to
1094 : * know the allocation order to free the pages properly in kfree.
1095 : */
1096 :
1097 6 : static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
1098 : {
1099 : struct page *page;
1100 6 : void *ptr = NULL;
1101 6 : unsigned int order = get_order(size);
1102 :
1103 6 : if (unlikely(flags & GFP_SLAB_BUG_MASK))
1104 0 : flags = kmalloc_fix_flags(flags);
1105 :
1106 6 : flags |= __GFP_COMP;
1107 6 : page = alloc_pages_node(node, flags, order);
1108 6 : if (page) {
1109 6 : ptr = page_address(page);
1110 6 : mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
1111 6 : PAGE_SIZE << order);
1112 : }
1113 :
1114 6 : ptr = kasan_kmalloc_large(ptr, size, flags);
1115 : /* As ptr might get tagged, call kmemleak hook after KASAN. */
1116 6 : kmemleak_alloc(ptr, size, 1, flags);
1117 6 : kmsan_kmalloc_large(ptr, size, flags);
1118 :
1119 6 : return ptr;
1120 : }
1121 :
1122 0 : void *kmalloc_large(size_t size, gfp_t flags)
1123 : {
1124 0 : void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
1125 :
1126 0 : trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
1127 : flags, NUMA_NO_NODE);
1128 0 : return ret;
1129 : }
1130 : EXPORT_SYMBOL(kmalloc_large);
1131 :
1132 0 : void *kmalloc_large_node(size_t size, gfp_t flags, int node)
1133 : {
1134 0 : void *ret = __kmalloc_large_node(size, flags, node);
1135 :
1136 0 : trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
1137 : flags, node);
1138 0 : return ret;
1139 : }
1140 : EXPORT_SYMBOL(kmalloc_large_node);
1141 :
1142 : #ifdef CONFIG_SLAB_FREELIST_RANDOM
1143 : /* Randomize a generic freelist */
1144 : static void freelist_randomize(struct rnd_state *state, unsigned int *list,
1145 : unsigned int count)
1146 : {
1147 : unsigned int rand;
1148 : unsigned int i;
1149 :
1150 : for (i = 0; i < count; i++)
1151 : list[i] = i;
1152 :
1153 : /* Fisher-Yates shuffle */
1154 : for (i = count - 1; i > 0; i--) {
1155 : rand = prandom_u32_state(state);
1156 : rand %= (i + 1);
1157 : swap(list[i], list[rand]);
1158 : }
1159 : }
1160 :
1161 : /* Create a random sequence per cache */
1162 : int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
1163 : gfp_t gfp)
1164 : {
1165 : struct rnd_state state;
1166 :
1167 : if (count < 2 || cachep->random_seq)
1168 : return 0;
1169 :
1170 : cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
1171 : if (!cachep->random_seq)
1172 : return -ENOMEM;
1173 :
1174 : /* Get best entropy at this stage of boot */
1175 : prandom_seed_state(&state, get_random_long());
1176 :
1177 : freelist_randomize(&state, cachep->random_seq, count);
1178 : return 0;
1179 : }
1180 :
1181 : /* Destroy the per-cache random freelist sequence */
1182 : void cache_random_seq_destroy(struct kmem_cache *cachep)
1183 : {
1184 : kfree(cachep->random_seq);
1185 : cachep->random_seq = NULL;
1186 : }
1187 : #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1188 :
1189 : #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
1190 : #ifdef CONFIG_SLAB
1191 : #define SLABINFO_RIGHTS (0600)
1192 : #else
1193 : #define SLABINFO_RIGHTS (0400)
1194 : #endif
1195 :
1196 0 : static void print_slabinfo_header(struct seq_file *m)
1197 : {
1198 : /*
1199 : * Output format version, so at least we can change it
1200 : * without _too_ many complaints.
1201 : */
1202 : #ifdef CONFIG_DEBUG_SLAB
1203 : seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1204 : #else
1205 0 : seq_puts(m, "slabinfo - version: 2.1\n");
1206 : #endif
1207 0 : seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
1208 0 : seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1209 0 : seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1210 : #ifdef CONFIG_DEBUG_SLAB
1211 : seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
1212 : seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1213 : #endif
1214 0 : seq_putc(m, '\n');
1215 0 : }
1216 :
1217 0 : static void *slab_start(struct seq_file *m, loff_t *pos)
1218 : {
1219 0 : mutex_lock(&slab_mutex);
1220 0 : return seq_list_start(&slab_caches, *pos);
1221 : }
1222 :
1223 0 : static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1224 : {
1225 0 : return seq_list_next(p, &slab_caches, pos);
1226 : }
1227 :
1228 0 : static void slab_stop(struct seq_file *m, void *p)
1229 : {
1230 0 : mutex_unlock(&slab_mutex);
1231 0 : }
1232 :
1233 0 : static void cache_show(struct kmem_cache *s, struct seq_file *m)
1234 : {
1235 : struct slabinfo sinfo;
1236 :
1237 0 : memset(&sinfo, 0, sizeof(sinfo));
1238 0 : get_slabinfo(s, &sinfo);
1239 :
1240 0 : seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1241 : s->name, sinfo.active_objs, sinfo.num_objs, s->size,
1242 0 : sinfo.objects_per_slab, (1 << sinfo.cache_order));
1243 :
1244 0 : seq_printf(m, " : tunables %4u %4u %4u",
1245 : sinfo.limit, sinfo.batchcount, sinfo.shared);
1246 0 : seq_printf(m, " : slabdata %6lu %6lu %6lu",
1247 : sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1248 0 : slabinfo_show_stats(m, s);
1249 0 : seq_putc(m, '\n');
1250 0 : }
1251 :
1252 0 : static int slab_show(struct seq_file *m, void *p)
1253 : {
1254 0 : struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
1255 :
1256 0 : if (p == slab_caches.next)
1257 0 : print_slabinfo_header(m);
1258 0 : cache_show(s, m);
1259 0 : return 0;
1260 : }
1261 :
1262 0 : void dump_unreclaimable_slab(void)
1263 : {
1264 : struct kmem_cache *s;
1265 : struct slabinfo sinfo;
1266 :
1267 : /*
1268 : * Acquiring slab_mutex here is risky since we don't want to
1269 : * sleep in the OOM path, but traversing the list without holding
1270 : * the mutex risks a crash.
1271 : * Use mutex_trylock to protect the traversal, and dump nothing
1272 : * if the mutex cannot be acquired.
1273 : */
1274 0 : if (!mutex_trylock(&slab_mutex)) {
1275 0 : pr_warn("excessive unreclaimable slab but cannot dump stats\n");
1276 0 : return;
1277 : }
1278 :
1279 0 : pr_info("Unreclaimable slab info:\n");
1280 0 : pr_info("Name Used Total\n");
1281 :
1282 0 : list_for_each_entry(s, &slab_caches, list) {
1283 0 : if (s->flags & SLAB_RECLAIM_ACCOUNT)
1284 0 : continue;
1285 :
1286 0 : get_slabinfo(s, &sinfo);
1287 :
1288 0 : if (sinfo.num_objs > 0)
1289 0 : pr_info("%-17s %10luKB %10luKB\n", s->name,
1290 : (sinfo.active_objs * s->size) / 1024,
1291 : (sinfo.num_objs * s->size) / 1024);
1292 : }
1293 0 : mutex_unlock(&slab_mutex);
1294 : }
1295 :
1296 : /*
1297 : * slabinfo_op - iterator that generates /proc/slabinfo
1298 : *
1299 : * Output layout:
1300 : * cache-name
1301 : * num-active-objs
1302 : * total-objs
1303 : * object size
1304 : * num-active-slabs
1305 : * total-slabs
1306 : * num-pages-per-slab
1307 : * + further values on SMP and with statistics enabled
1308 : */
1309 : static const struct seq_operations slabinfo_op = {
1310 : .start = slab_start,
1311 : .next = slab_next,
1312 : .stop = slab_stop,
1313 : .show = slab_show,
1314 : };
1315 :
1316 0 : static int slabinfo_open(struct inode *inode, struct file *file)
1317 : {
1318 0 : return seq_open(file, &slabinfo_op);
1319 : }
1320 :
1321 : static const struct proc_ops slabinfo_proc_ops = {
1322 : .proc_flags = PROC_ENTRY_PERMANENT,
1323 : .proc_open = slabinfo_open,
1324 : .proc_read = seq_read,
1325 : .proc_write = slabinfo_write,
1326 : .proc_lseek = seq_lseek,
1327 : .proc_release = seq_release,
1328 : };
1329 :
1330 1 : static int __init slab_proc_init(void)
1331 : {
1332 1 : proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
1333 1 : return 0;
1334 : }
1335 : module_init(slab_proc_init);
1336 :
1337 : #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
1338 :
1339 : static __always_inline __realloc_size(2) void *
1340 : __do_krealloc(const void *p, size_t new_size, gfp_t flags)
1341 : {
1342 : void *ret;
1343 : size_t ks;
1344 :
1345 : /* Check for double-free before calling ksize. */
1346 96 : if (likely(!ZERO_OR_NULL_PTR(p))) {
1347 96 : if (!kasan_check_byte(p))
1348 : return NULL;
1349 96 : ks = ksize(p);
1350 : } else
1351 : ks = 0;
1352 :
1353 : /* If the object still fits, repoison it precisely. */
1354 96 : if (ks >= new_size) {
1355 : p = kasan_krealloc((void *)p, new_size, flags);
1356 : return (void *)p;
1357 : }
1358 :
1359 76 : ret = kmalloc_track_caller(new_size, flags);
1360 76 : if (ret && p) {
1361 : /* Disable KASAN checks as the object's redzone is accessed. */
1362 : kasan_disable_current();
1363 76 : memcpy(ret, kasan_reset_tag(p), ks);
1364 : kasan_enable_current();
1365 : }
1366 :
1367 : return ret;
1368 : }
1369 :
1370 : /**
1371 : * krealloc - reallocate memory. The contents will remain unchanged.
1372 : * @p: object to reallocate memory for.
1373 : * @new_size: how many bytes of memory are required.
1374 : * @flags: the type of memory to allocate.
1375 : *
1376 : * The contents of the object pointed to are preserved up to the
1377 : * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
1378 : * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
1379 : * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
1380 : *
1381 : * Return: pointer to the allocated memory or %NULL in case of error
1382 : */
1383 96 : void *krealloc(const void *p, size_t new_size, gfp_t flags)
1384 : {
1385 : void *ret;
1386 :
1387 96 : if (unlikely(!new_size)) {
1388 0 : kfree(p);
1389 0 : return ZERO_SIZE_PTR;
1390 : }
1391 :
1392 96 : ret = __do_krealloc(p, new_size, flags);
1393 96 : if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
1394 76 : kfree(p);
1395 :
1396 : return ret;
1397 : }
1398 : EXPORT_SYMBOL(krealloc);
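/*
 * Illustrative usage (hypothetical caller): assign the result to a
 * temporary so the original buffer is not lost if the reallocation fails:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new) {
 *		kfree(buf);
 *		return -ENOMEM;
 *	}
 *	buf = new;
 */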
1399 :
1400 : /**
1401 : * kfree_sensitive - Clear sensitive information in memory before freeing
1402 : * @p: object to free memory of
1403 : *
1404 : * The memory of the object @p points to is zeroed before being freed.
1405 : * If @p is %NULL, kfree_sensitive() does nothing.
1406 : *
1407 : * Note: this function zeroes the whole allocated buffer which can be a good
1408 : * deal bigger than the requested buffer size passed to kmalloc(). So be
1409 : * careful when using this function in performance sensitive code.
1410 : */
1411 0 : void kfree_sensitive(const void *p)
1412 : {
1413 : size_t ks;
1414 0 : void *mem = (void *)p;
1415 :
1416 0 : ks = ksize(mem);
1417 0 : if (ks) {
1418 0 : kasan_unpoison_range(mem, ks);
1419 : memzero_explicit(mem, ks);
1420 : }
1421 0 : kfree(mem);
1422 0 : }
1423 : EXPORT_SYMBOL(kfree_sensitive);
1424 :
1425 118 : size_t ksize(const void *objp)
1426 : {
1427 : /*
1428 : * We need to first check that the pointer to the object is valid.
1429 : * The KASAN report printed from ksize() is more useful than when
1430 : * it's printed later, when the behaviour could be undefined due to
1431 : * a potential use-after-free or double-free.
1432 : *
1433 : * We use kasan_check_byte(), which is supported for the hardware
1434 : * tag-based KASAN mode, unlike kasan_check_read/write().
1435 : *
1436 : * If the pointed to memory is invalid, we return 0 to avoid users of
1437 : * ksize() writing to and potentially corrupting the memory region.
1438 : *
1439 : * We want to perform the check before __ksize(), to avoid potentially
1440 : * crashing in __ksize() due to accessing invalid metadata.
1441 : */
1442 118 : if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
1443 : return 0;
1444 :
1445 118 : return kfence_ksize(objp) ?: __ksize(objp);
1446 : }
1447 : EXPORT_SYMBOL(ksize);
1448 :
1449 : /* Tracepoints definitions. */
1450 : EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1451 : EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1452 : EXPORT_TRACEPOINT_SYMBOL(kfree);
1453 : EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1454 :
1455 61648 : int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
1456 : {
1457 61648 : if (__should_failslab(s, gfpflags))
1458 : return -ENOMEM;
1459 : return 0;
1460 : }
1461 : ALLOW_ERROR_INJECTION(should_failslab, ERRNO);