/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
 * map during early boot as hot(un)pluggable system RAM (e.g., memory range
 * that might get hotunplugged later). With "movable_node" set on the kernel
 * commandline, try keeping this memory region hotunpluggable. Does not apply
 * to memblocks added ("hotplugged") after early boot.
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
 * reserved in the memory map; refer to memblock_mark_nomap() description
 * for further details
 * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
 * via a driver, and never indicated in the firmware-provided memory map as
 * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
 * kernel resource tree.
 */
enum memblock_flags {
        MEMBLOCK_NONE           = 0x0,  /* No special request */
        MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
        MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
        MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
        MEMBLOCK_DRIVER_MANAGED = 0x8,  /* always detected via a driver */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
        phys_addr_t base;
        phys_addr_t size;
        enum memblock_flags flags;
#ifdef CONFIG_NUMA
        int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
        unsigned long cnt;
        unsigned long max;
        phys_addr_t total_size;
        struct memblock_region *regions;
        char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
        bool bottom_up;  /* is bottom up direction? */
        phys_addr_t current_limit;
        struct memblock_type memory;
        struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
                      enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
                              phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
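
/*
 * Example (illustrative sketch, not part of the kernel sources): a typical
 * early-boot sequence first registers the RAM reported by firmware, then
 * reserves ranges that must never be handed out by the allocator. The
 * addresses and sizes below are made up; SZ_* constants come from
 * <linux/sizes.h>.
 *
 *      memblock_add(0x80000000, SZ_256M);
 *      memblock_reserve(0x80000000, SZ_1M);
 *      memblock_mark_hotplug(0x88000000, SZ_128M);
 */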

void memblock_free_all(void);
void memblock_free(void *ptr, size_t size);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
                          struct memblock_type *type_a,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);

void memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
                                        phys_addr_t *out_start,
                                        phys_addr_t *out_end)
{
        extern struct memblock_type physmem;

        __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
                         out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)                 \
        for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \
             i != (u64)ULLONG_MAX;                                      \
             __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,             \
                           p_start, p_end, p_nid)                       \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,        \
                                     p_start, p_end, p_nid);            \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range(&i, nid, flags, type_a, type_b,               \
                              p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,         \
                                 p_start, p_end, p_nid)                 \
        for (i = (u64)ULLONG_MAX,                                       \
                     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
                                          p_start, p_end, p_nid);       \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,   \
                                  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end) \
        __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,       \
                             MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
                             p_start, p_end, NULL)
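
/*
 * Example (illustrative sketch): walking every usable memory range during
 * early boot. The helper name is hypothetical; the iteration pattern follows
 * the macro documentation above.
 *
 *      static void __init dump_usable_memory(void)
 *      {
 *              phys_addr_t start, end;
 *              u64 i;
 *
 *              for_each_mem_range(i, &start, &end)
 *                      pr_info("usable: %pa..%pa\n", &start, &end);
 *      }
 */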

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)                       \
        __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
                                 MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
                                 p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)                  \
        __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,     \
                             MEMBLOCK_NONE, p_start, p_end, NULL)
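
/*
 * Example (illustrative sketch): totalling reserved memory. The variable
 * names are made up.
 *
 *      phys_addr_t start, end, total = 0;
 *      u64 i;
 *
 *      for_each_reserved_mem_range(i, &start, &end)
 *              total += end - start;
 */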

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_NOMAP;
}

static inline bool memblock_is_driver_managed(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_DRIVER_MANAGED;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
                            unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)           \
        for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
             i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
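
/*
 * Example (illustrative sketch): counting the pages present across all
 * nodes. Variable names are made up.
 *
 *      unsigned long start_pfn, end_pfn, nr_pages = 0;
 *      int i, nid;
 *
 *      for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *              nr_pages += end_pfn - start_pfn;
 */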

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                                  unsigned long *out_spfn,
                                  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)    \
        for (i = 0,                                                     \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);        \
             i != U64_MAX;                                      \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
        for (; i != U64_MAX;                                      \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
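
/*
 * Example (illustrative sketch, only meaningful under
 * CONFIG_DEFERRED_STRUCT_PAGE_INIT): initialising struct pages for the free
 * ranges of a zone. The helper name is hypothetical.
 *
 *      unsigned long spfn, epfn;
 *      u64 i;
 *
 *      for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
 *              init_range_pages(zone, spfn, epfn);
 */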

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)   \
        __for_each_mem_range(i, &memblock.memory, &memblock.reserved,   \
                             nid, flags, p_start, p_end, p_nid)
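
/*
 * Example (illustrative sketch): scanning for a free range large enough for
 * a boot-time buffer, which is essentially what the allocation functions
 * below do internally. "wanted_size" is a made-up variable.
 *
 *      phys_addr_t start, end;
 *      u64 i;
 *
 *      for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *                              &start, &end, NULL)
 *              if (end - start >= wanted_size)
 *                      break;
 */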

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,  \
                                        p_nid)                          \
        __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
                                 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
        r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return 0;
}
#endif /* CONFIG_NUMA */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE       0
#define MEMBLOCK_ALLOC_NOLEAKTRACE      1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
                                      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
                                      phys_addr_t align, phys_addr_t start,
                                      phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
                                                       phys_addr_t align)
{
        return memblock_phys_alloc_range(size, align, 0,
                                         MEMBLOCK_ALLOC_ACCESSIBLE);
}

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                             phys_addr_t min_addr, phys_addr_t max_addr,
                             int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
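
/*
 * Example (illustrative sketch): allocating a zeroed, mapped boot-time
 * table. memblock_alloc() returns NULL on failure; early-boot callers
 * commonly panic. "boot_table" and "nr_entries" are made up.
 *
 *      struct boot_table *tbl;
 *
 *      tbl = memblock_alloc(nr_entries * sizeof(*tbl), SMP_CACHE_BYTES);
 *      if (!tbl)
 *              panic("%s: failed to allocate boot table\n", __func__);
 */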

static inline void *memblock_alloc_raw(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
                                          MEMBLOCK_ALLOC_ACCESSIBLE,
                                          NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
                                                phys_addr_t align,
                                                phys_addr_t min_addr)
{
        return memblock_alloc_try_nid(size, align, min_addr,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
                                                phys_addr_t align, int nid)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
        memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock allocates memory in the bottom-up direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
        return memblock.bottom_up;
}
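
/*
 * Example (illustrative sketch): temporarily switching the allocation
 * direction, as some architecture setup code does when early allocations
 * should land in low memory near the kernel image. "ptr" and "size" are
 * made-up variables.
 *
 *      bool saved = memblock_bottom_up();
 *
 *      memblock_set_bottom_up(true);
 *      ptr = memblock_alloc(size, SMP_CACHE_BYTES);
 *      memblock_set_bottom_up(saved);
 */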

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base + reg->size);
}

/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)                                     \
        for (region = memblock.memory.regions;                          \
             region < (memblock.memory.regions + memblock.memory.cnt);       \
             region++)

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)                            \
        for (region = memblock.reserved.regions;                        \
             region < (memblock.reserved.regions + memblock.reserved.cnt); \
             region++)
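
/*
 * Example (illustrative sketch): walking regions directly when per-region
 * attributes matter, combining the iterator with the flag helpers and pfn
 * accessors above.
 *
 *      struct memblock_region *r;
 *
 *      for_each_mem_region(r) {
 *              if (memblock_is_nomap(r))
 *                      continue;
 *              pr_info("node %d: pfns %lu..%lu\n",
 *                      memblock_get_region_node(r),
 *                      memblock_region_memory_base_pfn(r),
 *                      memblock_region_memory_end_pfn(r));
 *      }
 */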

extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long low_limit,
                                     unsigned long high_limit);

#define HASH_EARLY      0x00000001      /* Allocating during early boot? */
#define HASH_SMALL      0x00000002      /* sub-page allocation allowed, min
                                         * shift passed via *_hash_shift */
#define HASH_ZERO       0x00000004      /* Zero allocated hash table */
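
/*
 * Example (illustrative sketch, modelled on the VFS inode-cache setup in
 * fs/inode.c): sizing and allocating a large boot-time hash table.
 *
 *      inode_hashtable = alloc_large_system_hash("Inode-cache",
 *                                                sizeof(struct hlist_head),
 *                                                ihash_entries,
 *                                                14,
 *                                                HASH_ZERO,
 *                                                &i_hash_shift,
 *                                                &i_hash_mask,
 *                                                0,
 *                                                0);
 */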

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;            /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern phys_addr_t early_memtest_bad_size;      /* Size of faulty ram found by memtest */
extern bool early_memtest_done;                 /* Was early memtest done? */
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* _LINUX_MEMBLOCK_H */
