/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
 * map during early boot as hot(un)pluggable system RAM (e.g., memory range
 * that might get hotunplugged later). With "movable_node" set on the kernel
 * commandline, try keeping this memory region hotunpluggable. Does not apply
 * to memblocks added ("hotplugged") after early boot.
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
 * reserved in the memory map; refer to memblock_mark_nomap() description
 * for further details
 * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
 * via a driver, and never indicated in the firmware-provided memory map as
 * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
 * kernel resource tree.
 */
enum memblock_flags {
        MEMBLOCK_NONE           = 0x0,  /* No special request */
        MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
        MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
        MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
        MEMBLOCK_DRIVER_MANAGED = 0x8,  /* always detected via a driver */
};
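
/*
 * Example (a minimal sketch; the address range is made up): the flags form
 * a bitmask, set via the memblock_mark_*() helpers declared below and
 * tested via the memblock_is_*() accessors. E.g. to keep a firmware range
 * out of the kernel direct mapping:
 *
 *      phys_addr_t base = 0x80000000, size = SZ_2M;
 *
 *      memblock_reserve(base, size);
 *      memblock_mark_nomap(base, size);
 */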

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
        phys_addr_t base;
        phys_addr_t size;
        enum memblock_flags flags;
#ifdef CONFIG_NUMA
        int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
        unsigned long cnt;
        unsigned long max;
        phys_addr_t total_size;
        struct memblock_region *regions;
        char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: if true, memory is allocated in the bottom-up direction
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
        bool bottom_up;  /* is bottom up direction? */
        phys_addr_t current_limit;
        struct memblock_type memory;
        struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
                      enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
                              phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

void memblock_free_all(void);
void memblock_free(void *ptr, size_t size);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
                          struct memblock_type *type_a,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);

void memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
                                        phys_addr_t *out_start,
                                        phys_addr_t *out_end)
{
        extern struct memblock_type physmem;

        __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
                         out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)                 \
        for (i = 0, __next_physmem_range(&i, type, p_start, p_end);     \
             i != (u64)ULLONG_MAX;                                      \
             __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
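
/*
 * Example (a minimal sketch; only meaningful where
 * CONFIG_HAVE_MEMBLOCK_PHYS_MAP is set, e.g. on s390): walk every range of
 * physical memory the firmware reported, passing %NULL so nothing is
 * excluded from the iteration.
 *
 *      u64 i;
 *      phys_addr_t start, end;
 *
 *      for_each_physmem_range(i, NULL, &start, &end)
 *              pr_info("physmem: [%pa-%pa]\n", &start, &end);
 */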

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,             \
                           p_start, p_end, p_nid)                       \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,    \
                                     p_start, p_end, p_nid);            \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range(&i, nid, flags, type_a, type_b,           \
                              p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,         \
                                 p_start, p_end, p_nid)                 \
        for (i = (u64)ULLONG_MAX,                                       \
                     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
                                          p_start, p_end, p_nid);       \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end) \
        __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,   \
                             MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
                             p_start, p_end, NULL)
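
/*
 * Example (a minimal sketch): the canonical way to walk all memory known to
 * memblock during early boot, e.g. from an arch's bootmem setup code:
 *
 *      u64 i;
 *      phys_addr_t start, end;
 *
 *      for_each_mem_range(i, &start, &end)
 *              pr_info("memory: [%pa-%pa]\n", &start, &end);
 */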

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)                       \
        __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
                                 MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
                                 p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)                  \
        __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
                             MEMBLOCK_NONE, p_start, p_end, NULL)
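
/*
 * Example (a minimal sketch): total up everything reserved so far. Since
 * reserved regions do not overlap, the sum matches the total_size the
 * allocator maintains internally.
 *
 *      u64 i;
 *      phys_addr_t start, end, total = 0;
 *
 *      for_each_reserved_mem_range(i, &start, &end)
 *              total += end - start;
 */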

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_NOMAP;
}

static inline bool memblock_is_driver_managed(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_DRIVER_MANAGED;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
                            unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)           \
        for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
             i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
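
/*
 * Example (a minimal sketch): count the pages that early memory ranges
 * contribute to node 0. Note that @i is a plain int here, unlike the u64
 * cursor used by the range iterators above.
 *
 *      int i, nid;
 *      unsigned long start_pfn, end_pfn, pages = 0;
 *
 *      for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, &nid)
 *              pages += end_pfn - start_pfn;
 */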

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                                  unsigned long *out_spfn,
                                  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)    \
        for (i = 0,                                                     \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);    \
             i != U64_MAX;                                              \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
        for (; i != U64_MAX;                                            \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)   \
        __for_each_mem_range(i, &memblock.memory, &memblock.reserved,   \
                             nid, flags, p_start, p_end, p_nid)
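
/*
 * Example (a minimal sketch; @need is a made-up variable): scan for the
 * first free range that can hold @need bytes. In spirit this is what the
 * memblock allocator's own bottom-up search does.
 *
 *      u64 i;
 *      phys_addr_t start, end;
 *
 *      for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *                              &start, &end, NULL)
 *              if (end - start >= need)
 *                      return start;
 */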

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,  \
                                        p_nid)                          \
        __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
                                 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
        r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return 0;
}
#endif /* CONFIG_NUMA */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE       0
#define MEMBLOCK_ALLOC_NOLEAKTRACE      1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
                                      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
                                      phys_addr_t align, phys_addr_t start,
                                      phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
                                                       phys_addr_t align)
{
        return memblock_phys_alloc_range(size, align, 0,
                                         MEMBLOCK_ALLOC_ACCESSIBLE);
}
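
/*
 * Example (a minimal sketch): allocate a page-aligned physical range early
 * in boot and release it once it is no longer needed. The caller gets a
 * physical address (0 on failure) and must map it before use; compare
 * memblock_alloc() below, which returns a pointer into the direct map.
 *
 *      phys_addr_t pa = memblock_phys_alloc(SZ_64K, PAGE_SIZE);
 *
 *      if (!pa)
 *              panic("early allocation of 64K failed");
 *      ...
 *      memblock_phys_free(pa, SZ_64K);
 */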

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                             phys_addr_t min_addr, phys_addr_t max_addr,
                             int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
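
/*
 * Example (a minimal sketch; 'struct foo' is a made-up type): the common
 * early-boot allocation pattern. memblock_alloc() returns zeroed, mapped
 * memory or NULL; panicking on failure is the usual idiom, since boot
 * cannot proceed without the allocation.
 *
 *      struct foo *tbl = memblock_alloc(sizeof(*tbl), SMP_CACHE_BYTES);
 *
 *      if (!tbl)
 *              panic("%s: failed to allocate table\n", __func__);
 */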

static inline void *memblock_alloc_raw(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
                                          MEMBLOCK_ALLOC_ACCESSIBLE,
                                          NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
                                                phys_addr_t align,
                                                phys_addr_t min_addr)
{
        return memblock_alloc_try_nid(size, align, min_addr,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
                                                phys_addr_t align, int nid)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
        memblock.bottom_up = enable;
}

/*
 * Check whether the allocation direction is bottom-up. If this returns
 * true, memblock will allocate memory in the bottom-up direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
        return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base + reg->size);
}
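
/*
 * Worked example (illustrative): with 4K pages and a non-aligned region
 * [0x1800, 0x2800), the memory accessors round inward (PFN_UP(0x1800) == 2,
 * PFN_DOWN(0x2800) == 2, so no page counts as fully usable memory), while
 * the reserved accessors round outward (PFN_DOWN(0x1800) == 1,
 * PFN_UP(0x2800) == 3, so every partially covered page stays reserved).
 */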

/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)                                     \
        for (region = memblock.memory.regions;                          \
             region < (memblock.memory.regions + memblock.memory.cnt); \
             region++)

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)                            \
        for (region = memblock.reserved.regions;                        \
             region < (memblock.reserved.regions + memblock.reserved.cnt); \
             region++)
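
/*
 * Example (a minimal sketch): unlike the range iterators above, these
 * loops expose the struct memblock_region itself, so per-region attributes
 * are visible, e.g. to total up mirrored memory:
 *
 *      struct memblock_region *r;
 *      phys_addr_t mirrored = 0;
 *
 *      for_each_mem_region(r)
 *              if (memblock_is_mirror(r))
 *                      mirrored += r->size;
 */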

extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long low_limit,
                                     unsigned long high_limit);

#define HASH_EARLY      0x00000001      /* Allocating during early boot? */
#define HASH_SMALL      0x00000002      /* sub-page allocation allowed, min
                                         * shift passed via *_hash_shift */
#define HASH_ZERO       0x00000004      /* Zero allocated hash table */
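
/*
 * Example (a minimal sketch modeled on the inode/dentry cache setup; the
 * table name, variables and scale are illustrative): size a zeroed hash
 * table from the amount of system memory, receiving the resulting shift
 * back through the pointer argument.
 *
 *      static unsigned int example_shift __initdata;
 *
 *      table = alloc_large_system_hash("Example-cache",
 *                                      sizeof(struct hlist_head),
 *                                      0,              // entries: auto-size
 *                                      14,             // scale
 *                                      HASH_ZERO,
 *                                      &example_shift,
 *                                      NULL,           // mask not needed
 *                                      0, 0);          // no entry-count limits
 */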

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;            /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern phys_addr_t early_memtest_bad_size;      /* Size of faulty ram found by memtest */
extern bool early_memtest_done;                 /* Was early memtest done? */
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* _LINUX_MEMBLOCK_H */
