// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic show_mem() implementation
 *
 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
 */

#include <linux/blkdev.h>
#include <linux/cma.h>
#include <linux/cpuset.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

#include "internal.h"
#include "swap.h"

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

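/* Prefix the following report line with "Node <nid> " on NUMA builds. */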
static inline void show_node(struct zone *zone)
{
        if (IS_ENABLED(CONFIG_NUMA))
                printk("Node %d ", zone_to_nid(zone));
}

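/*
 * si_mem_available - estimate how many pages userspace could still allocate
 * without pushing the system into reclaim, swapping or OOM. Among other
 * users, /proc/meminfo derives its "MemAvailable" field from this value.
 */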
long si_mem_available(void)
{
        long available;
        unsigned long pagecache;
        unsigned long wmark_low = 0;
        unsigned long pages[NR_LRU_LISTS];
        unsigned long reclaimable;
        struct zone *zone;
        int lru;

        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
                pages[lru] = global_node_page_state(NR_LRU_BASE + lru);

        for_each_zone(zone)
                wmark_low += low_wmark_pages(zone);

        /*
         * Estimate the amount of memory available for userspace allocations,
         * without causing swapping or OOM.
         */
        available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

        /*
         * Not all the page cache can be freed, otherwise the system will
         * start swapping or thrashing. Assume at least half of the page
         * cache, or the low watermark worth of cache, needs to stay.
         */
        pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
        pagecache -= min(pagecache / 2, wmark_low);
        available += pagecache;

        /*
         * Part of the reclaimable slab and other kernel memory consists of
         * items that are in use, and cannot be freed. Cap this estimate at the
         * low watermark.
         */
        reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
                global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
        available += reclaimable - min(reclaimable / 2, wmark_low);

        if (available < 0)
                available = 0;
        return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);

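/*
 * si_meminfo - fill @val with system-wide memory counters, expressed in units
 * of val->mem_unit (PAGE_SIZE). This backs the sysinfo(2) system call and is
 * also used by /proc/meminfo.
 */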
void si_meminfo(struct sysinfo *val)
{
        val->totalram = totalram_pages();
        val->sharedram = global_node_page_state(NR_SHMEM);
        val->freeram = global_zone_page_state(NR_FREE_PAGES);
        val->bufferram = nr_blockdev_pages();
        val->totalhigh = totalhigh_pages();
        val->freehigh = nr_free_highpages();
        val->mem_unit = PAGE_SIZE;
}
EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
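/*
 * si_meminfo_node - like si_meminfo(), but report the counters of a single
 * NUMA node @nid rather than system-wide totals.
 */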
void si_meminfo_node(struct sysinfo *val, int nid)
{
        int zone_type;          /* needs to be signed */
        unsigned long managed_pages = 0;
        unsigned long managed_highpages = 0;
        unsigned long free_highpages = 0;
        pg_data_t *pgdat = NODE_DATA(nid);

        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
                managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
        val->totalram = managed_pages;
        val->sharedram = node_page_state(pgdat, NR_SHMEM);
        val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
                struct zone *zone = &pgdat->node_zones[zone_type];

                if (is_highmem(zone)) {
                        managed_highpages += zone_managed_pages(zone);
                        free_highpages += zone_page_state(zone, NR_FREE_PAGES);
                }
        }
        val->totalhigh = managed_highpages;
        val->freehigh = free_highpages;
#else
        val->totalhigh = managed_highpages;
        val->freehigh = free_highpages;
#endif
        val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
        if (!(flags & SHOW_MEM_FILTER_NODES))
                return false;

        /*
         * No node mask - aka implicit memory NUMA policy. Do not bother with
         * the synchronization - read_mems_allowed_begin - because we do not
         * have to be precise here.
         */
        if (!nodemask)
                nodemask = &cpuset_current_mems_allowed;

        return !node_isset(nid, *nodemask);
}

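/*
 * Print one letter for every migratetype that still has free blocks in the
 * bitmask @type, e.g. "(UM) " for unmovable plus movable; see types[] below
 * for the legend.
 */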
static void show_migration_types(unsigned char type)
{
        static const char types[MIGRATE_TYPES] = {
                [MIGRATE_UNMOVABLE]     = 'U',
                [MIGRATE_MOVABLE]       = 'M',
                [MIGRATE_RECLAIMABLE]   = 'E',
                [MIGRATE_HIGHATOMIC]    = 'H',
#ifdef CONFIG_CMA
                [MIGRATE_CMA]           = 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
                [MIGRATE_ISOLATE]       = 'I',
#endif
        };
        char tmp[MIGRATE_TYPES + 1];
        char *p = tmp;
        int i;

        for (i = 0; i < MIGRATE_TYPES; i++) {
                if (type & (1 << i))
                        *p++ = types[i];
        }

        *p = '\0';
        printk(KERN_CONT "(%s) ", tmp);
}

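/* Does the node have at least one managed zone at or below @max_zone_idx? */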
static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
{
        int zone_idx;

        for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
                if (zone_managed_pages(pgdat->node_zones + zone_idx))
                        return true;
        return false;
}

/*
 * Show free area list (used inside shift_scroll-lock stuff).
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
        unsigned long free_pcp = 0;
        int cpu, nid;
        struct zone *zone;
        pg_data_t *pgdat;

        for_each_populated_zone(zone) {
                if (zone_idx(zone) > max_zone_idx)
                        continue;
                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
                        continue;

                for_each_online_cpu(cpu)
                        free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
        }

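        /* System-wide summary, taken from the global vmstat counters. */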
206 0 : printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
207 : " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
208 : " unevictable:%lu dirty:%lu writeback:%lu\n"
209 : " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
210 : " mapped:%lu shmem:%lu pagetables:%lu\n"
211 : " sec_pagetables:%lu bounce:%lu\n"
212 : " kernel_misc_reclaimable:%lu\n"
213 : " free:%lu free_pcp:%lu free_cma:%lu\n",
214 : global_node_page_state(NR_ACTIVE_ANON),
215 : global_node_page_state(NR_INACTIVE_ANON),
216 : global_node_page_state(NR_ISOLATED_ANON),
217 : global_node_page_state(NR_ACTIVE_FILE),
218 : global_node_page_state(NR_INACTIVE_FILE),
219 : global_node_page_state(NR_ISOLATED_FILE),
220 : global_node_page_state(NR_UNEVICTABLE),
221 : global_node_page_state(NR_FILE_DIRTY),
222 : global_node_page_state(NR_WRITEBACK),
223 : global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
224 : global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
225 : global_node_page_state(NR_FILE_MAPPED),
226 : global_node_page_state(NR_SHMEM),
227 : global_node_page_state(NR_PAGETABLE),
228 : global_node_page_state(NR_SECONDARY_PAGETABLE),
229 : global_zone_page_state(NR_BOUNCE),
230 : global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
231 : global_zone_page_state(NR_FREE_PAGES),
232 : free_pcp,
233 : global_zone_page_state(NR_FREE_CMA_PAGES));
234 :
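        /* Per-node (pgdat) statistics. */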
        for_each_online_pgdat(pgdat) {
                if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
                        continue;
                if (!node_has_managed_zones(pgdat, max_zone_idx))
                        continue;

                printk("Node %d"
                        " active_anon:%lukB"
                        " inactive_anon:%lukB"
                        " active_file:%lukB"
                        " inactive_file:%lukB"
                        " unevictable:%lukB"
                        " isolated(anon):%lukB"
                        " isolated(file):%lukB"
                        " mapped:%lukB"
                        " dirty:%lukB"
                        " writeback:%lukB"
                        " shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        " shmem_thp: %lukB"
                        " shmem_pmdmapped: %lukB"
                        " anon_thp: %lukB"
#endif
                        " writeback_tmp:%lukB"
                        " kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
                        " shadow_call_stack:%lukB"
#endif
                        " pagetables:%lukB"
                        " sec_pagetables:%lukB"
                        " all_unreclaimable? %s"
                        "\n",
                        pgdat->node_id,
                        K(node_page_state(pgdat, NR_ACTIVE_ANON)),
                        K(node_page_state(pgdat, NR_INACTIVE_ANON)),
                        K(node_page_state(pgdat, NR_ACTIVE_FILE)),
                        K(node_page_state(pgdat, NR_INACTIVE_FILE)),
                        K(node_page_state(pgdat, NR_UNEVICTABLE)),
                        K(node_page_state(pgdat, NR_ISOLATED_ANON)),
                        K(node_page_state(pgdat, NR_ISOLATED_FILE)),
                        K(node_page_state(pgdat, NR_FILE_MAPPED)),
                        K(node_page_state(pgdat, NR_FILE_DIRTY)),
                        K(node_page_state(pgdat, NR_WRITEBACK)),
                        K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        K(node_page_state(pgdat, NR_SHMEM_THPS)),
                        K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
                        K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
                        K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
                        node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
                        node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
                        K(node_page_state(pgdat, NR_PAGETABLE)),
                        K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
                        pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
                                "yes" : "no");
        }

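        /* Per-zone counters, watermarks and lowmem_reserve[]. */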
        for_each_populated_zone(zone) {
                int i;

                if (zone_idx(zone) > max_zone_idx)
                        continue;
                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
                        continue;

                free_pcp = 0;
                for_each_online_cpu(cpu)
                        free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;

                show_node(zone);
                printk(KERN_CONT
                        "%s"
                        " free:%lukB"
                        " boost:%lukB"
                        " min:%lukB"
                        " low:%lukB"
                        " high:%lukB"
                        " reserved_highatomic:%lukB"
                        " active_anon:%lukB"
                        " inactive_anon:%lukB"
                        " active_file:%lukB"
                        " inactive_file:%lukB"
                        " unevictable:%lukB"
                        " writepending:%lukB"
                        " present:%lukB"
                        " managed:%lukB"
                        " mlocked:%lukB"
                        " bounce:%lukB"
                        " free_pcp:%lukB"
                        " local_pcp:%ukB"
                        " free_cma:%lukB"
                        "\n",
                        zone->name,
                        K(zone_page_state(zone, NR_FREE_PAGES)),
                        K(zone->watermark_boost),
                        K(min_wmark_pages(zone)),
                        K(low_wmark_pages(zone)),
                        K(high_wmark_pages(zone)),
                        K(zone->nr_reserved_highatomic),
                        K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
                        K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
                        K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
                        K(zone->present_pages),
                        K(zone_managed_pages(zone)),
                        K(zone_page_state(zone, NR_MLOCK)),
                        K(zone_page_state(zone, NR_BOUNCE)),
                        K(free_pcp),
                        K(this_cpu_read(zone->per_cpu_pageset->count)),
                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
                printk("lowmem_reserve[]:");
                for (i = 0; i < MAX_NR_ZONES; i++)
                        printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
                printk(KERN_CONT "\n");
        }

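        /* Buddy allocator state: free blocks per order, tagged with migratetype letters. */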
        for_each_populated_zone(zone) {
                unsigned int order;
                unsigned long nr[MAX_ORDER + 1], flags, total = 0;
                unsigned char types[MAX_ORDER + 1];

                if (zone_idx(zone) > max_zone_idx)
                        continue;
                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
                        continue;
                show_node(zone);
                printk(KERN_CONT "%s: ", zone->name);

                spin_lock_irqsave(&zone->lock, flags);
                for (order = 0; order <= MAX_ORDER; order++) {
                        struct free_area *area = &zone->free_area[order];
                        int type;

                        nr[order] = area->nr_free;
                        total += nr[order] << order;

                        types[order] = 0;
                        for (type = 0; type < MIGRATE_TYPES; type++) {
                                if (!free_area_empty(area, type))
                                        types[order] |= 1 << type;
                        }
                }
                spin_unlock_irqrestore(&zone->lock, flags);
                for (order = 0; order <= MAX_ORDER; order++) {
                        printk(KERN_CONT "%lu*%lukB ",
                               nr[order], K(1UL) << order);
                        if (nr[order])
                                show_migration_types(types[order]);
                }
                printk(KERN_CONT "= %lukB\n", K(total));
        }

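        /* hugetlb pool usage, reported per online node. */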
        for_each_online_node(nid) {
                if (show_mem_node_skip(filter, nid, nodemask))
                        continue;
                hugetlb_show_meminfo_node(nid);
        }

        printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

        show_swap_cache_info();
}

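/*
 * __show_mem - dump the state of the memory subsystem to the kernel log.
 * This is what the OOM killer and allocation-failure warnings print (via the
 * show_mem() wrapper). On top of __show_free_areas() it reports the totals
 * of present, reserved and HighMem/movable-only pages, plus CMA and
 * hwpoisoned pages where configured.
 */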
void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
        unsigned long total = 0, reserved = 0, highmem = 0;
        struct zone *zone;

        printk("Mem-Info:\n");
        __show_free_areas(filter, nodemask, max_zone_idx);

        for_each_populated_zone(zone) {
                total += zone->present_pages;
                reserved += zone->present_pages - zone_managed_pages(zone);

                if (is_highmem(zone))
                        highmem += zone->present_pages;
        }

        printk("%lu pages RAM\n", total);
        printk("%lu pages HighMem/MovableOnly\n", highmem);
        printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
        printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_MEMORY_FAILURE
        printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
}