LCOV - code coverage report
Current view: top level - fs/proc - page.c (source / functions)
Test:         coverage.info
Date:         2023-07-19 18:55:55

                   Hit   Total   Coverage
Lines:               4      96      4.2 %
Functions:           1       4     25.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : #include <linux/memblock.h>
       3             : #include <linux/compiler.h>
       4             : #include <linux/fs.h>
       5             : #include <linux/init.h>
       6             : #include <linux/ksm.h>
       7             : #include <linux/mm.h>
       8             : #include <linux/mmzone.h>
       9             : #include <linux/huge_mm.h>
      10             : #include <linux/proc_fs.h>
      11             : #include <linux/seq_file.h>
      12             : #include <linux/hugetlb.h>
      13             : #include <linux/memremap.h>
      14             : #include <linux/memcontrol.h>
      15             : #include <linux/mmu_notifier.h>
      16             : #include <linux/page_idle.h>
      17             : #include <linux/kernel-page-flags.h>
      18             : #include <linux/uaccess.h>
      19             : #include "internal.h"
      20             : 
      21             : #define KPMSIZE sizeof(u64)
      22             : #define KPMMASK (KPMSIZE - 1)
      23             : #define KPMBITS (KPMSIZE * BITS_PER_BYTE)
      24             : 
      25             : static inline unsigned long get_max_dump_pfn(void)
      26             : {
      27             : #ifdef CONFIG_SPARSEMEM
      28             :         /*
      29             :          * The memmap of early sections is completely populated and marked
      30             :          * online even if max_pfn does not fall on a section boundary -
      31             :          * pfn_to_online_page() will succeed on all pages. Allow inspecting
      32             :          * these memmaps.
      33             :          */
      34             :         return round_up(max_pfn, PAGES_PER_SECTION);
      35             : #else
      36           0 :         return max_pfn;
      37             : #endif
      38             : }
      39             : 
      40             : /* /proc/kpagecount - an array exposing page counts
      41             :  *
      42             :  * Each entry is a u64 representing the corresponding
      43             :  * physical page count.
      44             :  */
      45           0 : static ssize_t kpagecount_read(struct file *file, char __user *buf,
      46             :                              size_t count, loff_t *ppos)
      47             : {
      48           0 :         const unsigned long max_dump_pfn = get_max_dump_pfn();
      49           0 :         u64 __user *out = (u64 __user *)buf;
      50             :         struct page *ppage;
      51           0 :         unsigned long src = *ppos;
      52             :         unsigned long pfn;
      53           0 :         ssize_t ret = 0;
      54             :         u64 pcount;
      55             : 
      56           0 :         pfn = src / KPMSIZE;
      57           0 :         if (src & KPMMASK || count & KPMMASK)
      58             :                 return -EINVAL;
      59           0 :         if (src >= max_dump_pfn * KPMSIZE)
      60             :                 return 0;
      61           0 :         count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
      62             : 
      63           0 :         while (count > 0) {
      64             :                 /*
       65             :                  * TODO: ZONE_DEVICE support requires identifying
      66             :                  * memmaps that were actually initialized.
      67             :                  */
      68           0 :                 ppage = pfn_to_online_page(pfn);
      69             : 
      70           0 :                 if (!ppage || PageSlab(ppage) || page_has_type(ppage))
      71             :                         pcount = 0;
      72             :                 else
      73           0 :                         pcount = page_mapcount(ppage);
      74             : 
      75           0 :                 if (put_user(pcount, out)) {
      76             :                         ret = -EFAULT;
      77             :                         break;
      78             :                 }
      79             : 
      80           0 :                 pfn++;
      81           0 :                 out++;
      82           0 :                 count -= KPMSIZE;
      83             : 
      84           0 :                 cond_resched();
      85             :         }
      86             : 
      87           0 :         *ppos += (char __user *)out - buf;
      88           0 :         if (!ret)
      89           0 :                 ret = (char __user *)out - buf;
      90             :         return ret;
      91             : }
      92             : 
      93             : static const struct proc_ops kpagecount_proc_ops = {
      94             :         .proc_flags     = PROC_ENTRY_PERMANENT,
      95             :         .proc_lseek     = mem_lseek,
      96             :         .proc_read      = kpagecount_read,
      97             : };
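
A minimal userspace sketch of how /proc/kpagecount is typically consumed; it is not part of this file, and the default PFN below is purely a placeholder. The file is an array of u64 entries indexed by page frame number (KPMSIZE bytes each), so a pread() at pfn * sizeof(uint64_t) returns that physical page's map count. Reading it requires root, and the same offset arithmetic applies to the other files below.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            /* PFN to query; pass one on the command line, 0x1000 is only a placeholder */
            uint64_t pfn = (argc > 1) ? strtoull(argv[1], NULL, 0) : 0x1000;
            uint64_t count;
            int fd = open("/proc/kpagecount", O_RDONLY);

            if (fd < 0) {
                    perror("open /proc/kpagecount");
                    return 1;
            }
            /* one u64 per PFN, so the byte offset is pfn * sizeof(u64) */
            if (pread(fd, &count, sizeof(count), pfn * sizeof(count)) != sizeof(count)) {
                    perror("pread");
                    close(fd);
                    return 1;
            }
            printf("pfn 0x%llx: mapcount %llu\n",
                   (unsigned long long)pfn, (unsigned long long)count);
            close(fd);
            return 0;
    }
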
      98             : 
      99             : /* /proc/kpageflags - an array exposing page flags
     100             :  *
     101             :  * Each entry is a u64 representing the corresponding
     102             :  * physical page flags.
     103             :  */
     104             : 
     105             : static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
     106             : {
     107           0 :         return ((kflags >> kbit) & 1) << ubit;
     108             : }
     109             : 
     110           0 : u64 stable_page_flags(struct page *page)
     111             : {
     112             :         u64 k;
     113             :         u64 u;
     114             : 
     115             :         /*
     116             :          * pseudo flag: KPF_NOPAGE
     117             :          * it differentiates a memory hole from a page with no flags
     118             :          */
     119           0 :         if (!page)
     120             :                 return 1 << KPF_NOPAGE;
     121             : 
     122           0 :         k = page->flags;
     123           0 :         u = 0;
     124             : 
     125             :         /*
     126             :          * pseudo flags for the well known (anonymous) memory mapped pages
     127             :          *
     128             :          * Note that page->_mapcount is overloaded in SLAB, so the
     129             :          * simple test in page_mapped() is not enough.
     130             :          */
     131           0 :         if (!PageSlab(page) && page_mapped(page))
     132           0 :                 u |= 1 << KPF_MMAP;
     133           0 :         if (PageAnon(page))
     134           0 :                 u |= 1 << KPF_ANON;
     135           0 :         if (PageKsm(page))
     136             :                 u |= 1 << KPF_KSM;
     137             : 
     138             :         /*
     139             :          * compound pages: export both head/tail info
     140             :          * they together define a compound page's start/end pos and order
     141             :          */
     142           0 :         if (PageHead(page))
     143           0 :                 u |= 1 << KPF_COMPOUND_HEAD;
     144           0 :         if (PageTail(page))
     145           0 :                 u |= 1 << KPF_COMPOUND_TAIL;
     146           0 :         if (PageHuge(page))
     147             :                 u |= 1 << KPF_HUGE;
     148             :         /*
     149             :          * PageTransCompound can be true for non-huge compound pages (slab
     150             :          * pages or pages allocated by drivers with __GFP_COMP) because it
     151             :          * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
     152             :          * to make sure a given page is a thp, not a non-huge compound page.
     153             :          */
     154           0 :         else if (PageTransCompound(page)) {
     155             :                 struct page *head = compound_head(page);
     156             : 
     157             :                 if (PageLRU(head) || PageAnon(head))
     158             :                         u |= 1 << KPF_THP;
     159             :                 else if (is_huge_zero_page(head)) {
     160             :                         u |= 1 << KPF_ZERO_PAGE;
     161             :                         u |= 1 << KPF_THP;
     162             :                 }
     163           0 :         } else if (is_zero_pfn(page_to_pfn(page)))
     164           0 :                 u |= 1 << KPF_ZERO_PAGE;
     165             : 
     166             : 
     167             :         /*
     168             :          * Caveats on high order pages: PG_buddy and PG_slab will only be set
     169             :          * on the head page.
     170             :          */
     171           0 :         if (PageBuddy(page))
     172           0 :                 u |= 1 << KPF_BUDDY;
     173           0 :         else if (page_count(page) == 0 && is_free_buddy_page(page))
     174           0 :                 u |= 1 << KPF_BUDDY;
     175             : 
     176           0 :         if (PageOffline(page))
     177           0 :                 u |= 1 << KPF_OFFLINE;
     178           0 :         if (PageTable(page))
     179           0 :                 u |= 1 << KPF_PGTABLE;
     180             : 
     181           0 :         if (page_is_idle(page))
     182             :                 u |= 1 << KPF_IDLE;
     183             : 
     184           0 :         u |= kpf_copy_bit(k, KPF_LOCKED,        PG_locked);
     185             : 
     186           0 :         u |= kpf_copy_bit(k, KPF_SLAB,          PG_slab);
     187           0 :         if (PageTail(page) && PageSlab(page))
     188           0 :                 u |= 1 << KPF_SLAB;
     189             : 
     190           0 :         u |= kpf_copy_bit(k, KPF_ERROR,         PG_error);
     191           0 :         u |= kpf_copy_bit(k, KPF_DIRTY,         PG_dirty);
     192           0 :         u |= kpf_copy_bit(k, KPF_UPTODATE,      PG_uptodate);
     193           0 :         u |= kpf_copy_bit(k, KPF_WRITEBACK,     PG_writeback);
     194             : 
     195           0 :         u |= kpf_copy_bit(k, KPF_LRU,           PG_lru);
     196           0 :         u |= kpf_copy_bit(k, KPF_REFERENCED,    PG_referenced);
     197           0 :         u |= kpf_copy_bit(k, KPF_ACTIVE,        PG_active);
     198           0 :         u |= kpf_copy_bit(k, KPF_RECLAIM,       PG_reclaim);
     199             : 
     200           0 :         if (PageSwapCache(page))
     201           0 :                 u |= 1 << KPF_SWAPCACHE;
     202           0 :         u |= kpf_copy_bit(k, KPF_SWAPBACKED,    PG_swapbacked);
     203             : 
     204           0 :         u |= kpf_copy_bit(k, KPF_UNEVICTABLE,   PG_unevictable);
     205           0 :         u |= kpf_copy_bit(k, KPF_MLOCKED,       PG_mlocked);
     206             : 
     207             : #ifdef CONFIG_MEMORY_FAILURE
     208             :         u |= kpf_copy_bit(k, KPF_HWPOISON,      PG_hwpoison);
     209             : #endif
     210             : 
     211             : #ifdef CONFIG_ARCH_USES_PG_UNCACHED
     212             :         u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
     213             : #endif
     214             : 
     215           0 :         u |= kpf_copy_bit(k, KPF_RESERVED,      PG_reserved);
     216           0 :         u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,  PG_mappedtodisk);
     217           0 :         u |= kpf_copy_bit(k, KPF_PRIVATE,       PG_private);
     218           0 :         u |= kpf_copy_bit(k, KPF_PRIVATE_2,     PG_private_2);
     219           0 :         u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
     220           0 :         u |= kpf_copy_bit(k, KPF_ARCH,          PG_arch_1);
     221             : #ifdef CONFIG_ARCH_USES_PG_ARCH_X
     222             :         u |= kpf_copy_bit(k, KPF_ARCH_2,        PG_arch_2);
     223             :         u |= kpf_copy_bit(k, KPF_ARCH_3,        PG_arch_3);
     224             : #endif
     225             : 
     226           0 :         return u;
     227             : };
     228             : 
     229           0 : static ssize_t kpageflags_read(struct file *file, char __user *buf,
     230             :                              size_t count, loff_t *ppos)
     231             : {
     232           0 :         const unsigned long max_dump_pfn = get_max_dump_pfn();
     233           0 :         u64 __user *out = (u64 __user *)buf;
     234             :         struct page *ppage;
     235           0 :         unsigned long src = *ppos;
     236             :         unsigned long pfn;
     237           0 :         ssize_t ret = 0;
     238             : 
     239           0 :         pfn = src / KPMSIZE;
     240           0 :         if (src & KPMMASK || count & KPMMASK)
     241             :                 return -EINVAL;
     242           0 :         if (src >= max_dump_pfn * KPMSIZE)
     243             :                 return 0;
     244           0 :         count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
     245             : 
     246           0 :         while (count > 0) {
     247             :                 /*
      248             :                  * TODO: ZONE_DEVICE support requires identifying
     249             :                  * memmaps that were actually initialized.
     250             :                  */
     251           0 :                 ppage = pfn_to_online_page(pfn);
     252             : 
     253           0 :                 if (put_user(stable_page_flags(ppage), out)) {
     254             :                         ret = -EFAULT;
     255             :                         break;
     256             :                 }
     257             : 
     258           0 :                 pfn++;
     259           0 :                 out++;
     260           0 :                 count -= KPMSIZE;
     261             : 
     262           0 :                 cond_resched();
     263             :         }
     264             : 
     265           0 :         *ppos += (char __user *)out - buf;
     266           0 :         if (!ret)
     267           0 :                 ret = (char __user *)out - buf;
     268             :         return ret;
     269             : }
     270             : 
     271             : static const struct proc_ops kpageflags_proc_ops = {
     272             :         .proc_flags     = PROC_ENTRY_PERMANENT,
     273             :         .proc_lseek     = mem_lseek,
     274             :         .proc_read      = kpageflags_read,
     275             : };
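
A companion userspace sketch for /proc/kpageflags, again illustrative rather than part of this file. Each entry is the bitmask assembled by stable_page_flags() above; the bit positions are the KPF_* constants exported in the UAPI header <linux/kernel-page-flags.h>, of which only a few are decoded here.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <linux/kernel-page-flags.h>    /* KPF_* bit numbers (UAPI) */

    int main(int argc, char **argv)
    {
            /* PFN to query; 0x1000 is only a placeholder */
            uint64_t pfn = (argc > 1) ? strtoull(argv[1], NULL, 0) : 0x1000;
            uint64_t flags;
            int fd = open("/proc/kpageflags", O_RDONLY);

            if (fd < 0) {
                    perror("open /proc/kpageflags");
                    return 1;
            }
            if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags)) {
                    perror("pread");
                    close(fd);
                    return 1;
            }
            printf("pfn 0x%llx flags 0x%llx%s%s%s%s\n",
                   (unsigned long long)pfn, (unsigned long long)flags,
                   (flags >> KPF_NOPAGE) & 1 ? " NOPAGE" : "",
                   (flags >> KPF_BUDDY)  & 1 ? " BUDDY"  : "",
                   (flags >> KPF_LRU)    & 1 ? " LRU"    : "",
                   (flags >> KPF_ANON)   & 1 ? " ANON"   : "");
            close(fd);
            return 0;
    }
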
     276             : 
     277             : #ifdef CONFIG_MEMCG
     278             : static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
     279             :                                 size_t count, loff_t *ppos)
     280             : {
     281             :         const unsigned long max_dump_pfn = get_max_dump_pfn();
     282             :         u64 __user *out = (u64 __user *)buf;
     283             :         struct page *ppage;
     284             :         unsigned long src = *ppos;
     285             :         unsigned long pfn;
     286             :         ssize_t ret = 0;
     287             :         u64 ino;
     288             : 
     289             :         pfn = src / KPMSIZE;
     290             :         if (src & KPMMASK || count & KPMMASK)
     291             :                 return -EINVAL;
     292             :         if (src >= max_dump_pfn * KPMSIZE)
     293             :                 return 0;
     294             :         count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
     295             : 
     296             :         while (count > 0) {
     297             :                 /*
      298             :                  * TODO: ZONE_DEVICE support requires identifying
     299             :                  * memmaps that were actually initialized.
     300             :                  */
     301             :                 ppage = pfn_to_online_page(pfn);
     302             : 
     303             :                 if (ppage)
     304             :                         ino = page_cgroup_ino(ppage);
     305             :                 else
     306             :                         ino = 0;
     307             : 
     308             :                 if (put_user(ino, out)) {
     309             :                         ret = -EFAULT;
     310             :                         break;
     311             :                 }
     312             : 
     313             :                 pfn++;
     314             :                 out++;
     315             :                 count -= KPMSIZE;
     316             : 
     317             :                 cond_resched();
     318             :         }
     319             : 
     320             :         *ppos += (char __user *)out - buf;
     321             :         if (!ret)
     322             :                 ret = (char __user *)out - buf;
     323             :         return ret;
     324             : }
     325             : 
     326             : static const struct proc_ops kpagecgroup_proc_ops = {
     327             :         .proc_flags     = PROC_ENTRY_PERMANENT,
     328             :         .proc_lseek     = mem_lseek,
     329             :         .proc_read      = kpagecgroup_read,
     330             : };
     331             : #endif /* CONFIG_MEMCG */
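
Each /proc/kpagecgroup entry is the inode number of the memory cgroup the page is charged to (0 when it is not charged or the PFN is not online). Below is a sketch of how userspace might match that value against a cgroup directory's st_ino; the helper name, the cgroup path and the example PFN are hypothetical.

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/stat.h>

    /* Hypothetical cgroup path; point it at an existing memory cgroup. */
    #define CGROUP_DIR "/sys/fs/cgroup/mygroup"

    /* Returns 1 if the page at 'pfn' is charged to the cgroup with inode
     * 'cg_ino', 0 if not, -1 on read error. */
    static int pfn_in_cgroup(int fd, uint64_t pfn, uint64_t cg_ino)
    {
            uint64_t ino;

            if (pread(fd, &ino, sizeof(ino), pfn * sizeof(ino)) != sizeof(ino))
                    return -1;
            return ino == cg_ino;
    }

    int main(void)
    {
            struct stat st;
            int fd = open("/proc/kpagecgroup", O_RDONLY);

            if (fd < 0 || stat(CGROUP_DIR, &st) != 0) {
                    perror("setup");
                    return 1;
            }
            /* 0x1000 is an arbitrary example PFN */
            printf("pfn 0x1000 charged to %s: %d\n",
                   CGROUP_DIR, pfn_in_cgroup(fd, 0x1000, (uint64_t)st.st_ino));
            close(fd);
            return 0;
    }
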
     332             : 
     333           1 : static int __init proc_page_init(void)
     334             : {
     335           1 :         proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
     336           1 :         proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
     337             : #ifdef CONFIG_MEMCG
     338             :         proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
     339             : #endif
     340           1 :         return 0;
     341             : }
     342             : fs_initcall(proc_page_init);
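
All three files are indexed by physical frame number, which userspace normally obtains by pushing a virtual address through /proc/pid/pagemap (one u64 per virtual page: PFN in bits 0-54, bit 63 set when the page is present; the PFN field is hidden from callers without CAP_SYS_ADMIN). A sketch of that translation step, using an illustrative helper name, which would supply the pfn used by the examples above:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Illustrative helper: return the PFN backing 'vaddr' in this process,
     * or 0 if the page is absent or the PFN is hidden from us. */
    static uint64_t vaddr_to_pfn(const void *vaddr)
    {
            long psize = sysconf(_SC_PAGESIZE);
            uint64_t entry, vpage = (uintptr_t)vaddr / psize;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return 0;
            if (pread(fd, &entry, sizeof(entry), vpage * sizeof(entry)) != sizeof(entry))
                    entry = 0;
            close(fd);
            /* bit 63 = page present, bits 0-54 = PFN */
            return (entry & (1ULL << 63)) ? (entry & ((1ULL << 55) - 1)) : 0;
    }

    int main(void)
    {
            static char buf[4096];
            uint64_t pfn;

            buf[0] = 1;     /* touch the page so it is faulted in */
            pfn = vaddr_to_pfn(buf);
            printf("vaddr %p -> pfn 0x%llx\n", (void *)buf, (unsigned long long)pfn);
            return 0;
    }
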

Generated by: LCOV version 1.14