LCOV - code coverage report
Current view: top level - fs - binfmt_elf.c (source / functions)
Test: coverage.info
Date: 2023-08-24 13:40:31

                   Hit    Total    Coverage
Lines:               3      651       0.5 %
Functions:           1       21       4.8 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * linux/fs/binfmt_elf.c
       4             :  *
       5             :  * These are the functions used to load ELF format executables as used
       6             :  * on SVr4 machines.  Information on the format may be found in the book
       7             :  * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
       8             :  * Tools".
       9             :  *
      10             :  * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
      11             :  */
      12             : 
      13             : #include <linux/module.h>
      14             : #include <linux/kernel.h>
      15             : #include <linux/fs.h>
      16             : #include <linux/log2.h>
      17             : #include <linux/mm.h>
      18             : #include <linux/mman.h>
      19             : #include <linux/errno.h>
      20             : #include <linux/signal.h>
      21             : #include <linux/binfmts.h>
      22             : #include <linux/string.h>
      23             : #include <linux/file.h>
      24             : #include <linux/slab.h>
      25             : #include <linux/personality.h>
      26             : #include <linux/elfcore.h>
      27             : #include <linux/init.h>
      28             : #include <linux/highuid.h>
      29             : #include <linux/compiler.h>
      30             : #include <linux/highmem.h>
      31             : #include <linux/hugetlb.h>
      32             : #include <linux/pagemap.h>
      33             : #include <linux/vmalloc.h>
      34             : #include <linux/security.h>
      35             : #include <linux/random.h>
      36             : #include <linux/elf.h>
      37             : #include <linux/elf-randomize.h>
      38             : #include <linux/utsname.h>
      39             : #include <linux/coredump.h>
      40             : #include <linux/sched.h>
      41             : #include <linux/sched/coredump.h>
      42             : #include <linux/sched/task_stack.h>
      43             : #include <linux/sched/cputime.h>
      44             : #include <linux/sizes.h>
      45             : #include <linux/types.h>
      46             : #include <linux/cred.h>
      47             : #include <linux/dax.h>
      48             : #include <linux/uaccess.h>
      49             : #include <linux/rseq.h>
      50             : #include <asm/param.h>
      51             : #include <asm/page.h>
      52             : 
      53             : #ifndef ELF_COMPAT
      54             : #define ELF_COMPAT 0
      55             : #endif
      56             : 
      57             : #ifndef user_long_t
      58             : #define user_long_t long
      59             : #endif
      60             : #ifndef user_siginfo_t
      61             : #define user_siginfo_t siginfo_t
      62             : #endif
      63             : 
      64             : /* That's for binfmt_elf_fdpic to deal with */
      65             : #ifndef elf_check_fdpic
      66             : #define elf_check_fdpic(ex) false
      67             : #endif
      68             : 
      69             : static int load_elf_binary(struct linux_binprm *bprm);
      70             : 
      71             : #ifdef CONFIG_USELIB
      72             : static int load_elf_library(struct file *);
      73             : #else
      74             : #define load_elf_library NULL
      75             : #endif
      76             : 
      77             : /*
      78             :  * If we don't support core dumping, then supply a NULL so we
      79             :  * don't even try.
      80             :  */
      81             : #ifdef CONFIG_ELF_CORE
      82             : static int elf_core_dump(struct coredump_params *cprm);
      83             : #else
      84             : #define elf_core_dump   NULL
      85             : #endif
      86             : 
      87             : #if ELF_EXEC_PAGESIZE > PAGE_SIZE
      88             : #define ELF_MIN_ALIGN   ELF_EXEC_PAGESIZE
      89             : #else
      90             : #define ELF_MIN_ALIGN   PAGE_SIZE
      91             : #endif
      92             : 
      93             : #ifndef ELF_CORE_EFLAGS
      94             : #define ELF_CORE_EFLAGS 0
      95             : #endif
      96             : 
      97             : #define ELF_PAGESTART(_v) ((_v) & ~(int)(ELF_MIN_ALIGN-1))
      98             : #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
      99             : #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
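
For illustration only: a minimal userspace sketch of the same rounding arithmetic, assuming a 4 KiB ELF_MIN_ALIGN and an arbitrary sample address (both are assumptions for the example, not values taken from this file).

    #include <stdio.h>

    #define MIN_ALIGN 4096UL                /* assumed ELF_MIN_ALIGN */

    #define PAGESTART(v)  ((v) & ~(MIN_ALIGN - 1))
    #define PAGEOFFSET(v) ((v) & (MIN_ALIGN - 1))
    #define PAGEALIGN(v)  (((v) + MIN_ALIGN - 1) & ~(MIN_ALIGN - 1))

    int main(void)
    {
            unsigned long v = 0x401234UL;   /* arbitrary sample address */

            /* Prints start 0x401000, off 0x234, align 0x402000. */
            printf("start %#lx off %#lx align %#lx\n",
                   PAGESTART(v), PAGEOFFSET(v), PAGEALIGN(v));
            return 0;
    }
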
     100             : 
     101             : static struct linux_binfmt elf_format = {
     102             :         .module         = THIS_MODULE,
     103             :         .load_binary    = load_elf_binary,
     104             :         .load_shlib     = load_elf_library,
     105             : #ifdef CONFIG_COREDUMP
     106             :         .core_dump      = elf_core_dump,
     107             :         .min_coredump   = ELF_EXEC_PAGESIZE,
     108             : #endif
     109             : };
     110             : 
     111             : #define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
     112             : 
     113           0 : static int set_brk(unsigned long start, unsigned long end, int prot)
     114             : {
     115           0 :         start = ELF_PAGEALIGN(start);
     116           0 :         end = ELF_PAGEALIGN(end);
     117           0 :         if (end > start) {
     118             :                 /*
     119             :                  * Map the last of the bss segment.
     120             :                  * If the header is requesting these pages to be
     121             :                  * executable, honour that (ppc32 needs this).
     122             :                  */
     123           0 :                 int error = vm_brk_flags(start, end - start,
     124             :                                 prot & PROT_EXEC ? VM_EXEC : 0);
     125           0 :                 if (error)
     126             :                         return error;
     127             :         }
     128           0 :         current->mm->start_brk = current->mm->brk = end;
     129           0 :         return 0;
     130             : }
     131             : 
      132             : /*
      133             :  * We need to explicitly zero any fractional pages after the data
      134             :  * section (i.e. bss).  These would otherwise contain junk from the
      135             :  * file that should not be in memory.
      136             :  */
     137             : static int padzero(unsigned long elf_bss)
     138             : {
     139             :         unsigned long nbyte;
     140             : 
     141           0 :         nbyte = ELF_PAGEOFFSET(elf_bss);
     142           0 :         if (nbyte) {
     143           0 :                 nbyte = ELF_MIN_ALIGN - nbyte;
     144           0 :                 if (clear_user((void __user *) elf_bss, nbyte))
     145             :                         return -EFAULT;
     146             :         }
     147             :         return 0;
     148             : }
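
A hedged sketch of the same partial-page computation in userspace; the 4 KiB alignment and the bss start address below are made-up example values.

    #include <stdio.h>

    int main(void)
    {
            unsigned long min_align = 4096;     /* assumed ELF_MIN_ALIGN   */
            unsigned long elf_bss = 0x601234;   /* hypothetical bss start  */
            unsigned long nbyte = elf_bss & (min_align - 1);

            if (nbyte) {
                    nbyte = min_align - nbyte;
                    /* padzero() would clear_user() these bytes; here we
                     * only report how many trailing bytes need zeroing. */
                    printf("zero %lu bytes starting at %#lx\n", nbyte, elf_bss);
            }
            return 0;
    }
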
     149             : 
     150             : /* Let's use some macros to make this stack manipulation a little clearer */
     151             : #ifdef CONFIG_STACK_GROWSUP
     152             : #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
     153             : #define STACK_ROUND(sp, items) \
     154             :         ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
     155             : #define STACK_ALLOC(sp, len) ({ \
     156             :         elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
     157             :         old_sp; })
     158             : #else
     159             : #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
     160             : #define STACK_ROUND(sp, items) \
     161             :         (((unsigned long) (sp - items)) &~ 15UL)
     162             : #define STACK_ALLOC(sp, len) (sp -= len)
     163             : #endif
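
To make the grows-down case concrete, here is a small illustrative sketch of STACK_ALLOC carving items off the stack top; the initial stack pointer and item sizes are assumptions for demonstration.

    #include <stdio.h>

    /* Grows-down variant of STACK_ALLOC, copied here for illustration. */
    #define STACK_ALLOC(sp, len) ((sp) -= (len))

    int main(void)
    {
            unsigned long sp = 0x7ffffffff000UL; /* assumed initial stack top */

            STACK_ALLOC(sp, 16);    /* e.g. the 16 AT_RANDOM bytes  */
            STACK_ALLOC(sp, 5);     /* e.g. a short platform string */

            /* STACK_ROUND() then keeps the pointer 16-byte aligned before
             * argc, argv, envp and the auxv are written below it. */
            printf("sp=%#lx rounded=%#lx\n", sp, sp & ~15UL);
            return 0;
    }
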
     164             : 
     165             : #ifndef ELF_BASE_PLATFORM
     166             : /*
     167             :  * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
     168             :  * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
     169             :  * will be copied to the user stack in the same manner as AT_PLATFORM.
     170             :  */
     171             : #define ELF_BASE_PLATFORM NULL
     172             : #endif
     173             : 
     174             : static int
     175           0 : create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
     176             :                 unsigned long interp_load_addr,
     177             :                 unsigned long e_entry, unsigned long phdr_addr)
     178             : {
     179           0 :         struct mm_struct *mm = current->mm;
     180           0 :         unsigned long p = bprm->p;
     181           0 :         int argc = bprm->argc;
     182           0 :         int envc = bprm->envc;
     183             :         elf_addr_t __user *sp;
     184             :         elf_addr_t __user *u_platform;
     185             :         elf_addr_t __user *u_base_platform;
     186             :         elf_addr_t __user *u_rand_bytes;
     187           0 :         const char *k_platform = ELF_PLATFORM;
     188           0 :         const char *k_base_platform = ELF_BASE_PLATFORM;
     189             :         unsigned char k_rand_bytes[16];
     190             :         int items;
     191             :         elf_addr_t *elf_info;
     192           0 :         elf_addr_t flags = 0;
     193             :         int ei_index;
     194           0 :         const struct cred *cred = current_cred();
     195             :         struct vm_area_struct *vma;
     196             : 
     197             :         /*
     198             :          * In some cases (e.g. Hyper-Threading), we want to avoid L1
     199             :          * evictions by the processes running on the same package. One
     200             :          * thing we can do is to shuffle the initial stack for them.
     201             :          */
     202             : 
     203           0 :         p = arch_align_stack(p);
     204             : 
     205             :         /*
     206             :          * If this architecture has a platform capability string, copy it
     207             :          * to userspace.  In some cases (Sparc), this info is impossible
     208             :          * for userspace to get any other way, in others (i386) it is
     209             :          * merely difficult.
     210             :          */
     211           0 :         u_platform = NULL;
     212             :         if (k_platform) {
     213           0 :                 size_t len = strlen(k_platform) + 1;
     214             : 
     215           0 :                 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
     216           0 :                 if (copy_to_user(u_platform, k_platform, len))
     217             :                         return -EFAULT;
     218             :         }
     219             : 
     220             :         /*
     221             :          * If this architecture has a "base" platform capability
     222             :          * string, copy it to userspace.
     223             :          */
     224           0 :         u_base_platform = NULL;
     225             :         if (k_base_platform) {
     226             :                 size_t len = strlen(k_base_platform) + 1;
     227             : 
     228             :                 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
     229             :                 if (copy_to_user(u_base_platform, k_base_platform, len))
     230             :                         return -EFAULT;
     231             :         }
     232             : 
     233             :         /*
     234             :          * Generate 16 random bytes for userspace PRNG seeding.
     235             :          */
     236           0 :         get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
     237           0 :         u_rand_bytes = (elf_addr_t __user *)
     238           0 :                        STACK_ALLOC(p, sizeof(k_rand_bytes));
     239           0 :         if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
     240             :                 return -EFAULT;
     241             : 
     242             :         /* Create the ELF interpreter info */
     243           0 :         elf_info = (elf_addr_t *)mm->saved_auxv;
     244             :         /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
     245             : #define NEW_AUX_ENT(id, val) \
     246             :         do { \
     247             :                 *elf_info++ = id; \
     248             :                 *elf_info++ = val; \
     249             :         } while (0)
     250             : 
     251             : #ifdef ARCH_DLINFO
     252             :         /*
     253             :          * ARCH_DLINFO must come first so PPC can do its special alignment of
     254             :          * AUXV.
     255             :          * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
     256             :          * ARCH_DLINFO changes
     257             :          */
     258           0 :         ARCH_DLINFO;
     259             : #endif
     260           0 :         NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
     261           0 :         NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
     262           0 :         NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
     263           0 :         NEW_AUX_ENT(AT_PHDR, phdr_addr);
     264           0 :         NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
     265           0 :         NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
     266           0 :         NEW_AUX_ENT(AT_BASE, interp_load_addr);
     267           0 :         if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
     268           0 :                 flags |= AT_FLAGS_PRESERVE_ARGV0;
     269           0 :         NEW_AUX_ENT(AT_FLAGS, flags);
     270           0 :         NEW_AUX_ENT(AT_ENTRY, e_entry);
     271           0 :         NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
     272           0 :         NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
     273           0 :         NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
     274           0 :         NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
     275           0 :         NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
     276           0 :         NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
     277             : #ifdef ELF_HWCAP2
     278             :         NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
     279             : #endif
     280           0 :         NEW_AUX_ENT(AT_EXECFN, bprm->exec);
     281             :         if (k_platform) {
     282           0 :                 NEW_AUX_ENT(AT_PLATFORM,
     283             :                             (elf_addr_t)(unsigned long)u_platform);
     284             :         }
     285             :         if (k_base_platform) {
     286             :                 NEW_AUX_ENT(AT_BASE_PLATFORM,
     287             :                             (elf_addr_t)(unsigned long)u_base_platform);
     288             :         }
     289           0 :         if (bprm->have_execfd) {
     290           0 :                 NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
     291             :         }
     292             : #ifdef CONFIG_RSEQ
     293             :         NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
     294             :         NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq));
     295             : #endif
     296             : #undef NEW_AUX_ENT
     297             :         /* AT_NULL is zero; clear the rest too */
     298           0 :         memset(elf_info, 0, (char *)mm->saved_auxv +
     299             :                         sizeof(mm->saved_auxv) - (char *)elf_info);
     300             : 
     301             :         /* And advance past the AT_NULL entry.  */
     302           0 :         elf_info += 2;
     303             : 
     304           0 :         ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
     305           0 :         sp = STACK_ADD(p, ei_index);
     306             : 
     307           0 :         items = (argc + 1) + (envc + 1) + 1;
     308           0 :         bprm->p = STACK_ROUND(sp, items);
     309             : 
     310             :         /* Point sp at the lowest address on the stack */
     311             : #ifdef CONFIG_STACK_GROWSUP
     312             :         sp = (elf_addr_t __user *)bprm->p - items - ei_index;
     313             :         bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
     314             : #else
     315           0 :         sp = (elf_addr_t __user *)bprm->p;
     316             : #endif
     317             : 
     318             : 
     319             :         /*
     320             :          * Grow the stack manually; some architectures have a limit on how
     321             :          * far ahead a user-space access may be in order to grow the stack.
     322             :          */
     323           0 :         if (mmap_write_lock_killable(mm))
     324             :                 return -EINTR;
     325           0 :         vma = find_extend_vma_locked(mm, bprm->p);
     326           0 :         mmap_write_unlock(mm);
     327           0 :         if (!vma)
     328             :                 return -EFAULT;
     329             : 
     330             :         /* Now, let's put argc (and argv, envp if appropriate) on the stack */
     331           0 :         if (put_user(argc, sp++))
     332             :                 return -EFAULT;
     333             : 
     334             :         /* Populate list of argv pointers back to argv strings. */
     335           0 :         p = mm->arg_end = mm->arg_start;
     336           0 :         while (argc-- > 0) {
     337             :                 size_t len;
     338           0 :                 if (put_user((elf_addr_t)p, sp++))
     339             :                         return -EFAULT;
     340           0 :                 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
     341           0 :                 if (!len || len > MAX_ARG_STRLEN)
     342             :                         return -EINVAL;
     343           0 :                 p += len;
     344             :         }
     345           0 :         if (put_user(0, sp++))
     346             :                 return -EFAULT;
     347           0 :         mm->arg_end = p;
     348             : 
     349             :         /* Populate list of envp pointers back to envp strings. */
     350           0 :         mm->env_end = mm->env_start = p;
     351           0 :         while (envc-- > 0) {
     352             :                 size_t len;
     353           0 :                 if (put_user((elf_addr_t)p, sp++))
     354             :                         return -EFAULT;
     355           0 :                 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
     356           0 :                 if (!len || len > MAX_ARG_STRLEN)
     357             :                         return -EINVAL;
     358           0 :                 p += len;
     359             :         }
     360           0 :         if (put_user(0, sp++))
     361             :                 return -EFAULT;
     362           0 :         mm->env_end = p;
     363             : 
     364             :         /* Put the elf_info on the stack in the right place.  */
     365           0 :         if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
     366             :                 return -EFAULT;
     367             :         return 0;
     368             : }
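
The table built above is visible from userspace as the auxiliary vector. As a rough cross-check, a program can read the entries the kernel wrote using glibc's getauxval(); this is only a sketch, and the printed values naturally differ per binary and per system.

    #include <stdio.h>
    #include <elf.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* Each AT_* id corresponds to a NEW_AUX_ENT() issued above; the
             * values are whatever create_elf_tables() placed on this
             * process's initial stack. */
            printf("AT_PAGESZ = %lu\n", getauxval(AT_PAGESZ));
            printf("AT_PHNUM  = %lu\n", getauxval(AT_PHNUM));
            printf("AT_ENTRY  = %#lx\n", getauxval(AT_ENTRY));
            printf("AT_RANDOM = %#lx\n", getauxval(AT_RANDOM));
            printf("AT_EXECFN = %s\n", (const char *)getauxval(AT_EXECFN));
            return 0;
    }
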
     369             : 
     370           0 : static unsigned long elf_map(struct file *filep, unsigned long addr,
     371             :                 const struct elf_phdr *eppnt, int prot, int type,
     372             :                 unsigned long total_size)
     373             : {
     374             :         unsigned long map_addr;
     375           0 :         unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
     376           0 :         unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
     377           0 :         addr = ELF_PAGESTART(addr);
     378           0 :         size = ELF_PAGEALIGN(size);
     379             : 
     380             :         /* mmap() will return -EINVAL if given a zero size, but a
     381             :          * segment with zero filesize is perfectly valid */
     382           0 :         if (!size)
     383             :                 return addr;
     384             : 
      385             :         /*
      386             :          * total_size is the size of the ELF (interpreter) image.
      387             :          * The _first_ mmap needs to know the full size, otherwise
      388             :          * randomization might put this image into an overlapping
      389             :          * position with the ELF binary image (since size < total_size).
      390             :          * So we first map the 'big' image and then unmap the remainder
      391             :          * at the end (the unmap is needed for ELF images with holes).
      392             :          */
     393           0 :         if (total_size) {
     394           0 :                 total_size = ELF_PAGEALIGN(total_size);
     395           0 :                 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
     396           0 :                 if (!BAD_ADDR(map_addr))
     397           0 :                         vm_munmap(map_addr+size, total_size-size);
     398             :         } else
     399           0 :                 map_addr = vm_mmap(filep, addr, size, prot, type, off);
     400             : 
     401           0 :         if ((type & MAP_FIXED_NOREPLACE) &&
     402           0 :             PTR_ERR((void *)map_addr) == -EEXIST)
     403           0 :                 pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
     404             :                         task_pid_nr(current), current->comm, (void *)addr);
     405             : 
      406             :         return map_addr;
     407             : }
     408             : 
     409             : static unsigned long total_mapping_size(const struct elf_phdr *phdr, int nr)
     410             : {
     411           0 :         elf_addr_t min_addr = -1;
     412           0 :         elf_addr_t max_addr = 0;
     413           0 :         bool pt_load = false;
     414             :         int i;
     415             : 
     416           0 :         for (i = 0; i < nr; i++) {
     417           0 :                 if (phdr[i].p_type == PT_LOAD) {
     418           0 :                         min_addr = min(min_addr, ELF_PAGESTART(phdr[i].p_vaddr));
     419           0 :                         max_addr = max(max_addr, phdr[i].p_vaddr + phdr[i].p_memsz);
     420           0 :                         pt_load = true;
     421             :                 }
     422             :         }
     423           0 :         return pt_load ? (max_addr - min_addr) : 0;
     424             : }
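
For example, with two hypothetical PT_LOAD segments (the addresses and sizes below are invented for illustration), the span that must be reserved up front is the distance from the lowest page-aligned start to the highest end:

    #include <stdio.h>

    struct seg { unsigned long vaddr, memsz; };     /* made-up PT_LOAD data */

    int main(void)
    {
            struct seg load[2] = {
                    { 0x400000, 0x0800 },           /* text       */
                    { 0x600e10, 0x0260 },           /* data + bss */
            };
            unsigned long min = -1UL, max = 0;

            for (int i = 0; i < 2; i++) {
                    unsigned long start = load[i].vaddr & ~0xfffUL;
                    unsigned long end = load[i].vaddr + load[i].memsz;

                    if (start < min)
                            min = start;
                    if (end > max)
                            max = end;
            }
            /* 0x601070 - 0x400000 = 0x201070 bytes of address space. */
            printf("total mapping size: %#lx\n", max - min);
            return 0;
    }
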
     425             : 
     426             : static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
     427             : {
     428             :         ssize_t rv;
     429             : 
     430           0 :         rv = kernel_read(file, buf, len, &pos);
     431           0 :         if (unlikely(rv != len)) {
     432           0 :                 return (rv < 0) ? rv : -EIO;
     433             :         }
     434             :         return 0;
     435             : }
     436             : 
     437           0 : static unsigned long maximum_alignment(struct elf_phdr *cmds, int nr)
     438             : {
     439           0 :         unsigned long alignment = 0;
     440             :         int i;
     441             : 
     442           0 :         for (i = 0; i < nr; i++) {
     443           0 :                 if (cmds[i].p_type == PT_LOAD) {
     444           0 :                         unsigned long p_align = cmds[i].p_align;
     445             : 
     446             :                         /* skip non-power of two alignments as invalid */
     447           0 :                         if (!is_power_of_2(p_align))
     448           0 :                                 continue;
     449           0 :                         alignment = max(alignment, p_align);
     450             :                 }
     451             :         }
     452             : 
     453             :         /* ensure we align to at least one page */
     454           0 :         return ELF_PAGEALIGN(alignment);
     455             : }
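
A small illustrative sketch of the same selection logic with invented p_align values; 0x300 is rejected as a non-power-of-two, and the 2 MiB alignment typical of x86-64 PIEs wins.

    #include <stdio.h>
    #include <stdbool.h>

    static bool is_pow2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
            unsigned long p_align[] = { 0x1000, 0x200000, 0x300 };
            unsigned long alignment = 0;

            for (int i = 0; i < 3; i++) {
                    if (!is_pow2(p_align[i]))       /* skip invalid 0x300 */
                            continue;
                    if (p_align[i] > alignment)
                            alignment = p_align[i];
            }
            printf("maximum alignment: %#lx\n", alignment);  /* 0x200000 */
            return 0;
    }
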
     456             : 
     457             : /**
     458             :  * load_elf_phdrs() - load ELF program headers
     459             :  * @elf_ex:   ELF header of the binary whose program headers should be loaded
     460             :  * @elf_file: the opened ELF binary file
     461             :  *
     462             :  * Loads ELF program headers from the binary file elf_file, which has the ELF
     463             :  * header pointed to by elf_ex, into a newly allocated array. The caller is
     464             :  * responsible for freeing the allocated data. Returns NULL upon failure.
     465             :  */
     466           0 : static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
     467             :                                        struct file *elf_file)
     468             : {
     469           0 :         struct elf_phdr *elf_phdata = NULL;
     470           0 :         int retval = -1;
     471             :         unsigned int size;
     472             : 
     473             :         /*
     474             :          * If the size of this structure has changed, then punt, since
     475             :          * we will be doing the wrong thing.
     476             :          */
     477           0 :         if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
     478             :                 goto out;
     479             : 
     480             :         /* Sanity check the number of program headers... */
     481             :         /* ...and their total size. */
     482           0 :         size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
     483           0 :         if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
     484             :                 goto out;
     485             : 
     486           0 :         elf_phdata = kmalloc(size, GFP_KERNEL);
     487           0 :         if (!elf_phdata)
     488             :                 goto out;
     489             : 
     490             :         /* Read in the program headers */
     491           0 :         retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
     492             : 
     493             : out:
     494           0 :         if (retval) {
     495           0 :                 kfree(elf_phdata);
     496           0 :                 elf_phdata = NULL;
     497             :         }
     498           0 :         return elf_phdata;
     499             : }
     500             : 
     501             : #ifndef CONFIG_ARCH_BINFMT_ELF_STATE
     502             : 
     503             : /**
     504             :  * struct arch_elf_state - arch-specific ELF loading state
     505             :  *
     506             :  * This structure is used to preserve architecture specific data during
     507             :  * the loading of an ELF file, throughout the checking of architecture
     508             :  * specific ELF headers & through to the point where the ELF load is
     509             :  * known to be proceeding (ie. SET_PERSONALITY).
     510             :  *
     511             :  * This implementation is a dummy for architectures which require no
     512             :  * specific state.
     513             :  */
     514             : struct arch_elf_state {
     515             : };
     516             : 
     517             : #define INIT_ARCH_ELF_STATE {}
     518             : 
     519             : /**
     520             :  * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
     521             :  * @ehdr:       The main ELF header
     522             :  * @phdr:       The program header to check
     523             :  * @elf:        The open ELF file
     524             :  * @is_interp:  True if the phdr is from the interpreter of the ELF being
     525             :  *              loaded, else false.
     526             :  * @state:      Architecture-specific state preserved throughout the process
     527             :  *              of loading the ELF.
     528             :  *
     529             :  * Inspects the program header phdr to validate its correctness and/or
     530             :  * suitability for the system. Called once per ELF program header in the
     531             :  * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
     532             :  * interpreter.
     533             :  *
     534             :  * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
     535             :  *         with that return code.
     536             :  */
     537             : static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
     538             :                                    struct elf_phdr *phdr,
     539             :                                    struct file *elf, bool is_interp,
     540             :                                    struct arch_elf_state *state)
     541             : {
     542             :         /* Dummy implementation, always proceed */
     543             :         return 0;
     544             : }
     545             : 
     546             : /**
     547             :  * arch_check_elf() - check an ELF executable
     548             :  * @ehdr:       The main ELF header
     549             :  * @has_interp: True if the ELF has an interpreter, else false.
     550             :  * @interp_ehdr: The interpreter's ELF header
     551             :  * @state:      Architecture-specific state preserved throughout the process
     552             :  *              of loading the ELF.
     553             :  *
     554             :  * Provides a final opportunity for architecture code to reject the loading
      555             :  * of the ELF & cause an exec syscall to return an error. This is called after
      556             :  * all program headers that arch_elf_pt_proc needs to check have been checked.
     557             :  *
     558             :  * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
     559             :  *         with that return code.
     560             :  */
     561             : static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
     562             :                                  struct elfhdr *interp_ehdr,
     563             :                                  struct arch_elf_state *state)
     564             : {
     565             :         /* Dummy implementation, always proceed */
     566             :         return 0;
     567             : }
     568             : 
     569             : #endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
     570             : 
     571             : static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
     572             :                             bool has_interp, bool is_interp)
     573             : {
     574           0 :         int prot = 0;
     575             : 
     576           0 :         if (p_flags & PF_R)
     577           0 :                 prot |= PROT_READ;
     578           0 :         if (p_flags & PF_W)
     579           0 :                 prot |= PROT_WRITE;
     580           0 :         if (p_flags & PF_X)
     581           0 :                 prot |= PROT_EXEC;
     582             : 
     583           0 :         return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
     584             : }
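
The flag translation itself is easy to mimic in userspace; the sketch below repeats it without the arch_elf_adjust_prot() hook, using the PF_*/PROT_* constants from <elf.h> and <sys/mman.h>.

    #include <stdio.h>
    #include <elf.h>
    #include <sys/mman.h>

    /* Same translation as make_prot() above, minus the arch hook. */
    static int make_prot_sketch(unsigned int p_flags)
    {
            int prot = 0;

            if (p_flags & PF_R)
                    prot |= PROT_READ;
            if (p_flags & PF_W)
                    prot |= PROT_WRITE;
            if (p_flags & PF_X)
                    prot |= PROT_EXEC;
            return prot;
    }

    int main(void)
    {
            /* A typical text segment (PF_R | PF_X) maps to PROT_READ|PROT_EXEC. */
            printf("prot=%#x\n", make_prot_sketch(PF_R | PF_X));
            return 0;
    }
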
     585             : 
      586             : /* This is much more generalized than the library routine read function,
      587             :    so we keep this separate.  Technically the library read function is
      588             :    only provided so that we can read a.out libraries that have an ELF
      589             :    header. */
     590             : 
     591           0 : static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
     592             :                 struct file *interpreter,
     593             :                 unsigned long no_base, struct elf_phdr *interp_elf_phdata,
     594             :                 struct arch_elf_state *arch_state)
     595             : {
     596             :         struct elf_phdr *eppnt;
     597           0 :         unsigned long load_addr = 0;
     598           0 :         int load_addr_set = 0;
     599           0 :         unsigned long last_bss = 0, elf_bss = 0;
     600           0 :         int bss_prot = 0;
     601           0 :         unsigned long error = ~0UL;
     602             :         unsigned long total_size;
     603             :         int i;
     604             : 
     605             :         /* First of all, some simple consistency checks */
     606           0 :         if (interp_elf_ex->e_type != ET_EXEC &&
     607             :             interp_elf_ex->e_type != ET_DYN)
     608             :                 goto out;
     609           0 :         if (!elf_check_arch(interp_elf_ex) ||
     610             :             elf_check_fdpic(interp_elf_ex))
     611             :                 goto out;
     612           0 :         if (!interpreter->f_op->mmap)
     613             :                 goto out;
     614             : 
     615           0 :         total_size = total_mapping_size(interp_elf_phdata,
     616           0 :                                         interp_elf_ex->e_phnum);
     617           0 :         if (!total_size) {
     618             :                 error = -EINVAL;
     619             :                 goto out;
     620             :         }
     621             : 
     622             :         eppnt = interp_elf_phdata;
     623           0 :         for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
     624           0 :                 if (eppnt->p_type == PT_LOAD) {
     625           0 :                         int elf_type = MAP_PRIVATE;
     626           0 :                         int elf_prot = make_prot(eppnt->p_flags, arch_state,
     627             :                                                  true, true);
     628           0 :                         unsigned long vaddr = 0;
     629             :                         unsigned long k, map_addr;
     630             : 
     631           0 :                         vaddr = eppnt->p_vaddr;
     632           0 :                         if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
     633             :                                 elf_type |= MAP_FIXED;
     634           0 :                         else if (no_base && interp_elf_ex->e_type == ET_DYN)
     635           0 :                                 load_addr = -vaddr;
     636             : 
     637           0 :                         map_addr = elf_map(interpreter, load_addr + vaddr,
     638             :                                         eppnt, elf_prot, elf_type, total_size);
     639           0 :                         total_size = 0;
     640           0 :                         error = map_addr;
     641           0 :                         if (BAD_ADDR(map_addr))
     642             :                                 goto out;
     643             : 
     644           0 :                         if (!load_addr_set &&
     645           0 :                             interp_elf_ex->e_type == ET_DYN) {
     646           0 :                                 load_addr = map_addr - ELF_PAGESTART(vaddr);
     647           0 :                                 load_addr_set = 1;
     648             :                         }
     649             : 
     650             :                         /*
     651             :                          * Check to see if the section's size will overflow the
     652             :                          * allowed task size. Note that p_filesz must always be
      653             :                          * <= p_memsz so it's only necessary to check p_memsz.
     654             :                          */
     655           0 :                         k = load_addr + eppnt->p_vaddr;
     656           0 :                         if (BAD_ADDR(k) ||
     657           0 :                             eppnt->p_filesz > eppnt->p_memsz ||
     658           0 :                             eppnt->p_memsz > TASK_SIZE ||
     659           0 :                             TASK_SIZE - eppnt->p_memsz < k) {
     660             :                                 error = -ENOMEM;
     661             :                                 goto out;
     662             :                         }
     663             : 
     664             :                         /*
     665             :                          * Find the end of the file mapping for this phdr, and
     666             :                          * keep track of the largest address we see for this.
     667             :                          */
     668           0 :                         k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
     669           0 :                         if (k > elf_bss)
     670           0 :                                 elf_bss = k;
     671             : 
     672             :                         /*
     673             :                          * Do the same thing for the memory mapping - between
     674             :                          * elf_bss and last_bss is the bss section.
     675             :                          */
     676           0 :                         k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
     677           0 :                         if (k > last_bss) {
     678           0 :                                 last_bss = k;
     679           0 :                                 bss_prot = elf_prot;
     680             :                         }
     681             :                 }
     682             :         }
     683             : 
     684             :         /*
     685             :          * Now fill out the bss section: first pad the last page from
     686             :          * the file up to the page boundary, and zero it from elf_bss
     687             :          * up to the end of the page.
     688             :          */
     689           0 :         if (padzero(elf_bss)) {
     690             :                 error = -EFAULT;
     691             :                 goto out;
     692             :         }
     693             :         /*
     694             :          * Next, align both the file and mem bss up to the page size,
     695             :          * since this is where elf_bss was just zeroed up to, and where
     696             :          * last_bss will end after the vm_brk_flags() below.
     697             :          */
     698           0 :         elf_bss = ELF_PAGEALIGN(elf_bss);
     699           0 :         last_bss = ELF_PAGEALIGN(last_bss);
     700             :         /* Finally, if there is still more bss to allocate, do it. */
     701           0 :         if (last_bss > elf_bss) {
     702           0 :                 error = vm_brk_flags(elf_bss, last_bss - elf_bss,
     703             :                                 bss_prot & PROT_EXEC ? VM_EXEC : 0);
     704           0 :                 if (error)
     705             :                         goto out;
     706             :         }
     707             : 
     708             :         error = load_addr;
     709             : out:
     710           0 :         return error;
     711             : }
     712             : 
     713             : /*
     714             :  * These are the functions used to load ELF style executables and shared
     715             :  * libraries.  There is no binary dependent code anywhere else.
     716             :  */
     717             : 
     718             : static int parse_elf_property(const char *data, size_t *off, size_t datasz,
     719             :                               struct arch_elf_state *arch,
     720             :                               bool have_prev_type, u32 *prev_type)
     721             : {
     722             :         size_t o, step;
     723             :         const struct gnu_property *pr;
     724             :         int ret;
     725             : 
     726             :         if (*off == datasz)
     727             :                 return -ENOENT;
     728             : 
     729             :         if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
     730             :                 return -EIO;
     731             :         o = *off;
     732             :         datasz -= *off;
     733             : 
     734             :         if (datasz < sizeof(*pr))
     735             :                 return -ENOEXEC;
     736             :         pr = (const struct gnu_property *)(data + o);
     737             :         o += sizeof(*pr);
     738             :         datasz -= sizeof(*pr);
     739             : 
     740             :         if (pr->pr_datasz > datasz)
     741             :                 return -ENOEXEC;
     742             : 
     743             :         WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
     744             :         step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
     745             :         if (step > datasz)
     746             :                 return -ENOEXEC;
     747             : 
     748             :         /* Properties are supposed to be unique and sorted on pr_type: */
     749             :         if (have_prev_type && pr->pr_type <= *prev_type)
     750             :                 return -ENOEXEC;
     751             :         *prev_type = pr->pr_type;
     752             : 
     753             :         ret = arch_parse_elf_property(pr->pr_type, data + o,
     754             :                                       pr->pr_datasz, ELF_COMPAT, arch);
     755             :         if (ret)
     756             :                 return ret;
     757             : 
     758             :         *off = o + step;
     759             :         return 0;
     760             : }
     761             : 
     762             : #define NOTE_DATA_SZ SZ_1K
     763             : #define GNU_PROPERTY_TYPE_0_NAME "GNU"
     764             : #define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
     765             : 
     766             : static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
     767             :                                 struct arch_elf_state *arch)
     768             : {
     769             :         union {
     770             :                 struct elf_note nhdr;
     771             :                 char data[NOTE_DATA_SZ];
     772             :         } note;
     773             :         loff_t pos;
     774             :         ssize_t n;
     775             :         size_t off, datasz;
     776             :         int ret;
     777             :         bool have_prev_type;
     778             :         u32 prev_type;
     779             : 
     780             :         if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
     781             :                 return 0;
     782             : 
     783             :         /* load_elf_binary() shouldn't call us unless this is true... */
     784             :         if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
     785             :                 return -ENOEXEC;
     786             : 
     787             :         /* If the properties are crazy large, that's too bad (for now): */
     788             :         if (phdr->p_filesz > sizeof(note))
     789             :                 return -ENOEXEC;
     790             : 
     791             :         pos = phdr->p_offset;
     792             :         n = kernel_read(f, &note, phdr->p_filesz, &pos);
     793             : 
     794             :         BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
     795             :         if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
     796             :                 return -EIO;
     797             : 
     798             :         if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
     799             :             note.nhdr.n_namesz != NOTE_NAME_SZ ||
     800             :             strncmp(note.data + sizeof(note.nhdr),
     801             :                     GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
     802             :                 return -ENOEXEC;
     803             : 
     804             :         off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
     805             :                        ELF_GNU_PROPERTY_ALIGN);
     806             :         if (off > n)
     807             :                 return -ENOEXEC;
     808             : 
     809             :         if (note.nhdr.n_descsz > n - off)
     810             :                 return -ENOEXEC;
     811             :         datasz = off + note.nhdr.n_descsz;
     812             : 
     813             :         have_prev_type = false;
     814             :         do {
     815             :                 ret = parse_elf_property(note.data, &off, datasz, arch,
     816             :                                          have_prev_type, &prev_type);
     817             :                 have_prev_type = true;
     818             :         } while (!ret);
     819             : 
     820             :         return ret == -ENOENT ? 0 : ret;
     821             : }
     822             : 
     823           0 : static int load_elf_binary(struct linux_binprm *bprm)
     824             : {
     825           0 :         struct file *interpreter = NULL; /* to shut gcc up */
     826           0 :         unsigned long load_bias = 0, phdr_addr = 0;
     827           0 :         int first_pt_load = 1;
     828             :         unsigned long error;
     829           0 :         struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
     830           0 :         struct elf_phdr *elf_property_phdata = NULL;
     831             :         unsigned long elf_bss, elf_brk;
     832           0 :         int bss_prot = 0;
     833             :         int retval, i;
     834             :         unsigned long elf_entry;
     835             :         unsigned long e_entry;
     836           0 :         unsigned long interp_load_addr = 0;
     837             :         unsigned long start_code, end_code, start_data, end_data;
     838           0 :         unsigned long reloc_func_desc __maybe_unused = 0;
     839           0 :         int executable_stack = EXSTACK_DEFAULT;
     840           0 :         struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
     841           0 :         struct elfhdr *interp_elf_ex = NULL;
     842             :         struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
     843             :         struct mm_struct *mm;
     844             :         struct pt_regs *regs;
     845             : 
     846           0 :         retval = -ENOEXEC;
     847             :         /* First of all, some simple consistency checks */
     848           0 :         if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
     849             :                 goto out;
     850             : 
     851           0 :         if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
     852             :                 goto out;
     853           0 :         if (!elf_check_arch(elf_ex))
     854             :                 goto out;
     855             :         if (elf_check_fdpic(elf_ex))
     856             :                 goto out;
     857           0 :         if (!bprm->file->f_op->mmap)
     858             :                 goto out;
     859             : 
     860           0 :         elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
     861           0 :         if (!elf_phdata)
     862             :                 goto out;
     863             : 
     864             :         elf_ppnt = elf_phdata;
     865           0 :         for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
     866             :                 char *elf_interpreter;
     867             : 
     868           0 :                 if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
     869           0 :                         elf_property_phdata = elf_ppnt;
     870           0 :                         continue;
     871             :                 }
     872             : 
     873           0 :                 if (elf_ppnt->p_type != PT_INTERP)
     874           0 :                         continue;
     875             : 
     876             :                 /*
     877             :                  * This is the program interpreter used for shared libraries -
     878             :                  * for now assume that this is an a.out format binary.
     879             :                  */
     880           0 :                 retval = -ENOEXEC;
     881           0 :                 if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
     882             :                         goto out_free_ph;
     883             : 
     884           0 :                 retval = -ENOMEM;
     885           0 :                 elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
     886           0 :                 if (!elf_interpreter)
     887             :                         goto out_free_ph;
     888             : 
     889           0 :                 retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
     890           0 :                                   elf_ppnt->p_offset);
     891           0 :                 if (retval < 0)
     892             :                         goto out_free_interp;
     893             :                 /* make sure path is NULL terminated */
     894           0 :                 retval = -ENOEXEC;
     895           0 :                 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
     896             :                         goto out_free_interp;
     897             : 
     898           0 :                 interpreter = open_exec(elf_interpreter);
     899           0 :                 kfree(elf_interpreter);
     900           0 :                 retval = PTR_ERR(interpreter);
     901           0 :                 if (IS_ERR(interpreter))
     902             :                         goto out_free_ph;
     903             : 
     904             :                 /*
     905             :                  * If the binary is not readable then enforce mm->dumpable = 0
     906             :                  * regardless of the interpreter's permissions.
     907             :                  */
     908           0 :                 would_dump(bprm, interpreter);
     909             : 
     910           0 :                 interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
     911           0 :                 if (!interp_elf_ex) {
     912             :                         retval = -ENOMEM;
     913             :                         goto out_free_file;
     914             :                 }
     915             : 
     916             :                 /* Get the exec headers */
     917           0 :                 retval = elf_read(interpreter, interp_elf_ex,
     918             :                                   sizeof(*interp_elf_ex), 0);
     919           0 :                 if (retval < 0)
     920             :                         goto out_free_dentry;
     921             : 
     922             :                 break;
     923             : 
     924             : out_free_interp:
     925           0 :                 kfree(elf_interpreter);
     926           0 :                 goto out_free_ph;
     927             :         }
     928             : 
     929           0 :         elf_ppnt = elf_phdata;
     930           0 :         for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
     931           0 :                 switch (elf_ppnt->p_type) {
     932             :                 case PT_GNU_STACK:
     933           0 :                         if (elf_ppnt->p_flags & PF_X)
     934             :                                 executable_stack = EXSTACK_ENABLE_X;
     935             :                         else
     936           0 :                                 executable_stack = EXSTACK_DISABLE_X;
     937             :                         break;
     938             : 
     939             :                 case PT_LOPROC ... PT_HIPROC:
     940             :                         retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
     941             :                                                   bprm->file, false,
     942             :                                                   &arch_state);
     943             :                         if (retval)
     944             :                                 goto out_free_dentry;
     945             :                         break;
     946             :                 }
     947             : 
     948             :         /* Some simple consistency checks for the interpreter */
     949           0 :         if (interpreter) {
     950           0 :                 retval = -ELIBBAD;
     951             :                 /* Not an ELF interpreter */
     952           0 :                 if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
     953             :                         goto out_free_dentry;
     954             :                 /* Verify the interpreter has a valid arch */
     955           0 :                 if (!elf_check_arch(interp_elf_ex) ||
     956             :                     elf_check_fdpic(interp_elf_ex))
     957             :                         goto out_free_dentry;
     958             : 
     959             :                 /* Load the interpreter program headers */
     960           0 :                 interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
     961             :                                                    interpreter);
     962           0 :                 if (!interp_elf_phdata)
     963             :                         goto out_free_dentry;
     964             : 
     965             :                 /* Pass PT_LOPROC..PT_HIPROC headers to arch code */
     966             :                 elf_property_phdata = NULL;
     967             :                 elf_ppnt = interp_elf_phdata;
     968           0 :                 for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
     969             :                         switch (elf_ppnt->p_type) {
     970             :                         case PT_GNU_PROPERTY:
     971             :                                 elf_property_phdata = elf_ppnt;
     972             :                                 break;
     973             : 
     974             :                         case PT_LOPROC ... PT_HIPROC:
     975             :                                 retval = arch_elf_pt_proc(interp_elf_ex,
     976             :                                                           elf_ppnt, interpreter,
     977             :                                                           true, &arch_state);
     978             :                                 if (retval)
     979             :                                         goto out_free_dentry;
     980             :                                 break;
     981             :                         }
     982             :         }
     983             : 
     984           0 :         retval = parse_elf_properties(interpreter ?: bprm->file,
     985             :                                       elf_property_phdata, &arch_state);
     986             :         if (retval)
     987             :                 goto out_free_dentry;
     988             : 
     989             :         /*
     990             :          * Allow arch code to reject the ELF at this point, whilst it's
     991             :          * still possible to return an error to the code that invoked
     992             :          * the exec syscall.
     993             :          */
     994           0 :         retval = arch_check_elf(elf_ex,
     995             :                                 !!interpreter, interp_elf_ex,
     996             :                                 &arch_state);
     997             :         if (retval)
     998             :                 goto out_free_dentry;
     999             : 
    1000             :         /* Flush all traces of the currently running executable */
    1001           0 :         retval = begin_new_exec(bprm);
    1002           0 :         if (retval)
    1003             :                 goto out_free_dentry;
    1004             : 
    1005             :         /* Do this immediately, since STACK_TOP as used in setup_arg_pages
    1006             :            may depend on the personality.  */
    1007             :         SET_PERSONALITY2(*elf_ex, &arch_state);
    1008             :         if (elf_read_implies_exec(*elf_ex, executable_stack))
    1009             :                 current->personality |= READ_IMPLIES_EXEC;
    1010             : 
    1011           0 :         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
    1012           0 :                 current->flags |= PF_RANDOMIZE;
    1013             : 
    1014           0 :         setup_new_exec(bprm);
    1015             : 
    1016             :         /* Do this so that we can load the interpreter, if need be.  We will
    1017             :            change some of these later */
    1018           0 :         retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
    1019             :                                  executable_stack);
    1020           0 :         if (retval < 0)
    1021             :                 goto out_free_dentry;
    1022             : 
    1023             :         elf_bss = 0;
    1024             :         elf_brk = 0;
    1025             : 
    1026             :         start_code = ~0UL;
    1027             :         end_code = 0;
    1028             :         start_data = 0;
    1029             :         end_data = 0;
    1030             : 
    1031             :         /* Now we do a little grungy work by mmapping the ELF image into
    1032             :            the correct location in memory. */
    1033           0 :         for(i = 0, elf_ppnt = elf_phdata;
    1034           0 :             i < elf_ex->e_phnum; i++, elf_ppnt++) {
    1035             :                 int elf_prot, elf_flags;
    1036             :                 unsigned long k, vaddr;
    1037           0 :                 unsigned long total_size = 0;
    1038             :                 unsigned long alignment;
    1039             : 
    1040           0 :                 if (elf_ppnt->p_type != PT_LOAD)
    1041           0 :                         continue;
    1042             : 
    1043           0 :                 if (unlikely (elf_brk > elf_bss)) {
    1044             :                         unsigned long nbyte;
    1045             : 
    1046             :                         /* There was a PT_LOAD segment with p_memsz > p_filesz
    1047             :                            before this one. Map anonymous pages, if needed,
    1048             :                            and clear the area.  */
    1049           0 :                         retval = set_brk(elf_bss + load_bias,
    1050             :                                          elf_brk + load_bias,
    1051             :                                          bss_prot);
    1052           0 :                         if (retval)
    1053             :                                 goto out_free_dentry;
    1054           0 :                         nbyte = ELF_PAGEOFFSET(elf_bss);
    1055           0 :                         if (nbyte) {
    1056           0 :                                 nbyte = ELF_MIN_ALIGN - nbyte;
    1057           0 :                                 if (nbyte > elf_brk - elf_bss)
    1058           0 :                                         nbyte = elf_brk - elf_bss;
    1059           0 :                                 if (clear_user((void __user *)elf_bss +
    1060             :                                                         load_bias, nbyte)) {
    1061             :                                         /*
    1062             :                                          * This bss-zeroing can fail if the ELF
    1063             :                                          * file specifies odd protections. So
    1064             :                                          * we don't check the return value
    1065             :                                          */
    1066             :                                 }
    1067             :                         }
    1068             :                 }
    1069             : 
    1070           0 :                 elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
    1071             :                                      !!interpreter, false);
    1072             : 
    1073           0 :                 elf_flags = MAP_PRIVATE;
    1074             : 
    1075           0 :                 vaddr = elf_ppnt->p_vaddr;
    1076             :                 /*
    1077             :                  * The first time through the loop, first_pt_load is true:
    1078             :                  * layout will be calculated. Once set, use MAP_FIXED since
    1079             :                  * we know we've already safely mapped the entire region with
    1080             :                  * MAP_FIXED_NOREPLACE in the once-per-binary logic following.
    1081             :                  */
    1082           0 :                 if (!first_pt_load) {
    1083             :                         elf_flags |= MAP_FIXED;
    1084           0 :                 } else if (elf_ex->e_type == ET_EXEC) {
    1085             :                         /*
    1086             :                          * This logic is run once for the first LOAD Program
    1087             :                          * Header for ET_EXEC binaries. No special handling
    1088             :                          * is needed.
    1089             :                          */
    1090             :                         elf_flags |= MAP_FIXED_NOREPLACE;
    1091           0 :                 } else if (elf_ex->e_type == ET_DYN) {
    1092             :                         /*
    1093             :                          * This logic is run once for the first LOAD Program
    1094             :                          * Header for ET_DYN binaries to calculate the
    1095             :                          * randomization (load_bias) for all the LOAD
    1096             :                          * Program Headers.
    1097             :                          *
    1098             :                          * There are effectively two types of ET_DYN
    1099             :                          * binaries: programs (i.e. PIE: ET_DYN with INTERP)
    1100             :                          * and loaders (ET_DYN without INTERP, since they
    1101             :                          * _are_ the ELF interpreter). The loaders must
    1102             :                          * be loaded away from programs since the program
    1103             :                          * may otherwise collide with the loader (especially
    1104             :                          * for ET_EXEC which does not have a randomized
     1105             :                          * position). For example, to handle invocations of
    1106             :                          * "./ld.so someprog" to test out a new version of
    1107             :                          * the loader, the subsequent program that the
    1108             :                          * loader loads must avoid the loader itself, so
    1109             :                          * they cannot share the same load range. Sufficient
    1110             :                          * room for the brk must be allocated with the
    1111             :                          * loader as well, since brk must be available with
    1112             :                          * the loader.
    1113             :                          *
    1114             :                          * Therefore, programs are loaded offset from
    1115             :                          * ELF_ET_DYN_BASE and loaders are loaded into the
    1116             :                          * independently randomized mmap region (0 load_bias
    1117             :                          * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
    1118             :                          */
    1119           0 :                         if (interpreter) {
    1120           0 :                                 load_bias = ELF_ET_DYN_BASE;
    1121           0 :                                 if (current->flags & PF_RANDOMIZE)
    1122             :                                         load_bias += arch_mmap_rnd();
    1123           0 :                                 alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
    1124           0 :                                 if (alignment)
    1125           0 :                                         load_bias &= ~(alignment - 1);
    1126             :                                 elf_flags |= MAP_FIXED_NOREPLACE;
    1127             :                         } else
    1128             :                                 load_bias = 0;
    1129             : 
    1130             :                         /*
    1131             :                          * Since load_bias is used for all subsequent loading
    1132             :                          * calculations, we must lower it by the first vaddr
    1133             :                          * so that the remaining calculations based on the
    1134             :                          * ELF vaddrs will be correctly offset. The result
    1135             :                          * is then page aligned.
    1136             :                          */
    1137           0 :                         load_bias = ELF_PAGESTART(load_bias - vaddr);
    1138             : 
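                      :                         /*
                      :                          * Editor's note (not part of the original source): a sketch of
                      :                          * the PIE case above, assuming an x86-64-style ELF_ET_DYN_BASE
                      :                          * of 0x555555554000 and a first PT_LOAD p_vaddr of 0x1000:
                      :                          *
                      :                          *   load_bias = ELF_PAGESTART(0x555555554000 + rnd - 0x1000);
                      :                          *   first map = load_bias + 0x1000;   // the randomized base
                      :                          *
                      :                          * Subtracting vaddr here keeps every later "load_bias + p_vaddr"
                      :                          * sum pointing at the intended randomized address.
                      :                          */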
    1139             :                         /*
    1140             :                          * Calculate the entire size of the ELF mapping
    1141             :                          * (total_size), used for the initial mapping,
     1142             :                          * because first_pt_load is cleared once the
     1143             :                          * initial mapping has been performed.
    1144             :                          *
    1145             :                          * Note that this is only sensible when the LOAD
    1146             :                          * segments are contiguous (or overlapping). If
    1147             :                          * used for LOADs that are far apart, this would
    1148             :                          * cause the holes between LOADs to be mapped,
    1149             :                          * running the risk of having the mapping fail,
    1150             :                          * as it would be larger than the ELF file itself.
    1151             :                          *
    1152             :                          * As a result, only ET_DYN does this, since
    1153             :                          * some ET_EXEC (e.g. ia64) may have large virtual
    1154             :                          * memory holes between LOADs.
    1155             :                          *
    1156             :                          */
    1157           0 :                         total_size = total_mapping_size(elf_phdata,
    1158           0 :                                                         elf_ex->e_phnum);
    1159           0 :                         if (!total_size) {
    1160             :                                 retval = -EINVAL;
    1161             :                                 goto out_free_dentry;
    1162             :                         }
    1163             :                 }
    1164             : 
    1165           0 :                 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
    1166             :                                 elf_prot, elf_flags, total_size);
    1167           0 :                 if (BAD_ADDR(error)) {
    1168           0 :                         retval = IS_ERR_VALUE(error) ?
    1169           0 :                                 PTR_ERR((void*)error) : -EINVAL;
    1170             :                         goto out_free_dentry;
    1171             :                 }
    1172             : 
    1173           0 :                 if (first_pt_load) {
    1174           0 :                         first_pt_load = 0;
    1175           0 :                         if (elf_ex->e_type == ET_DYN) {
    1176           0 :                                 load_bias += error -
    1177           0 :                                              ELF_PAGESTART(load_bias + vaddr);
    1178           0 :                                 reloc_func_desc = load_bias;
    1179             :                         }
    1180             :                 }
    1181             : 
    1182             :                 /*
    1183             :                  * Figure out which segment in the file contains the Program
    1184             :                  * Header table, and map to the associated memory address.
    1185             :                  */
    1186           0 :                 if (elf_ppnt->p_offset <= elf_ex->e_phoff &&
    1187           0 :                     elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) {
    1188           0 :                         phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset +
    1189           0 :                                     elf_ppnt->p_vaddr;
    1190             :                 }
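                      :                 /*
                      :                  * Editor's note (illustrative, assumed values): with e_phoff = 0x40
                      :                  * inside a PT_LOAD having p_offset = 0 and p_vaddr = 0, this yields
                      :                  * phdr_addr = 0x40; load_bias is added after the loop so the value
                      :                  * handed to create_elf_tables() (and exported via AT_PHDR) is a
                      :                  * runtime address.
                      :                  */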
    1191             : 
    1192           0 :                 k = elf_ppnt->p_vaddr;
    1193           0 :                 if ((elf_ppnt->p_flags & PF_X) && k < start_code)
    1194           0 :                         start_code = k;
    1195           0 :                 if (start_data < k)
    1196           0 :                         start_data = k;
    1197             : 
    1198             :                 /*
    1199             :                  * Check to see if the section's size will overflow the
    1200             :                  * allowed task size. Note that p_filesz must always be
    1201             :                  * <= p_memsz so it is only necessary to check p_memsz.
    1202             :                  */
    1203           0 :                 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
    1204           0 :                     elf_ppnt->p_memsz > TASK_SIZE ||
    1205           0 :                     TASK_SIZE - elf_ppnt->p_memsz < k) {
    1206             :                         /* set_brk can never work. Avoid overflows. */
    1207             :                         retval = -EINVAL;
    1208             :                         goto out_free_dentry;
    1209             :                 }
    1210             : 
    1211           0 :                 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
    1212             : 
    1213           0 :                 if (k > elf_bss)
    1214           0 :                         elf_bss = k;
    1215           0 :                 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
    1216           0 :                         end_code = k;
    1217             :                 if (end_data < k)
    1218             :                         end_data = k;
    1219           0 :                 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
    1220           0 :                 if (k > elf_brk) {
    1221           0 :                         bss_prot = elf_prot;
    1222           0 :                         elf_brk = k;
    1223             :                 }
    1224             :         }
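                      :         /*
                      :          * Editor's note (illustrative, assumed segment values): for a data
                      :          * segment with p_vaddr = 0x4000, p_filesz = 0x100 and p_memsz = 0x1000,
                      :          * the loop leaves elf_bss = 0x4100 (end of the file-backed bytes) and
                      :          * elf_brk = 0x5000 (end of zero-initialised memory); once load_bias is
                      :          * added below, set_brk() maps the pages beyond elf_bss and padzero()
                      :          * zeroes the tail of the last file-backed page.
                      :          */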
    1225             : 
    1226           0 :         e_entry = elf_ex->e_entry + load_bias;
    1227           0 :         phdr_addr += load_bias;
    1228           0 :         elf_bss += load_bias;
    1229           0 :         elf_brk += load_bias;
    1230           0 :         start_code += load_bias;
    1231           0 :         end_code += load_bias;
    1232           0 :         start_data += load_bias;
    1233           0 :         end_data += load_bias;
    1234             : 
    1235             :         /* Calling set_brk effectively mmaps the pages that we need
    1236             :          * for the bss and break sections.  We must do this before
    1237             :          * mapping in the interpreter, to make sure it doesn't wind
    1238             :          * up getting placed where the bss needs to go.
    1239             :          */
    1240           0 :         retval = set_brk(elf_bss, elf_brk, bss_prot);
    1241           0 :         if (retval)
    1242             :                 goto out_free_dentry;
    1243           0 :         if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
    1244             :                 retval = -EFAULT; /* Nobody gets to see this, but.. */
    1245             :                 goto out_free_dentry;
    1246             :         }
    1247             : 
    1248           0 :         if (interpreter) {
    1249           0 :                 elf_entry = load_elf_interp(interp_elf_ex,
    1250             :                                             interpreter,
    1251             :                                             load_bias, interp_elf_phdata,
    1252             :                                             &arch_state);
    1253           0 :                 if (!IS_ERR_VALUE(elf_entry)) {
    1254             :                         /*
    1255             :                          * load_elf_interp() returns relocation
    1256             :                          * adjustment
    1257             :                          */
    1258           0 :                         interp_load_addr = elf_entry;
    1259           0 :                         elf_entry += interp_elf_ex->e_entry;
    1260             :                 }
    1261           0 :                 if (BAD_ADDR(elf_entry)) {
    1262           0 :                         retval = IS_ERR_VALUE(elf_entry) ?
    1263           0 :                                         (int)elf_entry : -EINVAL;
    1264             :                         goto out_free_dentry;
    1265             :                 }
    1266           0 :                 reloc_func_desc = interp_load_addr;
    1267             : 
    1268           0 :                 allow_write_access(interpreter);
    1269           0 :                 fput(interpreter);
    1270             : 
    1271           0 :                 kfree(interp_elf_ex);
    1272           0 :                 kfree(interp_elf_phdata);
    1273             :         } else {
    1274           0 :                 elf_entry = e_entry;
    1275           0 :                 if (BAD_ADDR(elf_entry)) {
    1276             :                         retval = -EINVAL;
    1277             :                         goto out_free_dentry;
    1278             :                 }
    1279             :         }
    1280             : 
    1281           0 :         kfree(elf_phdata);
    1282             : 
    1283           0 :         set_binfmt(&elf_format);
    1284             : 
    1285             : #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
    1286           0 :         retval = ARCH_SETUP_ADDITIONAL_PAGES(bprm, elf_ex, !!interpreter);
    1287           0 :         if (retval < 0)
    1288             :                 goto out;
    1289             : #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
    1290             : 
    1291           0 :         retval = create_elf_tables(bprm, elf_ex, interp_load_addr,
    1292             :                                    e_entry, phdr_addr);
    1293           0 :         if (retval < 0)
    1294             :                 goto out;
    1295             : 
    1296           0 :         mm = current->mm;
    1297           0 :         mm->end_code = end_code;
    1298           0 :         mm->start_code = start_code;
    1299           0 :         mm->start_data = start_data;
    1300           0 :         mm->end_data = end_data;
    1301           0 :         mm->start_stack = bprm->p;
    1302             : 
    1303           0 :         if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
    1304             :                 /*
    1305             :                  * For architectures with ELF randomization, when executing
    1306             :                  * a loader directly (i.e. no interpreter listed in ELF
    1307             :                  * headers), move the brk area out of the mmap region
    1308             :                  * (since it grows up, and may collide early with the stack
    1309             :                  * growing down), and into the unused ELF_ET_DYN_BASE region.
    1310             :                  */
    1311             :                 if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
    1312             :                     elf_ex->e_type == ET_DYN && !interpreter) {
    1313             :                         mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
    1314             :                 }
    1315             : 
    1316           0 :                 mm->brk = mm->start_brk = arch_randomize_brk(mm);
    1317             : #ifdef compat_brk_randomized
    1318             :                 current->brk_randomized = 1;
    1319             : #endif
    1320             :         }
    1321             : 
    1322           0 :         if (current->personality & MMAP_PAGE_ZERO) {
    1323             :                 /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
    1324             :                    and some applications "depend" upon this behavior.
    1325             :                    Since we do not have the power to recompile these, we
    1326             :                    emulate the SVr4 behavior. Sigh. */
    1327           0 :                 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
    1328             :                                 MAP_FIXED | MAP_PRIVATE, 0);
    1329             :         }
    1330             : 
    1331           0 :         regs = current_pt_regs();
    1332             : #ifdef ELF_PLAT_INIT
    1333             :         /*
    1334             :          * The ABI may specify that certain registers be set up in special
     1335             :          * ways (on i386 %edx is the address of a DT_FINI function, for
     1336             :          * example).  In addition, it may also specify (e.g., PowerPC64 ELF)
    1337             :          * that the e_entry field is the address of the function descriptor
    1338             :          * for the startup routine, rather than the address of the startup
    1339             :          * routine itself.  This macro performs whatever initialization to
    1340             :          * the regs structure is required as well as any relocations to the
     1341             :          * function descriptor entries when executing dynamically linked apps.
    1342             :          */
    1343           0 :         ELF_PLAT_INIT(regs, reloc_func_desc);
    1344             : #endif
    1345             : 
    1346           0 :         finalize_exec(bprm);
    1347           0 :         START_THREAD(elf_ex, regs, elf_entry, bprm->p);
    1348           0 :         retval = 0;
    1349             : out:
    1350           0 :         return retval;
    1351             : 
    1352             :         /* error cleanup */
    1353             : out_free_dentry:
    1354           0 :         kfree(interp_elf_ex);
    1355           0 :         kfree(interp_elf_phdata);
    1356             : out_free_file:
    1357           0 :         allow_write_access(interpreter);
    1358           0 :         if (interpreter)
    1359           0 :                 fput(interpreter);
    1360             : out_free_ph:
    1361           0 :         kfree(elf_phdata);
    1362           0 :         goto out;
    1363             : }
    1364             : 
    1365             : #ifdef CONFIG_USELIB
    1366             : /* This is really simpleminded and specialized - we are loading an
    1367             :    a.out library that is given an ELF header. */
    1368             : static int load_elf_library(struct file *file)
    1369             : {
    1370             :         struct elf_phdr *elf_phdata;
    1371             :         struct elf_phdr *eppnt;
    1372             :         unsigned long elf_bss, bss, len;
    1373             :         int retval, error, i, j;
    1374             :         struct elfhdr elf_ex;
    1375             : 
    1376             :         error = -ENOEXEC;
    1377             :         retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
    1378             :         if (retval < 0)
    1379             :                 goto out;
    1380             : 
    1381             :         if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
    1382             :                 goto out;
    1383             : 
    1384             :         /* First of all, some simple consistency checks */
    1385             :         if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
    1386             :             !elf_check_arch(&elf_ex) || !file->f_op->mmap)
    1387             :                 goto out;
    1388             :         if (elf_check_fdpic(&elf_ex))
    1389             :                 goto out;
    1390             : 
    1391             :         /* Now read in all of the header information */
    1392             : 
    1393             :         j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
    1394             :         /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
    1395             : 
    1396             :         error = -ENOMEM;
    1397             :         elf_phdata = kmalloc(j, GFP_KERNEL);
    1398             :         if (!elf_phdata)
    1399             :                 goto out;
    1400             : 
    1401             :         eppnt = elf_phdata;
    1402             :         error = -ENOEXEC;
    1403             :         retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
    1404             :         if (retval < 0)
    1405             :                 goto out_free_ph;
    1406             : 
    1407             :         for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
    1408             :                 if ((eppnt + i)->p_type == PT_LOAD)
    1409             :                         j++;
    1410             :         if (j != 1)
    1411             :                 goto out_free_ph;
    1412             : 
    1413             :         while (eppnt->p_type != PT_LOAD)
    1414             :                 eppnt++;
    1415             : 
    1416             :         /* Now use mmap to map the library into memory. */
    1417             :         error = vm_mmap(file,
    1418             :                         ELF_PAGESTART(eppnt->p_vaddr),
    1419             :                         (eppnt->p_filesz +
    1420             :                          ELF_PAGEOFFSET(eppnt->p_vaddr)),
    1421             :                         PROT_READ | PROT_WRITE | PROT_EXEC,
    1422             :                         MAP_FIXED_NOREPLACE | MAP_PRIVATE,
    1423             :                         (eppnt->p_offset -
    1424             :                          ELF_PAGEOFFSET(eppnt->p_vaddr)));
    1425             :         if (error != ELF_PAGESTART(eppnt->p_vaddr))
    1426             :                 goto out_free_ph;
    1427             : 
    1428             :         elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
    1429             :         if (padzero(elf_bss)) {
    1430             :                 error = -EFAULT;
    1431             :                 goto out_free_ph;
    1432             :         }
    1433             : 
    1434             :         len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
    1435             :         bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
    1436             :         if (bss > len) {
    1437             :                 error = vm_brk(len, bss - len);
    1438             :                 if (error)
    1439             :                         goto out_free_ph;
    1440             :         }
    1441             :         error = 0;
    1442             : 
    1443             : out_free_ph:
    1444             :         kfree(elf_phdata);
    1445             : out:
    1446             :         return error;
    1447             : }
    1448             : #endif /* #ifdef CONFIG_USELIB */
    1449             : 
    1450             : #ifdef CONFIG_ELF_CORE
    1451             : /*
    1452             :  * ELF core dumper
    1453             :  *
    1454             :  * Modelled on fs/exec.c:aout_core_dump()
    1455             :  * Jeremy Fitzhardinge <jeremy@sw.oz.au>
    1456             :  */
    1457             : 
    1458             : /* An ELF note in memory */
    1459             : struct memelfnote
    1460             : {
    1461             :         const char *name;
    1462             :         int type;
    1463             :         unsigned int datasz;
    1464             :         void *data;
    1465             : };
    1466             : 
    1467           0 : static int notesize(struct memelfnote *en)
    1468             : {
    1469             :         int sz;
    1470             : 
    1471           0 :         sz = sizeof(struct elf_note);
    1472           0 :         sz += roundup(strlen(en->name) + 1, 4);
    1473           0 :         sz += roundup(en->datasz, 4);
    1474             : 
    1475           0 :         return sz;
    1476             : }
    1477             : 
    1478           0 : static int writenote(struct memelfnote *men, struct coredump_params *cprm)
    1479             : {
    1480             :         struct elf_note en;
    1481           0 :         en.n_namesz = strlen(men->name) + 1;
    1482           0 :         en.n_descsz = men->datasz;
    1483           0 :         en.n_type = men->type;
    1484             : 
    1485           0 :         return dump_emit(cprm, &en, sizeof(en)) &&
    1486           0 :             dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
    1487           0 :             dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
    1488             : }
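                      : /*
                      :  * Editor's note: the byte layout writenote() emits for a single note, and
                      :  * which notesize() accounts for, is conceptually:
                      :  *
                      :  *   struct elf_note hdr;                 // n_namesz, n_descsz, n_type
                      :  *   char name[roundup(n_namesz, 4)];     // e.g. "CORE" plus NUL and padding
                      :  *   char desc[roundup(n_descsz, 4)];     // note payload plus padding
                      :  */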
    1489             : 
    1490           0 : static void fill_elf_header(struct elfhdr *elf, int segs,
    1491             :                             u16 machine, u32 flags)
    1492             : {
    1493           0 :         memset(elf, 0, sizeof(*elf));
    1494             : 
    1495           0 :         memcpy(elf->e_ident, ELFMAG, SELFMAG);
    1496           0 :         elf->e_ident[EI_CLASS] = ELF_CLASS;
    1497           0 :         elf->e_ident[EI_DATA] = ELF_DATA;
    1498           0 :         elf->e_ident[EI_VERSION] = EV_CURRENT;
    1499           0 :         elf->e_ident[EI_OSABI] = ELF_OSABI;
    1500             : 
    1501           0 :         elf->e_type = ET_CORE;
    1502           0 :         elf->e_machine = machine;
    1503           0 :         elf->e_version = EV_CURRENT;
    1504           0 :         elf->e_phoff = sizeof(struct elfhdr);
    1505           0 :         elf->e_flags = flags;
    1506           0 :         elf->e_ehsize = sizeof(struct elfhdr);
    1507           0 :         elf->e_phentsize = sizeof(struct elf_phdr);
    1508           0 :         elf->e_phnum = segs;
    1509           0 : }
    1510             : 
    1511             : static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
    1512             : {
    1513           0 :         phdr->p_type = PT_NOTE;
    1514           0 :         phdr->p_offset = offset;
    1515           0 :         phdr->p_vaddr = 0;
    1516           0 :         phdr->p_paddr = 0;
    1517           0 :         phdr->p_filesz = sz;
    1518           0 :         phdr->p_memsz = 0;
    1519           0 :         phdr->p_flags = 0;
    1520           0 :         phdr->p_align = 4;
    1521             : }
    1522             : 
    1523             : static void fill_note(struct memelfnote *note, const char *name, int type,
    1524             :                 unsigned int sz, void *data)
    1525             : {
    1526           0 :         note->name = name;
    1527           0 :         note->type = type;
    1528           0 :         note->datasz = sz;
    1529           0 :         note->data = data;
    1530             : }
    1531             : 
    1532             : /*
    1533             :  * fill up all the fields in prstatus from the given task struct, except
    1534             :  * registers which need to be filled up separately.
    1535             :  */
    1536           0 : static void fill_prstatus(struct elf_prstatus_common *prstatus,
    1537             :                 struct task_struct *p, long signr)
    1538             : {
    1539           0 :         prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
    1540           0 :         prstatus->pr_sigpend = p->pending.signal.sig[0];
    1541           0 :         prstatus->pr_sighold = p->blocked.sig[0];
    1542             :         rcu_read_lock();
    1543           0 :         prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
    1544           0 :         rcu_read_unlock();
    1545           0 :         prstatus->pr_pid = task_pid_vnr(p);
    1546           0 :         prstatus->pr_pgrp = task_pgrp_vnr(p);
    1547           0 :         prstatus->pr_sid = task_session_vnr(p);
    1548           0 :         if (thread_group_leader(p)) {
    1549             :                 struct task_cputime cputime;
    1550             : 
    1551             :                 /*
    1552             :                  * This is the record for the group leader.  It shows the
    1553             :                  * group-wide total, not its individual thread total.
    1554             :                  */
    1555           0 :                 thread_group_cputime(p, &cputime);
    1556           0 :                 prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
    1557           0 :                 prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
    1558             :         } else {
    1559             :                 u64 utime, stime;
    1560             : 
    1561           0 :                 task_cputime(p, &utime, &stime);
    1562           0 :                 prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
    1563           0 :                 prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
    1564             :         }
    1565             : 
    1566           0 :         prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
    1567           0 :         prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
    1568           0 : }
    1569             : 
    1570           0 : static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
    1571             :                        struct mm_struct *mm)
    1572             : {
    1573             :         const struct cred *cred;
    1574             :         unsigned int i, len;
    1575             :         unsigned int state;
    1576             : 
    1577             :         /* first copy the parameters from user space */
    1578           0 :         memset(psinfo, 0, sizeof(struct elf_prpsinfo));
    1579             : 
    1580           0 :         len = mm->arg_end - mm->arg_start;
    1581           0 :         if (len >= ELF_PRARGSZ)
    1582           0 :                 len = ELF_PRARGSZ-1;
    1583           0 :         if (copy_from_user(&psinfo->pr_psargs,
    1584             :                            (const char __user *)mm->arg_start, len))
    1585             :                 return -EFAULT;
    1586           0 :         for(i = 0; i < len; i++)
    1587           0 :                 if (psinfo->pr_psargs[i] == 0)
    1588           0 :                         psinfo->pr_psargs[i] = ' ';
    1589           0 :         psinfo->pr_psargs[len] = 0;
    1590             : 
    1591             :         rcu_read_lock();
    1592           0 :         psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
    1593           0 :         rcu_read_unlock();
    1594           0 :         psinfo->pr_pid = task_pid_vnr(p);
    1595           0 :         psinfo->pr_pgrp = task_pgrp_vnr(p);
    1596           0 :         psinfo->pr_sid = task_session_vnr(p);
    1597             : 
    1598           0 :         state = READ_ONCE(p->__state);
    1599           0 :         i = state ? ffz(~state) + 1 : 0;
    1600           0 :         psinfo->pr_state = i;
    1601           0 :         psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
    1602           0 :         psinfo->pr_zomb = psinfo->pr_sname == 'Z';
    1603           0 :         psinfo->pr_nice = task_nice(p);
    1604           0 :         psinfo->pr_flag = p->flags;
    1605             :         rcu_read_lock();
    1606           0 :         cred = __task_cred(p);
    1607           0 :         SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
    1608           0 :         SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
    1609             :         rcu_read_unlock();
    1610           0 :         get_task_comm(psinfo->pr_fname, p);
    1611             : 
    1612           0 :         return 0;
    1613             : }
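                      : /*
                      :  * Editor's example (hypothetical command line): if the dumped process was
                      :  * started as "ls -l /tmp", its argument area holds "ls\0-l\0/tmp\0" and the
                      :  * loop above turns it into the space-separated string "ls -l /tmp" in
                      :  * pr_psargs.
                      :  */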
    1614             : 
    1615             : static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
    1616             : {
    1617           0 :         elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
    1618           0 :         int i = 0;
    1619             :         do
    1620           0 :                 i += 2;
    1621           0 :         while (auxv[i - 2] != AT_NULL);
    1622           0 :         fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
    1623             : }
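                      : /*
                      :  * Editor's note: saved_auxv is a flat array of (type, value) pairs ending
                      :  * with an AT_NULL entry, e.g.
                      :  *
                      :  *   { AT_PHDR, phdr, AT_PHNUM, phnum, ..., AT_NULL, 0 }
                      :  *
                      :  * The do/while above counts pairs up to and including the AT_NULL pair, so
                      :  * the NT_AUXV note always carries its terminator.
                      :  */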
    1624             : 
    1625             : static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
    1626             :                 const kernel_siginfo_t *siginfo)
    1627             : {
    1628           0 :         copy_siginfo_to_external(csigdata, siginfo);
    1629           0 :         fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
    1630             : }
    1631             : 
    1632             : #define MAX_FILE_NOTE_SIZE (4*1024*1024)
    1633             : /*
    1634             :  * Format of NT_FILE note:
    1635             :  *
    1636             :  * long count     -- how many files are mapped
    1637             :  * long page_size -- units for file_ofs
    1638             :  * array of [COUNT] elements of
    1639             :  *   long start
    1640             :  *   long end
    1641             :  *   long file_ofs
    1642             :  * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
    1643             :  */
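                      : /*
                      :  * Editor's sketch of that layout as C types (illustrative only; the note is
                      :  * written as a flat array of user_long_t values, not as this struct):
                      :  *
                      :  *   struct nt_file_note {
                      :  *           user_long_t count;
                      :  *           user_long_t page_size;
                      :  *           struct { user_long_t start, end, file_ofs; } entry[];
                      :  *           // followed by count NUL-terminated filenames
                      :  *   };
                      :  */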
    1644           0 : static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm)
    1645             : {
    1646             :         unsigned count, size, names_ofs, remaining, n;
    1647             :         user_long_t *data;
    1648             :         user_long_t *start_end_ofs;
    1649             :         char *name_base, *name_curpos;
    1650             :         int i;
    1651             : 
    1652             :         /* *Estimated* file count and total data size needed */
    1653           0 :         count = cprm->vma_count;
    1654           0 :         if (count > UINT_MAX / 64)
    1655             :                 return -EINVAL;
    1656           0 :         size = count * 64;
    1657             : 
    1658           0 :         names_ofs = (2 + 3 * count) * sizeof(data[0]);
    1659             :  alloc:
    1660           0 :         if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
    1661             :                 return -EINVAL;
    1662           0 :         size = round_up(size, PAGE_SIZE);
    1663             :         /*
    1664             :          * "size" can be 0 here legitimately.
    1665             :          * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
    1666             :          */
    1667           0 :         data = kvmalloc(size, GFP_KERNEL);
    1668           0 :         if (ZERO_OR_NULL_PTR(data))
    1669             :                 return -ENOMEM;
    1670             : 
    1671           0 :         start_end_ofs = data + 2;
    1672           0 :         name_base = name_curpos = ((char *)data) + names_ofs;
    1673           0 :         remaining = size - names_ofs;
    1674           0 :         count = 0;
    1675           0 :         for (i = 0; i < cprm->vma_count; i++) {
    1676           0 :                 struct core_vma_metadata *m = &cprm->vma_meta[i];
    1677             :                 struct file *file;
    1678             :                 const char *filename;
    1679             : 
    1680           0 :                 file = m->file;
    1681           0 :                 if (!file)
    1682           0 :                         continue;
    1683           0 :                 filename = file_path(file, name_curpos, remaining);
    1684           0 :                 if (IS_ERR(filename)) {
    1685           0 :                         if (PTR_ERR(filename) == -ENAMETOOLONG) {
    1686           0 :                                 kvfree(data);
    1687           0 :                                 size = size * 5 / 4;
    1688             :                                 goto alloc;
    1689             :                         }
    1690           0 :                         continue;
    1691             :                 }
    1692             : 
    1693             :                 /* file_path() fills at the end, move name down */
    1694             :                 /* n = strlen(filename) + 1: */
    1695           0 :                 n = (name_curpos + remaining) - filename;
    1696           0 :                 remaining = filename - name_curpos;
    1697           0 :                 memmove(name_curpos, filename, n);
    1698           0 :                 name_curpos += n;
    1699             : 
    1700           0 :                 *start_end_ofs++ = m->start;
    1701           0 :                 *start_end_ofs++ = m->end;
    1702           0 :                 *start_end_ofs++ = m->pgoff;
    1703           0 :                 count++;
    1704             :         }
    1705             : 
    1706             :         /* Now we know exact count of files, can store it */
    1707           0 :         data[0] = count;
    1708           0 :         data[1] = PAGE_SIZE;
    1709             :         /*
     1710             :          * Count is usually less than mm->map_count,
     1711             :          * so we need to move the filenames down.
    1712             :          */
    1713           0 :         n = cprm->vma_count - count;
    1714           0 :         if (n != 0) {
    1715           0 :                 unsigned shift_bytes = n * 3 * sizeof(data[0]);
    1716           0 :                 memmove(name_base - shift_bytes, name_base,
    1717             :                         name_curpos - name_base);
    1718           0 :                 name_curpos -= shift_bytes;
    1719             :         }
    1720             : 
    1721           0 :         size = name_curpos - (char *)data;
    1722           0 :         fill_note(note, "CORE", NT_FILE, size, data);
    1723             :         return 0;
    1724             : }
    1725             : 
    1726             : #include <linux/regset.h>
    1727             : 
    1728             : struct elf_thread_core_info {
    1729             :         struct elf_thread_core_info *next;
    1730             :         struct task_struct *task;
    1731             :         struct elf_prstatus prstatus;
    1732             :         struct memelfnote notes[];
    1733             : };
    1734             : 
    1735             : struct elf_note_info {
    1736             :         struct elf_thread_core_info *thread;
    1737             :         struct memelfnote psinfo;
    1738             :         struct memelfnote signote;
    1739             :         struct memelfnote auxv;
    1740             :         struct memelfnote files;
    1741             :         user_siginfo_t csigdata;
    1742             :         size_t size;
    1743             :         int thread_notes;
    1744             : };
    1745             : 
    1746             : #ifdef CORE_DUMP_USE_REGSET
    1747             : /*
    1748             :  * When a regset has a writeback hook, we call it on each thread before
    1749             :  * dumping user memory.  On register window machines, this makes sure the
    1750             :  * user memory backing the register data is up to date before we read it.
    1751             :  */
    1752             : static void do_thread_regset_writeback(struct task_struct *task,
    1753             :                                        const struct user_regset *regset)
    1754             : {
    1755             :         if (regset->writeback)
    1756             :                 regset->writeback(task, regset, 1);
    1757             : }
    1758             : 
    1759             : #ifndef PRSTATUS_SIZE
    1760             : #define PRSTATUS_SIZE sizeof(struct elf_prstatus)
    1761             : #endif
    1762             : 
    1763             : #ifndef SET_PR_FPVALID
    1764             : #define SET_PR_FPVALID(S) ((S)->pr_fpvalid = 1)
    1765             : #endif
    1766             : 
    1767             : static int fill_thread_core_info(struct elf_thread_core_info *t,
    1768             :                                  const struct user_regset_view *view,
    1769             :                                  long signr, struct elf_note_info *info)
    1770             : {
    1771             :         unsigned int note_iter, view_iter;
    1772             : 
    1773             :         /*
    1774             :          * NT_PRSTATUS is the one special case, because the regset data
    1775             :          * goes into the pr_reg field inside the note contents, rather
    1776             :          * than being the whole note contents.  We fill the regset in here.
    1777             :          * We assume that regset 0 is NT_PRSTATUS.
    1778             :          */
    1779             :         fill_prstatus(&t->prstatus.common, t->task, signr);
    1780             :         regset_get(t->task, &view->regsets[0],
    1781             :                    sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
    1782             : 
    1783             :         fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
    1784             :                   PRSTATUS_SIZE, &t->prstatus);
    1785             :         info->size += notesize(&t->notes[0]);
    1786             : 
    1787             :         do_thread_regset_writeback(t->task, &view->regsets[0]);
    1788             : 
    1789             :         /*
    1790             :          * Each other regset might generate a note too.  For each regset
    1791             :          * that has no core_note_type or is inactive, skip it.
    1792             :          */
    1793             :         note_iter = 1;
    1794             :         for (view_iter = 1; view_iter < view->n; ++view_iter) {
    1795             :                 const struct user_regset *regset = &view->regsets[view_iter];
    1796             :                 int note_type = regset->core_note_type;
    1797             :                 bool is_fpreg = note_type == NT_PRFPREG;
    1798             :                 void *data;
    1799             :                 int ret;
    1800             : 
    1801             :                 do_thread_regset_writeback(t->task, regset);
    1802             :                 if (!note_type) // not for coredumps
    1803             :                         continue;
    1804             :                 if (regset->active && regset->active(t->task, regset) <= 0)
    1805             :                         continue;
    1806             : 
    1807             :                 ret = regset_get_alloc(t->task, regset, ~0U, &data);
    1808             :                 if (ret < 0)
    1809             :                         continue;
    1810             : 
    1811             :                 if (WARN_ON_ONCE(note_iter >= info->thread_notes))
    1812             :                         break;
    1813             : 
    1814             :                 if (is_fpreg)
    1815             :                         SET_PR_FPVALID(&t->prstatus);
    1816             : 
    1817             :                 fill_note(&t->notes[note_iter], is_fpreg ? "CORE" : "LINUX",
    1818             :                           note_type, ret, data);
    1819             : 
    1820             :                 info->size += notesize(&t->notes[note_iter]);
    1821             :                 note_iter++;
    1822             :         }
    1823             : 
    1824             :         return 1;
    1825             : }
    1826             : #else
    1827           0 : static int fill_thread_core_info(struct elf_thread_core_info *t,
    1828             :                                  const struct user_regset_view *view,
    1829             :                                  long signr, struct elf_note_info *info)
    1830             : {
    1831           0 :         struct task_struct *p = t->task;
    1832             :         elf_fpregset_t *fpu;
    1833             : 
    1834           0 :         fill_prstatus(&t->prstatus.common, p, signr);
    1835           0 :         elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
    1836             : 
    1837           0 :         fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
    1838           0 :                   &(t->prstatus));
    1839           0 :         info->size += notesize(&t->notes[0]);
    1840             : 
    1841           0 :         fpu = kzalloc(sizeof(elf_fpregset_t), GFP_KERNEL);
    1842           0 :         if (!fpu || !elf_core_copy_task_fpregs(p, fpu)) {
    1843           0 :                 kfree(fpu);
    1844             :                 return 1;
    1845             :         }
    1846             : 
    1847           0 :         t->prstatus.pr_fpvalid = 1;
    1848           0 :         fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
    1849           0 :         info->size += notesize(&t->notes[1]);
    1850             : 
    1851             :         return 1;
    1852             : }
    1853             : #endif
    1854             : 
    1855           0 : static int fill_note_info(struct elfhdr *elf, int phdrs,
    1856             :                           struct elf_note_info *info,
    1857             :                           struct coredump_params *cprm)
    1858             : {
    1859           0 :         struct task_struct *dump_task = current;
    1860             :         const struct user_regset_view *view;
    1861             :         struct elf_thread_core_info *t;
    1862             :         struct elf_prpsinfo *psinfo;
    1863             :         struct core_thread *ct;
    1864             : 
    1865           0 :         psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
    1866           0 :         if (!psinfo)
    1867             :                 return 0;
    1868           0 :         fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
    1869             : 
    1870             : #ifdef CORE_DUMP_USE_REGSET
    1871             :         view = task_user_regset_view(dump_task);
    1872             : 
    1873             :         /*
    1874             :          * Figure out how many notes we're going to need for each thread.
    1875             :          */
    1876             :         info->thread_notes = 0;
    1877             :         for (int i = 0; i < view->n; ++i)
    1878             :                 if (view->regsets[i].core_note_type != 0)
    1879             :                         ++info->thread_notes;
    1880             : 
    1881             :         /*
     1882             :          * Sanity check.  We rely on regset 0 being NT_PRSTATUS,
    1883             :          * since it is our one special case.
    1884             :          */
    1885             :         if (unlikely(info->thread_notes == 0) ||
    1886             :             unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
    1887             :                 WARN_ON(1);
    1888             :                 return 0;
    1889             :         }
    1890             : 
    1891             :         /*
    1892             :          * Initialize the ELF file header.
    1893             :          */
    1894             :         fill_elf_header(elf, phdrs,
    1895             :                         view->e_machine, view->e_flags);
    1896             : #else
    1897           0 :         view = NULL;
    1898           0 :         info->thread_notes = 2;
    1899           0 :         fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
    1900             : #endif
    1901             : 
    1902             :         /*
    1903             :          * Allocate a structure for each thread.
    1904             :          */
    1905           0 :         info->thread = kzalloc(offsetof(struct elf_thread_core_info,
    1906             :                                      notes[info->thread_notes]),
    1907             :                             GFP_KERNEL);
    1908           0 :         if (unlikely(!info->thread))
    1909             :                 return 0;
    1910             : 
    1911           0 :         info->thread->task = dump_task;
    1912           0 :         for (ct = dump_task->signal->core_state->dumper.next; ct; ct = ct->next) {
    1913           0 :                 t = kzalloc(offsetof(struct elf_thread_core_info,
    1914             :                                      notes[info->thread_notes]),
    1915             :                             GFP_KERNEL);
    1916           0 :                 if (unlikely(!t))
    1917             :                         return 0;
    1918             : 
    1919           0 :                 t->task = ct->task;
    1920           0 :                 t->next = info->thread->next;
    1921           0 :                 info->thread->next = t;
    1922             :         }
    1923             : 
    1924             :         /*
    1925             :          * Now fill in each thread's information.
    1926             :          */
    1927           0 :         for (t = info->thread; t != NULL; t = t->next)
    1928           0 :                 if (!fill_thread_core_info(t, view, cprm->siginfo->si_signo, info))
    1929             :                         return 0;
    1930             : 
    1931             :         /*
    1932             :          * Fill in the two process-wide notes.
    1933             :          */
    1934           0 :         fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
    1935           0 :         info->size += notesize(&info->psinfo);
    1936             : 
    1937           0 :         fill_siginfo_note(&info->signote, &info->csigdata, cprm->siginfo);
    1938           0 :         info->size += notesize(&info->signote);
    1939             : 
    1940           0 :         fill_auxv_note(&info->auxv, current->mm);
    1941           0 :         info->size += notesize(&info->auxv);
    1942             : 
    1943           0 :         if (fill_files_note(&info->files, cprm) == 0)
    1944           0 :                 info->size += notesize(&info->files);
    1945             : 
    1946             :         return 1;
    1947             : }
    1948             : 
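A minimal user-space sketch (not part of binfmt_elf.c) of the flexible-array allocation pattern fill_note_info() uses: each per-thread record is sized with offsetof(struct elf_thread_core_info, notes[info->thread_notes]), so a single allocation holds the fixed header plus one note slot per regset-derived note. The names note_demo and thread_info_demo are illustrative only, and the non-constant array index inside offsetof() relies on the GCC/Clang __builtin_offsetof extension, just as the kernel code does.

#include <stddef.h>
#include <stdlib.h>

struct note_demo {
        const char *name;
        unsigned int type;
        size_t datasz;
        void *data;
};

struct thread_info_demo {
        struct thread_info_demo *next;
        struct note_demo notes[];       /* one slot per note, count chosen at runtime */
};

static struct thread_info_demo *alloc_thread_info(size_t thread_notes)
{
        /* header size + thread_notes * sizeof(notes[0]), zero-initialised */
        return calloc(1, offsetof(struct thread_info_demo, notes[thread_notes]));
}
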
    1949             : /*
    1950             :  * Write all the notes for each thread.  When writing the first thread, the
    1951             :  * process-wide notes are interleaved after the first thread-specific note.
    1952             :  */
    1953           0 : static int write_note_info(struct elf_note_info *info,
    1954             :                            struct coredump_params *cprm)
    1955             : {
    1956           0 :         bool first = true;
    1957           0 :         struct elf_thread_core_info *t = info->thread;
    1958             : 
    1959             :         do {
    1960             :                 int i;
    1961             : 
    1962           0 :                 if (!writenote(&t->notes[0], cprm))
    1963             :                         return 0;
    1964             : 
    1965           0 :                 if (first && !writenote(&info->psinfo, cprm))
    1966             :                         return 0;
    1967           0 :                 if (first && !writenote(&info->signote, cprm))
    1968             :                         return 0;
    1969           0 :                 if (first && !writenote(&info->auxv, cprm))
    1970             :                         return 0;
    1971           0 :                 if (first && info->files.data &&
    1972           0 :                                 !writenote(&info->files, cprm))
    1973             :                         return 0;
    1974             : 
    1975           0 :                 for (i = 1; i < info->thread_notes; ++i)
    1976           0 :                         if (t->notes[i].data &&
    1977           0 :                             !writenote(&t->notes[i], cprm))
    1978             :                                 return 0;
    1979             : 
    1980           0 :                 first = false;
    1981           0 :                 t = t->next;
    1982           0 :         } while (t);
    1983             : 
    1984             :         return 1;
    1985             : }
    1986             : 
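A hedged sketch of how a consumer might walk the PT_NOTE segment that write_note_info() emits: each entry is an Elf64_Nhdr followed by a 4-byte-padded name ("CORE", "LINUX", ...) and a 4-byte-padded descriptor, with the process-wide notes interleaved right after the first thread's NT_PRSTATUS. This is illustrative user-space code, not part of binfmt_elf.c.

#include <elf.h>
#include <stdio.h>

static void walk_notes(const unsigned char *p, size_t len)
{
        while (len >= sizeof(Elf64_Nhdr)) {
                const Elf64_Nhdr *nh = (const Elf64_Nhdr *)p;
                size_t namesz = (nh->n_namesz + 3) & ~(size_t)3;
                size_t descsz = (nh->n_descsz + 3) & ~(size_t)3;
                size_t entry  = sizeof(*nh) + namesz + descsz;

                if (entry > len)
                        break;          /* truncated or malformed note */
                printf("type %u  name %.*s  desc %u bytes\n",
                       nh->n_type, (int)nh->n_namesz,
                       (const char *)(nh + 1), nh->n_descsz);
                p   += entry;
                len -= entry;
        }
}
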
    1987           0 : static void free_note_info(struct elf_note_info *info)
    1988             : {
    1989           0 :         struct elf_thread_core_info *threads = info->thread;
    1990           0 :         while (threads) {
    1991             :                 unsigned int i;
    1992           0 :                 struct elf_thread_core_info *t = threads;
    1993           0 :                 threads = t->next;
    1994           0 :                 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
    1995           0 :                 for (i = 1; i < info->thread_notes; ++i)
    1996           0 :                         kfree(t->notes[i].data);
    1997           0 :                 kfree(t);
    1998             :         }
    1999           0 :         kfree(info->psinfo.data);
    2000           0 :         kvfree(info->files.data);
    2001           0 : }
    2002             : 
    2003           0 : static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
    2004             :                              elf_addr_t e_shoff, int segs)
    2005             : {
    2006           0 :         elf->e_shoff = e_shoff;
    2007           0 :         elf->e_shentsize = sizeof(*shdr4extnum);
    2008           0 :         elf->e_shnum = 1;
    2009           0 :         elf->e_shstrndx = SHN_UNDEF;
    2010             : 
    2011           0 :         memset(shdr4extnum, 0, sizeof(*shdr4extnum));
    2012             : 
    2013           0 :         shdr4extnum->sh_type = SHT_NULL;
    2014           0 :         shdr4extnum->sh_size = elf->e_shnum;
    2015           0 :         shdr4extnum->sh_link = elf->e_shstrndx;
    2016           0 :         shdr4extnum->sh_info = segs;
    2017           0 : }
    2018             : 
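When the segment count exceeds PN_XNUM (0xffff), e_phnum is clamped to PN_XNUM and fill_extnum_info() parks the real count in sh_info of the single SHT_NULL section header. A minimal sketch of how a reader recovers it (illustrative user-space code, assuming the ELF header and section header 0 have already been read):

#include <elf.h>

static unsigned long real_phnum(const Elf64_Ehdr *ehdr, const Elf64_Shdr *shdr0)
{
        if (ehdr->e_phnum != PN_XNUM)
                return ehdr->e_phnum;
        /* extended numbering: section header 0 carries the true count */
        return shdr0->sh_info;
}
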
    2019             : /*
    2020             :  * Actual dumper
    2021             :  *
    2022             :  * This is a two-pass process; first we find the offsets of the bits,
    2023             :  * and then they are actually written out.  If we run out of core limit
    2024             :  * we just truncate.
    2025             :  */
    2026           0 : static int elf_core_dump(struct coredump_params *cprm)
    2027             : {
    2028           0 :         int has_dumped = 0;
    2029             :         int segs, i;
    2030             :         struct elfhdr elf;
    2031           0 :         loff_t offset = 0, dataoff;
    2032           0 :         struct elf_note_info info = { };
    2033           0 :         struct elf_phdr *phdr4note = NULL;
    2034           0 :         struct elf_shdr *shdr4extnum = NULL;
    2035             :         Elf_Half e_phnum;
    2036             :         elf_addr_t e_shoff;
    2037             : 
    2038             :         /*
    2039             :          * The number of segs is recorded into the ELF header as a 16-bit value.
    2040             :          * Please check the DEFAULT_MAX_MAP_COUNT definition when modifying this code.
    2041             :          */
    2042           0 :         segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
    2043             : 
    2044             :         /* for notes section */
    2045           0 :         segs++;
    2046             : 
    2047             :         /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
    2048             :          * this, the kernel supports extended numbering. Have a look at
    2049             :          * include/linux/elf.h for further information. */
    2050           0 :         e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
    2051             : 
    2052             :         /*
    2053             :          * Collect all the non-memory information about the process for the
    2054             :          * notes.  This also sets up the file header.
    2055             :          */
    2056           0 :         if (!fill_note_info(&elf, e_phnum, &info, cprm))
    2057             :                 goto end_coredump;
    2058             : 
    2059           0 :         has_dumped = 1;
    2060             : 
    2061           0 :         offset += sizeof(elf);                          /* ELF header */
    2062           0 :         offset += segs * sizeof(struct elf_phdr);       /* Program headers */
    2063             : 
    2064             :         /* Write notes phdr entry */
    2065             :         {
    2066           0 :                 size_t sz = info.size;
    2067             : 
    2068             :                 /* For cell spufs */
    2069           0 :                 sz += elf_coredump_extra_notes_size();
    2070             : 
    2071           0 :                 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
    2072           0 :                 if (!phdr4note)
    2073             :                         goto end_coredump;
    2074             : 
    2075           0 :                 fill_elf_note_phdr(phdr4note, sz, offset);
    2076           0 :                 offset += sz;
    2077             :         }
    2078             : 
    2079           0 :         dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
    2080             : 
    2081           0 :         offset += cprm->vma_data_size;
    2082           0 :         offset += elf_core_extra_data_size(cprm);
    2083           0 :         e_shoff = offset;
    2084             : 
    2085           0 :         if (e_phnum == PN_XNUM) {
    2086           0 :                 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
    2087           0 :                 if (!shdr4extnum)
    2088             :                         goto end_coredump;
    2089           0 :                 fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
    2090             :         }
    2091             : 
    2092           0 :         offset = dataoff;
    2093             : 
    2094           0 :         if (!dump_emit(cprm, &elf, sizeof(elf)))
    2095             :                 goto end_coredump;
    2096             : 
    2097           0 :         if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
    2098             :                 goto end_coredump;
    2099             : 
    2100             :         /* Write program headers for segments dump */
    2101           0 :         for (i = 0; i < cprm->vma_count; i++) {
    2102           0 :                 struct core_vma_metadata *meta = cprm->vma_meta + i;
    2103             :                 struct elf_phdr phdr;
    2104             : 
    2105           0 :                 phdr.p_type = PT_LOAD;
    2106           0 :                 phdr.p_offset = offset;
    2107           0 :                 phdr.p_vaddr = meta->start;
    2108           0 :                 phdr.p_paddr = 0;
    2109           0 :                 phdr.p_filesz = meta->dump_size;
    2110           0 :                 phdr.p_memsz = meta->end - meta->start;
    2111           0 :                 offset += phdr.p_filesz;
    2112           0 :                 phdr.p_flags = 0;
    2113           0 :                 if (meta->flags & VM_READ)
    2114           0 :                         phdr.p_flags |= PF_R;
    2115           0 :                 if (meta->flags & VM_WRITE)
    2116           0 :                         phdr.p_flags |= PF_W;
    2117           0 :                 if (meta->flags & VM_EXEC)
    2118           0 :                         phdr.p_flags |= PF_X;
    2119           0 :                 phdr.p_align = ELF_EXEC_PAGESIZE;
    2120             : 
    2121           0 :                 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
    2122             :                         goto end_coredump;
    2123             :         }
    2124             : 
    2125           0 :         if (!elf_core_write_extra_phdrs(cprm, offset))
    2126             :                 goto end_coredump;
    2127             : 
    2128             :         /* write out the notes section */
    2129           0 :         if (!write_note_info(&info, cprm))
    2130             :                 goto end_coredump;
    2131             : 
    2132             :         /* For cell spufs */
    2133           0 :         if (elf_coredump_extra_notes_write(cprm))
    2134             :                 goto end_coredump;
    2135             : 
    2136             :         /* Align to page */
    2137           0 :         dump_skip_to(cprm, dataoff);
    2138             : 
    2139           0 :         for (i = 0; i < cprm->vma_count; i++) {
    2140           0 :                 struct core_vma_metadata *meta = cprm->vma_meta + i;
    2141             : 
    2142           0 :                 if (!dump_user_range(cprm, meta->start, meta->dump_size))
    2143             :                         goto end_coredump;
    2144             :         }
    2145             : 
    2146           0 :         if (!elf_core_write_extra_data(cprm))
    2147             :                 goto end_coredump;
    2148             : 
    2149           0 :         if (e_phnum == PN_XNUM) {
    2150           0 :                 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
    2151             :                         goto end_coredump;
    2152             :         }
    2153             : 
    2154             : end_coredump:
    2155           0 :         free_note_info(&info);
    2156           0 :         kfree(shdr4extnum);
    2157           0 :         kfree(phdr4note);
    2158           0 :         return has_dumped;
    2159             : }
    2160             : 
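The first pass of elf_core_dump() above is pure arithmetic: the ELF header and all program headers come first, the notes follow, and the VMA contents start at the next ELF_EXEC_PAGESIZE boundary (dataoff), with each PT_LOAD's p_offset advancing by its p_filesz. A worked sketch of that layout with made-up sizes (64-bit ELF, 4 KiB pages; the spufs extra notes and extra phdrs are ignored here):

#include <elf.h>
#include <stdio.h>

int main(void)
{
        unsigned long segs     = 3 + 1;   /* three VMAs plus the note segment */
        unsigned long notes_sz = 2000;    /* pretend fill_note_info() reported this */
        unsigned long page     = 4096;

        unsigned long offset   = sizeof(Elf64_Ehdr) + segs * sizeof(Elf64_Phdr);
        unsigned long note_off = offset;  /* becomes the PT_NOTE p_offset */

        offset += notes_sz;
        /* roundup(offset, page) for a power-of-two page size */
        unsigned long dataoff = (offset + page - 1) & ~(page - 1);

        printf("notes at %lu, VMA data (dataoff) at %lu\n", note_off, dataoff);
        return 0;
}
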
    2161             : #endif          /* CONFIG_ELF_CORE */
    2162             : 
    2163           1 : static int __init init_elf_binfmt(void)
    2164             : {
    2165           1 :         register_binfmt(&elf_format);
    2166           1 :         return 0;
    2167             : }
    2168             : 
    2169           0 : static void __exit exit_elf_binfmt(void)
    2170             : {
    2171             :         /* Remove the ELF loader. */
    2172           0 :         unregister_binfmt(&elf_format);
    2173           0 : }
    2174             : 
    2175             : core_initcall(init_elf_binfmt);
    2176             : module_exit(exit_elf_binfmt);
    2177             : 
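init_elf_binfmt() hooks this loader into the kernel's list of binary format handlers; exec() walks that list, and a handler returns -ENOEXEC to pass the file on to the next one. A hedged sketch of the same registration pattern for a hypothetical out-of-tree handler (field names follow struct linux_binfmt as used by in-tree handlers such as binfmt_script.c):

#include <linux/binfmts.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

static int load_demo_binary(struct linux_binprm *bprm)
{
        return -ENOEXEC;        /* always decline; the next handler gets a try */
}

static struct linux_binfmt demo_format = {
        .module      = THIS_MODULE,
        .load_binary = load_demo_binary,
};

static int __init init_demo_binfmt(void)
{
        register_binfmt(&demo_format);
        return 0;
}

static void __exit exit_demo_binfmt(void)
{
        unregister_binfmt(&demo_format);
}

module_init(init_demo_binfmt);
module_exit(exit_demo_binfmt);
MODULE_LICENSE("GPL");
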
    2178             : #ifdef CONFIG_BINFMT_ELF_KUNIT_TEST
    2179             : #include "binfmt_elf_test.c"
    2180             : #endif

Generated by: LCOV version 1.14