LCOV - code coverage report
Current view:  top level - kernel - pid.c (source / functions)
Test:          coverage.info
Date:          2023-04-06 08:38:28

                    Hit     Total    Coverage
Lines:              107     240      44.6 %
Functions:          13      35       37.1 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * Generic pidhash and scalable, time-bounded PID allocator
       4             :  *
       5             :  * (C) 2002-2003 Nadia Yvette Chambers, IBM
       6             :  * (C) 2004 Nadia Yvette Chambers, Oracle
       7             :  * (C) 2002-2004 Ingo Molnar, Red Hat
       8             :  *
       9             :  * pid-structures are backing objects for tasks sharing a given ID to chain
      10             :  * against. There is very little to them aside from hashing them and
      11             :  * parking tasks using given IDs on a list.
      12             :  *
      13             :  * The hash is always changed with the tasklist_lock write-acquired,
      14             :  * and the hash is only accessed with the tasklist_lock at least
      15             :  * read-acquired, so there's no additional SMP locking needed here.
      16             :  *
      17             :  * We have a list of bitmap pages, which bitmaps represent the PID space.
      18             :  * Allocating and freeing PIDs is completely lockless. The worst-case
      19             :  * allocation scenario when all but one out of 1 million PIDs possible are
      20             :  * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
      21             :  * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
      22             :  *
      23             :  * Pid namespaces:
      24             :  *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
      25             :  *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
      26             :  *     Many thanks to Oleg Nesterov for comments and help
      27             :  *
      28             :  */
      29             : 
      30             : #include <linux/mm.h>
      31             : #include <linux/export.h>
      32             : #include <linux/slab.h>
      33             : #include <linux/init.h>
      34             : #include <linux/rculist.h>
      35             : #include <linux/memblock.h>
      36             : #include <linux/pid_namespace.h>
      37             : #include <linux/init_task.h>
      38             : #include <linux/syscalls.h>
      39             : #include <linux/proc_ns.h>
      40             : #include <linux/refcount.h>
      41             : #include <linux/anon_inodes.h>
      42             : #include <linux/sched/signal.h>
      43             : #include <linux/sched/task.h>
      44             : #include <linux/idr.h>
      45             : #include <net/sock.h>
      46             : #include <uapi/linux/pidfd.h>
      47             : 
      48             : struct pid init_struct_pid = {
      49             :         .count          = REFCOUNT_INIT(1),
      50             :         .tasks          = {
      51             :                 { .first = NULL },
      52             :                 { .first = NULL },
      53             :                 { .first = NULL },
      54             :         },
      55             :         .level          = 0,
      56             :         .numbers        = { {
      57             :                 .nr             = 0,
      58             :                 .ns             = &init_pid_ns,
      59             :         }, }
      60             : };
      61             : 
      62             : int pid_max = PID_MAX_DEFAULT;
      63             : 
      64             : #define RESERVED_PIDS           300
      65             : 
      66             : int pid_max_min = RESERVED_PIDS + 1;
      67             : int pid_max_max = PID_MAX_LIMIT;
      68             : 
      69             : /*
      70             :  * PID-map pages start out as NULL, they get allocated upon
      71             :  * first use and are never deallocated. This way a low pid_max
      72             :  * value does not cause lots of bitmaps to be allocated, but
      73             :  * the scheme scales up to 4 million PIDs at runtime.
      74             :  */
      75             : struct pid_namespace init_pid_ns = {
      76             :         .ns.count = REFCOUNT_INIT(2),
      77             :         .idr = IDR_INIT(init_pid_ns.idr),
      78             :         .pid_allocated = PIDNS_ADDING,
      79             :         .level = 0,
      80             :         .child_reaper = &init_task,
      81             :         .user_ns = &init_user_ns,
      82             :         .ns.inum = PROC_PID_INIT_INO,
      83             : #ifdef CONFIG_PID_NS
      84             :         .ns.ops = &pidns_operations,
      85             : #endif
      86             : };
      87             : EXPORT_SYMBOL_GPL(init_pid_ns);
      88             : 
      89             : /*
      90             :  * Note: disable interrupts while the pidmap_lock is held as an
      91             :  * interrupt might come in and do read_lock(&tasklist_lock).
      92             :  *
      93             :  * If we don't disable interrupts there is a nasty deadlock between
      94             :  * detach_pid()->free_pid() and another cpu that does
      95             :  * spin_lock(&pidmap_lock) followed by an interrupt routine that does
      96             :  * read_lock(&tasklist_lock);
      97             :  *
      98             :  * After we clean up the tasklist_lock and know there are no
      99             :  * irq handlers that take it, we can leave the interrupts enabled.
     100             :  * For now it is easier to be safe than to prove it can't happen.
     101             :  */
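
          A concrete two-CPU interleaving of the deadlock described in the comment above
          (free_pid() can run under write_lock_irq(&tasklist_lock), as noted in free_pid()
          below); this is an illustrative reconstruction, not part of the source:

              CPU0 (exit path)                        CPU1
              write_lock_irq(&tasklist_lock);         spin_lock(&pidmap_lock);   /* irqs left on */
              detach_pid() -> free_pid()              <interrupt arrives>
                spin_lock(&pidmap_lock);                irq handler: read_lock(&tasklist_lock);
                /* spins: CPU1 holds it */              /* spins: CPU0 holds it for write */

          Neither CPU can make progress, which is why every pidmap_lock acquisition in this
          file also disables interrupts (spin_lock_irq() / spin_lock_irqsave()).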
     102             : 
     103             : static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
     104             : 
     105        1013 : void put_pid(struct pid *pid)
     106             : {
     107             :         struct pid_namespace *ns;
     108             : 
     109        1013 :         if (!pid)
     110             :                 return;
     111             : 
     112        1013 :         ns = pid->numbers[pid->level].ns;
     113        2026 :         if (refcount_dec_and_test(&pid->count)) {
     114         332 :                 kmem_cache_free(ns->pid_cachep, pid);
     115         332 :                 put_pid_ns(ns);
     116             :         }
     117             : }
     118             : EXPORT_SYMBOL_GPL(put_pid);
     119             : 
     120         332 : static void delayed_put_pid(struct rcu_head *rhp)
     121             : {
     122         332 :         struct pid *pid = container_of(rhp, struct pid, rcu);
     123         332 :         put_pid(pid);
     124         332 : }
     125             : 
     126         333 : void free_pid(struct pid *pid)
     127             : {
     128             :         /* We can be called with write_lock_irq(&tasklist_lock) held */
     129             :         int i;
     130             :         unsigned long flags;
     131             : 
     132         333 :         spin_lock_irqsave(&pidmap_lock, flags);
     133         666 :         for (i = 0; i <= pid->level; i++) {
     134         333 :                 struct upid *upid = pid->numbers + i;
     135         333 :                 struct pid_namespace *ns = upid->ns;
     136         333 :                 switch (--ns->pid_allocated) {
     137             :                 case 2:
     138             :                 case 1:
     139             :                         /* When all that is left in the pid namespace
     140             :                          * is the reaper, wake up the reaper.  The reaper
     141             :                          * may be sleeping in zap_pid_ns_processes().
     142             :                          */
     143           0 :                         wake_up_process(ns->child_reaper);
     144           0 :                         break;
     145             :                 case PIDNS_ADDING:
     146             :                         /* Handle a fork failure of the first process */
     147           0 :                         WARN_ON(ns->child_reaper);
     148           0 :                         ns->pid_allocated = 0;
     149           0 :                         break;
     150             :                 }
     151             : 
     152         333 :                 idr_remove(&ns->idr, upid->nr);
     153             :         }
     154         333 :         spin_unlock_irqrestore(&pidmap_lock, flags);
     155             : 
     156         333 :         call_rcu(&pid->rcu, delayed_put_pid);
     157         333 : }
     158             : 
     159         348 : struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
     160             :                       size_t set_tid_size)
     161             : {
     162             :         struct pid *pid;
     163             :         enum pid_type type;
     164             :         int i, nr;
     165             :         struct pid_namespace *tmp;
     166             :         struct upid *upid;
     167         348 :         int retval = -ENOMEM;
     168             : 
     169             :         /*
     170             :          * set_tid_size contains the size of the set_tid array. Starting at
     171             :          * the most nested currently active PID namespace it tells alloc_pid()
     172             :          * which PID to set for a process in that most nested PID namespace
     173             :          * up to set_tid_size PID namespaces. It does not have to set the PID
     174             :          * for a process in all nested PID namespaces but set_tid_size must
     175             :          * never be greater than the current ns->level + 1.
     176             :          */
     177         348 :         if (set_tid_size > ns->level + 1)
     178             :                 return ERR_PTR(-EINVAL);
     179             : 
     180         348 :         pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
     181         348 :         if (!pid)
     182             :                 return ERR_PTR(retval);
     183             : 
     184         348 :         tmp = ns;
     185         348 :         pid->level = ns->level;
     186             : 
     187         696 :         for (i = ns->level; i >= 0; i--) {
     188         348 :                 int tid = 0;
     189             : 
     190         348 :                 if (set_tid_size) {
     191           0 :                         tid = set_tid[ns->level - i];
     192             : 
     193           0 :                         retval = -EINVAL;
     194           0 :                         if (tid < 1 || tid >= pid_max)
     195             :                                 goto out_free;
     196             :                         /*
     197             :                          * Also fail if a PID != 1 is requested and
     198             :                          * no PID 1 exists.
     199             :                          */
     200           0 :                         if (tid != 1 && !tmp->child_reaper)
     201             :                                 goto out_free;
     202           0 :                         retval = -EPERM;
     203           0 :                         if (!checkpoint_restore_ns_capable(tmp->user_ns))
     204             :                                 goto out_free;
     205           0 :                         set_tid_size--;
     206             :                 }
     207             : 
     208         348 :                 idr_preload(GFP_KERNEL);
     209         348 :                 spin_lock_irq(&pidmap_lock);
     210             : 
     211         348 :                 if (tid) {
     212           0 :                         nr = idr_alloc(&tmp->idr, NULL, tid,
     213             :                                        tid + 1, GFP_ATOMIC);
     214             :                         /*
     215             :                          * If ENOSPC is returned it means that the PID is
     216             :                          * already in use. Return EEXIST in that case.
     217             :                          */
     218           0 :                         if (nr == -ENOSPC)
     219           0 :                                 nr = -EEXIST;
     220             :                 } else {
     221         348 :                         int pid_min = 1;
     222             :                         /*
     223             :                          * init really needs pid 1, but after reaching the
     224             :                          * maximum, wrap back to RESERVED_PIDS
     225             :                          */
     226         696 :                         if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
     227          48 :                                 pid_min = RESERVED_PIDS;
     228             : 
     229             :                         /*
     230             :                          * Store a null pointer so find_pid_ns does not find
     231             :                          * a partially initialized PID (see below).
     232             :                          */
     233         348 :                         nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
     234             :                                               pid_max, GFP_ATOMIC);
     235             :                 }
     236         348 :                 spin_unlock_irq(&pidmap_lock);
     237             :                 idr_preload_end();
     238             : 
     239         348 :                 if (nr < 0) {
     240           0 :                         retval = (nr == -ENOSPC) ? -EAGAIN : nr;
     241             :                         goto out_free;
     242             :                 }
     243             : 
     244         348 :                 pid->numbers[i].nr = nr;
     245         348 :                 pid->numbers[i].ns = tmp;
     246         348 :                 tmp = tmp->parent;
     247             :         }
     248             : 
     249             :         /*
     250             :          * ENOMEM is not the most obvious choice especially for the case
     251             :          * where the child subreaper has already exited and the pid
     252             :          * namespace denies the creation of any new processes. But ENOMEM
     253             :          * is what we have exposed to userspace for a long time and it is
     254             :          * documented behavior for pid namespaces. So we can't easily
     255             :          * change it even if there were an error code better suited.
     256             :          */
     257         348 :         retval = -ENOMEM;
     258             : 
     259         348 :         get_pid_ns(ns);
     260         696 :         refcount_set(&pid->count, 1);
     261         348 :         spin_lock_init(&pid->lock);
     262        1740 :         for (type = 0; type < PIDTYPE_MAX; ++type)
     263        1392 :                 INIT_HLIST_HEAD(&pid->tasks[type]);
     264             : 
     265         348 :         init_waitqueue_head(&pid->wait_pidfd);
     266         348 :         INIT_HLIST_HEAD(&pid->inodes);
     267             : 
     268         348 :         upid = pid->numbers + ns->level;
     269         348 :         spin_lock_irq(&pidmap_lock);
     270         348 :         if (!(ns->pid_allocated & PIDNS_ADDING))
     271             :                 goto out_unlock;
     272         348 :         for ( ; upid >= pid->numbers; --upid) {
     273             :                 /* Make the PID visible to find_pid_ns. */
     274         348 :                 idr_replace(&upid->ns->idr, pid, upid->nr);
     275         348 :                 upid->ns->pid_allocated++;
     276             :         }
     277         348 :         spin_unlock_irq(&pidmap_lock);
     278             : 
     279         348 :         return pid;
     280             : 
     281             : out_unlock:
     282           0 :         spin_unlock_irq(&pidmap_lock);
     283           0 :         put_pid_ns(ns);
     284             : 
     285             : out_free:
     286             :         spin_lock_irq(&pidmap_lock);
     287           0 :         while (++i <= ns->level) {
     288           0 :                 upid = pid->numbers + i;
     289           0 :                 idr_remove(&upid->ns->idr, upid->nr);
     290             :         }
     291             : 
     292             :         /* On failure to allocate the first pid, reset the state */
     293           0 :         if (ns->pid_allocated == PIDNS_ADDING)
     294           0 :                 idr_set_cursor(&ns->idr, 0);
     295             : 
     296           0 :         spin_unlock_irq(&pidmap_lock);
     297             : 
     298           0 :         kmem_cache_free(ns->pid_cachep, pid);
     299           0 :         return ERR_PTR(retval);
     300             : }
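
          The set_tid branch above is reached from userspace through clone3(). A minimal,
          hedged sketch of such a caller follows; it assumes kernel headers new enough to
          provide struct clone_args and __NR_clone3 (Linux 5.5+) and a caller holding
          CAP_CHECKPOINT_RESTORE over the target pid namespace, otherwise alloc_pid()
          returns -EPERM as shown above. The helper name is hypothetical.

          #define _GNU_SOURCE
          #include <linux/sched.h>        /* struct clone_args (uapi) */
          #include <sys/syscall.h>
          #include <sys/types.h>
          #include <signal.h>
          #include <stdint.h>
          #include <string.h>
          #include <unistd.h>

          /* Create a child with a specific PID in the current (most nested) pid namespace. */
          static pid_t clone3_with_fixed_tid(pid_t wanted)
          {
                  pid_t set_tid[1] = { wanted };          /* index 0 = most nested namespace */
                  struct clone_args args;

                  memset(&args, 0, sizeof(args));
                  args.exit_signal  = SIGCHLD;
                  args.set_tid      = (uint64_t)(uintptr_t)set_tid;
                  args.set_tid_size = 1;                  /* must not exceed caller's ns->level + 1 */

                  /* clone3() has no glibc wrapper; returns 0 in the child, the new PID in the parent. */
                  return (pid_t)syscall(__NR_clone3, &args, sizeof(args));
          }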
     301             : 
     302           0 : void disable_pid_allocation(struct pid_namespace *ns)
     303             : {
     304           0 :         spin_lock_irq(&pidmap_lock);
     305           0 :         ns->pid_allocated &= ~PIDNS_ADDING;
     306           0 :         spin_unlock_irq(&pidmap_lock);
     307           0 : }
     308             : 
     309           0 : struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
     310             : {
     311           2 :         return idr_find(&ns->idr, nr);
     312             : }
     313             : EXPORT_SYMBOL_GPL(find_pid_ns);
     314             : 
     315           0 : struct pid *find_vpid(int nr)
     316             : {
     317           0 :         return find_pid_ns(nr, task_active_pid_ns(current));
     318             : }
     319             : EXPORT_SYMBOL_GPL(find_vpid);
     320             : 
     321             : static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
     322             : {
     323             :         return (type == PIDTYPE_PID) ?
     324        5448 :                 &task->thread_pid :
     325        2043 :                 &task->signal->pids[type];
     326             : }
     327             : 
     328             : /*
     329             :  * attach_pid() must be called with the tasklist_lock write-held.
     330             :  */
     331        1392 : void attach_pid(struct task_struct *task, enum pid_type type)
     332             : {
     333        1392 :         struct pid *pid = *task_pid_ptr(task, type);
     334        2784 :         hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
     335        1392 : }
     336             : 
     337        1332 : static void __change_pid(struct task_struct *task, enum pid_type type,
     338             :                         struct pid *new)
     339             : {
     340        1332 :         struct pid **pid_ptr = task_pid_ptr(task, type);
     341             :         struct pid *pid;
     342             :         int tmp;
     343             : 
     344        1332 :         pid = *pid_ptr;
     345             : 
     346        2664 :         hlist_del_rcu(&task->pid_links[type]);
     347        1332 :         *pid_ptr = new;
     348             : 
     349        4662 :         for (tmp = PIDTYPE_MAX; --tmp >= 0; )
     350        5994 :                 if (pid_has_task(pid, tmp))
     351             :                         return;
     352             : 
     353         333 :         free_pid(pid);
     354             : }
     355             : 
     356        1332 : void detach_pid(struct task_struct *task, enum pid_type type)
     357             : {
     358        1332 :         __change_pid(task, type, NULL);
     359        1332 : }
     360             : 
     361           0 : void change_pid(struct task_struct *task, enum pid_type type,
     362             :                 struct pid *pid)
     363             : {
     364           0 :         __change_pid(task, type, pid);
     365           0 :         attach_pid(task, type);
     366           0 : }
     367             : 
     368           0 : void exchange_tids(struct task_struct *left, struct task_struct *right)
     369             : {
     370           0 :         struct pid *pid1 = left->thread_pid;
     371           0 :         struct pid *pid2 = right->thread_pid;
     372           0 :         struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
     373           0 :         struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];
     374             : 
     375             :         /* Swap the single entry tid lists */
     376           0 :         hlists_swap_heads_rcu(head1, head2);
     377             : 
     378             :         /* Swap the per task_struct pid */
     379           0 :         rcu_assign_pointer(left->thread_pid, pid2);
     380           0 :         rcu_assign_pointer(right->thread_pid, pid1);
     381             : 
     382             :         /* Swap the cached value */
     383           0 :         WRITE_ONCE(left->pid, pid_nr(pid2));
     384           0 :         WRITE_ONCE(right->pid, pid_nr(pid1));
     385           0 : }
     386             : 
     387             : /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
     388           0 : void transfer_pid(struct task_struct *old, struct task_struct *new,
     389             :                            enum pid_type type)
     390             : {
     391           0 :         if (type == PIDTYPE_PID)
     392           0 :                 new->thread_pid = old->thread_pid;
     393           0 :         hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
     394           0 : }
     395             : 
     396           0 : struct task_struct *pid_task(struct pid *pid, enum pid_type type)
     397             : {
     398           2 :         struct task_struct *result = NULL;
     399           2 :         if (pid) {
     400             :                 struct hlist_node *first;
     401           2 :                 first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
     402             :                                               lockdep_tasklist_lock_is_held());
     403           2 :                 if (first)
     404           2 :                         result = hlist_entry(first, struct task_struct, pid_links[(type)]);
     405             :         }
     406           0 :         return result;
     407             : }
     408             : EXPORT_SYMBOL(pid_task);
     409             : 
     410             : /*
     411             :  * Must be called under rcu_read_lock().
     412             :  */
     413           2 : struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
     414             : {
     415             :         RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
     416             :                          "find_task_by_pid_ns() needs rcu_read_lock() protection");
     417           4 :         return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
     418             : }
     419             : 
     420           0 : struct task_struct *find_task_by_vpid(pid_t vnr)
     421             : {
     422           0 :         return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
     423             : }
     424             : 
     425           0 : struct task_struct *find_get_task_by_vpid(pid_t nr)
     426             : {
     427             :         struct task_struct *task;
     428             : 
     429             :         rcu_read_lock();
     430           0 :         task = find_task_by_vpid(nr);
     431           0 :         if (task)
     432             :                 get_task_struct(task);
     433             :         rcu_read_unlock();
     434             : 
     435           0 :         return task;
     436             : }
     437             : 
     438         348 : struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
     439             : {
     440             :         struct pid *pid;
     441             :         rcu_read_lock();
     442         696 :         pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
     443             :         rcu_read_unlock();
     444         348 :         return pid;
     445             : }
     446             : EXPORT_SYMBOL_GPL(get_task_pid);
     447             : 
     448           0 : struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
     449             : {
     450             :         struct task_struct *result;
     451             :         rcu_read_lock();
     452           0 :         result = pid_task(pid, type);
     453           0 :         if (result)
     454             :                 get_task_struct(result);
     455             :         rcu_read_unlock();
     456           0 :         return result;
     457             : }
     458             : EXPORT_SYMBOL_GPL(get_pid_task);
     459             : 
     460           0 : struct pid *find_get_pid(pid_t nr)
     461             : {
     462             :         struct pid *pid;
     463             : 
     464             :         rcu_read_lock();
     465           0 :         pid = get_pid(find_vpid(nr));
     466             :         rcu_read_unlock();
     467             : 
     468           0 :         return pid;
     469             : }
     470             : EXPORT_SYMBOL_GPL(find_get_pid);
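
          The helpers above hand out counted references; a minimal sketch of the expected
          pairing in a caller (example_pid_is_alive() is hypothetical, not part of pid.c):

          #include <linux/errno.h>
          #include <linux/pid.h>
          #include <linux/sched/task.h>   /* put_task_struct() */

          /* Hypothetical: check whether a pid number still has a live task attached. */
          static int example_pid_is_alive(pid_t nr)
          {
                  struct pid *pid;
                  struct task_struct *task;
                  int alive;

                  pid = find_get_pid(nr);                 /* NULL, or a counted struct pid reference */
                  if (!pid)
                          return -ESRCH;

                  task = get_pid_task(pid, PIDTYPE_PID);  /* counted task reference, or NULL */
                  alive = task != NULL;
                  if (task)
                          put_task_struct(task);

                  put_pid(pid);                           /* balance the find_get_pid() reference */
                  return alive ? 0 : -ESRCH;
          }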
     471             : 
     472           0 : pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
     473             : {
     474             :         struct upid *upid;
     475         681 :         pid_t nr = 0;
     476             : 
     477         681 :         if (pid && ns->level <= pid->level) {
     478         681 :                 upid = &pid->numbers[ns->level];
     479         681 :                 if (upid->ns == ns)
     480         681 :                         nr = upid->nr;
     481             :         }
     482           0 :         return nr;
     483             : }
     484             : EXPORT_SYMBOL_GPL(pid_nr_ns);
     485             : 
     486         348 : pid_t pid_vnr(struct pid *pid)
     487             : {
     488        1044 :         return pid_nr_ns(pid, task_active_pid_ns(current));
     489             : }
     490             : EXPORT_SYMBOL_GPL(pid_vnr);
     491             : 
     492         333 : pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
     493             :                         struct pid_namespace *ns)
     494             : {
     495         333 :         pid_t nr = 0;
     496             : 
     497             :         rcu_read_lock();
     498         333 :         if (!ns)
     499           0 :                 ns = task_active_pid_ns(current);
     500         666 :         nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
     501             :         rcu_read_unlock();
     502             : 
     503         333 :         return nr;
     504             : }
     505             : EXPORT_SYMBOL(__task_pid_nr_ns);
     506             : 
     507         667 : struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
     508             : {
     509        2030 :         return ns_of_pid(task_pid(tsk));
     510             : }
     511             : EXPORT_SYMBOL_GPL(task_active_pid_ns);
     512             : 
     513             : /*
     514             :  * Used by proc to find the first pid that is greater than or equal to nr.
     515             :  *
     516             :  * If there is a pid at nr this function is exactly the same as find_pid_ns.
     517             :  */
     518           0 : struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
     519             : {
     520           0 :         return idr_get_next(&ns->idr, &nr);
     521             : }
     522             : EXPORT_SYMBOL_GPL(find_ge_pid);
     523             : 
     524           0 : struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
     525             : {
     526             :         struct fd f;
     527             :         struct pid *pid;
     528             : 
     529           0 :         f = fdget(fd);
     530           0 :         if (!f.file)
     531             :                 return ERR_PTR(-EBADF);
     532             : 
     533           0 :         pid = pidfd_pid(f.file);
     534           0 :         if (!IS_ERR(pid)) {
     535           0 :                 get_pid(pid);
     536           0 :                 *flags = f.file->f_flags;
     537             :         }
     538             : 
     539           0 :         fdput(f);
     540             :         return pid;
     541             : }
     542             : 
     543             : /**
     544             :  * pidfd_get_task() - Get the task associated with a pidfd
     545             :  *
     546             :  * @pidfd: pidfd for which to get the task
     547             :  * @flags: flags associated with this pidfd
     548             :  *
     549             :  * Return the task associated with @pidfd. The function takes a reference on
     550             :  * the returned task. The caller is responsible for releasing that reference.
     551             :  *
     552             :  * Currently, the process identified by @pidfd is always a thread-group leader.
     553             :  * This restriction currently exists for all aspects of pidfds including pidfd
     554             :  * creation (CLONE_PIDFD cannot be used with CLONE_THREAD) and pidfd polling
     555             :  * (only supports thread group leaders).
     556             :  *
     557             :  * Return: On success, the task_struct associated with the pidfd.
     558             :  *         On error, a negative errno number will be returned.
     559             :  */
     560           0 : struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
     561             : {
     562             :         unsigned int f_flags;
     563             :         struct pid *pid;
     564             :         struct task_struct *task;
     565             : 
     566           0 :         pid = pidfd_get_pid(pidfd, &f_flags);
     567           0 :         if (IS_ERR(pid))
     568             :                 return ERR_CAST(pid);
     569             : 
     570           0 :         task = get_pid_task(pid, PIDTYPE_TGID);
     571           0 :         put_pid(pid);
     572           0 :         if (!task)
     573             :                 return ERR_PTR(-ESRCH);
     574             : 
     575           0 :         *flags = f_flags;
     576           0 :         return task;
     577             : }
     578             : 
     579             : /**
     580             :  * pidfd_create() - Create a new pid file descriptor.
     581             :  *
     582             :  * @pid:   struct pid that the pidfd will reference
     583             :  * @flags: flags to pass
     584             :  *
     585             :  * This creates a new pid file descriptor with the O_CLOEXEC flag set.
     586             :  *
     587             :  * Note, that this function can only be called after the fd table has
     588             :  * been unshared to avoid leaking the pidfd to the new process.
     589             :  *
     590             :  * This symbol should not be explicitly exported to loadable modules.
     591             :  *
     592             :  * Return: On success, a cloexec pidfd is returned.
     593             :  *         On error, a negative errno number will be returned.
     594             :  */
     595           0 : int pidfd_create(struct pid *pid, unsigned int flags)
     596             : {
     597             :         int fd;
     598             : 
     599           0 :         if (!pid || !pid_has_task(pid, PIDTYPE_TGID))
     600             :                 return -EINVAL;
     601             : 
     602           0 :         if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC))
     603             :                 return -EINVAL;
     604             : 
     605           0 :         fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
     606           0 :                               flags | O_RDWR | O_CLOEXEC);
     607           0 :         if (fd < 0)
     608           0 :                 put_pid(pid);
     609             : 
     610             :         return fd;
     611             : }
     612             : 
     613             : /**
     614             :  * pidfd_open() - Open new pid file descriptor.
     615             :  *
     616             :  * @pid:   pid for which to retrieve a pidfd
     617             :  * @flags: flags to pass
     618             :  *
     619             :  * This creates a new pid file descriptor with the O_CLOEXEC flag set for
     620             :  * the process identified by @pid. Currently, the process identified by
     621             :  * @pid must be a thread-group leader. This restriction currently exists
     622             :  * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
     623             :  * be used with CLONE_THREAD) and pidfd polling (only supports thread group
     624             :  * leaders).
     625             :  *
     626             :  * Return: On success, a cloexec pidfd is returned.
     627             :  *         On error, a negative errno number will be returned.
     628             :  */
     629           0 : SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
     630             : {
     631             :         int fd;
     632             :         struct pid *p;
     633             : 
     634           0 :         if (flags & ~PIDFD_NONBLOCK)
     635             :                 return -EINVAL;
     636             : 
     637           0 :         if (pid <= 0)
     638             :                 return -EINVAL;
     639             : 
     640           0 :         p = find_get_pid(pid);
     641           0 :         if (!p)
     642             :                 return -ESRCH;
     643             : 
     644           0 :         fd = pidfd_create(p, flags);
     645             : 
     646           0 :         put_pid(p);
     647           0 :         return fd;
     648             : }
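
          From userspace, the usual pattern is to open a pidfd and poll it, since the
          descriptor becomes readable when the target process terminates. A hedged sketch
          (assumes the installed glibc/kernel headers expose SYS_pidfd_open; the helper name
          is hypothetical):

          #include <errno.h>
          #include <poll.h>
          #include <sys/syscall.h>
          #include <sys/types.h>
          #include <unistd.h>

          /* Block until the process identified by pid exits. Returns 0, or -1 with errno set. */
          static int wait_for_exit(pid_t pid)
          {
                  struct pollfd pfd;
                  int pidfd = syscall(SYS_pidfd_open, pid, 0);

                  if (pidfd < 0)
                          return -1;

                  pfd.fd = pidfd;
                  pfd.events = POLLIN;    /* pidfd signals readable once the process has exited */
                  for (;;) {
                          if (poll(&pfd, 1, -1) >= 0)
                                  break;
                          if (errno != EINTR) {
                                  close(pidfd);
                                  return -1;
                          }
                  }

                  close(pidfd);
                  return 0;
          }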
     649             : 
     650           1 : void __init pid_idr_init(void)
     651             : {
     652             :         /* Verify no one has done anything silly: */
     653             :         BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);
     654             : 
     655             :         /* bump default and minimum pid_max based on number of cpus */
     656           1 :         pid_max = min(pid_max_max, max_t(int, pid_max,
     657             :                                 PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
     658           1 :         pid_max_min = max_t(int, pid_max_min,
     659             :                                 PIDS_PER_CPU_MIN * num_possible_cpus());
     660           1 :         pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
     661             : 
     662           1 :         idr_init(&init_pid_ns.idr);
     663             : 
     664           1 :         init_pid_ns.pid_cachep = KMEM_CACHE(pid,
     665             :                         SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
     666           1 : }
     667             : 
     668           0 : static struct file *__pidfd_fget(struct task_struct *task, int fd)
     669             : {
     670             :         struct file *file;
     671             :         int ret;
     672             : 
     673           0 :         ret = down_read_killable(&task->signal->exec_update_lock);
     674           0 :         if (ret)
     675           0 :                 return ERR_PTR(ret);
     676             : 
     677           0 :         if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
     678           0 :                 file = fget_task(task, fd);
     679             :         else
     680             :                 file = ERR_PTR(-EPERM);
     681             : 
     682           0 :         up_read(&task->signal->exec_update_lock);
     683             : 
     684           0 :         return file ?: ERR_PTR(-EBADF);
     685             : }
     686             : 
     687           0 : static int pidfd_getfd(struct pid *pid, int fd)
     688             : {
     689             :         struct task_struct *task;
     690             :         struct file *file;
     691             :         int ret;
     692             : 
     693           0 :         task = get_pid_task(pid, PIDTYPE_PID);
     694           0 :         if (!task)
     695             :                 return -ESRCH;
     696             : 
     697           0 :         file = __pidfd_fget(task, fd);
     698           0 :         put_task_struct(task);
     699           0 :         if (IS_ERR(file))
     700           0 :                 return PTR_ERR(file);
     701             : 
     702           0 :         ret = receive_fd(file, O_CLOEXEC);
     703           0 :         fput(file);
     704             : 
     705           0 :         return ret;
     706             : }
     707             : 
     708             : /**
     709             :  * sys_pidfd_getfd() - Get a file descriptor from another process
     710             :  *
     711             :  * @pidfd:      the pidfd file descriptor of the process
     712             :  * @fd:         the file descriptor number to get
     713             :  * @flags:      flags on how to get the fd (reserved)
     714             :  *
     715             :  * This syscall gets a copy of a file descriptor from another process
     716             :  * based on the pidfd, and file descriptor number. It requires that
     717             :  * the calling process has the ability to ptrace the process represented
     718             :  * by the pidfd. The process which is having its file descriptor copied
     719             :  * is otherwise unaffected.
     720             :  *
     721             :  * Return: On success, a cloexec file descriptor is returned.
     722             :  *         On error, a negative errno number will be returned.
     723             :  */
     724           0 : SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
     725             :                 unsigned int, flags)
     726             : {
     727             :         struct pid *pid;
     728             :         struct fd f;
     729             :         int ret;
     730             : 
     731             :         /* flags is currently unused - make sure it's unset */
     732           0 :         if (flags)
     733             :                 return -EINVAL;
     734             : 
     735           0 :         f = fdget(pidfd);
     736           0 :         if (!f.file)
     737             :                 return -EBADF;
     738             : 
     739           0 :         pid = pidfd_pid(f.file);
     740           0 :         if (IS_ERR(pid))
     741           0 :                 ret = PTR_ERR(pid);
     742             :         else
     743           0 :                 ret = pidfd_getfd(pid, fd);
     744             : 
     745           0 :         fdput(f);
     746           0 :         return ret;
     747             : }
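
          The matching userspace side of pidfd_getfd(): the caller must have ptrace-attach
          permission over the target (see __pidfd_fget() above), and the returned copy is
          O_CLOEXEC. A hedged sketch, assuming SYS_pidfd_getfd is defined by the installed
          headers (the helper name is hypothetical):

          #include <sys/syscall.h>
          #include <unistd.h>

          /*
           * Copy file descriptor targetfd out of the process behind pidfd into the
           * calling process. Returns the new local descriptor, or -1 with errno set.
           */
          static int grab_remote_fd(int pidfd, int targetfd)
          {
                  return (int)syscall(SYS_pidfd_getfd, pidfd, targetfd, 0 /* flags: must be 0 */);
          }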

Generated by: LCOV version 1.14