// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
        .uid_map = {
                .nr_extents = 1,
                {
                        .extent[0] = {
                                .first = 0,
                                .lower_first = 0,
                                .count = 4294967295U,
                        },
                },
        },
        .gid_map = {
                .nr_extents = 1,
                {
                        .extent[0] = {
                                .first = 0,
                                .lower_first = 0,
                                .count = 4294967295U,
                        },
                },
        },
        .projid_map = {
                .nr_extents = 1,
                {
                        .extent[0] = {
                                .first = 0,
                                .lower_first = 0,
                                .count = 4294967295U,
                        },
                },
        },
        .ns.count = REFCOUNT_INIT(3),
        .owner = GLOBAL_ROOT_UID,
        .group = GLOBAL_ROOT_GID,
        .ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
        .ns.ops = &userns_operations,
#endif
        .flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
        .keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
        .keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
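
/*
 * Mapping sketch (an aside, not part of the original source): each extent
 * maps ids in [first, first + count) of the namespace onto the underlying
 * range starting at lower_first. The single identity extent above spans
 * the whole 32-bit id space (count = 4294967295U, i.e. UINT_MAX), so for
 * example uid 1000 resolves to uid 1000. Roughly, the per-extent lookup
 * amounts to:
 *
 *      if (id >= first && id - first < count)
 *              mapped = lower_first + (id - first);
 *
 * The real lookup lives in kernel/user_namespace.c.
 */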

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS    (CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ      (1 << UIDHASH_BITS)
#define UIDHASH_MASK    (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)       (uidhash_table + __uidhashfn((__kuid_val(uid))))
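
/*
 * Worked example (added for illustration): with the default
 * UIDHASH_BITS = 7 there are 128 buckets, and uid 1000 hashes to
 * ((1000 >> 7) + 1000) & 127 = (7 + 1000) & 127 = 111. Folding the
 * high bits in means uids that are equal modulo UIDHASH_SZ (e.g. 1000
 * and 1128) still land in different buckets.
 */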

static struct kmem_cache *uid_cachep;
static struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
        .__count = REFCOUNT_INIT(1),
        .uid = GLOBAL_ROOT_UID,
        .ratelimit = RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;

        hlist_for_each_entry(user, hashent, uidhash_node) {
                if (uid_eq(user->uid, uid)) {
                        refcount_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}
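
/*
 * Hardening sketch (not present in this file): the "spinlock held"
 * contract above could be made self-checking by opening each of the
 * three helpers with
 *
 *      lockdep_assert_held(&uidhash_lock);
 *
 * which is a no-op on kernels built without lockdep.
 */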

static int user_epoll_alloc(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
        return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
#else
        return 0;
#endif
}

static void user_epoll_free(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
        percpu_counter_destroy(&up->epoll_watches);
#endif
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
        __releases(&uidhash_lock)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        user_epoll_free(up);
        kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}

void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
                free_user(up, flags);
}
EXPORT_SYMBOL_GPL(free_uid);
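
/*
 * Illustrative usage (a sketch, not a real caller in this file): every
 * successful find_user() returns a counted reference that must be
 * balanced by free_uid():
 *
 *      struct user_struct *up = find_user(uid);
 *
 *      if (up) {
 *              ... inspect the per-user state ...
 *              free_uid(up);
 *      }
 *
 * free_uid(NULL) is a no-op, so error paths may call it unconditionally.
 */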

struct user_struct *alloc_uid(kuid_t uid)
{
        struct hlist_head *hashent = uidhashentry(uid);
        struct user_struct *up, *new;

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        return NULL;

                new->uid = uid;
                refcount_set(&new->__count, 1);
                if (user_epoll_alloc(new)) {
                        kmem_cache_free(uid_cachep, new);
                        return NULL;
                }
                ratelimit_state_init(&new->ratelimit, HZ, 100);
                ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already.
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        user_epoll_free(new);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        return up;
}
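
/*
 * Note on the shape of alloc_uid() (added commentary): the allocation is
 * done optimistically with the lock dropped, because
 * kmem_cache_zalloc(..., GFP_KERNEL) may sleep while uidhash_lock is a
 * softirq-safe spinlock. The hash is then re-checked under the lock, and
 * the loser of a race frees its own copy. Callers (for example set_user()
 * in kernel/sys.c) keep the returned reference and eventually release it
 * with free_uid().
 */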

static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(uidhash_table + n);

        if (user_epoll_alloc(&root_user))
                panic("root_user epoll percpu counter alloc failed");

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}
subsys_initcall(uid_cache_init);