/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 * For raw_copy_from_user(), the destination (to) always points to kernel
 * memory and no faults on store should happen.  Interpretation of from is
 * affected by set_fs().  For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
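
/*
 * Illustrative example of the contract above (not part of the API): if a
 * caller requests raw_copy_from_user(to, from, 16) and only the first 9
 * source bytes can be fetched, a conforming implementation returns at
 * least 7 and less than 16.  If it returns 7, the first 9 bytes at to
 * hold the copied data and the last 7 bytes are left untouched.  A
 * hypothetical caller therefore looks like:
 *
 *	unsigned long left = raw_copy_from_user(buf, uptr, len);
 *	if (left)
 *		// only len - left bytes at buf are valid; buf is NOT
 *		// zero-padded at this level
 */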

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	instrument_copy_from_user_before(to, from, n);
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	might_fault();
	instrument_copy_from_user_before(to, from, n);
	if (should_fail_usercopy())
		return n;
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  The caller must check the
 * specified block with access_ok() before calling this function, and
 * should also make sure the user space pages are pinned so that the copy
 * cannot fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
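
/*
 * Minimal caller-side sketch (the helper below is hypothetical, not part
 * of this header): __copy_to_user() skips the access_ok() check, so the
 * caller must perform it first.
 *
 *	static int example_push_result(u32 __user *uptr, u32 val)
 *	{
 *		if (!access_ok(uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		if (__copy_to_user(uptr, &val, sizeof(val)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */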

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user(to, from, n);
	return n;
}
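
/*
 * Typical usage sketch (hypothetical ioctl-style handler, not part of this
 * header).  Both functions return the number of bytes that could NOT be
 * copied, so any non-zero result is normally turned into -EFAULT.  On a
 * short copy, copy_from_user() zero-pads the rest of the destination.
 *
 *	struct example_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 *	args.result = do_something(&args);
 *	if (copy_to_user(uarg, &args, sizeof(args)))
 *		return -EFAULT;
 *	return 0;
 */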

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
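
/*
 * Caller-side sketch (illustrative): like the copy_{to,from}_user()
 * family, copy_mc_to_kernel() returns the number of bytes left uncopied,
 * so a machine-check-aborted copy can be reported as an I/O error:
 *
 *	unsigned long rem = copy_mc_to_kernel(dst, src, len);
 *	if (rem)
 *		return -EIO;	// len - rem bytes were copied before the fault
 */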

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler.  If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
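
/*
 * A common pattern (sketch): attempt a non-sleeping user copy from atomic
 * context, and fall back to a faulting copy outside the critical section
 * if it does not fully succeed:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, uaddr, size);
 *	pagefault_enable();
 *	if (ret)
 *		// retry with copy_from_user() after leaving atomic context
 */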

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers.  Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler.  With
 * !CONFIG_PREEMPT_COUNT, preempt_disable() is effectively a no-op, so the
 * handler would not actually be disabled; in_atomic() also reports different
 * values depending on CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
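
/*
 * Sketch of how an arch fault handler typically consults this
 * (illustrative, resembling but not quoting any particular architecture):
 * if faults are disallowed, skip the sleeping path and go straight to
 * exception fixup.
 *
 *	if (faulthandler_disabled() || !mm)
 *		// no_context: resolve via the exception/fixup table
 */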

#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *			    granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, the number of bytes not probed on fault.
 *
 * It is expected that the caller checked for the write permission of each
 * page in the range either by put_user() or GUP.  The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst: Destination address, in kernel space. This buffer must be @ksize
 *	 bytes long.
 * @ksize: Size of @dst struct.
 * @src: Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Double check if ksize is larger than a known object size. */
	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
		return -E2BIG;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
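
/*
 * Usage sketch (illustrative): the _nofault() helpers never sleep and never
 * oops on a bad address, which makes them suitable for tracing and
 * debugging code that must inspect arbitrary addresses:
 *
 *	unsigned long val;
 *
 *	if (copy_from_kernel_nofault(&val, ptr, sizeof(val)))
 *		return;		// ptr was not a readable kernel address
 */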

#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(src);	\
	type data;					\
	if (__get_user(data, p))			\
		goto label;				\
	*(type *)dst = data;				\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(dst);	\
	type data = *(type *)src;			\
	if (__put_user(data, p))			\
		goto label;				\
} while (0)
#endif

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
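
/*
 * Example (illustrative): peek at a possibly-invalid kernel pointer
 * without risking an oops:
 *
 *	int v;
 *
 *	if (get_kernel_nofault(v, some_int_ptr))
 *		pr_info("address not readable\n");
 */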

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
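
/*
 * Usage sketch for the user_access_begin() family (illustrative): batch
 * several unsafe accesses inside one begin/end section, with a goto label
 * for the fault path:
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(a, &uptr[0], Efault);
 *	unsafe_get_user(b, &uptr[1], Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */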

#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif /* __LINUX_UACCESS_H__ */