/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. If no mapping slot is
 * available the function blocks until a slot is released via kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

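/*
 * Example (an illustrative sketch, not part of this header): a mapping
 * which must stay valid across a sleep or be seen by another context
 * uses the kmap()/kunmap() pair. "page" is assumed to be a page whose
 * reference is held by the caller:
 *
 *	void *addr = kmap(page);
 *
 *	memset(addr, 0, PAGE_SIZE);
 *	kunmap(page);
 */
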
/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 * remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_page(page1);
 *	addr2 = kmap_local_page(page2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the highmem
 * case it comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);

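/*
 * Example (an illustrative sketch, not part of this header): the common
 * single page pattern; "page" and the destination buffer "buf" are
 * assumed to be provided by the caller, and the pointer is dropped
 * before leaving the context:
 *
 *	void *addr = kmap_local_page(page);
 *
 *	memcpy(buf, addr, PAGE_SIZE);
 *	kunmap_local(addr);
 */
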
/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_folio(folio1, offset1);
 *	addr2 = kmap_local_folio(folio2, offset2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);

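/*
 * Example (an illustrative sketch, not part of this header): mapping the
 * page of a folio which contains file position "pos", with "folio" held
 * by the caller. On HIGHMEM kernels only a single page is mapped, so the
 * returned pointer must not be advanced across a page boundary:
 *
 *	char *addr = kmap_local_folio(folio, offset_in_folio(folio, pos));
 *
 *	// ... access at most up to the next page boundary ...
 *	kunmap_local(addr);
 */
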
/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *	// Find the page of interest.
 *	struct page *page = find_get_page(mapping, offset);
 *
 *	// Gain access to the contents of that page.
 *	void *vaddr = kmap_atomic(page);
 *
 *	// Do something to the contents of that page.
 *	memset(vaddr, 0, PAGE_SIZE);
 *
 *	// Unmap that page.
 *	kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another, you need to keep the kmap_atomic calls strictly nested, like::
 *
 *	vaddr1 = kmap_atomic(page1);
 *	vaddr2 = kmap_atomic(page2);
 *
 *	memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 *	kunmap_atomic(vaddr2);
 *	kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address. It may be allocated from highmem or
 * the movable zone. An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
					     unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	if (folio)
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
#endif

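/*
 * Example (an illustrative sketch, not part of this header): an anonymous
 * fault path would allocate a zeroed folio roughly like this, where "vma"
 * and "vmf" come from the fault handler:
 *
 *	struct folio *folio;
 *
 *	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 */
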
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kasan_reset_tag(kaddr));
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

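/*
 * Example (an illustrative sketch, not part of this header): filesystems
 * typically use zero_user() to clear the part of the last page beyond
 * end of file; "isize" is assumed to hold the file size:
 *
 *	unsigned int offset = offset_in_page(isize);
 *
 *	if (offset)
 *		zero_user(page, offset, PAGE_SIZE - offset);
 */
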
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

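/*
 * Example (an illustrative sketch, not part of this header): duplicating
 * a page during a write fault; "src", "dst", "vma" and "vmf" are assumed
 * to be set up by the caller. copy_highpage() is the variant for copies
 * which are not tied to a user space address:
 *
 *	copy_user_highpage(dst, src, vmf->address, vma);
 */
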
#ifdef copy_mc_to_kernel
/*
 * If the architecture supports machine check exception handling, define the
 * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
 * page with a #MC in the source page (@from) handled, and return the number
 * of bytes not copied if there was a #MC, otherwise 0 for success.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);

	return ret;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);

	return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}
#endif

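/*
 * Example (an illustrative sketch, not part of this header): callers which
 * can recover from a machine check must treat a non zero return value as a
 * failed copy; "dst", "src", "addr" and "vma" come from the surrounding
 * code and -EHWPOISON is merely one plausible error code here:
 *
 *	if (copy_mc_user_highpage(dst, src, addr, vma))
 *		return -EHWPOISON;
 */
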
static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

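/*
 * Example (an illustrative sketch, not part of this header): the helpers
 * above bound an access to a single page and take care of the kmap and
 * cache flush handling. With a caller provided buffer "buf" of "len"
 * bytes (len <= PAGE_SIZE), filling a page and zeroing its tail becomes:
 *
 *	memcpy_to_page(page, 0, buf, len);
 *	memzero_page(page, len, PAGE_SIZE - len);
 */
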
/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
					    loff_t pos, size_t len)
{
	size_t offset = offset_in_folio(folio, pos);
	char *from = kmap_local_folio(folio, offset);

	if (folio_test_highmem(folio)) {
		offset = offset_in_page(offset);
		len = min_t(size_t, len, PAGE_SIZE - offset);
	} else
		len = min(len, folio_size(folio) - offset);

	memcpy(to, from, len);
	kunmap_local(from);

	return len;
}

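/*
 * Example (an illustrative sketch, not part of this header): since the
 * copy may be short on HIGHMEM kernels, callers which need all @len bytes
 * loop as long as the position stays inside the folio:
 *
 *	while (len) {
 *		size_t n = memcpy_from_file_folio(to, folio, pos, len);
 *
 *		to += n;
 *		pos += n;
 *		len -= n;
 *	}
 */
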
/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}

static inline void unmap_and_put_page(struct page *page, void *addr)
{
	kunmap_local(addr);
	put_page(page);
}

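/*
 * Example (an illustrative sketch, not part of this header): code which
 * holds both a local kmap and a page reference, e.g. a page obtained via
 * get_user_pages() and mapped with kmap_local_page(), can drop both in
 * one call once it is done:
 *
 *	unmap_and_put_page(page, addr);
 */
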
#endif /* _LINUX_HIGHMEM_H */