/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem-backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or any other
 * ram- or swap-backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
        return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
        return folio_is_file_lru(page_folio(page));
}
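
/*
 * Illustrative sketch (not part of the original header): because the
 * return value is 0 or 1 rather than an opaque boolean, callers can use
 * it directly as an array index, as lru_gen_update_size() below does with
 * "int type = folio_is_file_lru(folio)". A hypothetical stats helper,
 * assuming a caller-provided nr[2] array, might look like:
 *
 *	static void count_folio(struct folio *folio, unsigned long nr[2])
 *	{
 *		nr[folio_is_file_lru(folio)] += folio_nr_pages(folio);
 *	}
 *
 * nr[0] then accumulates anon/swap-backed pages and nr[1] file-backed
 * ones, matching the Return: description above.
 */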

static __always_inline void __update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                long nr_pages)
{
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);

        lockdep_assert_held(&lruvec->lru_lock);
        WARN_ON_ONCE(nr_pages != (int)nr_pages);

        __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
        __mod_zone_page_state(&pgdat->node_zones[zid],
                                NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                long nr_pages)
{
        __update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
        mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear the lru flags before releasing a folio.
 * @folio: The folio that was on the LRU and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

        __folio_clear_lru(folio);

        /* this shouldn't happen, so leave the flags to bad_page() */
        if (folio_test_active(folio) && folio_test_unevictable(folio))
                return;

        __folio_clear_active(folio);
        __folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
        enum lru_list lru;

        VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

        if (folio_test_unevictable(folio))
                return LRU_UNEVICTABLE;

        lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
        if (folio_test_active(folio))
                lru += LRU_ACTIVE;

        return lru;
}
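
/*
 * Illustrative sketch (not part of the original header): the mapping from
 * folio state to list index works out as follows, assuming the usual
 * enum lru_list ordering (LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
 * LRU_INACTIVE_FILE, LRU_ACTIVE_FILE, LRU_UNEVICTABLE):
 *
 *	anon, inactive              -> LRU_INACTIVE_ANON
 *	anon, active                -> LRU_ACTIVE_ANON
 *	file, inactive              -> LRU_INACTIVE_FILE
 *	file, active                -> LRU_ACTIVE_FILE
 *	unevictable (any backing)   -> LRU_UNEVICTABLE
 *
 * which relies on LRU_ACTIVE being the offset from an inactive list to
 * its active counterpart.
 */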

#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
        DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

        return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
        DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

        return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
        return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
        return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
        return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
        VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

        /* see the comment in folio_lru_refs() */
        return order_base_2(refs + 1);
}

static inline int folio_lru_refs(struct folio *folio)
{
        unsigned long flags = READ_ONCE(folio->flags);
        bool workingset = flags & BIT(PG_workingset);

        /*
         * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
         * total number of accesses is N>1, since N=0,1 both map to the first
         * tier. lru_tier_from_refs() will account for this off-by-one. Also see
         * the comment on MAX_NR_TIERS.
         */
        return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}
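
/*
 * Illustrative worked example (not part of the original header): with the
 * off-by-one convention above, folio_lru_refs() returns N-1 for a folio
 * accessed N times (N >= 2) and 0 for N <= 1, and lru_tier_from_refs()
 * maps that onto a tier via order_base_2(refs + 1):
 *
 *	accesses N	folio_lru_refs()	lru_tier_from_refs()
 *	0 or 1		0			order_base_2(1) = 0
 *	2		1			order_base_2(2) = 1
 *	3..4		2..3			order_base_2(3..4) = 2
 *	5..8		4..7			order_base_2(5..8) = 3
 *
 * i.e., tiers grow logarithmically with the access count, which is why
 * MAX_NR_TIERS can stay small.
 */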

static inline int folio_lru_gen(struct folio *folio)
{
        unsigned long flags = READ_ONCE(folio->flags);

        return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
        unsigned long max_seq = lruvec->lrugen.max_seq;

        VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

        /* see the comment on MIN_NR_GENS */
        return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}
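
/*
 * Illustrative worked example (not part of the original header), assuming
 * MAX_NR_GENS == 4: generations are sequence numbers folded into a small
 * ring by lru_gen_from_seq(), so with max_seq == 10 the two youngest
 * generations are
 *
 *	lru_gen_from_seq(10) == 10 % 4 == 2
 *	lru_gen_from_seq(9)  ==  9 % 4 == 1
 *
 * and lru_gen_is_active() reports true only for gens 2 and 1. Folios in
 * those two generations are the ones accounted as "active" by
 * lru_gen_update_size() below; everything older counts as inactive.
 */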

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
                                       int old_gen, int new_gen)
{
        int type = folio_is_file_lru(folio);
        int zone = folio_zonenum(folio);
        int delta = folio_nr_pages(folio);
        enum lru_list lru = type * LRU_INACTIVE_FILE;
        struct lru_gen_folio *lrugen = &lruvec->lrugen;

        VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
        VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
        VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

        if (old_gen >= 0)
                WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
                           lrugen->nr_pages[old_gen][type][zone] - delta);
        if (new_gen >= 0)
                WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
                           lrugen->nr_pages[new_gen][type][zone] + delta);

        /* addition */
        if (old_gen < 0) {
                if (lru_gen_is_active(lruvec, new_gen))
                        lru += LRU_ACTIVE;
                __update_lru_size(lruvec, lru, zone, delta);
                return;
        }

        /* deletion */
        if (new_gen < 0) {
                if (lru_gen_is_active(lruvec, old_gen))
                        lru += LRU_ACTIVE;
                __update_lru_size(lruvec, lru, zone, -delta);
                return;
        }

        /* promotion */
        if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
                __update_lru_size(lruvec, lru, zone, -delta);
                __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
        }

        /* demotion requires isolation, e.g., lru_deactivate_fn() */
        VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
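
/*
 * Illustrative summary (not part of the original header) of how the
 * (old_gen, new_gen) pair encodes the four cases above:
 *
 *	old_gen == -1, new_gen >= 0	addition (folio enters the lruvec)
 *	old_gen >= 0,  new_gen == -1	deletion (folio leaves the lruvec)
 *	inactive gen -> active gen	promotion, counters move to active
 *	active gen -> inactive gen	demotion, only legal on isolated folios
 *
 * e.g. lru_gen_add_folio() below calls lru_gen_update_size(lruvec, folio,
 * -1, gen) and lru_gen_del_folio() calls it with (gen, -1).
 */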

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        unsigned long seq;
        unsigned long flags;
        int gen = folio_lru_gen(folio);
        int type = folio_is_file_lru(folio);
        int zone = folio_zonenum(folio);
        struct lru_gen_folio *lrugen = &lruvec->lrugen;

        VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

        if (folio_test_unevictable(folio) || !lrugen->enabled)
                return false;
        /*
         * There are three common cases for this page:
         * 1. If it's hot, e.g., freshly faulted in or previously hot and
         *    migrated, add it to the youngest generation.
         * 2. If it's cold but can't be evicted immediately, i.e., an anon page
         *    not in swapcache or a dirty page pending writeback, add it to the
         *    second oldest generation.
         * 3. Everything else (clean, cold) is added to the oldest generation.
         */
        if (folio_test_active(folio))
                seq = lrugen->max_seq;
        else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
                 (folio_test_reclaim(folio) &&
                  (folio_test_dirty(folio) || folio_test_writeback(folio))))
                seq = lrugen->min_seq[type] + 1;
        else
                seq = lrugen->min_seq[type];

        gen = lru_gen_from_seq(seq);
        flags = (gen + 1UL) << LRU_GEN_PGOFF;
        /* see the comment on MIN_NR_GENS about PG_active */
        set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

        lru_gen_update_size(lruvec, folio, -1, gen);
        /* for folio_rotate_reclaimable() */
        if (reclaiming)
                list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
        else
                list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

        return true;
}
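
/*
 * Illustrative worked example (not part of the original header), assuming
 * MAX_NR_GENS == 4, max_seq == 10 and min_seq[type] == 7:
 *
 *	case 1 (hot, PG_active set)	seq = 10 -> gen = 10 % 4 = 2
 *	case 2 (cold, not evictable)	seq =  8 -> gen =  8 % 4 = 0
 *	case 3 (cold, clean)		seq =  7 -> gen =  7 % 4 = 3
 *
 * The stored generation is offset by one ((gen + 1) << LRU_GEN_PGOFF) so
 * that a zero LRU_GEN field means "not on an lru_gen list", which is what
 * folio_lru_gen() recovers by subtracting one again.
 */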

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        unsigned long flags;
        int gen = folio_lru_gen(folio);

        if (gen < 0)
                return false;

        VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
        VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

        /* for folio_migrate_flags() */
        flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
        flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
        gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

        lru_gen_update_size(lruvec, folio, gen, -1);
        list_del(&folio->lru);

        return true;
}

#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
        return false;
}

static inline bool lru_gen_in_fault(void)
{
        return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        return false;
}

#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_add_folio(lruvec, folio, false))
                return;

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        if (lru != LRU_UNEVICTABLE)
                list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
                                struct lruvec *lruvec)
{
        lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_add_folio(lruvec, folio, true))
                return;

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        /* This is not expected to be used on LRU_UNEVICTABLE */
        list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_del_folio(lruvec, folio, false))
                return;

        if (lru != LRU_UNEVICTABLE)
                list_del(&folio->lru);
        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        -folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
                                struct lruvec *lruvec)
{
        lruvec_del_folio(lruvec, page_folio(page));
}

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * must either keep holding the lock while using the returned pointer, or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_put(&anon_name->kref, anon_vma_name_free);
}
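
/*
 * Illustrative sketch (not part of the original header) of the second
 * locking option described above, with a hypothetical caller that wants
 * to use the name after dropping mmap_lock:
 *
 *	struct anon_vma_name *name;
 *
 *	mmap_read_lock(mm);
 *	name = anon_vma_name(vma);
 *	anon_vma_name_get(name);	// pin it past the unlock
 *	mmap_read_unlock(mm);
 *	// ... use name->name ...
 *	anon_vma_name_put(name);
 */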

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
        /* Prevent anon_name refcount saturation early on */
        if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
                anon_vma_name_get(anon_name);
                return anon_name;
        }
        return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma)
{
        struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

        if (anon_name)
                new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
        /*
         * Not using anon_vma_name because it generates a warning if mmap_lock
         * is not held, which might be the case here.
         */
        anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        if (anon_name1 == anon_name2)
                return true;

        return anon_name1 && anon_name2 &&
               !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
        return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);
        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *	atomic_inc(&mm->tlb_flush_pending);
         *	spin_lock(&ptl);
         *	...
         *	set_pte_at();
         *	spin_unlock(&ptl);
         *
         *				spin_lock(&ptl)
         *				mm_tlb_flush_pending();
         *				....
         *				spin_unlock(&ptl);
         *
         *	flush_tlb_range();
         *	atomic_dec(&mm->tlb_flush_pending);
         *
         * Because the increment is constrained by the PTL unlock, it is
         * guaranteed to be visible whenever the PTE modification is visible.
         * After all, if there is no PTE modification, nobody cares about TLB
         * flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes have
         * completed.
         */
}
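
/*
 * Illustrative sketch (not part of the original header) of the pattern the
 * comment above describes, seen from the updater's side. The function name
 * is hypothetical; in the kernel this bracketing is normally done by the
 * mmu_gather code (tlb_gather_mmu()/tlb_finish_mmu()) rather than open-coded:
 *
 *	static void example_update_range(struct vm_area_struct *vma,
 *					 unsigned long start, unsigned long end)
 *	{
 *		struct mm_struct *mm = vma->vm_mm;
 *
 *		inc_tlb_flush_pending(mm);
 *		// take the PTL, modify PTEs in [start, end), drop the PTL
 *		flush_tlb_range(vma, start, end);
 *		dec_tlb_flush_pending(mm);
 *	}
 *
 * Readers holding the same PTL can then use mm_tlb_flush_pending() to tell
 * that a flush of the PTEs they just observed is still in flight.
 */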

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (no requirement on actually still holding the PTL, that is irrelevant)
         */
        return atomic_read(&mm->tlb_flush_pending) > 1;
}

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed; e.g., when the pte was cleared, the caller should have taken care
 * of the tlb flush.
 *
 * Must be called with the pgtable lock held so that no thread will see the
 * none pte, and if they see it, they'll fault and serialize at the pgtable
 * lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
                              pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
        bool arm_uffd_pte = false;

        /* The current status of the pte should be "cleared" before calling */
        WARN_ON_ONCE(!pte_none(*pte));

        if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
                return;

        /* A uffd-wp wr-protected normal pte */
        if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
                arm_uffd_pte = true;

        /*
         * A uffd-wp wr-protected swap pte. Note: this should even cover an
         * existing pte marker with uffd-wp bit set.
         */
        if (unlikely(pte_swp_uffd_wp_any(pteval)))
                arm_uffd_pte = true;

        if (unlikely(arm_uffd_pte))
                set_pte_at(vma->vm_mm, addr, pte,
                           make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}
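
/*
 * Illustrative sketch (not part of the original header): a typical use is
 * on a teardown path that has just cleared a pte and wants to keep the
 * uffd-wp protection armed for the next fault. The function name below is
 * hypothetical:
 *
 *	static void example_zap_one_pte(struct vm_area_struct *vma,
 *					unsigned long addr, pte_t *pte)
 *	{
 *		// PTL is held by the caller
 *		pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);
 *
 *		// ... flush/accounting for the old pte ...
 *		pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
 *	}
 *
 * so a subsequent access faults and can be resolved by the userfaultfd
 * monitor instead of silently losing the write protection.
 */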

static inline bool vma_has_recency(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
                return false;

        if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
                return false;

        return true;
}
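
/*
 * Illustrative note (not part of the original header): VM_SEQ_READ and
 * VM_RAND_READ are typically set via madvise(MADV_SEQUENTIAL/MADV_RANDOM)
 * and FMODE_NOREUSE via POSIX_FADV_NOREUSE, so this helper lets callers
 * skip LRU activation/aging work for mappings whose owner declared the
 * data will not be reused. A hypothetical access path might do:
 *
 *	if (vma_has_recency(vma))
 *		folio_mark_accessed(folio);
 *
 * i.e., accesses through a no-reuse VMA do not promote the folio.
 */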

#endif