Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
4 : *
5 : * Swap reorganised 29.12.95, Stephen Tweedie.
6 : * kswapd added: 7.1.96 sct
7 : * Removed kswapd_ctl limits, and swap out as many pages as needed
8 : * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
9 : * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
10 : * Multiqueue VM started 5.8.00, Rik van Riel.
11 : */
12 :
13 : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 :
15 : #include <linux/mm.h>
16 : #include <linux/sched/mm.h>
17 : #include <linux/module.h>
18 : #include <linux/gfp.h>
19 : #include <linux/kernel_stat.h>
20 : #include <linux/swap.h>
21 : #include <linux/pagemap.h>
22 : #include <linux/init.h>
23 : #include <linux/highmem.h>
24 : #include <linux/vmpressure.h>
25 : #include <linux/vmstat.h>
26 : #include <linux/file.h>
27 : #include <linux/writeback.h>
28 : #include <linux/blkdev.h>
29 : #include <linux/buffer_head.h> /* for buffer_heads_over_limit */
30 : #include <linux/mm_inline.h>
31 : #include <linux/backing-dev.h>
32 : #include <linux/rmap.h>
33 : #include <linux/topology.h>
34 : #include <linux/cpu.h>
35 : #include <linux/cpuset.h>
36 : #include <linux/compaction.h>
37 : #include <linux/notifier.h>
38 : #include <linux/rwsem.h>
39 : #include <linux/delay.h>
40 : #include <linux/kthread.h>
41 : #include <linux/freezer.h>
42 : #include <linux/memcontrol.h>
43 : #include <linux/migrate.h>
44 : #include <linux/delayacct.h>
45 : #include <linux/sysctl.h>
46 : #include <linux/memory-tiers.h>
47 : #include <linux/oom.h>
48 : #include <linux/pagevec.h>
49 : #include <linux/prefetch.h>
50 : #include <linux/printk.h>
51 : #include <linux/dax.h>
52 : #include <linux/psi.h>
53 : #include <linux/pagewalk.h>
54 : #include <linux/shmem_fs.h>
55 : #include <linux/ctype.h>
56 : #include <linux/debugfs.h>
57 : #include <linux/khugepaged.h>
58 : #include <linux/rculist_nulls.h>
59 : #include <linux/random.h>
60 :
61 : #include <asm/tlbflush.h>
62 : #include <asm/div64.h>
63 :
64 : #include <linux/swapops.h>
65 : #include <linux/balloon_compaction.h>
66 : #include <linux/sched/sysctl.h>
67 :
68 : #include "internal.h"
69 : #include "swap.h"
70 :
71 : #define CREATE_TRACE_POINTS
72 : #include <trace/events/vmscan.h>
73 :
74 : struct scan_control {
75 : /* How many pages shrink_list() should reclaim */
76 : unsigned long nr_to_reclaim;
77 :
78 : /*
79 : * Nodemask of nodes allowed by the caller. If NULL, all nodes
80 : * are scanned.
81 : */
82 : nodemask_t *nodemask;
83 :
84 : /*
85 : * The memory cgroup that hit its limit and as a result is the
86 : * primary target of this reclaim invocation.
87 : */
88 : struct mem_cgroup *target_mem_cgroup;
89 :
90 : /*
91 : * Scan pressure balancing between anon and file LRUs
92 : */
93 : unsigned long anon_cost;
94 : unsigned long file_cost;
95 :
96 : /* Can active folios be deactivated as part of reclaim? */
97 : #define DEACTIVATE_ANON 1
98 : #define DEACTIVATE_FILE 2
99 : unsigned int may_deactivate:2;
100 : unsigned int force_deactivate:1;
101 : unsigned int skipped_deactivate:1;
102 :
103 : /* Writepage batching in laptop mode; RECLAIM_WRITE */
104 : unsigned int may_writepage:1;
105 :
106 : /* Can mapped folios be reclaimed? */
107 : unsigned int may_unmap:1;
108 :
109 : /* Can folios be swapped as part of reclaim? */
110 : unsigned int may_swap:1;
111 :
112 : /* Proactive reclaim invoked by userspace through memory.reclaim */
113 : unsigned int proactive:1;
114 :
115 : /*
116 : * Cgroup memory below memory.low is protected as long as we
117 : * don't threaten to OOM. If any cgroup is reclaimed at
118 : * reduced force or passed over entirely due to its memory.low
119 : * setting (memcg_low_skipped), and nothing is reclaimed as a
120 : * result, then go back for one more cycle that reclaims the protected
121 : * memory (memcg_low_reclaim) to avert OOM.
122 : */
123 : unsigned int memcg_low_reclaim:1;
124 : unsigned int memcg_low_skipped:1;
125 :
126 : unsigned int hibernation_mode:1;
127 :
128 : /* One of the zones is ready for compaction */
129 : unsigned int compaction_ready:1;
130 :
131 : /* There is easily reclaimable cold cache in the current node */
132 : unsigned int cache_trim_mode:1;
133 :
134 : /* The file folios on the current node are dangerously low */
135 : unsigned int file_is_tiny:1;
136 :
137 : /* Always discard instead of demoting to lower tier memory */
138 : unsigned int no_demotion:1;
139 :
140 : /* Allocation order */
141 : s8 order;
142 :
143 : /* Scan (total_size >> priority) pages at once */
144 : s8 priority;
145 :
146 : /* The highest zone to isolate folios for reclaim from */
147 : s8 reclaim_idx;
148 :
149 : /* This context's GFP mask */
150 : gfp_t gfp_mask;
151 :
152 : /* Incremented by the number of inactive pages that were scanned */
153 : unsigned long nr_scanned;
154 :
155 : /* Number of pages freed so far during a call to shrink_zones() */
156 : unsigned long nr_reclaimed;
157 :
158 : struct {
159 : unsigned int dirty;
160 : unsigned int unqueued_dirty;
161 : unsigned int congested;
162 : unsigned int writeback;
163 : unsigned int immediate;
164 : unsigned int file_taken;
165 : unsigned int taken;
166 : } nr;
167 :
168 : /* For recording pages reclaimed outside of LRU-based reclaim (e.g. slab) */
169 : struct reclaim_state reclaim_state;
170 : };
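/*
 * Note on sc->priority (an annotation, not part of the original file):
 * reclaim starts at DEF_PRIORITY (defined in mmzone.h, typically 12)
 * and lowers the priority towards 0 when no progress is made, so each
 * pass scans roughly lru_size >> priority pages; at priority 0 the
 * whole list is eligible in a single pass.
 */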
171 :
172 : #ifdef ARCH_HAS_PREFETCHW
173 : #define prefetchw_prev_lru_folio(_folio, _base, _field) \
174 : do { \
175 : if ((_folio)->lru.prev != _base) { \
176 : struct folio *prev; \
177 : \
178 : prev = lru_to_folio(&(_folio->lru)); \
179 : prefetchw(&prev->_field); \
180 : } \
181 : } while (0)
182 : #else
183 : #define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0)
184 : #endif
185 :
186 : /*
187 : * From 0 .. 200. Higher means more swappy.
188 : */
189 : int vm_swappiness = 60;
190 :
191 : LIST_HEAD(shrinker_list);
192 : DECLARE_RWSEM(shrinker_rwsem);
193 :
194 : #ifdef CONFIG_MEMCG
195 : static int shrinker_nr_max;
196 :
197 : /* The shrinker_info is expanded in batches of BITS_PER_LONG */
198 : static inline int shrinker_map_size(int nr_items)
199 : {
200 : return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
201 : }
202 :
203 : static inline int shrinker_defer_size(int nr_items)
204 : {
205 : return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t));
206 : }
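/*
 * For illustration (assuming a 64-bit build, i.e. BITS_PER_LONG == 64):
 * with nr_items == 100, shrinker_map_size() returns
 * DIV_ROUND_UP(100, 64) * sizeof(unsigned long) == 2 * 8 == 16 bytes of
 * bitmap, while shrinker_defer_size() returns
 * round_up(100, 64) * sizeof(atomic_long_t) == 128 * 8 == 1024 bytes of
 * deferred counters.
 */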
207 :
208 : static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
209 : int nid)
210 : {
211 : return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
212 : lockdep_is_held(&shrinker_rwsem));
213 : }
214 :
215 : static int expand_one_shrinker_info(struct mem_cgroup *memcg,
216 : int map_size, int defer_size,
217 : int old_map_size, int old_defer_size,
218 : int new_nr_max)
219 : {
220 : struct shrinker_info *new, *old;
221 : struct mem_cgroup_per_node *pn;
222 : int nid;
223 : int size = map_size + defer_size;
224 :
225 : for_each_node(nid) {
226 : pn = memcg->nodeinfo[nid];
227 : old = shrinker_info_protected(memcg, nid);
228 : /* Not yet online memcg */
229 : if (!old)
230 : return 0;
231 :
232 : /* Already expanded this shrinker_info */
233 : if (new_nr_max <= old->map_nr_max)
234 : continue;
235 :
236 : new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
237 : if (!new)
238 : return -ENOMEM;
239 :
240 : new->nr_deferred = (atomic_long_t *)(new + 1);
241 : new->map = (void *)new->nr_deferred + defer_size;
242 : new->map_nr_max = new_nr_max;
243 :
244 : /* map: set all old bits, clear all new bits */
245 : memset(new->map, (int)0xff, old_map_size);
246 : memset((void *)new->map + old_map_size, 0, map_size - old_map_size);
247 : /* nr_deferred: copy old values, clear all new values */
248 : memcpy(new->nr_deferred, old->nr_deferred, old_defer_size);
249 : memset((void *)new->nr_deferred + old_defer_size, 0,
250 : defer_size - old_defer_size);
251 :
252 : rcu_assign_pointer(pn->shrinker_info, new);
253 : kvfree_rcu(old, rcu);
254 : }
255 :
256 : return 0;
257 : }
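/*
 * The resulting shrinker_info is a single allocation: the nr_deferred
 * array immediately follows the struct and the bitmap follows the
 * deferred counters:
 *
 *   [struct shrinker_info][atomic_long_t nr_deferred[]][unsigned long map[]]
 *                         ^ info->nr_deferred           ^ info->map
 */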
258 :
259 : void free_shrinker_info(struct mem_cgroup *memcg)
260 : {
261 : struct mem_cgroup_per_node *pn;
262 : struct shrinker_info *info;
263 : int nid;
264 :
265 : for_each_node(nid) {
266 : pn = memcg->nodeinfo[nid];
267 : info = rcu_dereference_protected(pn->shrinker_info, true);
268 : kvfree(info);
269 : rcu_assign_pointer(pn->shrinker_info, NULL);
270 : }
271 : }
272 :
273 : int alloc_shrinker_info(struct mem_cgroup *memcg)
274 : {
275 : struct shrinker_info *info;
276 : int nid, size, ret = 0;
277 : int map_size, defer_size = 0;
278 :
279 : down_write(&shrinker_rwsem);
280 : map_size = shrinker_map_size(shrinker_nr_max);
281 : defer_size = shrinker_defer_size(shrinker_nr_max);
282 : size = map_size + defer_size;
283 : for_each_node(nid) {
284 : info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
285 : if (!info) {
286 : free_shrinker_info(memcg);
287 : ret = -ENOMEM;
288 : break;
289 : }
290 : info->nr_deferred = (atomic_long_t *)(info + 1);
291 : info->map = (void *)info->nr_deferred + defer_size;
292 : info->map_nr_max = shrinker_nr_max;
293 : rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
294 : }
295 : up_write(&shrinker_rwsem);
296 :
297 : return ret;
298 : }
299 :
300 : static int expand_shrinker_info(int new_id)
301 : {
302 : int ret = 0;
303 : int new_nr_max = round_up(new_id + 1, BITS_PER_LONG);
304 : int map_size, defer_size = 0;
305 : int old_map_size, old_defer_size = 0;
306 : struct mem_cgroup *memcg;
307 :
308 : if (!root_mem_cgroup)
309 : goto out;
310 :
311 : lockdep_assert_held(&shrinker_rwsem);
312 :
313 : map_size = shrinker_map_size(new_nr_max);
314 : defer_size = shrinker_defer_size(new_nr_max);
315 : old_map_size = shrinker_map_size(shrinker_nr_max);
316 : old_defer_size = shrinker_defer_size(shrinker_nr_max);
317 :
318 : memcg = mem_cgroup_iter(NULL, NULL, NULL);
319 : do {
320 : ret = expand_one_shrinker_info(memcg, map_size, defer_size,
321 : old_map_size, old_defer_size,
322 : new_nr_max);
323 : if (ret) {
324 : mem_cgroup_iter_break(NULL, memcg);
325 : goto out;
326 : }
327 : } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
328 : out:
329 : if (!ret)
330 : shrinker_nr_max = new_nr_max;
331 :
332 : return ret;
333 : }
334 :
335 : void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
336 : {
337 : if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
338 : struct shrinker_info *info;
339 :
340 : rcu_read_lock();
341 : info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
342 : if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) {
343 : /* Pairs with smp_mb__after_atomic() in shrink_slab_memcg() */
344 : smp_mb__before_atomic();
345 : set_bit(shrinker_id, info->map);
346 : }
347 : rcu_read_unlock();
348 : }
349 : }
350 :
351 : static DEFINE_IDR(shrinker_idr);
352 :
353 : static int prealloc_memcg_shrinker(struct shrinker *shrinker)
354 : {
355 : int id, ret = -ENOMEM;
356 :
357 : if (mem_cgroup_disabled())
358 : return -ENOSYS;
359 :
360 : down_write(&shrinker_rwsem);
361 : /* This may call shrinker, so it must use down_read_trylock() */
362 : id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
363 : if (id < 0)
364 : goto unlock;
365 :
366 : if (id >= shrinker_nr_max) {
367 : if (expand_shrinker_info(id)) {
368 : idr_remove(&shrinker_idr, id);
369 : goto unlock;
370 : }
371 : }
372 : shrinker->id = id;
373 : ret = 0;
374 : unlock:
375 : up_write(&shrinker_rwsem);
376 : return ret;
377 : }
378 :
379 : static void unregister_memcg_shrinker(struct shrinker *shrinker)
380 : {
381 : int id = shrinker->id;
382 :
383 : BUG_ON(id < 0);
384 :
385 : lockdep_assert_held(&shrinker_rwsem);
386 :
387 : idr_remove(&shrinker_idr, id);
388 : }
389 :
390 : static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
391 : struct mem_cgroup *memcg)
392 : {
393 : struct shrinker_info *info;
394 :
395 : info = shrinker_info_protected(memcg, nid);
396 : return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
397 : }
398 :
399 : static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
400 : struct mem_cgroup *memcg)
401 : {
402 : struct shrinker_info *info;
403 :
404 : info = shrinker_info_protected(memcg, nid);
405 : return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
406 : }
407 :
408 : void reparent_shrinker_deferred(struct mem_cgroup *memcg)
409 : {
410 : int i, nid;
411 : long nr;
412 : struct mem_cgroup *parent;
413 : struct shrinker_info *child_info, *parent_info;
414 :
415 : parent = parent_mem_cgroup(memcg);
416 : if (!parent)
417 : parent = root_mem_cgroup;
418 :
419 : /* Prevent concurrent shrinker_info expansion */
420 : down_read(&shrinker_rwsem);
421 : for_each_node(nid) {
422 : child_info = shrinker_info_protected(memcg, nid);
423 : parent_info = shrinker_info_protected(parent, nid);
424 : for (i = 0; i < child_info->map_nr_max; i++) {
425 : nr = atomic_long_read(&child_info->nr_deferred[i]);
426 : atomic_long_add(nr, &parent_info->nr_deferred[i]);
427 : }
428 : }
429 : up_read(&shrinker_rwsem);
430 : }
431 :
432 : /* Returns true for reclaim through cgroup limits or cgroup interfaces. */
433 : static bool cgroup_reclaim(struct scan_control *sc)
434 : {
435 : return sc->target_mem_cgroup;
436 : }
437 :
438 : /*
439 : * Returns true for reclaim on the root cgroup. This is true for direct
440 : * allocator reclaim and reclaim through cgroup interfaces on the root cgroup.
441 : */
442 : static bool root_reclaim(struct scan_control *sc)
443 : {
444 : return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
445 : }
446 :
447 : /**
448 : * writeback_throttling_sane - is the usual dirty throttling mechanism available?
449 : * @sc: scan_control in question
450 : *
451 : * The normal page dirty throttling mechanism in balance_dirty_pages() is
452 : * completely broken with the legacy memcg and direct stalling in
453 : * shrink_folio_list() is used for throttling instead, which lacks all the
454 : * niceties such as fairness, adaptive pausing, bandwidth proportional
455 : * allocation and configurability.
456 : *
457 : * This function tests whether the vmscan currently in progress can assume
458 : * that the normal dirty throttling mechanism is operational.
459 : */
460 : static bool writeback_throttling_sane(struct scan_control *sc)
461 : {
462 : if (!cgroup_reclaim(sc))
463 : return true;
464 : #ifdef CONFIG_CGROUP_WRITEBACK
465 : if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
466 : return true;
467 : #endif
468 : return false;
469 : }
470 : #else
471 : static int prealloc_memcg_shrinker(struct shrinker *shrinker)
472 : {
473 : return -ENOSYS;
474 : }
475 :
476 : static void unregister_memcg_shrinker(struct shrinker *shrinker)
477 : {
478 : }
479 :
480 : static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
481 : struct mem_cgroup *memcg)
482 : {
483 : return 0;
484 : }
485 :
486 : static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
487 : struct mem_cgroup *memcg)
488 : {
489 : return 0;
490 : }
491 :
492 : static bool cgroup_reclaim(struct scan_control *sc)
493 : {
494 : return false;
495 : }
496 :
497 : static bool root_reclaim(struct scan_control *sc)
498 : {
499 : return true;
500 : }
501 :
502 : static bool writeback_throttling_sane(struct scan_control *sc)
503 : {
504 : return true;
505 : }
506 : #endif
507 :
508 0 : static void set_task_reclaim_state(struct task_struct *task,
509 : struct reclaim_state *rs)
510 : {
511 : /* Check for an overwrite */
512 0 : WARN_ON_ONCE(rs && task->reclaim_state);
513 :
514 : /* Check for the nulling of an already-nulled member */
515 0 : WARN_ON_ONCE(!rs && !task->reclaim_state);
516 :
517 0 : task->reclaim_state = rs;
518 0 : }
519 :
520 : /*
521 : * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
522 : * scan_control->nr_reclaimed.
523 : */
524 : static void flush_reclaim_state(struct scan_control *sc)
525 : {
526 : /*
527 : * Currently, reclaim_state->reclaimed includes three types of pages
528 : * freed outside of vmscan:
529 : * (1) Slab pages.
530 : * (2) Clean file pages from pruned inodes (on highmem systems).
531 : * (3) XFS freed buffer pages.
532 : *
533 : * For all of these cases, we cannot universally link the pages to a
534 : * single memcg. For example, a memcg-aware shrinker can free one object
535 : * charged to the target memcg, causing an entire page to be freed.
536 : * If we count the entire page as reclaimed from the memcg, we end up
537 : * overestimating the reclaimed amount (potentially under-reclaiming).
538 : *
539 : * Only count such pages for global reclaim to prevent under-reclaiming
540 : * from the target memcg; preventing unnecessary retries during memcg
541 : * charging and false positives from proactive reclaim.
542 : *
543 : * For uncommon cases where the freed pages were actually mostly
544 : * charged to the target memcg, we end up underestimating the reclaimed
545 : * amount. This should be fine. The freed pages will be uncharged
546 : * anyway, even if they are not counted here properly, and we will be
547 : * able to make forward progress in charging (which is usually in a
548 : * retry loop).
549 : *
550 : * We can go one step further, and report the uncharged objcg pages in
551 : * memcg reclaim, to make reporting more accurate and reduce
552 : * underestimation, but it's probably not worth the complexity for now.
553 : */
554 0 : if (current->reclaim_state && root_reclaim(sc)) {
555 0 : sc->nr_reclaimed += current->reclaim_state->reclaimed;
556 0 : current->reclaim_state->reclaimed = 0;
557 : }
558 : }
559 :
560 : static long xchg_nr_deferred(struct shrinker *shrinker,
561 : struct shrink_control *sc)
562 : {
563 0 : int nid = sc->nid;
564 :
565 0 : if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
566 0 : nid = 0;
567 :
568 0 : if (sc->memcg &&
569 0 : (shrinker->flags & SHRINKER_MEMCG_AWARE))
570 : return xchg_nr_deferred_memcg(nid, shrinker,
571 : sc->memcg);
572 :
573 0 : return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
574 : }
575 :
576 :
577 : static long add_nr_deferred(long nr, struct shrinker *shrinker,
578 : struct shrink_control *sc)
579 : {
580 0 : int nid = sc->nid;
581 :
582 0 : if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
583 0 : nid = 0;
584 :
585 0 : if (sc->memcg &&
586 0 : (shrinker->flags & SHRINKER_MEMCG_AWARE))
587 : return add_nr_deferred_memcg(nr, nid, shrinker,
588 : sc->memcg);
589 :
590 0 : return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
591 : }
592 :
593 : static bool can_demote(int nid, struct scan_control *sc)
594 : {
595 : if (!numa_demotion_enabled)
596 : return false;
597 : if (sc && sc->no_demotion)
598 : return false;
599 : if (next_demotion_node(nid) == NUMA_NO_NODE)
600 : return false;
601 :
602 : return true;
603 : }
604 :
605 : static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
606 : int nid,
607 : struct scan_control *sc)
608 : {
609 : if (memcg == NULL) {
610 : /*
611 : * For non-memcg reclaim, is there
612 : * space in any swap device?
613 : */
614 0 : if (get_nr_swap_pages() > 0)
615 : return true;
616 : } else {
617 : /* Is the memcg below its swap limit? */
618 : if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
619 : return true;
620 : }
621 :
622 : /*
623 : * The page cannot be swapped.
624 : *
625 : * Can it be reclaimed from this node via demotion?
626 : */
627 : return can_demote(nid, sc);
628 : }
629 :
630 : /*
631 : * This misses isolated folios, which are not accounted for here to save counter updates.
632 : * As the data only determines if reclaim or compaction continues, it is
633 : * not expected that isolated folios will be a dominating factor.
634 : */
635 0 : unsigned long zone_reclaimable_pages(struct zone *zone)
636 : {
637 : unsigned long nr;
638 :
639 0 : nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
640 0 : zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
641 0 : if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
642 0 : nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
643 0 : zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
644 :
645 0 : return nr;
646 : }
647 :
648 : /**
649 : * lruvec_lru_size - Returns the number of pages on the given LRU list.
650 : * @lruvec: lru vector
651 : * @lru: lru to use
652 : * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
653 : */
654 : static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
655 : int zone_idx)
656 : {
657 : unsigned long size = 0;
658 : int zid;
659 :
660 0 : for (zid = 0; zid <= zone_idx; zid++) {
661 0 : struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
662 :
663 0 : if (!managed_zone(zone))
664 0 : continue;
665 :
666 : if (!mem_cgroup_disabled())
667 : size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
668 : else
669 0 : size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
670 : }
671 : return size;
672 : }
673 :
674 : /*
675 : * Add a shrinker callback to be called from the vm.
676 : */
677 16 : static int __prealloc_shrinker(struct shrinker *shrinker)
678 : {
679 : unsigned int size;
680 : int err;
681 :
682 16 : if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
683 16 : err = prealloc_memcg_shrinker(shrinker);
684 : if (err != -ENOSYS)
685 : return err;
686 :
687 16 : shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
688 : }
689 :
690 16 : size = sizeof(*shrinker->nr_deferred);
691 : if (shrinker->flags & SHRINKER_NUMA_AWARE)
692 : size *= nr_node_ids;
693 :
694 16 : shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
695 16 : if (!shrinker->nr_deferred)
696 : return -ENOMEM;
697 :
698 : return 0;
699 : }
700 :
701 : #ifdef CONFIG_SHRINKER_DEBUG
702 : int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...)
703 : {
704 : va_list ap;
705 : int err;
706 :
707 : va_start(ap, fmt);
708 : shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
709 : va_end(ap);
710 : if (!shrinker->name)
711 : return -ENOMEM;
712 :
713 : err = __prealloc_shrinker(shrinker);
714 : if (err) {
715 : kfree_const(shrinker->name);
716 : shrinker->name = NULL;
717 : }
718 :
719 : return err;
720 : }
721 : #else
722 16 : int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...)
723 : {
724 16 : return __prealloc_shrinker(shrinker);
725 : }
726 : #endif
727 :
728 0 : void free_prealloced_shrinker(struct shrinker *shrinker)
729 : {
730 : #ifdef CONFIG_SHRINKER_DEBUG
731 : kfree_const(shrinker->name);
732 : shrinker->name = NULL;
733 : #endif
734 0 : if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
735 0 : down_write(&shrinker_rwsem);
736 0 : unregister_memcg_shrinker(shrinker);
737 0 : up_write(&shrinker_rwsem);
738 0 : return;
739 : }
740 :
741 0 : kfree(shrinker->nr_deferred);
742 0 : shrinker->nr_deferred = NULL;
743 : }
744 :
745 16 : void register_shrinker_prepared(struct shrinker *shrinker)
746 : {
747 16 : down_write(&shrinker_rwsem);
748 32 : list_add_tail(&shrinker->list, &shrinker_list);
749 16 : shrinker->flags |= SHRINKER_REGISTERED;
750 16 : shrinker_debugfs_add(shrinker);
751 16 : up_write(&shrinker_rwsem);
752 16 : }
753 :
754 0 : static int __register_shrinker(struct shrinker *shrinker)
755 : {
756 0 : int err = __prealloc_shrinker(shrinker);
757 :
758 0 : if (err)
759 : return err;
760 0 : register_shrinker_prepared(shrinker);
761 0 : return 0;
762 : }
763 :
764 : #ifdef CONFIG_SHRINKER_DEBUG
765 : int register_shrinker(struct shrinker *shrinker, const char *fmt, ...)
766 : {
767 : va_list ap;
768 : int err;
769 :
770 : va_start(ap, fmt);
771 : shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
772 : va_end(ap);
773 : if (!shrinker->name)
774 : return -ENOMEM;
775 :
776 : err = __register_shrinker(shrinker);
777 : if (err) {
778 : kfree_const(shrinker->name);
779 : shrinker->name = NULL;
780 : }
781 : return err;
782 : }
783 : #else
784 0 : int register_shrinker(struct shrinker *shrinker, const char *fmt, ...)
785 : {
786 0 : return __register_shrinker(shrinker);
787 : }
788 : #endif
789 : EXPORT_SYMBOL(register_shrinker);
790 :
791 : /*
792 : * Remove one
793 : */
794 5 : void unregister_shrinker(struct shrinker *shrinker)
795 : {
796 : struct dentry *debugfs_entry;
797 : int debugfs_id;
798 :
799 5 : if (!(shrinker->flags & SHRINKER_REGISTERED))
800 : return;
801 :
802 5 : down_write(&shrinker_rwsem);
803 10 : list_del(&shrinker->list);
804 5 : shrinker->flags &= ~SHRINKER_REGISTERED;
805 : if (shrinker->flags & SHRINKER_MEMCG_AWARE)
806 : unregister_memcg_shrinker(shrinker);
807 10 : debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
808 5 : up_write(&shrinker_rwsem);
809 :
810 5 : shrinker_debugfs_remove(debugfs_entry, debugfs_id);
811 :
812 5 : kfree(shrinker->nr_deferred);
813 5 : shrinker->nr_deferred = NULL;
814 : }
815 : EXPORT_SYMBOL(unregister_shrinker);
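/*
 * A minimal usage sketch of the registration API above (hypothetical
 * caller, not part of this file; my_count()/my_scan() stand in for a
 * real count_objects/scan_objects pair):
 */
#if 0
static struct shrinker my_shrinker = {
	.count_objects = my_count,
	.scan_objects  = my_scan,
	.seeks         = DEFAULT_SEEKS,
};

static int __init my_cache_init(void)
{
	/* The format string becomes the shrinker's debugfs name. */
	return register_shrinker(&my_shrinker, "my-cache");
}

static void __exit my_cache_exit(void)
{
	unregister_shrinker(&my_shrinker);
}
#endif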
816 :
817 : /**
818 : * synchronize_shrinkers - Wait for all running shrinkers to complete.
819 : *
820 : * This is equivalent to calling unregister_shrinker() and register_shrinker(),
821 : * but atomically and with less overhead. This is useful to guarantee that all
822 : * shrinker invocations have seen an update, before freeing memory, similar to
823 : * rcu.
824 : */
825 0 : void synchronize_shrinkers(void)
826 : {
827 0 : down_write(&shrinker_rwsem);
828 0 : up_write(&shrinker_rwsem);
829 0 : }
830 : EXPORT_SYMBOL(synchronize_shrinkers);
831 :
832 : #define SHRINK_BATCH 128
833 :
834 0 : static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
835 : struct shrinker *shrinker, int priority)
836 : {
837 0 : unsigned long freed = 0;
838 : unsigned long long delta;
839 : long total_scan;
840 : long freeable;
841 : long nr;
842 : long new_nr;
843 0 : long batch_size = shrinker->batch ? shrinker->batch
844 0 : : SHRINK_BATCH;
845 0 : long scanned = 0, next_deferred;
846 :
847 0 : freeable = shrinker->count_objects(shrinker, shrinkctl);
848 0 : if (freeable == 0 || freeable == SHRINK_EMPTY)
849 : return freeable;
850 :
851 : /*
852 : * copy the current shrinker scan count into a local variable
853 : * and zero it so that other concurrent shrinker invocations
854 : * don't also do this scanning work.
855 : */
856 0 : nr = xchg_nr_deferred(shrinker, shrinkctl);
857 :
858 0 : if (shrinker->seeks) {
859 0 : delta = freeable >> priority;
860 0 : delta *= 4;
861 0 : do_div(delta, shrinker->seeks);
862 : } else {
863 : /*
864 : * These objects don't require any IO to create. Trim
865 : * them aggressively under memory pressure to keep
866 : * them from causing refetches in the IO caches.
867 : */
868 0 : delta = freeable / 2;
869 : }
870 :
871 0 : total_scan = nr >> priority;
872 0 : total_scan += delta;
873 0 : total_scan = min(total_scan, (2 * freeable));
874 :
875 0 : trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
876 : freeable, delta, total_scan, priority);
877 :
878 : /*
879 : * Normally, we should not scan less than batch_size objects in one
880 : * pass to avoid too frequent shrinker calls, but if the slab has less
881 : * than batch_size objects in total and we are really tight on memory,
882 : * we will try to reclaim all available objects, otherwise we can end
883 : * up failing allocations although there are plenty of reclaimable
884 : * objects spread over several slabs with usage less than the
885 : * batch_size.
886 : *
887 : * We detect the "tight on memory" situations by looking at the total
888 : * number of objects we want to scan (total_scan). If it is greater
889 : * than the total number of objects on slab (freeable), we must be
890 : * scanning at high prio and therefore should try to reclaim as much as
891 : * possible.
892 : */
893 0 : while (total_scan >= batch_size ||
894 0 : total_scan >= freeable) {
895 : unsigned long ret;
896 0 : unsigned long nr_to_scan = min(batch_size, total_scan);
897 :
898 0 : shrinkctl->nr_to_scan = nr_to_scan;
899 0 : shrinkctl->nr_scanned = nr_to_scan;
900 0 : ret = shrinker->scan_objects(shrinker, shrinkctl);
901 0 : if (ret == SHRINK_STOP)
902 : break;
903 0 : freed += ret;
904 :
905 0 : count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
906 0 : total_scan -= shrinkctl->nr_scanned;
907 0 : scanned += shrinkctl->nr_scanned;
908 :
909 0 : cond_resched();
910 : }
911 :
912 : /*
913 : * The deferred work is increased by any new work (delta) that wasn't
914 : * done, decreased by old deferred work that was done now.
915 : *
916 : * And it is capped to two times of the freeable items.
917 : */
918 0 : next_deferred = max_t(long, (nr + delta - scanned), 0);
919 0 : next_deferred = min(next_deferred, (2 * freeable));
920 :
921 : /*
922 : * move the unused scan count back into the shrinker in a
923 : * manner that handles concurrent updates.
924 : */
925 0 : new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);
926 :
927 0 : trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
928 0 : return freed;
929 : }
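/*
 * To put the delta computation above in concrete terms (assuming
 * shrinker->seeks == DEFAULT_SEEKS == 2): with freeable == 10000 and
 * priority == 12, delta = (10000 >> 12) * 4 / 2 == 4, so a lightly
 * pressured scan asks for only a few objects; at priority == 0 the same
 * shrinker gets delta == 10000 * 4 / 2 == 20000, which total_scan then
 * caps at 2 * freeable.
 */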
930 :
931 : #ifdef CONFIG_MEMCG
932 : static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
933 : struct mem_cgroup *memcg, int priority)
934 : {
935 : struct shrinker_info *info;
936 : unsigned long ret, freed = 0;
937 : int i;
938 :
939 : if (!mem_cgroup_online(memcg))
940 : return 0;
941 :
942 : if (!down_read_trylock(&shrinker_rwsem))
943 : return 0;
944 :
945 : info = shrinker_info_protected(memcg, nid);
946 : if (unlikely(!info))
947 : goto unlock;
948 :
949 : for_each_set_bit(i, info->map, info->map_nr_max) {
950 : struct shrink_control sc = {
951 : .gfp_mask = gfp_mask,
952 : .nid = nid,
953 : .memcg = memcg,
954 : };
955 : struct shrinker *shrinker;
956 :
957 : shrinker = idr_find(&shrinker_idr, i);
958 : if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) {
959 : if (!shrinker)
960 : clear_bit(i, info->map);
961 : continue;
962 : }
963 :
964 : /* Call non-slab shrinkers even though kmem is disabled */
965 : if (!memcg_kmem_online() &&
966 : !(shrinker->flags & SHRINKER_NONSLAB))
967 : continue;
968 :
969 : ret = do_shrink_slab(&sc, shrinker, priority);
970 : if (ret == SHRINK_EMPTY) {
971 : clear_bit(i, info->map);
972 : /*
973 : * After the shrinker reported that it had no objects to
974 : * free, but before we cleared the corresponding bit in
975 : * the memcg shrinker map, a new object might have been
976 : * added. To make sure we have the bit set in this
977 : * case, we invoke the shrinker one more time and reset
978 : * the bit if it reports that it is not empty anymore.
979 : * The memory barrier here pairs with the barrier in
980 : * set_shrinker_bit():
981 : *
982 : * list_lru_add() shrink_slab_memcg()
983 : * list_add_tail() clear_bit()
984 : * <MB> <MB>
985 : * set_bit() do_shrink_slab()
986 : */
987 : smp_mb__after_atomic();
988 : ret = do_shrink_slab(&sc, shrinker, priority);
989 : if (ret == SHRINK_EMPTY)
990 : ret = 0;
991 : else
992 : set_shrinker_bit(memcg, nid, i);
993 : }
994 : freed += ret;
995 :
996 : if (rwsem_is_contended(&shrinker_rwsem)) {
997 : freed = freed ? : 1;
998 : break;
999 : }
1000 : }
1001 : unlock:
1002 : up_read(&shrinker_rwsem);
1003 : return freed;
1004 : }
1005 : #else /* CONFIG_MEMCG */
1006 : static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
1007 : struct mem_cgroup *memcg, int priority)
1008 : {
1009 : return 0;
1010 : }
1011 : #endif /* CONFIG_MEMCG */
1012 :
1013 : /**
1014 : * shrink_slab - shrink slab caches
1015 : * @gfp_mask: allocation context
1016 : * @nid: node whose slab caches to target
1017 : * @memcg: memory cgroup whose slab caches to target
1018 : * @priority: the reclaim priority
1019 : *
1020 : * Call the shrink functions to age shrinkable caches.
1021 : *
1022 : * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
1023 : * unaware shrinkers will receive a node id of 0 instead.
1024 : *
1025 : * @memcg specifies the memory cgroup to target. Unaware shrinkers
1026 : * are called only if it is the root cgroup.
1027 : *
1028 : * @priority is sc->priority, we take the number of objects and >> by priority
1029 : * in order to get the scan target.
1030 : *
1031 : * Returns the number of reclaimed slab objects.
1032 : */
1033 0 : static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
1034 : struct mem_cgroup *memcg,
1035 : int priority)
1036 : {
1037 0 : unsigned long ret, freed = 0;
1038 : struct shrinker *shrinker;
1039 :
1040 : /*
1041 : * The root memcg might be allocated even though memcg is disabled
1042 : * via "cgroup_disable=memory" boot parameter. This could make
1043 : * mem_cgroup_is_root() return false, so only the memcg slab shrink
1044 : * would run while the global shrink is skipped. This may result in
1045 : * premature OOM.
1046 : */
1047 : if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
1048 : return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
1049 :
1050 0 : if (!down_read_trylock(&shrinker_rwsem))
1051 : goto out;
1052 :
1053 0 : list_for_each_entry(shrinker, &shrinker_list, list) {
1054 0 : struct shrink_control sc = {
1055 : .gfp_mask = gfp_mask,
1056 : .nid = nid,
1057 : .memcg = memcg,
1058 : };
1059 :
1060 0 : ret = do_shrink_slab(&sc, shrinker, priority);
1061 0 : if (ret == SHRINK_EMPTY)
1062 0 : ret = 0;
1063 0 : freed += ret;
1064 : /*
1065 : * Bail out if someone wants to register a new shrinker, to
1066 : * prevent the registration from being stalled for long periods
1067 : * by parallel ongoing shrinking.
1068 : */
1069 0 : if (rwsem_is_contended(&shrinker_rwsem)) {
1070 0 : freed = freed ? : 1;
1071 0 : break;
1072 : }
1073 : }
1074 :
1075 0 : up_read(&shrinker_rwsem);
1076 : out:
1077 0 : cond_resched();
1078 : return freed;
1079 : }
1080 :
1081 : static unsigned long drop_slab_node(int nid)
1082 : {
1083 0 : unsigned long freed = 0;
1084 0 : struct mem_cgroup *memcg = NULL;
1085 :
1086 0 : memcg = mem_cgroup_iter(NULL, NULL, NULL);
1087 : do {
1088 0 : freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
1089 0 : } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
1090 :
1091 : return freed;
1092 : }
1093 :
1094 0 : void drop_slab(void)
1095 : {
1096 : int nid;
1097 0 : int shift = 0;
1098 : unsigned long freed;
1099 :
1100 : do {
1101 0 : freed = 0;
1102 0 : for_each_online_node(nid) {
1103 0 : if (fatal_signal_pending(current))
1104 : return;
1105 :
1106 0 : freed += drop_slab_node(nid);
1107 : }
1108 0 : } while ((freed >> shift++) > 1);
1109 : }
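/*
 * The termination condition above doubles the bar on every pass: shift
 * starts at 0, so the loop continues only while a full pass over all
 * nodes frees at least 2, then 4, then 8, ... objects, guaranteeing
 * that drop_slab() converges even if shrinkers keep finding a trickle
 * of freeable objects.
 */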
1110 :
1111 : static int reclaimer_offset(void)
1112 : {
1113 : BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
1114 : PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD);
1115 : BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
1116 : PGSCAN_DIRECT - PGSCAN_KSWAPD);
1117 : BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
1118 : PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD);
1119 : BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
1120 : PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD);
1121 :
1122 0 : if (current_is_kswapd())
1123 : return 0;
1124 : if (current_is_khugepaged())
1125 : return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
1126 : return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
1127 : }
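/*
 * The returned offset is added to the PGSTEAL_KSWAPD / PGSCAN_KSWAPD /
 * PGDEMOTE_KSWAPD base counters, e.g.
 * __count_vm_events(PGDEMOTE_KSWAPD + reclaimer_offset(), nr_succeeded)
 * in demote_folio_list() below, so the same expression attributes the
 * event to kswapd, direct reclaim or khugepaged depending on the
 * current task; the BUILD_BUG_ONs above keep the three enum groups in
 * lockstep.
 */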
1128 :
1129 : static inline int is_page_cache_freeable(struct folio *folio)
1130 : {
1131 : /*
1132 : * A freeable page cache folio is referenced only by the caller
1133 : * that isolated the folio, the page cache and optional filesystem
1134 : * private data at folio->private.
1135 : */
1136 0 : return folio_ref_count(folio) - folio_test_private(folio) ==
1137 0 : 1 + folio_nr_pages(folio);
1138 : }
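/*
 * Concretely: an isolated order-0 folio with no filesystem private data
 * is freeable at a refcount of exactly 2 (isolator + page cache); if
 * private data such as buffer heads is attached, folio_test_private()
 * adds one and the threshold becomes 3.
 */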
1139 :
1140 : /*
1141 : * We detected a synchronous write error writing a folio out. Probably
1142 : * -ENOSPC. We need to propagate that into the address_space for a subsequent
1143 : * fsync(), msync() or close().
1144 : *
1145 : * The tricky part is that after writepage we cannot touch the mapping: nothing
1146 : * prevents it from being freed up. But we have a ref on the folio and once
1147 : * that folio is locked, the mapping is pinned.
1148 : *
1149 : * We're allowed to run sleeping folio_lock() here because we know the caller has
1150 : * __GFP_FS.
1151 : */
1152 0 : static void handle_write_error(struct address_space *mapping,
1153 : struct folio *folio, int error)
1154 : {
1155 0 : folio_lock(folio);
1156 0 : if (folio_mapping(folio) == mapping)
1157 0 : mapping_set_error(mapping, error);
1158 0 : folio_unlock(folio);
1159 0 : }
1160 :
1161 0 : static bool skip_throttle_noprogress(pg_data_t *pgdat)
1162 : {
1163 0 : int reclaimable = 0, write_pending = 0;
1164 : int i;
1165 :
1166 : /*
1167 : * If kswapd is disabled, reschedule if necessary but do not
1168 : * throttle as the system is likely near OOM.
1169 : */
1170 0 : if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
1171 : return true;
1172 :
1173 : /*
1174 : * If there are a lot of dirty/writeback folios then do not
1175 : * throttle as throttling will occur when the folios cycle
1176 : * towards the end of the LRU if still under writeback.
1177 : */
1178 0 : for (i = 0; i < MAX_NR_ZONES; i++) {
1179 0 : struct zone *zone = pgdat->node_zones + i;
1180 :
1181 0 : if (!managed_zone(zone))
1182 0 : continue;
1183 :
1184 0 : reclaimable += zone_reclaimable_pages(zone);
1185 0 : write_pending += zone_page_state_snapshot(zone,
1186 : NR_ZONE_WRITE_PENDING);
1187 : }
1188 0 : if (2 * write_pending <= reclaimable)
1189 : return true;
1190 :
1191 0 : return false;
1192 : }
1193 :
1194 0 : void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
1195 : {
1196 0 : wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
1197 : long timeout, ret;
1198 0 : DEFINE_WAIT(wait);
1199 :
1200 : /*
1201 : * Do not throttle user workers, kthreads other than kswapd or
1202 : * workqueues. They may be required for reclaim to make
1203 : * forward progress (e.g. journalling workqueues or kthreads).
1204 : */
1205 0 : if (!current_is_kswapd() &&
1206 0 : current->flags & (PF_USER_WORKER|PF_KTHREAD)) {
1207 0 : cond_resched();
1208 0 : return;
1209 : }
1210 :
1211 : /*
1212 : * These figures are pulled out of thin air.
1213 : * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many
1214 : * parallel reclaimers which is a short-lived event so the timeout is
1215 : * short. Failing to make progress or waiting on writeback are
1216 : * potentially long-lived events so use a longer timeout. This is shaky
1217 : * logic as a failure to make progress could be due to anything from
1218 : * writeback to a slow device to excessive referenced folios at the tail
1219 : * of the inactive LRU.
1220 : */
1221 0 : switch(reason) {
1222 : case VMSCAN_THROTTLE_WRITEBACK:
1223 0 : timeout = HZ/10;
1224 :
1225 0 : if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
1226 0 : WRITE_ONCE(pgdat->nr_reclaim_start,
1227 : node_page_state(pgdat, NR_THROTTLED_WRITTEN));
1228 : }
1229 :
1230 : break;
1231 : case VMSCAN_THROTTLE_CONGESTED:
1232 : fallthrough;
1233 : case VMSCAN_THROTTLE_NOPROGRESS:
1234 0 : if (skip_throttle_noprogress(pgdat)) {
1235 0 : cond_resched();
1236 0 : return;
1237 : }
1238 :
1239 : timeout = 1;
1240 :
1241 : break;
1242 : case VMSCAN_THROTTLE_ISOLATED:
1243 : timeout = HZ/50;
1244 : break;
1245 : default:
1246 0 : WARN_ON_ONCE(1);
1247 : timeout = HZ;
1248 : break;
1249 : }
1250 :
1251 0 : prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1252 0 : ret = schedule_timeout(timeout);
1253 0 : finish_wait(wqh, &wait);
1254 :
1255 0 : if (reason == VMSCAN_THROTTLE_WRITEBACK)
1256 0 : atomic_dec(&pgdat->nr_writeback_throttled);
1257 :
1258 0 : trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
1259 0 : jiffies_to_usecs(timeout - ret),
1260 : reason);
1261 : }
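/*
 * Summary of the timeouts chosen above: waiting on writeback sleeps up
 * to HZ/10, too many isolated folios up to HZ/50, and the
 * congested/no-progress cases a single jiffy (when not skipped
 * entirely); an unknown reason falls back to a full HZ and warns once.
 */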
1262 :
1263 : /*
1264 : * Account for folios written if tasks are throttled waiting on dirty
1265 : * folios to clean. If enough folios have been cleaned since throttling
1266 : * started then wakeup the throttled tasks.
1267 : */
1268 0 : void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
1269 : int nr_throttled)
1270 : {
1271 : unsigned long nr_written;
1272 :
1273 0 : node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);
1274 :
1275 : /*
1276 : * This is an inaccurate read as the per-cpu deltas may not
1277 : * be synchronised. However, given that the system is
1278 : * writeback throttled, it is not worth taking the penalty
1279 : * of getting an accurate count. At worst, the throttle
1280 : * timeout guarantees forward progress.
1281 : */
1282 0 : nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
1283 0 : READ_ONCE(pgdat->nr_reclaim_start);
1284 :
1285 0 : if (nr_written > SWAP_CLUSTER_MAX * nr_throttled)
1286 0 : wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
1287 0 : }
1288 :
1289 : /* possible outcome of pageout() */
1290 : typedef enum {
1291 : /* failed to write folio out, folio is locked */
1292 : PAGE_KEEP,
1293 : /* move folio to the active list, folio is locked */
1294 : PAGE_ACTIVATE,
1295 : /* folio has been sent to the disk successfully, folio is unlocked */
1296 : PAGE_SUCCESS,
1297 : /* folio is clean and locked */
1298 : PAGE_CLEAN,
1299 : } pageout_t;
1300 :
1301 : /*
1302 : * pageout is called by shrink_folio_list() for each dirty folio.
1303 : * Calls ->writepage().
1304 : */
1305 0 : static pageout_t pageout(struct folio *folio, struct address_space *mapping,
1306 : struct swap_iocb **plug)
1307 : {
1308 : /*
1309 : * If the folio is dirty, only perform writeback if that write
1310 : * will be non-blocking, to prevent this allocation from being
1311 : * stalled by pagecache activity. Note that there may still be
1312 : * stalls if we need to run get_block(); we could test
1313 : * PagePrivate for that.
1314 : *
1315 : * If this process is currently in __generic_file_write_iter() against
1316 : * this folio's queue, we can perform writeback even if that
1317 : * will block.
1318 : *
1319 : * If the folio is swapcache, write it back even if that would
1320 : * block, for some throttling. This happens by accident, because
1321 : * swap_backing_dev_info is bust: it doesn't reflect the
1322 : * congestion state of the swapdevs. Easy to fix, if needed.
1323 : */
1324 0 : if (!is_page_cache_freeable(folio))
1325 : return PAGE_KEEP;
1326 0 : if (!mapping) {
1327 : /*
1328 : * Some data journaling orphaned folios can have
1329 : * folio->mapping == NULL while being dirty with clean buffers.
1330 : */
1331 0 : if (folio_test_private(folio)) {
1332 0 : if (try_to_free_buffers(folio)) {
1333 0 : folio_clear_dirty(folio);
1334 0 : pr_info("%s: orphaned folio\n", __func__);
1335 0 : return PAGE_CLEAN;
1336 : }
1337 : }
1338 : return PAGE_KEEP;
1339 : }
1340 0 : if (mapping->a_ops->writepage == NULL)
1341 : return PAGE_ACTIVATE;
1342 :
1343 0 : if (folio_clear_dirty_for_io(folio)) {
1344 : int res;
1345 0 : struct writeback_control wbc = {
1346 : .sync_mode = WB_SYNC_NONE,
1347 : .nr_to_write = SWAP_CLUSTER_MAX,
1348 : .range_start = 0,
1349 : .range_end = LLONG_MAX,
1350 : .for_reclaim = 1,
1351 : .swap_plug = plug,
1352 : };
1353 :
1354 0 : folio_set_reclaim(folio);
1355 0 : res = mapping->a_ops->writepage(&folio->page, &wbc);
1356 0 : if (res < 0)
1357 0 : handle_write_error(mapping, folio, res);
1358 0 : if (res == AOP_WRITEPAGE_ACTIVATE) {
1359 0 : folio_clear_reclaim(folio);
1360 0 : return PAGE_ACTIVATE;
1361 : }
1362 :
1363 0 : if (!folio_test_writeback(folio)) {
1364 : /* synchronous write or broken a_ops? */
1365 : folio_clear_reclaim(folio);
1366 : }
1367 0 : trace_mm_vmscan_write_folio(folio);
1368 0 : node_stat_add_folio(folio, NR_VMSCAN_WRITE);
1369 0 : return PAGE_SUCCESS;
1370 : }
1371 :
1372 : return PAGE_CLEAN;
1373 : }
1374 :
1375 : /*
1376 : * Same as remove_mapping, but if the folio is removed from the mapping, it
1377 : * gets returned with a refcount of 0.
1378 : */
1379 0 : static int __remove_mapping(struct address_space *mapping, struct folio *folio,
1380 : bool reclaimed, struct mem_cgroup *target_memcg)
1381 : {
1382 : int refcount;
1383 0 : void *shadow = NULL;
1384 :
1385 0 : BUG_ON(!folio_test_locked(folio));
1386 0 : BUG_ON(mapping != folio_mapping(folio));
1387 :
1388 0 : if (!folio_test_swapcache(folio))
1389 0 : spin_lock(&mapping->host->i_lock);
1390 0 : xa_lock_irq(&mapping->i_pages);
1391 : /*
1392 : * The non racy check for a busy folio.
1393 : *
1394 : * Must be careful with the order of the tests. When someone has
1395 : * a ref to the folio, it may be possible that they dirty it then
1396 : * drop the reference. So if the dirty flag is tested before the
1397 : * refcount here, then the following race may occur:
1398 : *
1399 : * get_user_pages(&page);
1400 : * [user mapping goes away]
1401 : * write_to(page);
1402 : * !folio_test_dirty(folio) [good]
1403 : * folio_set_dirty(folio);
1404 : * folio_put(folio);
1405 : * !refcount(folio) [good, discard it]
1406 : *
1407 : * [oops, our write_to data is lost]
1408 : *
1409 : * Reversing the order of the tests ensures such a situation cannot
1410 : * escape unnoticed. The smp_rmb is needed to ensure the folio->flags
1411 : * load is not satisfied before that of folio->_refcount.
1412 : *
1413 : * Note that if the dirty flag is always set via folio_mark_dirty,
1414 : * and thus under the i_pages lock, then this ordering is not required.
1415 : */
1416 0 : refcount = 1 + folio_nr_pages(folio);
1417 0 : if (!folio_ref_freeze(folio, refcount))
1418 : goto cannot_free;
1419 : /* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */
1420 0 : if (unlikely(folio_test_dirty(folio))) {
1421 : folio_ref_unfreeze(folio, refcount);
1422 : goto cannot_free;
1423 : }
1424 :
1425 0 : if (folio_test_swapcache(folio)) {
1426 0 : swp_entry_t swap = folio_swap_entry(folio);
1427 :
1428 0 : if (reclaimed && !mapping_exiting(mapping))
1429 0 : shadow = workingset_eviction(folio, target_memcg);
1430 0 : __delete_from_swap_cache(folio, swap, shadow);
1431 0 : mem_cgroup_swapout(folio, swap);
1432 0 : xa_unlock_irq(&mapping->i_pages);
1433 0 : put_swap_folio(folio, swap);
1434 : } else {
1435 : void (*free_folio)(struct folio *);
1436 :
1437 0 : free_folio = mapping->a_ops->free_folio;
1438 : /*
1439 : * Remember a shadow entry for reclaimed file cache in
1440 : * order to detect refaults, thus thrashing, later on.
1441 : *
1442 : * But don't store shadows in an address space that is
1443 : * already exiting. This is not just an optimization,
1444 : * inode reclaim needs to empty out the radix tree or
1445 : * the nodes are lost. Don't plant shadows behind its
1446 : * back.
1447 : *
1448 : * We also don't store shadows for DAX mappings because the
1449 : * only page cache folios found in these are zero pages
1450 : * covering holes, and because we don't want to mix DAX
1451 : * exceptional entries and shadow exceptional entries in the
1452 : * same address_space.
1453 : */
1454 0 : if (reclaimed && folio_is_file_lru(folio) &&
1455 0 : !mapping_exiting(mapping) && !dax_mapping(mapping))
1456 0 : shadow = workingset_eviction(folio, target_memcg);
1457 0 : __filemap_remove_folio(folio, shadow);
1458 0 : xa_unlock_irq(&mapping->i_pages);
1459 0 : if (mapping_shrinkable(mapping))
1460 0 : inode_add_lru(mapping->host);
1461 0 : spin_unlock(&mapping->host->i_lock);
1462 :
1463 0 : if (free_folio)
1464 0 : free_folio(folio);
1465 : }
1466 :
1467 : return 1;
1468 :
1469 : cannot_free:
1470 0 : xa_unlock_irq(&mapping->i_pages);
1471 0 : if (!folio_test_swapcache(folio))
1472 0 : spin_unlock(&mapping->host->i_lock);
1473 : return 0;
1474 : }
1475 :
1476 : /**
1477 : * remove_mapping() - Attempt to remove a folio from its mapping.
1478 : * @mapping: The address space.
1479 : * @folio: The folio to remove.
1480 : *
1481 : * If the folio is dirty, under writeback or if someone else has a ref
1482 : * on it, removal will fail.
1483 : * Return: The number of pages removed from the mapping. 0 if the folio
1484 : * could not be removed.
1485 : * Context: The caller should have a single refcount on the folio and
1486 : * hold its lock.
1487 : */
1488 0 : long remove_mapping(struct address_space *mapping, struct folio *folio)
1489 : {
1490 0 : if (__remove_mapping(mapping, folio, false, NULL)) {
1491 : /*
1492 : * Unfreezing the refcount with 1 effectively
1493 : * drops the pagecache ref for us without requiring another
1494 : * atomic operation.
1495 : */
1496 0 : folio_ref_unfreeze(folio, 1);
1497 : return folio_nr_pages(folio);
1498 : }
1499 : return 0;
1500 : }
1501 :
1502 : /**
1503 : * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
1504 : * @folio: Folio to be returned to an LRU list.
1505 : *
1506 : * Add previously isolated @folio to appropriate LRU list.
1507 : * The folio may still be unevictable for other reasons.
1508 : *
1509 : * Context: lru_lock must not be held, interrupts must be enabled.
1510 : */
1511 0 : void folio_putback_lru(struct folio *folio)
1512 : {
1513 0 : folio_add_lru(folio);
1514 0 : folio_put(folio); /* drop ref from isolate */
1515 0 : }
1516 :
1517 : enum folio_references {
1518 : FOLIOREF_RECLAIM,
1519 : FOLIOREF_RECLAIM_CLEAN,
1520 : FOLIOREF_KEEP,
1521 : FOLIOREF_ACTIVATE,
1522 : };
1523 :
1524 0 : static enum folio_references folio_check_references(struct folio *folio,
1525 : struct scan_control *sc)
1526 : {
1527 : int referenced_ptes, referenced_folio;
1528 : unsigned long vm_flags;
1529 :
1530 0 : referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
1531 : &vm_flags);
1532 0 : referenced_folio = folio_test_clear_referenced(folio);
1533 :
1534 : /*
1535 : * The supposedly reclaimable folio was found to be in a VM_LOCKED vma.
1536 : * Let the folio, now marked Mlocked, be moved to the unevictable list.
1537 : */
1538 0 : if (vm_flags & VM_LOCKED)
1539 : return FOLIOREF_ACTIVATE;
1540 :
1541 : /* rmap lock contention: rotate */
1542 0 : if (referenced_ptes == -1)
1543 : return FOLIOREF_KEEP;
1544 :
1545 0 : if (referenced_ptes) {
1546 : /*
1547 : * All mapped folios start out with page table
1548 : * references from the instantiating fault, so we need
1549 : * to look twice if a mapped file/anon folio is used more
1550 : * than once.
1551 : *
1552 : * Mark it and spare it for another trip around the
1553 : * inactive list. Another page table reference will
1554 : * lead to its activation.
1555 : *
1556 : * Note: the mark is set for activated folios as well
1557 : * so that recently deactivated but used folios are
1558 : * quickly recovered.
1559 : */
1560 0 : folio_set_referenced(folio);
1561 :
1562 0 : if (referenced_folio || referenced_ptes > 1)
1563 : return FOLIOREF_ACTIVATE;
1564 :
1565 : /*
1566 : * Activate file-backed executable folios after first usage.
1567 : */
1568 0 : if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
1569 : return FOLIOREF_ACTIVATE;
1570 :
1571 : return FOLIOREF_KEEP;
1572 : }
1573 :
1574 : /* Reclaim if clean, defer dirty folios to writeback */
1575 0 : if (referenced_folio && folio_is_file_lru(folio))
1576 : return FOLIOREF_RECLAIM_CLEAN;
1577 :
1578 : return FOLIOREF_RECLAIM;
1579 : }
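/*
 * In summary, the checks above map to the return values as follows:
 * VM_LOCKED vmas, folios referenced by more than one page table (or by
 * a page table plus the referenced flag), and executable file folios
 * with any page table reference are activated; rmap lock contention or
 * a single fresh page table reference keeps the folio on the inactive
 * list; an unmapped, reference-flagged file folio is reclaimed only if
 * it stays clean (FOLIOREF_RECLAIM_CLEAN); everything else is
 * reclaimed.
 */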
1580 :
1581 : /* Check if a folio is dirty or under writeback */
1582 0 : static void folio_check_dirty_writeback(struct folio *folio,
1583 : bool *dirty, bool *writeback)
1584 : {
1585 : struct address_space *mapping;
1586 :
1587 : /*
1588 : * Anonymous folios are not handled by flushers and must be written
1589 : * from reclaim context. Do not stall reclaim based on them.
1590 : * MADV_FREE anonymous folios are put into inactive file list too.
1591 : * They could be mistakenly treated as being on the file LRU, so a
1592 : * further anon test is needed.
1593 : */
1594 0 : if (!folio_is_file_lru(folio) ||
1595 0 : (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
1596 0 : *dirty = false;
1597 0 : *writeback = false;
1598 0 : return;
1599 : }
1600 :
1601 : /* By default assume that the folio flags are accurate */
1602 0 : *dirty = folio_test_dirty(folio);
1603 0 : *writeback = folio_test_writeback(folio);
1604 :
1605 : /* Verify dirty/writeback state if the filesystem supports it */
1606 0 : if (!folio_test_private(folio))
1607 : return;
1608 :
1609 0 : mapping = folio_mapping(folio);
1610 0 : if (mapping && mapping->a_ops->is_dirty_writeback)
1611 0 : mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
1612 : }
1613 :
1614 : static struct folio *alloc_demote_folio(struct folio *src,
1615 : unsigned long private)
1616 : {
1617 : struct folio *dst;
1618 : nodemask_t *allowed_mask;
1619 : struct migration_target_control *mtc;
1620 :
1621 : mtc = (struct migration_target_control *)private;
1622 :
1623 : allowed_mask = mtc->nmask;
1624 : /*
1625 : * Make sure we allocate from the target node first, also trying to
1626 : * demote or reclaim pages from the target node via kswapd if we are
1627 : * low on free memory on the target node. If we don't do this and
1628 : * we have free memory on a slower (lower) memtier, we would start
1629 : * allocating pages from the slower (lower) memory tiers without even
1630 : * forcing a demotion of cold pages from the target memtier. This can
1631 : * result in the kernel placing hot pages in slower (lower) memory tiers.
1632 : */
1633 : mtc->nmask = NULL;
1634 : mtc->gfp_mask |= __GFP_THISNODE;
1635 : dst = alloc_migration_target(src, (unsigned long)mtc);
1636 : if (dst)
1637 : return dst;
1638 :
1639 : mtc->gfp_mask &= ~__GFP_THISNODE;
1640 : mtc->nmask = allowed_mask;
1641 :
1642 : return alloc_migration_target(src, (unsigned long)mtc);
1643 : }
1644 :
1645 : /*
1646 : * Take folios on @demote_folios and attempt to demote them to another node.
1647 : * Folios which are not demoted are left on @demote_folios.
1648 : */
1649 : static unsigned int demote_folio_list(struct list_head *demote_folios,
1650 : struct pglist_data *pgdat)
1651 : {
1652 0 : int target_nid = next_demotion_node(pgdat->node_id);
1653 : unsigned int nr_succeeded;
1654 : nodemask_t allowed_mask;
1655 :
1656 0 : struct migration_target_control mtc = {
1657 : /*
1658 : * Allocate from 'node', or fail quickly and quietly.
1659 : * When this happens, 'page' will likely just be discarded
1660 : * instead of migrated.
1661 : */
1662 : .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
1663 : __GFP_NOMEMALLOC | GFP_NOWAIT,
1664 : .nid = target_nid,
1665 : .nmask = &allowed_mask
1666 : };
1667 :
1668 0 : if (list_empty(demote_folios))
1669 : return 0;
1670 :
1671 : if (target_nid == NUMA_NO_NODE)
1672 : return 0;
1673 :
1674 : node_get_allowed_targets(pgdat, &allowed_mask);
1675 :
1676 : /* Demotion ignores all cpuset and mempolicy settings */
1677 : migrate_pages(demote_folios, alloc_demote_folio, NULL,
1678 : (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
1679 : &nr_succeeded);
1680 :
1681 : __count_vm_events(PGDEMOTE_KSWAPD + reclaimer_offset(), nr_succeeded);
1682 :
1683 : return nr_succeeded;
1684 : }
1685 :
1686 0 : static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
1687 : {
1688 0 : if (gfp_mask & __GFP_FS)
1689 : return true;
1690 0 : if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
1691 : return false;
1692 : /*
1693 : * We can "enter_fs" for swap-cache with only __GFP_IO
1694 : * providing this isn't SWP_FS_OPS.
1695 : * ->flags can be updated non-atomically (scan_swap_map_slots),
1696 : * but that will never affect SWP_FS_OPS, so the data_race
1697 : * is safe.
1698 : */
1699 0 : return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
1700 : }
1701 :
1702 : /*
1703 : * shrink_folio_list() returns the number of reclaimed pages
1704 : */
1705 0 : static unsigned int shrink_folio_list(struct list_head *folio_list,
1706 : struct pglist_data *pgdat, struct scan_control *sc,
1707 : struct reclaim_stat *stat, bool ignore_references)
1708 : {
1709 0 : LIST_HEAD(ret_folios);
1710 0 : LIST_HEAD(free_folios);
1711 0 : LIST_HEAD(demote_folios);
1712 0 : unsigned int nr_reclaimed = 0;
1713 0 : unsigned int pgactivate = 0;
1714 : bool do_demote_pass;
1715 0 : struct swap_iocb *plug = NULL;
1716 :
1717 0 : memset(stat, 0, sizeof(*stat));
1718 0 : cond_resched();
1719 0 : do_demote_pass = can_demote(pgdat->node_id, sc);
1720 :
1721 : retry:
1722 0 : while (!list_empty(folio_list)) {
1723 : struct address_space *mapping;
1724 : struct folio *folio;
1725 0 : enum folio_references references = FOLIOREF_RECLAIM;
1726 : bool dirty, writeback;
1727 : unsigned int nr_pages;
1728 :
1729 0 : cond_resched();
1730 :
1731 0 : folio = lru_to_folio(folio_list);
1732 0 : list_del(&folio->lru);
1733 :
1734 0 : if (!folio_trylock(folio))
1735 : goto keep;
1736 :
1737 : VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
1738 :
1739 0 : nr_pages = folio_nr_pages(folio);
1740 :
1741 : /* Account the number of base pages */
1742 0 : sc->nr_scanned += nr_pages;
1743 :
1744 0 : if (unlikely(!folio_evictable(folio)))
1745 : goto activate_locked;
1746 :
1747 0 : if (!sc->may_unmap && folio_mapped(folio))
1748 : goto keep_locked;
1749 :
1750 : /* folio_update_gen() tried to promote this page? */
1751 : if (lru_gen_enabled() && !ignore_references &&
1752 : folio_mapped(folio) && folio_test_referenced(folio))
1753 : goto keep_locked;
1754 :
1755 : /*
1756 : * The number of dirty pages determines if a node is marked
1757 : * reclaim_congested. kswapd will stall and start writing
1758 : * folios if the tail of the LRU is all dirty unqueued folios.
1759 : */
1760 0 : folio_check_dirty_writeback(folio, &dirty, &writeback);
1761 0 : if (dirty || writeback)
1762 0 : stat->nr_dirty += nr_pages;
1763 :
1764 0 : if (dirty && !writeback)
1765 0 : stat->nr_unqueued_dirty += nr_pages;
1766 :
1767 : /*
1768 : * Treat this folio as congested if folios are cycling
1769 : * through the LRU so quickly that the folios marked
1770 : * for immediate reclaim are making it to the end of
1771 : * the LRU a second time.
1772 : */
1773 0 : if (writeback && folio_test_reclaim(folio))
1774 0 : stat->nr_congested += nr_pages;
1775 :
1776 : /*
1777 : * If a folio at the tail of the LRU is under writeback, there
1778 : * are three cases to consider.
1779 : *
1780 : * 1) If reclaim is encountering an excessive number
1781 : * of folios under writeback and this folio has both
1782 : * the writeback and reclaim flags set, then it
1783 : * indicates that folios are being queued for I/O but
1784 : * are being recycled through the LRU before the I/O
1785 : * can complete. Waiting on the folio itself risks an
1786 : 		 * indefinite stall if it is impossible to write back
1787 : 		 * the folio due to an I/O error or disconnected storage,
1788 : 		 * so instead note that the LRU is being scanned too
1789 : * quickly and the caller can stall after the folio
1790 : * list has been processed.
1791 : *
1792 : * 2) Global or new memcg reclaim encounters a folio that is
1793 : * not marked for immediate reclaim, or the caller does not
1794 : * have __GFP_FS (or __GFP_IO if it's simply going to swap,
1795 : * not to fs). In this case mark the folio for immediate
1796 : * reclaim and continue scanning.
1797 : *
1798 : * Require may_enter_fs() because we would wait on fs, which
1799 : * may not have submitted I/O yet. And the loop driver might
1800 : 		 * enter reclaim, and deadlock if it waits on a folio that
1801 : 		 * it itself needs to write out (loop masks off
1802 : * __GFP_IO|__GFP_FS for this reason); but more thought
1803 : * would probably show more reasons.
1804 : *
1805 : * 3) Legacy memcg encounters a folio that already has the
1806 : * reclaim flag set. memcg does not have any dirty folio
1807 : * throttling so we could easily OOM just because too many
1808 : * folios are in writeback and there is nothing else to
1809 : * reclaim. Wait for the writeback to complete.
1810 : *
1811 : * In cases 1) and 2) we activate the folios to get them out of
1812 : * the way while we continue scanning for clean folios on the
1813 : * inactive list and refilling from the active list. The
1814 : * observation here is that waiting for disk writes is more
1815 : * expensive than potentially causing reloads down the line.
1816 : * Since they're marked for immediate reclaim, they won't put
1817 : * memory pressure on the cache working set any longer than it
1818 : * takes to write them to disk.
1819 : */
1820 0 : if (folio_test_writeback(folio)) {
1821 : /* Case 1 above */
1822 0 : if (current_is_kswapd() &&
1823 0 : folio_test_reclaim(folio) &&
1824 0 : test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1825 0 : stat->nr_immediate += nr_pages;
1826 0 : goto activate_locked;
1827 :
1828 : /* Case 2 above */
1829 0 : } else if (writeback_throttling_sane(sc) ||
1830 : !folio_test_reclaim(folio) ||
1831 : !may_enter_fs(folio, sc->gfp_mask)) {
1832 : /*
1833 : * This is slightly racy -
1834 : * folio_end_writeback() might have
1835 : * just cleared the reclaim flag, then
1836 : * setting the reclaim flag here ends up
1837 : * interpreted as the readahead flag - but
1838 : * that does not matter enough to care.
1839 : * What we do want is for this folio to
1840 : * have the reclaim flag set next time
1841 : * memcg reclaim reaches the tests above,
1842 : * so it will then wait for writeback to
1843 : * avoid OOM; and it's also appropriate
1844 : * in global reclaim.
1845 : */
1846 0 : folio_set_reclaim(folio);
1847 0 : stat->nr_writeback += nr_pages;
1848 0 : goto activate_locked;
1849 :
1850 : /* Case 3 above */
1851 : } else {
1852 : folio_unlock(folio);
1853 : folio_wait_writeback(folio);
1854 : /* then go back and try same folio again */
1855 : list_add_tail(&folio->lru, folio_list);
1856 0 : continue;
1857 : }
1858 : }
1859 :
1860 0 : if (!ignore_references)
1861 0 : references = folio_check_references(folio, sc);
1862 :
1863 0 : switch (references) {
1864 : case FOLIOREF_ACTIVATE:
1865 : goto activate_locked;
1866 : case FOLIOREF_KEEP:
1867 0 : stat->nr_ref_keep += nr_pages;
1868 0 : goto keep_locked;
1869 : case FOLIOREF_RECLAIM:
1870 : case FOLIOREF_RECLAIM_CLEAN:
1871 : ; /* try to reclaim the folio below */
1872 : }
1873 :
1874 : /*
1875 : * Before reclaiming the folio, try to relocate
1876 : * its contents to another node.
1877 : */
1878 : if (do_demote_pass &&
1879 : (thp_migration_supported() || !folio_test_large(folio))) {
1880 : list_add(&folio->lru, &demote_folios);
1881 : folio_unlock(folio);
1882 : continue;
1883 : }
1884 :
1885 : /*
1886 : * Anonymous process memory has backing store?
1887 : * Try to allocate it some swap space here.
1888 : 		 * Lazyfree folios can be freed directly.
1889 : */
1890 0 : if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
1891 0 : if (!folio_test_swapcache(folio)) {
1892 0 : if (!(sc->gfp_mask & __GFP_IO))
1893 : goto keep_locked;
1894 0 : if (folio_maybe_dma_pinned(folio))
1895 : goto keep_locked;
1896 0 : if (folio_test_large(folio)) {
1897 : /* cannot split folio, skip it */
1898 : if (!can_split_folio(folio, NULL))
1899 : goto activate_locked;
1900 : /*
1901 : * Split folios without a PMD map right
1902 : * away. Chances are some or all of the
1903 : * tail pages can be freed without IO.
1904 : */
1905 : if (!folio_entire_mapcount(folio) &&
1906 : split_folio_to_list(folio,
1907 : folio_list))
1908 : goto activate_locked;
1909 : }
1910 0 : if (!add_to_swap(folio)) {
1911 0 : if (!folio_test_large(folio))
1912 : goto activate_locked_split;
1913 : /* Fallback to swap normal pages */
1914 0 : if (split_folio_to_list(folio,
1915 : folio_list))
1916 : goto activate_locked;
1917 : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1918 : count_vm_event(THP_SWPOUT_FALLBACK);
1919 : #endif
1920 0 : if (!add_to_swap(folio))
1921 : goto activate_locked_split;
1922 : }
1923 : }
1924 0 : } else if (folio_test_swapbacked(folio) &&
1925 0 : folio_test_large(folio)) {
1926 : /* Split shmem folio */
1927 : if (split_folio_to_list(folio, folio_list))
1928 : goto keep_locked;
1929 : }
1930 :
1931 : /*
1932 : * If the folio was split above, the tail pages will make
1933 : * their own pass through this function and be accounted
1934 : * then.
1935 : */
1936 0 : if ((nr_pages > 1) && !folio_test_large(folio)) {
1937 0 : sc->nr_scanned -= (nr_pages - 1);
1938 0 : nr_pages = 1;
1939 : }
1940 :
1941 : /*
1942 : * The folio is mapped into the page tables of one or more
1943 : * processes. Try to unmap it here.
1944 : */
1945 0 : if (folio_mapped(folio)) {
1946 0 : enum ttu_flags flags = TTU_BATCH_FLUSH;
1947 0 : bool was_swapbacked = folio_test_swapbacked(folio);
1948 :
1949 : if (folio_test_pmd_mappable(folio))
1950 : flags |= TTU_SPLIT_HUGE_PMD;
1951 :
1952 0 : try_to_unmap(folio, flags);
1953 0 : if (folio_mapped(folio)) {
1954 0 : stat->nr_unmap_fail += nr_pages;
1955 0 : if (!was_swapbacked &&
1956 0 : folio_test_swapbacked(folio))
1957 0 : stat->nr_lazyfree_fail += nr_pages;
1958 : goto activate_locked;
1959 : }
1960 : }
1961 :
1962 : /*
1963 : * Folio is unmapped now so it cannot be newly pinned anymore.
1964 : * No point in trying to reclaim folio if it is pinned.
1965 : * Furthermore we don't want to reclaim underlying fs metadata
1966 : * if the folio is pinned and thus potentially modified by the
1967 : * pinning process as that may upset the filesystem.
1968 : */
1969 0 : if (folio_maybe_dma_pinned(folio))
1970 : goto activate_locked;
1971 :
1972 0 : mapping = folio_mapping(folio);
1973 0 : if (folio_test_dirty(folio)) {
1974 : /*
1975 : 			 * Only kswapd can write back filesystem folios,
1976 : 			 * to avoid the risk of stack overflow. But avoid
1977 : * injecting inefficient single-folio I/O into
1978 : * flusher writeback as much as possible: only
1979 : * write folios when we've encountered many
1980 : * dirty folios, and when we've already scanned
1981 : * the rest of the LRU for clean folios and see
1982 : * the same dirty folios again (with the reclaim
1983 : * flag set).
1984 : */
1985 0 : if (folio_is_file_lru(folio) &&
1986 0 : (!current_is_kswapd() ||
1987 0 : !folio_test_reclaim(folio) ||
1988 0 : !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1989 : /*
1990 : * Immediately reclaim when written back.
1991 : * Similar in principle to folio_deactivate()
1992 : * except we already have the folio isolated
1993 : * and know it's dirty
1994 : */
1995 0 : node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
1996 : nr_pages);
1997 : folio_set_reclaim(folio);
1998 :
1999 : goto activate_locked;
2000 : }
2001 :
2002 0 : if (references == FOLIOREF_RECLAIM_CLEAN)
2003 : goto keep_locked;
2004 0 : if (!may_enter_fs(folio, sc->gfp_mask))
2005 : goto keep_locked;
2006 0 : if (!sc->may_writepage)
2007 : goto keep_locked;
2008 :
2009 : /*
2010 : * Folio is dirty. Flush the TLB if a writable entry
2011 : * potentially exists to avoid CPU writes after I/O
2012 : * starts and then write it out here.
2013 : */
2014 : try_to_unmap_flush_dirty();
2015 0 : switch (pageout(folio, mapping, &plug)) {
2016 : case PAGE_KEEP:
2017 : goto keep_locked;
2018 : case PAGE_ACTIVATE:
2019 : goto activate_locked;
2020 : case PAGE_SUCCESS:
2021 0 : stat->nr_pageout += nr_pages;
2022 :
2023 0 : if (folio_test_writeback(folio))
2024 : goto keep;
2025 0 : if (folio_test_dirty(folio))
2026 : goto keep;
2027 :
2028 : /*
2029 : * A synchronous write - probably a ramdisk. Go
2030 : * ahead and try to reclaim the folio.
2031 : */
2032 0 : if (!folio_trylock(folio))
2033 : goto keep;
2034 0 : if (folio_test_dirty(folio) ||
2035 0 : folio_test_writeback(folio))
2036 : goto keep_locked;
2037 0 : mapping = folio_mapping(folio);
2038 : fallthrough;
2039 : case PAGE_CLEAN:
2040 : ; /* try to free the folio below */
2041 : }
2042 : }
2043 :
2044 : /*
2045 : * If the folio has buffers, try to free the buffer
2046 : * mappings associated with this folio. If we succeed
2047 : * we try to free the folio as well.
2048 : *
2049 : * We do this even if the folio is dirty.
2050 : * filemap_release_folio() does not perform I/O, but it
2051 : * is possible for a folio to have the dirty flag set,
2052 : * but it is actually clean (all its buffers are clean).
2053 : * This happens if the buffers were written out directly,
2054 : * with submit_bh(). ext3 will do this, as well as
2055 : * the blockdev mapping. filemap_release_folio() will
2056 : * discover that cleanness and will drop the buffers
2057 : * and mark the folio clean - it can be freed.
2058 : *
2059 : * Rarely, folios can have buffers and no ->mapping.
2060 : * These are the folios which were not successfully
2061 : * invalidated in truncate_cleanup_folio(). We try to
2062 : * drop those buffers here and if that worked, and the
2063 : * folio is no longer mapped into process address space
2064 : * (refcount == 1) it can be freed. Otherwise, leave
2065 : * the folio on the LRU so it is swappable.
2066 : */
2067 0 : if (folio_has_private(folio)) {
2068 0 : if (!filemap_release_folio(folio, sc->gfp_mask))
2069 : goto activate_locked;
2070 0 : if (!mapping && folio_ref_count(folio) == 1) {
2071 0 : folio_unlock(folio);
2072 0 : if (folio_put_testzero(folio))
2073 : goto free_it;
2074 : else {
2075 : /*
2076 : * rare race with speculative reference.
2077 : * the speculative reference will free
2078 : * this folio shortly, so we may
2079 : * increment nr_reclaimed here (and
2080 : * leave it off the LRU).
2081 : */
2082 0 : nr_reclaimed += nr_pages;
2083 0 : continue;
2084 : }
2085 : }
2086 : }
2087 :
2088 0 : if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
2089 : /* follow __remove_mapping for reference */
2090 0 : if (!folio_ref_freeze(folio, 1))
2091 : goto keep_locked;
2092 : /*
2093 : * The folio has only one reference left, which is
2094 : * from the isolation. After the caller puts the
2095 : * folio back on the lru and drops the reference, the
2096 : * folio will be freed anyway. It doesn't matter
2097 : * which lru it goes on. So we don't bother checking
2098 : * the dirty flag here.
2099 : */
2100 0 : count_vm_events(PGLAZYFREED, nr_pages);
2101 0 : count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
2102 0 : } else if (!mapping || !__remove_mapping(mapping, folio, true,
2103 : sc->target_mem_cgroup))
2104 : goto keep_locked;
2105 :
2106 0 : folio_unlock(folio);
2107 : free_it:
2108 : /*
2109 : * Folio may get swapped out as a whole, need to account
2110 : * all pages in it.
2111 : */
2112 0 : nr_reclaimed += nr_pages;
2113 :
2114 : /*
2115 : 		 * Is there a need to periodically free the folio list? It
2116 : 		 * would appear not, as the counts should be low.
2117 : */
2118 0 : if (unlikely(folio_test_large(folio)))
2119 0 : destroy_large_folio(folio);
2120 : else
2121 0 : list_add(&folio->lru, &free_folios);
2122 0 : continue;
2123 :
2124 : activate_locked_split:
2125 : /*
2126 : 		 * Tail pages that failed to be added to the swap cache
2127 : 		 * reach here. Fix up nr_scanned and nr_pages.
2128 : */
2129 0 : if (nr_pages > 1) {
2130 0 : sc->nr_scanned -= (nr_pages - 1);
2131 0 : nr_pages = 1;
2132 : }
2133 : activate_locked:
2134 : /* Not a candidate for swapping, so reclaim swap space. */
2135 0 : if (folio_test_swapcache(folio) &&
2136 0 : (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
2137 0 : folio_free_swap(folio);
2138 : VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
2139 0 : if (!folio_test_mlocked(folio)) {
2140 0 : int type = folio_is_file_lru(folio);
2141 0 : folio_set_active(folio);
2142 0 : stat->nr_activate[type] += nr_pages;
2143 0 : count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
2144 : }
2145 : keep_locked:
2146 0 : folio_unlock(folio);
2147 : keep:
2148 0 : list_add(&folio->lru, &ret_folios);
2149 : VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
2150 : folio_test_unevictable(folio), folio);
2151 : }
2152 : /* 'folio_list' is always empty here */
2153 :
2154 : /* Migrate folios selected for demotion */
2155 0 : nr_reclaimed += demote_folio_list(&demote_folios, pgdat);
2156 : /* Folios that could not be demoted are still in @demote_folios */
2157 0 : if (!list_empty(&demote_folios)) {
2158 : /* Folios which weren't demoted go back on @folio_list */
2159 0 : list_splice_init(&demote_folios, folio_list);
2160 :
2161 : /*
2162 : * goto retry to reclaim the undemoted folios in folio_list if
2163 : * desired.
2164 : *
2165 : 		 * Reclaiming directly from top tier nodes is not often desired
2166 : 		 * because it breaks the LRU ordering: in general memory
2167 : * should be reclaimed from lower tier nodes and demoted from
2168 : * top tier nodes.
2169 : *
2170 : * However, disabling reclaim from top tier nodes entirely
2171 : 		 * would cause OOMs in edge scenarios where lower tier memory
2172 : 		 * is unreclaimable for whatever reason, e.g. memory being
2173 : * mlocked or too hot to reclaim. We can disable reclaim
2174 : * from top tier nodes in proactive reclaim though as that is
2175 : * not real memory pressure.
2176 : */
2177 0 : if (!sc->proactive) {
2178 : do_demote_pass = false;
2179 : goto retry;
2180 : }
2181 : }
2182 :
2183 0 : pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
2184 :
2185 0 : mem_cgroup_uncharge_list(&free_folios);
2186 : try_to_unmap_flush();
2187 0 : free_unref_page_list(&free_folios);
2188 :
2189 0 : list_splice(&ret_folios, folio_list);
2190 0 : count_vm_events(PGACTIVATE, pgactivate);
2191 :
2192 0 : if (plug)
2193 0 : swap_write_unplug(plug);
2194 0 : return nr_reclaimed;
2195 : }
2196 :
2197 0 : unsigned int reclaim_clean_pages_from_list(struct zone *zone,
2198 : struct list_head *folio_list)
2199 : {
2200 0 : struct scan_control sc = {
2201 : .gfp_mask = GFP_KERNEL,
2202 : .may_unmap = 1,
2203 : };
2204 : struct reclaim_stat stat;
2205 : unsigned int nr_reclaimed;
2206 : struct folio *folio, *next;
2207 0 : LIST_HEAD(clean_folios);
2208 : unsigned int noreclaim_flag;
2209 :
2210 0 : list_for_each_entry_safe(folio, next, folio_list, lru) {
2211 0 : if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
2212 0 : !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
2213 0 : !folio_test_unevictable(folio)) {
2214 0 : folio_clear_active(folio);
2215 0 : list_move(&folio->lru, &clean_folios);
2216 : }
2217 : }
2218 :
2219 : /*
2220 : * We should be safe here since we are only dealing with file pages and
2221 : * we are not kswapd and therefore cannot write dirty file pages. But
2222 : * call memalloc_noreclaim_save() anyway, just in case these conditions
2223 : * change in the future.
2224 : */
2225 0 : noreclaim_flag = memalloc_noreclaim_save();
2226 0 : nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
2227 : &stat, true);
2228 0 : memalloc_noreclaim_restore(noreclaim_flag);
2229 :
2230 0 : list_splice(&clean_folios, folio_list);
2231 0 : mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
2232 : -(long)nr_reclaimed);
2233 : /*
2234 : 	 * Since lazyfree pages are isolated from the file LRU from the beginning,
2235 : 	 * they will rotate back to the anonymous LRU in the end if the discard
2236 : 	 * fails, so the isolated counts will be mismatched.
2237 : * Compensate the isolated count for both LRU lists.
2238 : */
2239 0 : mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
2240 0 : stat.nr_lazyfree_fail);
2241 0 : mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
2242 0 : -(long)stat.nr_lazyfree_fail);
2243 0 : return nr_reclaimed;
2244 : }
2245 :
2246 : /*
2247 : * Update LRU sizes after isolating pages. The LRU size updates must
2248 : * be complete before mem_cgroup_update_lru_size due to a sanity check.
2249 : */
2250 : static __always_inline void update_lru_sizes(struct lruvec *lruvec,
2251 : enum lru_list lru, unsigned long *nr_zone_taken)
2252 : {
2253 : int zid;
2254 :
2255 0 : for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2256 0 : if (!nr_zone_taken[zid])
2257 0 : continue;
2258 :
2259 0 : update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
2260 : }
2261 :
2262 : }
2263 :
2264 : #ifdef CONFIG_CMA
2265 : /*
2266 :  * It is a waste of effort to scan and reclaim CMA pages if they are not
2267 :  * available for the current allocation context. Kswapd cannot be enrolled
2268 :  * because it cannot distinguish this scenario, since it uses sc->gfp_mask = GFP_KERNEL.
2269 : */
2270 : static bool skip_cma(struct folio *folio, struct scan_control *sc)
2271 : {
2272 : return !current_is_kswapd() &&
2273 : gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
2274 : get_pageblock_migratetype(&folio->page) == MIGRATE_CMA;
2275 : }
2276 : #else
2277 : static bool skip_cma(struct folio *folio, struct scan_control *sc)
2278 : {
2279 : return false;
2280 : }
2281 : #endif
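     : 
     : /*
     :  * For example, a direct reclaimer with a plain GFP_KERNEL allocation
     :  * (migratetype MIGRATE_UNMOVABLE) skips folios in MIGRATE_CMA pageblocks,
     :  * since freeing them could not satisfy its allocation anyway, while a
     :  * __GFP_MOVABLE allocation such as GFP_HIGHUSER_MOVABLE does not skip them.
     :  */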
2282 :
2283 : /*
2284 :  * Isolate folios from the lruvec to fill the @dst list, scanning up to nr_to_scan entries.
2285 : *
2286 : * lruvec->lru_lock is heavily contended. Some of the functions that
2287 : * shrink the lists perform better by taking out a batch of pages
2288 : * and working on them outside the LRU lock.
2289 : *
2290 : * For pagecache intensive workloads, this function is the hottest
2291 : * spot in the kernel (apart from copy_*_user functions).
2292 : *
2293 :  * lruvec->lru_lock must be held before calling this function.
2294 : *
2295 : * @nr_to_scan: The number of eligible pages to look through on the list.
2296 : * @lruvec: The LRU vector to pull pages from.
2297 : * @dst: The temp list to put pages on to.
2298 : * @nr_scanned: The number of pages that were scanned.
2299 : * @sc: The scan_control struct for this reclaim session
2300 : * @lru: LRU list id for isolating
2301 : *
2302 : * returns how many pages were moved onto *@dst.
2303 : */
2304 0 : static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
2305 : struct lruvec *lruvec, struct list_head *dst,
2306 : unsigned long *nr_scanned, struct scan_control *sc,
2307 : enum lru_list lru)
2308 : {
2309 0 : struct list_head *src = &lruvec->lists[lru];
2310 0 : unsigned long nr_taken = 0;
2311 0 : unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
2312 0 : unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
2313 0 : unsigned long skipped = 0;
2314 : unsigned long scan, total_scan, nr_pages;
2315 0 : LIST_HEAD(folios_skipped);
2316 :
2317 0 : total_scan = 0;
2318 0 : scan = 0;
2319 0 : while (scan < nr_to_scan && !list_empty(src)) {
2320 0 : struct list_head *move_to = src;
2321 : struct folio *folio;
2322 :
2323 0 : folio = lru_to_folio(src);
2324 : prefetchw_prev_lru_folio(folio, src, flags);
2325 :
2326 0 : nr_pages = folio_nr_pages(folio);
2327 0 : total_scan += nr_pages;
2328 :
2329 0 : if (folio_zonenum(folio) > sc->reclaim_idx ||
2330 0 : skip_cma(folio, sc)) {
2331 0 : nr_skipped[folio_zonenum(folio)] += nr_pages;
2332 0 : move_to = &folios_skipped;
2333 0 : goto move;
2334 : }
2335 :
2336 : /*
2337 : * Do not count skipped folios because that makes the function
2338 : * return with no isolated folios if the LRU mostly contains
2339 : * ineligible folios. This causes the VM to not reclaim any
2340 : * folios, triggering a premature OOM.
2341 : * Account all pages in a folio.
2342 : */
2343 0 : scan += nr_pages;
2344 :
2345 0 : if (!folio_test_lru(folio))
2346 : goto move;
2347 0 : if (!sc->may_unmap && folio_mapped(folio))
2348 : goto move;
2349 :
2350 : /*
2351 : * Be careful not to clear the lru flag until after we're
2352 : * sure the folio is not being freed elsewhere -- the
2353 : * folio release code relies on it.
2354 : */
2355 0 : if (unlikely(!folio_try_get(folio)))
2356 : goto move;
2357 :
2358 0 : if (!folio_test_clear_lru(folio)) {
2359 : /* Another thread is already isolating this folio */
2360 : folio_put(folio);
2361 : goto move;
2362 : }
2363 :
2364 0 : nr_taken += nr_pages;
2365 0 : nr_zone_taken[folio_zonenum(folio)] += nr_pages;
2366 0 : move_to = dst;
2367 : move:
2368 0 : list_move(&folio->lru, move_to);
2369 : }
2370 :
2371 : /*
2372 : * Splice any skipped folios to the start of the LRU list. Note that
2373 : * this disrupts the LRU order when reclaiming for lower zones but
2374 : * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
2375 : * scanning would soon rescan the same folios to skip and waste lots
2376 : * of cpu cycles.
2377 : */
2378 0 : if (!list_empty(&folios_skipped)) {
2379 : int zid;
2380 :
2381 : list_splice(&folios_skipped, src);
2382 0 : for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2383 0 : if (!nr_skipped[zid])
2384 0 : continue;
2385 :
2386 0 : __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
2387 0 : skipped += nr_skipped[zid];
2388 : }
2389 : }
2390 0 : *nr_scanned = total_scan;
2391 0 : trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
2392 : total_scan, skipped, nr_taken,
2393 : sc->may_unmap ? 0 : ISOLATE_UNMAPPED, lru);
2394 0 : update_lru_sizes(lruvec, lru, nr_zone_taken);
2395 0 : return nr_taken;
2396 : }
2397 :
2398 : /**
2399 : * folio_isolate_lru() - Try to isolate a folio from its LRU list.
2400 : * @folio: Folio to isolate from its LRU list.
2401 : *
2402 : * Isolate a @folio from an LRU list and adjust the vmstat statistic
2403 : * corresponding to whatever LRU list the folio was on.
2404 : *
2405 : * The folio will have its LRU flag cleared. If it was found on the
2406 : * active list, it will have the Active flag set. If it was found on the
2407 : * unevictable list, it will have the Unevictable flag set. These flags
2408 : * may need to be cleared by the caller before letting the page go.
2409 : *
2410 : * Context:
2411 : *
2412 : * (1) Must be called with an elevated refcount on the folio. This is a
2413 : * fundamental difference from isolate_lru_folios() (which is called
2414 : * without a stable reference).
2415 : * (2) The lru_lock must not be held.
2416 : * (3) Interrupts must be enabled.
2417 : *
2418 : * Return: true if the folio was removed from an LRU list.
2419 : * false if the folio was not on an LRU list.
2420 : */
2421 0 : bool folio_isolate_lru(struct folio *folio)
2422 : {
2423 0 : bool ret = false;
2424 :
2425 : VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);
2426 :
2427 0 : if (folio_test_clear_lru(folio)) {
2428 : struct lruvec *lruvec;
2429 :
2430 0 : folio_get(folio);
2431 0 : lruvec = folio_lruvec_lock_irq(folio);
2432 0 : lruvec_del_folio(lruvec, folio);
2433 : unlock_page_lruvec_irq(lruvec);
2434 0 : ret = true;
2435 : }
2436 :
2437 0 : return ret;
2438 : }
2439 :
2440 : /*
2441 : * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
2442 :  * then get rescheduled. When there is a massive number of tasks doing page
2443 :  * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
2444 :  * the LRU list will shrink and be scanned faster than necessary, leading to
2445 : * unnecessary swapping, thrashing and OOM.
2446 : */
2447 0 : static int too_many_isolated(struct pglist_data *pgdat, int file,
2448 : struct scan_control *sc)
2449 : {
2450 : unsigned long inactive, isolated;
2451 : bool too_many;
2452 :
2453 0 : if (current_is_kswapd())
2454 : return 0;
2455 :
2456 0 : if (!writeback_throttling_sane(sc))
2457 : return 0;
2458 :
2459 0 : if (file) {
2460 0 : inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
2461 0 : isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
2462 : } else {
2463 0 : inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
2464 0 : isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
2465 : }
2466 :
2467 : /*
2468 : 	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so that
2469 : 	 * they won't get blocked behind normal direct reclaimers and form a
2470 : 	 * circular deadlock.
2471 : */
2472 0 : if (gfp_has_io_fs(sc->gfp_mask))
2473 0 : inactive >>= 3;
2474 :
2475 0 : too_many = isolated > inactive;
2476 :
2477 : /* Wake up tasks throttled due to too_many_isolated. */
2478 0 : if (!too_many)
2479 : wake_throttle_isolated(pgdat);
2480 :
2481 0 : return too_many;
2482 : }
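     : 
     : /*
     :  * Illustration with made-up numbers: with 80000 inactive file pages, a
     :  * GFP_KERNEL direct reclaimer (__GFP_IO and __GFP_FS both set) is throttled
     :  * once more than 80000 >> 3 = 10000 file pages are isolated, whereas a
     :  * GFP_NOFS reclaimer is only throttled once isolated pages exceed the full
     :  * 80000.
     :  */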
2483 :
2484 : /*
2485 : * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
2486 : * On return, @list is reused as a list of folios to be freed by the caller.
2487 : *
2488 : * Returns the number of pages moved to the given lruvec.
2489 : */
2490 0 : static unsigned int move_folios_to_lru(struct lruvec *lruvec,
2491 : struct list_head *list)
2492 : {
2493 0 : int nr_pages, nr_moved = 0;
2494 0 : LIST_HEAD(folios_to_free);
2495 :
2496 0 : while (!list_empty(list)) {
2497 0 : struct folio *folio = lru_to_folio(list);
2498 :
2499 : VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
2500 0 : list_del(&folio->lru);
2501 0 : if (unlikely(!folio_evictable(folio))) {
2502 0 : spin_unlock_irq(&lruvec->lru_lock);
2503 0 : folio_putback_lru(folio);
2504 0 : spin_lock_irq(&lruvec->lru_lock);
2505 0 : continue;
2506 : }
2507 :
2508 : /*
2509 : * The folio_set_lru needs to be kept here for list integrity.
2510 : * Otherwise:
2511 : * #0 move_folios_to_lru #1 release_pages
2512 : * if (!folio_put_testzero())
2513 : * if (folio_put_testzero())
2514 : * !lru //skip lru_lock
2515 : * folio_set_lru()
2516 : * list_add(&folio->lru,)
2517 : * list_add(&folio->lru,)
2518 : */
2519 0 : folio_set_lru(folio);
2520 :
2521 0 : if (unlikely(folio_put_testzero(folio))) {
2522 0 : __folio_clear_lru_flags(folio);
2523 :
2524 0 : if (unlikely(folio_test_large(folio))) {
2525 0 : spin_unlock_irq(&lruvec->lru_lock);
2526 0 : destroy_large_folio(folio);
2527 0 : spin_lock_irq(&lruvec->lru_lock);
2528 : } else
2529 0 : list_add(&folio->lru, &folios_to_free);
2530 :
2531 0 : continue;
2532 : }
2533 :
2534 : /*
2535 : * All pages were isolated from the same lruvec (and isolation
2536 : * inhibits memcg migration).
2537 : */
2538 : VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
2539 0 : lruvec_add_folio(lruvec, folio);
2540 0 : nr_pages = folio_nr_pages(folio);
2541 0 : nr_moved += nr_pages;
2542 0 : if (folio_test_active(folio))
2543 0 : workingset_age_nonresident(lruvec, nr_pages);
2544 : }
2545 :
2546 : /*
2547 : * To save our caller's stack, now use input list for pages to free.
2548 : */
2549 0 : list_splice(&folios_to_free, list);
2550 :
2551 0 : return nr_moved;
2552 : }
2553 :
2554 : /*
2555 : * If a kernel thread (such as nfsd for loop-back mounts) services a backing
2556 : * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case
2557 : * we should not throttle. Otherwise it is safe to do so.
2558 : */
2559 : static int current_may_throttle(void)
2560 : {
2561 0 : return !(current->flags & PF_LOCAL_THROTTLE);
2562 : }
2563 :
2564 : /*
2565 : * shrink_inactive_list() is a helper for shrink_node(). It returns the number
2566 : * of reclaimed pages
2567 : */
2568 0 : static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
2569 : struct lruvec *lruvec, struct scan_control *sc,
2570 : enum lru_list lru)
2571 : {
2572 0 : LIST_HEAD(folio_list);
2573 : unsigned long nr_scanned;
2574 0 : unsigned int nr_reclaimed = 0;
2575 : unsigned long nr_taken;
2576 : struct reclaim_stat stat;
2577 0 : bool file = is_file_lru(lru);
2578 : enum vm_event_item item;
2579 0 : struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2580 0 : bool stalled = false;
2581 :
2582 0 : while (unlikely(too_many_isolated(pgdat, file, sc))) {
2583 0 : if (stalled)
2584 : return 0;
2585 :
2586 : /* wait a bit for the reclaimer. */
2587 0 : stalled = true;
2588 0 : reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
2589 :
2590 : /* We are about to die and free our memory. Return now. */
2591 0 : if (fatal_signal_pending(current))
2592 : return SWAP_CLUSTER_MAX;
2593 : }
2594 :
2595 0 : lru_add_drain();
2596 :
2597 0 : spin_lock_irq(&lruvec->lru_lock);
2598 :
2599 0 : nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
2600 : &nr_scanned, sc, lru);
2601 :
2602 0 : __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2603 0 : item = PGSCAN_KSWAPD + reclaimer_offset();
2604 0 : if (!cgroup_reclaim(sc))
2605 0 : __count_vm_events(item, nr_scanned);
2606 0 : __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
2607 0 : __count_vm_events(PGSCAN_ANON + file, nr_scanned);
2608 :
2609 0 : spin_unlock_irq(&lruvec->lru_lock);
2610 :
2611 0 : if (nr_taken == 0)
2612 : return 0;
2613 :
2614 0 : nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false);
2615 :
2616 0 : spin_lock_irq(&lruvec->lru_lock);
2617 0 : move_folios_to_lru(lruvec, &folio_list);
2618 :
2619 0 : __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2620 0 : item = PGSTEAL_KSWAPD + reclaimer_offset();
2621 0 : if (!cgroup_reclaim(sc))
2622 0 : __count_vm_events(item, nr_reclaimed);
2623 0 : __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
2624 0 : __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
2625 0 : spin_unlock_irq(&lruvec->lru_lock);
2626 :
2627 0 : lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
2628 0 : mem_cgroup_uncharge_list(&folio_list);
2629 0 : free_unref_page_list(&folio_list);
2630 :
2631 : /*
2632 : * If dirty folios are scanned that are not queued for IO, it
2633 : * implies that flushers are not doing their job. This can
2634 : * happen when memory pressure pushes dirty folios to the end of
2635 : * the LRU before the dirty limits are breached and the dirty
2636 : * data has expired. It can also happen when the proportion of
2637 : * dirty folios grows not through writes but through memory
2638 : * pressure reclaiming all the clean cache. And in some cases,
2639 : * the flushers simply cannot keep up with the allocation
2640 : * rate. Nudge the flusher threads in case they are asleep.
2641 : */
2642 0 : if (stat.nr_unqueued_dirty == nr_taken) {
2643 0 : wakeup_flusher_threads(WB_REASON_VMSCAN);
2644 : /*
2645 : 		 * For cgroupv1, dirty throttling is achieved by waking up
2646 : * the kernel flusher here and later waiting on folios
2647 : * which are in writeback to finish (see shrink_folio_list()).
2648 : *
2649 : * Flusher may not be able to issue writeback quickly
2650 : * enough for cgroupv1 writeback throttling to work
2651 : * on a large system.
2652 : */
2653 0 : if (!writeback_throttling_sane(sc))
2654 : reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
2655 : }
2656 :
2657 0 : sc->nr.dirty += stat.nr_dirty;
2658 0 : sc->nr.congested += stat.nr_congested;
2659 0 : sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2660 0 : sc->nr.writeback += stat.nr_writeback;
2661 0 : sc->nr.immediate += stat.nr_immediate;
2662 0 : sc->nr.taken += nr_taken;
2663 0 : if (file)
2664 0 : sc->nr.file_taken += nr_taken;
2665 :
2666 0 : trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2667 0 : nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2668 0 : return nr_reclaimed;
2669 : }
2670 :
2671 : /*
2672 : * shrink_active_list() moves folios from the active LRU to the inactive LRU.
2673 : *
2674 : * We move them the other way if the folio is referenced by one or more
2675 : * processes.
2676 : *
2677 : * If the folios are mostly unmapped, the processing is fast and it is
2678 : * appropriate to hold lru_lock across the whole operation. But if
2679 : * the folios are mapped, the processing is slow (folio_referenced()), so
2680 : * we should drop lru_lock around each folio. It's impossible to balance
2681 : * this, so instead we remove the folios from the LRU while processing them.
2682 : * It is safe to rely on the active flag against the non-LRU folios in here
2683 : * because nobody will play with that bit on a non-LRU folio.
2684 : *
2685 : * The downside is that we have to touch folio->_refcount against each folio.
2686 : * But we had to alter folio->flags anyway.
2687 : */
2688 0 : static void shrink_active_list(unsigned long nr_to_scan,
2689 : struct lruvec *lruvec,
2690 : struct scan_control *sc,
2691 : enum lru_list lru)
2692 : {
2693 : unsigned long nr_taken;
2694 : unsigned long nr_scanned;
2695 : unsigned long vm_flags;
2696 0 : LIST_HEAD(l_hold); /* The folios which were snipped off */
2697 0 : LIST_HEAD(l_active);
2698 0 : LIST_HEAD(l_inactive);
2699 : unsigned nr_deactivate, nr_activate;
2700 0 : unsigned nr_rotated = 0;
2701 0 : int file = is_file_lru(lru);
2702 0 : struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2703 :
2704 0 : lru_add_drain();
2705 :
2706 0 : spin_lock_irq(&lruvec->lru_lock);
2707 :
2708 0 : nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
2709 : &nr_scanned, sc, lru);
2710 :
2711 0 : __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2712 :
2713 0 : if (!cgroup_reclaim(sc))
2714 0 : __count_vm_events(PGREFILL, nr_scanned);
2715 0 : __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2716 :
2717 0 : spin_unlock_irq(&lruvec->lru_lock);
2718 :
2719 0 : while (!list_empty(&l_hold)) {
2720 : struct folio *folio;
2721 :
2722 0 : cond_resched();
2723 0 : folio = lru_to_folio(&l_hold);
2724 0 : list_del(&folio->lru);
2725 :
2726 0 : if (unlikely(!folio_evictable(folio))) {
2727 0 : folio_putback_lru(folio);
2728 0 : continue;
2729 : }
2730 :
2731 0 : if (unlikely(buffer_heads_over_limit)) {
2732 0 : if (folio_test_private(folio) && folio_trylock(folio)) {
2733 0 : if (folio_test_private(folio))
2734 0 : filemap_release_folio(folio, 0);
2735 0 : folio_unlock(folio);
2736 : }
2737 : }
2738 :
2739 : /* Referenced or rmap lock contention: rotate */
2740 0 : if (folio_referenced(folio, 0, sc->target_mem_cgroup,
2741 : &vm_flags) != 0) {
2742 : /*
2743 : * Identify referenced, file-backed active folios and
2744 : 			 * give them one more trip around the active list, so
2745 : 			 * that executable code gets a better chance to stay in
2746 : * memory under moderate memory pressure. Anon folios
2747 : * are not likely to be evicted by use-once streaming
2748 : * IO, plus JVM can create lots of anon VM_EXEC folios,
2749 : * so we ignore them here.
2750 : */
2751 0 : if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) {
2752 0 : nr_rotated += folio_nr_pages(folio);
2753 0 : list_add(&folio->lru, &l_active);
2754 0 : continue;
2755 : }
2756 : }
2757 :
2758 0 : folio_clear_active(folio); /* we are de-activating */
2759 0 : folio_set_workingset(folio);
2760 0 : list_add(&folio->lru, &l_inactive);
2761 : }
2762 :
2763 : /*
2764 : * Move folios back to the lru list.
2765 : */
2766 0 : spin_lock_irq(&lruvec->lru_lock);
2767 :
2768 0 : nr_activate = move_folios_to_lru(lruvec, &l_active);
2769 0 : nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
2770 : /* Keep all free folios in l_active list */
2771 0 : list_splice(&l_inactive, &l_active);
2772 :
2773 0 : __count_vm_events(PGDEACTIVATE, nr_deactivate);
2774 0 : __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2775 :
2776 0 : __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2777 0 : spin_unlock_irq(&lruvec->lru_lock);
2778 :
2779 0 : if (nr_rotated)
2780 0 : lru_note_cost(lruvec, file, 0, nr_rotated);
2781 0 : mem_cgroup_uncharge_list(&l_active);
2782 0 : free_unref_page_list(&l_active);
2783 0 : trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2784 0 : nr_deactivate, nr_rotated, sc->priority, file);
2785 0 : }
2786 :
2787 0 : static unsigned int reclaim_folio_list(struct list_head *folio_list,
2788 : struct pglist_data *pgdat)
2789 : {
2790 : struct reclaim_stat dummy_stat;
2791 : unsigned int nr_reclaimed;
2792 : struct folio *folio;
2793 0 : struct scan_control sc = {
2794 : .gfp_mask = GFP_KERNEL,
2795 : .may_writepage = 1,
2796 : .may_unmap = 1,
2797 : .may_swap = 1,
2798 : .no_demotion = 1,
2799 : };
2800 :
2801 0 : nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
2802 0 : while (!list_empty(folio_list)) {
2803 0 : folio = lru_to_folio(folio_list);
2804 0 : list_del(&folio->lru);
2805 0 : folio_putback_lru(folio);
2806 : }
2807 :
2808 0 : return nr_reclaimed;
2809 : }
2810 :
2811 0 : unsigned long reclaim_pages(struct list_head *folio_list)
2812 : {
2813 : int nid;
2814 0 : unsigned int nr_reclaimed = 0;
2815 0 : LIST_HEAD(node_folio_list);
2816 : unsigned int noreclaim_flag;
2817 :
2818 0 : if (list_empty(folio_list))
2819 : return nr_reclaimed;
2820 :
2821 0 : noreclaim_flag = memalloc_noreclaim_save();
2822 :
2823 0 : nid = folio_nid(lru_to_folio(folio_list));
2824 : do {
2825 0 : struct folio *folio = lru_to_folio(folio_list);
2826 :
2827 : if (nid == folio_nid(folio)) {
2828 0 : folio_clear_active(folio);
2829 0 : list_move(&folio->lru, &node_folio_list);
2830 0 : continue;
2831 : }
2832 :
2833 : nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
2834 : nid = folio_nid(lru_to_folio(folio_list));
2835 0 : } while (!list_empty(folio_list));
2836 :
2837 0 : nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
2838 :
2839 0 : memalloc_noreclaim_restore(noreclaim_flag);
2840 :
2841 0 : return nr_reclaimed;
2842 : }
2843 :
2844 0 : static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2845 : struct lruvec *lruvec, struct scan_control *sc)
2846 : {
2847 0 : if (is_active_lru(lru)) {
2848 0 : if (sc->may_deactivate & (1 << is_file_lru(lru)))
2849 0 : shrink_active_list(nr_to_scan, lruvec, sc, lru);
2850 : else
2851 0 : sc->skipped_deactivate = 1;
2852 : return 0;
2853 : }
2854 :
2855 0 : return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2856 : }
2857 :
2858 : /*
2859 : * The inactive anon list should be small enough that the VM never has
2860 : * to do too much work.
2861 : *
2862 : * The inactive file list should be small enough to leave most memory
2863 : * to the established workingset on the scan-resistant active list,
2864 : * but large enough to avoid thrashing the aggregate readahead window.
2865 : *
2866 : * Both inactive lists should also be large enough that each inactive
2867 : * folio has a chance to be referenced again before it is reclaimed.
2868 : *
2869 : * If that fails and refaulting is observed, the inactive list grows.
2870 : *
2871 : * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios
2872 : * on this LRU, maintained by the pageout code. An inactive_ratio
2873 : * of 3 means 3:1 or 25% of the folios are kept on the inactive list.
2874 : *
2875 : * total target max
2876 : * memory ratio inactive
2877 : * -------------------------------------
2878 : * 10MB 1 5MB
2879 : * 100MB 1 50MB
2880 : * 1GB 3 250MB
2881 : * 10GB 10 0.9GB
2882 : * 100GB 31 3GB
2883 : * 1TB 101 10GB
2884 : * 10TB 320 32GB
2885 : */
2886 0 : static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2887 : {
2888 0 : enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2889 : unsigned long inactive, active;
2890 : unsigned long inactive_ratio;
2891 : unsigned long gb;
2892 :
2893 0 : inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2894 0 : active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2895 :
2896 0 : gb = (inactive + active) >> (30 - PAGE_SHIFT);
2897 0 : if (gb)
2898 0 : inactive_ratio = int_sqrt(10 * gb);
2899 : else
2900 : inactive_ratio = 1;
2901 :
2902 0 : return inactive * inactive_ratio < active;
2903 : }
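     : 
     : /*
     :  * Worked example with illustrative numbers: with 4 GiB of anon folios on
     :  * the LRUs (gb = 4), inactive_ratio = int_sqrt(40) = 6, so the inactive
     :  * anon list is considered low once inactive * 6 < active, i.e. once it
     :  * falls below roughly 1/7 of the anon total.
     :  */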
2904 :
2905 : enum scan_balance {
2906 : SCAN_EQUAL,
2907 : SCAN_FRACT,
2908 : SCAN_ANON,
2909 : SCAN_FILE,
2910 : };
2911 :
2912 0 : static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
2913 : {
2914 : unsigned long file;
2915 : struct lruvec *target_lruvec;
2916 :
2917 : if (lru_gen_enabled())
2918 : return;
2919 :
2920 0 : target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2921 :
2922 : /*
2923 : * Flush the memory cgroup stats, so that we read accurate per-memcg
2924 : * lruvec stats for heuristics.
2925 : */
2926 : mem_cgroup_flush_stats();
2927 :
2928 : /*
2929 : * Determine the scan balance between anon and file LRUs.
2930 : */
2931 0 : spin_lock_irq(&target_lruvec->lru_lock);
2932 0 : sc->anon_cost = target_lruvec->anon_cost;
2933 0 : sc->file_cost = target_lruvec->file_cost;
2934 0 : spin_unlock_irq(&target_lruvec->lru_lock);
2935 :
2936 : /*
2937 : * Target desirable inactive:active list ratios for the anon
2938 : * and file LRU lists.
2939 : */
2940 0 : if (!sc->force_deactivate) {
2941 : unsigned long refaults;
2942 :
2943 : /*
2944 : * When refaults are being observed, it means a new
2945 : * workingset is being established. Deactivate to get
2946 : * rid of any stale active pages quickly.
2947 : */
2948 0 : refaults = lruvec_page_state(target_lruvec,
2949 : WORKINGSET_ACTIVATE_ANON);
2950 0 : if (refaults != target_lruvec->refaults[WORKINGSET_ANON] ||
2951 0 : inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
2952 0 : sc->may_deactivate |= DEACTIVATE_ANON;
2953 : else
2954 0 : sc->may_deactivate &= ~DEACTIVATE_ANON;
2955 :
2956 0 : refaults = lruvec_page_state(target_lruvec,
2957 : WORKINGSET_ACTIVATE_FILE);
2958 0 : if (refaults != target_lruvec->refaults[WORKINGSET_FILE] ||
2959 0 : inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2960 0 : sc->may_deactivate |= DEACTIVATE_FILE;
2961 : else
2962 0 : sc->may_deactivate &= ~DEACTIVATE_FILE;
2963 : } else
2964 0 : sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2965 :
2966 : /*
2967 : * If we have plenty of inactive file pages that aren't
2968 : * thrashing, try to reclaim those first before touching
2969 : * anonymous pages.
2970 : */
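     : 	/*
     : 	 * For instance, at the default scan priority of 12, "plenty" means at
     : 	 * least 2^12 = 4096 inactive file pages (so that file >> sc->priority
     : 	 * is non-zero) while the file working set is not refaulting
     : 	 * (DEACTIVATE_FILE is not set above).
     : 	 */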
2971 0 : file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2972 0 : if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
2973 0 : sc->cache_trim_mode = 1;
2974 : else
2975 0 : sc->cache_trim_mode = 0;
2976 :
2977 : /*
2978 : * Prevent the reclaimer from falling into the cache trap: as
2979 : * cache pages start out inactive, every cache fault will tip
2980 : * the scan balance towards the file LRU. And as the file LRU
2981 : * shrinks, so does the window for rotation from references.
2982 : * This means we have a runaway feedback loop where a tiny
2983 : * thrashing file LRU becomes infinitely more attractive than
2984 : * anon pages. Try to detect this based on file LRU size.
2985 : */
2986 0 : if (!cgroup_reclaim(sc)) {
2987 0 : unsigned long total_high_wmark = 0;
2988 : unsigned long free, anon;
2989 : int z;
2990 :
2991 0 : free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2992 0 : file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2993 0 : node_page_state(pgdat, NR_INACTIVE_FILE);
2994 :
2995 0 : for (z = 0; z < MAX_NR_ZONES; z++) {
2996 0 : struct zone *zone = &pgdat->node_zones[z];
2997 :
2998 0 : if (!managed_zone(zone))
2999 0 : continue;
3000 :
3001 0 : total_high_wmark += high_wmark_pages(zone);
3002 : }
3003 :
3004 : /*
3005 : * Consider anon: if that's low too, this isn't a
3006 : * runaway file reclaim problem, but rather just
3007 : * extreme pressure. Reclaim as per usual then.
3008 : */
3009 0 : anon = node_page_state(pgdat, NR_INACTIVE_ANON);
3010 :
3011 0 : sc->file_is_tiny =
3012 0 : file + free <= total_high_wmark &&
3013 0 : !(sc->may_deactivate & DEACTIVATE_ANON) &&
3014 0 : anon >> sc->priority;
3015 : }
3016 : }
3017 :
3018 : /*
3019 : * Determine how aggressively the anon and file LRU lists should be
3020 : * scanned.
3021 : *
3022 : * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan
3023 : * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan
3024 : */
3025 0 : static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
3026 : unsigned long *nr)
3027 : {
3028 0 : struct pglist_data *pgdat = lruvec_pgdat(lruvec);
3029 0 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3030 : unsigned long anon_cost, file_cost, total_cost;
3031 0 : int swappiness = mem_cgroup_swappiness(memcg);
3032 : u64 fraction[ANON_AND_FILE];
3033 0 : u64 denominator = 0; /* gcc */
3034 : enum scan_balance scan_balance;
3035 : unsigned long ap, fp;
3036 : enum lru_list lru;
3037 :
3038 : /* If we have no swap space, do not bother scanning anon folios. */
3039 0 : if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
3040 : scan_balance = SCAN_FILE;
3041 : goto out;
3042 : }
3043 :
3044 : /*
3045 : * Global reclaim will swap to prevent OOM even with no
3046 : * swappiness, but memcg users want to use this knob to
3047 : * disable swapping for individual groups completely when
3048 : * using the memory controller's swap limit feature would be
3049 : * too expensive.
3050 : */
3051 0 : if (cgroup_reclaim(sc) && !swappiness) {
3052 : scan_balance = SCAN_FILE;
3053 : goto out;
3054 : }
3055 :
3056 : /*
3057 : * Do not apply any pressure balancing cleverness when the
3058 : * system is close to OOM, scan both anon and file equally
3059 : * (unless the swappiness setting disagrees with swapping).
3060 : */
3061 0 : if (!sc->priority && swappiness) {
3062 : scan_balance = SCAN_EQUAL;
3063 : goto out;
3064 : }
3065 :
3066 : /*
3067 : * If the system is almost out of file pages, force-scan anon.
3068 : */
3069 0 : if (sc->file_is_tiny) {
3070 : scan_balance = SCAN_ANON;
3071 : goto out;
3072 : }
3073 :
3074 : /*
3075 : * If there is enough inactive page cache, we do not reclaim
3076 : 	 * anything from the anonymous working set right now.
3077 : */
3078 0 : if (sc->cache_trim_mode) {
3079 : scan_balance = SCAN_FILE;
3080 : goto out;
3081 : }
3082 :
3083 0 : scan_balance = SCAN_FRACT;
3084 : /*
3085 : * Calculate the pressure balance between anon and file pages.
3086 : *
3087 : * The amount of pressure we put on each LRU is inversely
3088 : * proportional to the cost of reclaiming each list, as
3089 : * determined by the share of pages that are refaulting, times
3090 : * the relative IO cost of bringing back a swapped out
3091 : * anonymous page vs reloading a filesystem page (swappiness).
3092 : *
3093 : * Although we limit that influence to ensure no list gets
3094 : * left behind completely: at least a third of the pressure is
3095 : * applied, before swappiness.
3096 : *
3097 : * With swappiness at 100, anon and file have equal IO cost.
3098 : */
3099 0 : total_cost = sc->anon_cost + sc->file_cost;
3100 0 : anon_cost = total_cost + sc->anon_cost;
3101 0 : file_cost = total_cost + sc->file_cost;
3102 0 : total_cost = anon_cost + file_cost;
3103 :
3104 0 : ap = swappiness * (total_cost + 1);
3105 0 : ap /= anon_cost + 1;
3106 :
3107 0 : fp = (200 - swappiness) * (total_cost + 1);
3108 0 : fp /= file_cost + 1;
3109 :
3110 0 : fraction[0] = ap;
3111 0 : fraction[1] = fp;
3112 0 : denominator = ap + fp;
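     : 	/*
     : 	 * Worked example with illustrative numbers: sc->anon_cost = 100,
     : 	 * sc->file_cost = 300 and swappiness = 60 give anon_cost = 500,
     : 	 * file_cost = 700 and total_cost = 1200, so ap = 60 * 1201 / 501 = 143
     : 	 * and fp = 140 * 1201 / 701 = 239; anon then receives roughly 37% and
     : 	 * file roughly 63% of the scan pressure.
     : 	 */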
3113 : out:
3114 0 : for_each_evictable_lru(lru) {
3115 0 : int file = is_file_lru(lru);
3116 : unsigned long lruvec_size;
3117 : unsigned long low, min;
3118 : unsigned long scan;
3119 :
3120 0 : lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
3121 0 : mem_cgroup_protection(sc->target_mem_cgroup, memcg,
3122 : &min, &low);
3123 :
3124 : if (min || low) {
3125 : /*
3126 : * Scale a cgroup's reclaim pressure by proportioning
3127 : * its current usage to its memory.low or memory.min
3128 : * setting.
3129 : *
3130 : * This is important, as otherwise scanning aggression
3131 : * becomes extremely binary -- from nothing as we
3132 : * approach the memory protection threshold, to totally
3133 : * nominal as we exceed it. This results in requiring
3134 : * setting extremely liberal protection thresholds. It
3135 : * also means we simply get no protection at all if we
3136 : * set it too low, which is not ideal.
3137 : *
3138 : * If there is any protection in place, we reduce scan
3139 : * pressure by how much of the total memory used is
3140 : * within protection thresholds.
3141 : *
3142 : * There is one special case: in the first reclaim pass,
3143 : * we skip over all groups that are within their low
3144 : * protection. If that fails to reclaim enough pages to
3145 : * satisfy the reclaim goal, we come back and override
3146 : * the best-effort low protection. However, we still
3147 : * ideally want to honor how well-behaved groups are in
3148 : * that case instead of simply punishing them all
3149 : * equally. As such, we reclaim them based on how much
3150 : * memory they are using, reducing the scan pressure
3151 : * again by how much of the total memory used is under
3152 : * hard protection.
3153 : */
3154 : unsigned long cgroup_size = mem_cgroup_size(memcg);
3155 : unsigned long protection;
3156 :
3157 : /* memory.low scaling, make sure we retry before OOM */
3158 : if (!sc->memcg_low_reclaim && low > min) {
3159 : protection = low;
3160 : sc->memcg_low_skipped = 1;
3161 : } else {
3162 : protection = min;
3163 : }
3164 :
3165 : /* Avoid TOCTOU with earlier protection check */
3166 : cgroup_size = max(cgroup_size, protection);
3167 :
3168 : scan = lruvec_size - lruvec_size * protection /
3169 : (cgroup_size + 1);
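     : 			/*
     : 			 * With illustrative numbers: a cgroup using 1 GiB
     : 			 * (262144 pages) under memory.min = 512 MiB (131072
     : 			 * pages) and lruvec_size = 100000 gets
     : 			 * scan = 100000 - 100000 * 131072 / 262145 = 50001,
     : 			 * i.e. roughly half the unprotected pressure, before
     : 			 * the priority shift below.
     : 			 */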
3170 :
3171 : /*
3172 : * Minimally target SWAP_CLUSTER_MAX pages to keep
3173 : * reclaim moving forwards, avoiding decrementing
3174 : * sc->priority further than desirable.
3175 : */
3176 : scan = max(scan, SWAP_CLUSTER_MAX);
3177 : } else {
3178 0 : scan = lruvec_size;
3179 : }
3180 :
3181 0 : scan >>= sc->priority;
3182 :
3183 : /*
3184 : * If the cgroup's already been deleted, make sure to
3185 : * scrape out the remaining cache.
3186 : */
3187 : if (!scan && !mem_cgroup_online(memcg))
3188 : scan = min(lruvec_size, SWAP_CLUSTER_MAX);
3189 :
3190 0 : switch (scan_balance) {
3191 : case SCAN_EQUAL:
3192 : /* Scan lists relative to size */
3193 : break;
3194 : case SCAN_FRACT:
3195 : /*
3196 : * Scan types proportional to swappiness and
3197 : * their relative recent reclaim efficiency.
3198 : * Make sure we don't miss the last page on
3199 : * the offlined memory cgroups because of a
3200 : * round-off error.
3201 : */
3202 0 : scan = mem_cgroup_online(memcg) ?
3203 0 : div64_u64(scan * fraction[file], denominator) :
3204 : DIV64_U64_ROUND_UP(scan * fraction[file],
3205 : denominator);
3206 0 : break;
3207 : case SCAN_FILE:
3208 : case SCAN_ANON:
3209 : /* Scan one type exclusively */
3210 0 : if ((scan_balance == SCAN_FILE) != file)
3211 0 : scan = 0;
3212 : break;
3213 : default:
3214 : /* Look ma, no brain */
3215 0 : BUG();
3216 : }
3217 :
3218 0 : nr[lru] = scan;
3219 : }
3220 0 : }
3221 :
3222 : /*
3223 : * Anonymous LRU management is a waste if there is
3224 : * ultimately no way to reclaim the memory.
3225 : */
3226 : static bool can_age_anon_pages(struct pglist_data *pgdat,
3227 : struct scan_control *sc)
3228 : {
3229 : /* Aging the anon LRU is valuable if swap is present: */
3230 0 : if (total_swap_pages > 0)
3231 : return true;
3232 :
3233 : /* Also valuable if anon pages can be demoted: */
3234 0 : return can_demote(pgdat->node_id, sc);
3235 : }
3236 :
3237 : #ifdef CONFIG_LRU_GEN
3238 :
3239 : #ifdef CONFIG_LRU_GEN_ENABLED
3240 : DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
3241 : #define get_cap(cap) static_branch_likely(&lru_gen_caps[cap])
3242 : #else
3243 : DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
3244 : #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
3245 : #endif
3246 :
3247 : static bool should_walk_mmu(void)
3248 : {
3249 : return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
3250 : }
3251 :
3252 : static bool should_clear_pmd_young(void)
3253 : {
3254 : return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG);
3255 : }
3256 :
3257 : /******************************************************************************
3258 : * shorthand helpers
3259 : ******************************************************************************/
3260 :
3261 : #define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset))
3262 :
3263 : #define DEFINE_MAX_SEQ(lruvec) \
3264 : unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
3265 :
3266 : #define DEFINE_MIN_SEQ(lruvec) \
3267 : unsigned long min_seq[ANON_AND_FILE] = { \
3268 : READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
3269 : READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
3270 : }
3271 :
3272 : #define for_each_gen_type_zone(gen, type, zone) \
3273 : for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
3274 : for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
3275 : for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
3276 :
3277 : #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS)
3278 : #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS)
3279 :
3280 : static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
3281 : {
3282 : struct pglist_data *pgdat = NODE_DATA(nid);
3283 :
3284 : #ifdef CONFIG_MEMCG
3285 : if (memcg) {
3286 : struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
3287 :
3288 : /* see the comment in mem_cgroup_lruvec() */
3289 : if (!lruvec->pgdat)
3290 : lruvec->pgdat = pgdat;
3291 :
3292 : return lruvec;
3293 : }
3294 : #endif
3295 : VM_WARN_ON_ONCE(!mem_cgroup_disabled());
3296 :
3297 : return &pgdat->__lruvec;
3298 : }
3299 :
3300 : static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
3301 : {
3302 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3303 : struct pglist_data *pgdat = lruvec_pgdat(lruvec);
3304 :
3305 : if (!sc->may_swap)
3306 : return 0;
3307 :
3308 : if (!can_demote(pgdat->node_id, sc) &&
3309 : mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
3310 : return 0;
3311 :
3312 : return mem_cgroup_swappiness(memcg);
3313 : }
3314 :
3315 : static int get_nr_gens(struct lruvec *lruvec, int type)
3316 : {
3317 : return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
3318 : }
3319 :
3320 : static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
3321 : {
3322 : /* see the comment on lru_gen_folio */
3323 : return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
3324 : get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
3325 : get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
3326 : }
3327 :
3328 : /******************************************************************************
3329 : * Bloom filters
3330 : ******************************************************************************/
3331 :
3332 : /*
3333 :  * Bloom filters with m=1<<15, k=2 and false positive rates of ~1/5 when
3334 : * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of
3335 : * bits in a bitmap, k is the number of hash functions and n is the number of
3336 : * inserted items.
3337 : *
3338 : * Page table walkers use one of the two filters to reduce their search space.
3339 : * To get rid of non-leaf entries that no longer have enough leaf entries, the
3340 : * aging uses the double-buffering technique to flip to the other filter each
3341 : * time it produces a new generation. For non-leaf entries that have enough
3342 : * leaf entries, the aging carries them over to the next generation in
3343 :  * walk_pmd_range(); the eviction also reports them when walking the rmap
3344 : * in lru_gen_look_around().
3345 : *
3346 : * For future optimizations:
3347 : * 1. It's not necessary to keep both filters all the time. The spare one can be
3348 : * freed after the RCU grace period and reallocated if needed again.
3349 : * 2. When reallocating, it's worth scaling its size according to the number
3350 : * of inserted entries in the other filter, to reduce the memory overhead on
3351 : * small systems and false positives on large systems.
3352 : * 3. Jenkins' hash function is an alternative to Knuth's.
3353 : */
3354 : #define BLOOM_FILTER_SHIFT 15
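
/*
 * For reference, the rates quoted above follow from the standard
 * approximation p ~= (1 - e^(-k*n/m))^k: with m = 1<<15 and k = 2,
 * n = 10,000 gives (1 - e^-0.61)^2 ~= 0.21 (~1/5) and n = 20,000 gives
 * (1 - e^-1.22)^2 ~= 0.50 (~1/2).
 */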
3355 :
3356 : static inline int filter_gen_from_seq(unsigned long seq)
3357 : {
3358 : return seq % NR_BLOOM_FILTERS;
3359 : }
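
/*
 * A minimal sketch of the double-buffering, assuming NR_BLOOM_FILTERS is 2:
 * the walkers test the filter selected by max_seq (see walk_pmd_range()),
 * while the aging resets and repopulates the filter selected by max_seq + 1
 * (see reset_bloom_filter() in iterate_mm_list() and update_bloom_filter()
 * in walk_pmd_range()). Consecutive generations therefore alternate between
 * the two bitmaps.
 */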
3360 :
3361 : static void get_item_key(void *item, int *key)
3362 : {
3363 : u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
3364 :
3365 : BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
3366 :
3367 : key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
3368 : key[1] = hash >> BLOOM_FILTER_SHIFT;
3369 : }
3370 :
3371 : static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
3372 : {
3373 : int key[2];
3374 : unsigned long *filter;
3375 : int gen = filter_gen_from_seq(seq);
3376 :
3377 : filter = READ_ONCE(lruvec->mm_state.filters[gen]);
3378 : if (!filter)
3379 : return true;
3380 :
3381 : get_item_key(item, key);
3382 :
3383 : return test_bit(key[0], filter) && test_bit(key[1], filter);
3384 : }
3385 :
3386 : static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
3387 : {
3388 : int key[2];
3389 : unsigned long *filter;
3390 : int gen = filter_gen_from_seq(seq);
3391 :
3392 : filter = READ_ONCE(lruvec->mm_state.filters[gen]);
3393 : if (!filter)
3394 : return;
3395 :
3396 : get_item_key(item, key);
3397 :
3398 : if (!test_bit(key[0], filter))
3399 : set_bit(key[0], filter);
3400 : if (!test_bit(key[1], filter))
3401 : set_bit(key[1], filter);
3402 : }
3403 :
3404 : static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
3405 : {
3406 : unsigned long *filter;
3407 : int gen = filter_gen_from_seq(seq);
3408 :
3409 : filter = lruvec->mm_state.filters[gen];
3410 : if (filter) {
3411 : bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
3412 : return;
3413 : }
3414 :
3415 : filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
3416 : __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
3417 : WRITE_ONCE(lruvec->mm_state.filters[gen], filter);
3418 : }
3419 :
3420 : /******************************************************************************
3421 : * mm_struct list
3422 : ******************************************************************************/
3423 :
3424 : static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
3425 : {
3426 : static struct lru_gen_mm_list mm_list = {
3427 : .fifo = LIST_HEAD_INIT(mm_list.fifo),
3428 : .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
3429 : };
3430 :
3431 : #ifdef CONFIG_MEMCG
3432 : if (memcg)
3433 : return &memcg->mm_list;
3434 : #endif
3435 : VM_WARN_ON_ONCE(!mem_cgroup_disabled());
3436 :
3437 : return &mm_list;
3438 : }
3439 :
3440 : void lru_gen_add_mm(struct mm_struct *mm)
3441 : {
3442 : int nid;
3443 : struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
3444 : struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3445 :
3446 : VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
3447 : #ifdef CONFIG_MEMCG
3448 : VM_WARN_ON_ONCE(mm->lru_gen.memcg);
3449 : mm->lru_gen.memcg = memcg;
3450 : #endif
3451 : spin_lock(&mm_list->lock);
3452 :
3453 : for_each_node_state(nid, N_MEMORY) {
3454 : struct lruvec *lruvec = get_lruvec(memcg, nid);
3455 :
3456 : /* the first addition since the last iteration */
3457 : if (lruvec->mm_state.tail == &mm_list->fifo)
3458 : lruvec->mm_state.tail = &mm->lru_gen.list;
3459 : }
3460 :
3461 : list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
3462 :
3463 : spin_unlock(&mm_list->lock);
3464 : }
3465 :
3466 : void lru_gen_del_mm(struct mm_struct *mm)
3467 : {
3468 : int nid;
3469 : struct lru_gen_mm_list *mm_list;
3470 : struct mem_cgroup *memcg = NULL;
3471 :
3472 : if (list_empty(&mm->lru_gen.list))
3473 : return;
3474 :
3475 : #ifdef CONFIG_MEMCG
3476 : memcg = mm->lru_gen.memcg;
3477 : #endif
3478 : mm_list = get_mm_list(memcg);
3479 :
3480 : spin_lock(&mm_list->lock);
3481 :
3482 : for_each_node(nid) {
3483 : struct lruvec *lruvec = get_lruvec(memcg, nid);
3484 :
3485 : /* where the current iteration continues after */
3486 : if (lruvec->mm_state.head == &mm->lru_gen.list)
3487 : lruvec->mm_state.head = lruvec->mm_state.head->prev;
3488 :
3489 : /* where the last iteration ended before */
3490 : if (lruvec->mm_state.tail == &mm->lru_gen.list)
3491 : lruvec->mm_state.tail = lruvec->mm_state.tail->next;
3492 : }
3493 :
3494 : list_del_init(&mm->lru_gen.list);
3495 :
3496 : spin_unlock(&mm_list->lock);
3497 :
3498 : #ifdef CONFIG_MEMCG
3499 : mem_cgroup_put(mm->lru_gen.memcg);
3500 : mm->lru_gen.memcg = NULL;
3501 : #endif
3502 : }
3503 :
3504 : #ifdef CONFIG_MEMCG
3505 : void lru_gen_migrate_mm(struct mm_struct *mm)
3506 : {
3507 : struct mem_cgroup *memcg;
3508 : struct task_struct *task = rcu_dereference_protected(mm->owner, true);
3509 :
3510 : VM_WARN_ON_ONCE(task->mm != mm);
3511 : lockdep_assert_held(&task->alloc_lock);
3512 :
3513 : /* for mm_update_next_owner() */
3514 : if (mem_cgroup_disabled())
3515 : return;
3516 :
3517 : /* migration can happen before addition */
3518 : if (!mm->lru_gen.memcg)
3519 : return;
3520 :
3521 : rcu_read_lock();
3522 : memcg = mem_cgroup_from_task(task);
3523 : rcu_read_unlock();
3524 : if (memcg == mm->lru_gen.memcg)
3525 : return;
3526 :
3527 : VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
3528 :
3529 : lru_gen_del_mm(mm);
3530 : lru_gen_add_mm(mm);
3531 : }
3532 : #endif
3533 :
3534 : static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
3535 : {
3536 : int i;
3537 : int hist;
3538 :
3539 : lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
3540 :
3541 : if (walk) {
3542 : hist = lru_hist_from_seq(walk->max_seq);
3543 :
3544 : for (i = 0; i < NR_MM_STATS; i++) {
3545 : WRITE_ONCE(lruvec->mm_state.stats[hist][i],
3546 : lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]);
3547 : walk->mm_stats[i] = 0;
3548 : }
3549 : }
3550 :
3551 : if (NR_HIST_GENS > 1 && last) {
3552 : hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
3553 :
3554 : for (i = 0; i < NR_MM_STATS; i++)
3555 : WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0);
3556 : }
3557 : }
3558 :
3559 : static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
3560 : {
3561 : int type;
3562 : unsigned long size = 0;
3563 : struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3564 : int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
3565 :
3566 : if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
3567 : return true;
3568 :
3569 : clear_bit(key, &mm->lru_gen.bitmap);
3570 :
3571 : for (type = !walk->can_swap; type < ANON_AND_FILE; type++) {
3572 : size += type ? get_mm_counter(mm, MM_FILEPAGES) :
3573 : get_mm_counter(mm, MM_ANONPAGES) +
3574 : get_mm_counter(mm, MM_SHMEMPAGES);
3575 : }
3576 :
3577 : if (size < MIN_LRU_BATCH)
3578 : return true;
3579 :
3580 : return !mmget_not_zero(mm);
3581 : }
3582 :
3583 : static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
3584 : struct mm_struct **iter)
3585 : {
3586 : bool first = false;
3587 : bool last = false;
3588 : struct mm_struct *mm = NULL;
3589 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3590 : struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3591 : struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
3592 :
3593 : /*
3594 : * mm_state->seq is incremented after each iteration of mm_list. There
3595 : * are three interesting cases for this page table walker:
3596 : * 1. It tries to start a new iteration with a stale max_seq: there is
3597 : * nothing left to do.
3598 : * 2. It started the next iteration: it needs to reset the Bloom filter
3599 : * so that a fresh set of PTE tables can be recorded.
3600 : * 3. It ended the current iteration: it needs to reset the mm stats
3601 : * counters and tell its caller to increment max_seq.
3602 : */
3603 : spin_lock(&mm_list->lock);
3604 :
3605 : VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq);
3606 :
3607 : if (walk->max_seq <= mm_state->seq)
3608 : goto done;
3609 :
3610 : if (!mm_state->head)
3611 : mm_state->head = &mm_list->fifo;
3612 :
3613 : if (mm_state->head == &mm_list->fifo)
3614 : first = true;
3615 :
3616 : do {
3617 : mm_state->head = mm_state->head->next;
3618 : if (mm_state->head == &mm_list->fifo) {
3619 : WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3620 : last = true;
3621 : break;
3622 : }
3623 :
3624 : /* force scan for those added after the last iteration */
3625 : if (!mm_state->tail || mm_state->tail == mm_state->head) {
3626 : mm_state->tail = mm_state->head->next;
3627 : walk->force_scan = true;
3628 : }
3629 :
3630 : mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
3631 : if (should_skip_mm(mm, walk))
3632 : mm = NULL;
3633 : } while (!mm);
3634 : done:
3635 : if (*iter || last)
3636 : reset_mm_stats(lruvec, walk, last);
3637 :
3638 : spin_unlock(&mm_list->lock);
3639 :
3640 : if (mm && first)
3641 : reset_bloom_filter(lruvec, walk->max_seq + 1);
3642 :
3643 : if (*iter)
3644 : mmput_async(*iter);
3645 :
3646 : *iter = mm;
3647 :
3648 : return last;
3649 : }
3650 :
3651 : static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
3652 : {
3653 : bool success = false;
3654 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3655 : struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3656 : struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
3657 :
3658 : spin_lock(&mm_list->lock);
3659 :
3660 : VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq);
3661 :
3662 : if (max_seq > mm_state->seq) {
3663 : mm_state->head = NULL;
3664 : mm_state->tail = NULL;
3665 : WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3666 : reset_mm_stats(lruvec, NULL, true);
3667 : success = true;
3668 : }
3669 :
3670 : spin_unlock(&mm_list->lock);
3671 :
3672 : return success;
3673 : }
3674 :
3675 : /******************************************************************************
3676 : * PID controller
3677 : ******************************************************************************/
3678 :
3679 : /*
3680 : * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3681 : *
3682 : * The P term is refaulted/(evicted+protected) from a tier in the generation
3683 : * currently being evicted; the I term is the exponential moving average of the
3684 : * P term over the generations previously evicted, using the smoothing factor
3685 : * 1/2; the D term isn't supported.
3686 : *
3687 : * The setpoint (SP) is always the first tier of one type; the process variable
3688 : * (PV) is either any tier of the other type or any other tier of the same
3689 : * type.
3690 : *
3691 : * The error is the difference between the SP and the PV; the correction is to
3692 : * turn off protection when SP>PV or turn on protection when SP<PV.
3693 : *
3694 : * For future optimizations:
3695 : * 1. The D term may discount the other two terms over time so that long-lived
3696 : * generations can resist stale information.
3697 : */
3698 : struct ctrl_pos {
3699 : unsigned long refaulted;
3700 : unsigned long total;
3701 : int gain;
3702 : };
3703 :
3704 : static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
3705 : struct ctrl_pos *pos)
3706 : {
3707 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
3708 : int hist = lru_hist_from_seq(lrugen->min_seq[type]);
3709 :
3710 : pos->refaulted = lrugen->avg_refaulted[type][tier] +
3711 : atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3712 : pos->total = lrugen->avg_total[type][tier] +
3713 : atomic_long_read(&lrugen->evicted[hist][type][tier]);
3714 : if (tier)
3715 : pos->total += lrugen->protected[hist][type][tier - 1];
3716 : pos->gain = gain;
3717 : }
3718 :
3719 : static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
3720 : {
3721 : int hist, tier;
3722 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
3723 : bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
3724 : unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
3725 :
3726 : lockdep_assert_held(&lruvec->lru_lock);
3727 :
3728 : if (!carryover && !clear)
3729 : return;
3730 :
3731 : hist = lru_hist_from_seq(seq);
3732 :
3733 : for (tier = 0; tier < MAX_NR_TIERS; tier++) {
3734 : if (carryover) {
3735 : unsigned long sum;
3736 :
3737 : sum = lrugen->avg_refaulted[type][tier] +
3738 : atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3739 : WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
3740 :
3741 : sum = lrugen->avg_total[type][tier] +
3742 : atomic_long_read(&lrugen->evicted[hist][type][tier]);
3743 : if (tier)
3744 : sum += lrugen->protected[hist][type][tier - 1];
3745 : WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
3746 : }
3747 :
3748 : if (clear) {
3749 : atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
3750 : atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
3751 : if (tier)
3752 : WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0);
3753 : }
3754 : }
3755 : }
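
/*
 * For reference, the carryover path above implements the exponential moving
 * average with smoothing factor 1/2 described in the comment on the PID
 * controller: avg_new = (avg_old + hist) / 2. After three generations with
 * refault counts r1, r2 and r3 (oldest first), the average is roughly
 * r3/2 + r2/4 + r1/8.
 */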
3756 :
3757 : static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
3758 : {
3759 : /*
3760 : * Return true if the PV has a limited number of refaults or a lower
3761 : * refaulted/total than the SP.
3762 : */
3763 : return pv->refaulted < MIN_LRU_BATCH ||
3764 : pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
3765 : (sp->refaulted + 1) * pv->total * pv->gain;
3766 : }
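
/*
 * For reference, dividing both sides by pv->total * (sp->total +
 * MIN_LRU_BATCH) shows the comparison above is roughly
 *   pv->refaulted / pv->total * sp->gain <= sp->refaulted / sp->total * pv->gain
 * i.e. a gain-weighted comparison of refault ratios; the +1 and
 * +MIN_LRU_BATCH terms keep it meaningful when the counters are tiny.
 */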
3767 :
3768 : /******************************************************************************
3769 : * the aging
3770 : ******************************************************************************/
3771 :
3772 : /* promote pages accessed through page tables */
3773 : static int folio_update_gen(struct folio *folio, int gen)
3774 : {
3775 : unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
3776 :
3777 : VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
3778 : VM_WARN_ON_ONCE(!rcu_read_lock_held());
3779 :
3780 : do {
3781 : /* lru_gen_del_folio() has isolated this page? */
3782 : if (!(old_flags & LRU_GEN_MASK)) {
3783 : /* for shrink_folio_list() */
3784 : new_flags = old_flags | BIT(PG_referenced);
3785 : continue;
3786 : }
3787 :
3788 : new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3789 : new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
3790 : } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
3791 :
3792 : return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3793 : }
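
/*
 * Note that LRU_GEN_MASK stores gen + 1, so a value of zero means the folio
 * is not on a multi-gen LRU list; that is why the first branch above only
 * sets PG_referenced and why the return value subtracts one, yielding -1
 * when there was no previous generation.
 */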
3794 :
3795 : /* protect pages accessed multiple times through file descriptors */
3796 : static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
3797 : {
3798 : int type = folio_is_file_lru(folio);
3799 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
3800 : int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3801 : unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
3802 :
3803 : VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
3804 :
3805 : do {
3806 : new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3807 : /* folio_update_gen() has promoted this page? */
3808 : if (new_gen >= 0 && new_gen != old_gen)
3809 : return new_gen;
3810 :
3811 : new_gen = (old_gen + 1) % MAX_NR_GENS;
3812 :
3813 : new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3814 : new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
3815 : /* for folio_end_writeback() */
3816 : if (reclaiming)
3817 : new_flags |= BIT(PG_reclaim);
3818 : } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
3819 :
3820 : lru_gen_update_size(lruvec, folio, old_gen, new_gen);
3821 :
3822 : return new_gen;
3823 : }
3824 :
3825 : static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
3826 : int old_gen, int new_gen)
3827 : {
3828 : int type = folio_is_file_lru(folio);
3829 : int zone = folio_zonenum(folio);
3830 : int delta = folio_nr_pages(folio);
3831 :
3832 : VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
3833 : VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
3834 :
3835 : walk->batched++;
3836 :
3837 : walk->nr_pages[old_gen][type][zone] -= delta;
3838 : walk->nr_pages[new_gen][type][zone] += delta;
3839 : }
3840 :
3841 : static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
3842 : {
3843 : int gen, type, zone;
3844 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
3845 :
3846 : walk->batched = 0;
3847 :
3848 : for_each_gen_type_zone(gen, type, zone) {
3849 : enum lru_list lru = type * LRU_INACTIVE_FILE;
3850 : int delta = walk->nr_pages[gen][type][zone];
3851 :
3852 : if (!delta)
3853 : continue;
3854 :
3855 : walk->nr_pages[gen][type][zone] = 0;
3856 : WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
3857 : lrugen->nr_pages[gen][type][zone] + delta);
3858 :
3859 : if (lru_gen_is_active(lruvec, gen))
3860 : lru += LRU_ACTIVE;
3861 : __update_lru_size(lruvec, lru, zone, delta);
3862 : }
3863 : }
3864 :
3865 : static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
3866 : {
3867 : struct address_space *mapping;
3868 : struct vm_area_struct *vma = args->vma;
3869 : struct lru_gen_mm_walk *walk = args->private;
3870 :
3871 : if (!vma_is_accessible(vma))
3872 : return true;
3873 :
3874 : if (is_vm_hugetlb_page(vma))
3875 : return true;
3876 :
3877 : if (!vma_has_recency(vma))
3878 : return true;
3879 :
3880 : if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL))
3881 : return true;
3882 :
3883 : if (vma == get_gate_vma(vma->vm_mm))
3884 : return true;
3885 :
3886 : if (vma_is_anonymous(vma))
3887 : return !walk->can_swap;
3888 :
3889 : if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
3890 : return true;
3891 :
3892 : mapping = vma->vm_file->f_mapping;
3893 : if (mapping_unevictable(mapping))
3894 : return true;
3895 :
3896 : if (shmem_mapping(mapping))
3897 : return !walk->can_swap;
3898 :
3899 : /* to exclude special mappings such as dax */
3900 : return !mapping->a_ops->read_folio;
3901 : }
3902 :
3903 : /*
3904 : * Some userspace memory allocators map many single-page VMAs. Instead of
3905 : * returning to the PGD table for each such VMA, finish an entire PMD
3906 : * table to reduce zigzags and improve cache performance.
3907 : */
3908 : static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
3909 : unsigned long *vm_start, unsigned long *vm_end)
3910 : {
3911 : unsigned long start = round_up(*vm_end, size);
3912 : unsigned long end = (start | ~mask) + 1;
3913 : VMA_ITERATOR(vmi, args->mm, start);
3914 :
3915 : VM_WARN_ON_ONCE(mask & size);
3916 : VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
3917 :
3918 : for_each_vma(vmi, args->vma) {
3919 : if (end && end <= args->vma->vm_start)
3920 : return false;
3921 :
3922 : if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args))
3923 : continue;
3924 :
3925 : *vm_start = max(start, args->vma->vm_start);
3926 : *vm_end = min(end - 1, args->vma->vm_end - 1) + 1;
3927 :
3928 : return true;
3929 : }
3930 :
3931 : return false;
3932 : }
3933 :
3934 : static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
3935 : {
3936 : unsigned long pfn = pte_pfn(pte);
3937 :
3938 : VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3939 :
3940 : if (!pte_present(pte) || is_zero_pfn(pfn))
3941 : return -1;
3942 :
3943 : if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
3944 : return -1;
3945 :
3946 : if (WARN_ON_ONCE(!pfn_valid(pfn)))
3947 : return -1;
3948 :
3949 : return pfn;
3950 : }
3951 :
3952 : #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
3953 : static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
3954 : {
3955 : unsigned long pfn = pmd_pfn(pmd);
3956 :
3957 : VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3958 :
3959 : if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
3960 : return -1;
3961 :
3962 : if (WARN_ON_ONCE(pmd_devmap(pmd)))
3963 : return -1;
3964 :
3965 : if (WARN_ON_ONCE(!pfn_valid(pfn)))
3966 : return -1;
3967 :
3968 : return pfn;
3969 : }
3970 : #endif
3971 :
3972 : static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
3973 : struct pglist_data *pgdat, bool can_swap)
3974 : {
3975 : struct folio *folio;
3976 :
3977 : /* try to avoid unnecessary memory loads */
3978 : if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3979 : return NULL;
3980 :
3981 : folio = pfn_folio(pfn);
3982 : if (folio_nid(folio) != pgdat->node_id)
3983 : return NULL;
3984 :
3985 : if (folio_memcg_rcu(folio) != memcg)
3986 : return NULL;
3987 :
3988 : /* file VMAs can contain anon pages from COW */
3989 : if (!folio_is_file_lru(folio) && !can_swap)
3990 : return NULL;
3991 :
3992 : return folio;
3993 : }
3994 :
3995 : static bool suitable_to_scan(int total, int young)
3996 : {
3997 : int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
3998 :
3999 : /* suitable if the average number of young PTEs per cacheline is >=1 */
4000 : return young * n >= total;
4001 : }
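
/*
 * As a rough example, with 64-byte cache lines and 8-byte PTEs (typical on
 * x86_64 and arm64) n is 8, so a fully scanned 512-entry PTE table is
 * considered suitable when at least 64 of its entries were young.
 */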
4002 :
4003 : static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
4004 : struct mm_walk *args)
4005 : {
4006 : int i;
4007 : pte_t *pte;
4008 : spinlock_t *ptl;
4009 : unsigned long addr;
4010 : int total = 0;
4011 : int young = 0;
4012 : struct lru_gen_mm_walk *walk = args->private;
4013 : struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
4014 : struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
4015 : int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
4016 :
4017 : pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl);
4018 : if (!pte)
4019 : return false;
4020 : if (!spin_trylock(ptl)) {
4021 : pte_unmap(pte);
4022 : return false;
4023 : }
4024 :
4025 : arch_enter_lazy_mmu_mode();
4026 : restart:
4027 : for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
4028 : unsigned long pfn;
4029 : struct folio *folio;
4030 : pte_t ptent = ptep_get(pte + i);
4031 :
4032 : total++;
4033 : walk->mm_stats[MM_LEAF_TOTAL]++;
4034 :
4035 : pfn = get_pte_pfn(ptent, args->vma, addr);
4036 : if (pfn == -1)
4037 : continue;
4038 :
4039 : if (!pte_young(ptent)) {
4040 : walk->mm_stats[MM_LEAF_OLD]++;
4041 : continue;
4042 : }
4043 :
4044 : folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
4045 : if (!folio)
4046 : continue;
4047 :
4048 : if (!ptep_test_and_clear_young(args->vma, addr, pte + i))
4049 : VM_WARN_ON_ONCE(true);
4050 :
4051 : young++;
4052 : walk->mm_stats[MM_LEAF_YOUNG]++;
4053 :
4054 : if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
4055 : !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
4056 : !folio_test_swapcache(folio)))
4057 : folio_mark_dirty(folio);
4058 :
4059 : old_gen = folio_update_gen(folio, new_gen);
4060 : if (old_gen >= 0 && old_gen != new_gen)
4061 : update_batch_size(walk, folio, old_gen, new_gen);
4062 : }
4063 :
4064 : if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
4065 : goto restart;
4066 :
4067 : arch_leave_lazy_mmu_mode();
4068 : pte_unmap_unlock(pte, ptl);
4069 :
4070 : return suitable_to_scan(total, young);
4071 : }
4072 :
4073 : #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
4074 : static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
4075 : struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
4076 : {
4077 : int i;
4078 : pmd_t *pmd;
4079 : spinlock_t *ptl;
4080 : struct lru_gen_mm_walk *walk = args->private;
4081 : struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
4082 : struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
4083 : int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
4084 :
4085 : VM_WARN_ON_ONCE(pud_leaf(*pud));
4086 :
4087 : /* try to batch at most 1+MIN_LRU_BATCH+1 entries */
4088 : if (*first == -1) {
4089 : *first = addr;
4090 : bitmap_zero(bitmap, MIN_LRU_BATCH);
4091 : return;
4092 : }
4093 :
4094 : i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first);
4095 : if (i && i <= MIN_LRU_BATCH) {
4096 : __set_bit(i - 1, bitmap);
4097 : return;
4098 : }
4099 :
4100 : pmd = pmd_offset(pud, *first);
4101 :
4102 : ptl = pmd_lockptr(args->mm, pmd);
4103 : if (!spin_trylock(ptl))
4104 : goto done;
4105 :
4106 : arch_enter_lazy_mmu_mode();
4107 :
4108 : do {
4109 : unsigned long pfn;
4110 : struct folio *folio;
4111 :
4112 : /* don't round down the first address */
4113 : addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first;
4114 :
4115 : pfn = get_pmd_pfn(pmd[i], vma, addr);
4116 : if (pfn == -1)
4117 : goto next;
4118 :
4119 : if (!pmd_trans_huge(pmd[i])) {
4120 : if (should_clear_pmd_young())
4121 : pmdp_test_and_clear_young(vma, addr, pmd + i);
4122 : goto next;
4123 : }
4124 :
4125 : folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
4126 : if (!folio)
4127 : goto next;
4128 :
4129 : if (!pmdp_test_and_clear_young(vma, addr, pmd + i))
4130 : goto next;
4131 :
4132 : walk->mm_stats[MM_LEAF_YOUNG]++;
4133 :
4134 : if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
4135 : !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
4136 : !folio_test_swapcache(folio)))
4137 : folio_mark_dirty(folio);
4138 :
4139 : old_gen = folio_update_gen(folio, new_gen);
4140 : if (old_gen >= 0 && old_gen != new_gen)
4141 : update_batch_size(walk, folio, old_gen, new_gen);
4142 : next:
4143 : i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
4144 : } while (i <= MIN_LRU_BATCH);
4145 :
4146 : arch_leave_lazy_mmu_mode();
4147 : spin_unlock(ptl);
4148 : done:
4149 : *first = -1;
4150 : }
4151 : #else
4152 : static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
4153 : struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
4154 : {
4155 : }
4156 : #endif
4157 :
4158 : static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
4159 : struct mm_walk *args)
4160 : {
4161 : int i;
4162 : pmd_t *pmd;
4163 : unsigned long next;
4164 : unsigned long addr;
4165 : struct vm_area_struct *vma;
4166 : DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
4167 : unsigned long first = -1;
4168 : struct lru_gen_mm_walk *walk = args->private;
4169 :
4170 : VM_WARN_ON_ONCE(pud_leaf(*pud));
4171 :
4172 : /*
4173 : * Finish an entire PMD in two passes: the first pass only reaches PTE
4174 : * tables to avoid taking the PMD lock; the second, if necessary, takes
4175 : * the PMD lock to clear the accessed bit in PMD entries.
4176 : */
4177 : pmd = pmd_offset(pud, start & PUD_MASK);
4178 : restart:
4179 : /* walk_pte_range() may call get_next_vma() */
4180 : vma = args->vma;
4181 : for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
4182 : pmd_t val = pmdp_get_lockless(pmd + i);
4183 :
4184 : next = pmd_addr_end(addr, end);
4185 :
4186 : if (!pmd_present(val) || is_huge_zero_pmd(val)) {
4187 : walk->mm_stats[MM_LEAF_TOTAL]++;
4188 : continue;
4189 : }
4190 :
4191 : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4192 : if (pmd_trans_huge(val)) {
4193 : unsigned long pfn = pmd_pfn(val);
4194 : struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
4195 :
4196 : walk->mm_stats[MM_LEAF_TOTAL]++;
4197 :
4198 : if (!pmd_young(val)) {
4199 : walk->mm_stats[MM_LEAF_OLD]++;
4200 : continue;
4201 : }
4202 :
4203 : /* try to avoid unnecessary memory loads */
4204 : if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
4205 : continue;
4206 :
4207 : walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
4208 : continue;
4209 : }
4210 : #endif
4211 : walk->mm_stats[MM_NONLEAF_TOTAL]++;
4212 :
4213 : if (should_clear_pmd_young()) {
4214 : if (!pmd_young(val))
4215 : continue;
4216 :
4217 : walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
4218 : }
4219 :
4220 : if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
4221 : continue;
4222 :
4223 : walk->mm_stats[MM_NONLEAF_FOUND]++;
4224 :
4225 : if (!walk_pte_range(&val, addr, next, args))
4226 : continue;
4227 :
4228 : walk->mm_stats[MM_NONLEAF_ADDED]++;
4229 :
4230 : /* carry over to the next generation */
4231 : update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
4232 : }
4233 :
4234 : walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first);
4235 :
4236 : if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
4237 : goto restart;
4238 : }
4239 :
4240 : static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
4241 : struct mm_walk *args)
4242 : {
4243 : int i;
4244 : pud_t *pud;
4245 : unsigned long addr;
4246 : unsigned long next;
4247 : struct lru_gen_mm_walk *walk = args->private;
4248 :
4249 : VM_WARN_ON_ONCE(p4d_leaf(*p4d));
4250 :
4251 : pud = pud_offset(p4d, start & P4D_MASK);
4252 : restart:
4253 : for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
4254 : pud_t val = READ_ONCE(pud[i]);
4255 :
4256 : next = pud_addr_end(addr, end);
4257 :
4258 : if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
4259 : continue;
4260 :
4261 : walk_pmd_range(&val, addr, next, args);
4262 :
4263 : if (need_resched() || walk->batched >= MAX_LRU_BATCH) {
4264 : end = (addr | ~PUD_MASK) + 1;
4265 : goto done;
4266 : }
4267 : }
4268 :
4269 : if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
4270 : goto restart;
4271 :
4272 : end = round_up(end, P4D_SIZE);
4273 : done:
4274 : if (!end || !args->vma)
4275 : return 1;
4276 :
4277 : walk->next_addr = max(end, args->vma->vm_start);
4278 :
4279 : return -EAGAIN;
4280 : }
4281 :
4282 : static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
4283 : {
4284 : static const struct mm_walk_ops mm_walk_ops = {
4285 : .test_walk = should_skip_vma,
4286 : .p4d_entry = walk_pud_range,
4287 : };
4288 :
4289 : int err;
4290 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4291 :
4292 : walk->next_addr = FIRST_USER_ADDRESS;
4293 :
4294 : do {
4295 : DEFINE_MAX_SEQ(lruvec);
4296 :
4297 : err = -EBUSY;
4298 :
4299 : /* another thread might have called inc_max_seq() */
4300 : if (walk->max_seq != max_seq)
4301 : break;
4302 :
4303 : /* folio_update_gen() requires stable folio_memcg() */
4304 : if (!mem_cgroup_trylock_pages(memcg))
4305 : break;
4306 :
4307 : /* the caller might be holding the lock for write */
4308 : if (mmap_read_trylock(mm)) {
4309 : err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
4310 :
4311 : mmap_read_unlock(mm);
4312 : }
4313 :
4314 : mem_cgroup_unlock_pages();
4315 :
4316 : if (walk->batched) {
4317 : spin_lock_irq(&lruvec->lru_lock);
4318 : reset_batch_size(lruvec, walk);
4319 : spin_unlock_irq(&lruvec->lru_lock);
4320 : }
4321 :
4322 : cond_resched();
4323 : } while (err == -EAGAIN);
4324 : }
4325 :
4326 : static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc)
4327 : {
4328 : struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
4329 :
4330 : if (pgdat && current_is_kswapd()) {
4331 : VM_WARN_ON_ONCE(walk);
4332 :
4333 : walk = &pgdat->mm_walk;
4334 : } else if (!walk && force_alloc) {
4335 : VM_WARN_ON_ONCE(current_is_kswapd());
4336 :
4337 : walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
4338 : }
4339 :
4340 : current->reclaim_state->mm_walk = walk;
4341 :
4342 : return walk;
4343 : }
4344 :
4345 : static void clear_mm_walk(void)
4346 : {
4347 : struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
4348 :
4349 : VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
4350 : VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
4351 :
4352 : current->reclaim_state->mm_walk = NULL;
4353 :
4354 : if (!current_is_kswapd())
4355 : kfree(walk);
4356 : }
4357 :
4358 : static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
4359 : {
4360 : int zone;
4361 : int remaining = MAX_LRU_BATCH;
4362 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
4363 : int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
4364 :
4365 : if (type == LRU_GEN_ANON && !can_swap)
4366 : goto done;
4367 :
4368 : /* prevent cold/hot inversion if force_scan is true */
4369 : for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4370 : struct list_head *head = &lrugen->folios[old_gen][type][zone];
4371 :
4372 : while (!list_empty(head)) {
4373 : struct folio *folio = lru_to_folio(head);
4374 :
4375 : VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
4376 : VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
4377 : VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
4378 : VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
4379 :
4380 : new_gen = folio_inc_gen(lruvec, folio, false);
4381 : list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
4382 :
4383 : if (!--remaining)
4384 : return false;
4385 : }
4386 : }
4387 : done:
4388 : reset_ctrl_pos(lruvec, type, true);
4389 : WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
4390 :
4391 : return true;
4392 : }
4393 :
4394 : static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
4395 : {
4396 : int gen, type, zone;
4397 : bool success = false;
4398 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
4399 : DEFINE_MIN_SEQ(lruvec);
4400 :
4401 : VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
4402 :
4403 : /* find the oldest populated generation */
4404 : for (type = !can_swap; type < ANON_AND_FILE; type++) {
4405 : while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
4406 : gen = lru_gen_from_seq(min_seq[type]);
4407 :
4408 : for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4409 : if (!list_empty(&lrugen->folios[gen][type][zone]))
4410 : goto next;
4411 : }
4412 :
4413 : min_seq[type]++;
4414 : }
4415 : next:
4416 : ;
4417 : }
4418 :
4419 : /* see the comment on lru_gen_folio */
4420 : if (can_swap) {
4421 : min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
4422 : min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
4423 : }
4424 :
4425 : for (type = !can_swap; type < ANON_AND_FILE; type++) {
4426 : if (min_seq[type] == lrugen->min_seq[type])
4427 : continue;
4428 :
4429 : reset_ctrl_pos(lruvec, type, true);
4430 : WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
4431 : success = true;
4432 : }
4433 :
4434 : return success;
4435 : }
4436 :
4437 : static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
4438 : {
4439 : int prev, next;
4440 : int type, zone;
4441 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
4442 :
4443 : spin_lock_irq(&lruvec->lru_lock);
4444 :
4445 : VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
4446 :
4447 : for (type = ANON_AND_FILE - 1; type >= 0; type--) {
4448 : if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
4449 : continue;
4450 :
4451 : VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));
4452 :
4453 : while (!inc_min_seq(lruvec, type, can_swap)) {
4454 : spin_unlock_irq(&lruvec->lru_lock);
4455 : cond_resched();
4456 : spin_lock_irq(&lruvec->lru_lock);
4457 : }
4458 : }
4459 :
4460 : /*
4461 : * Update the active/inactive LRU sizes for compatibility. Both sides of
4462 : * the current max_seq need to be covered, since max_seq+1 can overlap
4463 : * with min_seq[LRU_GEN_ANON] if swapping is constrained. If they do
4464 : * overlap, cold/hot inversion happens.
4465 : */
4466 : prev = lru_gen_from_seq(lrugen->max_seq - 1);
4467 : next = lru_gen_from_seq(lrugen->max_seq + 1);
4468 :
4469 : for (type = 0; type < ANON_AND_FILE; type++) {
4470 : for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4471 : enum lru_list lru = type * LRU_INACTIVE_FILE;
4472 : long delta = lrugen->nr_pages[prev][type][zone] -
4473 : lrugen->nr_pages[next][type][zone];
4474 :
4475 : if (!delta)
4476 : continue;
4477 :
4478 : __update_lru_size(lruvec, lru, zone, delta);
4479 : __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
4480 : }
4481 : }
4482 :
4483 : for (type = 0; type < ANON_AND_FILE; type++)
4484 : reset_ctrl_pos(lruvec, type, false);
4485 :
4486 : WRITE_ONCE(lrugen->timestamps[next], jiffies);
4487 : /* make sure preceding modifications appear */
4488 : smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
4489 :
4490 : spin_unlock_irq(&lruvec->lru_lock);
4491 : }
4492 :
4493 : static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
4494 : struct scan_control *sc, bool can_swap, bool force_scan)
4495 : {
4496 : bool success;
4497 : struct lru_gen_mm_walk *walk;
4498 : struct mm_struct *mm = NULL;
4499 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
4500 :
4501 : VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
4502 :
4503 : /* see the comment in iterate_mm_list() */
4504 : if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) {
4505 : success = false;
4506 : goto done;
4507 : }
4508 :
4509 : /*
4510 : * If the hardware doesn't automatically set the accessed bit, fall back
4511 : * to lru_gen_look_around(), which only clears the accessed bit in a
4512 : * handful of PTEs. Spreading the work out over a period of time is
4513 : * usually less efficient, but it avoids bursty page faults.
4514 : */
4515 : if (!should_walk_mmu()) {
4516 : success = iterate_mm_list_nowalk(lruvec, max_seq);
4517 : goto done;
4518 : }
4519 :
4520 : walk = set_mm_walk(NULL, true);
4521 : if (!walk) {
4522 : success = iterate_mm_list_nowalk(lruvec, max_seq);
4523 : goto done;
4524 : }
4525 :
4526 : walk->lruvec = lruvec;
4527 : walk->max_seq = max_seq;
4528 : walk->can_swap = can_swap;
4529 : walk->force_scan = force_scan;
4530 :
4531 : do {
4532 : success = iterate_mm_list(lruvec, walk, &mm);
4533 : if (mm)
4534 : walk_mm(lruvec, mm, walk);
4535 : } while (mm);
4536 : done:
4537 : if (success)
4538 : inc_max_seq(lruvec, can_swap, force_scan);
4539 :
4540 : return success;
4541 : }
4542 :
4543 : /******************************************************************************
4544 : * working set protection
4545 : ******************************************************************************/
4546 :
4547 : static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
4548 : {
4549 : int gen, type, zone;
4550 : unsigned long total = 0;
4551 : bool can_swap = get_swappiness(lruvec, sc);
4552 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
4553 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4554 : DEFINE_MAX_SEQ(lruvec);
4555 : DEFINE_MIN_SEQ(lruvec);
4556 :
4557 : for (type = !can_swap; type < ANON_AND_FILE; type++) {
4558 : unsigned long seq;
4559 :
4560 : for (seq = min_seq[type]; seq <= max_seq; seq++) {
4561 : gen = lru_gen_from_seq(seq);
4562 :
4563 : for (zone = 0; zone < MAX_NR_ZONES; zone++)
4564 : total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
4565 : }
4566 : }
4567 :
4568 : /* whether the size is big enough to be helpful */
4569 : return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
4570 : }
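
/*
 * For an online memcg the check above is total >> sc->priority, i.e. the
 * eligible generations must hold at least 2^priority pages; at priority 10,
 * for example, that is 1024 pages, or 4MiB with 4KiB pages. An offline
 * memcg only needs a nonzero total.
 */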
4571 :
4572 : static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc,
4573 : unsigned long min_ttl)
4574 : {
4575 : int gen;
4576 : unsigned long birth;
4577 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4578 : DEFINE_MIN_SEQ(lruvec);
4579 :
4580 : /* see the comment on lru_gen_folio */
4581 : gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
4582 : birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
4583 :
4584 : if (time_is_after_jiffies(birth + min_ttl))
4585 : return false;
4586 :
4587 : if (!lruvec_is_sizable(lruvec, sc))
4588 : return false;
4589 :
4590 : mem_cgroup_calculate_protection(NULL, memcg);
4591 :
4592 : return !mem_cgroup_below_min(NULL, memcg);
4593 : }
4594 :
4595 : /* to protect the working set of the last N jiffies */
4596 : static unsigned long lru_gen_min_ttl __read_mostly;
4597 :
4598 : static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
4599 : {
4600 : struct mem_cgroup *memcg;
4601 : unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
4602 :
4603 : VM_WARN_ON_ONCE(!current_is_kswapd());
4604 :
4605 : /* check the order to exclude compaction-induced reclaim */
4606 : if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
4607 : return;
4608 :
4609 : memcg = mem_cgroup_iter(NULL, NULL, NULL);
4610 : do {
4611 : struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4612 :
4613 : if (lruvec_is_reclaimable(lruvec, sc, min_ttl)) {
4614 : mem_cgroup_iter_break(NULL, memcg);
4615 : return;
4616 : }
4617 :
4618 : cond_resched();
4619 : } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
4620 :
4621 : /*
4622 : * The main goal is to OOM kill if every generation from all memcgs is
4623 : * younger than min_ttl. However, another possibility is that all memcgs are
4624 : * either too small or below min.
4625 : */
4626 : if (mutex_trylock(&oom_lock)) {
4627 : struct oom_control oc = {
4628 : .gfp_mask = sc->gfp_mask,
4629 : };
4630 :
4631 : out_of_memory(&oc);
4632 :
4633 : mutex_unlock(&oom_lock);
4634 : }
4635 : }
4636 :
4637 : /******************************************************************************
4638 : * rmap/PT walk feedback
4639 : ******************************************************************************/
4640 :
4641 : /*
4642 : * This function exploits spatial locality when shrink_folio_list() walks the
4643 : * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
4644 : * the scan was cacheline-efficient, it adds the PMD entry pointing to
4645 : * the PTE table to the Bloom filter. This forms a feedback loop between the
4646 : * eviction and the aging.
4647 : */
4648 : void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
4649 : {
4650 : int i;
4651 : unsigned long start;
4652 : unsigned long end;
4653 : struct lru_gen_mm_walk *walk;
4654 : int young = 0;
4655 : pte_t *pte = pvmw->pte;
4656 : unsigned long addr = pvmw->address;
4657 : struct folio *folio = pfn_folio(pvmw->pfn);
4658 : struct mem_cgroup *memcg = folio_memcg(folio);
4659 : struct pglist_data *pgdat = folio_pgdat(folio);
4660 : struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4661 : DEFINE_MAX_SEQ(lruvec);
4662 : int old_gen, new_gen = lru_gen_from_seq(max_seq);
4663 :
4664 : lockdep_assert_held(pvmw->ptl);
4665 : VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
4666 :
4667 : if (spin_is_contended(pvmw->ptl))
4668 : return;
4669 :
4670 : /* avoid taking the LRU lock under the PTL when possible */
4671 : walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
4672 :
4673 : start = max(addr & PMD_MASK, pvmw->vma->vm_start);
4674 : end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
4675 :
4676 : if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
4677 : if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
4678 : end = start + MIN_LRU_BATCH * PAGE_SIZE;
4679 : else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
4680 : start = end - MIN_LRU_BATCH * PAGE_SIZE;
4681 : else {
4682 : start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
4683 : end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
4684 : }
4685 : }
4686 :
4687 : /* folio_update_gen() requires stable folio_memcg() */
4688 : if (!mem_cgroup_trylock_pages(memcg))
4689 : return;
4690 :
4691 : arch_enter_lazy_mmu_mode();
4692 :
4693 : pte -= (addr - start) / PAGE_SIZE;
4694 :
4695 : for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
4696 : unsigned long pfn;
4697 : pte_t ptent = ptep_get(pte + i);
4698 :
4699 : pfn = get_pte_pfn(ptent, pvmw->vma, addr);
4700 : if (pfn == -1)
4701 : continue;
4702 :
4703 : if (!pte_young(ptent))
4704 : continue;
4705 :
4706 : folio = get_pfn_folio(pfn, memcg, pgdat, !walk || walk->can_swap);
4707 : if (!folio)
4708 : continue;
4709 :
4710 : if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
4711 : VM_WARN_ON_ONCE(true);
4712 :
4713 : young++;
4714 :
4715 : if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
4716 : !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
4717 : !folio_test_swapcache(folio)))
4718 : folio_mark_dirty(folio);
4719 :
4720 : if (walk) {
4721 : old_gen = folio_update_gen(folio, new_gen);
4722 : if (old_gen >= 0 && old_gen != new_gen)
4723 : update_batch_size(walk, folio, old_gen, new_gen);
4724 :
4725 : continue;
4726 : }
4727 :
4728 : old_gen = folio_lru_gen(folio);
4729 : if (old_gen < 0)
4730 : folio_set_referenced(folio);
4731 : else if (old_gen != new_gen)
4732 : folio_activate(folio);
4733 : }
4734 :
4735 : arch_leave_lazy_mmu_mode();
4736 : mem_cgroup_unlock_pages();
4737 :
4738 : /* feedback from rmap walkers to page table walkers */
4739 : if (suitable_to_scan(i, young))
4740 : update_bloom_filter(lruvec, max_seq, pvmw->pmd);
4741 : }
4742 :
4743 : /******************************************************************************
4744 : * memcg LRU
4745 : ******************************************************************************/
4746 :
4747 : /* see the comment on MEMCG_NR_GENS */
4748 : enum {
4749 : MEMCG_LRU_NOP,
4750 : MEMCG_LRU_HEAD,
4751 : MEMCG_LRU_TAIL,
4752 : MEMCG_LRU_OLD,
4753 : MEMCG_LRU_YOUNG,
4754 : };
4755 :
4756 : #ifdef CONFIG_MEMCG
4757 :
4758 : static int lru_gen_memcg_seg(struct lruvec *lruvec)
4759 : {
4760 : return READ_ONCE(lruvec->lrugen.seg);
4761 : }
4762 :
4763 : static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
4764 : {
4765 : int seg;
4766 : int old, new;
4767 : unsigned long flags;
4768 : int bin = get_random_u32_below(MEMCG_NR_BINS);
4769 : struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4770 :
4771 : spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);
4772 :
4773 : VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
4774 :
4775 : seg = 0;
4776 : new = old = lruvec->lrugen.gen;
4777 :
4778 : /* see the comment on MEMCG_NR_GENS */
4779 : if (op == MEMCG_LRU_HEAD)
4780 : seg = MEMCG_LRU_HEAD;
4781 : else if (op == MEMCG_LRU_TAIL)
4782 : seg = MEMCG_LRU_TAIL;
4783 : else if (op == MEMCG_LRU_OLD)
4784 : new = get_memcg_gen(pgdat->memcg_lru.seq);
4785 : else if (op == MEMCG_LRU_YOUNG)
4786 : new = get_memcg_gen(pgdat->memcg_lru.seq + 1);
4787 : else
4788 : VM_WARN_ON_ONCE(true);
4789 :
4790 : hlist_nulls_del_rcu(&lruvec->lrugen.list);
4791 :
4792 : if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD)
4793 : hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4794 : else
4795 : hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4796 :
4797 : pgdat->memcg_lru.nr_memcgs[old]--;
4798 : pgdat->memcg_lru.nr_memcgs[new]++;
4799 :
4800 : lruvec->lrugen.gen = new;
4801 : WRITE_ONCE(lruvec->lrugen.seg, seg);
4802 :
4803 : if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
4804 : WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
4805 :
4806 : spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
4807 : }
4808 :
4809 : void lru_gen_online_memcg(struct mem_cgroup *memcg)
4810 : {
4811 : int gen;
4812 : int nid;
4813 : int bin = get_random_u32_below(MEMCG_NR_BINS);
4814 :
4815 : for_each_node(nid) {
4816 : struct pglist_data *pgdat = NODE_DATA(nid);
4817 : struct lruvec *lruvec = get_lruvec(memcg, nid);
4818 :
4819 : spin_lock_irq(&pgdat->memcg_lru.lock);
4820 :
4821 : VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
4822 :
4823 : gen = get_memcg_gen(pgdat->memcg_lru.seq);
4824 :
4825 : hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
4826 : pgdat->memcg_lru.nr_memcgs[gen]++;
4827 :
4828 : lruvec->lrugen.gen = gen;
4829 :
4830 : spin_unlock_irq(&pgdat->memcg_lru.lock);
4831 : }
4832 : }
4833 :
4834 : void lru_gen_offline_memcg(struct mem_cgroup *memcg)
4835 : {
4836 : int nid;
4837 :
4838 : for_each_node(nid) {
4839 : struct lruvec *lruvec = get_lruvec(memcg, nid);
4840 :
4841 : lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD);
4842 : }
4843 : }
4844 :
4845 : void lru_gen_release_memcg(struct mem_cgroup *memcg)
4846 : {
4847 : int gen;
4848 : int nid;
4849 :
4850 : for_each_node(nid) {
4851 : struct pglist_data *pgdat = NODE_DATA(nid);
4852 : struct lruvec *lruvec = get_lruvec(memcg, nid);
4853 :
4854 : spin_lock_irq(&pgdat->memcg_lru.lock);
4855 :
4856 : VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
4857 :
4858 : gen = lruvec->lrugen.gen;
4859 :
4860 : hlist_nulls_del_rcu(&lruvec->lrugen.list);
4861 : pgdat->memcg_lru.nr_memcgs[gen]--;
4862 :
4863 : if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
4864 : WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
4865 :
4866 : spin_unlock_irq(&pgdat->memcg_lru.lock);
4867 : }
4868 : }
4869 :
4870 : void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
4871 : {
4872 : struct lruvec *lruvec = get_lruvec(memcg, nid);
4873 :
4874 : /* see the comment on MEMCG_NR_GENS */
4875 : if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
4876 : lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
4877 : }
4878 :
4879 : #else /* !CONFIG_MEMCG */
4880 :
4881 : static int lru_gen_memcg_seg(struct lruvec *lruvec)
4882 : {
4883 : return 0;
4884 : }
4885 :
4886 : #endif
4887 :
4888 : /******************************************************************************
4889 : * the eviction
4890 : ******************************************************************************/
4891 :
4892 : static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
4893 : {
4894 : bool success;
4895 : int gen = folio_lru_gen(folio);
4896 : int type = folio_is_file_lru(folio);
4897 : int zone = folio_zonenum(folio);
4898 : int delta = folio_nr_pages(folio);
4899 : int refs = folio_lru_refs(folio);
4900 : int tier = lru_tier_from_refs(refs);
4901 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
4902 :
4903 : VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
4904 :
4905 : /* unevictable */
4906 : if (!folio_evictable(folio)) {
4907 : success = lru_gen_del_folio(lruvec, folio, true);
4908 : VM_WARN_ON_ONCE_FOLIO(!success, folio);
4909 : folio_set_unevictable(folio);
4910 : lruvec_add_folio(lruvec, folio);
4911 : __count_vm_events(UNEVICTABLE_PGCULLED, delta);
4912 : return true;
4913 : }
4914 :
4915 : /* dirty lazyfree */
4916 : if (type == LRU_GEN_FILE && folio_test_anon(folio) && folio_test_dirty(folio)) {
4917 : success = lru_gen_del_folio(lruvec, folio, true);
4918 : VM_WARN_ON_ONCE_FOLIO(!success, folio);
4919 : folio_set_swapbacked(folio);
4920 : lruvec_add_folio_tail(lruvec, folio);
4921 : return true;
4922 : }
4923 :
4924 : /* promoted */
4925 : if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
4926 : list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4927 : return true;
4928 : }
4929 :
4930 : /* protected */
4931 : if (tier > tier_idx) {
4932 : int hist = lru_hist_from_seq(lrugen->min_seq[type]);
4933 :
4934 : gen = folio_inc_gen(lruvec, folio, false);
4935 : list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
4936 :
4937 : WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
4938 : lrugen->protected[hist][type][tier - 1] + delta);
4939 : return true;
4940 : }
4941 :
4942 : /* waiting for writeback */
4943 : if (folio_test_locked(folio) || folio_test_writeback(folio) ||
4944 : (type == LRU_GEN_FILE && folio_test_dirty(folio))) {
4945 : gen = folio_inc_gen(lruvec, folio, true);
4946 : list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4947 : return true;
4948 : }
4949 :
4950 : return false;
4951 : }
4952 :
4953 : static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc)
4954 : {
4955 : bool success;
4956 :
4957 : /* swapping inhibited */
4958 : if (!(sc->gfp_mask & __GFP_IO) &&
4959 : (folio_test_dirty(folio) ||
4960 : (folio_test_anon(folio) && !folio_test_swapcache(folio))))
4961 : return false;
4962 :
4963 : /* raced with release_pages() */
4964 : if (!folio_try_get(folio))
4965 : return false;
4966 :
4967 : /* raced with another isolation */
4968 : if (!folio_test_clear_lru(folio)) {
4969 : folio_put(folio);
4970 : return false;
4971 : }
4972 :
4973 : /* see the comment on MAX_NR_TIERS */
4974 : if (!folio_test_referenced(folio))
4975 : set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
4976 :
4977 : /* for shrink_folio_list() */
4978 : folio_clear_reclaim(folio);
4979 : folio_clear_referenced(folio);
4980 :
4981 : success = lru_gen_del_folio(lruvec, folio, true);
4982 : VM_WARN_ON_ONCE_FOLIO(!success, folio);
4983 :
4984 : return true;
4985 : }
4986 :
4987 : static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
4988 : int type, int tier, struct list_head *list)
4989 : {
4990 : int gen, zone;
4991 : enum vm_event_item item;
4992 : int sorted = 0;
4993 : int scanned = 0;
4994 : int isolated = 0;
4995 : int remaining = MAX_LRU_BATCH;
4996 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
4997 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4998 :
4999 : VM_WARN_ON_ONCE(!list_empty(list));
5000 :
5001 : if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
5002 : return 0;
5003 :
5004 : gen = lru_gen_from_seq(lrugen->min_seq[type]);
5005 :
5006 : for (zone = sc->reclaim_idx; zone >= 0; zone--) {
5007 : LIST_HEAD(moved);
5008 : int skipped = 0;
5009 : struct list_head *head = &lrugen->folios[gen][type][zone];
5010 :
5011 : while (!list_empty(head)) {
5012 : struct folio *folio = lru_to_folio(head);
5013 : int delta = folio_nr_pages(folio);
5014 :
5015 : VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
5016 : VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
5017 : VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
5018 : VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
5019 :
5020 : scanned += delta;
5021 :
5022 : if (sort_folio(lruvec, folio, tier))
5023 : sorted += delta;
5024 : else if (isolate_folio(lruvec, folio, sc)) {
5025 : list_add(&folio->lru, list);
5026 : isolated += delta;
5027 : } else {
5028 : list_move(&folio->lru, &moved);
5029 : skipped += delta;
5030 : }
5031 :
5032 : if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH)
5033 : break;
5034 : }
5035 :
5036 : if (skipped) {
5037 : list_splice(&moved, head);
5038 : __count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
5039 : }
5040 :
5041 : if (!remaining || isolated >= MIN_LRU_BATCH)
5042 : break;
5043 : }
5044 :
5045 : item = PGSCAN_KSWAPD + reclaimer_offset();
5046 : if (!cgroup_reclaim(sc)) {
5047 : __count_vm_events(item, isolated);
5048 : __count_vm_events(PGREFILL, sorted);
5049 : }
5050 : __count_memcg_events(memcg, item, isolated);
5051 : __count_memcg_events(memcg, PGREFILL, sorted);
5052 : __count_vm_events(PGSCAN_ANON + type, isolated);
5053 :
5054 : /*
5055 : * There might not be eligible folios due to reclaim_idx. Check the
5056 : * remaining to prevent livelock if it's not making progress.
5057 : */
5058 : return isolated || !remaining ? scanned : 0;
5059 : }
5060 :
5061 : static int get_tier_idx(struct lruvec *lruvec, int type)
5062 : {
5063 : int tier;
5064 : struct ctrl_pos sp, pv;
5065 :
5066 : /*
5067 : * To leave a margin for fluctuations, use a larger gain factor (1:2).
5068 : * This value is chosen because any other tier would have at least twice
5069 : * as many refaults as the first tier.
5070 : */
5071 : read_ctrl_pos(lruvec, type, 0, 1, &sp);
5072 : for (tier = 1; tier < MAX_NR_TIERS; tier++) {
5073 : read_ctrl_pos(lruvec, type, tier, 2, &pv);
5074 : if (!positive_ctrl_err(&sp, &pv))
5075 : break;
5076 : }
5077 :
5078 : return tier - 1;
5079 : }
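
/*
 * The returned tier_idx is the last tier whose gain-weighted refault ratio
 * is still within the 1:2 margin of the first tier; sort_folio() then
 * protects folios in higher tiers by bumping them to a newer generation
 * instead of evicting them.
 */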
5080 :
5081 : static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx)
5082 : {
5083 : int type, tier;
5084 : struct ctrl_pos sp, pv;
5085 : int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
5086 :
5087 : /*
5088 : * Compare the first tier of anon with that of file to determine which
5089 : * type to scan. Also need to compare other tiers of the selected type
5090 : * with the first tier of the other type to determine the last tier (of
5091 : * the selected type) to evict.
5092 : */
5093 : read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
5094 : read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
5095 : type = positive_ctrl_err(&sp, &pv);
5096 :
5097 : read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
5098 : for (tier = 1; tier < MAX_NR_TIERS; tier++) {
5099 : read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
5100 : if (!positive_ctrl_err(&sp, &pv))
5101 : break;
5102 : }
5103 :
5104 : *tier_idx = tier - 1;
5105 :
5106 : return type;
5107 : }
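
/*
 * As a rough worked example, gain[anon] = swappiness and gain[file] =
 * 200 - swappiness, so with the default swappiness of 60 the comparison of
 * the first tiers becomes file_ratio * 60 <= anon_ratio * 140: file is
 * chosen for eviction unless its refault ratio exceeds roughly 2.3 times
 * the anon ratio.
 */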
5108 :
5109 : static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
5110 : int *type_scanned, struct list_head *list)
5111 : {
5112 : int i;
5113 : int type;
5114 : int scanned;
5115 : int tier = -1;
5116 : DEFINE_MIN_SEQ(lruvec);
5117 :
5118 : /*
5119 : * Try to make the obvious choice first. When anon and file are both
5120 : * available from the same generation, interpret swappiness 1 as file
5121 : * first and 200 as anon first.
5122 : */
5123 : if (!swappiness)
5124 : type = LRU_GEN_FILE;
5125 : else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE])
5126 : type = LRU_GEN_ANON;
5127 : else if (swappiness == 1)
5128 : type = LRU_GEN_FILE;
5129 : else if (swappiness == 200)
5130 : type = LRU_GEN_ANON;
5131 : else
5132 : type = get_type_to_scan(lruvec, swappiness, &tier);
5133 :
5134 : for (i = !swappiness; i < ANON_AND_FILE; i++) {
5135 : if (tier < 0)
5136 : tier = get_tier_idx(lruvec, type);
5137 :
5138 : scanned = scan_folios(lruvec, sc, type, tier, list);
5139 : if (scanned)
5140 : break;
5141 :
5142 : type = !type;
5143 : tier = -1;
5144 : }
5145 :
5146 : *type_scanned = type;
5147 :
5148 : return scanned;
5149 : }
5150 :
5151 : static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
5152 : {
5153 : int type;
5154 : int scanned;
5155 : int reclaimed;
5156 : LIST_HEAD(list);
5157 : LIST_HEAD(clean);
5158 : struct folio *folio;
5159 : struct folio *next;
5160 : enum vm_event_item item;
5161 : struct reclaim_stat stat;
5162 : struct lru_gen_mm_walk *walk;
5163 : bool skip_retry = false;
5164 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5165 : struct pglist_data *pgdat = lruvec_pgdat(lruvec);
5166 :
5167 : spin_lock_irq(&lruvec->lru_lock);
5168 :
5169 : scanned = isolate_folios(lruvec, sc, swappiness, &type, &list);
5170 :
5171 : scanned += try_to_inc_min_seq(lruvec, swappiness);
5172 :
5173 : if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
5174 : scanned = 0;
5175 :
5176 : spin_unlock_irq(&lruvec->lru_lock);
5177 :
5178 : if (list_empty(&list))
5179 : return scanned;
5180 : retry:
5181 : reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
5182 : sc->nr_reclaimed += reclaimed;
5183 :
5184 : list_for_each_entry_safe_reverse(folio, next, &list, lru) {
5185 : if (!folio_evictable(folio)) {
5186 : list_del(&folio->lru);
5187 : folio_putback_lru(folio);
5188 : continue;
5189 : }
5190 :
5191 : if (folio_test_reclaim(folio) &&
5192 : (folio_test_dirty(folio) || folio_test_writeback(folio))) {
5193 : /* restore LRU_REFS_FLAGS cleared by isolate_folio() */
5194 : if (folio_test_workingset(folio))
5195 : folio_set_referenced(folio);
5196 : continue;
5197 : }
5198 :
5199 : if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) ||
5200 : folio_mapped(folio) || folio_test_locked(folio) ||
5201 : folio_test_dirty(folio) || folio_test_writeback(folio)) {
5202 : /* don't add rejected folios to the oldest generation */
5203 : set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS,
5204 : BIT(PG_active));
5205 : continue;
5206 : }
5207 :
5208 : /* retry folios that may have missed folio_rotate_reclaimable() */
5209 : list_move(&folio->lru, &clean);
5210 : sc->nr_scanned -= folio_nr_pages(folio);
5211 : }
5212 :
5213 : spin_lock_irq(&lruvec->lru_lock);
5214 :
5215 : move_folios_to_lru(lruvec, &list);
5216 :
5217 : walk = current->reclaim_state->mm_walk;
5218 : if (walk && walk->batched)
5219 : reset_batch_size(lruvec, walk);
5220 :
5221 : item = PGSTEAL_KSWAPD + reclaimer_offset();
5222 : if (!cgroup_reclaim(sc))
5223 : __count_vm_events(item, reclaimed);
5224 : __count_memcg_events(memcg, item, reclaimed);
5225 : __count_vm_events(PGSTEAL_ANON + type, reclaimed);
5226 :
5227 : spin_unlock_irq(&lruvec->lru_lock);
5228 :
5229 : mem_cgroup_uncharge_list(&list);
5230 : free_unref_page_list(&list);
5231 :
5232 : INIT_LIST_HEAD(&list);
5233 : list_splice_init(&clean, &list);
5234 :
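     : /* at most one more pass: with skip_retry set, nothing is moved to the clean list again */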
5235 : if (!list_empty(&list)) {
5236 : skip_retry = true;
5237 : goto retry;
5238 : }
5239 :
5240 : return scanned;
5241 : }
5242 :
5243 : static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
5244 : struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
5245 : {
5246 : int gen, type, zone;
5247 : unsigned long old = 0;
5248 : unsigned long young = 0;
5249 : unsigned long total = 0;
5250 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
5251 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5252 : DEFINE_MIN_SEQ(lruvec);
5253 :
5254 : /* whether this lruvec is completely out of cold folios */
5255 : if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
5256 : *nr_to_scan = 0;
5257 : return true;
5258 : }
5259 :
5260 : for (type = !can_swap; type < ANON_AND_FILE; type++) {
5261 : unsigned long seq;
5262 :
5263 : for (seq = min_seq[type]; seq <= max_seq; seq++) {
5264 : unsigned long size = 0;
5265 :
5266 : gen = lru_gen_from_seq(seq);
5267 :
5268 : for (zone = 0; zone < MAX_NR_ZONES; zone++)
5269 : size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
5270 :
5271 : total += size;
5272 : if (seq == max_seq)
5273 : young += size;
5274 : else if (seq + MIN_NR_GENS == max_seq)
5275 : old += size;
5276 : }
5277 : }
5278 :
5279 : /* try to scrape all its memory if this memcg was deleted */
5280 : *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
5281 :
5282 : /*
5283 : * The aging tries to be lazy to reduce the overhead, while the eviction
5284 : * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
5285 : * ideal number of generations is MIN_NR_GENS+1.
5286 : */
5287 : if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
5288 : return false;
5289 :
5290 : /*
5291 : * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
5292 : * of the total number of pages for each generation. A reasonable range
5293 : * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
5294 : * aging cares about the upper bound of hot pages, while the eviction
5295 : * cares about the lower bound of cold pages.
5296 : */
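     : /*
     :  * I.e. run aging if the youngest generation holds more than 1/MIN_NR_GENS
     :  * of the pages (too many hot folios), or the generation MIN_NR_GENS below
     :  * max_seq holds less than 1/(MIN_NR_GENS + 2) of them (too few cold folios).
     :  */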
5297 : if (young * MIN_NR_GENS > total)
5298 : return true;
5299 : if (old * (MIN_NR_GENS + 2) < total)
5300 : return true;
5301 :
5302 : return false;
5303 : }
5304 :
5305 : /*
5306 : * For future optimizations:
5307 : * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
5308 : * reclaim.
5309 : */
5310 : static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap)
5311 : {
5312 : unsigned long nr_to_scan;
5313 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5314 : DEFINE_MAX_SEQ(lruvec);
5315 :
5316 : if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
5317 : return 0;
5318 :
5319 : if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
5320 : return nr_to_scan;
5321 :
5322 : /* skip the aging path at the default priority */
5323 : if (sc->priority == DEF_PRIORITY)
5324 : return nr_to_scan;
5325 :
5326 : /* skip this lruvec as it's low on cold folios */
5327 : return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0;
5328 : }
5329 :
5330 : static unsigned long get_nr_to_reclaim(struct scan_control *sc)
5331 : {
5332 : /* don't abort memcg reclaim to ensure fairness */
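     : /* (-1 is ULONG_MAX here, i.e. effectively no limit) */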
5333 : if (!root_reclaim(sc))
5334 : return -1;
5335 :
5336 : return max(sc->nr_to_reclaim, compact_gap(sc->order));
5337 : }
5338 :
5339 : static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5340 : {
5341 : long nr_to_scan;
5342 : unsigned long scanned = 0;
5343 : unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
5344 : int swappiness = get_swappiness(lruvec, sc);
5345 :
5346 : /* clean file folios are more likely to exist */
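     : /* (isolate_folios() interprets swappiness 1 as "file first") */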
5347 : if (swappiness && !(sc->gfp_mask & __GFP_IO))
5348 : swappiness = 1;
5349 :
5350 : while (true) {
5351 : int delta;
5352 :
5353 : nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
5354 : if (nr_to_scan <= 0)
5355 : break;
5356 :
5357 : delta = evict_folios(lruvec, sc, swappiness);
5358 : if (!delta)
5359 : break;
5360 :
5361 : scanned += delta;
5362 : if (scanned >= nr_to_scan)
5363 : break;
5364 :
5365 : if (sc->nr_reclaimed >= nr_to_reclaim)
5366 : break;
5367 :
5368 : cond_resched();
5369 : }
5370 :
5371 : /* whether try_to_inc_max_seq() was successful */
5372 : return nr_to_scan < 0;
5373 : }
5374 :
5375 : static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
5376 : {
5377 : bool success;
5378 : unsigned long scanned = sc->nr_scanned;
5379 : unsigned long reclaimed = sc->nr_reclaimed;
5380 : int seg = lru_gen_memcg_seg(lruvec);
5381 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5382 : struct pglist_data *pgdat = lruvec_pgdat(lruvec);
5383 :
5384 : /* see the comment on MEMCG_NR_GENS */
5385 : if (!lruvec_is_sizable(lruvec, sc))
5386 : return seg != MEMCG_LRU_TAIL ? MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
5387 :
5388 : mem_cgroup_calculate_protection(NULL, memcg);
5389 :
5390 : if (mem_cgroup_below_min(NULL, memcg))
5391 : return MEMCG_LRU_YOUNG;
5392 :
5393 : if (mem_cgroup_below_low(NULL, memcg)) {
5394 : /* see the comment on MEMCG_NR_GENS */
5395 : if (seg != MEMCG_LRU_TAIL)
5396 : return MEMCG_LRU_TAIL;
5397 :
5398 : memcg_memory_event(memcg, MEMCG_LOW);
5399 : }
5400 :
5401 : success = try_to_shrink_lruvec(lruvec, sc);
5402 :
5403 : shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
5404 :
5405 : if (!sc->proactive)
5406 : vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
5407 : sc->nr_reclaimed - reclaimed);
5408 :
5409 : flush_reclaim_state(sc);
5410 :
5411 : return success ? MEMCG_LRU_YOUNG : 0;
5412 : }
5413 :
5414 : #ifdef CONFIG_MEMCG
5415 :
5416 : static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
5417 : {
5418 : int op;
5419 : int gen;
5420 : int bin;
5421 : int first_bin;
5422 : struct lruvec *lruvec;
5423 : struct lru_gen_folio *lrugen;
5424 : struct mem_cgroup *memcg;
5425 : const struct hlist_nulls_node *pos;
5426 : unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
5427 :
5428 : bin = first_bin = get_random_u32_below(MEMCG_NR_BINS);
5429 : restart:
5430 : op = 0;
5431 : memcg = NULL;
5432 : gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
5433 :
5434 : rcu_read_lock();
5435 :
5436 : hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) {
5437 : if (op)
5438 : lru_gen_rotate_memcg(lruvec, op);
5439 :
5440 : mem_cgroup_put(memcg);
5441 :
5442 : lruvec = container_of(lrugen, struct lruvec, lrugen);
5443 : memcg = lruvec_memcg(lruvec);
5444 :
5445 : if (!mem_cgroup_tryget(memcg)) {
5446 : op = 0;
5447 : memcg = NULL;
5448 : continue;
5449 : }
5450 :
5451 : rcu_read_unlock();
5452 :
5453 : op = shrink_one(lruvec, sc);
5454 :
5455 : rcu_read_lock();
5456 :
5457 : if (sc->nr_reclaimed >= nr_to_reclaim)
5458 : break;
5459 : }
5460 :
5461 : rcu_read_unlock();
5462 :
5463 : if (op)
5464 : lru_gen_rotate_memcg(lruvec, op);
5465 :
5466 : mem_cgroup_put(memcg);
5467 :
5468 : if (sc->nr_reclaimed >= nr_to_reclaim)
5469 : return;
5470 :
5471 : /* restart if raced with lru_gen_rotate_memcg() */
5472 : if (gen != get_nulls_value(pos))
5473 : goto restart;
5474 :
5475 : /* try the rest of the bins of the current generation */
5476 : bin = get_memcg_bin(bin + 1);
5477 : if (bin != first_bin)
5478 : goto restart;
5479 : }
5480 :
5481 : static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5482 : {
5483 : struct blk_plug plug;
5484 :
5485 : VM_WARN_ON_ONCE(root_reclaim(sc));
5486 : VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap);
5487 :
5488 : lru_add_drain();
5489 :
5490 : blk_start_plug(&plug);
5491 :
5492 : set_mm_walk(NULL, sc->proactive);
5493 :
5494 : if (try_to_shrink_lruvec(lruvec, sc))
5495 : lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
5496 :
5497 : clear_mm_walk();
5498 :
5499 : blk_finish_plug(&plug);
5500 : }
5501 :
5502 : #else /* !CONFIG_MEMCG */
5503 :
5504 : static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
5505 : {
5506 : BUILD_BUG();
5507 : }
5508 :
5509 : static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5510 : {
5511 : BUILD_BUG();
5512 : }
5513 :
5514 : #endif
5515 :
5516 : static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
5517 : {
5518 : int priority;
5519 : unsigned long reclaimable;
5520 : struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
5521 :
5522 : if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH)
5523 : return;
5524 : /*
5525 : * Determine the initial priority based on ((total / MEMCG_NR_GENS) >>
5526 : * priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, where the
5527 : * estimated reclaimed_to_scanned_ratio = inactive / total.
5528 : */
5529 : reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
5530 : if (get_swappiness(lruvec, sc))
5531 : reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
5532 :
5533 : reclaimable /= MEMCG_NR_GENS;
5534 :
5535 : /* round down reclaimable and round up sc->nr_to_reclaim */
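     : /*
     :  * i.e. priority ~= ilog2(reclaimable) - ilog2(nr_to_reclaim), so that
     :  * (reclaimable >> priority) ~= nr_to_reclaim; e.g. reclaimable = 2^20
     :  * and nr_to_reclaim = 2^10 give priority = 10.
     :  */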
5536 : priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
5537 :
5538 : sc->priority = clamp(priority, 0, DEF_PRIORITY);
5539 : }
5540 :
5541 : static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
5542 : {
5543 : struct blk_plug plug;
5544 : unsigned long reclaimed = sc->nr_reclaimed;
5545 :
5546 : VM_WARN_ON_ONCE(!root_reclaim(sc));
5547 :
5548 : /*
5549 : * Unmapped clean folios are already prioritized. Scanning for more of
5550 : * them is likely futile and can cause high reclaim latency when there
5551 : * is a large number of memcgs.
5552 : */
5553 : if (!sc->may_writepage || !sc->may_unmap)
5554 : goto done;
5555 :
5556 : lru_add_drain();
5557 :
5558 : blk_start_plug(&plug);
5559 :
5560 : set_mm_walk(pgdat, sc->proactive);
5561 :
5562 : set_initial_priority(pgdat, sc);
5563 :
5564 : if (current_is_kswapd())
5565 : sc->nr_reclaimed = 0;
5566 :
5567 : if (mem_cgroup_disabled())
5568 : shrink_one(&pgdat->__lruvec, sc);
5569 : else
5570 : shrink_many(pgdat, sc);
5571 :
5572 : if (current_is_kswapd())
5573 : sc->nr_reclaimed += reclaimed;
5574 :
5575 : clear_mm_walk();
5576 :
5577 : blk_finish_plug(&plug);
5578 : done:
5579 : /* kswapd should never fail */
5580 : pgdat->kswapd_failures = 0;
5581 : }
5582 :
5583 : /******************************************************************************
5584 : * state change
5585 : ******************************************************************************/
5586 :
5587 : static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
5588 : {
5589 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
5590 :
5591 : if (lrugen->enabled) {
5592 : enum lru_list lru;
5593 :
5594 : for_each_evictable_lru(lru) {
5595 : if (!list_empty(&lruvec->lists[lru]))
5596 : return false;
5597 : }
5598 : } else {
5599 : int gen, type, zone;
5600 :
5601 : for_each_gen_type_zone(gen, type, zone) {
5602 : if (!list_empty(&lrugen->folios[gen][type][zone]))
5603 : return false;
5604 : }
5605 : }
5606 :
5607 : return true;
5608 : }
5609 :
5610 : static bool fill_evictable(struct lruvec *lruvec)
5611 : {
5612 : enum lru_list lru;
5613 : int remaining = MAX_LRU_BATCH;
5614 :
5615 : for_each_evictable_lru(lru) {
5616 : int type = is_file_lru(lru);
5617 : bool active = is_active_lru(lru);
5618 : struct list_head *head = &lruvec->lists[lru];
5619 :
5620 : while (!list_empty(head)) {
5621 : bool success;
5622 : struct folio *folio = lru_to_folio(head);
5623 :
5624 : VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
5625 : VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio);
5626 : VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
5627 : VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio);
5628 :
5629 : lruvec_del_folio(lruvec, folio);
5630 : success = lru_gen_add_folio(lruvec, folio, false);
5631 : VM_WARN_ON_ONCE(!success);
5632 :
5633 : if (!--remaining)
5634 : return false;
5635 : }
5636 : }
5637 :
5638 : return true;
5639 : }
5640 :
5641 : static bool drain_evictable(struct lruvec *lruvec)
5642 : {
5643 : int gen, type, zone;
5644 : int remaining = MAX_LRU_BATCH;
5645 :
5646 : for_each_gen_type_zone(gen, type, zone) {
5647 : struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];
5648 :
5649 : while (!list_empty(head)) {
5650 : bool success;
5651 : struct folio *folio = lru_to_folio(head);
5652 :
5653 : VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
5654 : VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
5655 : VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
5656 : VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
5657 :
5658 : success = lru_gen_del_folio(lruvec, folio, false);
5659 : VM_WARN_ON_ONCE(!success);
5660 : lruvec_add_folio(lruvec, folio);
5661 :
5662 : if (!--remaining)
5663 : return false;
5664 : }
5665 : }
5666 :
5667 : return true;
5668 : }
5669 :
5670 : static void lru_gen_change_state(bool enabled)
5671 : {
5672 : static DEFINE_MUTEX(state_mutex);
5673 :
5674 : struct mem_cgroup *memcg;
5675 :
5676 : cgroup_lock();
5677 : cpus_read_lock();
5678 : get_online_mems();
5679 : mutex_lock(&state_mutex);
5680 :
5681 : if (enabled == lru_gen_enabled())
5682 : goto unlock;
5683 :
5684 : if (enabled)
5685 : static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
5686 : else
5687 : static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
5688 :
5689 : memcg = mem_cgroup_iter(NULL, NULL, NULL);
5690 : do {
5691 : int nid;
5692 :
5693 : for_each_node(nid) {
5694 : struct lruvec *lruvec = get_lruvec(memcg, nid);
5695 :
5696 : spin_lock_irq(&lruvec->lru_lock);
5697 :
5698 : VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
5699 : VM_WARN_ON_ONCE(!state_is_valid(lruvec));
5700 :
5701 : lruvec->lrugen.enabled = enabled;
5702 :
5703 : while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
5704 : spin_unlock_irq(&lruvec->lru_lock);
5705 : cond_resched();
5706 : spin_lock_irq(&lruvec->lru_lock);
5707 : }
5708 :
5709 : spin_unlock_irq(&lruvec->lru_lock);
5710 : }
5711 :
5712 : cond_resched();
5713 : } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5714 : unlock:
5715 : mutex_unlock(&state_mutex);
5716 : put_online_mems();
5717 : cpus_read_unlock();
5718 : cgroup_unlock();
5719 : }
5720 :
5721 : /******************************************************************************
5722 : * sysfs interface
5723 : ******************************************************************************/
5724 :
5725 : static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
5726 : {
5727 : return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
5728 : }
5729 :
5730 : /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5731 : static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
5732 : const char *buf, size_t len)
5733 : {
5734 : unsigned int msecs;
5735 :
5736 : if (kstrtouint(buf, 0, &msecs))
5737 : return -EINVAL;
5738 :
5739 : WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
5740 :
5741 : return len;
5742 : }
5743 :
5744 : static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms);
5745 :
5746 : static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
5747 : {
5748 : unsigned int caps = 0;
5749 :
5750 : if (get_cap(LRU_GEN_CORE))
5751 : caps |= BIT(LRU_GEN_CORE);
5752 :
5753 : if (should_walk_mmu())
5754 : caps |= BIT(LRU_GEN_MM_WALK);
5755 :
5756 : if (should_clear_pmd_young())
5757 : caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
5758 :
5759 : return sysfs_emit(buf, "0x%04x\n", caps);
5760 : }
5761 :
5762 : /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5763 : static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
5764 : const char *buf, size_t len)
5765 : {
5766 : int i;
5767 : unsigned int caps;
5768 :
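     : /* accept "y"/"n" for all/none of the caps, or a numeric bit mask */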
5769 : if (tolower(*buf) == 'n')
5770 : caps = 0;
5771 : else if (tolower(*buf) == 'y')
5772 : caps = -1;
5773 : else if (kstrtouint(buf, 0, &caps))
5774 : return -EINVAL;
5775 :
5776 : for (i = 0; i < NR_LRU_GEN_CAPS; i++) {
5777 : bool enabled = caps & BIT(i);
5778 :
5779 : if (i == LRU_GEN_CORE)
5780 : lru_gen_change_state(enabled);
5781 : else if (enabled)
5782 : static_branch_enable(&lru_gen_caps[i]);
5783 : else
5784 : static_branch_disable(&lru_gen_caps[i]);
5785 : }
5786 :
5787 : return len;
5788 : }
5789 :
5790 : static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled);
5791 :
5792 : static struct attribute *lru_gen_attrs[] = {
5793 : &lru_gen_min_ttl_attr.attr,
5794 : &lru_gen_enabled_attr.attr,
5795 : NULL
5796 : };
5797 :
5798 : static const struct attribute_group lru_gen_attr_group = {
5799 : .name = "lru_gen",
5800 : .attrs = lru_gen_attrs,
5801 : };
5802 :
5803 : /******************************************************************************
5804 : * debugfs interface
5805 : ******************************************************************************/
5806 :
5807 : static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
5808 : {
5809 : struct mem_cgroup *memcg;
5810 : loff_t nr_to_skip = *pos;
5811 :
5812 : m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
5813 : if (!m->private)
5814 : return ERR_PTR(-ENOMEM);
5815 :
5816 : memcg = mem_cgroup_iter(NULL, NULL, NULL);
5817 : do {
5818 : int nid;
5819 :
5820 : for_each_node_state(nid, N_MEMORY) {
5821 : if (!nr_to_skip--)
5822 : return get_lruvec(memcg, nid);
5823 : }
5824 : } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5825 :
5826 : return NULL;
5827 : }
5828 :
5829 : static void lru_gen_seq_stop(struct seq_file *m, void *v)
5830 : {
5831 : if (!IS_ERR_OR_NULL(v))
5832 : mem_cgroup_iter_break(NULL, lruvec_memcg(v));
5833 :
5834 : kvfree(m->private);
5835 : m->private = NULL;
5836 : }
5837 :
5838 : static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
5839 : {
5840 : int nid = lruvec_pgdat(v)->node_id;
5841 : struct mem_cgroup *memcg = lruvec_memcg(v);
5842 :
5843 : ++*pos;
5844 :
5845 : nid = next_memory_node(nid);
5846 : if (nid == MAX_NUMNODES) {
5847 : memcg = mem_cgroup_iter(NULL, memcg, NULL);
5848 : if (!memcg)
5849 : return NULL;
5850 :
5851 : nid = first_memory_node;
5852 : }
5853 :
5854 : return get_lruvec(memcg, nid);
5855 : }
5856 :
5857 : static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
5858 : unsigned long max_seq, unsigned long *min_seq,
5859 : unsigned long seq)
5860 : {
5861 : int i;
5862 : int type, tier;
5863 : int hist = lru_hist_from_seq(seq);
5864 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
5865 :
5866 : for (tier = 0; tier < MAX_NR_TIERS; tier++) {
5867 : seq_printf(m, " %10d", tier);
5868 : for (type = 0; type < ANON_AND_FILE; type++) {
5869 : const char *s = " ";
5870 : unsigned long n[3] = {};
5871 :
5872 : if (seq == max_seq) {
5873 : s = "RT ";
5874 : n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
5875 : n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
5876 : } else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
5877 : s = "rep";
5878 : n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
5879 : n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
5880 : if (tier)
5881 : n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]);
5882 : }
5883 :
5884 : for (i = 0; i < 3; i++)
5885 : seq_printf(m, " %10lu%c", n[i], s[i]);
5886 : }
5887 : seq_putc(m, '\n');
5888 : }
5889 :
5890 : seq_puts(m, " ");
5891 : for (i = 0; i < NR_MM_STATS; i++) {
5892 : const char *s = " ";
5893 : unsigned long n = 0;
5894 :
5895 : if (seq == max_seq && NR_HIST_GENS == 1) {
5896 : s = "LOYNFA";
5897 : n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
5898 : } else if (seq != max_seq && NR_HIST_GENS > 1) {
5899 : s = "loynfa";
5900 : n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
5901 : }
5902 :
5903 : seq_printf(m, " %10lu%c", n, s[i]);
5904 : }
5905 : seq_putc(m, '\n');
5906 : }
5907 :
5908 : /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5909 : static int lru_gen_seq_show(struct seq_file *m, void *v)
5910 : {
5911 : unsigned long seq;
5912 : bool full = !debugfs_real_fops(m->file)->write;
5913 : struct lruvec *lruvec = v;
5914 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
5915 : int nid = lruvec_pgdat(lruvec)->node_id;
5916 : struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5917 : DEFINE_MAX_SEQ(lruvec);
5918 : DEFINE_MIN_SEQ(lruvec);
5919 :
5920 : if (nid == first_memory_node) {
5921 : const char *path = memcg ? m->private : "";
5922 :
5923 : #ifdef CONFIG_MEMCG
5924 : if (memcg)
5925 : cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
5926 : #endif
5927 : seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
5928 : }
5929 :
5930 : seq_printf(m, " node %5d\n", nid);
5931 :
5932 : if (!full)
5933 : seq = min_seq[LRU_GEN_ANON];
5934 : else if (max_seq >= MAX_NR_GENS)
5935 : seq = max_seq - MAX_NR_GENS + 1;
5936 : else
5937 : seq = 0;
5938 :
5939 : for (; seq <= max_seq; seq++) {
5940 : int type, zone;
5941 : int gen = lru_gen_from_seq(seq);
5942 : unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
5943 :
5944 : seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
5945 :
5946 : for (type = 0; type < ANON_AND_FILE; type++) {
5947 : unsigned long size = 0;
5948 : char mark = full && seq < min_seq[type] ? 'x' : ' ';
5949 :
5950 : for (zone = 0; zone < MAX_NR_ZONES; zone++)
5951 : size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
5952 :
5953 : seq_printf(m, " %10lu%c", size, mark);
5954 : }
5955 :
5956 : seq_putc(m, '\n');
5957 :
5958 : if (full)
5959 : lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
5960 : }
5961 :
5962 : return 0;
5963 : }
5964 :
5965 : static const struct seq_operations lru_gen_seq_ops = {
5966 : .start = lru_gen_seq_start,
5967 : .stop = lru_gen_seq_stop,
5968 : .next = lru_gen_seq_next,
5969 : .show = lru_gen_seq_show,
5970 : };
5971 :
5972 : static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5973 : bool can_swap, bool force_scan)
5974 : {
5975 : DEFINE_MAX_SEQ(lruvec);
5976 : DEFINE_MIN_SEQ(lruvec);
5977 :
5978 : if (seq < max_seq)
5979 : return 0;
5980 :
5981 : if (seq > max_seq)
5982 : return -EINVAL;
5983 :
5984 : if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq)
5985 : return -ERANGE;
5986 :
5987 : try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan);
5988 :
5989 : return 0;
5990 : }
5991 :
5992 : static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5993 : int swappiness, unsigned long nr_to_reclaim)
5994 : {
5995 : DEFINE_MAX_SEQ(lruvec);
5996 :
5997 : if (seq + MIN_NR_GENS > max_seq)
5998 : return -EINVAL;
5999 :
6000 : sc->nr_reclaimed = 0;
6001 :
6002 : while (!signal_pending(current)) {
6003 : DEFINE_MIN_SEQ(lruvec);
6004 :
6005 : if (seq < min_seq[!swappiness])
6006 : return 0;
6007 :
6008 : if (sc->nr_reclaimed >= nr_to_reclaim)
6009 : return 0;
6010 :
6011 : if (!evict_folios(lruvec, sc, swappiness))
6012 : return 0;
6013 :
6014 : cond_resched();
6015 : }
6016 :
6017 : return -EINTR;
6018 : }
6019 :
6020 : static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
6021 : struct scan_control *sc, int swappiness, unsigned long opt)
6022 : {
6023 : struct lruvec *lruvec;
6024 : int err = -EINVAL;
6025 : struct mem_cgroup *memcg = NULL;
6026 :
6027 : if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
6028 : return -EINVAL;
6029 :
6030 : if (!mem_cgroup_disabled()) {
6031 : rcu_read_lock();
6032 :
6033 : memcg = mem_cgroup_from_id(memcg_id);
6034 : if (!mem_cgroup_tryget(memcg))
6035 : memcg = NULL;
6036 :
6037 : rcu_read_unlock();
6038 :
6039 : if (!memcg)
6040 : return -EINVAL;
6041 : }
6042 :
6043 : if (memcg_id != mem_cgroup_id(memcg))
6044 : goto done;
6045 :
6046 : lruvec = get_lruvec(memcg, nid);
6047 :
6048 : if (swappiness < 0)
6049 : swappiness = get_swappiness(lruvec, sc);
6050 : else if (swappiness > 200)
6051 : goto done;
6052 :
6053 : switch (cmd) {
6054 : case '+':
6055 : err = run_aging(lruvec, seq, sc, swappiness, opt);
6056 : break;
6057 : case '-':
6058 : err = run_eviction(lruvec, seq, sc, swappiness, opt);
6059 : break;
6060 : }
6061 : done:
6062 : mem_cgroup_put(memcg);
6063 :
6064 : return err;
6065 : }
6066 :
6067 : /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
6068 : static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
6069 : size_t len, loff_t *pos)
6070 : {
6071 : void *buf;
6072 : char *cur, *next;
6073 : unsigned int flags;
6074 : struct blk_plug plug;
6075 : int err = -EINVAL;
6076 : struct scan_control sc = {
6077 : .may_writepage = true,
6078 : .may_unmap = true,
6079 : .may_swap = true,
6080 : .reclaim_idx = MAX_NR_ZONES - 1,
6081 : .gfp_mask = GFP_KERNEL,
6082 : };
6083 :
6084 : buf = kvmalloc(len + 1, GFP_KERNEL);
6085 : if (!buf)
6086 : return -ENOMEM;
6087 :
6088 : if (copy_from_user(buf, src, len)) {
6089 : kvfree(buf);
6090 : return -EFAULT;
6091 : }
6092 :
6093 : set_task_reclaim_state(current, &sc.reclaim_state);
6094 : flags = memalloc_noreclaim_save();
6095 : blk_start_plug(&plug);
6096 : if (!set_mm_walk(NULL, true)) {
6097 : err = -ENOMEM;
6098 : goto done;
6099 : }
6100 :
6101 : next = buf;
6102 : next[len] = '\0';
6103 :
6104 : while ((cur = strsep(&next, ",;\n"))) {
6105 : int n;
6106 : int end;
6107 : char cmd;
6108 : unsigned int memcg_id;
6109 : unsigned int nid;
6110 : unsigned long seq;
6111 : unsigned int swappiness = -1;
6112 : unsigned long opt = -1;
6113 :
6114 : cur = skip_spaces(cur);
6115 : if (!*cur)
6116 : continue;
6117 :
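     : /*
     :  * each command is "<'+'|'-'> memcg_id node_id seq [swappiness [opt]]";
     :  * '+' runs aging and '-' runs eviction, see run_cmd() above.
     :  */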
6118 : n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
6119 : &seq, &end, &swappiness, &end, &opt, &end);
6120 : if (n < 4 || cur[end]) {
6121 : err = -EINVAL;
6122 : break;
6123 : }
6124 :
6125 : err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
6126 : if (err)
6127 : break;
6128 : }
6129 : done:
6130 : clear_mm_walk();
6131 : blk_finish_plug(&plug);
6132 : memalloc_noreclaim_restore(flags);
6133 : set_task_reclaim_state(current, NULL);
6134 :
6135 : kvfree(buf);
6136 :
6137 : return err ? : len;
6138 : }
6139 :
6140 : static int lru_gen_seq_open(struct inode *inode, struct file *file)
6141 : {
6142 : return seq_open(file, &lru_gen_seq_ops);
6143 : }
6144 :
6145 : static const struct file_operations lru_gen_rw_fops = {
6146 : .open = lru_gen_seq_open,
6147 : .read = seq_read,
6148 : .write = lru_gen_seq_write,
6149 : .llseek = seq_lseek,
6150 : .release = seq_release,
6151 : };
6152 :
6153 : static const struct file_operations lru_gen_ro_fops = {
6154 : .open = lru_gen_seq_open,
6155 : .read = seq_read,
6156 : .llseek = seq_lseek,
6157 : .release = seq_release,
6158 : };
6159 :
6160 : /******************************************************************************
6161 : * initialization
6162 : ******************************************************************************/
6163 :
6164 : void lru_gen_init_lruvec(struct lruvec *lruvec)
6165 : {
6166 : int i;
6167 : int gen, type, zone;
6168 : struct lru_gen_folio *lrugen = &lruvec->lrugen;
6169 :
6170 : lrugen->max_seq = MIN_NR_GENS + 1;
6171 : lrugen->enabled = lru_gen_enabled();
6172 :
6173 : for (i = 0; i <= MIN_NR_GENS + 1; i++)
6174 : lrugen->timestamps[i] = jiffies;
6175 :
6176 : for_each_gen_type_zone(gen, type, zone)
6177 : INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
6178 :
6179 : lruvec->mm_state.seq = MIN_NR_GENS;
6180 : }
6181 :
6182 : #ifdef CONFIG_MEMCG
6183 :
6184 : void lru_gen_init_pgdat(struct pglist_data *pgdat)
6185 : {
6186 : int i, j;
6187 :
6188 : spin_lock_init(&pgdat->memcg_lru.lock);
6189 :
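     : /*
     :  * use the generation index as the nulls value so that shrink_many()
     :  * can detect a race with lru_gen_rotate_memcg() via get_nulls_value()
     :  */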
6190 : for (i = 0; i < MEMCG_NR_GENS; i++) {
6191 : for (j = 0; j < MEMCG_NR_BINS; j++)
6192 : INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
6193 : }
6194 : }
6195 :
6196 : void lru_gen_init_memcg(struct mem_cgroup *memcg)
6197 : {
6198 : INIT_LIST_HEAD(&memcg->mm_list.fifo);
6199 : spin_lock_init(&memcg->mm_list.lock);
6200 : }
6201 :
6202 : void lru_gen_exit_memcg(struct mem_cgroup *memcg)
6203 : {
6204 : int i;
6205 : int nid;
6206 :
6207 : VM_WARN_ON_ONCE(!list_empty(&memcg->mm_list.fifo));
6208 :
6209 : for_each_node(nid) {
6210 : struct lruvec *lruvec = get_lruvec(memcg, nid);
6211 :
6212 : VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
6213 : sizeof(lruvec->lrugen.nr_pages)));
6214 :
6215 : lruvec->lrugen.list.next = LIST_POISON1;
6216 :
6217 : for (i = 0; i < NR_BLOOM_FILTERS; i++) {
6218 : bitmap_free(lruvec->mm_state.filters[i]);
6219 : lruvec->mm_state.filters[i] = NULL;
6220 : }
6221 : }
6222 : }
6223 :
6224 : #endif /* CONFIG_MEMCG */
6225 :
6226 : static int __init init_lru_gen(void)
6227 : {
6228 : BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
6229 : BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
6230 :
6231 : if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
6232 : pr_err("lru_gen: failed to create sysfs group\n");
6233 :
6234 : debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
6235 : debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
6236 :
6237 : return 0;
6238 : }
6239 : late_initcall(init_lru_gen);
6240 :
6241 : #else /* !CONFIG_LRU_GEN */
6242 :
6243 : static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
6244 : {
6245 : }
6246 :
6247 : static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
6248 : {
6249 : }
6250 :
6251 : static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
6252 : {
6253 : }
6254 :
6255 : #endif /* CONFIG_LRU_GEN */
6256 :
6257 0 : static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
6258 : {
6259 : unsigned long nr[NR_LRU_LISTS];
6260 : unsigned long targets[NR_LRU_LISTS];
6261 : unsigned long nr_to_scan;
6262 : enum lru_list lru;
6263 0 : unsigned long nr_reclaimed = 0;
6264 0 : unsigned long nr_to_reclaim = sc->nr_to_reclaim;
6265 : bool proportional_reclaim;
6266 : struct blk_plug plug;
6267 :
6268 : if (lru_gen_enabled() && !root_reclaim(sc)) {
6269 : lru_gen_shrink_lruvec(lruvec, sc);
6270 : return;
6271 : }
6272 :
6273 0 : get_scan_count(lruvec, sc, nr);
6274 :
6275 : /* Record the original scan target for proportional adjustments later */
6276 0 : memcpy(targets, nr, sizeof(nr));
6277 :
6278 : /*
6279 : * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
6280 : * event that can occur when there is little memory pressure e.g.
6281 : * multiple streaming readers/writers. Hence, we do not abort scanning
6282 : * once the requested number of pages has been reclaimed while scanning
6283 : * at DEF_PRIORITY: the fact that we are direct reclaiming implies that
6284 : * kswapd is not keeping up and it is best to
6285 : * do a batch of work at once. For memcg reclaim one check is made to
6286 : * abort proportional reclaim if either the file or anon lru has already
6287 : * dropped to zero at the first pass.
6288 : */
6289 0 : proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
6290 0 : sc->priority == DEF_PRIORITY);
6291 :
6292 0 : blk_start_plug(&plug);
6293 0 : while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
6294 0 : nr[LRU_INACTIVE_FILE]) {
6295 : unsigned long nr_anon, nr_file, percentage;
6296 : unsigned long nr_scanned;
6297 :
6298 0 : for_each_evictable_lru(lru) {
6299 0 : if (nr[lru]) {
6300 0 : nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
6301 0 : nr[lru] -= nr_to_scan;
6302 :
6303 0 : nr_reclaimed += shrink_list(lru, nr_to_scan,
6304 : lruvec, sc);
6305 : }
6306 : }
6307 :
6308 0 : cond_resched();
6309 :
6310 0 : if (nr_reclaimed < nr_to_reclaim || proportional_reclaim)
6311 0 : continue;
6312 :
6313 : /*
6314 : * For kswapd and memcg, reclaim at least the number of pages
6315 : * requested. Ensure that the anon and file LRUs are scanned
6316 : * proportionally to what was requested by get_scan_count(). We
6317 : * stop reclaiming one LRU and reduce the amount of scanning in
6318 : * proportion to the original scan target.
6319 : */
6320 0 : nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
6321 0 : nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
6322 :
6323 : /*
6324 : * It's just vindictive to attack the larger once the smaller
6325 : * has gone to zero. And given the way we stop scanning the
6326 : * smaller below, this makes sure that we only make one nudge
6327 : * towards proportionality once we've got nr_to_reclaim.
6328 : */
6329 0 : if (!nr_file || !nr_anon)
6330 : break;
6331 :
6332 0 : if (nr_file > nr_anon) {
6333 0 : unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
6334 0 : targets[LRU_ACTIVE_ANON] + 1;
6335 0 : lru = LRU_BASE;
6336 0 : percentage = nr_anon * 100 / scan_target;
6337 : } else {
6338 0 : unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
6339 0 : targets[LRU_ACTIVE_FILE] + 1;
6340 0 : lru = LRU_FILE;
6341 0 : percentage = nr_file * 100 / scan_target;
6342 : }
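     : /*
     :  * percentage is the share of the smaller LRU's target that is still
     :  * unscanned; below, the larger LRU is trimmed so that both end up
     :  * scanned to roughly the same (100 - percentage)% of their targets,
     :  * e.g. anon ~75% scanned caps file at ~75% of its target too.
     :  */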
6343 :
6344 : /* Stop scanning the smaller of the LRU */
6345 0 : nr[lru] = 0;
6346 0 : nr[lru + LRU_ACTIVE] = 0;
6347 :
6348 : /*
6349 : * Recalculate the other LRU scan count based on its original
6350 : * scan target and the percentage of scanning already complete.
6351 : */
6352 0 : lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
6353 0 : nr_scanned = targets[lru] - nr[lru];
6354 0 : nr[lru] = targets[lru] * (100 - percentage) / 100;
6355 0 : nr[lru] -= min(nr[lru], nr_scanned);
6356 :
6357 0 : lru += LRU_ACTIVE;
6358 0 : nr_scanned = targets[lru] - nr[lru];
6359 0 : nr[lru] = targets[lru] * (100 - percentage) / 100;
6360 0 : nr[lru] -= min(nr[lru], nr_scanned);
6361 : }
6362 0 : blk_finish_plug(&plug);
6363 0 : sc->nr_reclaimed += nr_reclaimed;
6364 :
6365 : /*
6366 : * Even if we did not try to evict anon pages at all, we want to
6367 : * rebalance the anon lru active/inactive ratio.
6368 : */
6369 0 : if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) &&
6370 0 : inactive_is_low(lruvec, LRU_INACTIVE_ANON))
6371 0 : shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
6372 : sc, LRU_ACTIVE_ANON);
6373 : }
6374 :
6375 : /* Use reclaim/compaction for costly allocs or under memory pressure */
6376 : static bool in_reclaim_compaction(struct scan_control *sc)
6377 : {
6378 0 : if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
6379 0 : (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
6380 0 : sc->priority < DEF_PRIORITY - 2))
6381 : return true;
6382 :
6383 : return false;
6384 : }
6385 :
6386 : /*
6387 : * Reclaim/compaction is used for high-order allocation requests. It reclaims
6388 : * order-0 pages before compacting the zone. should_continue_reclaim() returns
6389 : * true if more pages should be reclaimed such that when the page allocator
6390 : * calls try_to_compact_pages() that it will have enough free pages to succeed.
6391 : * It will give up earlier than that if there is difficulty reclaiming pages.
6392 : */
6393 0 : static inline bool should_continue_reclaim(struct pglist_data *pgdat,
6394 : unsigned long nr_reclaimed,
6395 : struct scan_control *sc)
6396 : {
6397 : unsigned long pages_for_compaction;
6398 : unsigned long inactive_lru_pages;
6399 : int z;
6400 :
6401 : /* If not in reclaim/compaction mode, stop */
6402 0 : if (!in_reclaim_compaction(sc))
6403 : return false;
6404 :
6405 : /*
6406 : * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
6407 : * number of pages that were scanned. This returns to the caller with
6408 : * the risk that reclaim/compaction and the resulting allocation attempt
6409 : * fail. In the past we tried harder for __GFP_RETRY_MAYFAIL
6410 : * allocations by requiring that the full LRU list had been scanned
6411 : * first, assuming that a zero delta of sc->nr_scanned meant a full LRU
6412 : * scan, but that approximation was wrong: there were corner cases
6413 : * where a non-zero number of pages was always scanned.
6414 : */
6415 0 : if (!nr_reclaimed)
6416 : return false;
6417 :
6418 : /* If compaction would go ahead or the allocation would succeed, stop */
6419 0 : for (z = 0; z <= sc->reclaim_idx; z++) {
6420 0 : struct zone *zone = &pgdat->node_zones[z];
6421 0 : if (!managed_zone(zone))
6422 0 : continue;
6423 :
6424 : /* Allocation can already succeed, nothing to do */
6425 0 : if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
6426 : sc->reclaim_idx, 0))
6427 : return false;
6428 :
6429 0 : if (compaction_suitable(zone, sc->order, sc->reclaim_idx))
6430 : return false;
6431 : }
6432 :
6433 : /*
6434 : * If we have not reclaimed enough pages for compaction and the
6435 : * inactive lists are large enough, continue reclaiming
6436 : */
6437 0 : pages_for_compaction = compact_gap(sc->order);
6438 0 : inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
6439 0 : if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
6440 0 : inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
6441 :
6442 0 : return inactive_lru_pages > pages_for_compaction;
6443 : }
6444 :
6445 0 : static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
6446 : {
6447 0 : struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
6448 : struct mem_cgroup *memcg;
6449 :
6450 0 : memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
6451 : do {
6452 0 : struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6453 : unsigned long reclaimed;
6454 : unsigned long scanned;
6455 :
6456 : /*
6457 : * This loop can become CPU-bound when target memcgs
6458 : * aren't eligible for reclaim - either because they
6459 : * don't have any reclaimable pages, or because their
6460 : * memory is explicitly protected. Avoid soft lockups.
6461 : */
6462 0 : cond_resched();
6463 :
6464 0 : mem_cgroup_calculate_protection(target_memcg, memcg);
6465 :
6466 0 : if (mem_cgroup_below_min(target_memcg, memcg)) {
6467 : /*
6468 : * Hard protection.
6469 : * If there is no reclaimable memory, OOM.
6470 : */
6471 : continue;
6472 0 : } else if (mem_cgroup_below_low(target_memcg, memcg)) {
6473 : /*
6474 : * Soft protection.
6475 : * Respect the protection only as long as
6476 : * there is an unprotected supply
6477 : * of reclaimable memory from other cgroups.
6478 : */
6479 : if (!sc->memcg_low_reclaim) {
6480 : sc->memcg_low_skipped = 1;
6481 : continue;
6482 : }
6483 : memcg_memory_event(memcg, MEMCG_LOW);
6484 : }
6485 :
6486 0 : reclaimed = sc->nr_reclaimed;
6487 0 : scanned = sc->nr_scanned;
6488 :
6489 0 : shrink_lruvec(lruvec, sc);
6490 :
6491 0 : shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
6492 0 : sc->priority);
6493 :
6494 : /* Record the group's reclaim efficiency */
6495 : if (!sc->proactive)
6496 : vmpressure(sc->gfp_mask, memcg, false,
6497 : sc->nr_scanned - scanned,
6498 : sc->nr_reclaimed - reclaimed);
6499 :
6500 0 : } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
6501 0 : }
6502 :
6503 0 : static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
6504 : {
6505 : unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed;
6506 : struct lruvec *target_lruvec;
6507 0 : bool reclaimable = false;
6508 :
6509 : if (lru_gen_enabled() && root_reclaim(sc)) {
6510 : lru_gen_shrink_node(pgdat, sc);
6511 : return;
6512 : }
6513 :
6514 0 : target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
6515 :
6516 : again:
6517 0 : memset(&sc->nr, 0, sizeof(sc->nr));
6518 :
6519 0 : nr_reclaimed = sc->nr_reclaimed;
6520 0 : nr_scanned = sc->nr_scanned;
6521 :
6522 0 : prepare_scan_count(pgdat, sc);
6523 :
6524 0 : shrink_node_memcgs(pgdat, sc);
6525 :
6526 0 : flush_reclaim_state(sc);
6527 :
6528 0 : nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed;
6529 :
6530 : /* Record the subtree's reclaim efficiency */
6531 : if (!sc->proactive)
6532 : vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
6533 : sc->nr_scanned - nr_scanned, nr_node_reclaimed);
6534 :
6535 0 : if (nr_node_reclaimed)
6536 0 : reclaimable = true;
6537 :
6538 0 : if (current_is_kswapd()) {
6539 : /*
6540 : * If reclaim is isolating dirty pages under writeback,
6541 : * it implies that the long-lived page allocation rate
6542 : * is exceeding the page laundering rate. Either the
6543 : * global limits are not being effective at throttling
6544 : * processes due to the page distribution throughout
6545 : * zones or there is heavy usage of a slow backing
6546 : * device. The only option is to throttle from reclaim
6547 : * context which is not ideal as there is no guarantee
6548 : * the dirtying process is throttled in the same way
6549 : * balance_dirty_pages() manages.
6550 : *
6551 : * Once a node is flagged PGDAT_WRITEBACK, kswapd will
6552 : * count the number of pages under writeback that are flagged
6553 : * for immediate reclaim and stall if any are encountered
6554 : * in the nr_immediate check below.
6555 : */
6556 0 : if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
6557 0 : set_bit(PGDAT_WRITEBACK, &pgdat->flags);
6558 :
6559 : /* Allow kswapd to start writing pages during reclaim.*/
6560 0 : if (sc->nr.unqueued_dirty == sc->nr.file_taken)
6561 0 : set_bit(PGDAT_DIRTY, &pgdat->flags);
6562 :
6563 : /*
6564 : * If kswapd scans pages marked for immediate
6565 : * reclaim and under writeback (nr_immediate), it
6566 : * implies that pages are cycling through the LRU
6567 : * faster than they are written so forcibly stall
6568 : * until some pages complete writeback.
6569 : */
6570 0 : if (sc->nr.immediate)
6571 0 : reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
6572 : }
6573 :
6574 : /*
6575 : * Tag a node/memcg as congested if all the dirty pages were marked
6576 : * for writeback and immediate reclaim (counted in nr.congested).
6577 : *
6578 : * Legacy memcg will stall in page writeback so avoid forcibly
6579 : * stalling in reclaim_throttle().
6580 : */
6581 0 : if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) {
6582 0 : if (cgroup_reclaim(sc) && writeback_throttling_sane(sc))
6583 : set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags);
6584 :
6585 0 : if (current_is_kswapd())
6586 0 : set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags);
6587 : }
6588 :
6589 : /*
6590 : * Stall direct reclaim for IO completions if the lruvec or
6591 : * the node is congested. Allow kswapd to continue until it
6592 : * starts encountering unqueued dirty pages or cycling through
6593 : * the LRU too quickly.
6594 : */
6595 0 : if (!current_is_kswapd() && current_may_throttle() &&
6596 0 : !sc->hibernation_mode &&
6597 0 : (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) ||
6598 0 : test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags)))
6599 0 : reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
6600 :
6601 0 : if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc))
6602 : goto again;
6603 :
6604 : /*
6605 : * Kswapd gives up on balancing particular nodes after too
6606 : * many failures to reclaim anything from them and goes to
6607 : * sleep. On reclaim progress, reset the failure counter. A
6608 : * successful direct reclaim run will revive a dormant kswapd.
6609 : */
6610 0 : if (reclaimable)
6611 0 : pgdat->kswapd_failures = 0;
6612 : }
6613 :
6614 : /*
6615 : * Returns true if compaction should go ahead for a costly-order request, or
6616 : * the allocation would already succeed without compaction. Return false if we
6617 : * should reclaim first.
6618 : */
6619 0 : static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
6620 : {
6621 : unsigned long watermark;
6622 :
6623 : /* Allocation can already succeed, nothing to do */
6624 0 : if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
6625 0 : sc->reclaim_idx, 0))
6626 : return true;
6627 :
6628 : /* Compaction cannot yet proceed. Do reclaim. */
6629 0 : if (!compaction_suitable(zone, sc->order, sc->reclaim_idx))
6630 : return false;
6631 :
6632 : /*
6633 : * Compaction is already possible, but it takes time to run and there
6634 : * are potentially other callers using the pages just freed. So proceed
6635 : * with reclaim to make a buffer of free pages available to give
6636 : * compaction a reasonable chance of completing and allocating the page.
6637 : * Note that we won't actually reclaim the whole buffer in one attempt
6638 : * as the target watermark in should_continue_reclaim() is lower. But if
6639 : * we are already above the high+gap watermark, don't reclaim at all.
6640 : */
6641 0 : watermark = high_wmark_pages(zone) + compact_gap(sc->order);
6642 :
6643 0 : return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
6644 : }
6645 :
6646 0 : static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
6647 : {
6648 : /*
6649 : * If reclaim is making progress at greater than 12.5% (1/8) efficiency then
6650 : * wake all the NOPROGRESS throttled tasks.
6651 : */
6652 0 : if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) {
6653 : wait_queue_head_t *wqh;
6654 :
6655 0 : wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS];
6656 0 : if (waitqueue_active(wqh))
6657 0 : wake_up(wqh);
6658 :
6659 : return;
6660 : }
6661 :
6662 : /*
6663 : * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will
6664 : * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages
6665 : * under writeback and marked for immediate reclaim at the tail of the
6666 : * LRU.
6667 : */
6668 0 : if (current_is_kswapd() || cgroup_reclaim(sc))
6669 : return;
6670 :
6671 : /* Throttle if making no progress at high priorities. */
6672 0 : if (sc->priority == 1 && !sc->nr_reclaimed)
6673 0 : reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
6674 : }
6675 :
6676 : /*
6677 : * This is the direct reclaim path, for page-allocating processes. We only
6678 : * try to reclaim pages from zones which will satisfy the caller's allocation
6679 : * request.
6680 : *
6681 : * If a zone is deemed to be full of pinned pages then just give it a light
6682 : * scan and then give up on it.
6683 : */
6684 0 : static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
6685 : {
6686 : struct zoneref *z;
6687 : struct zone *zone;
6688 : unsigned long nr_soft_reclaimed;
6689 : unsigned long nr_soft_scanned;
6690 : gfp_t orig_mask;
6691 0 : pg_data_t *last_pgdat = NULL;
6692 0 : pg_data_t *first_pgdat = NULL;
6693 :
6694 : /*
6695 : * If the number of buffer_heads in the machine exceeds the maximum
6696 : * allowed level, force direct reclaim to scan the highmem zone as
6697 : * highmem pages could be pinning lowmem pages storing buffer_heads
6698 : */
6699 0 : orig_mask = sc->gfp_mask;
6700 0 : if (buffer_heads_over_limit) {
6701 0 : sc->gfp_mask |= __GFP_HIGHMEM;
6702 0 : sc->reclaim_idx = gfp_zone(sc->gfp_mask);
6703 : }
6704 :
6705 0 : for_each_zone_zonelist_nodemask(zone, z, zonelist,
6706 : sc->reclaim_idx, sc->nodemask) {
6707 : /*
6708 : * Take care that memory controller reclaim has only a small
6709 : * influence on the global LRU.
6710 : */
6711 0 : if (!cgroup_reclaim(sc)) {
6712 0 : if (!cpuset_zone_allowed(zone,
6713 : GFP_KERNEL | __GFP_HARDWALL))
6714 : continue;
6715 :
6716 : /*
6717 : * If we already have plenty of memory free for
6718 : * compaction in this zone, don't free any more.
6719 : * Even though compaction is invoked for any
6720 : * non-zero order, only frequent costly order
6721 : * reclamation is disruptive enough to become a
6722 : * noticeable problem, like transparent huge
6723 : * page allocations.
6724 : */
6725 0 : if (IS_ENABLED(CONFIG_COMPACTION) &&
6726 0 : sc->order > PAGE_ALLOC_COSTLY_ORDER &&
6727 0 : compaction_ready(zone, sc)) {
6728 0 : sc->compaction_ready = true;
6729 0 : continue;
6730 : }
6731 :
6732 : /*
6733 : * Shrink each node in the zonelist once. If the
6734 : * zonelist is ordered by zone (not the default) then a
6735 : * node may be shrunk multiple times but in that case
6736 : * the user prefers lower zones being preserved.
6737 : */
6738 0 : if (zone->zone_pgdat == last_pgdat)
6739 0 : continue;
6740 :
6741 : /*
6742 : * This steals pages from memory cgroups over softlimit
6743 : * and returns the number of reclaimed pages and
6744 : * scanned pages. This works for global memory pressure
6745 : * and balancing, not for a memcg's limit.
6746 : */
6747 0 : nr_soft_scanned = 0;
6748 0 : nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
6749 0 : sc->order, sc->gfp_mask,
6750 : &nr_soft_scanned);
6751 : sc->nr_reclaimed += nr_soft_reclaimed;
6752 : sc->nr_scanned += nr_soft_scanned;
6753 : /* need some check to avoid more shrink_node() */
6754 : }
6755 :
6756 0 : if (!first_pgdat)
6757 0 : first_pgdat = zone->zone_pgdat;
6758 :
6759 : /* See comment about same check for global reclaim above */
6760 : if (zone->zone_pgdat == last_pgdat)
6761 : continue;
6762 0 : last_pgdat = zone->zone_pgdat;
6763 0 : shrink_node(zone->zone_pgdat, sc);
6764 : }
6765 :
6766 0 : if (first_pgdat)
6767 0 : consider_reclaim_throttle(first_pgdat, sc);
6768 :
6769 : /*
6770 : * Restore to original mask to avoid the impact on the caller if we
6771 : * promoted it to __GFP_HIGHMEM.
6772 : */
6773 0 : sc->gfp_mask = orig_mask;
6774 0 : }
6775 :
6776 : static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
6777 : {
6778 : struct lruvec *target_lruvec;
6779 : unsigned long refaults;
6780 :
6781 : if (lru_gen_enabled())
6782 : return;
6783 :
6784 0 : target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
6785 0 : refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
6786 0 : target_lruvec->refaults[WORKINGSET_ANON] = refaults;
6787 0 : refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
6788 0 : target_lruvec->refaults[WORKINGSET_FILE] = refaults;
6789 : }
6790 :
6791 : /*
6792 : * This is the main entry point to direct page reclaim.
6793 : *
6794 : * If a full scan of the inactive list fails to free enough memory then we
6795 : * are "out of memory" and something needs to be killed.
6796 : *
6797 : * If the caller is !__GFP_FS then the probability of a failure is reasonably
6798 : * high - the zone may be full of dirty or under-writeback pages, which this
6799 : * caller can't do much about. We kick the writeback threads and take explicit
6800 : * naps in the hope that some of these pages can be written. But if the
6801 : * allocating task holds filesystem locks which prevent writeout this might not
6802 : * work, and the allocation attempt will fail.
6803 : *
6804 : * returns: 0, if no pages reclaimed
6805 : * else, the number of pages reclaimed
6806 : */
6807 0 : static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
6808 : struct scan_control *sc)
6809 : {
6810 0 : int initial_priority = sc->priority;
6811 : pg_data_t *last_pgdat;
6812 : struct zoneref *z;
6813 : struct zone *zone;
6814 : retry:
6815 : delayacct_freepages_start();
6816 :
6817 0 : if (!cgroup_reclaim(sc))
6818 0 : __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
6819 :
6820 : do {
6821 : if (!sc->proactive)
6822 : vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
6823 : sc->priority);
6824 0 : sc->nr_scanned = 0;
6825 0 : shrink_zones(zonelist, sc);
6826 :
6827 0 : if (sc->nr_reclaimed >= sc->nr_to_reclaim)
6828 : break;
6829 :
6830 0 : if (sc->compaction_ready)
6831 : break;
6832 :
6833 : /*
6834 : * If we're having trouble reclaiming, start doing
6835 : * writepage even in laptop mode.
6836 : */
6837 0 : if (sc->priority < DEF_PRIORITY - 2)
6838 0 : sc->may_writepage = 1;
6839 0 : } while (--sc->priority >= 0);
6840 :
6841 0 : last_pgdat = NULL;
6842 0 : for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
6843 : sc->nodemask) {
6844 0 : if (zone->zone_pgdat == last_pgdat)
6845 0 : continue;
6846 0 : last_pgdat = zone->zone_pgdat;
6847 :
6848 0 : snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
6849 :
6850 0 : if (cgroup_reclaim(sc)) {
6851 : struct lruvec *lruvec;
6852 :
6853 : lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
6854 : zone->zone_pgdat);
6855 : clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
6856 : }
6857 : }
6858 :
6859 : delayacct_freepages_end();
6860 :
6861 0 : if (sc->nr_reclaimed)
6862 : return sc->nr_reclaimed;
6863 :
6864 : /* Aborted reclaim to try compaction? don't OOM, then */
6865 0 : if (sc->compaction_ready)
6866 : return 1;
6867 :
6868 : /*
6869 : * We make inactive:active ratio decisions based on the node's
6870 : * composition of memory, but a restrictive reclaim_idx or a
6871 : * memory.low cgroup setting can exempt large amounts of
6872 : * memory from reclaim. Neither of which are very common, so
6873 : * instead of doing costly eligibility calculations of the
6874 : * entire cgroup subtree up front, we assume the estimates are
6875 : * good, and retry with forcible deactivation if that fails.
6876 : */
6877 0 : if (sc->skipped_deactivate) {
6878 0 : sc->priority = initial_priority;
6879 0 : sc->force_deactivate = 1;
6880 0 : sc->skipped_deactivate = 0;
6881 0 : goto retry;
6882 : }
6883 :
6884 : /* Untapped cgroup reserves? Don't OOM, retry. */
6885 0 : if (sc->memcg_low_skipped) {
6886 0 : sc->priority = initial_priority;
6887 0 : sc->force_deactivate = 0;
6888 0 : sc->memcg_low_reclaim = 1;
6889 0 : sc->memcg_low_skipped = 0;
6890 0 : goto retry;
6891 : }
6892 :
6893 : return 0;
6894 : }
6895 :
6896 0 : static bool allow_direct_reclaim(pg_data_t *pgdat)
6897 : {
6898 : struct zone *zone;
6899 0 : unsigned long pfmemalloc_reserve = 0;
6900 0 : unsigned long free_pages = 0;
6901 : int i;
6902 : bool wmark_ok;
6903 :
6904 0 : if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6905 : return true;
6906 :
6907 0 : for (i = 0; i <= ZONE_NORMAL; i++) {
6908 0 : zone = &pgdat->node_zones[i];
6909 0 : if (!managed_zone(zone))
6910 0 : continue;
6911 :
6912 0 : if (!zone_reclaimable_pages(zone))
6913 0 : continue;
6914 :
6915 0 : pfmemalloc_reserve += min_wmark_pages(zone);
6916 0 : free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES);
6917 : }
6918 :
6919 : /* If there are no reserves (unexpected config) then do not throttle */
6920 0 : if (!pfmemalloc_reserve)
6921 : return true;
6922 :
6923 0 : wmark_ok = free_pages > pfmemalloc_reserve / 2;
6924 :
6925 : /* kswapd must be awake if processes are being throttled */
6926 0 : if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
6927 0 : if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
6928 0 : WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
6929 :
6930 0 : wake_up_interruptible(&pgdat->kswapd_wait);
6931 : }
6932 :
6933 : return wmark_ok;
6934 : }
6935 :
6936 : /*
6937 : * Throttle direct reclaimers if backing storage is backed by the network
6938 : * and the PFMEMALLOC reserve for the preferred node is getting dangerously
6939 : * depleted. kswapd will continue to make progress and wake the processes
6940 : * when the low watermark is reached.
6941 : *
6942 : * Returns true if a fatal signal was delivered during throttling. If this
6943 : * happens, the page allocator should not consider triggering the OOM killer.
6944 : */
6945 0 : static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
6946 : nodemask_t *nodemask)
6947 : {
6948 : struct zoneref *z;
6949 : struct zone *zone;
6950 0 : pg_data_t *pgdat = NULL;
6951 :
6952 : /*
6953 : * Kernel threads should not be throttled as they may be indirectly
6954 : * responsible for cleaning pages necessary for reclaim to make forward
6955 : * progress. kjournald for example may enter direct reclaim while
6956 : * committing a transaction, where throttling it could force other
6957 : * processes to block on log_wait_commit().
6958 : */
6959 0 : if (current->flags & PF_KTHREAD)
6960 : goto out;
6961 :
6962 : /*
6963 : * If a fatal signal is pending, this process should not throttle.
6964 : * It should return quickly so it can exit and free its memory.
6965 : */
6966 0 : if (fatal_signal_pending(current))
6967 : goto out;
6968 :
6969 : /*
6970 : * Check if the pfmemalloc reserves are ok by finding the first node
6971 : * with a usable ZONE_NORMAL or lower zone. The expectation is that
6972 : * GFP_KERNEL will be required for allocating network buffers when
6973 : * swapping over the network so ZONE_HIGHMEM is unusable.
6974 : *
6975 : * Throttling is based on the first usable node and throttled processes
6976 : * wait on a queue until kswapd makes progress and wakes them. There
6977 : * is thus an affinity between processes waking up and where reclaim
6978 : * progress has been made, assuming the process wakes on the same node.
6979 : * More importantly, processes running on remote nodes will not compete
6980 : * for remote pfmemalloc reserves and processes on different nodes
6981 : * should make reasonable progress.
6982 : */
6983 0 : for_each_zone_zonelist_nodemask(zone, z, zonelist,
6984 : gfp_zone(gfp_mask), nodemask) {
6985 0 : if (zone_idx(zone) > ZONE_NORMAL)
6986 0 : continue;
6987 :
6988 : /* Throttle based on the first usable node */
6989 0 : pgdat = zone->zone_pgdat;
6990 0 : if (allow_direct_reclaim(pgdat))
6991 : goto out;
6992 : break;
6993 : }
6994 :
6995 : /* If no zone was usable by the allocation flags then do not throttle */
6996 0 : if (!pgdat)
6997 : goto out;
6998 :
6999 : /* Account for the throttling */
7000 0 : count_vm_event(PGSCAN_DIRECT_THROTTLE);
7001 :
7002 : /*
7003 : * If the caller cannot enter the filesystem, it's possible that it
7004 : * is due to the caller holding an FS lock or performing a journal
7005 : * transaction in the case of a filesystem like ext[3|4]. In this case,
7006 : * it is not safe to block on pfmemalloc_wait as kswapd could be
7007 : * blocked waiting on the same lock. Instead, throttle for up to a
7008 : * second before continuing.
7009 : */
7010 0 : if (!(gfp_mask & __GFP_FS))
7011 0 : wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
7012 : allow_direct_reclaim(pgdat), HZ);
7013 : else
7014 : /* Throttle until kswapd wakes the process */
7015 0 : wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
7016 : allow_direct_reclaim(pgdat));
7017 :
7018 0 : if (fatal_signal_pending(current))
7019 : return true;
7020 :
7021 : out:
7022 : return false;
7023 : }
7024 :
7025 0 : unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
7026 : gfp_t gfp_mask, nodemask_t *nodemask)
7027 : {
7028 : unsigned long nr_reclaimed;
7029 0 : struct scan_control sc = {
7030 : .nr_to_reclaim = SWAP_CLUSTER_MAX,
7031 0 : .gfp_mask = current_gfp_context(gfp_mask),
7032 0 : .reclaim_idx = gfp_zone(gfp_mask),
7033 : .order = order,
7034 : .nodemask = nodemask,
7035 : .priority = DEF_PRIORITY,
7036 0 : .may_writepage = !laptop_mode,
7037 : .may_unmap = 1,
7038 : .may_swap = 1,
7039 : };
7040 :
7041 : /*
7042 : * scan_control uses s8 fields for order, priority, and reclaim_idx.
7043 : * Confirm they are large enough for max values.
7044 : */
7045 : BUILD_BUG_ON(MAX_ORDER >= S8_MAX);
7046 : BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
7047 : BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
7048 :
7049 : /*
7050 : * Do not enter reclaim if fatal signal was delivered while throttled.
7051 : * 1 is returned so that the page allocator does not OOM kill at this
7052 : * point.
7053 : */
7054 0 : if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
7055 : return 1;
7056 :
7057 0 : set_task_reclaim_state(current, &sc.reclaim_state);
7058 0 : trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
7059 :
7060 0 : nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
7061 :
7062 0 : trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
7063 0 : set_task_reclaim_state(current, NULL);
7064 :
7065 0 : return nr_reclaimed;
7066 : }
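/*
 * Illustrative sketch (not part of this file): roughly how an allocator-side
 * caller can drive direct reclaim through try_to_free_pages(). The real page
 * allocator slow path does more work around this call (PSI/delay accounting,
 * cpuset pressure, retries); "example_direct_reclaim" is a hypothetical name
 * used only for this example.
 */
static unsigned long example_direct_reclaim(gfp_t gfp_mask, unsigned int order,
					    struct zonelist *zonelist,
					    nodemask_t *nodemask)
{
	unsigned int noreclaim_flag;
	unsigned long progress;

	/* Mark the context so reclaim cannot recurse into itself. */
	fs_reclaim_acquire(gfp_mask);
	noreclaim_flag = memalloc_noreclaim_save();

	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(gfp_mask);

	return progress;
}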
7067 :
7068 : #ifdef CONFIG_MEMCG
7069 :
7070 : /* Only used by soft limit reclaim. Do not reuse for anything else. */
7071 : unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
7072 : gfp_t gfp_mask, bool noswap,
7073 : pg_data_t *pgdat,
7074 : unsigned long *nr_scanned)
7075 : {
7076 : struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
7077 : struct scan_control sc = {
7078 : .nr_to_reclaim = SWAP_CLUSTER_MAX,
7079 : .target_mem_cgroup = memcg,
7080 : .may_writepage = !laptop_mode,
7081 : .may_unmap = 1,
7082 : .reclaim_idx = MAX_NR_ZONES - 1,
7083 : .may_swap = !noswap,
7084 : };
7085 :
7086 : WARN_ON_ONCE(!current->reclaim_state);
7087 :
7088 : sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
7089 : (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
7090 :
7091 : trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
7092 : sc.gfp_mask);
7093 :
7094 : /*
7095 : * NOTE: Although we can get the priority field, using it
7096 : * here is not a good idea, since it limits the pages we can scan.
7097 : * If we don't reclaim here, the shrink_node from balance_pgdat
7098 : * will pick up pages from other mem cgroups as well. We hack
7099 : * the priority and make it zero.
7100 : */
7101 : shrink_lruvec(lruvec, &sc);
7102 :
7103 : trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
7104 :
7105 : *nr_scanned = sc.nr_scanned;
7106 :
7107 : return sc.nr_reclaimed;
7108 : }
7109 :
7110 : unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
7111 : unsigned long nr_pages,
7112 : gfp_t gfp_mask,
7113 : unsigned int reclaim_options)
7114 : {
7115 : unsigned long nr_reclaimed;
7116 : unsigned int noreclaim_flag;
7117 : struct scan_control sc = {
7118 : .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
7119 : .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
7120 : (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
7121 : .reclaim_idx = MAX_NR_ZONES - 1,
7122 : .target_mem_cgroup = memcg,
7123 : .priority = DEF_PRIORITY,
7124 : .may_writepage = !laptop_mode,
7125 : .may_unmap = 1,
7126 : .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
7127 : .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
7128 : };
7129 : /*
7130 : * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
7131 : * equal pressure on all the nodes. This is based on the assumption that
7132 : * the reclaim does not bail out early.
7133 : */
7134 : struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
7135 :
7136 : set_task_reclaim_state(current, &sc.reclaim_state);
7137 : trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
7138 : noreclaim_flag = memalloc_noreclaim_save();
7139 :
7140 : nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
7141 :
7142 : memalloc_noreclaim_restore(noreclaim_flag);
7143 : trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
7144 : set_task_reclaim_state(current, NULL);
7145 :
7146 : return nr_reclaimed;
7147 : }
7148 : #endif
7149 :
7150 0 : static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
7151 : {
7152 : struct mem_cgroup *memcg;
7153 : struct lruvec *lruvec;
7154 :
7155 : if (lru_gen_enabled()) {
7156 : lru_gen_age_node(pgdat, sc);
7157 : return;
7158 : }
7159 :
7160 0 : if (!can_age_anon_pages(pgdat, sc))
7161 : return;
7162 :
7163 0 : lruvec = mem_cgroup_lruvec(NULL, pgdat);
7164 0 : if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
7165 : return;
7166 :
7167 0 : memcg = mem_cgroup_iter(NULL, NULL, NULL);
7168 : do {
7169 0 : lruvec = mem_cgroup_lruvec(memcg, pgdat);
7170 0 : shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
7171 : sc, LRU_ACTIVE_ANON);
7172 0 : memcg = mem_cgroup_iter(NULL, memcg, NULL);
7173 : } while (memcg);
7174 : }
7175 :
7176 : static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
7177 : {
7178 : int i;
7179 : struct zone *zone;
7180 :
7181 : /*
7182 : * Check for watermark boosts top-down as the higher zones
7183 : * are more likely to be boosted. Both watermarks and boosts
7184 : * should not be checked at the same time as reclaim would
7185 : * start prematurely when there is no boosting and a lower
7186 : * zone is balanced.
7187 : */
7188 0 : for (i = highest_zoneidx; i >= 0; i--) {
7189 0 : zone = pgdat->node_zones + i;
7190 0 : if (!managed_zone(zone))
7191 0 : continue;
7192 :
7193 0 : if (zone->watermark_boost)
7194 : return true;
7195 : }
7196 :
7197 : return false;
7198 : }
7199 :
7200 : /*
7201 : * Returns true if there is an eligible zone balanced for the requested order
7202 : * and highest_zoneidx
7203 : */
7204 1 : static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
7205 : {
7206 : int i;
7207 1 : unsigned long mark = -1;
7208 : struct zone *zone;
7209 :
7210 : /*
7211 : * Check watermarks bottom-up as lower zones are more likely to
7212 : * meet watermarks.
7213 : */
7214 1 : for (i = 0; i <= highest_zoneidx; i++) {
7215 1 : zone = pgdat->node_zones + i;
7216 :
7217 1 : if (!managed_zone(zone))
7218 0 : continue;
7219 :
7220 : if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
7221 : mark = wmark_pages(zone, WMARK_PROMO);
7222 : else
7223 1 : mark = high_wmark_pages(zone);
7224 1 : if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
7225 : return true;
7226 : }
7227 :
7228 : /*
7229 : * If a node has no managed zone within highest_zoneidx, it does not
7230 : * need balancing by definition. This can happen if a zone-restricted
7231 : * allocation tries to wake a remote kswapd.
7232 : */
7233 0 : if (mark == -1)
7234 : return true;
7235 :
7236 0 : return false;
7237 : }
7238 :
7239 : /* Clear pgdat state for congested, dirty or under writeback. */
7240 : static void clear_pgdat_congested(pg_data_t *pgdat)
7241 : {
7242 1 : struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
7243 :
7244 2 : clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags);
7245 2 : clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
7246 2 : clear_bit(PGDAT_DIRTY, &pgdat->flags);
7247 2 : clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
7248 : }
7249 :
7250 : /*
7251 : * Prepare kswapd for sleeping. This verifies that there are no processes
7252 : * waiting in throttle_direct_reclaim() and that watermarks have been met.
7253 : *
7254 : * Returns true if kswapd is ready to sleep
7255 : */
7256 1 : static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
7257 : int highest_zoneidx)
7258 : {
7259 : /*
7260 : * The throttled processes are normally woken up in balance_pgdat() as
7261 : * soon as allow_direct_reclaim() is true. But there is a potential
7262 : * race between when kswapd checks the watermarks and a process gets
7263 : * throttled. There is also a potential race if processes get
7264 : * throttled, kswapd wakes, a large process exits thereby balancing the
7265 : * zones, which causes kswapd to exit balance_pgdat() before reaching
7266 : * the wake up checks. If kswapd is going to sleep, no process should
7267 : * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
7268 : * the wake up is premature, processes will wake kswapd and get
7269 : * throttled again. The difference from wake ups in balance_pgdat() is
7270 : * that here we are under prepare_to_wait().
7271 : */
7272 2 : if (waitqueue_active(&pgdat->pfmemalloc_wait))
7273 0 : wake_up_all(&pgdat->pfmemalloc_wait);
7274 :
7275 : /* Hopeless node, leave it to direct reclaim */
7276 1 : if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
7277 : return true;
7278 :
7279 1 : if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
7280 1 : clear_pgdat_congested(pgdat);
7281 1 : return true;
7282 : }
7283 :
7284 : return false;
7285 : }
7286 :
7287 : /*
7288 : * kswapd shrinks a node of pages that are at or below the highest usable
7289 : * zone that is currently unbalanced.
7290 : *
7291 : * Returns true if kswapd scanned at least the requested number of pages to
7292 : * reclaim or if the lack of progress was due to pages under writeback.
7293 : * This is used to determine if the scanning priority needs to be raised.
7294 : */
7295 0 : static bool kswapd_shrink_node(pg_data_t *pgdat,
7296 : struct scan_control *sc)
7297 : {
7298 : struct zone *zone;
7299 : int z;
7300 :
7301 : /* Reclaim a number of pages proportional to the number of zones */
7302 0 : sc->nr_to_reclaim = 0;
7303 0 : for (z = 0; z <= sc->reclaim_idx; z++) {
7304 0 : zone = pgdat->node_zones + z;
7305 0 : if (!managed_zone(zone))
7306 0 : continue;
7307 :
7308 0 : sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
7309 : }
7310 :
7311 : /*
7312 : * Historically care was taken to put equal pressure on all zones but
7313 : * now pressure is applied based on node LRU order.
7314 : */
7315 0 : shrink_node(pgdat, sc);
7316 :
7317 : /*
7318 : * Fragmentation may mean that the system cannot be rebalanced for
7319 : * high-order allocations. If twice the allocation size has been
7320 : * reclaimed then recheck watermarks only at order-0 to prevent
7321 : * excessive reclaim. Assume that a process that requested a high-order
7322 : * allocation can direct reclaim/compact.
7323 : */
7324 0 : if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
7325 0 : sc->order = 0;
7326 :
7327 0 : return sc->nr_scanned >= sc->nr_to_reclaim;
7328 : }
7329 :
7330 : /* Page allocator PCP high watermark is lowered if reclaim is active. */
7331 : static inline void
7332 : update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
7333 : {
7334 : int i;
7335 : struct zone *zone;
7336 :
7337 0 : for (i = 0; i <= highest_zoneidx; i++) {
7338 0 : zone = pgdat->node_zones + i;
7339 :
7340 0 : if (!managed_zone(zone))
7341 0 : continue;
7342 :
7343 : if (active)
7344 0 : set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
7345 : else
7346 0 : clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
7347 : }
7348 : }
7349 :
7350 : static inline void
7351 : set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
7352 : {
7353 0 : update_reclaim_active(pgdat, highest_zoneidx, true);
7354 : }
7355 :
7356 : static inline void
7357 : clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
7358 : {
7359 0 : update_reclaim_active(pgdat, highest_zoneidx, false);
7360 : }
7361 :
7362 : /*
7363 : * For kswapd, balance_pgdat() will reclaim pages across a node from zones
7364 : * that are eligible for use by the caller until at least one zone is
7365 : * balanced.
7366 : *
7367 : * Returns the order kswapd finished reclaiming at.
7368 : *
7369 : * kswapd scans the zones in the highmem->normal->dma direction. It skips
7370 : * zones which have free_pages > high_wmark_pages(zone), but once a zone is
7371 : * found to have free_pages <= high_wmark_pages(zone), any page in that zone
7372 : * or lower is eligible for reclaim until at least one usable zone is
7373 : * balanced.
7374 : */
7375 0 : static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
7376 : {
7377 : int i;
7378 : unsigned long nr_soft_reclaimed;
7379 : unsigned long nr_soft_scanned;
7380 : unsigned long pflags;
7381 : unsigned long nr_boost_reclaim;
7382 0 : unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
7383 : bool boosted;
7384 : struct zone *zone;
7385 0 : struct scan_control sc = {
7386 : .gfp_mask = GFP_KERNEL,
7387 : .order = order,
7388 : .may_unmap = 1,
7389 : };
7390 :
7391 0 : set_task_reclaim_state(current, &sc.reclaim_state);
7392 0 : psi_memstall_enter(&pflags);
7393 0 : __fs_reclaim_acquire(_THIS_IP_);
7394 :
7395 0 : count_vm_event(PAGEOUTRUN);
7396 :
7397 : /*
7398 : * Account for the reclaim boost. Note that the zone boost is left in
7399 : * place so that parallel allocations that are near the watermark will
7400 : * stall or enter direct reclaim until kswapd is finished.
7401 : */
7402 0 : nr_boost_reclaim = 0;
7403 0 : for (i = 0; i <= highest_zoneidx; i++) {
7404 0 : zone = pgdat->node_zones + i;
7405 0 : if (!managed_zone(zone))
7406 0 : continue;
7407 :
7408 0 : nr_boost_reclaim += zone->watermark_boost;
7409 0 : zone_boosts[i] = zone->watermark_boost;
7410 : }
7411 : boosted = nr_boost_reclaim;
7412 :
7413 : restart:
7414 0 : set_reclaim_active(pgdat, highest_zoneidx);
7415 0 : sc.priority = DEF_PRIORITY;
7416 : do {
7417 0 : unsigned long nr_reclaimed = sc.nr_reclaimed;
7418 0 : bool raise_priority = true;
7419 : bool balanced;
7420 : bool ret;
7421 :
7422 0 : sc.reclaim_idx = highest_zoneidx;
7423 :
7424 : /*
7425 : * If the number of buffer_heads exceeds the maximum allowed
7426 : * then consider reclaiming from all zones. This has a dual
7427 : * purpose -- on 64-bit systems it is expected that
7428 : * buffer_heads are stripped during active rotation. On 32-bit
7429 : * systems, highmem pages can pin lowmem memory and shrinking
7430 : * buffers can relieve lowmem pressure. Reclaim may still not
7431 : * go ahead if all eligible zones for the original allocation
7432 : * request are balanced to avoid excessive reclaim from kswapd.
7433 : */
7434 0 : if (buffer_heads_over_limit) {
7435 0 : for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
7436 0 : zone = pgdat->node_zones + i;
7437 0 : if (!managed_zone(zone))
7438 0 : continue;
7439 :
7440 0 : sc.reclaim_idx = i;
7441 0 : break;
7442 : }
7443 : }
7444 :
7445 : /*
7446 : * If the pgdat is imbalanced then ignore boosting and preserve
7447 : * the watermarks for a later time and restart. Note that the
7448 : * zone watermarks will still be reset at the end of balancing
7449 : * on the grounds that the normal reclaim should be enough to
7450 : * re-evaluate if boosting is required when kswapd next wakes.
7451 : */
7452 0 : balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
7453 0 : if (!balanced && nr_boost_reclaim) {
7454 : nr_boost_reclaim = 0;
7455 : goto restart;
7456 : }
7457 :
7458 : /*
7459 : * If boosting is not active then only reclaim if there are no
7460 : * eligible zones. Note that sc.reclaim_idx is not used as
7461 : * buffer_heads_over_limit may have adjusted it.
7462 : */
7463 0 : if (!nr_boost_reclaim && balanced)
7464 : goto out;
7465 :
7466 : /* Limit the priority of boosting to avoid reclaim writeback */
7467 0 : if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
7468 0 : raise_priority = false;
7469 :
7470 : /*
7471 : * Do not writeback or swap pages for boosted reclaim. The
7472 : * intent is to relieve pressure, not to issue sub-optimal IO
7473 : * from reclaim context. If no pages are reclaimed, the
7474 : * reclaim will be aborted.
7475 : */
7476 0 : sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
7477 0 : sc.may_swap = !nr_boost_reclaim;
7478 :
7479 : /*
7480 : * Do some background aging, to give pages a chance to be
7481 : * referenced before reclaiming. All pages are rotated
7482 : * regardless of classzone as this is about consistent aging.
7483 : */
7484 0 : kswapd_age_node(pgdat, &sc);
7485 :
7486 : /*
7487 : * If we're having trouble reclaiming, start doing writepage
7488 : * even in laptop mode.
7489 : */
7490 0 : if (sc.priority < DEF_PRIORITY - 2)
7491 0 : sc.may_writepage = 1;
7492 :
7493 : /* Call soft limit reclaim before calling shrink_node. */
7494 0 : sc.nr_scanned = 0;
7495 0 : nr_soft_scanned = 0;
7496 0 : nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
7497 : sc.gfp_mask, &nr_soft_scanned);
7498 : sc.nr_reclaimed += nr_soft_reclaimed;
7499 :
7500 : /*
7501 : * There should be no need to raise the scanning priority if
7502 : * enough pages are already being scanned that the high
7503 : * watermark would be met at 100% efficiency.
7504 : */
7505 0 : if (kswapd_shrink_node(pgdat, &sc))
7506 0 : raise_priority = false;
7507 :
7508 : /*
7509 : * If the low watermark is met there is no need for processes
7510 : * to be throttled on pfmemalloc_wait as they should now be
7511 : * able to safely make forward progress. Wake them.
7512 : */
7513 0 : if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
7514 0 : allow_direct_reclaim(pgdat))
7515 0 : wake_up_all(&pgdat->pfmemalloc_wait);
7516 :
7517 : /* Check if kswapd should be suspending */
7518 0 : __fs_reclaim_release(_THIS_IP_);
7519 0 : ret = try_to_freeze();
7520 0 : __fs_reclaim_acquire(_THIS_IP_);
7521 0 : if (ret || kthread_should_stop())
7522 : break;
7523 :
7524 : /*
7525 : * Raise priority if scanning rate is too low or there was no
7526 : * progress in reclaiming pages
7527 : */
7528 0 : nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
7529 0 : nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
7530 :
7531 : /*
7532 : * If reclaim made no progress for a boost, stop reclaim as
7533 : * IO cannot be queued and it could be an infinite loop in
7534 : * extreme circumstances.
7535 : */
7536 0 : if (nr_boost_reclaim && !nr_reclaimed)
7537 : break;
7538 :
7539 0 : if (raise_priority || !nr_reclaimed)
7540 0 : sc.priority--;
7541 0 : } while (sc.priority >= 1);
7542 :
7543 0 : if (!sc.nr_reclaimed)
7544 0 : pgdat->kswapd_failures++;
7545 :
7546 : out:
7547 0 : clear_reclaim_active(pgdat, highest_zoneidx);
7548 :
7549 : /* If reclaim was boosted, account for the reclaim done in this pass */
7550 0 : if (boosted) {
7551 : unsigned long flags;
7552 :
7553 0 : for (i = 0; i <= highest_zoneidx; i++) {
7554 0 : if (!zone_boosts[i])
7555 0 : continue;
7556 :
7557 : /* Increments are under the zone lock */
7558 0 : zone = pgdat->node_zones + i;
7559 0 : spin_lock_irqsave(&zone->lock, flags);
7560 0 : zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
7561 0 : spin_unlock_irqrestore(&zone->lock, flags);
7562 : }
7563 :
7564 : /*
7565 : * As there is now likely space, wake kcompactd to defragment
7566 : * pageblocks.
7567 : */
7568 0 : wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
7569 : }
7570 :
7571 0 : snapshot_refaults(NULL, pgdat);
7572 0 : __fs_reclaim_release(_THIS_IP_);
7573 0 : psi_memstall_leave(&pflags);
7574 0 : set_task_reclaim_state(current, NULL);
7575 :
7576 : /*
7577 : * Return the order kswapd stopped reclaiming at as
7578 : * prepare_kswapd_sleep() takes it into account. If another caller
7579 : * entered the allocator slow path while kswapd was awake, order will
7580 : * remain at the higher level.
7581 : */
7582 0 : return sc.order;
7583 : }
7584 :
7585 : /*
7586 : * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
7587 : * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES, which is
7588 : * not a valid index, then either kswapd is running for the first time or it
7589 : * couldn't sleep after the previous reclaim attempt (node is still unbalanced). In that
7590 : * case return the zone index of the previous kswapd reclaim cycle.
7591 : */
7592 : static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
7593 : enum zone_type prev_highest_zoneidx)
7594 : {
7595 1 : enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7596 :
7597 1 : return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
7598 : }
7599 :
7600 1 : static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
7601 : unsigned int highest_zoneidx)
7602 : {
7603 1 : long remaining = 0;
7604 2 : DEFINE_WAIT(wait);
7605 :
7606 2 : if (freezing(current) || kthread_should_stop())
7607 0 : return;
7608 :
7609 1 : prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7610 :
7611 : /*
7612 : * Try to sleep for a short interval. Note that kcompactd will only be
7613 : * woken if it is possible to sleep for a short interval. This is
7614 : * deliberate on the assumption that if reclaim cannot keep an
7615 : * eligible zone balanced, it's also unlikely that compaction will
7616 : * succeed.
7617 : */
7618 1 : if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7619 : /*
7620 : * Compaction records what page blocks it recently failed to
7621 : * isolate pages from and skips them in the future scanning.
7622 : * When kswapd is going to sleep, it is reasonable to assume
7623 : * that page isolation and compaction may succeed, so reset the cache.
7624 : */
7625 1 : reset_isolation_suitable(pgdat);
7626 :
7627 : /*
7628 : * We have freed the memory, now we should compact it to make
7629 : * allocation of the requested order possible.
7630 : */
7631 1 : wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
7632 :
7633 1 : remaining = schedule_timeout(HZ/10);
7634 :
7635 : /*
7636 : * If woken prematurely then reset kswapd_highest_zoneidx and
7637 : * order. The values will either be from a wakeup request or
7638 : * the previous request that slept prematurely.
7639 : */
7640 0 : if (remaining) {
7641 0 : WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
7642 : kswapd_highest_zoneidx(pgdat,
7643 : highest_zoneidx));
7644 :
7645 0 : if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
7646 0 : WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
7647 : }
7648 :
7649 0 : finish_wait(&pgdat->kswapd_wait, &wait);
7650 0 : prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7651 : }
7652 :
7653 : /*
7654 : * After a short sleep, check if it was a premature sleep. If not, then
7655 : * go fully to sleep until explicitly woken up.
7656 : */
7657 0 : if (!remaining &&
7658 0 : prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7659 0 : trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
7660 :
7661 : /*
7662 : * vmstat counters are not perfectly accurate and the estimated
7663 : * value for counters such as NR_FREE_PAGES can deviate from the
7664 : * true value by nr_online_cpus * threshold. To avoid the zone
7665 : * watermarks being breached while under pressure, we reduce the
7666 : * per-cpu vmstat threshold while kswapd is awake and restore
7667 : * them before going back to sleep.
7668 : */
7669 : set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
7670 :
7671 0 : if (!kthread_should_stop())
7672 0 : schedule();
7673 :
7674 : set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
7675 : } else {
7676 0 : if (remaining)
7677 0 : count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
7678 : else
7679 0 : count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
7680 : }
7681 0 : finish_wait(&pgdat->kswapd_wait, &wait);
7682 : }
7683 :
7684 : /*
7685 : * The background pageout daemon, started as a kernel thread
7686 : * from the init process.
7687 : *
7688 : * This basically trickles out pages so that we have _some_
7689 : * free memory available even if there is no other activity
7690 : * that frees anything up. This is needed for things like routing
7691 : * etc, where we otherwise might have all activity going on in
7692 : * asynchronous contexts that cannot page things out.
7693 : *
7694 : * If there are applications that are active memory-allocators
7695 : * (most normal use), this basically shouldn't matter.
7696 : */
7697 1 : static int kswapd(void *p)
7698 : {
7699 : unsigned int alloc_order, reclaim_order;
7700 1 : unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
7701 1 : pg_data_t *pgdat = (pg_data_t *)p;
7702 1 : struct task_struct *tsk = current;
7703 1 : const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
7704 :
7705 1 : if (!cpumask_empty(cpumask))
7706 1 : set_cpus_allowed_ptr(tsk, cpumask);
7707 :
7708 : /*
7709 : * Tell the memory management that we're a "memory allocator",
7710 : * and that if we need more memory we should get access to it
7711 : * regardless (see "__alloc_pages()"). "kswapd" should
7712 : * never get caught in the normal page freeing logic.
7713 : *
7714 : * (Kswapd normally doesn't need memory anyway, but sometimes
7715 : * you need a small amount of memory in order to be able to
7716 : * page out something else, and this flag essentially protects
7717 : * us from recursively trying to free more memory as we're
7718 : * trying to free the first piece of memory in the first place).
7719 : */
7720 1 : tsk->flags |= PF_MEMALLOC | PF_KSWAPD;
7721 1 : set_freezable();
7722 :
7723 1 : WRITE_ONCE(pgdat->kswapd_order, 0);
7724 1 : WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7725 1 : atomic_set(&pgdat->nr_writeback_throttled, 0);
7726 : for ( ; ; ) {
7727 : bool ret;
7728 :
7729 1 : alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
7730 : highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7731 : highest_zoneidx);
7732 :
7733 : kswapd_try_sleep:
7734 1 : kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
7735 : highest_zoneidx);
7736 :
7737 : /* Read the new order and highest_zoneidx */
7738 0 : alloc_order = READ_ONCE(pgdat->kswapd_order);
7739 0 : highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7740 : highest_zoneidx);
7741 0 : WRITE_ONCE(pgdat->kswapd_order, 0);
7742 0 : WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7743 :
7744 0 : ret = try_to_freeze();
7745 0 : if (kthread_should_stop())
7746 : break;
7747 :
7748 : /*
7749 : * We can speed up thawing tasks if we don't call balance_pgdat
7750 : * after returning from the refrigerator
7751 : */
7752 0 : if (ret)
7753 0 : continue;
7754 :
7755 : /*
7756 : * Reclaim begins at the requested order but if a high-order
7757 : * reclaim fails then kswapd falls back to reclaiming for
7758 : * order-0. If that happens, kswapd will consider sleeping
7759 : * for the order it finished reclaiming at (reclaim_order)
7760 : * but kcompactd is woken to compact for the original
7761 : * request (alloc_order).
7762 : */
7763 0 : trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
7764 : alloc_order);
7765 0 : reclaim_order = balance_pgdat(pgdat, alloc_order,
7766 : highest_zoneidx);
7767 0 : if (reclaim_order < alloc_order)
7768 : goto kswapd_try_sleep;
7769 : }
7770 :
7771 0 : tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD);
7772 :
7773 0 : return 0;
7774 : }
7775 :
7776 : /*
7777 : * A zone is low on free memory or too fragmented for high-order memory. If
7778 : * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
7779 : * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim
7780 : * has failed or is not needed, still wake up kcompactd if only compaction is
7781 : * needed.
7782 : */
7783 0 : void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
7784 : enum zone_type highest_zoneidx)
7785 : {
7786 : pg_data_t *pgdat;
7787 : enum zone_type curr_idx;
7788 :
7789 0 : if (!managed_zone(zone))
7790 : return;
7791 :
7792 0 : if (!cpuset_zone_allowed(zone, gfp_flags))
7793 : return;
7794 :
7795 0 : pgdat = zone->zone_pgdat;
7796 0 : curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7797 :
7798 0 : if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
7799 0 : WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
7800 :
7801 0 : if (READ_ONCE(pgdat->kswapd_order) < order)
7802 0 : WRITE_ONCE(pgdat->kswapd_order, order);
7803 :
7804 0 : if (!waitqueue_active(&pgdat->kswapd_wait))
7805 : return;
7806 :
7807 : /* Hopeless node, leave it to direct reclaim if possible */
7808 0 : if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
7809 0 : (pgdat_balanced(pgdat, order, highest_zoneidx) &&
7810 0 : !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
7811 : /*
7812 : * There may be plenty of free memory available, but it's too
7813 : * fragmented for high-order allocations. Wake up kcompactd
7814 : * and rely on compaction_suitable() to determine if it's
7815 : * needed. If it fails, it will defer subsequent attempts to
7816 : * ratelimit its work.
7817 : */
7818 0 : if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
7819 0 : wakeup_kcompactd(pgdat, order, highest_zoneidx);
7820 : return;
7821 : }
7822 :
7823 0 : trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
7824 : gfp_flags);
7825 0 : wake_up_interruptible(&pgdat->kswapd_wait);
7826 : }
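/*
 * Illustrative sketch (not part of this file): a caller that has just failed
 * a low-watermark check can wake kswapd for every zone eligible for the
 * allocation, which is roughly what the page allocator's slow path does.
 * "example_wake_kswapds" is a hypothetical name used only for this example.
 */
static void example_wake_kswapds(gfp_t gfp_mask, unsigned int order,
				 struct zonelist *zonelist,
				 enum zone_type highest_zoneidx,
				 nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx,
					nodemask)
		wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
}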
7827 :
7828 : #ifdef CONFIG_HIBERNATION
7829 : /*
7830 : * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
7831 : * freed pages.
7832 : *
7833 : * Rather than trying to age LRUs, the aim is to preserve the overall
7834 : * LRU order by reclaiming preferentially
7835 : * inactive > active > active referenced > active mapped
7836 : */
7837 : unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
7838 : {
7839 : struct scan_control sc = {
7840 : .nr_to_reclaim = nr_to_reclaim,
7841 : .gfp_mask = GFP_HIGHUSER_MOVABLE,
7842 : .reclaim_idx = MAX_NR_ZONES - 1,
7843 : .priority = DEF_PRIORITY,
7844 : .may_writepage = 1,
7845 : .may_unmap = 1,
7846 : .may_swap = 1,
7847 : .hibernation_mode = 1,
7848 : };
7849 : struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
7850 : unsigned long nr_reclaimed;
7851 : unsigned int noreclaim_flag;
7852 :
7853 : fs_reclaim_acquire(sc.gfp_mask);
7854 : noreclaim_flag = memalloc_noreclaim_save();
7855 : set_task_reclaim_state(current, &sc.reclaim_state);
7856 :
7857 : nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
7858 :
7859 : set_task_reclaim_state(current, NULL);
7860 : memalloc_noreclaim_restore(noreclaim_flag);
7861 : fs_reclaim_release(sc.gfp_mask);
7862 :
7863 : return nr_reclaimed;
7864 : }
7865 : #endif /* CONFIG_HIBERNATION */
7866 :
7867 : /*
7868 : * This kswapd start function will be called by init and node-hot-add.
7869 : */
7870 1 : void __meminit kswapd_run(int nid)
7871 : {
7872 1 : pg_data_t *pgdat = NODE_DATA(nid);
7873 :
7874 : pgdat_kswapd_lock(pgdat);
7875 1 : if (!pgdat->kswapd) {
7876 2 : pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
7877 1 : if (IS_ERR(pgdat->kswapd)) {
7878 : /* failure at boot is fatal */
7879 0 : BUG_ON(system_state < SYSTEM_RUNNING);
7880 0 : pr_err("Failed to start kswapd on node %d\n", nid);
7881 0 : pgdat->kswapd = NULL;
7882 : }
7883 : }
7884 : pgdat_kswapd_unlock(pgdat);
7885 1 : }
7886 :
7887 : /*
7888 : * Called by memory hotplug when all memory in a node is offlined. Caller must
7889 : * be holding mem_hotplug_begin/done().
7890 : */
7891 0 : void __meminit kswapd_stop(int nid)
7892 : {
7893 0 : pg_data_t *pgdat = NODE_DATA(nid);
7894 : struct task_struct *kswapd;
7895 :
7896 : pgdat_kswapd_lock(pgdat);
7897 0 : kswapd = pgdat->kswapd;
7898 0 : if (kswapd) {
7899 0 : kthread_stop(kswapd);
7900 0 : pgdat->kswapd = NULL;
7901 : }
7902 : pgdat_kswapd_unlock(pgdat);
7903 0 : }
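/*
 * Illustrative sketch (not part of this file): how a memory hotplug path might
 * pair these helpers, starting kswapd when a node first gains managed memory
 * and stopping it once the node is memoryless again.
 * "example_node_memory_event" is a hypothetical name used only for this example.
 */
static void example_node_memory_event(int nid, bool node_has_memory)
{
	if (node_has_memory)
		kswapd_run(nid);	/* node can now be balanced by kswapd */
	else
		kswapd_stop(nid);	/* no managed memory left on this node */
}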
7904 :
7905 1 : static int __init kswapd_init(void)
7906 : {
7907 : int nid;
7908 :
7909 1 : swap_setup();
7910 2 : for_each_node_state(nid, N_MEMORY)
7911 1 : kswapd_run(nid);
7912 1 : return 0;
7913 : }
7914 :
7915 : module_init(kswapd_init)
7916 :
7917 : #ifdef CONFIG_NUMA
7918 : /*
7919 : * Node reclaim mode
7920 : *
7921 : * If non-zero call node_reclaim when the number of free pages falls below
7922 : * the watermarks.
7923 : */
7924 : int node_reclaim_mode __read_mostly;
7925 :
7926 : /*
7927 : * Priority for NODE_RECLAIM. This determines the fraction of pages
7928 : * of a node considered for each zone_reclaim. 4 scans 1/16th of
7929 : * a zone.
7930 : */
7931 : #define NODE_RECLAIM_PRIORITY 4
7932 :
7933 : /*
7934 : * Percentage of pages in a zone that must be unmapped for node_reclaim to
7935 : * occur.
7936 : */
7937 : int sysctl_min_unmapped_ratio = 1;
7938 :
7939 : /*
7940 : * If the number of slab pages in a zone grows beyond this percentage then
7941 : * slab reclaim needs to occur.
7942 : */
7943 : int sysctl_min_slab_ratio = 5;
7944 :
7945 : static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
7946 : {
7947 : unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
7948 : unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
7949 : node_page_state(pgdat, NR_ACTIVE_FILE);
7950 :
7951 : /*
7952 : * It's possible for there to be more file mapped pages than
7953 : * accounted for by the pages on the file LRU lists because
7954 : * tmpfs pages accounted for as ANON can also be FILE_MAPPED
7955 : */
7956 : return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
7957 : }
7958 :
7959 : /* Work out how many page cache pages we can reclaim in this reclaim_mode */
7960 : static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
7961 : {
7962 : unsigned long nr_pagecache_reclaimable;
7963 : unsigned long delta = 0;
7964 :
7965 : /*
7966 : * If RECLAIM_UNMAP is set, then all file pages are considered
7967 : * potentially reclaimable. Otherwise, we have to worry about
7968 : * pages like swapcache, and node_unmapped_file_pages() provides
7969 : * a better estimate.
7970 : */
7971 : if (node_reclaim_mode & RECLAIM_UNMAP)
7972 : nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
7973 : else
7974 : nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
7975 :
7976 : /* If we can't clean pages, remove dirty pages from consideration */
7977 : if (!(node_reclaim_mode & RECLAIM_WRITE))
7978 : delta += node_page_state(pgdat, NR_FILE_DIRTY);
7979 :
7980 : /* Watch for any possible underflows due to delta */
7981 : if (unlikely(delta > nr_pagecache_reclaimable))
7982 : delta = nr_pagecache_reclaimable;
7983 :
7984 : return nr_pagecache_reclaimable - delta;
7985 : }
7986 :
7987 : /*
7988 : * Try to free up some pages from this node through reclaim.
7989 : */
7990 : static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7991 : {
7992 : /* Minimum pages needed in order to stay on node */
7993 : const unsigned long nr_pages = 1 << order;
7994 : struct task_struct *p = current;
7995 : unsigned int noreclaim_flag;
7996 : struct scan_control sc = {
7997 : .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
7998 : .gfp_mask = current_gfp_context(gfp_mask),
7999 : .order = order,
8000 : .priority = NODE_RECLAIM_PRIORITY,
8001 : .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
8002 : .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
8003 : .may_swap = 1,
8004 : .reclaim_idx = gfp_zone(gfp_mask),
8005 : };
8006 : unsigned long pflags;
8007 :
8008 : trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
8009 : sc.gfp_mask);
8010 :
8011 : cond_resched();
8012 : psi_memstall_enter(&pflags);
8013 : fs_reclaim_acquire(sc.gfp_mask);
8014 : /*
8015 : * We need to be able to allocate from the reserves for RECLAIM_UNMAP
8016 : */
8017 : noreclaim_flag = memalloc_noreclaim_save();
8018 : set_task_reclaim_state(p, &sc.reclaim_state);
8019 :
8020 : if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
8021 : node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
8022 : /*
8023 : * Free memory by calling shrink node with increasing
8024 : * priorities until we have enough memory freed.
8025 : */
8026 : do {
8027 : shrink_node(pgdat, &sc);
8028 : } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
8029 : }
8030 :
8031 : set_task_reclaim_state(p, NULL);
8032 : memalloc_noreclaim_restore(noreclaim_flag);
8033 : fs_reclaim_release(sc.gfp_mask);
8034 : psi_memstall_leave(&pflags);
8035 :
8036 : trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
8037 :
8038 : return sc.nr_reclaimed >= nr_pages;
8039 : }
8040 :
8041 : int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
8042 : {
8043 : int ret;
8044 :
8045 : /*
8046 : * Node reclaim reclaims unmapped file backed pages and
8047 : * slab pages if we are over the defined limits.
8048 : *
8049 : * A small portion of unmapped file backed pages is needed for
8050 : * file I/O otherwise pages read by file I/O will be immediately
8051 : * thrown out if the node is overallocated. So we do not reclaim
8052 : * if less than a specified percentage of the node is used by
8053 : * unmapped file backed pages.
8054 : */
8055 : if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
8056 : node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
8057 : pgdat->min_slab_pages)
8058 : return NODE_RECLAIM_FULL;
8059 :
8060 : /*
8061 : * Do not scan if the allocation should not be delayed.
8062 : */
8063 : if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
8064 : return NODE_RECLAIM_NOSCAN;
8065 :
8066 : /*
8067 : * Only run node reclaim on the local node or on nodes that do not
8068 : * have associated processors. This will favor the local processor
8069 : * over remote processors and spread off node memory allocations
8070 : * as wide as possible.
8071 : */
8072 : if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
8073 : return NODE_RECLAIM_NOSCAN;
8074 :
8075 : if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
8076 : return NODE_RECLAIM_NOSCAN;
8077 :
8078 : ret = __node_reclaim(pgdat, gfp_mask, order);
8079 : clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
8080 :
8081 : if (!ret)
8082 : count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
8083 :
8084 : return ret;
8085 : }
8086 : #endif
8087 :
8088 : /**
8089 : * check_move_unevictable_folios - Move evictable folios to appropriate zone
8090 : * lru list
8091 : * @fbatch: Batch of lru folios to check.
8092 : *
8093 : * Checks folios for evictability; if an evictable folio is in the unevictable
8094 : * lru list, moves it to the appropriate evictable lru list. This function
8095 : * should only be used for lru folios.
8096 : */
8097 0 : void check_move_unevictable_folios(struct folio_batch *fbatch)
8098 : {
8099 0 : struct lruvec *lruvec = NULL;
8100 0 : int pgscanned = 0;
8101 0 : int pgrescued = 0;
8102 : int i;
8103 :
8104 0 : for (i = 0; i < fbatch->nr; i++) {
8105 0 : struct folio *folio = fbatch->folios[i];
8106 0 : int nr_pages = folio_nr_pages(folio);
8107 :
8108 0 : pgscanned += nr_pages;
8109 :
8110 : /* block memcg migration while the folio moves between lrus */
8111 0 : if (!folio_test_clear_lru(folio))
8112 0 : continue;
8113 :
8114 0 : lruvec = folio_lruvec_relock_irq(folio, lruvec);
8115 0 : if (folio_evictable(folio) && folio_test_unevictable(folio)) {
8116 0 : lruvec_del_folio(lruvec, folio);
8117 0 : folio_clear_unevictable(folio);
8118 0 : lruvec_add_folio(lruvec, folio);
8119 0 : pgrescued += nr_pages;
8120 : }
8121 : folio_set_lru(folio);
8122 : }
8123 :
8124 0 : if (lruvec) {
8125 0 : __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
8126 0 : __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
8127 0 : unlock_page_lruvec_irq(lruvec);
8128 0 : } else if (pgscanned) {
8129 0 : count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
8130 : }
8131 0 : }
8132 : EXPORT_SYMBOL_GPL(check_move_unevictable_folios);
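/*
 * Illustrative sketch (not part of this file): how a caller that has just made
 * a range of page-cache folios evictable again might feed them to
 * check_move_unevictable_folios() in batches. "example_rescue_mapping" is a
 * hypothetical name; the loop mirrors common folio_batch usage.
 */
static void example_rescue_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}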