LCOV - code coverage report
Current view: top level - kernel/sched - sched.h (source / functions) Hit Total Coverage
Test: coverage.info Lines: 51 93 54.8 %
Date: 2023-03-27 20:00:47 Functions: 3 7 42.9 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : /*
       3             :  * Scheduler internal types and methods:
       4             :  */
       5             : #ifndef _KERNEL_SCHED_SCHED_H
       6             : #define _KERNEL_SCHED_SCHED_H
       7             : 
       8             : #include <linux/sched/affinity.h>
       9             : #include <linux/sched/autogroup.h>
      10             : #include <linux/sched/cpufreq.h>
      11             : #include <linux/sched/deadline.h>
      12             : #include <linux/sched.h>
      13             : #include <linux/sched/loadavg.h>
      14             : #include <linux/sched/mm.h>
      15             : #include <linux/sched/rseq_api.h>
      16             : #include <linux/sched/signal.h>
      17             : #include <linux/sched/smt.h>
      18             : #include <linux/sched/stat.h>
      19             : #include <linux/sched/sysctl.h>
      20             : #include <linux/sched/task_flags.h>
      21             : #include <linux/sched/task.h>
      22             : #include <linux/sched/topology.h>
      23             : 
      24             : #include <linux/atomic.h>
      25             : #include <linux/bitmap.h>
      26             : #include <linux/bug.h>
      27             : #include <linux/capability.h>
      28             : #include <linux/cgroup_api.h>
      29             : #include <linux/cgroup.h>
      30             : #include <linux/context_tracking.h>
      31             : #include <linux/cpufreq.h>
      32             : #include <linux/cpumask_api.h>
      33             : #include <linux/ctype.h>
      34             : #include <linux/file.h>
      35             : #include <linux/fs_api.h>
      36             : #include <linux/hrtimer_api.h>
      37             : #include <linux/interrupt.h>
      38             : #include <linux/irq_work.h>
      39             : #include <linux/jiffies.h>
      40             : #include <linux/kref_api.h>
      41             : #include <linux/kthread.h>
      42             : #include <linux/ktime_api.h>
      43             : #include <linux/lockdep_api.h>
      44             : #include <linux/lockdep.h>
      45             : #include <linux/minmax.h>
      46             : #include <linux/mm.h>
      47             : #include <linux/module.h>
      48             : #include <linux/mutex_api.h>
      49             : #include <linux/plist.h>
      50             : #include <linux/poll.h>
      51             : #include <linux/proc_fs.h>
      52             : #include <linux/profile.h>
      53             : #include <linux/psi.h>
      54             : #include <linux/rcupdate.h>
      55             : #include <linux/seq_file.h>
      56             : #include <linux/seqlock.h>
      57             : #include <linux/softirq.h>
      58             : #include <linux/spinlock_api.h>
      59             : #include <linux/static_key.h>
      60             : #include <linux/stop_machine.h>
      61             : #include <linux/syscalls_api.h>
      62             : #include <linux/syscalls.h>
      63             : #include <linux/tick.h>
      64             : #include <linux/topology.h>
      65             : #include <linux/types.h>
      66             : #include <linux/u64_stats_sync_api.h>
      67             : #include <linux/uaccess.h>
      68             : #include <linux/wait_api.h>
      69             : #include <linux/wait_bit.h>
      70             : #include <linux/workqueue_api.h>
      71             : 
      72             : #include <trace/events/power.h>
      73             : #include <trace/events/sched.h>
      74             : 
      75             : #include "../workqueue_internal.h"
      76             : 
      77             : #ifdef CONFIG_CGROUP_SCHED
      78             : #include <linux/cgroup.h>
      79             : #include <linux/psi.h>
      80             : #endif
      81             : 
      82             : #ifdef CONFIG_SCHED_DEBUG
      83             : # include <linux/static_key.h>
      84             : #endif
      85             : 
      86             : #ifdef CONFIG_PARAVIRT
      87             : # include <asm/paravirt.h>
      88             : # include <asm/paravirt_api_clock.h>
      89             : #endif
      90             : 
      91             : #include "cpupri.h"
      92             : #include "cpudeadline.h"
      93             : 
      94             : #ifdef CONFIG_SCHED_DEBUG
      95             : # define SCHED_WARN_ON(x)      WARN_ONCE(x, #x)
      96             : #else
      97             : # define SCHED_WARN_ON(x)      ({ (void)(x), 0; })
      98             : #endif
      99             : 
     100             : struct rq;
     101             : struct cpuidle_state;
     102             : 
     103             : /* task_struct::on_rq states: */
     104             : #define TASK_ON_RQ_QUEUED       1
     105             : #define TASK_ON_RQ_MIGRATING    2
     106             : 
     107             : extern __read_mostly int scheduler_running;
     108             : 
     109             : extern unsigned long calc_load_update;
     110             : extern atomic_long_t calc_load_tasks;
     111             : 
     112             : extern unsigned int sysctl_sched_child_runs_first;
     113             : 
     114             : extern void calc_global_load_tick(struct rq *this_rq);
     115             : extern long calc_load_fold_active(struct rq *this_rq, long adjust);
     116             : 
     117             : extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
     118             : 
     119             : extern unsigned int sysctl_sched_rt_period;
     120             : extern int sysctl_sched_rt_runtime;
     121             : extern int sched_rr_timeslice;
     122             : 
     123             : /*
     124             :  * Helpers for converting nanosecond timing to jiffy resolution
     125             :  */
     126             : #define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
     127             : 
     128             : /*
     129             :  * Increase resolution of nice-level calculations for 64-bit architectures.
     130             :  * The extra resolution improves shares distribution and load balancing of
     131             :  * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
     132             :  * hierarchies, especially on larger systems. This is not a user-visible change
     133             :  * and does not change the user-interface for setting shares/weights.
     134             :  *
     135             :  * We increase resolution only if we have enough bits to allow this increased
     136             :  * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
     137             :  * are pretty high and the returns do not justify the increased costs.
     138             :  *
     139             :  * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
     140             :  * increase coverage and consistency always enable it on 64-bit platforms.
     141             :  */
     142             : #ifdef CONFIG_64BIT
     143             : # define NICE_0_LOAD_SHIFT      (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
     144             : # define scale_load(w)          ((w) << SCHED_FIXEDPOINT_SHIFT)
     145             : # define scale_load_down(w) \
     146             : ({ \
     147             :         unsigned long __w = (w); \
     148             :         if (__w) \
     149             :                 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
     150             :         __w; \
     151             : })
     152             : #else
     153             : # define NICE_0_LOAD_SHIFT      (SCHED_FIXEDPOINT_SHIFT)
     154             : # define scale_load(w)          (w)
     155             : # define scale_load_down(w)     (w)
     156             : #endif
     157             : 
     158             : /*
     159             :  * Task weight (visible to users) and its load (invisible to users) have
     160             :  * independent resolution, but they should be well calibrated. We use
     161             :  * scale_load() and scale_load_down(w) to convert between them. The
     162             :  * following must be true:
     163             :  *
     164             :  *  scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
     165             :  *
     166             :  */
     167             : #define NICE_0_LOAD             (1L << NICE_0_LOAD_SHIFT)
     168             : 
     169             : /*
     170             :  * Single value that decides SCHED_DEADLINE internal math precision.
     171             :  * 10 -> just above 1us
     172             :  * 9  -> just above 0.5us
     173             :  */
     174             : #define DL_SCALE                10
     175             : 
     176             : /*
     177             :  * Single value that denotes runtime == period, ie unlimited time.
     178             :  */
     179             : #define RUNTIME_INF             ((u64)~0ULL)
     180             : 
     181             : static inline int idle_policy(int policy)
     182             : {
     183        6041 :         return policy == SCHED_IDLE;
     184             : }
     185             : static inline int fair_policy(int policy)
     186             : {
     187         676 :         return policy == SCHED_NORMAL || policy == SCHED_BATCH;
     188             : }
     189             : 
     190             : static inline int rt_policy(int policy)
     191             : {
     192         348 :         return policy == SCHED_FIFO || policy == SCHED_RR;
     193             : }
     194             : 
     195             : static inline int dl_policy(int policy)
     196             : {
     197             :         return policy == SCHED_DEADLINE;
     198             : }
     199             : static inline bool valid_policy(int policy)
     200             : {
     201         676 :         return idle_policy(policy) || fair_policy(policy) ||
     202         338 :                 rt_policy(policy) || dl_policy(policy);
     203             : }
     204             : 
     205             : static inline int task_has_idle_policy(struct task_struct *p)
     206             : {
     207       12088 :         return idle_policy(p->policy);
     208             : }
     209             : 
     210             : static inline int task_has_rt_policy(struct task_struct *p)
     211             : {
     212          10 :         return rt_policy(p->policy);
     213             : }
     214             : 
     215             : static inline int task_has_dl_policy(struct task_struct *p)
     216             : {
     217           5 :         return dl_policy(p->policy);
     218             : }
     219             : 
     220             : #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
     221             : 
     222             : static inline void update_avg(u64 *avg, u64 sample)
     223             : {
     224             :         s64 diff = sample - *avg;
     225             :         *avg += diff / 8;
     226             : }
     227             : 
     228             : /*
     229             :  * Shifting a value by an exponent greater *or equal* to the size of said value
     230             :  * is UB; cap at size-1.
     231             :  */
     232             : #define shr_bound(val, shift)                                                   \
     233             :         (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
     234             : 
     235             : /*
     236             :  * !! For sched_setattr_nocheck() (kernel) only !!
     237             :  *
     238             :  * This is actually gross. :(
     239             :  *
     240             :  * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
     241             :  * tasks, but still be able to sleep. We need this on platforms that cannot
     242             :  * atomically change clock frequency. Remove once fast switching will be
     243             :  * available on such platforms.
     244             :  *
     245             :  * SUGOV stands for SchedUtil GOVernor.
     246             :  */
     247             : #define SCHED_FLAG_SUGOV        0x10000000
     248             : 
     249             : #define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
     250             : 
     251             : static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
     252             : {
     253             : #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
     254             :         return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
     255             : #else
     256             :         return false;
     257             : #endif
     258             : }
     259             : 
     260             : /*
     261             :  * Tells if entity @a should preempt entity @b.
     262             :  */
     263             : static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
     264             :                                      const struct sched_dl_entity *b)
     265             : {
     266           0 :         return dl_entity_is_special(a) ||
     267           0 :                dl_time_before(a->deadline, b->deadline);
     268             : }
     269             : 
     270             : /*
     271             :  * This is the priority-queue data structure of the RT scheduling class:
     272             :  */
     273             : struct rt_prio_array {
     274             :         DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
     275             :         struct list_head queue[MAX_RT_PRIO];
     276             : };
     277             : 
     278             : struct rt_bandwidth {
     279             :         /* nests inside the rq lock: */
     280             :         raw_spinlock_t          rt_runtime_lock;
     281             :         ktime_t                 rt_period;
     282             :         u64                     rt_runtime;
     283             :         struct hrtimer          rt_period_timer;
     284             :         unsigned int            rt_period_active;
     285             : };
     286             : 
     287             : void __dl_clear_params(struct task_struct *p);
     288             : 
     289             : struct dl_bandwidth {
     290             :         raw_spinlock_t          dl_runtime_lock;
     291             :         u64                     dl_runtime;
     292             :         u64                     dl_period;
     293             : };
     294             : 
     295             : static inline int dl_bandwidth_enabled(void)
     296             : {
     297             :         return sysctl_sched_rt_runtime >= 0;
     298             : }
     299             : 
     300             : /*
     301             :  * To keep the bandwidth of -deadline tasks under control
     302             :  * we need some place where:
     303             :  *  - store the maximum -deadline bandwidth of each cpu;
     304             :  *  - cache the fraction of bandwidth that is currently allocated in
     305             :  *    each root domain;
     306             :  *
     307             :  * This is all done in the data structure below. It is similar to the
     308             :  * one used for RT-throttling (rt_bandwidth), with the main difference
     309             :  * that, since here we are only interested in admission control, we
     310             :  * do not decrease any runtime while the group "executes", neither we
     311             :  * need a timer to replenish it.
     312             :  *
     313             :  * With respect to SMP, bandwidth is given on a per root domain basis,
     314             :  * meaning that:
     315             :  *  - bw (< 100%) is the deadline bandwidth of each CPU;
     316             :  *  - total_bw is the currently allocated bandwidth in each root domain;
     317             :  */
     318             : struct dl_bw {
     319             :         raw_spinlock_t          lock;
     320             :         u64                     bw;
     321             :         u64                     total_bw;
     322             : };
     323             : 
     324             : extern void init_dl_bw(struct dl_bw *dl_b);
     325             : extern int  sched_dl_global_validate(void);
     326             : extern void sched_dl_do_global(void);
     327             : extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
     328             : extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
     329             : extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
     330             : extern bool __checkparam_dl(const struct sched_attr *attr);
     331             : extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
     332             : extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
     333             : extern int  dl_cpu_busy(int cpu, struct task_struct *p);
     334             : 
     335             : #ifdef CONFIG_CGROUP_SCHED
     336             : 
     337             : struct cfs_rq;
     338             : struct rt_rq;
     339             : 
     340             : extern struct list_head task_groups;
     341             : 
     342             : struct cfs_bandwidth {
     343             : #ifdef CONFIG_CFS_BANDWIDTH
     344             :         raw_spinlock_t          lock;
     345             :         ktime_t                 period;
     346             :         u64                     quota;
     347             :         u64                     runtime;
     348             :         u64                     burst;
     349             :         u64                     runtime_snap;
     350             :         s64                     hierarchical_quota;
     351             : 
     352             :         u8                      idle;
     353             :         u8                      period_active;
     354             :         u8                      slack_started;
     355             :         struct hrtimer          period_timer;
     356             :         struct hrtimer          slack_timer;
     357             :         struct list_head        throttled_cfs_rq;
     358             : 
     359             :         /* Statistics: */
     360             :         int                     nr_periods;
     361             :         int                     nr_throttled;
     362             :         int                     nr_burst;
     363             :         u64                     throttled_time;
     364             :         u64                     burst_time;
     365             : #endif
     366             : };
     367             : 
     368             : /* Task group related information */
     369             : struct task_group {
     370             :         struct cgroup_subsys_state css;
     371             : 
     372             : #ifdef CONFIG_FAIR_GROUP_SCHED
     373             :         /* schedulable entities of this group on each CPU */
     374             :         struct sched_entity     **se;
     375             :         /* runqueue "owned" by this group on each CPU */
     376             :         struct cfs_rq           **cfs_rq;
     377             :         unsigned long           shares;
     378             : 
     379             :         /* A positive value indicates that this is a SCHED_IDLE group. */
     380             :         int                     idle;
     381             : 
     382             : #ifdef  CONFIG_SMP
     383             :         /*
     384             :          * load_avg can be heavily contended at clock tick time, so put
     385             :          * it in its own cacheline separated from the fields above which
     386             :          * will also be accessed at each tick.
     387             :          */
     388             :         atomic_long_t           load_avg ____cacheline_aligned;
     389             : #endif
     390             : #endif
     391             : 
     392             : #ifdef CONFIG_RT_GROUP_SCHED
     393             :         struct sched_rt_entity  **rt_se;
     394             :         struct rt_rq            **rt_rq;
     395             : 
     396             :         struct rt_bandwidth     rt_bandwidth;
     397             : #endif
     398             : 
     399             :         struct rcu_head         rcu;
     400             :         struct list_head        list;
     401             : 
     402             :         struct task_group       *parent;
     403             :         struct list_head        siblings;
     404             :         struct list_head        children;
     405             : 
     406             : #ifdef CONFIG_SCHED_AUTOGROUP
     407             :         struct autogroup        *autogroup;
     408             : #endif
     409             : 
     410             :         struct cfs_bandwidth    cfs_bandwidth;
     411             : 
     412             : #ifdef CONFIG_UCLAMP_TASK_GROUP
     413             :         /* The two decimal precision [%] value requested from user-space */
     414             :         unsigned int            uclamp_pct[UCLAMP_CNT];
     415             :         /* Clamp values requested for a task group */
     416             :         struct uclamp_se        uclamp_req[UCLAMP_CNT];
     417             :         /* Effective clamp values used for a task group */
     418             :         struct uclamp_se        uclamp[UCLAMP_CNT];
     419             : #endif
     420             : 
     421             : };
     422             : 
     423             : #ifdef CONFIG_FAIR_GROUP_SCHED
     424             : #define ROOT_TASK_GROUP_LOAD    NICE_0_LOAD
     425             : 
     426             : /*
     427             :  * A weight of 0 or 1 can cause arithmetics problems.
     428             :  * A weight of a cfs_rq is the sum of weights of which entities
     429             :  * are queued on this cfs_rq, so a weight of a entity should not be
     430             :  * too large, so as the shares value of a task group.
     431             :  * (The default weight is 1024 - so there's no practical
     432             :  *  limitation from this.)
     433             :  */
     434             : #define MIN_SHARES              (1UL <<  1)
     435             : #define MAX_SHARES              (1UL << 18)
     436             : #endif
     437             : 
     438             : typedef int (*tg_visitor)(struct task_group *, void *);
     439             : 
     440             : extern int walk_tg_tree_from(struct task_group *from,
     441             :                              tg_visitor down, tg_visitor up, void *data);
     442             : 
     443             : /*
     444             :  * Iterate the full tree, calling @down when first entering a node and @up when
     445             :  * leaving it for the final time.
     446             :  *
     447             :  * Caller must hold rcu_lock or sufficient equivalent.
     448             :  */
     449             : static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
     450             : {
     451             :         return walk_tg_tree_from(&root_task_group, down, up, data);
     452             : }
     453             : 
     454             : extern int tg_nop(struct task_group *tg, void *data);
     455             : 
     456             : extern void free_fair_sched_group(struct task_group *tg);
     457             : extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
     458             : extern void online_fair_sched_group(struct task_group *tg);
     459             : extern void unregister_fair_sched_group(struct task_group *tg);
     460             : extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
     461             :                         struct sched_entity *se, int cpu,
     462             :                         struct sched_entity *parent);
     463             : extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
     464             : 
     465             : extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
     466             : extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
     467             : extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
     468             : 
     469             : extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
     470             :                 struct sched_rt_entity *rt_se, int cpu,
     471             :                 struct sched_rt_entity *parent);
     472             : extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
     473             : extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
     474             : extern long sched_group_rt_runtime(struct task_group *tg);
     475             : extern long sched_group_rt_period(struct task_group *tg);
     476             : extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
     477             : 
     478             : extern struct task_group *sched_create_group(struct task_group *parent);
     479             : extern void sched_online_group(struct task_group *tg,
     480             :                                struct task_group *parent);
     481             : extern void sched_destroy_group(struct task_group *tg);
     482             : extern void sched_release_group(struct task_group *tg);
     483             : 
     484             : extern void sched_move_task(struct task_struct *tsk);
     485             : 
     486             : #ifdef CONFIG_FAIR_GROUP_SCHED
     487             : extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
     488             : 
     489             : extern int sched_group_set_idle(struct task_group *tg, long idle);
     490             : 
     491             : #ifdef CONFIG_SMP
     492             : extern void set_task_rq_fair(struct sched_entity *se,
     493             :                              struct cfs_rq *prev, struct cfs_rq *next);
     494             : #else /* !CONFIG_SMP */
     495             : static inline void set_task_rq_fair(struct sched_entity *se,
     496             :                              struct cfs_rq *prev, struct cfs_rq *next) { }
     497             : #endif /* CONFIG_SMP */
     498             : #endif /* CONFIG_FAIR_GROUP_SCHED */
     499             : 
     500             : #else /* CONFIG_CGROUP_SCHED */
     501             : 
     502             : struct cfs_bandwidth { };
     503             : 
     504             : #endif  /* CONFIG_CGROUP_SCHED */
     505             : 
     506             : extern void unregister_rt_sched_group(struct task_group *tg);
     507             : extern void free_rt_sched_group(struct task_group *tg);
     508             : extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
     509             : 
     510             : /*
     511             :  * u64_u32_load/u64_u32_store
     512             :  *
     513             :  * Use a copy of a u64 value to protect against data race. This is only
     514             :  * applicable for 32-bits architectures.
     515             :  */
     516             : #ifdef CONFIG_64BIT
     517             : # define u64_u32_load_copy(var, copy)       var
     518             : # define u64_u32_store_copy(var, copy, val) (var = val)
     519             : #else
     520             : # define u64_u32_load_copy(var, copy)                                   \
     521             : ({                                                                      \
     522             :         u64 __val, __val_copy;                                          \
     523             :         do {                                                            \
     524             :                 __val_copy = copy;                                      \
     525             :                 /*                                                      \
     526             :                  * paired with u64_u32_store_copy(), ordering access    \
     527             :                  * to var and copy.                                     \
     528             :                  */                                                     \
     529             :                 smp_rmb();                                              \
     530             :                 __val = var;                                            \
     531             :         } while (__val != __val_copy);                                  \
     532             :         __val;                                                          \
     533             : })
     534             : # define u64_u32_store_copy(var, copy, val)                             \
     535             : do {                                                                    \
     536             :         typeof(val) __val = (val);                                      \
     537             :         var = __val;                                                    \
     538             :         /*                                                              \
     539             :          * paired with u64_u32_load_copy(), ordering access to var and  \
     540             :          * copy.                                                        \
     541             :          */                                                             \
     542             :         smp_wmb();                                                      \
     543             :         copy = __val;                                                   \
     544             : } while (0)
     545             : #endif
     546             : # define u64_u32_load(var)      u64_u32_load_copy(var, var##_copy)
     547             : # define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
     548             : 
     549             : /* CFS-related fields in a runqueue */
     550             : struct cfs_rq {
     551             :         struct load_weight      load;
     552             :         unsigned int            nr_running;
     553             :         unsigned int            h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
     554             :         unsigned int            idle_nr_running;   /* SCHED_IDLE */
     555             :         unsigned int            idle_h_nr_running; /* SCHED_IDLE */
     556             : 
     557             :         u64                     exec_clock;
     558             :         u64                     min_vruntime;
     559             : #ifdef CONFIG_SCHED_CORE
     560             :         unsigned int            forceidle_seq;
     561             :         u64                     min_vruntime_fi;
     562             : #endif
     563             : 
     564             : #ifndef CONFIG_64BIT
     565             :         u64                     min_vruntime_copy;
     566             : #endif
     567             : 
     568             :         struct rb_root_cached   tasks_timeline;
     569             : 
     570             :         /*
     571             :          * 'curr' points to currently running entity on this cfs_rq.
     572             :          * It is set to NULL otherwise (i.e when none are currently running).
     573             :          */
     574             :         struct sched_entity     *curr;
     575             :         struct sched_entity     *next;
     576             :         struct sched_entity     *last;
     577             :         struct sched_entity     *skip;
     578             : 
     579             : #ifdef  CONFIG_SCHED_DEBUG
     580             :         unsigned int            nr_spread_over;
     581             : #endif
     582             : 
     583             : #ifdef CONFIG_SMP
     584             :         /*
     585             :          * CFS load tracking
     586             :          */
     587             :         struct sched_avg        avg;
     588             : #ifndef CONFIG_64BIT
     589             :         u64                     last_update_time_copy;
     590             : #endif
     591             :         struct {
     592             :                 raw_spinlock_t  lock ____cacheline_aligned;
     593             :                 int             nr;
     594             :                 unsigned long   load_avg;
     595             :                 unsigned long   util_avg;
     596             :                 unsigned long   runnable_avg;
     597             :         } removed;
     598             : 
     599             : #ifdef CONFIG_FAIR_GROUP_SCHED
     600             :         unsigned long           tg_load_avg_contrib;
     601             :         long                    propagate;
     602             :         long                    prop_runnable_sum;
     603             : 
     604             :         /*
     605             :          *   h_load = weight * f(tg)
     606             :          *
     607             :          * Where f(tg) is the recursive weight fraction assigned to
     608             :          * this group.
     609             :          */
     610             :         unsigned long           h_load;
     611             :         u64                     last_h_load_update;
     612             :         struct sched_entity     *h_load_next;
     613             : #endif /* CONFIG_FAIR_GROUP_SCHED */
     614             : #endif /* CONFIG_SMP */
     615             : 
     616             : #ifdef CONFIG_FAIR_GROUP_SCHED
     617             :         struct rq               *rq;    /* CPU runqueue to which this cfs_rq is attached */
     618             : 
     619             :         /*
     620             :          * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
     621             :          * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
     622             :          * (like users, containers etc.)
     623             :          *
     624             :          * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
     625             :          * This list is used during load balance.
     626             :          */
     627             :         int                     on_list;
     628             :         struct list_head        leaf_cfs_rq_list;
     629             :         struct task_group       *tg;    /* group that "owns" this runqueue */
     630             : 
     631             :         /* Locally cached copy of our task_group's idle value */
     632             :         int                     idle;
     633             : 
     634             : #ifdef CONFIG_CFS_BANDWIDTH
     635             :         int                     runtime_enabled;
     636             :         s64                     runtime_remaining;
     637             : 
     638             :         u64                     throttled_pelt_idle;
     639             : #ifndef CONFIG_64BIT
     640             :         u64                     throttled_pelt_idle_copy;
     641             : #endif
     642             :         u64                     throttled_clock;
     643             :         u64                     throttled_clock_pelt;
     644             :         u64                     throttled_clock_pelt_time;
     645             :         int                     throttled;
     646             :         int                     throttle_count;
     647             :         struct list_head        throttled_list;
     648             : #ifdef CONFIG_SMP
     649             :         struct list_head        throttled_csd_list;
     650             : #endif
     651             : #endif /* CONFIG_CFS_BANDWIDTH */
     652             : #endif /* CONFIG_FAIR_GROUP_SCHED */
     653             : };
     654             : 
     655             : static inline int rt_bandwidth_enabled(void)
     656             : {
     657           0 :         return sysctl_sched_rt_runtime >= 0;
     658             : }
     659             : 
     660             : /* RT IPI pull logic requires IRQ_WORK */
     661             : #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
     662             : # define HAVE_RT_PUSH_IPI
     663             : #endif
     664             : 
     665             : /* Real-Time classes' related field in a runqueue: */
     666             : struct rt_rq {
     667             :         struct rt_prio_array    active;
     668             :         unsigned int            rt_nr_running;
     669             :         unsigned int            rr_nr_running;
     670             : #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
     671             :         struct {
     672             :                 int             curr; /* highest queued rt task prio */
     673             : #ifdef CONFIG_SMP
     674             :                 int             next; /* next highest */
     675             : #endif
     676             :         } highest_prio;
     677             : #endif
     678             : #ifdef CONFIG_SMP
     679             :         unsigned int            rt_nr_migratory;
     680             :         unsigned int            rt_nr_total;
     681             :         int                     overloaded;
     682             :         struct plist_head       pushable_tasks;
     683             : 
     684             : #endif /* CONFIG_SMP */
     685             :         int                     rt_queued;
     686             : 
     687             :         int                     rt_throttled;
     688             :         u64                     rt_time;
     689             :         u64                     rt_runtime;
     690             :         /* Nests inside the rq lock: */
     691             :         raw_spinlock_t          rt_runtime_lock;
     692             : 
     693             : #ifdef CONFIG_RT_GROUP_SCHED
     694             :         unsigned int            rt_nr_boosted;
     695             : 
     696             :         struct rq               *rq;
     697             :         struct task_group       *tg;
     698             : #endif
     699             : };
     700             : 
     701             : static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
     702             : {
     703             :         return rt_rq->rt_queued && rt_rq->rt_nr_running;
     704             : }
     705             : 
     706             : /* Deadline class' related fields in a runqueue */
     707             : struct dl_rq {
     708             :         /* runqueue is an rbtree, ordered by deadline */
     709             :         struct rb_root_cached   root;
     710             : 
     711             :         unsigned int            dl_nr_running;
     712             : 
     713             : #ifdef CONFIG_SMP
     714             :         /*
     715             :          * Deadline values of the currently executing and the
     716             :          * earliest ready task on this rq. Caching these facilitates
     717             :          * the decision whether or not a ready but not running task
     718             :          * should migrate somewhere else.
     719             :          */
     720             :         struct {
     721             :                 u64             curr;
     722             :                 u64             next;
     723             :         } earliest_dl;
     724             : 
     725             :         unsigned int            dl_nr_migratory;
     726             :         int                     overloaded;
     727             : 
     728             :         /*
     729             :          * Tasks on this rq that can be pushed away. They are kept in
     730             :          * an rb-tree, ordered by tasks' deadlines, with caching
     731             :          * of the leftmost (earliest deadline) element.
     732             :          */
     733             :         struct rb_root_cached   pushable_dl_tasks_root;
     734             : #else
     735             :         struct dl_bw            dl_bw;
     736             : #endif
     737             :         /*
     738             :          * "Active utilization" for this runqueue: increased when a
     739             :          * task wakes up (becomes TASK_RUNNING) and decreased when a
     740             :          * task blocks
     741             :          */
     742             :         u64                     running_bw;
     743             : 
     744             :         /*
     745             :          * Utilization of the tasks "assigned" to this runqueue (including
     746             :          * the tasks that are in runqueue and the tasks that executed on this
     747             :          * CPU and blocked). Increased when a task moves to this runqueue, and
     748             :          * decreased when the task moves away (migrates, changes scheduling
     749             :          * policy, or terminates).
     750             :          * This is needed to compute the "inactive utilization" for the
     751             :          * runqueue (inactive utilization = this_bw - running_bw).
     752             :          */
     753             :         u64                     this_bw;
     754             :         u64                     extra_bw;
     755             : 
     756             :         /*
     757             :          * Inverse of the fraction of CPU utilization that can be reclaimed
     758             :          * by the GRUB algorithm.
     759             :          */
     760             :         u64                     bw_ratio;
     761             : };
     762             : 
     763             : #ifdef CONFIG_FAIR_GROUP_SCHED
     764             : /* An entity is a task if it doesn't "own" a runqueue */
     765             : #define entity_is_task(se)      (!se->my_q)
     766             : 
     767             : static inline void se_update_runnable(struct sched_entity *se)
     768             : {
     769             :         if (!entity_is_task(se))
     770             :                 se->runnable_weight = se->my_q->h_nr_running;
     771             : }
     772             : 
     773             : static inline long se_runnable(struct sched_entity *se)
     774             : {
     775             :         if (entity_is_task(se))
     776             :                 return !!se->on_rq;
     777             :         else
     778             :                 return se->runnable_weight;
     779             : }
     780             : 
     781             : #else
     782             : #define entity_is_task(se)      1
     783             : 
     784             : static inline void se_update_runnable(struct sched_entity *se) {}
     785             : 
     786             : static inline long se_runnable(struct sched_entity *se)
     787             : {
     788             :         return !!se->on_rq;
     789             : }
     790             : #endif
     791             : 
     792             : #ifdef CONFIG_SMP
     793             : /*
     794             :  * XXX we want to get rid of these helpers and use the full load resolution.
     795             :  */
     796             : static inline long se_weight(struct sched_entity *se)
     797             : {
     798             :         return scale_load_down(se->load.weight);
     799             : }
     800             : 
     801             : 
     802             : static inline bool sched_asym_prefer(int a, int b)
     803             : {
     804             :         return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
     805             : }
     806             : 
     807             : struct perf_domain {
     808             :         struct em_perf_domain *em_pd;
     809             :         struct perf_domain *next;
     810             :         struct rcu_head rcu;
     811             : };
     812             : 
     813             : /* Scheduling group status flags */
     814             : #define SG_OVERLOAD             0x1 /* More than one runnable task on a CPU. */
     815             : #define SG_OVERUTILIZED         0x2 /* One or more CPUs are over-utilized. */
     816             : 
     817             : /*
     818             :  * We add the notion of a root-domain which will be used to define per-domain
     819             :  * variables. Each exclusive cpuset essentially defines an island domain by
     820             :  * fully partitioning the member CPUs from any other cpuset. Whenever a new
     821             :  * exclusive cpuset is created, we also create and attach a new root-domain
     822             :  * object.
     823             :  *
     824             :  */
     825             : struct root_domain {
     826             :         atomic_t                refcount;
     827             :         atomic_t                rto_count;
     828             :         struct rcu_head         rcu;
     829             :         cpumask_var_t           span;
     830             :         cpumask_var_t           online;
     831             : 
     832             :         /*
     833             :          * Indicate pullable load on at least one CPU, e.g:
     834             :          * - More than one runnable task
     835             :          * - Running task is misfit
     836             :          */
     837             :         int                     overload;
     838             : 
     839             :         /* Indicate one or more cpus over-utilized (tipping point) */
     840             :         int                     overutilized;
     841             : 
     842             :         /*
     843             :          * The bit corresponding to a CPU gets set here if such CPU has more
     844             :          * than one runnable -deadline task (as it is below for RT tasks).
     845             :          */
     846             :         cpumask_var_t           dlo_mask;
     847             :         atomic_t                dlo_count;
     848             :         struct dl_bw            dl_bw;
     849             :         struct cpudl            cpudl;
     850             : 
     851             :         /*
     852             :          * Indicate whether a root_domain's dl_bw has been checked or
     853             :          * updated. It's monotonously increasing value.
     854             :          *
     855             :          * Also, some corner cases, like 'wrap around' is dangerous, but given
     856             :          * that u64 is 'big enough'. So that shouldn't be a concern.
     857             :          */
     858             :         u64 visit_gen;
     859             : 
     860             : #ifdef HAVE_RT_PUSH_IPI
     861             :         /*
     862             :          * For IPI pull requests, loop across the rto_mask.
     863             :          */
     864             :         struct irq_work         rto_push_work;
     865             :         raw_spinlock_t          rto_lock;
     866             :         /* These are only updated and read within rto_lock */
     867             :         int                     rto_loop;
     868             :         int                     rto_cpu;
     869             :         /* These atomics are updated outside of a lock */
     870             :         atomic_t                rto_loop_next;
     871             :         atomic_t                rto_loop_start;
     872             : #endif
     873             :         /*
     874             :          * The "RT overload" flag: it gets set if a CPU has more than
     875             :          * one runnable RT task.
     876             :          */
     877             :         cpumask_var_t           rto_mask;
     878             :         struct cpupri           cpupri;
     879             : 
     880             :         unsigned long           max_cpu_capacity;
     881             : 
     882             :         /*
     883             :          * NULL-terminated list of performance domains intersecting with the
     884             :          * CPUs of the rd. Protected by RCU.
     885             :          */
     886             :         struct perf_domain __rcu *pd;
     887             : };
     888             : 
     889             : extern void init_defrootdomain(void);
     890             : extern int sched_init_domains(const struct cpumask *cpu_map);
     891             : extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
     892             : extern void sched_get_rd(struct root_domain *rd);
     893             : extern void sched_put_rd(struct root_domain *rd);
     894             : 
     895             : #ifdef HAVE_RT_PUSH_IPI
     896             : extern void rto_push_irq_work_func(struct irq_work *work);
     897             : #endif
     898             : #endif /* CONFIG_SMP */
     899             : 
     900             : #ifdef CONFIG_UCLAMP_TASK
     901             : /*
     902             :  * struct uclamp_bucket - Utilization clamp bucket
     903             :  * @value: utilization clamp value for tasks on this clamp bucket
     904             :  * @tasks: number of RUNNABLE tasks on this clamp bucket
     905             :  *
     906             :  * Keep track of how many tasks are RUNNABLE for a given utilization
     907             :  * clamp value.
     908             :  */
     909             : struct uclamp_bucket {
     910             :         unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
     911             :         unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
     912             : };
     913             : 
     914             : /*
     915             :  * struct uclamp_rq - rq's utilization clamp
     916             :  * @value: currently active clamp values for a rq
     917             :  * @bucket: utilization clamp buckets affecting a rq
     918             :  *
     919             :  * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
     920             :  * A clamp value is affecting a rq when there is at least one task RUNNABLE
     921             :  * (or actually running) with that value.
     922             :  *
     923             :  * There are up to UCLAMP_CNT possible different clamp values, currently there
     924             :  * are only two: minimum utilization and maximum utilization.
     925             :  *
     926             :  * All utilization clamping values are MAX aggregated, since:
     927             :  * - for util_min: we want to run the CPU at least at the max of the minimum
     928             :  *   utilization required by its currently RUNNABLE tasks.
     929             :  * - for util_max: we want to allow the CPU to run up to the max of the
     930             :  *   maximum utilization allowed by its currently RUNNABLE tasks.
     931             :  *
     932             :  * Since on each system we expect only a limited number of different
     933             :  * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
     934             :  * the metrics required to compute all the per-rq utilization clamp values.
     935             :  */
     936             : struct uclamp_rq {
     937             :         unsigned int value;
     938             :         struct uclamp_bucket bucket[UCLAMP_BUCKETS];
     939             : };
     940             : 
     941             : DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
     942             : #endif /* CONFIG_UCLAMP_TASK */
     943             : 
     944             : struct rq;
     945             : struct balance_callback {
     946             :         struct balance_callback *next;
     947             :         void (*func)(struct rq *rq);
     948             : };
     949             : 
     950             : /*
     951             :  * This is the main, per-CPU runqueue data structure.
     952             :  *
     953             :  * Locking rule: those places that want to lock multiple runqueues
     954             :  * (such as the load balancing or the thread migration code), lock
     955             :  * acquire operations must be ordered by ascending &runqueue.
     956             :  */
     957             : struct rq {
     958             :         /* runqueue lock: */
     959             :         raw_spinlock_t          __lock;
     960             : 
     961             :         /*
     962             :          * nr_running and cpu_load should be in the same cacheline because
     963             :          * remote CPUs use both these fields when doing load calculation.
     964             :          */
     965             :         unsigned int            nr_running;
     966             : #ifdef CONFIG_NUMA_BALANCING
     967             :         unsigned int            nr_numa_running;
     968             :         unsigned int            nr_preferred_running;
     969             :         unsigned int            numa_migrate_on;
     970             : #endif
     971             : #ifdef CONFIG_NO_HZ_COMMON
     972             : #ifdef CONFIG_SMP
     973             :         unsigned long           last_blocked_load_update_tick;
     974             :         unsigned int            has_blocked_load;
     975             :         call_single_data_t      nohz_csd;
     976             : #endif /* CONFIG_SMP */
     977             :         unsigned int            nohz_tick_stopped;
     978             :         atomic_t                nohz_flags;
     979             : #endif /* CONFIG_NO_HZ_COMMON */
     980             : 
     981             : #ifdef CONFIG_SMP
     982             :         unsigned int            ttwu_pending;
     983             : #endif
     984             :         u64                     nr_switches;
     985             : 
     986             : #ifdef CONFIG_UCLAMP_TASK
     987             :         /* Utilization clamp values based on CPU's RUNNABLE tasks */
     988             :         struct uclamp_rq        uclamp[UCLAMP_CNT] ____cacheline_aligned;
     989             :         unsigned int            uclamp_flags;
     990             : #define UCLAMP_FLAG_IDLE 0x01
     991             : #endif
     992             : 
     993             :         struct cfs_rq           cfs;
     994             :         struct rt_rq            rt;
     995             :         struct dl_rq            dl;
     996             : 
     997             : #ifdef CONFIG_FAIR_GROUP_SCHED
     998             :         /* list of leaf cfs_rq on this CPU: */
     999             :         struct list_head        leaf_cfs_rq_list;
    1000             :         struct list_head        *tmp_alone_branch;
    1001             : #endif /* CONFIG_FAIR_GROUP_SCHED */
    1002             : 
    1003             :         /*
    1004             :          * This is part of a global counter where only the total sum
    1005             :          * over all CPUs matters. A task can increase this counter on
    1006             :          * one CPU and if it got migrated afterwards it may decrease
    1007             :          * it on another CPU. Always updated under the runqueue lock:
    1008             :          */
    1009             :         unsigned int            nr_uninterruptible;
    1010             : 
    1011             :         struct task_struct __rcu        *curr;
    1012             :         struct task_struct      *idle;
    1013             :         struct task_struct      *stop;
    1014             :         unsigned long           next_balance;
    1015             :         struct mm_struct        *prev_mm;
    1016             : 
    1017             :         unsigned int            clock_update_flags;
    1018             :         u64                     clock;
    1019             :         /* Ensure that all clocks are in the same cache line */
    1020             :         u64                     clock_task ____cacheline_aligned;
    1021             :         u64                     clock_pelt;
    1022             :         unsigned long           lost_idle_time;
    1023             :         u64                     clock_pelt_idle;
    1024             :         u64                     clock_idle;
    1025             : #ifndef CONFIG_64BIT
    1026             :         u64                     clock_pelt_idle_copy;
    1027             :         u64                     clock_idle_copy;
    1028             : #endif
    1029             : 
    1030             :         atomic_t                nr_iowait;
    1031             : 
    1032             : #ifdef CONFIG_SCHED_DEBUG
    1033             :         u64 last_seen_need_resched_ns;
    1034             :         int ticks_without_resched;
    1035             : #endif
    1036             : 
    1037             : #ifdef CONFIG_MEMBARRIER
    1038             :         int membarrier_state;
    1039             : #endif
    1040             : 
    1041             : #ifdef CONFIG_SMP
    1042             :         struct root_domain              *rd;
    1043             :         struct sched_domain __rcu       *sd;
    1044             : 
    1045             :         unsigned long           cpu_capacity;
    1046             :         unsigned long           cpu_capacity_orig;
    1047             : 
    1048             :         struct balance_callback *balance_callback;
    1049             : 
    1050             :         unsigned char           nohz_idle_balance;
    1051             :         unsigned char           idle_balance;
    1052             : 
    1053             :         unsigned long           misfit_task_load;
    1054             : 
    1055             :         /* For active balancing */
    1056             :         int                     active_balance;
    1057             :         int                     push_cpu;
    1058             :         struct cpu_stop_work    active_balance_work;
    1059             : 
    1060             :         /* CPU of this runqueue: */
    1061             :         int                     cpu;
    1062             :         int                     online;
    1063             : 
    1064             :         struct list_head cfs_tasks;
    1065             : 
    1066             :         struct sched_avg        avg_rt;
    1067             :         struct sched_avg        avg_dl;
    1068             : #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
    1069             :         struct sched_avg        avg_irq;
    1070             : #endif
    1071             : #ifdef CONFIG_SCHED_THERMAL_PRESSURE
    1072             :         struct sched_avg        avg_thermal;
    1073             : #endif
    1074             :         u64                     idle_stamp;
    1075             :         u64                     avg_idle;
    1076             : 
    1077             :         unsigned long           wake_stamp;
    1078             :         u64                     wake_avg_idle;
    1079             : 
    1080             :         /* This is used to determine avg_idle's max value */
    1081             :         u64                     max_idle_balance_cost;
    1082             : 
    1083             : #ifdef CONFIG_HOTPLUG_CPU
    1084             :         struct rcuwait          hotplug_wait;
    1085             : #endif
    1086             : #endif /* CONFIG_SMP */
    1087             : 
    1088             : #ifdef CONFIG_IRQ_TIME_ACCOUNTING
    1089             :         u64                     prev_irq_time;
    1090             : #endif
    1091             : #ifdef CONFIG_PARAVIRT
    1092             :         u64                     prev_steal_time;
    1093             : #endif
    1094             : #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
    1095             :         u64                     prev_steal_time_rq;
    1096             : #endif
    1097             : 
    1098             :         /* calc_load related fields */
    1099             :         unsigned long           calc_load_update;
    1100             :         long                    calc_load_active;
    1101             : 
    1102             : #ifdef CONFIG_SCHED_HRTICK
    1103             : #ifdef CONFIG_SMP
    1104             :         call_single_data_t      hrtick_csd;
    1105             : #endif
    1106             :         struct hrtimer          hrtick_timer;
    1107             :         ktime_t                 hrtick_time;
    1108             : #endif
    1109             : 
    1110             : #ifdef CONFIG_SCHEDSTATS
    1111             :         /* latency stats */
    1112             :         struct sched_info       rq_sched_info;
    1113             :         unsigned long long      rq_cpu_time;
    1114             :         /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
    1115             : 
    1116             :         /* sys_sched_yield() stats */
    1117             :         unsigned int            yld_count;
    1118             : 
    1119             :         /* schedule() stats */
    1120             :         unsigned int            sched_count;
    1121             :         unsigned int            sched_goidle;
    1122             : 
    1123             :         /* try_to_wake_up() stats */
    1124             :         unsigned int            ttwu_count;
    1125             :         unsigned int            ttwu_local;
    1126             : #endif
    1127             : 
    1128             : #ifdef CONFIG_CPU_IDLE
    1129             :         /* Must be inspected within a rcu lock section */
    1130             :         struct cpuidle_state    *idle_state;
    1131             : #endif
    1132             : 
    1133             : #ifdef CONFIG_SMP
    1134             :         unsigned int            nr_pinned;
    1135             : #endif
    1136             :         unsigned int            push_busy;
    1137             :         struct cpu_stop_work    push_work;
    1138             : 
    1139             : #ifdef CONFIG_SCHED_CORE
    1140             :         /* per rq */
    1141             :         struct rq               *core;
    1142             :         struct task_struct      *core_pick;
    1143             :         unsigned int            core_enabled;
    1144             :         unsigned int            core_sched_seq;
    1145             :         struct rb_root          core_tree;
    1146             : 
    1147             :         /* shared state -- careful with sched_core_cpu_deactivate() */
    1148             :         unsigned int            core_task_seq;
    1149             :         unsigned int            core_pick_seq;
    1150             :         unsigned long           core_cookie;
    1151             :         unsigned int            core_forceidle_count;
    1152             :         unsigned int            core_forceidle_seq;
    1153             :         unsigned int            core_forceidle_occupation;
    1154             :         u64                     core_forceidle_start;
    1155             : #endif
    1156             : 
    1157             :         /* Scratch cpumask to be temporarily used under rq_lock */
    1158             :         cpumask_var_t           scratch_mask;
    1159             : 
    1160             : #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
    1161             :         call_single_data_t      cfsb_csd;
    1162             :         struct list_head        cfsb_csd_list;
    1163             : #endif
    1164             : };
    1165             : 
    1166             : #ifdef CONFIG_FAIR_GROUP_SCHED
    1167             : 
    1168             : /* CPU runqueue to which this cfs_rq is attached */
    1169             : static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
    1170             : {
    1171             :         return cfs_rq->rq;
    1172             : }
    1173             : 
    1174             : #else
    1175             : 
    1176             : static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
    1177             : {
    1178       21092 :         return container_of(cfs_rq, struct rq, cfs);
    1179             : }
    1180             : #endif
    1181             : 
    1182             : static inline int cpu_of(struct rq *rq)
    1183             : {
    1184             : #ifdef CONFIG_SMP
    1185             :         return rq->cpu;
    1186             : #else
    1187             :         return 0;
    1188             : #endif
    1189             : }
    1190             : 
    1191             : #define MDF_PUSH        0x01
    1192             : 
    1193             : static inline bool is_migration_disabled(struct task_struct *p)
    1194             : {
    1195             : #ifdef CONFIG_SMP
    1196             :         return p->migration_disabled;
    1197             : #else
    1198             :         return false;
    1199             : #endif
    1200             : }
    1201             : 
    1202             : DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
    1203             : 
    1204             : #define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
    1205             : #define this_rq()               this_cpu_ptr(&runqueues)
    1206             : #define task_rq(p)              cpu_rq(task_cpu(p))
    1207             : #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
    1208             : #define raw_rq()                raw_cpu_ptr(&runqueues)
    1209             : 
    1210             : struct sched_group;
    1211             : #ifdef CONFIG_SCHED_CORE
    1212             : static inline struct cpumask *sched_group_span(struct sched_group *sg);
    1213             : 
    1214             : DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
    1215             : 
    1216             : static inline bool sched_core_enabled(struct rq *rq)
    1217             : {
    1218             :         return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
    1219             : }
    1220             : 
    1221             : static inline bool sched_core_disabled(void)
    1222             : {
    1223             :         return !static_branch_unlikely(&__sched_core_enabled);
    1224             : }
    1225             : 
    1226             : /*
    1227             :  * Be careful with this function; not for general use. The return value isn't
    1228             :  * stable unless you actually hold a relevant rq->__lock.
    1229             :  */
    1230             : static inline raw_spinlock_t *rq_lockp(struct rq *rq)
    1231             : {
    1232             :         if (sched_core_enabled(rq))
    1233             :                 return &rq->core->__lock;
    1234             : 
    1235             :         return &rq->__lock;
    1236             : }
    1237             : 
    1238             : static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
    1239             : {
    1240             :         if (rq->core_enabled)
    1241             :                 return &rq->core->__lock;
    1242             : 
    1243             :         return &rq->__lock;
    1244             : }
    1245             : 
    1246             : bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
    1247             :                         bool fi);
    1248             : 
    1249             : /*
    1250             :  * Helpers to check if the CPU's core cookie matches with the task's cookie
    1251             :  * when core scheduling is enabled.
    1252             :  * A special case is that the task's cookie always matches with CPU's core
    1253             :  * cookie if the CPU is in an idle core.
    1254             :  */
    1255             : static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
    1256             : {
    1257             :         /* Ignore cookie match if core scheduler is not enabled on the CPU. */
    1258             :         if (!sched_core_enabled(rq))
    1259             :                 return true;
    1260             : 
    1261             :         return rq->core->core_cookie == p->core_cookie;
    1262             : }
    1263             : 
    1264             : static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
    1265             : {
    1266             :         bool idle_core = true;
    1267             :         int cpu;
    1268             : 
    1269             :         /* Ignore cookie match if core scheduler is not enabled on the CPU. */
    1270             :         if (!sched_core_enabled(rq))
    1271             :                 return true;
    1272             : 
    1273             :         for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
    1274             :                 if (!available_idle_cpu(cpu)) {
    1275             :                         idle_core = false;
    1276             :                         break;
    1277             :                 }
    1278             :         }
    1279             : 
    1280             :         /*
    1281             :          * A CPU in an idle core is always the best choice for tasks with
    1282             :          * cookies.
    1283             :          */
    1284             :         return idle_core || rq->core->core_cookie == p->core_cookie;
    1285             : }
    1286             : 
    1287             : static inline bool sched_group_cookie_match(struct rq *rq,
    1288             :                                             struct task_struct *p,
    1289             :                                             struct sched_group *group)
    1290             : {
    1291             :         int cpu;
    1292             : 
    1293             :         /* Ignore cookie match if core scheduler is not enabled on the CPU. */
    1294             :         if (!sched_core_enabled(rq))
    1295             :                 return true;
    1296             : 
    1297             :         for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
    1298             :                 if (sched_core_cookie_match(cpu_rq(cpu), p))
    1299             :                         return true;
    1300             :         }
    1301             :         return false;
    1302             : }
    1303             : 
    1304             : static inline bool sched_core_enqueued(struct task_struct *p)
    1305             : {
    1306             :         return !RB_EMPTY_NODE(&p->core_node);
    1307             : }
    1308             : 
    1309             : extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
    1310             : extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
    1311             : 
    1312             : extern void sched_core_get(void);
    1313             : extern void sched_core_put(void);
    1314             : 
    1315             : #else /* !CONFIG_SCHED_CORE */
    1316             : 
    1317             : static inline bool sched_core_enabled(struct rq *rq)
    1318             : {
    1319             :         return false;
    1320             : }
    1321             : 
    1322             : static inline bool sched_core_disabled(void)
    1323             : {
    1324             :         return true;
    1325             : }
    1326             : 
    1327             : static inline raw_spinlock_t *rq_lockp(struct rq *rq)
    1328             : {
    1329             :         return &rq->__lock;
    1330             : }
    1331             : 
    1332             : static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
    1333             : {
    1334             :         return &rq->__lock;
    1335             : }
    1336             : 
    1337             : static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
    1338             : {
    1339             :         return true;
    1340             : }
    1341             : 
    1342             : static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
    1343             : {
    1344             :         return true;
    1345             : }
    1346             : 
    1347             : static inline bool sched_group_cookie_match(struct rq *rq,
    1348             :                                             struct task_struct *p,
    1349             :                                             struct sched_group *group)
    1350             : {
    1351             :         return true;
    1352             : }
    1353             : #endif /* CONFIG_SCHED_CORE */
    1354             : 
    1355             : static inline void lockdep_assert_rq_held(struct rq *rq)
    1356             : {
    1357       27745 :         lockdep_assert_held(__rq_lockp(rq));
    1358             : }
    1359             : 
    1360             : extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
    1361             : extern bool raw_spin_rq_trylock(struct rq *rq);
    1362             : extern void raw_spin_rq_unlock(struct rq *rq);
    1363             : 
    1364             : static inline void raw_spin_rq_lock(struct rq *rq)
    1365             : {
    1366        7745 :         raw_spin_rq_lock_nested(rq, 0);
    1367             : }
    1368             : 
    1369             : static inline void raw_spin_rq_lock_irq(struct rq *rq)
    1370             : {
    1371           0 :         local_irq_disable();
    1372           0 :         raw_spin_rq_lock(rq);
    1373             : }
    1374             : 
    1375             : static inline void raw_spin_rq_unlock_irq(struct rq *rq)
    1376             : {
    1377        2210 :         raw_spin_rq_unlock(rq);
    1378             :         local_irq_enable();
    1379             : }
    1380             : 
    1381             : static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
    1382             : {
    1383             :         unsigned long flags;
    1384           0 :         local_irq_save(flags);
    1385           0 :         raw_spin_rq_lock(rq);
    1386             :         return flags;
    1387             : }
    1388             : 
    1389             : static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
    1390             : {
    1391           0 :         raw_spin_rq_unlock(rq);
    1392           0 :         local_irq_restore(flags);
    1393             : }
    1394             : 
    1395             : #define raw_spin_rq_lock_irqsave(rq, flags)     \
    1396             : do {                                            \
    1397             :         flags = _raw_spin_rq_lock_irqsave(rq);  \
    1398             : } while (0)
    1399             : 
    1400             : #ifdef CONFIG_SCHED_SMT
    1401             : extern void __update_idle_core(struct rq *rq);
    1402             : 
    1403             : static inline void update_idle_core(struct rq *rq)
    1404             : {
    1405             :         if (static_branch_unlikely(&sched_smt_present))
    1406             :                 __update_idle_core(rq);
    1407             : }
    1408             : 
    1409             : #else
    1410             : static inline void update_idle_core(struct rq *rq) { }
    1411             : #endif
    1412             : 
    1413             : #ifdef CONFIG_FAIR_GROUP_SCHED
    1414             : static inline struct task_struct *task_of(struct sched_entity *se)
    1415             : {
    1416             :         SCHED_WARN_ON(!entity_is_task(se));
    1417             :         return container_of(se, struct task_struct, se);
    1418             : }
    1419             : 
    1420             : static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
    1421             : {
    1422             :         return p->se.cfs_rq;
    1423             : }
    1424             : 
    1425             : /* runqueue on which this entity is (to be) queued */
    1426             : static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
    1427             : {
    1428             :         return se->cfs_rq;
    1429             : }
    1430             : 
    1431             : /* runqueue "owned" by this group */
    1432             : static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
    1433             : {
    1434             :         return grp->my_q;
    1435             : }
    1436             : 
    1437             : #else
    1438             : 
    1439             : #define task_of(_se)    container_of(_se, struct task_struct, se)
    1440             : 
    1441             : static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
    1442             : {
    1443        2465 :         return &task_rq(p)->cfs;
    1444             : }
    1445             : 
    1446             : static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
    1447             : {
    1448       15063 :         const struct task_struct *p = task_of(se);
    1449       15063 :         struct rq *rq = task_rq(p);
    1450             : 
    1451             :         return &rq->cfs;
    1452             : }
    1453             : 
    1454             : /* runqueue "owned" by this group */
    1455             : static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
    1456             : {
    1457             :         return NULL;
    1458             : }
    1459             : #endif
    1460             : 
    1461             : extern void update_rq_clock(struct rq *rq);
    1462             : 
    1463             : /*
    1464             :  * rq::clock_update_flags bits
    1465             :  *
    1466             :  * %RQCF_REQ_SKIP - will request skipping of clock update on the next
    1467             :  *  call to __schedule(). This is an optimisation to avoid
    1468             :  *  neighbouring rq clock updates.
    1469             :  *
    1470             :  * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
    1471             :  *  in effect and calls to update_rq_clock() are being ignored.
    1472             :  *
    1473             :  * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
    1474             :  *  made to update_rq_clock() since the last time rq::lock was pinned.
    1475             :  *
    1476             :  * If inside of __schedule(), clock_update_flags will have been
    1477             :  * shifted left (a left shift is a cheap operation for the fast path
    1478             :  * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
    1479             :  *
    1480             :  *      if (rq-clock_update_flags >= RQCF_UPDATED)
    1481             :  *
    1482             :  * to check if %RQCF_UPDATED is set. It'll never be shifted more than
    1483             :  * one position though, because the next rq_unpin_lock() will shift it
    1484             :  * back.
    1485             :  */
    1486             : #define RQCF_REQ_SKIP           0x01
    1487             : #define RQCF_ACT_SKIP           0x02
    1488             : #define RQCF_UPDATED            0x04
    1489             : 
    1490       16244 : static inline void assert_clock_updated(struct rq *rq)
    1491             : {
    1492             :         /*
    1493             :          * The only reason for not seeing a clock update since the
    1494             :          * last rq_pin_lock() is if we're currently skipping updates.
    1495             :          */
    1496       16244 :         SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
    1497       16244 : }
    1498             : 
    1499             : static inline u64 rq_clock(struct rq *rq)
    1500             : {
    1501           0 :         lockdep_assert_rq_held(rq);
    1502           0 :         assert_clock_updated(rq);
    1503             : 
    1504           0 :         return rq->clock;
    1505             : }
    1506             : 
    1507             : static inline u64 rq_clock_task(struct rq *rq)
    1508             : {
    1509        2723 :         lockdep_assert_rq_held(rq);
    1510       16244 :         assert_clock_updated(rq);
    1511             : 
    1512       13521 :         return rq->clock_task;
    1513             : }
    1514             : 
    1515             : /**
    1516             :  * By default the decay is the default pelt decay period.
    1517             :  * The decay shift can change the decay period in
    1518             :  * multiples of 32.
    1519             :  *  Decay shift         Decay period(ms)
    1520             :  *      0                       32
    1521             :  *      1                       64
    1522             :  *      2                       128
    1523             :  *      3                       256
    1524             :  *      4                       512
    1525             :  */
    1526             : extern int sched_thermal_decay_shift;
    1527             : 
    1528             : static inline u64 rq_clock_thermal(struct rq *rq)
    1529             : {
    1530        2723 :         return rq_clock_task(rq) >> sched_thermal_decay_shift;
    1531             : }
    1532             : 
    1533             : static inline void rq_clock_skip_update(struct rq *rq)
    1534             : {
    1535        1095 :         lockdep_assert_rq_held(rq);
    1536        1095 :         rq->clock_update_flags |= RQCF_REQ_SKIP;
    1537             : }
    1538             : 
    1539             : /*
    1540             :  * See rt task throttling, which is the only time a skip
    1541             :  * request is canceled.
    1542             :  */
    1543             : static inline void rq_clock_cancel_skipupdate(struct rq *rq)
    1544             : {
    1545           0 :         lockdep_assert_rq_held(rq);
    1546           0 :         rq->clock_update_flags &= ~RQCF_REQ_SKIP;
    1547             : }
    1548             : 
    1549             : struct rq_flags {
    1550             :         unsigned long flags;
    1551             :         struct pin_cookie cookie;
    1552             : #ifdef CONFIG_SCHED_DEBUG
    1553             :         /*
    1554             :          * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
    1555             :          * current pin context is stashed here in case it needs to be
    1556             :          * restored in rq_repin_lock().
    1557             :          */
    1558             :         unsigned int clock_update_flags;
    1559             : #endif
    1560             : };
    1561             : 
    1562             : extern struct balance_callback balance_push_callback;
    1563             : 
    1564             : /*
    1565             :  * Lockdep annotation that avoids accidental unlocks; it's like a
    1566             :  * sticky/continuous lockdep_assert_held().
    1567             :  *
    1568             :  * This avoids code that has access to 'struct rq *rq' (basically everything in
    1569             :  * the scheduler) from accidentally unlocking the rq if they do not also have a
    1570             :  * copy of the (on-stack) 'struct rq_flags rf'.
    1571             :  *
    1572             :  * Also see Documentation/locking/lockdep-design.rst.
    1573             :  */
    1574             : static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
    1575             : {
    1576             :         rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
    1577             : 
    1578             : #ifdef CONFIG_SCHED_DEBUG
    1579        7744 :         rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
    1580        7744 :         rf->clock_update_flags = 0;
    1581             : #ifdef CONFIG_SMP
    1582             :         SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
    1583             : #endif
    1584             : #endif
    1585             : }
    1586             : 
    1587             : static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
    1588             : {
    1589             : #ifdef CONFIG_SCHED_DEBUG
    1590        2553 :         if (rq->clock_update_flags > RQCF_ACT_SKIP)
    1591        1803 :                 rf->clock_update_flags = RQCF_UPDATED;
    1592             : #endif
    1593             : 
    1594        7744 :         lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
    1595             : }
    1596             : 
    1597             : static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
    1598             : {
    1599             :         lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
    1600             : 
    1601             : #ifdef CONFIG_SCHED_DEBUG
    1602             :         /*
    1603             :          * Restore the value we stashed in @rf for this pin context.
    1604             :          */
    1605             :         rq->clock_update_flags |= rf->clock_update_flags;
    1606             : #endif
    1607             : }
    1608             : 
    1609             : struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
    1610             :         __acquires(rq->lock);
    1611             : 
    1612             : struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
    1613             :         __acquires(p->pi_lock)
    1614             :         __acquires(rq->lock);
    1615             : 
    1616             : static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
    1617             :         __releases(rq->lock)
    1618             : {
    1619           0 :         rq_unpin_lock(rq, rf);
    1620           0 :         raw_spin_rq_unlock(rq);
    1621             : }
    1622             : 
    1623             : static inline void
    1624           0 : task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
    1625             :         __releases(rq->lock)
    1626             :         __releases(p->pi_lock)
    1627             : {
    1628        1366 :         rq_unpin_lock(rq, rf);
    1629         683 :         raw_spin_rq_unlock(rq);
    1630        1366 :         raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
    1631           0 : }
    1632             : 
    1633             : static inline void
    1634             : rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
    1635             :         __acquires(rq->lock)
    1636             : {
    1637             :         raw_spin_rq_lock_irqsave(rq, rf->flags);
    1638             :         rq_pin_lock(rq, rf);
    1639             : }
    1640             : 
    1641             : static inline void
    1642             : rq_lock_irq(struct rq *rq, struct rq_flags *rf)
    1643             :         __acquires(rq->lock)
    1644             : {
    1645             :         raw_spin_rq_lock_irq(rq);
    1646             :         rq_pin_lock(rq, rf);
    1647             : }
    1648             : 
    1649             : static inline void
    1650             : rq_lock(struct rq *rq, struct rq_flags *rf)
    1651             :         __acquires(rq->lock)
    1652             : {
    1653        7061 :         raw_spin_rq_lock(rq);
    1654        7061 :         rq_pin_lock(rq, rf);
    1655             : }
    1656             : 
    1657             : static inline void
    1658             : rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
    1659             :         __releases(rq->lock)
    1660             : {
    1661             :         rq_unpin_lock(rq, rf);
    1662             :         raw_spin_rq_unlock_irqrestore(rq, rf->flags);
    1663             : }
    1664             : 
    1665             : static inline void
    1666             : rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
    1667             :         __releases(rq->lock)
    1668             : {
    1669           0 :         rq_unpin_lock(rq, rf);
    1670             :         raw_spin_rq_unlock_irq(rq);
    1671             : }
    1672             : 
    1673             : static inline void
    1674             : rq_unlock(struct rq *rq, struct rq_flags *rf)
    1675             :         __releases(rq->lock)
    1676             : {
    1677        9362 :         rq_unpin_lock(rq, rf);
    1678        4851 :         raw_spin_rq_unlock(rq);
    1679             : }
    1680             : 
    1681             : static inline struct rq *
    1682             : this_rq_lock_irq(struct rq_flags *rf)
    1683             :         __acquires(rq->lock)
    1684             : {
    1685             :         struct rq *rq;
    1686             : 
    1687             :         local_irq_disable();
    1688           0 :         rq = this_rq();
    1689           0 :         rq_lock(rq, rf);
    1690             :         return rq;
    1691             : }
    1692             : 
    1693             : #ifdef CONFIG_NUMA
    1694             : enum numa_topology_type {
    1695             :         NUMA_DIRECT,
    1696             :         NUMA_GLUELESS_MESH,
    1697             :         NUMA_BACKPLANE,
    1698             : };
    1699             : extern enum numa_topology_type sched_numa_topology_type;
    1700             : extern int sched_max_numa_distance;
    1701             : extern bool find_numa_distance(int distance);
    1702             : extern void sched_init_numa(int offline_node);
    1703             : extern void sched_update_numa(int cpu, bool online);
    1704             : extern void sched_domains_numa_masks_set(unsigned int cpu);
    1705             : extern void sched_domains_numa_masks_clear(unsigned int cpu);
    1706             : extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
    1707             : #else
    1708             : static inline void sched_init_numa(int offline_node) { }
    1709             : static inline void sched_update_numa(int cpu, bool online) { }
    1710             : static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
    1711             : static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
    1712             : static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
    1713             : {
    1714             :         return nr_cpu_ids;
    1715             : }
    1716             : #endif
    1717             : 
    1718             : #ifdef CONFIG_NUMA_BALANCING
    1719             : /* The regions in numa_faults array from task_struct */
    1720             : enum numa_faults_stats {
    1721             :         NUMA_MEM = 0,
    1722             :         NUMA_CPU,
    1723             :         NUMA_MEMBUF,
    1724             :         NUMA_CPUBUF
    1725             : };
    1726             : extern void sched_setnuma(struct task_struct *p, int node);
    1727             : extern int migrate_task_to(struct task_struct *p, int cpu);
    1728             : extern int migrate_swap(struct task_struct *p, struct task_struct *t,
    1729             :                         int cpu, int scpu);
    1730             : extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
    1731             : #else
    1732             : static inline void
    1733             : init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
    1734             : {
    1735             : }
    1736             : #endif /* CONFIG_NUMA_BALANCING */
    1737             : 
    1738             : #ifdef CONFIG_SMP
    1739             : 
    1740             : static inline void
    1741             : queue_balance_callback(struct rq *rq,
    1742             :                        struct balance_callback *head,
    1743             :                        void (*func)(struct rq *rq))
    1744             : {
    1745             :         lockdep_assert_rq_held(rq);
    1746             : 
    1747             :         /*
    1748             :          * Don't (re)queue an already queued item; nor queue anything when
    1749             :          * balance_push() is active, see the comment with
    1750             :          * balance_push_callback.
    1751             :          */
    1752             :         if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
    1753             :                 return;
    1754             : 
    1755             :         head->func = func;
    1756             :         head->next = rq->balance_callback;
    1757             :         rq->balance_callback = head;
    1758             : }
    1759             : 
    1760             : #define rcu_dereference_check_sched_domain(p) \
    1761             :         rcu_dereference_check((p), \
    1762             :                               lockdep_is_held(&sched_domains_mutex))
    1763             : 
    1764             : /*
    1765             :  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
    1766             :  * See destroy_sched_domains: call_rcu for details.
    1767             :  *
    1768             :  * The domain tree of any CPU may only be accessed from within
    1769             :  * preempt-disabled sections.
    1770             :  */
    1771             : #define for_each_domain(cpu, __sd) \
    1772             :         for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
    1773             :                         __sd; __sd = __sd->parent)
    1774             : 
    1775             : /**
    1776             :  * highest_flag_domain - Return highest sched_domain containing flag.
    1777             :  * @cpu:        The CPU whose highest level of sched domain is to
    1778             :  *              be returned.
    1779             :  * @flag:       The flag to check for the highest sched_domain
    1780             :  *              for the given CPU.
    1781             :  *
    1782             :  * Returns the highest sched_domain of a CPU which contains the given flag.
    1783             :  */
    1784             : static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
    1785             : {
    1786             :         struct sched_domain *sd, *hsd = NULL;
    1787             : 
    1788             :         for_each_domain(cpu, sd) {
    1789             :                 if (!(sd->flags & flag))
    1790             :                         break;
    1791             :                 hsd = sd;
    1792             :         }
    1793             : 
    1794             :         return hsd;
    1795             : }
    1796             : 
    1797             : static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
    1798             : {
    1799             :         struct sched_domain *sd;
    1800             : 
    1801             :         for_each_domain(cpu, sd) {
    1802             :                 if (sd->flags & flag)
    1803             :                         break;
    1804             :         }
    1805             : 
    1806             :         return sd;
    1807             : }
    1808             : 
    1809             : DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
    1810             : DECLARE_PER_CPU(int, sd_llc_size);
    1811             : DECLARE_PER_CPU(int, sd_llc_id);
    1812             : DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
    1813             : DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
    1814             : DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
    1815             : DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
    1816             : extern struct static_key_false sched_asym_cpucapacity;
    1817             : 
    1818             : static __always_inline bool sched_asym_cpucap_active(void)
    1819             : {
    1820             :         return static_branch_unlikely(&sched_asym_cpucapacity);
    1821             : }
    1822             : 
    1823             : struct sched_group_capacity {
    1824             :         atomic_t                ref;
    1825             :         /*
    1826             :          * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
    1827             :          * for a single CPU.
    1828             :          */
    1829             :         unsigned long           capacity;
    1830             :         unsigned long           min_capacity;           /* Min per-CPU capacity in group */
    1831             :         unsigned long           max_capacity;           /* Max per-CPU capacity in group */
    1832             :         unsigned long           next_update;
    1833             :         int                     imbalance;              /* XXX unrelated to capacity but shared group state */
    1834             : 
    1835             : #ifdef CONFIG_SCHED_DEBUG
    1836             :         int                     id;
    1837             : #endif
    1838             : 
    1839             :         unsigned long           cpumask[];              /* Balance mask */
    1840             : };
    1841             : 
    1842             : struct sched_group {
    1843             :         struct sched_group      *next;                  /* Must be a circular list */
    1844             :         atomic_t                ref;
    1845             : 
    1846             :         unsigned int            group_weight;
    1847             :         struct sched_group_capacity *sgc;
    1848             :         int                     asym_prefer_cpu;        /* CPU of highest priority in group */
    1849             :         int                     flags;
    1850             : 
    1851             :         /*
    1852             :          * The CPUs this group covers.
    1853             :          *
    1854             :          * NOTE: this field is variable length. (Allocated dynamically
    1855             :          * by attaching extra space to the end of the structure,
    1856             :          * depending on how many CPUs the kernel has booted up with)
    1857             :          */
    1858             :         unsigned long           cpumask[];
    1859             : };
    1860             : 
    1861             : static inline struct cpumask *sched_group_span(struct sched_group *sg)
    1862             : {
    1863             :         return to_cpumask(sg->cpumask);
    1864             : }
    1865             : 
    1866             : /*
    1867             :  * See build_balance_mask().
    1868             :  */
    1869             : static inline struct cpumask *group_balance_mask(struct sched_group *sg)
    1870             : {
    1871             :         return to_cpumask(sg->sgc->cpumask);
    1872             : }
    1873             : 
    1874             : extern int group_balance_cpu(struct sched_group *sg);
    1875             : 
    1876             : #ifdef CONFIG_SCHED_DEBUG
    1877             : void update_sched_domain_debugfs(void);
    1878             : void dirty_sched_domain_sysctl(int cpu);
    1879             : #else
    1880             : static inline void update_sched_domain_debugfs(void)
    1881             : {
    1882             : }
    1883             : static inline void dirty_sched_domain_sysctl(int cpu)
    1884             : {
    1885             : }
    1886             : #endif
    1887             : 
    1888             : extern int sched_update_scaling(void);
    1889             : 
    1890             : static inline const struct cpumask *task_user_cpus(struct task_struct *p)
    1891             : {
    1892             :         if (!p->user_cpus_ptr)
    1893             :                 return cpu_possible_mask; /* &init_task.cpus_mask */
    1894             :         return p->user_cpus_ptr;
    1895             : }
    1896             : #endif /* CONFIG_SMP */
    1897             : 
    1898             : #include "stats.h"
    1899             : 
    1900             : #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)
    1901             : 
    1902             : extern void __sched_core_account_forceidle(struct rq *rq);
    1903             : 
    1904             : static inline void sched_core_account_forceidle(struct rq *rq)
    1905             : {
    1906             :         if (schedstat_enabled())
    1907             :                 __sched_core_account_forceidle(rq);
    1908             : }
    1909             : 
    1910             : extern void __sched_core_tick(struct rq *rq);
    1911             : 
    1912             : static inline void sched_core_tick(struct rq *rq)
    1913             : {
    1914             :         if (sched_core_enabled(rq) && schedstat_enabled())
    1915             :                 __sched_core_tick(rq);
    1916             : }
    1917             : 
    1918             : #else
    1919             : 
    1920             : static inline void sched_core_account_forceidle(struct rq *rq) {}
    1921             : 
    1922             : static inline void sched_core_tick(struct rq *rq) {}
    1923             : 
    1924             : #endif /* CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS */
    1925             : 
    1926             : #ifdef CONFIG_CGROUP_SCHED
    1927             : 
    1928             : /*
    1929             :  * Return the group to which this tasks belongs.
    1930             :  *
    1931             :  * We cannot use task_css() and friends because the cgroup subsystem
    1932             :  * changes that value before the cgroup_subsys::attach() method is called,
    1933             :  * therefore we cannot pin it and might observe the wrong value.
    1934             :  *
    1935             :  * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
    1936             :  * core changes this before calling sched_move_task().
    1937             :  *
    1938             :  * Instead we use a 'copy' which is updated from sched_move_task() while
    1939             :  * holding both task_struct::pi_lock and rq::lock.
    1940             :  */
    1941             : static inline struct task_group *task_group(struct task_struct *p)
    1942             : {
    1943             :         return p->sched_task_group;
    1944             : }
    1945             : 
    1946             : /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
    1947             : static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
    1948             : {
    1949             : #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
    1950             :         struct task_group *tg = task_group(p);
    1951             : #endif
    1952             : 
    1953             : #ifdef CONFIG_FAIR_GROUP_SCHED
    1954             :         set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
    1955             :         p->se.cfs_rq = tg->cfs_rq[cpu];
    1956             :         p->se.parent = tg->se[cpu];
    1957             :         p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
    1958             : #endif
    1959             : 
    1960             : #ifdef CONFIG_RT_GROUP_SCHED
    1961             :         p->rt.rt_rq  = tg->rt_rq[cpu];
    1962             :         p->rt.parent = tg->rt_se[cpu];
    1963             : #endif
    1964             : }
    1965             : 
    1966             : #else /* CONFIG_CGROUP_SCHED */
    1967             : 
    1968             : static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
    1969             : static inline struct task_group *task_group(struct task_struct *p)
    1970             : {
    1971             :         return NULL;
    1972             : }
    1973             : 
    1974             : #endif /* CONFIG_CGROUP_SCHED */
    1975             : 
    1976             : static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
    1977             : {
    1978         341 :         set_task_rq(p, cpu);
    1979             : #ifdef CONFIG_SMP
    1980             :         /*
    1981             :          * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
    1982             :          * successfully executed on another CPU. We must ensure that updates of
    1983             :          * per-task data have been completed by this moment.
    1984             :          */
    1985             :         smp_wmb();
    1986             :         WRITE_ONCE(task_thread_info(p)->cpu, cpu);
    1987             :         p->wake_cpu = cpu;
    1988             : #endif
    1989             : }
    1990             : 
    1991             : /*
    1992             :  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
    1993             :  */
    1994             : #ifdef CONFIG_SCHED_DEBUG
    1995             : # define const_debug __read_mostly
    1996             : #else
    1997             : # define const_debug const
    1998             : #endif
    1999             : 
    2000             : #define SCHED_FEAT(name, enabled)       \
    2001             :         __SCHED_FEAT_##name ,
    2002             : 
    2003             : enum {
    2004             : #include "features.h"
    2005             :         __SCHED_FEAT_NR,
    2006             : };
    2007             : 
    2008             : #undef SCHED_FEAT
    2009             : 
    2010             : #ifdef CONFIG_SCHED_DEBUG
    2011             : 
    2012             : /*
    2013             :  * To support run-time toggling of sched features, all the translation units
    2014             :  * (but core.c) reference the sysctl_sched_features defined in core.c.
    2015             :  */
    2016             : extern const_debug unsigned int sysctl_sched_features;
    2017             : 
    2018             : #ifdef CONFIG_JUMP_LABEL
    2019             : #define SCHED_FEAT(name, enabled)                                       \
    2020             : static __always_inline bool static_branch_##name(struct static_key *key) \
    2021             : {                                                                       \
    2022             :         return static_key_##enabled(key);                               \
    2023             : }
    2024             : 
    2025             : #include "features.h"
    2026             : #undef SCHED_FEAT
    2027             : 
    2028             : extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
    2029             : #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
    2030             : 
    2031             : #else /* !CONFIG_JUMP_LABEL */
    2032             : 
    2033             : #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
    2034             : 
    2035             : #endif /* CONFIG_JUMP_LABEL */
    2036             : 
    2037             : #else /* !SCHED_DEBUG */
    2038             : 
    2039             : /*
    2040             :  * Each translation unit has its own copy of sysctl_sched_features to allow
    2041             :  * constants propagation at compile time and compiler optimization based on
    2042             :  * features default.
    2043             :  */
    2044             : #define SCHED_FEAT(name, enabled)       \
    2045             :         (1UL << __SCHED_FEAT_##name) * enabled |
    2046             : static const_debug __maybe_unused unsigned int sysctl_sched_features =
    2047             : #include "features.h"
    2048             :         0;
    2049             : #undef SCHED_FEAT
    2050             : 
    2051             : #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
    2052             : 
    2053             : #endif /* SCHED_DEBUG */
    2054             : 
    2055             : extern struct static_key_false sched_numa_balancing;
    2056             : extern struct static_key_false sched_schedstats;
    2057             : 
    2058             : static inline u64 global_rt_period(void)
    2059             : {
    2060           4 :         return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
    2061             : }
    2062             : 
    2063             : static inline u64 global_rt_runtime(void)
    2064             : {
    2065           4 :         if (sysctl_sched_rt_runtime < 0)
    2066             :                 return RUNTIME_INF;
    2067             : 
    2068           4 :         return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
    2069             : }
    2070             : 
    2071             : static inline int task_current(struct rq *rq, struct task_struct *p)
    2072             : {
    2073             :         return rq->curr == p;
    2074             : }
    2075             : 
    2076             : static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
    2077             : {
    2078             : #ifdef CONFIG_SMP
    2079             :         return p->on_cpu;
    2080             : #else
    2081           0 :         return task_current(rq, p);
    2082             : #endif
    2083             : }
    2084             : 
    2085             : static inline int task_on_rq_queued(struct task_struct *p)
    2086             : {
    2087           0 :         return p->on_rq == TASK_ON_RQ_QUEUED;
    2088             : }
    2089             : 
    2090             : static inline int task_on_rq_migrating(struct task_struct *p)
    2091             : {
    2092        2813 :         return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
    2093             : }
    2094             : 
    2095             : /* Wake flags. The first three directly map to some SD flag value */
    2096             : #define WF_EXEC     0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
    2097             : #define WF_FORK     0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
    2098             : #define WF_TTWU     0x08 /* Wakeup;            maps to SD_BALANCE_WAKE */
    2099             : 
    2100             : #define WF_SYNC     0x10 /* Waker goes to sleep after wakeup */
    2101             : #define WF_MIGRATED 0x20 /* Internal use, task got migrated */
    2102             : 
    2103             : #ifdef CONFIG_SMP
    2104             : static_assert(WF_EXEC == SD_BALANCE_EXEC);
    2105             : static_assert(WF_FORK == SD_BALANCE_FORK);
    2106             : static_assert(WF_TTWU == SD_BALANCE_WAKE);
    2107             : #endif
    2108             : 
    2109             : /*
    2110             :  * To aid in avoiding the subversion of "niceness" due to uneven distribution
    2111             :  * of tasks with abnormal "nice" values across CPUs the contribution that
    2112             :  * each task makes to its run queue's load is weighted according to its
    2113             :  * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
    2114             :  * scaled version of the new time slice allocation that they receive on time
    2115             :  * slice expiry etc.
    2116             :  */
    2117             : 
    2118             : #define WEIGHT_IDLEPRIO         3
    2119             : #define WMULT_IDLEPRIO          1431655765
    2120             : 
    2121             : extern const int                sched_prio_to_weight[40];
    2122             : extern const u32                sched_prio_to_wmult[40];
    2123             : 
    2124             : /*
    2125             :  * {de,en}queue flags:
    2126             :  *
    2127             :  * DEQUEUE_SLEEP  - task is no longer runnable
    2128             :  * ENQUEUE_WAKEUP - task just became runnable
    2129             :  *
    2130             :  * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
    2131             :  *                are in a known state which allows modification. Such pairs
    2132             :  *                should preserve as much state as possible.
    2133             :  *
    2134             :  * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
    2135             :  *        in the runqueue.
    2136             :  *
    2137             :  * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
    2138             :  * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
    2139             :  * ENQUEUE_MIGRATED  - the task was migrated during wakeup
    2140             :  *
    2141             :  */
    2142             : 
    2143             : #define DEQUEUE_SLEEP           0x01
    2144             : #define DEQUEUE_SAVE            0x02 /* Matches ENQUEUE_RESTORE */
    2145             : #define DEQUEUE_MOVE            0x04 /* Matches ENQUEUE_MOVE */
    2146             : #define DEQUEUE_NOCLOCK         0x08 /* Matches ENQUEUE_NOCLOCK */
    2147             : 
    2148             : #define ENQUEUE_WAKEUP          0x01
    2149             : #define ENQUEUE_RESTORE         0x02
    2150             : #define ENQUEUE_MOVE            0x04
    2151             : #define ENQUEUE_NOCLOCK         0x08
    2152             : 
    2153             : #define ENQUEUE_HEAD            0x10
    2154             : #define ENQUEUE_REPLENISH       0x20
    2155             : #ifdef CONFIG_SMP
    2156             : #define ENQUEUE_MIGRATED        0x40
    2157             : #else
    2158             : #define ENQUEUE_MIGRATED        0x00
    2159             : #endif
    2160             : 
    2161             : #define RETRY_TASK              ((void *)-1UL)
    2162             : 
    2163             : struct affinity_context {
    2164             :         const struct cpumask *new_mask;
    2165             :         struct cpumask *user_mask;
    2166             :         unsigned int flags;
    2167             : };
    2168             : 
    2169             : struct sched_class {
    2170             : 
    2171             : #ifdef CONFIG_UCLAMP_TASK
    2172             :         int uclamp_enabled;
    2173             : #endif
    2174             : 
    2175             :         void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
    2176             :         void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
    2177             :         void (*yield_task)   (struct rq *rq);
    2178             :         bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
    2179             : 
    2180             :         void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
    2181             : 
    2182             :         struct task_struct *(*pick_next_task)(struct rq *rq);
    2183             : 
    2184             :         void (*put_prev_task)(struct rq *rq, struct task_struct *p);
    2185             :         void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
    2186             : 
    2187             : #ifdef CONFIG_SMP
    2188             :         int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
    2189             :         int  (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
    2190             : 
    2191             :         struct task_struct * (*pick_task)(struct rq *rq);
    2192             : 
    2193             :         void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
    2194             : 
    2195             :         void (*task_woken)(struct rq *this_rq, struct task_struct *task);
    2196             : 
    2197             :         void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
    2198             : 
    2199             :         void (*rq_online)(struct rq *rq);
    2200             :         void (*rq_offline)(struct rq *rq);
    2201             : 
    2202             :         struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
    2203             : #endif
    2204             : 
    2205             :         void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
    2206             :         void (*task_fork)(struct task_struct *p);
    2207             :         void (*task_dead)(struct task_struct *p);
    2208             : 
    2209             :         /*
    2210             :          * The switched_from() call is allowed to drop rq->lock, therefore we
    2211             :          * cannot assume the switched_from/switched_to pair is serialized by
    2212             :          * rq->lock. They are however serialized by p->pi_lock.
    2213             :          */
    2214             :         void (*switched_from)(struct rq *this_rq, struct task_struct *task);
    2215             :         void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
    2216             :         void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
    2217             :                               int oldprio);
    2218             : 
    2219             :         unsigned int (*get_rr_interval)(struct rq *rq,
    2220             :                                         struct task_struct *task);
    2221             : 
    2222             :         void (*update_curr)(struct rq *rq);
    2223             : 
    2224             : #ifdef CONFIG_FAIR_GROUP_SCHED
    2225             :         void (*task_change_group)(struct task_struct *p);
    2226             : #endif
    2227             : };
    2228             : 
    2229        2214 : static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
    2230             : {
    2231        2214 :         WARN_ON_ONCE(rq->curr != prev);
    2232        2214 :         prev->sched_class->put_prev_task(rq, prev);
    2233        2214 : }
    2234             : 
    2235             : static inline void set_next_task(struct rq *rq, struct task_struct *next)
    2236             : {
    2237           4 :         next->sched_class->set_next_task(rq, next, false);
    2238             : }
    2239             : 
    2240             : 
    2241             : /*
    2242             :  * Helper to define a sched_class instance; each one is placed in a separate
    2243             :  * section which is ordered by the linker script:
    2244             :  *
    2245             :  *   include/asm-generic/vmlinux.lds.h
    2246             :  *
    2247             :  * *CAREFUL* they are laid out in *REVERSE* order!!!
    2248             :  *
    2249             :  * Also enforce alignment on the instance, not the type, to guarantee layout.
    2250             :  */
    2251             : #define DEFINE_SCHED_CLASS(name) \
    2252             : const struct sched_class name##_sched_class \
    2253             :         __aligned(__alignof__(struct sched_class)) \
    2254             :         __section("__" #name "_sched_class")
    2255             : 
    2256             : /* Defined in include/asm-generic/vmlinux.lds.h */
    2257             : extern struct sched_class __sched_class_highest[];
    2258             : extern struct sched_class __sched_class_lowest[];
    2259             : 
    2260             : #define for_class_range(class, _from, _to) \
    2261             :         for (class = (_from); class < (_to); class++)
    2262             : 
    2263             : #define for_each_class(class) \
    2264             :         for_class_range(class, __sched_class_highest, __sched_class_lowest)
    2265             : 
    2266             : #define sched_class_above(_a, _b)       ((_a) < (_b))
    2267             : 
    2268             : extern const struct sched_class stop_sched_class;
    2269             : extern const struct sched_class dl_sched_class;
    2270             : extern const struct sched_class rt_sched_class;
    2271             : extern const struct sched_class fair_sched_class;
    2272             : extern const struct sched_class idle_sched_class;
    2273             : 
    2274             : static inline bool sched_stop_runnable(struct rq *rq)
    2275             : {
    2276             :         return rq->stop && task_on_rq_queued(rq->stop);
    2277             : }
    2278             : 
    2279             : static inline bool sched_dl_runnable(struct rq *rq)
    2280             : {
    2281             :         return rq->dl.dl_nr_running > 0;
    2282             : }
    2283             : 
    2284             : static inline bool sched_rt_runnable(struct rq *rq)
    2285             : {
    2286             :         return rq->rt.rt_queued > 0;
    2287             : }
    2288             : 
    2289             : static inline bool sched_fair_runnable(struct rq *rq)
    2290             : {
    2291             :         return rq->cfs.nr_running > 0;
    2292             : }
    2293             : 
    2294             : extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
    2295             : extern struct task_struct *pick_next_task_idle(struct rq *rq);
    2296             : 
    2297             : #define SCA_CHECK               0x01
    2298             : #define SCA_MIGRATE_DISABLE     0x02
    2299             : #define SCA_MIGRATE_ENABLE      0x04
    2300             : #define SCA_USER                0x08
    2301             : 
    2302             : #ifdef CONFIG_SMP
    2303             : 
    2304             : extern void update_group_capacity(struct sched_domain *sd, int cpu);
    2305             : 
    2306             : extern void trigger_load_balance(struct rq *rq);
    2307             : 
    2308             : extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
    2309             : 
    2310             : static inline struct task_struct *get_push_task(struct rq *rq)
    2311             : {
    2312             :         struct task_struct *p = rq->curr;
    2313             : 
    2314             :         lockdep_assert_rq_held(rq);
    2315             : 
    2316             :         if (rq->push_busy)
    2317             :                 return NULL;
    2318             : 
    2319             :         if (p->nr_cpus_allowed == 1)
    2320             :                 return NULL;
    2321             : 
    2322             :         if (p->migration_disabled)
    2323             :                 return NULL;
    2324             : 
    2325             :         rq->push_busy = true;
    2326             :         return get_task_struct(p);
    2327             : }
    2328             : 
    2329             : extern int push_cpu_stop(void *arg);
    2330             : 
    2331             : #endif
    2332             : 
    2333             : #ifdef CONFIG_CPU_IDLE
    2334             : static inline void idle_set_state(struct rq *rq,
    2335             :                                   struct cpuidle_state *idle_state)
    2336             : {
    2337             :         rq->idle_state = idle_state;
    2338             : }
    2339             : 
    2340             : static inline struct cpuidle_state *idle_get_state(struct rq *rq)
    2341             : {
    2342             :         SCHED_WARN_ON(!rcu_read_lock_held());
    2343             : 
    2344             :         return rq->idle_state;
    2345             : }
    2346             : #else
    2347             : static inline void idle_set_state(struct rq *rq,
    2348             :                                   struct cpuidle_state *idle_state)
    2349             : {
    2350             : }
    2351             : 
    2352             : static inline struct cpuidle_state *idle_get_state(struct rq *rq)
    2353             : {
    2354             :         return NULL;
    2355             : }
    2356             : #endif
    2357             : 
    2358             : extern void schedule_idle(void);
    2359             : 
    2360             : extern void sysrq_sched_debug_show(void);
    2361             : extern void sched_init_granularity(void);
    2362             : extern void update_max_interval(void);
    2363             : 
    2364             : extern void init_sched_dl_class(void);
    2365             : extern void init_sched_rt_class(void);
    2366             : extern void init_sched_fair_class(void);
    2367             : 
    2368             : extern void reweight_task(struct task_struct *p, int prio);
    2369             : 
    2370             : extern void resched_curr(struct rq *rq);
    2371             : extern void resched_cpu(int cpu);
    2372             : 
    2373             : extern struct rt_bandwidth def_rt_bandwidth;
    2374             : extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
    2375             : extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
    2376             : 
    2377             : extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
    2378             : extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
    2379             : extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
    2380             : 
    2381             : #define BW_SHIFT                20
    2382             : #define BW_UNIT                 (1 << BW_SHIFT)
    2383             : #define RATIO_SHIFT             8
    2384             : #define MAX_BW_BITS             (64 - BW_SHIFT)
    2385             : #define MAX_BW                  ((1ULL << MAX_BW_BITS) - 1)
    2386             : unsigned long to_ratio(u64 period, u64 runtime);
    2387             : 
    2388             : extern void init_entity_runnable_average(struct sched_entity *se);
    2389             : extern void post_init_entity_util_avg(struct task_struct *p);
    2390             : 
    2391             : #ifdef CONFIG_NO_HZ_FULL
    2392             : extern bool sched_can_stop_tick(struct rq *rq);
    2393             : extern int __init sched_tick_offload_init(void);
    2394             : 
    2395             : /*
    2396             :  * Tick may be needed by tasks in the runqueue depending on their policy and
    2397             :  * requirements. If tick is needed, lets send the target an IPI to kick it out of
    2398             :  * nohz mode if necessary.
    2399             :  */
    2400             : static inline void sched_update_tick_dependency(struct rq *rq)
    2401             : {
    2402             :         int cpu = cpu_of(rq);
    2403             : 
    2404             :         if (!tick_nohz_full_cpu(cpu))
    2405             :                 return;
    2406             : 
    2407             :         if (sched_can_stop_tick(rq))
    2408             :                 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
    2409             :         else
    2410             :                 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
    2411             : }
    2412             : #else
    2413             : static inline int sched_tick_offload_init(void) { return 0; }
    2414             : static inline void sched_update_tick_dependency(struct rq *rq) { }
    2415             : #endif
    2416             : 
    2417             : static inline void add_nr_running(struct rq *rq, unsigned count)
    2418             : {
    2419        2132 :         unsigned prev_nr = rq->nr_running;
    2420             : 
    2421        2132 :         rq->nr_running = prev_nr + count;
    2422             :         if (trace_sched_update_nr_running_tp_enabled()) {
    2423             :                 call_trace_sched_update_nr_running(rq, count);
    2424             :         }
    2425             : 
    2426             : #ifdef CONFIG_SMP
    2427             :         if (prev_nr < 2 && rq->nr_running >= 2) {
    2428             :                 if (!READ_ONCE(rq->rd->overload))
    2429             :                         WRITE_ONCE(rq->rd->overload, 1);
    2430             :         }
    2431             : #endif
    2432             : 
    2433        2132 :         sched_update_tick_dependency(rq);
    2434             : }
    2435             : 
    2436             : static inline void sub_nr_running(struct rq *rq, unsigned count)
    2437             : {
    2438        2130 :         rq->nr_running -= count;
    2439             :         if (trace_sched_update_nr_running_tp_enabled()) {
    2440             :                 call_trace_sched_update_nr_running(rq, -count);
    2441             :         }
    2442             : 
    2443             :         /* Check if we still need preemption */
    2444        2130 :         sched_update_tick_dependency(rq);
    2445             : }
    2446             : 
    2447             : extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
    2448             : extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
    2449             : 
    2450             : extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
    2451             : 
    2452             : #ifdef CONFIG_PREEMPT_RT
    2453             : #define SCHED_NR_MIGRATE_BREAK 8
    2454             : #else
    2455             : #define SCHED_NR_MIGRATE_BREAK 32
    2456             : #endif
    2457             : 
    2458             : extern const_debug unsigned int sysctl_sched_nr_migrate;
    2459             : extern const_debug unsigned int sysctl_sched_migration_cost;
    2460             : 
    2461             : #ifdef CONFIG_SCHED_DEBUG
    2462             : extern unsigned int sysctl_sched_latency;
    2463             : extern unsigned int sysctl_sched_min_granularity;
    2464             : extern unsigned int sysctl_sched_idle_min_granularity;
    2465             : extern unsigned int sysctl_sched_wakeup_granularity;
    2466             : extern int sysctl_resched_latency_warn_ms;
    2467             : extern int sysctl_resched_latency_warn_once;
    2468             : 
    2469             : extern unsigned int sysctl_sched_tunable_scaling;
    2470             : 
    2471             : extern unsigned int sysctl_numa_balancing_scan_delay;
    2472             : extern unsigned int sysctl_numa_balancing_scan_period_min;
    2473             : extern unsigned int sysctl_numa_balancing_scan_period_max;
    2474             : extern unsigned int sysctl_numa_balancing_scan_size;
    2475             : extern unsigned int sysctl_numa_balancing_hot_threshold;
    2476             : #endif
    2477             : 
    2478             : #ifdef CONFIG_SCHED_HRTICK
    2479             : 
    2480             : /*
    2481             :  * Use hrtick when:
    2482             :  *  - enabled by features
    2483             :  *  - hrtimer is actually high res
    2484             :  */
    2485             : static inline int hrtick_enabled(struct rq *rq)
    2486             : {
    2487             :         if (!cpu_active(cpu_of(rq)))
    2488             :                 return 0;
    2489             :         return hrtimer_is_hres_active(&rq->hrtick_timer);
    2490             : }
    2491             : 
    2492             : static inline int hrtick_enabled_fair(struct rq *rq)
    2493             : {
    2494             :         if (!sched_feat(HRTICK))
    2495             :                 return 0;
    2496             :         return hrtick_enabled(rq);
    2497             : }
    2498             : 
    2499             : static inline int hrtick_enabled_dl(struct rq *rq)
    2500             : {
    2501             :         if (!sched_feat(HRTICK_DL))
    2502             :                 return 0;
    2503             :         return hrtick_enabled(rq);
    2504             : }
    2505             : 
    2506             : void hrtick_start(struct rq *rq, u64 delay);
    2507             : 
    2508             : #else
    2509             : 
    2510             : static inline int hrtick_enabled_fair(struct rq *rq)
    2511             : {
    2512             :         return 0;
    2513             : }
    2514             : 
    2515             : static inline int hrtick_enabled_dl(struct rq *rq)
    2516             : {
    2517             :         return 0;
    2518             : }
    2519             : 
    2520             : static inline int hrtick_enabled(struct rq *rq)
    2521             : {
    2522             :         return 0;
    2523             : }
    2524             : 
    2525             : #endif /* CONFIG_SCHED_HRTICK */
    2526             : 
    2527             : #ifndef arch_scale_freq_tick
    2528             : static __always_inline
    2529             : void arch_scale_freq_tick(void)
    2530             : {
    2531             : }
    2532             : #endif
    2533             : 
    2534             : #ifndef arch_scale_freq_capacity
    2535             : /**
    2536             :  * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
    2537             :  * @cpu: the CPU in question.
    2538             :  *
    2539             :  * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
    2540             :  *
    2541             :  *     f_curr
    2542             :  *     ------ * SCHED_CAPACITY_SCALE
    2543             :  *     f_max
    2544             :  */
    2545             : static __always_inline
    2546             : unsigned long arch_scale_freq_capacity(int cpu)
    2547             : {
    2548             :         return SCHED_CAPACITY_SCALE;
    2549             : }
    2550             : #endif
    2551             : 
    2552             : #ifdef CONFIG_SCHED_DEBUG
    2553             : /*
    2554             :  * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to
    2555             :  * acquire rq lock instead of rq_lock(). So at the end of these two functions
    2556             :  * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of
    2557             :  * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning.
    2558             :  */
    2559             : static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
    2560             : {
    2561           0 :         rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
    2562             :         /* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */
    2563             : #ifdef CONFIG_SMP
    2564             :         rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
    2565             : #endif
    2566             : }
    2567             : #else
    2568             : static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {}
    2569             : #endif
    2570             : 
    2571             : #ifdef CONFIG_SMP
    2572             : 
    2573             : static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
    2574             : {
    2575             : #ifdef CONFIG_SCHED_CORE
    2576             :         /*
    2577             :          * In order to not have {0,2},{1,3} turn into into an AB-BA,
    2578             :          * order by core-id first and cpu-id second.
    2579             :          *
    2580             :          * Notably:
    2581             :          *
    2582             :          *      double_rq_lock(0,3); will take core-0, core-1 lock
    2583             :          *      double_rq_lock(1,2); will take core-1, core-0 lock
    2584             :          *
    2585             :          * when only cpu-id is considered.
    2586             :          */
    2587             :         if (rq1->core->cpu < rq2->core->cpu)
    2588             :                 return true;
    2589             :         if (rq1->core->cpu > rq2->core->cpu)
    2590             :                 return false;
    2591             : 
    2592             :         /*
    2593             :          * __sched_core_flip() relies on SMT having cpu-id lock order.
    2594             :          */
    2595             : #endif
    2596             :         return rq1->cpu < rq2->cpu;
    2597             : }
    2598             : 
    2599             : extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
    2600             : 
    2601             : #ifdef CONFIG_PREEMPTION
    2602             : 
    2603             : /*
    2604             :  * fair double_lock_balance: Safely acquires both rq->locks in a fair
    2605             :  * way at the expense of forcing extra atomic operations in all
    2606             :  * invocations.  This assures that the double_lock is acquired using the
    2607             :  * same underlying policy as the spinlock_t on this architecture, which
    2608             :  * reduces latency compared to the unfair variant below.  However, it
    2609             :  * also adds more overhead and therefore may reduce throughput.
    2610             :  */
    2611             : static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
    2612             :         __releases(this_rq->lock)
    2613             :         __acquires(busiest->lock)
    2614             :         __acquires(this_rq->lock)
    2615             : {
    2616             :         raw_spin_rq_unlock(this_rq);
    2617             :         double_rq_lock(this_rq, busiest);
    2618             : 
    2619             :         return 1;
    2620             : }
    2621             : 
    2622             : #else
    2623             : /*
    2624             :  * Unfair double_lock_balance: Optimizes throughput at the expense of
    2625             :  * latency by eliminating extra atomic operations when the locks are
    2626             :  * already in proper order on entry.  This favors lower CPU-ids and will
    2627             :  * grant the double lock to lower CPUs over higher ids under contention,
    2628             :  * regardless of entry order into the function.
    2629             :  */
    2630             : static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
    2631             :         __releases(this_rq->lock)
    2632             :         __acquires(busiest->lock)
    2633             :         __acquires(this_rq->lock)
    2634             : {
    2635             :         if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||
    2636             :             likely(raw_spin_rq_trylock(busiest))) {
    2637             :                 double_rq_clock_clear_update(this_rq, busiest);
    2638             :                 return 0;
    2639             :         }
    2640             : 
    2641             :         if (rq_order_less(this_rq, busiest)) {
    2642             :                 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
    2643             :                 double_rq_clock_clear_update(this_rq, busiest);
    2644             :                 return 0;
    2645             :         }
    2646             : 
    2647             :         raw_spin_rq_unlock(this_rq);
    2648             :         double_rq_lock(this_rq, busiest);
    2649             : 
    2650             :         return 1;
    2651             : }
    2652             : 
    2653             : #endif /* CONFIG_PREEMPTION */
    2654             : 
    2655             : /*
    2656             :  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
    2657             :  */
    2658             : static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
    2659             : {
    2660             :         lockdep_assert_irqs_disabled();
    2661             : 
    2662             :         return _double_lock_balance(this_rq, busiest);
    2663             : }
    2664             : 
    2665             : static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
    2666             :         __releases(busiest->lock)
    2667             : {
    2668             :         if (__rq_lockp(this_rq) != __rq_lockp(busiest))
    2669             :                 raw_spin_rq_unlock(busiest);
    2670             :         lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
    2671             : }
    2672             : 
    2673             : static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
    2674             : {
    2675             :         if (l1 > l2)
    2676             :                 swap(l1, l2);
    2677             : 
    2678             :         spin_lock(l1);
    2679             :         spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
    2680             : }
    2681             : 
    2682             : static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
    2683             : {
    2684             :         if (l1 > l2)
    2685             :                 swap(l1, l2);
    2686             : 
    2687             :         spin_lock_irq(l1);
    2688             :         spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
    2689             : }
    2690             : 
    2691             : static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
    2692             : {
    2693             :         if (l1 > l2)
    2694             :                 swap(l1, l2);
    2695             : 
    2696             :         raw_spin_lock(l1);
    2697             :         raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
    2698             : }
    2699             : 
    2700             : /*
    2701             :  * double_rq_unlock - safely unlock two runqueues
    2702             :  *
    2703             :  * Note this does not restore interrupts like task_rq_unlock,
    2704             :  * you need to do so manually after calling.
    2705             :  */
    2706             : static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
    2707             :         __releases(rq1->lock)
    2708             :         __releases(rq2->lock)
    2709             : {
    2710             :         if (__rq_lockp(rq1) != __rq_lockp(rq2))
    2711             :                 raw_spin_rq_unlock(rq2);
    2712             :         else
    2713             :                 __release(rq2->lock);
    2714             :         raw_spin_rq_unlock(rq1);
    2715             : }
    2716             : 
    2717             : extern void set_rq_online (struct rq *rq);
    2718             : extern void set_rq_offline(struct rq *rq);
    2719             : extern bool sched_smp_initialized;
    2720             : 
    2721             : #else /* CONFIG_SMP */
    2722             : 
    2723             : /*
    2724             :  * double_rq_lock - safely lock two runqueues
    2725             :  *
    2726             :  * Note this does not disable interrupts like task_rq_lock,
    2727             :  * you need to do so manually before calling.
    2728             :  */
    2729           0 : static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
    2730             :         __acquires(rq1->lock)
    2731             :         __acquires(rq2->lock)
    2732             : {
    2733           0 :         WARN_ON_ONCE(!irqs_disabled());
    2734           0 :         WARN_ON_ONCE(rq1 != rq2);
    2735           0 :         raw_spin_rq_lock(rq1);
    2736             :         __acquire(rq2->lock);        /* Fake it out ;) */
    2737           0 :         double_rq_clock_clear_update(rq1, rq2);
    2738           0 : }
    2739             : 
    2740             : /*
    2741             :  * double_rq_unlock - safely unlock two runqueues
    2742             :  *
    2743             :  * Note this does not restore interrupts like task_rq_unlock,
    2744             :  * you need to do so manually after calling.
    2745             :  */
    2746           0 : static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
    2747             :         __releases(rq1->lock)
    2748             :         __releases(rq2->lock)
    2749             : {
    2750           0 :         WARN_ON_ONCE(rq1 != rq2);
    2751           0 :         raw_spin_rq_unlock(rq1);
    2752             :         __release(rq2->lock);
    2753           0 : }
    2754             : 
    2755             : #endif
    2756             : 
    2757             : extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
    2758             : extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
    2759             : 
    2760             : #ifdef  CONFIG_SCHED_DEBUG
    2761             : extern bool sched_debug_verbose;
    2762             : 
    2763             : extern void print_cfs_stats(struct seq_file *m, int cpu);
    2764             : extern void print_rt_stats(struct seq_file *m, int cpu);
    2765             : extern void print_dl_stats(struct seq_file *m, int cpu);
    2766             : extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
    2767             : extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
    2768             : extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
    2769             : 
    2770             : extern void resched_latency_warn(int cpu, u64 latency);
    2771             : #ifdef CONFIG_NUMA_BALANCING
    2772             : extern void
    2773             : show_numa_stats(struct task_struct *p, struct seq_file *m);
    2774             : extern void
    2775             : print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
    2776             :         unsigned long tpf, unsigned long gsf, unsigned long gpf);
    2777             : #endif /* CONFIG_NUMA_BALANCING */
    2778             : #else
    2779             : static inline void resched_latency_warn(int cpu, u64 latency) {}
    2780             : #endif /* CONFIG_SCHED_DEBUG */
    2781             : 
    2782             : extern void init_cfs_rq(struct cfs_rq *cfs_rq);
    2783             : extern void init_rt_rq(struct rt_rq *rt_rq);
    2784             : extern void init_dl_rq(struct dl_rq *dl_rq);
    2785             : 
    2786             : extern void cfs_bandwidth_usage_inc(void);
    2787             : extern void cfs_bandwidth_usage_dec(void);
    2788             : 
    2789             : #ifdef CONFIG_NO_HZ_COMMON
    2790             : #define NOHZ_BALANCE_KICK_BIT   0
    2791             : #define NOHZ_STATS_KICK_BIT     1
    2792             : #define NOHZ_NEWILB_KICK_BIT    2
    2793             : #define NOHZ_NEXT_KICK_BIT      3
    2794             : 
    2795             : /* Run rebalance_domains() */
    2796             : #define NOHZ_BALANCE_KICK       BIT(NOHZ_BALANCE_KICK_BIT)
    2797             : /* Update blocked load */
    2798             : #define NOHZ_STATS_KICK         BIT(NOHZ_STATS_KICK_BIT)
    2799             : /* Update blocked load when entering idle */
    2800             : #define NOHZ_NEWILB_KICK        BIT(NOHZ_NEWILB_KICK_BIT)
    2801             : /* Update nohz.next_balance */
    2802             : #define NOHZ_NEXT_KICK          BIT(NOHZ_NEXT_KICK_BIT)
    2803             : 
    2804             : #define NOHZ_KICK_MASK  (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)
    2805             : 
    2806             : #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
    2807             : 
    2808             : extern void nohz_balance_exit_idle(struct rq *rq);
    2809             : #else
    2810             : static inline void nohz_balance_exit_idle(struct rq *rq) { }
    2811             : #endif
    2812             : 
    2813             : #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
    2814             : extern void nohz_run_idle_balance(int cpu);
    2815             : #else
    2816             : static inline void nohz_run_idle_balance(int cpu) { }
    2817             : #endif
    2818             : 
    2819             : #ifdef CONFIG_IRQ_TIME_ACCOUNTING
    2820             : struct irqtime {
    2821             :         u64                     total;
    2822             :         u64                     tick_delta;
    2823             :         u64                     irq_start_time;
    2824             :         struct u64_stats_sync   sync;
    2825             : };
    2826             : 
    2827             : DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
    2828             : 
    2829             : /*
    2830             :  * Returns the irqtime minus the softirq time computed by ksoftirqd.
    2831             :  * Otherwise ksoftirqd's sum_exec_runtime is subtracted its own runtime
    2832             :  * and never move forward.
    2833             :  */
    2834             : static inline u64 irq_time_read(int cpu)
    2835             : {
    2836             :         struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
    2837             :         unsigned int seq;
    2838             :         u64 total;
    2839             : 
    2840             :         do {
    2841             :                 seq = __u64_stats_fetch_begin(&irqtime->sync);
    2842             :                 total = irqtime->total;
    2843             :         } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
    2844             : 
    2845             :         return total;
    2846             : }
    2847             : #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
    2848             : 
    2849             : #ifdef CONFIG_CPU_FREQ
    2850             : DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
    2851             : 
    2852             : /**
    2853             :  * cpufreq_update_util - Take a note about CPU utilization changes.
    2854             :  * @rq: Runqueue to carry out the update for.
    2855             :  * @flags: Update reason flags.
    2856             :  *
    2857             :  * This function is called by the scheduler on the CPU whose utilization is
    2858             :  * being updated.
    2859             :  *
    2860             :  * It can only be called from RCU-sched read-side critical sections.
    2861             :  *
    2862             :  * The way cpufreq is currently arranged requires it to evaluate the CPU
    2863             :  * performance state (frequency/voltage) on a regular basis to prevent it from
    2864             :  * being stuck in a completely inadequate performance level for too long.
    2865             :  * That is not guaranteed to happen if the updates are only triggered from CFS
    2866             :  * and DL, though, because they may not be coming in if only RT tasks are
    2867             :  * active all the time (or there are RT tasks only).
    2868             :  *
    2869             :  * As a workaround for that issue, this function is called periodically by the
    2870             :  * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
    2871             :  * but that really is a band-aid.  Going forward it should be replaced with
    2872             :  * solutions targeted more specifically at RT tasks.
    2873             :  */
    2874             : static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
    2875             : {
    2876             :         struct update_util_data *data;
    2877             : 
    2878             :         data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
    2879             :                                                   cpu_of(rq)));
    2880             :         if (data)
    2881             :                 data->func(data, rq_clock(rq), flags);
    2882             : }
    2883             : #else
    2884             : static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
    2885             : #endif /* CONFIG_CPU_FREQ */
    2886             : 
    2887             : #ifdef arch_scale_freq_capacity
    2888             : # ifndef arch_scale_freq_invariant
    2889             : #  define arch_scale_freq_invariant()   true
    2890             : # endif
    2891             : #else
    2892             : # define arch_scale_freq_invariant()    false
    2893             : #endif
    2894             : 
    2895             : #ifdef CONFIG_SMP
    2896             : static inline unsigned long capacity_orig_of(int cpu)
    2897             : {
    2898             :         return cpu_rq(cpu)->cpu_capacity_orig;
    2899             : }
    2900             : 
    2901             : /**
    2902             :  * enum cpu_util_type - CPU utilization type
    2903             :  * @FREQUENCY_UTIL:     Utilization used to select frequency
    2904             :  * @ENERGY_UTIL:        Utilization used during energy calculation
    2905             :  *
    2906             :  * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
    2907             :  * need to be aggregated differently depending on the usage made of them. This
    2908             :  * enum is used within effective_cpu_util() to differentiate the types of
    2909             :  * utilization expected by the callers, and adjust the aggregation accordingly.
    2910             :  */
    2911             : enum cpu_util_type {
    2912             :         FREQUENCY_UTIL,
    2913             :         ENERGY_UTIL,
    2914             : };
    2915             : 
    2916             : unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
    2917             :                                  enum cpu_util_type type,
    2918             :                                  struct task_struct *p);
    2919             : 
    2920             : /*
    2921             :  * Verify the fitness of task @p to run on @cpu taking into account the
    2922             :  * CPU original capacity and the runtime/deadline ratio of the task.
    2923             :  *
    2924             :  * The function will return true if the original capacity of @cpu is
    2925             :  * greater than or equal to task's deadline density right shifted by
    2926             :  * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise.
    2927             :  */
    2928             : static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
    2929             : {
    2930             :         unsigned long cap = arch_scale_cpu_capacity(cpu);
    2931             : 
    2932             :         return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT);
    2933             : }
    2934             : 
    2935             : static inline unsigned long cpu_bw_dl(struct rq *rq)
    2936             : {
    2937             :         return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
    2938             : }
    2939             : 
    2940             : static inline unsigned long cpu_util_dl(struct rq *rq)
    2941             : {
    2942             :         return READ_ONCE(rq->avg_dl.util_avg);
    2943             : }
    2944             : 
    2945             : /**
    2946             :  * cpu_util_cfs() - Estimates the amount of CPU capacity used by CFS tasks.
    2947             :  * @cpu: the CPU to get the utilization for.
    2948             :  *
    2949             :  * The unit of the return value must be the same as the one of CPU capacity
    2950             :  * so that CPU utilization can be compared with CPU capacity.
    2951             :  *
    2952             :  * CPU utilization is the sum of running time of runnable tasks plus the
    2953             :  * recent utilization of currently non-runnable tasks on that CPU.
    2954             :  * It represents the amount of CPU capacity currently used by CFS tasks in
    2955             :  * the range [0..max CPU capacity] with max CPU capacity being the CPU
    2956             :  * capacity at f_max.
    2957             :  *
    2958             :  * The estimated CPU utilization is defined as the maximum between CPU
    2959             :  * utilization and sum of the estimated utilization of the currently
    2960             :  * runnable tasks on that CPU. It preserves a utilization "snapshot" of
    2961             :  * previously-executed tasks, which helps better deduce how busy a CPU will
    2962             :  * be when a long-sleeping task wakes up. The contribution to CPU utilization
    2963             :  * of such a task would be significantly decayed at this point of time.
    2964             :  *
    2965             :  * CPU utilization can be higher than the current CPU capacity
    2966             :  * (f_curr/f_max * max CPU capacity) or even the max CPU capacity because
    2967             :  * of rounding errors as well as task migrations or wakeups of new tasks.
    2968             :  * CPU utilization has to be capped to fit into the [0..max CPU capacity]
    2969             :  * range. Otherwise a group of CPUs (CPU0 util = 121% + CPU1 util = 80%)
    2970             :  * could be seen as over-utilized even though CPU1 has 20% of spare CPU
    2971             :  * capacity. CPU utilization is allowed to overshoot current CPU capacity
    2972             :  * though since this is useful for predicting the CPU capacity required
    2973             :  * after task migrations (scheduler-driven DVFS).
    2974             :  *
    2975             :  * Return: (Estimated) utilization for the specified CPU.
    2976             :  */
    2977             : static inline unsigned long cpu_util_cfs(int cpu)
    2978             : {
    2979             :         struct cfs_rq *cfs_rq;
    2980             :         unsigned long util;
    2981             : 
    2982             :         cfs_rq = &cpu_rq(cpu)->cfs;
    2983             :         util = READ_ONCE(cfs_rq->avg.util_avg);
    2984             : 
    2985             :         if (sched_feat(UTIL_EST)) {
    2986             :                 util = max_t(unsigned long, util,
    2987             :                              READ_ONCE(cfs_rq->avg.util_est.enqueued));
    2988             :         }
    2989             : 
    2990             :         return min(util, capacity_orig_of(cpu));
    2991             : }
    2992             : 
    2993             : static inline unsigned long cpu_util_rt(struct rq *rq)
    2994             : {
    2995             :         return READ_ONCE(rq->avg_rt.util_avg);
    2996             : }
    2997             : #endif
    2998             : 
    2999             : #ifdef CONFIG_UCLAMP_TASK
    3000             : unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
    3001             : 
    3002             : static inline unsigned long uclamp_rq_get(struct rq *rq,
    3003             :                                           enum uclamp_id clamp_id)
    3004             : {
    3005             :         return READ_ONCE(rq->uclamp[clamp_id].value);
    3006             : }
    3007             : 
    3008             : static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
    3009             :                                  unsigned int value)
    3010             : {
    3011             :         WRITE_ONCE(rq->uclamp[clamp_id].value, value);
    3012             : }
    3013             : 
    3014             : static inline bool uclamp_rq_is_idle(struct rq *rq)
    3015             : {
    3016             :         return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
    3017             : }
    3018             : 
    3019             : /**
    3020             :  * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
    3021             :  * @rq:         The rq to clamp against. Must not be NULL.
    3022             :  * @util:       The util value to clamp.
    3023             :  * @p:          The task to clamp against. Can be NULL if you want to clamp
    3024             :  *              against @rq only.
    3025             :  *
    3026             :  * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
    3027             :  *
    3028             :  * If sched_uclamp_used static key is disabled, then just return the util
    3029             :  * without any clamping since uclamp aggregation at the rq level in the fast
    3030             :  * path is disabled, rendering this operation a NOP.
    3031             :  *
    3032             :  * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
    3033             :  * will return the correct effective uclamp value of the task even if the
    3034             :  * static key is disabled.
    3035             :  */
    3036             : static __always_inline
    3037             : unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
    3038             :                                   struct task_struct *p)
    3039             : {
    3040             :         unsigned long min_util = 0;
    3041             :         unsigned long max_util = 0;
    3042             : 
    3043             :         if (!static_branch_likely(&sched_uclamp_used))
    3044             :                 return util;
    3045             : 
    3046             :         if (p) {
    3047             :                 min_util = uclamp_eff_value(p, UCLAMP_MIN);
    3048             :                 max_util = uclamp_eff_value(p, UCLAMP_MAX);
    3049             : 
    3050             :                 /*
    3051             :                  * Ignore last runnable task's max clamp, as this task will
    3052             :                  * reset it. Similarly, no need to read the rq's min clamp.
    3053             :                  */
    3054             :                 if (uclamp_rq_is_idle(rq))
    3055             :                         goto out;
    3056             :         }
    3057             : 
    3058             :         min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
    3059             :         max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
    3060             : out:
    3061             :         /*
    3062             :          * Since CPU's {min,max}_util clamps are MAX aggregated considering
    3063             :          * RUNNABLE tasks with _different_ clamps, we can end up with an
    3064             :          * inversion. Fix it now when the clamps are applied.
    3065             :          */
    3066             :         if (unlikely(min_util >= max_util))
    3067             :                 return min_util;
    3068             : 
    3069             :         return clamp(util, min_util, max_util);
    3070             : }
    3071             : 
    3072             : /* Is the rq being capped/throttled by uclamp_max? */
    3073             : static inline bool uclamp_rq_is_capped(struct rq *rq)
    3074             : {
    3075             :         unsigned long rq_util;
    3076             :         unsigned long max_util;
    3077             : 
    3078             :         if (!static_branch_likely(&sched_uclamp_used))
    3079             :                 return false;
    3080             : 
    3081             :         rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
    3082             :         max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
    3083             : 
    3084             :         return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
    3085             : }
    3086             : 
    3087             : /*
    3088             :  * When uclamp is compiled in, the aggregation at rq level is 'turned off'
    3089             :  * by default in the fast path and only gets turned on once userspace performs
    3090             :  * an operation that requires it.
    3091             :  *
    3092             :  * Returns true if userspace opted-in to use uclamp and aggregation at rq level
    3093             :  * hence is active.
    3094             :  */
    3095             : static inline bool uclamp_is_used(void)
    3096             : {
    3097             :         return static_branch_likely(&sched_uclamp_used);
    3098             : }
    3099             : #else /* CONFIG_UCLAMP_TASK */
    3100             : static inline unsigned long uclamp_eff_value(struct task_struct *p,
    3101             :                                              enum uclamp_id clamp_id)
    3102             : {
    3103             :         if (clamp_id == UCLAMP_MIN)
    3104             :                 return 0;
    3105             : 
    3106             :         return SCHED_CAPACITY_SCALE;
    3107             : }
    3108             : 
    3109             : static inline
    3110             : unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
    3111             :                                   struct task_struct *p)
    3112             : {
    3113             :         return util;
    3114             : }
    3115             : 
    3116             : static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
    3117             : 
    3118             : static inline bool uclamp_is_used(void)
    3119             : {
    3120             :         return false;
    3121             : }
    3122             : 
    3123             : static inline unsigned long uclamp_rq_get(struct rq *rq,
    3124             :                                           enum uclamp_id clamp_id)
    3125             : {
    3126             :         if (clamp_id == UCLAMP_MIN)
    3127             :                 return 0;
    3128             : 
    3129             :         return SCHED_CAPACITY_SCALE;
    3130             : }
    3131             : 
    3132             : static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
    3133             :                                  unsigned int value)
    3134             : {
    3135             : }
    3136             : 
    3137             : static inline bool uclamp_rq_is_idle(struct rq *rq)
    3138             : {
    3139             :         return false;
    3140             : }
    3141             : #endif /* CONFIG_UCLAMP_TASK */
    3142             : 
    3143             : #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
    3144             : static inline unsigned long cpu_util_irq(struct rq *rq)
    3145             : {
    3146             :         return rq->avg_irq.util_avg;
    3147             : }
    3148             : 
    3149             : static inline
    3150             : unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
    3151             : {
    3152             :         util *= (max - irq);
    3153             :         util /= max;
    3154             : 
    3155             :         return util;
    3156             : 
    3157             : }
    3158             : #else
    3159             : static inline unsigned long cpu_util_irq(struct rq *rq)
    3160             : {
    3161             :         return 0;
    3162             : }
    3163             : 
    3164             : static inline
    3165             : unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
    3166             : {
    3167             :         return util;
    3168             : }
    3169             : #endif
    3170             : 
    3171             : #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
    3172             : 
    3173             : #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
    3174             : 
    3175             : DECLARE_STATIC_KEY_FALSE(sched_energy_present);
    3176             : 
    3177             : static inline bool sched_energy_enabled(void)
    3178             : {
    3179             :         return static_branch_unlikely(&sched_energy_present);
    3180             : }
    3181             : 
    3182             : #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
    3183             : 
    3184             : #define perf_domain_span(pd) NULL
    3185             : static inline bool sched_energy_enabled(void) { return false; }
    3186             : 
    3187             : #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
    3188             : 
    3189             : #ifdef CONFIG_MEMBARRIER
    3190             : /*
    3191             :  * The scheduler provides memory barriers required by membarrier between:
    3192             :  * - prior user-space memory accesses and store to rq->membarrier_state,
    3193             :  * - store to rq->membarrier_state and following user-space memory accesses.
    3194             :  * In the same way it provides those guarantees around store to rq->curr.
    3195             :  */
    3196             : static inline void membarrier_switch_mm(struct rq *rq,
    3197             :                                         struct mm_struct *prev_mm,
    3198             :                                         struct mm_struct *next_mm)
    3199             : {
    3200             :         int membarrier_state;
    3201             : 
    3202           0 :         if (prev_mm == next_mm)
    3203             :                 return;
    3204             : 
    3205           0 :         membarrier_state = atomic_read(&next_mm->membarrier_state);
    3206           0 :         if (READ_ONCE(rq->membarrier_state) == membarrier_state)
    3207             :                 return;
    3208             : 
    3209           0 :         WRITE_ONCE(rq->membarrier_state, membarrier_state);
    3210             : }
    3211             : #else
    3212             : static inline void membarrier_switch_mm(struct rq *rq,
    3213             :                                         struct mm_struct *prev_mm,
    3214             :                                         struct mm_struct *next_mm)
    3215             : {
    3216             : }
    3217             : #endif
    3218             : 
    3219             : #ifdef CONFIG_SMP
    3220             : static inline bool is_per_cpu_kthread(struct task_struct *p)
    3221             : {
    3222             :         if (!(p->flags & PF_KTHREAD))
    3223             :                 return false;
    3224             : 
    3225             :         if (p->nr_cpus_allowed != 1)
    3226             :                 return false;
    3227             : 
    3228             :         return true;
    3229             : }
    3230             : #endif
    3231             : 
    3232             : extern void swake_up_all_locked(struct swait_queue_head *q);
    3233             : extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
    3234             : 
    3235             : #ifdef CONFIG_PREEMPT_DYNAMIC
    3236             : extern int preempt_dynamic_mode;
    3237             : extern int sched_dynamic_mode(const char *str);
    3238             : extern void sched_dynamic_update(int mode);
    3239             : #endif
    3240             : 
    3241             : static inline void update_current_exec_runtime(struct task_struct *curr,
    3242             :                                                 u64 now, u64 delta_exec)
    3243             : {
    3244           0 :         curr->se.sum_exec_runtime += delta_exec;
    3245           0 :         account_group_exec_runtime(curr, delta_exec);
    3246             : 
    3247           0 :         curr->se.exec_start = now;
    3248           0 :         cgroup_account_cputime(curr, delta_exec);
    3249             : }
    3250             : 
    3251             : #ifdef CONFIG_SCHED_MM_CID
    3252             : static inline int __mm_cid_get(struct mm_struct *mm)
    3253             : {
    3254             :         struct cpumask *cpumask;
    3255             :         int cid;
    3256             : 
    3257             :         cpumask = mm_cidmask(mm);
    3258             :         cid = cpumask_first_zero(cpumask);
    3259             :         if (cid >= nr_cpu_ids)
    3260             :                 return -1;
    3261             :         __cpumask_set_cpu(cid, cpumask);
    3262             :         return cid;
    3263             : }
    3264             : 
    3265             : static inline void mm_cid_put(struct mm_struct *mm, int cid)
    3266             : {
    3267             :         lockdep_assert_irqs_disabled();
    3268             :         if (cid < 0)
    3269             :                 return;
    3270             :         raw_spin_lock(&mm->cid_lock);
    3271             :         __cpumask_clear_cpu(cid, mm_cidmask(mm));
    3272             :         raw_spin_unlock(&mm->cid_lock);
    3273             : }
    3274             : 
    3275             : static inline int mm_cid_get(struct mm_struct *mm)
    3276             : {
    3277             :         int ret;
    3278             : 
    3279             :         lockdep_assert_irqs_disabled();
    3280             :         raw_spin_lock(&mm->cid_lock);
    3281             :         ret = __mm_cid_get(mm);
    3282             :         raw_spin_unlock(&mm->cid_lock);
    3283             :         return ret;
    3284             : }
    3285             : 
    3286             : static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next)
    3287             : {
    3288             :         if (prev->mm_cid_active) {
    3289             :                 if (next->mm_cid_active && next->mm == prev->mm) {
    3290             :                         /*
    3291             :                          * Context switch between threads in same mm, hand over
    3292             :                          * the mm_cid from prev to next.
    3293             :                          */
    3294             :                         next->mm_cid = prev->mm_cid;
    3295             :                         prev->mm_cid = -1;
    3296             :                         return;
    3297             :                 }
    3298             :                 mm_cid_put(prev->mm, prev->mm_cid);
    3299             :                 prev->mm_cid = -1;
    3300             :         }
    3301             :         if (next->mm_cid_active)
    3302             :                 next->mm_cid = mm_cid_get(next->mm);
    3303             : }
    3304             : 
    3305             : #else
    3306             : static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next) { }
    3307             : #endif
    3308             : 
    3309             : #endif /* _KERNEL_SCHED_SCHED_H */

Generated by: LCOV version 1.14