LCOV - code coverage report
Current view: top level - kernel - cpu.c (source / functions)
Test: coverage.info
Date: 2023-07-19 18:55:55
Coverage:           Hit   Total   Coverage
  Lines:             97     201     48.3 %
  Functions:         11      21     52.4 %

          Line data    Source code
       1             : /* CPU control.
       2             :  * (C) 2001, 2002, 2003, 2004 Rusty Russell
       3             :  *
       4             :  * This code is licensed under the GPL.
       5             :  */
       6             : #include <linux/sched/mm.h>
       7             : #include <linux/proc_fs.h>
       8             : #include <linux/smp.h>
       9             : #include <linux/init.h>
      10             : #include <linux/notifier.h>
      11             : #include <linux/sched/signal.h>
      12             : #include <linux/sched/hotplug.h>
      13             : #include <linux/sched/isolation.h>
      14             : #include <linux/sched/task.h>
      15             : #include <linux/sched/smt.h>
      16             : #include <linux/unistd.h>
      17             : #include <linux/cpu.h>
      18             : #include <linux/oom.h>
      19             : #include <linux/rcupdate.h>
      20             : #include <linux/export.h>
      21             : #include <linux/bug.h>
      22             : #include <linux/kthread.h>
      23             : #include <linux/stop_machine.h>
      24             : #include <linux/mutex.h>
      25             : #include <linux/gfp.h>
      26             : #include <linux/suspend.h>
      27             : #include <linux/lockdep.h>
      28             : #include <linux/tick.h>
      29             : #include <linux/irq.h>
      30             : #include <linux/nmi.h>
      31             : #include <linux/smpboot.h>
      32             : #include <linux/relay.h>
      33             : #include <linux/slab.h>
      34             : #include <linux/scs.h>
      35             : #include <linux/percpu-rwsem.h>
      36             : #include <linux/cpuset.h>
      37             : #include <linux/random.h>
      38             : #include <linux/cc_platform.h>
      39             : 
      40             : #include <trace/events/power.h>
      41             : #define CREATE_TRACE_POINTS
      42             : #include <trace/events/cpuhp.h>
      43             : 
      44             : #include "smpboot.h"
      45             : 
      46             : /**
      47             :  * struct cpuhp_cpu_state - Per cpu hotplug state storage
      48             :  * @state:      The current cpu state
      49             :  * @target:     The target state
      50             :  * @fail:       Current CPU hotplug callback state
      51             :  * @thread:     Pointer to the hotplug thread
      52             :  * @should_run: Thread should execute
      53             :  * @rollback:   Perform a rollback
      54             :  * @single:     Single callback invocation
      55             :  * @bringup:    Single callback bringup or teardown selector
      56             :  * @cpu:        CPU number
      57             :  * @node:       Remote CPU node; for multi-instance, do a
      58             :  *              single entry callback for install/remove
      59             :  * @last:       For multi-instance rollback, remember how far we got
      60             :  * @cb_state:   The state for a single callback (install/uninstall)
      61             :  * @result:     Result of the operation
      62             :  * @done_up:    Signal completion to the issuer of the task for cpu-up
      63             :  * @done_down:  Signal completion to the issuer of the task for cpu-down
      64             :  */
      65             : struct cpuhp_cpu_state {
      66             :         enum cpuhp_state        state;
      67             :         enum cpuhp_state        target;
      68             :         enum cpuhp_state        fail;
      69             : #ifdef CONFIG_SMP
      70             :         struct task_struct      *thread;
      71             :         bool                    should_run;
      72             :         bool                    rollback;
      73             :         bool                    single;
      74             :         bool                    bringup;
      75             :         struct hlist_node       *node;
      76             :         struct hlist_node       *last;
      77             :         enum cpuhp_state        cb_state;
      78             :         int                     result;
      79             :         struct completion       done_up;
      80             :         struct completion       done_down;
      81             : #endif
      82             : };
      83             : 
      84             : static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
      85             :         .fail = CPUHP_INVALID,
      86             : };
      87             : 
      88             : #ifdef CONFIG_SMP
      89             : cpumask_t cpus_booted_once_mask;
      90             : #endif
      91             : 
      92             : #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
      93             : static struct lockdep_map cpuhp_state_up_map =
      94             :         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
      95             : static struct lockdep_map cpuhp_state_down_map =
      96             :         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
      97             : 
      98             : 
      99             : static inline void cpuhp_lock_acquire(bool bringup)
     100             : {
     101             :         lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
     102             : }
     103             : 
     104             : static inline void cpuhp_lock_release(bool bringup)
     105             : {
     106             :         lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
     107             : }
     108             : #else
     109             : 
     110             : static inline void cpuhp_lock_acquire(bool bringup) { }
     111             : static inline void cpuhp_lock_release(bool bringup) { }
     112             : 
     113             : #endif
     114             : 
     115             : /**
     116             :  * struct cpuhp_step - Hotplug state machine step
     117             :  * @name:       Name of the step
     118             :  * @startup:    Startup function of the step
     119             :  * @teardown:   Teardown function of the step
     120             :  * @cant_stop:  Bringup/teardown can't be stopped at this step
     121             :  * @multi_instance:     State has multiple instances which get added afterwards
     122             :  */
     123             : struct cpuhp_step {
     124             :         const char              *name;
     125             :         union {
     126             :                 int             (*single)(unsigned int cpu);
     127             :                 int             (*multi)(unsigned int cpu,
     128             :                                          struct hlist_node *node);
     129             :         } startup;
     130             :         union {
     131             :                 int             (*single)(unsigned int cpu);
     132             :                 int             (*multi)(unsigned int cpu,
     133             :                                          struct hlist_node *node);
     134             :         } teardown;
     135             :         /* private: */
     136             :         struct hlist_head       list;
     137             :         /* public: */
     138             :         bool                    cant_stop;
     139             :         bool                    multi_instance;
     140             : };
     141             : 
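For orientation, a cpuhp_step is normally filled in through the cpuhp_setup_state() family of helpers rather than by hand. The sketch below is hypothetical (the mydrv_* names and the "mydrv:online" string are invented) and assumes <linux/cpuhotplug.h>; it shows a driver installing a dynamically allocated AP-online state whose startup/teardown callbacks run on each CPU as it comes online or goes offline.

static int mydrv_cpu_online(unsigned int cpu)           /* hypothetical */
{
        /* set up this driver's per-CPU resources for @cpu */
        return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)          /* hypothetical */
{
        /* quiesce and release the per-CPU resources again */
        return 0;
}

static int __init mydrv_init(void)                      /* hypothetical */
{
        int ret;

        /* for CPUHP_AP_ONLINE_DYN a positive return value is the allocated state */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                mydrv_cpu_online, mydrv_cpu_offline);
        return ret < 0 ? ret : 0;
}
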
     142             : static DEFINE_MUTEX(cpuhp_state_mutex);
     143             : static struct cpuhp_step cpuhp_hp_states[];
     144             : 
     145             : static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
     146             : {
     147          27 :         return cpuhp_hp_states + state;
     148             : }
     149             : 
     150             : static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
     151             : {
     152           6 :         return bringup ? !step->startup.single : !step->teardown.single;
     153             : }
     154             : 
     155             : /**
     156             :  * cpuhp_invoke_callback - Invoke the callbacks for a given state
     157             :  * @cpu:        The cpu for which the callback should be invoked
     158             :  * @state:      The state to do callbacks for
     159             :  * @bringup:    True if the bringup callback should be invoked
     160             :  * @node:       For multi-instance, do a single entry callback for install/remove
     161             :  * @lastp:      For multi-instance rollback, remember how far we got
     162             :  *
     163             :  * Called from cpu hotplug and from the state register machinery.
     164             :  *
     165             :  * Return: %0 on success or a negative errno code
     166             :  */
     167           3 : static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
     168             :                                  bool bringup, struct hlist_node *node,
     169             :                                  struct hlist_node **lastp)
     170             : {
     171           3 :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
     172           3 :         struct cpuhp_step *step = cpuhp_get_step(state);
     173             :         int (*cbm)(unsigned int cpu, struct hlist_node *node);
     174             :         int (*cb)(unsigned int cpu);
     175             :         int ret, cnt;
     176             : 
     177           3 :         if (st->fail == state) {
     178           0 :                 st->fail = CPUHP_INVALID;
     179           0 :                 return -EAGAIN;
     180             :         }
     181             : 
     182           3 :         if (cpuhp_step_empty(bringup, step)) {
     183           0 :                 WARN_ON_ONCE(1);
     184             :                 return 0;
     185             :         }
     186             : 
     187           3 :         if (!step->multi_instance) {
     188           3 :                 WARN_ON_ONCE(lastp && *lastp);
     189           3 :                 cb = bringup ? step->startup.single : step->teardown.single;
     190             : 
     191           3 :                 trace_cpuhp_enter(cpu, st->target, state, cb);
     192           3 :                 ret = cb(cpu);
     193           3 :                 trace_cpuhp_exit(cpu, st->state, state, ret);
     194           3 :                 return ret;
     195             :         }
     196           0 :         cbm = bringup ? step->startup.multi : step->teardown.multi;
     197             : 
     198             :         /* Single invocation for instance add/remove */
     199           0 :         if (node) {
     200           0 :                 WARN_ON_ONCE(lastp && *lastp);
     201           0 :                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
     202           0 :                 ret = cbm(cpu, node);
     203           0 :                 trace_cpuhp_exit(cpu, st->state, state, ret);
     204           0 :                 return ret;
     205             :         }
     206             : 
     207             :         /* State transition. Invoke on all instances */
     208           0 :         cnt = 0;
     209           0 :         hlist_for_each(node, &step->list) {
     210           0 :                 if (lastp && node == *lastp)
     211             :                         break;
     212             : 
     213           0 :                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
     214           0 :                 ret = cbm(cpu, node);
     215           0 :                 trace_cpuhp_exit(cpu, st->state, state, ret);
     216           0 :                 if (ret) {
     217           0 :                         if (!lastp)
     218             :                                 goto err;
     219             : 
     220           0 :                         *lastp = node;
     221           0 :                         return ret;
     222             :                 }
     223           0 :                 cnt++;
     224             :         }
     225           0 :         if (lastp)
     226           0 :                 *lastp = NULL;
     227             :         return 0;
     228             : err:
     229             :         /* Rollback the instances if one failed */
     230           0 :         cbm = !bringup ? step->startup.multi : step->teardown.multi;
     231           0 :         if (!cbm)
     232             :                 return ret;
     233             : 
     234           0 :         hlist_for_each(node, &step->list) {
     235           0 :                 if (!cnt--)
     236             :                         break;
     237             : 
     238           0 :                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
     239           0 :                 ret = cbm(cpu, node);
     240           0 :                 trace_cpuhp_exit(cpu, st->state, state, ret);
     241             :                 /*
      242             :                  * Rollback must not fail.
     243             :                  */
     244           0 :                 WARN_ON_ONCE(ret);
     245             :         }
     246             :         return ret;
     247             : }
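
The multi-instance branch above is what backs the cpuhp_setup_state_multi()/cpuhp_state_add_instance() pair: every registered instance hangs off step->list via an embedded hlist_node and gets its own callback invocation. A hypothetical sketch of a driver using it (the mydrv_* names and helpers are invented, not from this file):

struct mydrv_queue {                                    /* hypothetical per-device object */
        struct hlist_node node;                         /* links it into the state's instance list */
};

static int mydrv_queue_online(unsigned int cpu, struct hlist_node *node)
{
        struct mydrv_queue *q = hlist_entry(node, struct mydrv_queue, node);

        return mydrv_enable_on_cpu(q, cpu);             /* hypothetical helper */
}

static int mydrv_queue_offline(unsigned int cpu, struct hlist_node *node)
{
        struct mydrv_queue *q = hlist_entry(node, struct mydrv_queue, node);

        return mydrv_disable_on_cpu(q, cpu);            /* hypothetical helper */
}

static enum cpuhp_state mydrv_hp_state;                 /* hypothetical */

static int __init mydrv_register_hp(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:queue",
                                      mydrv_queue_online, mydrv_queue_offline);
        if (ret < 0)
                return ret;
        mydrv_hp_state = ret;
        /* per device, later: cpuhp_state_add_instance(mydrv_hp_state, &q->node); */
        return 0;
}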
     248             : 
     249             : #ifdef CONFIG_SMP
     250             : static bool cpuhp_is_ap_state(enum cpuhp_state state)
     251             : {
     252             :         /*
     253             :          * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
     254             :          * purposes as that state is handled explicitly in cpu_down.
     255             :          */
     256             :         return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
     257             : }
     258             : 
     259             : static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
     260             : {
     261             :         struct completion *done = bringup ? &st->done_up : &st->done_down;
     262             :         wait_for_completion(done);
     263             : }
     264             : 
     265             : static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
     266             : {
     267             :         struct completion *done = bringup ? &st->done_up : &st->done_down;
     268             :         complete(done);
     269             : }
     270             : 
     271             : /*
      272             :  * The former STARTING/DYING states run with IRQs disabled and must not fail.
     273             :  */
     274             : static bool cpuhp_is_atomic_state(enum cpuhp_state state)
     275             : {
     276             :         return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
     277             : }
     278             : 
     279             : /* Serializes the updates to cpu_online_mask, cpu_present_mask */
     280             : static DEFINE_MUTEX(cpu_add_remove_lock);
     281             : bool cpuhp_tasks_frozen;
     282             : EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
     283             : 
     284             : /*
     285             :  * The following two APIs (cpu_maps_update_begin/done) must be used when
     286             :  * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
     287             :  */
     288             : void cpu_maps_update_begin(void)
     289             : {
     290             :         mutex_lock(&cpu_add_remove_lock);
     291             : }
     292             : 
     293             : void cpu_maps_update_done(void)
     294             : {
     295             :         mutex_unlock(&cpu_add_remove_lock);
     296             : }
     297             : 
     298             : /*
     299             :  * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
     300             :  * Should always be manipulated under cpu_add_remove_lock
     301             :  */
     302             : static int cpu_hotplug_disabled;
     303             : 
     304             : #ifdef CONFIG_HOTPLUG_CPU
     305             : 
     306             : DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
     307             : 
     308             : void cpus_read_lock(void)
     309             : {
     310             :         percpu_down_read(&cpu_hotplug_lock);
     311             : }
     312             : EXPORT_SYMBOL_GPL(cpus_read_lock);
     313             : 
     314             : int cpus_read_trylock(void)
     315             : {
     316             :         return percpu_down_read_trylock(&cpu_hotplug_lock);
     317             : }
     318             : EXPORT_SYMBOL_GPL(cpus_read_trylock);
     319             : 
     320             : void cpus_read_unlock(void)
     321             : {
     322             :         percpu_up_read(&cpu_hotplug_lock);
     323             : }
     324             : EXPORT_SYMBOL_GPL(cpus_read_unlock);
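
For context on the read-side API exported above: the usual pattern is to bracket any section that must not race with a CPU coming or going, such as a walk over the online mask. A minimal, hypothetical sketch (mydrv_poke_cpu() is an invented helper):

static void mydrv_poke_all(void)                        /* hypothetical */
{
        int cpu;

        cpus_read_lock();
        for_each_online_cpu(cpu)
                mydrv_poke_cpu(cpu);                    /* CPUs cannot be (un)plugged here */
        cpus_read_unlock();
}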
     325             : 
     326             : void cpus_write_lock(void)
     327             : {
     328             :         percpu_down_write(&cpu_hotplug_lock);
     329             : }
     330             : 
     331             : void cpus_write_unlock(void)
     332             : {
     333             :         percpu_up_write(&cpu_hotplug_lock);
     334             : }
     335             : 
     336             : void lockdep_assert_cpus_held(void)
     337             : {
     338             :         /*
     339             :          * We can't have hotplug operations before userspace starts running,
     340             :          * and some init codepaths will knowingly not take the hotplug lock.
     341             :          * This is all valid, so mute lockdep until it makes sense to report
     342             :          * unheld locks.
     343             :          */
     344             :         if (system_state < SYSTEM_RUNNING)
     345             :                 return;
     346             : 
     347             :         percpu_rwsem_assert_held(&cpu_hotplug_lock);
     348             : }
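
A typical consumer of this assertion is a *_cpuslocked helper whose caller is already required to hold the hotplug lock; a hypothetical sketch (names invented):

static void mydrv_update_all_cpuslocked(void)           /* hypothetical */
{
        int cpu;

        lockdep_assert_cpus_held();                     /* caller must hold cpu_hotplug_lock */
        for_each_online_cpu(cpu)
                mydrv_update_cpu(cpu);                  /* hypothetical helper */
}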
     349             : 
     350             : #ifdef CONFIG_LOCKDEP
     351             : int lockdep_is_cpus_held(void)
     352             : {
     353             :         return percpu_rwsem_is_held(&cpu_hotplug_lock);
     354             : }
     355             : #endif
     356             : 
     357             : static void lockdep_acquire_cpus_lock(void)
     358             : {
     359             :         rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
     360             : }
     361             : 
     362             : static void lockdep_release_cpus_lock(void)
     363             : {
     364             :         rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
     365             : }
     366             : 
     367             : /*
     368             :  * Wait for currently running CPU hotplug operations to complete (if any) and
     369             :  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
     370             :  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
     371             :  * hotplug path before performing hotplug operations. So acquiring that lock
     372             :  * guarantees mutual exclusion from any currently running hotplug operations.
     373             :  */
     374             : void cpu_hotplug_disable(void)
     375             : {
     376             :         cpu_maps_update_begin();
     377             :         cpu_hotplug_disabled++;
     378             :         cpu_maps_update_done();
     379             : }
     380             : EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
     381             : 
     382             : static void __cpu_hotplug_enable(void)
     383             : {
     384             :         if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
     385             :                 return;
     386             :         cpu_hotplug_disabled--;
     387             : }
     388             : 
     389             : void cpu_hotplug_enable(void)
     390             : {
     391             :         cpu_maps_update_begin();
     392             :         __cpu_hotplug_enable();
     393             :         cpu_maps_update_done();
     394             : }
     395             : EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
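
Hedged usage sketch: code that must keep the current set of online CPUs stable across a longer operation (say, while flashing firmware on every core) brackets it with this pair; the names below are invented and not from this file:

static int mydrv_flash_firmware(struct mydrv *drv)      /* hypothetical */
{
        int ret;

        cpu_hotplug_disable();          /* cpu_up()/cpu_down() now return -EBUSY */
        ret = mydrv_do_flash(drv);      /* hypothetical helper */
        cpu_hotplug_enable();

        return ret;
}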
     396             : 
     397             : #else
     398             : 
     399             : static void lockdep_acquire_cpus_lock(void)
     400             : {
     401             : }
     402             : 
     403             : static void lockdep_release_cpus_lock(void)
     404             : {
     405             : }
     406             : 
     407             : #endif  /* CONFIG_HOTPLUG_CPU */
     408             : 
     409             : /*
     410             :  * Architectures that need SMT-specific errata handling during SMT hotplug
     411             :  * should override this.
     412             :  */
     413             : void __weak arch_smt_update(void) { }
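
Because arch_smt_update() is a __weak stub, an architecture overrides it simply by providing a non-weak definition; the following is only an illustrative skeleton under assumed names, not taken from any real architecture:

void arch_smt_update(void)
{
        /* re-evaluate SMT-dependent erratum handling or mitigation state */
        if (sched_smt_active())
                mydrv_arch_apply_smt_workarounds();     /* hypothetical helper */
        else
                mydrv_arch_relax_smt_workarounds();     /* hypothetical helper */
}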
     414             : 
     415             : #ifdef CONFIG_HOTPLUG_SMT
     416             : enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
     417             : 
     418             : void __init cpu_smt_disable(bool force)
     419             : {
     420             :         if (!cpu_smt_possible())
     421             :                 return;
     422             : 
     423             :         if (force) {
     424             :                 pr_info("SMT: Force disabled\n");
     425             :                 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
     426             :         } else {
     427             :                 pr_info("SMT: disabled\n");
     428             :                 cpu_smt_control = CPU_SMT_DISABLED;
     429             :         }
     430             : }
     431             : 
     432             : /*
      433             :  * The decision whether SMT is supported can only be made after the full
     434             :  * CPU identification. Called from architecture code.
     435             :  */
     436             : void __init cpu_smt_check_topology(void)
     437             : {
     438             :         if (!topology_smt_supported())
     439             :                 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
     440             : }
     441             : 
     442             : static int __init smt_cmdline_disable(char *str)
     443             : {
     444             :         cpu_smt_disable(str && !strcmp(str, "force"));
     445             :         return 0;
     446             : }
     447             : early_param("nosmt", smt_cmdline_disable);
     448             : 
     449             : static inline bool cpu_smt_allowed(unsigned int cpu)
     450             : {
     451             :         if (cpu_smt_control == CPU_SMT_ENABLED)
     452             :                 return true;
     453             : 
     454             :         if (topology_is_primary_thread(cpu))
     455             :                 return true;
     456             : 
     457             :         /*
     458             :          * On x86 it's required to boot all logical CPUs at least once so
     459             :          * that the init code can get a chance to set CR4.MCE on each
      460             :          * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
      461             :          * core will shut down the machine.
     462             :          */
     463             :         return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
     464             : }
     465             : 
      466             : /* Returns true unless SMT is not supported or forcefully (irreversibly) disabled */
     467             : bool cpu_smt_possible(void)
     468             : {
     469             :         return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
     470             :                 cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
     471             : }
     472             : EXPORT_SYMBOL_GPL(cpu_smt_possible);
     473             : #else
     474             : static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
     475             : #endif
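
As a usage note, cpu_smt_possible() lets callers distinguish "SMT can never happen on this system" from "SMT merely happens to be off right now"; a hypothetical caller gating an SMT-related warning (invented names):

static int mydrv_probe_check(void)                      /* hypothetical */
{
        /* warn only if sibling threads could ever come online */
        if (cpu_smt_possible())
                pr_warn("mydrv: feature X is unsafe when SMT siblings are online\n");
        return 0;
}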
     476             : 
     477             : static inline enum cpuhp_state
     478             : cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
     479             : {
     480             :         enum cpuhp_state prev_state = st->state;
     481             :         bool bringup = st->state < target;
     482             : 
     483             :         st->rollback = false;
     484             :         st->last = NULL;
     485             : 
     486             :         st->target = target;
     487             :         st->single = false;
     488             :         st->bringup = bringup;
     489             :         if (cpu_dying(cpu) != !bringup)
     490             :                 set_cpu_dying(cpu, !bringup);
     491             : 
     492             :         return prev_state;
     493             : }
     494             : 
     495             : static inline void
     496             : cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
     497             :                   enum cpuhp_state prev_state)
     498             : {
     499             :         bool bringup = !st->bringup;
     500             : 
     501             :         st->target = prev_state;
     502             : 
     503             :         /*
      504             :          * Already rolling back. No need to invert the bringup value or to change
     505             :          * the current state.
     506             :          */
     507             :         if (st->rollback)
     508             :                 return;
     509             : 
     510             :         st->rollback = true;
     511             : 
     512             :         /*
     513             :          * If we have st->last we need to undo partial multi_instance of this
     514             :          * state first. Otherwise start undo at the previous state.
     515             :          */
     516             :         if (!st->last) {
     517             :                 if (st->bringup)
     518             :                         st->state--;
     519             :                 else
     520             :                         st->state++;
     521             :         }
     522             : 
     523             :         st->bringup = bringup;
     524             :         if (cpu_dying(cpu) != !bringup)
     525             :                 set_cpu_dying(cpu, !bringup);
     526             : }
     527             : 
     528             : /* Regular hotplug invocation of the AP hotplug thread */
     529             : static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
     530             : {
     531             :         if (!st->single && st->state == st->target)
     532             :                 return;
     533             : 
     534             :         st->result = 0;
     535             :         /*
     536             :          * Make sure the above stores are visible before should_run becomes
      537             :          * true. Paired with the mb() in cpuhp_thread_fun()
     538             :          */
     539             :         smp_mb();
     540             :         st->should_run = true;
     541             :         wake_up_process(st->thread);
     542             :         wait_for_ap_thread(st, st->bringup);
     543             : }
     544             : 
     545             : static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
     546             :                          enum cpuhp_state target)
     547             : {
     548             :         enum cpuhp_state prev_state;
     549             :         int ret;
     550             : 
     551             :         prev_state = cpuhp_set_state(cpu, st, target);
     552             :         __cpuhp_kick_ap(st);
     553             :         if ((ret = st->result)) {
     554             :                 cpuhp_reset_state(cpu, st, prev_state);
     555             :                 __cpuhp_kick_ap(st);
     556             :         }
     557             : 
     558             :         return ret;
     559             : }
     560             : 
     561             : static int bringup_wait_for_ap(unsigned int cpu)
     562             : {
     563             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
     564             : 
     565             :         /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
     566             :         wait_for_ap_thread(st, true);
     567             :         if (WARN_ON_ONCE((!cpu_online(cpu))))
     568             :                 return -ECANCELED;
     569             : 
     570             :         /* Unpark the hotplug thread of the target cpu */
     571             :         kthread_unpark(st->thread);
     572             : 
     573             :         /*
      574             :          * SMT soft disabling on X86 requires bringing the CPU out of the
     575             :          * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
     576             :          * CPU marked itself as booted_once in notify_cpu_starting() so the
     577             :          * cpu_smt_allowed() check will now return false if this is not the
     578             :          * primary sibling.
     579             :          */
     580             :         if (!cpu_smt_allowed(cpu))
     581             :                 return -ECANCELED;
     582             : 
     583             :         if (st->target <= CPUHP_AP_ONLINE_IDLE)
     584             :                 return 0;
     585             : 
     586             :         return cpuhp_kick_ap(cpu, st, st->target);
     587             : }
     588             : 
     589             : static int bringup_cpu(unsigned int cpu)
     590             : {
     591             :         struct task_struct *idle = idle_thread_get(cpu);
     592             :         int ret;
     593             : 
     594             :         /*
     595             :          * Reset stale stack state from the last time this CPU was online.
     596             :          */
     597             :         scs_task_reset(idle);
     598             :         kasan_unpoison_task_stack(idle);
     599             : 
     600             :         /*
     601             :          * Some architectures have to walk the irq descriptors to
      602             :          * set up the vector space for the cpu which comes online.
     603             :          * Prevent irq alloc/free across the bringup.
     604             :          */
     605             :         irq_lock_sparse();
     606             : 
     607             :         /* Arch-specific enabling code. */
     608             :         ret = __cpu_up(cpu, idle);
     609             :         irq_unlock_sparse();
     610             :         if (ret)
     611             :                 return ret;
     612             :         return bringup_wait_for_ap(cpu);
     613             : }
     614             : 
     615             : static int finish_cpu(unsigned int cpu)
     616             : {
     617             :         struct task_struct *idle = idle_thread_get(cpu);
     618             :         struct mm_struct *mm = idle->active_mm;
     619             : 
     620             :         /*
     621             :          * idle_task_exit() will have switched to &init_mm, now
     622             :          * clean up any remaining active_mm state.
     623             :          */
     624             :         if (mm != &init_mm)
     625             :                 idle->active_mm = &init_mm;
     626             :         mmdrop_lazy_tlb(mm);
     627             :         return 0;
     628             : }
     629             : 
     630             : /*
     631             :  * Hotplug state machine related functions
     632             :  */
     633             : 
     634             : /*
     635             :  * Get the next state to run. Empty ones will be skipped. Returns true if a
     636             :  * state must be run.
     637             :  *
     638             :  * st->state will be modified ahead of time, to match state_to_run, as if it
      639             :  * has already run.
     640             :  */
     641             : static bool cpuhp_next_state(bool bringup,
     642             :                              enum cpuhp_state *state_to_run,
     643             :                              struct cpuhp_cpu_state *st,
     644             :                              enum cpuhp_state target)
     645             : {
     646             :         do {
     647             :                 if (bringup) {
     648             :                         if (st->state >= target)
     649             :                                 return false;
     650             : 
     651             :                         *state_to_run = ++st->state;
     652             :                 } else {
     653             :                         if (st->state <= target)
     654             :                                 return false;
     655             : 
     656             :                         *state_to_run = st->state--;
     657             :                 }
     658             : 
     659             :                 if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
     660             :                         break;
     661             :         } while (true);
     662             : 
     663             :         return true;
     664             : }
     665             : 
     666             : static int __cpuhp_invoke_callback_range(bool bringup,
     667             :                                          unsigned int cpu,
     668             :                                          struct cpuhp_cpu_state *st,
     669             :                                          enum cpuhp_state target,
     670             :                                          bool nofail)
     671             : {
     672             :         enum cpuhp_state state;
     673             :         int ret = 0;
     674             : 
     675             :         while (cpuhp_next_state(bringup, &state, st, target)) {
     676             :                 int err;
     677             : 
     678             :                 err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
     679             :                 if (!err)
     680             :                         continue;
     681             : 
     682             :                 if (nofail) {
     683             :                         pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
     684             :                                 cpu, bringup ? "UP" : "DOWN",
     685             :                                 cpuhp_get_step(st->state)->name,
     686             :                                 st->state, err);
     687             :                         ret = -1;
     688             :                 } else {
     689             :                         ret = err;
     690             :                         break;
     691             :                 }
     692             :         }
     693             : 
     694             :         return ret;
     695             : }
     696             : 
     697             : static inline int cpuhp_invoke_callback_range(bool bringup,
     698             :                                               unsigned int cpu,
     699             :                                               struct cpuhp_cpu_state *st,
     700             :                                               enum cpuhp_state target)
     701             : {
     702             :         return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
     703             : }
     704             : 
     705             : static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
     706             :                                                       unsigned int cpu,
     707             :                                                       struct cpuhp_cpu_state *st,
     708             :                                                       enum cpuhp_state target)
     709             : {
     710             :         __cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
     711             : }
     712             : 
     713             : static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
     714             : {
     715             :         if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
     716             :                 return true;
     717             :         /*
      718             :          * When CPU hotplug is disabled, taking the CPU down is not
     719             :          * possible because takedown_cpu() and the architecture and
     720             :          * subsystem specific mechanisms are not available. So the CPU
     721             :          * which would be completely unplugged again needs to stay around
     722             :          * in the current state.
     723             :          */
     724             :         return st->state <= CPUHP_BRINGUP_CPU;
     725             : }
     726             : 
     727             : static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
     728             :                               enum cpuhp_state target)
     729             : {
     730             :         enum cpuhp_state prev_state = st->state;
     731             :         int ret = 0;
     732             : 
     733             :         ret = cpuhp_invoke_callback_range(true, cpu, st, target);
     734             :         if (ret) {
     735             :                 pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
     736             :                          ret, cpu, cpuhp_get_step(st->state)->name,
     737             :                          st->state);
     738             : 
     739             :                 cpuhp_reset_state(cpu, st, prev_state);
     740             :                 if (can_rollback_cpu(st))
     741             :                         WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
     742             :                                                             prev_state));
     743             :         }
     744             :         return ret;
     745             : }
     746             : 
     747             : /*
     748             :  * The cpu hotplug threads manage the bringup and teardown of the cpus
     749             :  */
     750             : static int cpuhp_should_run(unsigned int cpu)
     751             : {
     752             :         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
     753             : 
     754             :         return st->should_run;
     755             : }
     756             : 
     757             : /*
     758             :  * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
     759             :  * callbacks when a state gets [un]installed at runtime.
     760             :  *
     761             :  * Each invocation of this function by the smpboot thread does a single AP
     762             :  * state callback.
     763             :  *
     764             :  * It has 3 modes of operation:
     765             :  *  - single: runs st->cb_state
     766             :  *  - up:     runs ++st->state, while st->state < st->target
     767             :  *  - down:   runs st->state--, while st->state > st->target
     768             :  *
     769             :  * When complete or on error, should_run is cleared and the completion is fired.
     770             :  */
     771             : static void cpuhp_thread_fun(unsigned int cpu)
     772             : {
     773             :         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
     774             :         bool bringup = st->bringup;
     775             :         enum cpuhp_state state;
     776             : 
     777             :         if (WARN_ON_ONCE(!st->should_run))
     778             :                 return;
     779             : 
     780             :         /*
     781             :          * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
     782             :          * that if we see ->should_run we also see the rest of the state.
     783             :          */
     784             :         smp_mb();
     785             : 
     786             :         /*
      787             :          * The BP holds the hotplug lock, but we're now running on the AP;
      788             :          * ensure that anybody asserting the lock is held will actually
      789             :          * find it so.
     790             :          */
     791             :         lockdep_acquire_cpus_lock();
     792             :         cpuhp_lock_acquire(bringup);
     793             : 
     794             :         if (st->single) {
     795             :                 state = st->cb_state;
     796             :                 st->should_run = false;
     797             :         } else {
     798             :                 st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
     799             :                 if (!st->should_run)
     800             :                         goto end;
     801             :         }
     802             : 
     803             :         WARN_ON_ONCE(!cpuhp_is_ap_state(state));
     804             : 
     805             :         if (cpuhp_is_atomic_state(state)) {
     806             :                 local_irq_disable();
     807             :                 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
     808             :                 local_irq_enable();
     809             : 
     810             :                 /*
     811             :                  * STARTING/DYING must not fail!
     812             :                  */
     813             :                 WARN_ON_ONCE(st->result);
     814             :         } else {
     815             :                 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
     816             :         }
     817             : 
     818             :         if (st->result) {
     819             :                 /*
      820             :                  * If we fail on a rollback, we're up a creek without a
      821             :                  * paddle, no way forward, no way back. We lose, thanks for
     822             :                  * playing.
     823             :                  */
     824             :                 WARN_ON_ONCE(st->rollback);
     825             :                 st->should_run = false;
     826             :         }
     827             : 
     828             : end:
     829             :         cpuhp_lock_release(bringup);
     830             :         lockdep_release_cpus_lock();
     831             : 
     832             :         if (!st->should_run)
     833             :                 complete_ap_thread(st, bringup);
     834             : }
     835             : 
     836             : /* Invoke a single callback on a remote cpu */
     837             : static int
     838             : cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
     839             :                          struct hlist_node *node)
     840             : {
     841             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
     842             :         int ret;
     843             : 
     844             :         if (!cpu_online(cpu))
     845             :                 return 0;
     846             : 
     847             :         cpuhp_lock_acquire(false);
     848             :         cpuhp_lock_release(false);
     849             : 
     850             :         cpuhp_lock_acquire(true);
     851             :         cpuhp_lock_release(true);
     852             : 
     853             :         /*
     854             :          * If we are up and running, use the hotplug thread. For early calls
     855             :          * we invoke the thread function directly.
     856             :          */
     857             :         if (!st->thread)
     858             :                 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
     859             : 
     860             :         st->rollback = false;
     861             :         st->last = NULL;
     862             : 
     863             :         st->node = node;
     864             :         st->bringup = bringup;
     865             :         st->cb_state = state;
     866             :         st->single = true;
     867             : 
     868             :         __cpuhp_kick_ap(st);
     869             : 
     870             :         /*
     871             :          * If we failed and did a partial, do a rollback.
     872             :          */
     873             :         if ((ret = st->result) && st->last) {
     874             :                 st->rollback = true;
     875             :                 st->bringup = !bringup;
     876             : 
     877             :                 __cpuhp_kick_ap(st);
     878             :         }
     879             : 
     880             :         /*
      881             :          * Clean up the leftovers so the next hotplug operation won't use stale
     882             :          * data.
     883             :          */
     884             :         st->node = st->last = NULL;
     885             :         return ret;
     886             : }
     887             : 
     888             : static int cpuhp_kick_ap_work(unsigned int cpu)
     889             : {
     890             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
     891             :         enum cpuhp_state prev_state = st->state;
     892             :         int ret;
     893             : 
     894             :         cpuhp_lock_acquire(false);
     895             :         cpuhp_lock_release(false);
     896             : 
     897             :         cpuhp_lock_acquire(true);
     898             :         cpuhp_lock_release(true);
     899             : 
     900             :         trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
     901             :         ret = cpuhp_kick_ap(cpu, st, st->target);
     902             :         trace_cpuhp_exit(cpu, st->state, prev_state, ret);
     903             : 
     904             :         return ret;
     905             : }
     906             : 
     907             : static struct smp_hotplug_thread cpuhp_threads = {
     908             :         .store                  = &cpuhp_state.thread,
     909             :         .thread_should_run      = cpuhp_should_run,
     910             :         .thread_fn              = cpuhp_thread_fun,
     911             :         .thread_comm            = "cpuhp/%u",
     912             :         .selfparking            = true,
     913             : };
     914             : 
     915             : static __init void cpuhp_init_state(void)
     916             : {
     917             :         struct cpuhp_cpu_state *st;
     918             :         int cpu;
     919             : 
     920             :         for_each_possible_cpu(cpu) {
     921             :                 st = per_cpu_ptr(&cpuhp_state, cpu);
     922             :                 init_completion(&st->done_up);
     923             :                 init_completion(&st->done_down);
     924             :         }
     925             : }
     926             : 
     927             : void __init cpuhp_threads_init(void)
     928             : {
     929             :         cpuhp_init_state();
     930             :         BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
     931             :         kthread_unpark(this_cpu_read(cpuhp_state.thread));
     932             : }
     933             : 
     934             : /*
     935             :  *
     936             :  * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
     937             :  * protected region.
     938             :  *
     939             :  * The operation is still serialized against concurrent CPU hotplug via
     940             :  * cpu_add_remove_lock, i.e. CPU map protection.  But it is _not_
     941             :  * serialized against other hotplug related activity like adding or
     942             :  * removing of state callbacks and state instances, which invoke either the
     943             :  * startup or the teardown callback of the affected state.
     944             :  *
     945             :  * This is required for subsystems which are unfixable vs. CPU hotplug and
     946             :  * evade lock inversion problems by scheduling work which has to be
     947             :  * completed _before_ cpu_up()/_cpu_down() returns.
     948             :  *
     949             :  * Don't even think about adding anything to this for any new code or even
      950             :  * drivers. Its only purpose is to keep existing lock order trainwrecks
     951             :  * working.
     952             :  *
     953             :  * For cpu_down() there might be valid reasons to finish cleanups which are
     954             :  * not required to be done under cpu_hotplug_lock, but that's a different
      955             :  * story and would not be invoked via this.
     956             :  */
     957             : static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
     958             : {
     959             :         /*
     960             :          * cpusets delegate hotplug operations to a worker to "solve" the
     961             :          * lock order problems. Wait for the worker, but only if tasks are
     962             :          * _not_ frozen (suspend, hibernate) as that would wait forever.
     963             :          *
     964             :          * The wait is required because otherwise the hotplug operation
     965             :          * returns with inconsistent state, which could even be observed in
     966             :          * user space when a new CPU is brought up. The CPU plug uevent
     967             :          * would be delivered and user space reacting on it would fail to
     968             :          * move tasks to the newly plugged CPU up to the point where the
     969             :          * work has finished because up to that point the newly plugged CPU
     970             :          * is not assignable in cpusets/cgroups. On unplug that's not
     971             :          * necessarily a visible issue, but it is still inconsistent state,
     972             :          * which is the real problem which needs to be "fixed". This can't
     973             :          * prevent the transient state between scheduling the work and
     974             :          * returning from waiting for it.
     975             :          */
     976             :         if (!tasks_frozen)
     977             :                 cpuset_wait_for_hotplug();
     978             : }
     979             : 
     980             : #ifdef CONFIG_HOTPLUG_CPU
     981             : #ifndef arch_clear_mm_cpumask_cpu
     982             : #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
     983             : #endif
     984             : 
     985             : /**
     986             :  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
     987             :  * @cpu: a CPU id
     988             :  *
     989             :  * This function walks all processes, finds a valid mm struct for each one and
     990             :  * then clears a corresponding bit in mm's cpumask.  While this all sounds
     991             :  * trivial, there are various non-obvious corner cases, which this function
     992             :  * tries to solve in a safe manner.
     993             :  *
     994             :  * Also note that the function uses a somewhat relaxed locking scheme, so it may
     995             :  * be called only for an already offlined CPU.
     996             :  */
     997             : void clear_tasks_mm_cpumask(int cpu)
     998             : {
     999             :         struct task_struct *p;
    1000             : 
    1001             :         /*
    1002             :          * This function is called after the cpu is taken down and marked
     1003             :          * offline, so it's not like new tasks will ever get this cpu set in
    1004             :          * their mm mask. -- Peter Zijlstra
    1005             :          * Thus, we may use rcu_read_lock() here, instead of grabbing
    1006             :          * full-fledged tasklist_lock.
    1007             :          */
    1008             :         WARN_ON(cpu_online(cpu));
    1009             :         rcu_read_lock();
    1010             :         for_each_process(p) {
    1011             :                 struct task_struct *t;
    1012             : 
    1013             :                 /*
    1014             :                  * Main thread might exit, but other threads may still have
    1015             :                  * a valid mm. Find one.
    1016             :                  */
    1017             :                 t = find_lock_task_mm(p);
    1018             :                 if (!t)
    1019             :                         continue;
    1020             :                 arch_clear_mm_cpumask_cpu(cpu, t->mm);
    1021             :                 task_unlock(t);
    1022             :         }
    1023             :         rcu_read_unlock();
    1024             : }
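
For orientation, this function is invoked from architecture code on the CPU-offline path, once the CPU has already been removed from the online mask; a hypothetical sketch of such an arch hook (loosely modelled on what a __cpu_disable() implementation might do, names and details invented):

int __cpu_disable(void)                                 /* hypothetical arch sketch */
{
        unsigned int cpu = smp_processor_id();

        set_cpu_online(cpu, false);                     /* take the CPU out of the online mask */
        /* ... migrate/mask interrupts away from this CPU ... */

        clear_tasks_mm_cpumask(cpu);                    /* now safe: the CPU is offline */

        return 0;
}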
    1025             : 
    1026             : /* Take this CPU down. */
    1027             : static int take_cpu_down(void *_param)
    1028             : {
    1029             :         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
    1030             :         enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
    1031             :         int err, cpu = smp_processor_id();
    1032             : 
    1033             :         /* Ensure this CPU doesn't handle any more interrupts. */
    1034             :         err = __cpu_disable();
    1035             :         if (err < 0)
    1036             :                 return err;
    1037             : 
    1038             :         /*
    1039             :          * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
    1040             :          * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
    1041             :          */
    1042             :         WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
    1043             : 
    1044             :         /*
    1045             :          * Invoke the former CPU_DYING callbacks. DYING must not fail!
    1046             :          */
    1047             :         cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
    1048             : 
    1049             :         /* Give up timekeeping duties */
    1050             :         tick_handover_do_timer();
    1051             :         /* Remove CPU from timer broadcasting */
    1052             :         tick_offline_cpu(cpu);
    1053             :         /* Park the stopper thread */
    1054             :         stop_machine_park(cpu);
    1055             :         return 0;
    1056             : }
    1057             : 
    1058             : static int takedown_cpu(unsigned int cpu)
    1059             : {
    1060             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    1061             :         int err;
    1062             : 
    1063             :         /* Park the smpboot threads */
    1064             :         kthread_park(st->thread);
    1065             : 
    1066             :         /*
    1067             :          * Prevent irq alloc/free while the dying cpu reorganizes the
    1068             :          * interrupt affinities.
    1069             :          */
    1070             :         irq_lock_sparse();
    1071             : 
    1072             :         /*
    1073             :          * So now all preempt/rcu users must observe !cpu_active().
    1074             :          */
    1075             :         err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
    1076             :         if (err) {
    1077             :                 /* CPU refused to die */
    1078             :                 irq_unlock_sparse();
    1079             :                 /* Unpark the hotplug thread so we can rollback there */
    1080             :                 kthread_unpark(st->thread);
    1081             :                 return err;
    1082             :         }
    1083             :         BUG_ON(cpu_online(cpu));
    1084             : 
    1085             :         /*
    1086             :          * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
    1087             :          * all runnable tasks from the CPU, there's only the idle task left now
    1088             :          * that the migration thread is done doing the stop_machine thing.
    1089             :          *
    1090             :          * Wait for the stop thread to go away.
    1091             :          */
    1092             :         wait_for_ap_thread(st, false);
    1093             :         BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
    1094             : 
    1095             :         /* Interrupts are moved away from the dying cpu, reenable alloc/free */
    1096             :         irq_unlock_sparse();
    1097             : 
    1098             :         hotplug_cpu__broadcast_tick_pull(cpu);
    1099             :         /* This actually kills the CPU. */
    1100             :         __cpu_die(cpu);
    1101             : 
    1102             :         tick_cleanup_dead_cpu(cpu);
    1103             :         rcutree_migrate_callbacks(cpu);
    1104             :         return 0;
    1105             : }
    1106             : 
    1107             : static void cpuhp_complete_idle_dead(void *arg)
    1108             : {
    1109             :         struct cpuhp_cpu_state *st = arg;
    1110             : 
    1111             :         complete_ap_thread(st, false);
    1112             : }
    1113             : 
    1114             : void cpuhp_report_idle_dead(void)
    1115             : {
    1116             :         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
    1117             : 
    1118             :         BUG_ON(st->state != CPUHP_AP_OFFLINE);
    1119             :         rcu_report_dead(smp_processor_id());
    1120             :         st->state = CPUHP_AP_IDLE_DEAD;
    1121             :         /*
     1122             :          * We cannot call complete after rcu_report_dead(), so we delegate it
    1123             :          * to an online cpu.
    1124             :          */
    1125             :         smp_call_function_single(cpumask_first(cpu_online_mask),
    1126             :                                  cpuhp_complete_idle_dead, st, 0);
    1127             : }
    1128             : 
    1129             : static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
    1130             :                                 enum cpuhp_state target)
    1131             : {
    1132             :         enum cpuhp_state prev_state = st->state;
    1133             :         int ret = 0;
    1134             : 
    1135             :         ret = cpuhp_invoke_callback_range(false, cpu, st, target);
    1136             :         if (ret) {
    1137             :                 pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
    1138             :                          ret, cpu, cpuhp_get_step(st->state)->name,
    1139             :                          st->state);
    1140             : 
    1141             :                 cpuhp_reset_state(cpu, st, prev_state);
    1142             : 
    1143             :                 if (st->state < prev_state)
    1144             :                         WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
    1145             :                                                             prev_state));
    1146             :         }
    1147             : 
    1148             :         return ret;
    1149             : }
    1150             : 
    1151             : /* Requires cpu_add_remove_lock to be held */
    1152             : static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
    1153             :                            enum cpuhp_state target)
    1154             : {
    1155             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    1156             :         int prev_state, ret = 0;
    1157             : 
    1158             :         if (num_online_cpus() == 1)
    1159             :                 return -EBUSY;
    1160             : 
    1161             :         if (!cpu_present(cpu))
    1162             :                 return -EINVAL;
    1163             : 
    1164             :         cpus_write_lock();
    1165             : 
    1166             :         cpuhp_tasks_frozen = tasks_frozen;
    1167             : 
    1168             :         prev_state = cpuhp_set_state(cpu, st, target);
    1169             :         /*
    1170             :          * If the current CPU state is in the range of the AP hotplug thread,
    1171             :          * then we need to kick the thread.
    1172             :          */
    1173             :         if (st->state > CPUHP_TEARDOWN_CPU) {
    1174             :                 st->target = max((int)target, CPUHP_TEARDOWN_CPU);
    1175             :                 ret = cpuhp_kick_ap_work(cpu);
    1176             :                 /*
    1177             :                  * The AP side has done the error rollback already. Just
     1178             :                  * return the error code.
    1179             :                  */
    1180             :                 if (ret)
    1181             :                         goto out;
    1182             : 
    1183             :                 /*
     1184             :                  * We might have stopped while still in the range of the AP
     1185             :                  * hotplug thread. Nothing to do anymore.
    1186             :                  */
    1187             :                 if (st->state > CPUHP_TEARDOWN_CPU)
    1188             :                         goto out;
    1189             : 
    1190             :                 st->target = target;
    1191             :         }
    1192             :         /*
    1193             :          * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
    1194             :          * to do the further cleanups.
    1195             :          */
    1196             :         ret = cpuhp_down_callbacks(cpu, st, target);
    1197             :         if (ret && st->state < prev_state) {
    1198             :                 if (st->state == CPUHP_TEARDOWN_CPU) {
    1199             :                         cpuhp_reset_state(cpu, st, prev_state);
    1200             :                         __cpuhp_kick_ap(st);
    1201             :                 } else {
    1202             :                         WARN(1, "DEAD callback error for CPU%d", cpu);
    1203             :                 }
    1204             :         }
    1205             : 
    1206             : out:
    1207             :         cpus_write_unlock();
    1208             :         /*
    1209             :          * Do post unplug cleanup. This is still protected against
    1210             :          * concurrent CPU hotplug via cpu_add_remove_lock.
    1211             :          */
    1212             :         lockup_detector_cleanup();
    1213             :         arch_smt_update();
    1214             :         cpu_up_down_serialize_trainwrecks(tasks_frozen);
    1215             :         return ret;
    1216             : }
    1217             : 
    1218             : static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
    1219             : {
    1220             :         /*
    1221             :          * If the platform does not support hotplug, report it explicitly to
    1222             :          * differentiate it from a transient offlining failure.
    1223             :          */
    1224             :         if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
    1225             :                 return -EOPNOTSUPP;
    1226             :         if (cpu_hotplug_disabled)
    1227             :                 return -EBUSY;
    1228             :         return _cpu_down(cpu, 0, target);
    1229             : }
    1230             : 
    1231             : static int cpu_down(unsigned int cpu, enum cpuhp_state target)
    1232             : {
    1233             :         int err;
    1234             : 
    1235             :         cpu_maps_update_begin();
    1236             :         err = cpu_down_maps_locked(cpu, target);
    1237             :         cpu_maps_update_done();
    1238             :         return err;
    1239             : }
    1240             : 
    1241             : /**
    1242             :  * cpu_device_down - Bring down a cpu device
    1243             :  * @dev: Pointer to the cpu device to offline
    1244             :  *
    1245             :  * This function is meant to be used by device core cpu subsystem only.
    1246             :  *
    1247             :  * Other subsystems should use remove_cpu() instead.
    1248             :  *
    1249             :  * Return: %0 on success or a negative errno code
    1250             :  */
    1251             : int cpu_device_down(struct device *dev)
    1252             : {
    1253             :         return cpu_down(dev->id, CPUHP_OFFLINE);
    1254             : }
    1255             : 
    1256             : int remove_cpu(unsigned int cpu)
    1257             : {
    1258             :         int ret;
    1259             : 
    1260             :         lock_device_hotplug();
    1261             :         ret = device_offline(get_cpu_device(cpu));
    1262             :         unlock_device_hotplug();
    1263             : 
    1264             :         return ret;
    1265             : }
    1266             : EXPORT_SYMBOL_GPL(remove_cpu);
    1267             : 
    1268             : void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
    1269             : {
    1270             :         unsigned int cpu;
    1271             :         int error;
    1272             : 
    1273             :         cpu_maps_update_begin();
    1274             : 
    1275             :         /*
    1276             :          * Make certain the cpu I'm about to reboot on is online.
    1277             :          *
     1278             :          * This is in line with what migrate_to_reboot_cpu() already does.
    1279             :          */
    1280             :         if (!cpu_online(primary_cpu))
    1281             :                 primary_cpu = cpumask_first(cpu_online_mask);
    1282             : 
    1283             :         for_each_online_cpu(cpu) {
    1284             :                 if (cpu == primary_cpu)
    1285             :                         continue;
    1286             : 
    1287             :                 error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
    1288             :                 if (error) {
    1289             :                         pr_err("Failed to offline CPU%d - error=%d",
    1290             :                                 cpu, error);
    1291             :                         break;
    1292             :                 }
    1293             :         }
    1294             : 
    1295             :         /*
    1296             :          * Ensure all but the reboot CPU are offline.
    1297             :          */
    1298             :         BUG_ON(num_online_cpus() > 1);
    1299             : 
    1300             :         /*
    1301             :          * Make sure the CPUs won't be enabled by someone else after this
     1302             :          * point. Kexec will reboot to a new kernel shortly, resetting
     1303             :          * everything along the way.
    1304             :          */
    1305             :         cpu_hotplug_disabled++;
    1306             : 
    1307             :         cpu_maps_update_done();
    1308             : }
    1309             : 
    1310             : #else
    1311             : #define takedown_cpu            NULL
    1312             : #endif /*CONFIG_HOTPLUG_CPU*/
    1313             : 
    1314             : /**
    1315             :  * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
    1316             :  * @cpu: cpu that just started
    1317             :  *
    1318             :  * It must be called by the arch code on the new cpu, before the new cpu
    1319             :  * enables interrupts and before the "boot" cpu returns from __cpu_up().
    1320             :  */
    1321             : void notify_cpu_starting(unsigned int cpu)
    1322             : {
    1323             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    1324             :         enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
    1325             : 
    1326             :         rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
    1327             :         cpumask_set_cpu(cpu, &cpus_booted_once_mask);
    1328             : 
    1329             :         /*
    1330             :          * STARTING must not fail!
    1331             :          */
    1332             :         cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
    1333             : }
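/*
 * Editor's illustration (hedged, not part of cpu.c): a simplified sketch of
 * the ordering an architecture's secondary-CPU entry point typically follows
 * around notify_cpu_starting(). my_arch_secondary_start() is a hypothetical
 * name; real arch code does much more (MMU, per-cpu areas, calibration, ...).
 */
static void my_arch_secondary_start(unsigned int cpu)
{
        /* Run the STARTING callbacks before interrupts are enabled ... */
        notify_cpu_starting(cpu);

        /* ... and before the boot CPU returns from __cpu_up(). */
        set_cpu_online(cpu, true);

        local_irq_enable();

        /* Enter the idle loop; cpuhp_online_idle() below takes it from here. */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}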
    1334             : 
    1335             : /*
    1336             :  * Called from the idle task. Wake up the controlling task which brings the
    1337             :  * hotplug thread of the upcoming CPU up and then delegates the rest of the
    1338             :  * online bringup to the hotplug thread.
    1339             :  */
    1340             : void cpuhp_online_idle(enum cpuhp_state state)
    1341             : {
    1342             :         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
    1343             : 
    1344             :         /* Happens for the boot cpu */
    1345             :         if (state != CPUHP_AP_ONLINE_IDLE)
    1346             :                 return;
    1347             : 
    1348             :         /*
     1349             :          * Unpark the stopper thread before we start the idle loop (and start
    1350             :          * scheduling); this ensures the stopper task is always available.
    1351             :          */
    1352             :         stop_machine_unpark(smp_processor_id());
    1353             : 
    1354             :         st->state = CPUHP_AP_ONLINE_IDLE;
    1355             :         complete_ap_thread(st, true);
    1356             : }
    1357             : 
    1358             : /* Requires cpu_add_remove_lock to be held */
    1359             : static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
    1360             : {
    1361             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    1362             :         struct task_struct *idle;
    1363             :         int ret = 0;
    1364             : 
    1365             :         cpus_write_lock();
    1366             : 
    1367             :         if (!cpu_present(cpu)) {
    1368             :                 ret = -EINVAL;
    1369             :                 goto out;
    1370             :         }
    1371             : 
    1372             :         /*
    1373             :          * The caller of cpu_up() might have raced with another
    1374             :          * caller. Nothing to do.
    1375             :          */
    1376             :         if (st->state >= target)
    1377             :                 goto out;
    1378             : 
    1379             :         if (st->state == CPUHP_OFFLINE) {
    1380             :                 /* Let it fail before we try to bring the cpu up */
    1381             :                 idle = idle_thread_get(cpu);
    1382             :                 if (IS_ERR(idle)) {
    1383             :                         ret = PTR_ERR(idle);
    1384             :                         goto out;
    1385             :                 }
    1386             :         }
    1387             : 
    1388             :         cpuhp_tasks_frozen = tasks_frozen;
    1389             : 
    1390             :         cpuhp_set_state(cpu, st, target);
    1391             :         /*
    1392             :          * If the current CPU state is in the range of the AP hotplug thread,
    1393             :          * then we need to kick the thread once more.
    1394             :          */
    1395             :         if (st->state > CPUHP_BRINGUP_CPU) {
    1396             :                 ret = cpuhp_kick_ap_work(cpu);
    1397             :                 /*
    1398             :                  * The AP side has done the error rollback already. Just
     1399             :                  * return the error code.
    1400             :                  */
    1401             :                 if (ret)
    1402             :                         goto out;
    1403             :         }
    1404             : 
    1405             :         /*
    1406             :          * Try to reach the target state. We max out on the BP at
    1407             :          * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
    1408             :          * responsible for bringing it up to the target state.
    1409             :          */
    1410             :         target = min((int)target, CPUHP_BRINGUP_CPU);
    1411             :         ret = cpuhp_up_callbacks(cpu, st, target);
    1412             : out:
    1413             :         cpus_write_unlock();
    1414             :         arch_smt_update();
    1415             :         cpu_up_down_serialize_trainwrecks(tasks_frozen);
    1416             :         return ret;
    1417             : }
    1418             : 
    1419             : static int cpu_up(unsigned int cpu, enum cpuhp_state target)
    1420             : {
    1421             :         int err = 0;
    1422             : 
    1423             :         if (!cpu_possible(cpu)) {
    1424             :                 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
    1425             :                        cpu);
    1426             : #if defined(CONFIG_IA64)
    1427             :                 pr_err("please check additional_cpus= boot parameter\n");
    1428             : #endif
    1429             :                 return -EINVAL;
    1430             :         }
    1431             : 
    1432             :         err = try_online_node(cpu_to_node(cpu));
    1433             :         if (err)
    1434             :                 return err;
    1435             : 
    1436             :         cpu_maps_update_begin();
    1437             : 
    1438             :         if (cpu_hotplug_disabled) {
    1439             :                 err = -EBUSY;
    1440             :                 goto out;
    1441             :         }
    1442             :         if (!cpu_smt_allowed(cpu)) {
    1443             :                 err = -EPERM;
    1444             :                 goto out;
    1445             :         }
    1446             : 
    1447             :         err = _cpu_up(cpu, 0, target);
    1448             : out:
    1449             :         cpu_maps_update_done();
    1450             :         return err;
    1451             : }
    1452             : 
    1453             : /**
    1454             :  * cpu_device_up - Bring up a cpu device
    1455             :  * @dev: Pointer to the cpu device to online
    1456             :  *
    1457             :  * This function is meant to be used by device core cpu subsystem only.
    1458             :  *
    1459             :  * Other subsystems should use add_cpu() instead.
    1460             :  *
    1461             :  * Return: %0 on success or a negative errno code
    1462             :  */
    1463             : int cpu_device_up(struct device *dev)
    1464             : {
    1465             :         return cpu_up(dev->id, CPUHP_ONLINE);
    1466             : }
    1467             : 
    1468             : int add_cpu(unsigned int cpu)
    1469             : {
    1470             :         int ret;
    1471             : 
    1472             :         lock_device_hotplug();
    1473             :         ret = device_online(get_cpu_device(cpu));
    1474             :         unlock_device_hotplug();
    1475             : 
    1476             :         return ret;
    1477             : }
    1478             : EXPORT_SYMBOL_GPL(add_cpu);
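/*
 * Editor's illustration (hedged, not part of cpu.c): how another kernel
 * subsystem might use the exported remove_cpu()/add_cpu() pair to take a CPU
 * out of service and bring it back. example_cycle_cpu() is a hypothetical
 * function; it assumes <linux/cpu.h> for the declarations.
 */
static int example_cycle_cpu(unsigned int cpu)
{
        int ret;

        ret = remove_cpu(cpu);          /* offline via device_offline() */
        if (ret)
                return ret;

        /* ... the CPU is fully offline at this point ... */

        return add_cpu(cpu);            /* online it again */
}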
    1479             : 
    1480             : /**
    1481             :  * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
    1482             :  * @sleep_cpu: The cpu we hibernated on and should be brought up.
    1483             :  *
    1484             :  * On some architectures like arm64, we can hibernate on any CPU, but on
     1485             :  * wake-up the CPU we hibernated on might be offline as a side effect of
     1486             :  * using maxcpus=, for example.
    1487             :  *
    1488             :  * Return: %0 on success or a negative errno code
    1489             :  */
    1490             : int bringup_hibernate_cpu(unsigned int sleep_cpu)
    1491             : {
    1492             :         int ret;
    1493             : 
    1494             :         if (!cpu_online(sleep_cpu)) {
    1495             :                 pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
    1496             :                 ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
    1497             :                 if (ret) {
    1498             :                         pr_err("Failed to bring hibernate-CPU up!\n");
    1499             :                         return ret;
    1500             :                 }
    1501             :         }
    1502             :         return 0;
    1503             : }
    1504             : 
    1505             : void bringup_nonboot_cpus(unsigned int setup_max_cpus)
    1506             : {
    1507             :         unsigned int cpu;
    1508             : 
    1509             :         for_each_present_cpu(cpu) {
    1510             :                 if (num_online_cpus() >= setup_max_cpus)
    1511             :                         break;
    1512             :                 if (!cpu_online(cpu))
    1513             :                         cpu_up(cpu, CPUHP_ONLINE);
    1514             :         }
    1515             : }
    1516             : 
    1517             : #ifdef CONFIG_PM_SLEEP_SMP
    1518             : static cpumask_var_t frozen_cpus;
    1519             : 
    1520             : int freeze_secondary_cpus(int primary)
    1521             : {
    1522             :         int cpu, error = 0;
    1523             : 
    1524             :         cpu_maps_update_begin();
    1525             :         if (primary == -1) {
    1526             :                 primary = cpumask_first(cpu_online_mask);
    1527             :                 if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
    1528             :                         primary = housekeeping_any_cpu(HK_TYPE_TIMER);
    1529             :         } else {
    1530             :                 if (!cpu_online(primary))
    1531             :                         primary = cpumask_first(cpu_online_mask);
    1532             :         }
    1533             : 
    1534             :         /*
    1535             :          * We take down all of the non-boot CPUs in one shot to avoid races
     1536             :          * with userspace trying to use CPU hotplug at the same time.
    1537             :          */
    1538             :         cpumask_clear(frozen_cpus);
    1539             : 
    1540             :         pr_info("Disabling non-boot CPUs ...\n");
    1541             :         for_each_online_cpu(cpu) {
    1542             :                 if (cpu == primary)
    1543             :                         continue;
    1544             : 
    1545             :                 if (pm_wakeup_pending()) {
    1546             :                         pr_info("Wakeup pending. Abort CPU freeze\n");
    1547             :                         error = -EBUSY;
    1548             :                         break;
    1549             :                 }
    1550             : 
    1551             :                 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
    1552             :                 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
    1553             :                 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
    1554             :                 if (!error)
    1555             :                         cpumask_set_cpu(cpu, frozen_cpus);
    1556             :                 else {
    1557             :                         pr_err("Error taking CPU%d down: %d\n", cpu, error);
    1558             :                         break;
    1559             :                 }
    1560             :         }
    1561             : 
    1562             :         if (!error)
    1563             :                 BUG_ON(num_online_cpus() > 1);
    1564             :         else
    1565             :                 pr_err("Non-boot CPUs are not disabled\n");
    1566             : 
    1567             :         /*
    1568             :          * Make sure the CPUs won't be enabled by someone else. We need to do
    1569             :          * this even in case of failure as all freeze_secondary_cpus() users are
    1570             :          * supposed to do thaw_secondary_cpus() on the failure path.
    1571             :          */
    1572             :         cpu_hotplug_disabled++;
    1573             : 
    1574             :         cpu_maps_update_done();
    1575             :         return error;
    1576             : }
    1577             : 
    1578             : void __weak arch_thaw_secondary_cpus_begin(void)
    1579             : {
    1580             : }
    1581             : 
    1582             : void __weak arch_thaw_secondary_cpus_end(void)
    1583             : {
    1584             : }
    1585             : 
    1586             : void thaw_secondary_cpus(void)
    1587             : {
    1588             :         int cpu, error;
    1589             : 
    1590             :         /* Allow everyone to use the CPU hotplug again */
    1591             :         cpu_maps_update_begin();
    1592             :         __cpu_hotplug_enable();
    1593             :         if (cpumask_empty(frozen_cpus))
    1594             :                 goto out;
    1595             : 
    1596             :         pr_info("Enabling non-boot CPUs ...\n");
    1597             : 
    1598             :         arch_thaw_secondary_cpus_begin();
    1599             : 
    1600             :         for_each_cpu(cpu, frozen_cpus) {
    1601             :                 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
    1602             :                 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
    1603             :                 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
    1604             :                 if (!error) {
    1605             :                         pr_info("CPU%d is up\n", cpu);
    1606             :                         continue;
    1607             :                 }
    1608             :                 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
    1609             :         }
    1610             : 
    1611             :         arch_thaw_secondary_cpus_end();
    1612             : 
    1613             :         cpumask_clear(frozen_cpus);
    1614             : out:
    1615             :         cpu_maps_update_done();
    1616             : }
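/*
 * Editor's illustration (hedged, not part of cpu.c): the pairing expected of
 * freeze_secondary_cpus() users, mirroring the suspend/resume usage. Note
 * that thaw_secondary_cpus() must run even on failure, as the comment in
 * freeze_secondary_cpus() requires. example_single_cpu_work() is hypothetical.
 */
static int example_single_cpu_work(void)
{
        int error;

        /* -1: let the core pick a suitable housekeeping CPU to stay online. */
        error = freeze_secondary_cpus(-1);
        if (!error) {
                /* ... work that requires exactly one online CPU ... */
        }

        /* Thaw unconditionally; cpu_hotplug_disabled was bumped either way. */
        thaw_secondary_cpus();
        return error;
}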
    1617             : 
    1618             : static int __init alloc_frozen_cpus(void)
    1619             : {
    1620             :         if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
    1621             :                 return -ENOMEM;
    1622             :         return 0;
    1623             : }
    1624             : core_initcall(alloc_frozen_cpus);
    1625             : 
    1626             : /*
    1627             :  * When callbacks for CPU hotplug notifications are being executed, we must
    1628             :  * ensure that the state of the system with respect to the tasks being frozen
    1629             :  * or not, as reported by the notification, remains unchanged *throughout the
    1630             :  * duration* of the execution of the callbacks.
    1631             :  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
    1632             :  *
    1633             :  * This synchronization is implemented by mutually excluding regular CPU
    1634             :  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
    1635             :  * Hibernate notifications.
    1636             :  */
    1637             : static int
    1638             : cpu_hotplug_pm_callback(struct notifier_block *nb,
    1639             :                         unsigned long action, void *ptr)
    1640             : {
    1641             :         switch (action) {
    1642             : 
    1643             :         case PM_SUSPEND_PREPARE:
    1644             :         case PM_HIBERNATION_PREPARE:
    1645             :                 cpu_hotplug_disable();
    1646             :                 break;
    1647             : 
    1648             :         case PM_POST_SUSPEND:
    1649             :         case PM_POST_HIBERNATION:
    1650             :                 cpu_hotplug_enable();
    1651             :                 break;
    1652             : 
    1653             :         default:
    1654             :                 return NOTIFY_DONE;
    1655             :         }
    1656             : 
    1657             :         return NOTIFY_OK;
    1658             : }
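/*
 * Editor's illustration (hedged, not part of cpu.c): the same exclusion is
 * available to other code via the exported cpu_hotplug_disable() /
 * cpu_hotplug_enable() helpers. example_no_hotplug_section() is hypothetical.
 */
static void example_no_hotplug_section(void)
{
        cpu_hotplug_disable();
        /*
         * While disabled, hotplug attempts fail with -EBUSY (see
         * cpu_down_maps_locked() and cpu_up() above).
         */
        cpu_hotplug_enable();
}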
    1659             : 
    1660             : 
    1661             : static int __init cpu_hotplug_pm_sync_init(void)
    1662             : {
    1663             :         /*
     1664             :          * cpu_hotplug_pm_callback has higher priority than x86's
     1665             :          * bsp_pm_callback, which relies on cpu_hotplug_pm_callback
     1666             :          * having disabled cpu hotplug to avoid a cpu hotplug race.
    1667             :          */
    1668             :         pm_notifier(cpu_hotplug_pm_callback, 0);
    1669             :         return 0;
    1670             : }
    1671             : core_initcall(cpu_hotplug_pm_sync_init);
    1672             : 
    1673             : #endif /* CONFIG_PM_SLEEP_SMP */
    1674             : 
    1675             : int __boot_cpu_id;
    1676             : 
    1677             : #endif /* CONFIG_SMP */
    1678             : 
    1679             : /* Boot processor state steps */
    1680             : static struct cpuhp_step cpuhp_hp_states[] = {
    1681             :         [CPUHP_OFFLINE] = {
    1682             :                 .name                   = "offline",
    1683             :                 .startup.single         = NULL,
    1684             :                 .teardown.single        = NULL,
    1685             :         },
    1686             : #ifdef CONFIG_SMP
     1687             :         [CPUHP_CREATE_THREADS] = {
    1688             :                 .name                   = "threads:prepare",
    1689             :                 .startup.single         = smpboot_create_threads,
    1690             :                 .teardown.single        = NULL,
    1691             :                 .cant_stop              = true,
    1692             :         },
    1693             :         [CPUHP_PERF_PREPARE] = {
    1694             :                 .name                   = "perf:prepare",
    1695             :                 .startup.single         = perf_event_init_cpu,
    1696             :                 .teardown.single        = perf_event_exit_cpu,
    1697             :         },
    1698             :         [CPUHP_RANDOM_PREPARE] = {
    1699             :                 .name                   = "random:prepare",
    1700             :                 .startup.single         = random_prepare_cpu,
    1701             :                 .teardown.single        = NULL,
    1702             :         },
    1703             :         [CPUHP_WORKQUEUE_PREP] = {
    1704             :                 .name                   = "workqueue:prepare",
    1705             :                 .startup.single         = workqueue_prepare_cpu,
    1706             :                 .teardown.single        = NULL,
    1707             :         },
    1708             :         [CPUHP_HRTIMERS_PREPARE] = {
    1709             :                 .name                   = "hrtimers:prepare",
    1710             :                 .startup.single         = hrtimers_prepare_cpu,
    1711             :                 .teardown.single        = hrtimers_dead_cpu,
    1712             :         },
    1713             :         [CPUHP_SMPCFD_PREPARE] = {
    1714             :                 .name                   = "smpcfd:prepare",
    1715             :                 .startup.single         = smpcfd_prepare_cpu,
    1716             :                 .teardown.single        = smpcfd_dead_cpu,
    1717             :         },
    1718             :         [CPUHP_RELAY_PREPARE] = {
    1719             :                 .name                   = "relay:prepare",
    1720             :                 .startup.single         = relay_prepare_cpu,
    1721             :                 .teardown.single        = NULL,
    1722             :         },
    1723             :         [CPUHP_SLAB_PREPARE] = {
    1724             :                 .name                   = "slab:prepare",
    1725             :                 .startup.single         = slab_prepare_cpu,
    1726             :                 .teardown.single        = slab_dead_cpu,
    1727             :         },
    1728             :         [CPUHP_RCUTREE_PREP] = {
    1729             :                 .name                   = "RCU/tree:prepare",
    1730             :                 .startup.single         = rcutree_prepare_cpu,
    1731             :                 .teardown.single        = rcutree_dead_cpu,
    1732             :         },
    1733             :         /*
    1734             :          * On the tear-down path, timers_dead_cpu() must be invoked
    1735             :          * before blk_mq_queue_reinit_notify() from notify_dead(),
     1736             :          * otherwise an RCU stall occurs.
    1737             :          */
    1738             :         [CPUHP_TIMERS_PREPARE] = {
    1739             :                 .name                   = "timers:prepare",
    1740             :                 .startup.single         = timers_prepare_cpu,
    1741             :                 .teardown.single        = timers_dead_cpu,
    1742             :         },
    1743             :         /* Kicks the plugged cpu into life */
    1744             :         [CPUHP_BRINGUP_CPU] = {
    1745             :                 .name                   = "cpu:bringup",
    1746             :                 .startup.single         = bringup_cpu,
    1747             :                 .teardown.single        = finish_cpu,
    1748             :                 .cant_stop              = true,
    1749             :         },
    1750             :         /* Final state before CPU kills itself */
    1751             :         [CPUHP_AP_IDLE_DEAD] = {
    1752             :                 .name                   = "idle:dead",
    1753             :         },
    1754             :         /*
    1755             :          * Last state before CPU enters the idle loop to die. Transient state
    1756             :          * for synchronization.
    1757             :          */
    1758             :         [CPUHP_AP_OFFLINE] = {
    1759             :                 .name                   = "ap:offline",
    1760             :                 .cant_stop              = true,
    1761             :         },
    1762             :         /* First state is scheduler control. Interrupts are disabled */
    1763             :         [CPUHP_AP_SCHED_STARTING] = {
    1764             :                 .name                   = "sched:starting",
    1765             :                 .startup.single         = sched_cpu_starting,
    1766             :                 .teardown.single        = sched_cpu_dying,
    1767             :         },
    1768             :         [CPUHP_AP_RCUTREE_DYING] = {
    1769             :                 .name                   = "RCU/tree:dying",
    1770             :                 .startup.single         = NULL,
    1771             :                 .teardown.single        = rcutree_dying_cpu,
    1772             :         },
    1773             :         [CPUHP_AP_SMPCFD_DYING] = {
    1774             :                 .name                   = "smpcfd:dying",
    1775             :                 .startup.single         = NULL,
    1776             :                 .teardown.single        = smpcfd_dying_cpu,
    1777             :         },
     1778             :         /* Entry state on starting. Interrupts enabled from here on.
     1779             :          * Transient state for synchronization. */
    1780             :         [CPUHP_AP_ONLINE] = {
    1781             :                 .name                   = "ap:online",
    1782             :         },
    1783             :         /*
    1784             :          * Handled on control processor until the plugged processor manages
    1785             :          * this itself.
    1786             :          */
    1787             :         [CPUHP_TEARDOWN_CPU] = {
    1788             :                 .name                   = "cpu:teardown",
    1789             :                 .startup.single         = NULL,
    1790             :                 .teardown.single        = takedown_cpu,
    1791             :                 .cant_stop              = true,
    1792             :         },
    1793             : 
    1794             :         [CPUHP_AP_SCHED_WAIT_EMPTY] = {
    1795             :                 .name                   = "sched:waitempty",
    1796             :                 .startup.single         = NULL,
    1797             :                 .teardown.single        = sched_cpu_wait_empty,
    1798             :         },
    1799             : 
    1800             :         /* Handle smpboot threads park/unpark */
    1801             :         [CPUHP_AP_SMPBOOT_THREADS] = {
    1802             :                 .name                   = "smpboot/threads:online",
    1803             :                 .startup.single         = smpboot_unpark_threads,
    1804             :                 .teardown.single        = smpboot_park_threads,
    1805             :         },
    1806             :         [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
    1807             :                 .name                   = "irq/affinity:online",
    1808             :                 .startup.single         = irq_affinity_online_cpu,
    1809             :                 .teardown.single        = NULL,
    1810             :         },
    1811             :         [CPUHP_AP_PERF_ONLINE] = {
    1812             :                 .name                   = "perf:online",
    1813             :                 .startup.single         = perf_event_init_cpu,
    1814             :                 .teardown.single        = perf_event_exit_cpu,
    1815             :         },
    1816             :         [CPUHP_AP_WATCHDOG_ONLINE] = {
    1817             :                 .name                   = "lockup_detector:online",
    1818             :                 .startup.single         = lockup_detector_online_cpu,
    1819             :                 .teardown.single        = lockup_detector_offline_cpu,
    1820             :         },
    1821             :         [CPUHP_AP_WORKQUEUE_ONLINE] = {
    1822             :                 .name                   = "workqueue:online",
    1823             :                 .startup.single         = workqueue_online_cpu,
    1824             :                 .teardown.single        = workqueue_offline_cpu,
    1825             :         },
    1826             :         [CPUHP_AP_RANDOM_ONLINE] = {
    1827             :                 .name                   = "random:online",
    1828             :                 .startup.single         = random_online_cpu,
    1829             :                 .teardown.single        = NULL,
    1830             :         },
    1831             :         [CPUHP_AP_RCUTREE_ONLINE] = {
    1832             :                 .name                   = "RCU/tree:online",
    1833             :                 .startup.single         = rcutree_online_cpu,
    1834             :                 .teardown.single        = rcutree_offline_cpu,
    1835             :         },
    1836             : #endif
    1837             :         /*
    1838             :          * The dynamically registered state space is here
    1839             :          */
    1840             : 
    1841             : #ifdef CONFIG_SMP
    1842             :         /* Last state is scheduler control setting the cpu active */
    1843             :         [CPUHP_AP_ACTIVE] = {
    1844             :                 .name                   = "sched:active",
    1845             :                 .startup.single         = sched_cpu_activate,
    1846             :                 .teardown.single        = sched_cpu_deactivate,
    1847             :         },
    1848             : #endif
    1849             : 
    1850             :         /* CPU is fully up and running. */
    1851             :         [CPUHP_ONLINE] = {
    1852             :                 .name                   = "online",
    1853             :                 .startup.single         = NULL,
    1854             :                 .teardown.single        = NULL,
    1855             :         },
    1856             : };
    1857             : 
    1858             : /* Sanity check for callbacks */
    1859             : static int cpuhp_cb_check(enum cpuhp_state state)
    1860             : {
    1861          18 :         if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
    1862             :                 return -EINVAL;
    1863             :         return 0;
    1864             : }
    1865             : 
    1866             : /*
     1867             :  * Returns a free slot for dynamic state assignment in the requested range. The
     1868             :  * states are protected by the cpuhp_state_mutex and an empty slot is identified
     1869             :  * by having no name assigned.
    1870             :  */
    1871           5 : static int cpuhp_reserve_state(enum cpuhp_state state)
    1872             : {
    1873             :         enum cpuhp_state i, end;
    1874             :         struct cpuhp_step *step;
    1875             : 
    1876           5 :         switch (state) {
    1877             :         case CPUHP_AP_ONLINE_DYN:
    1878             :                 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
    1879             :                 end = CPUHP_AP_ONLINE_DYN_END;
    1880             :                 break;
    1881             :         case CPUHP_BP_PREPARE_DYN:
    1882           1 :                 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
    1883           1 :                 end = CPUHP_BP_PREPARE_DYN_END;
    1884           1 :                 break;
    1885             :         default:
    1886             :                 return -EINVAL;
    1887             :         }
    1888             : 
    1889          11 :         for (i = state; i <= end; i++, step++) {
    1890          11 :                 if (!step->name)
    1891             :                         return i;
    1892             :         }
    1893           0 :         WARN(1, "No more dynamic states available for CPU hotplug\n");
    1894           0 :         return -ENOSPC;
    1895             : }
    1896             : 
    1897          18 : static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
    1898             :                                  int (*startup)(unsigned int cpu),
    1899             :                                  int (*teardown)(unsigned int cpu),
    1900             :                                  bool multi_instance)
    1901             : {
    1902             :         /* (Un)Install the callbacks for further cpu hotplug operations */
    1903             :         struct cpuhp_step *sp;
    1904          19 :         int ret = 0;
    1905             : 
    1906             :         /*
    1907             :          * If name is NULL, then the state gets removed.
    1908             :          *
    1909             :          * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
    1910             :          * the first allocation from these dynamic ranges, so the removal
    1911             :          * would trigger a new allocation and clear the wrong (already
    1912             :          * empty) state, leaving the callbacks of the to be cleared state
    1913             :          * dangling, which causes wreckage on the next hotplug operation.
    1914             :          */
    1915          36 :         if (name && (state == CPUHP_AP_ONLINE_DYN ||
    1916          18 :                      state == CPUHP_BP_PREPARE_DYN)) {
    1917           5 :                 ret = cpuhp_reserve_state(state);
    1918           5 :                 if (ret < 0)
    1919             :                         return ret;
    1920             :                 state = ret;
    1921             :         }
    1922          19 :         sp = cpuhp_get_step(state);
    1923          18 :         if (name && sp->name)
    1924             :                 return -EBUSY;
    1925             : 
    1926          19 :         sp->startup.single = startup;
    1927          19 :         sp->teardown.single = teardown;
    1928          19 :         sp->name = name;
    1929          19 :         sp->multi_instance = multi_instance;
    1930          19 :         INIT_HLIST_HEAD(&sp->list);
    1931          18 :         return ret;
    1932             : }
    1933             : 
    1934             : static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
    1935             : {
    1936           0 :         return cpuhp_get_step(state)->teardown.single;
    1937             : }
    1938             : 
    1939             : /*
    1940             :  * Call the startup/teardown function for a step either on the AP or
    1941             :  * on the current CPU.
    1942             :  */
    1943           3 : static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
    1944             :                             struct hlist_node *node)
    1945             : {
    1946           3 :         struct cpuhp_step *sp = cpuhp_get_step(state);
    1947             :         int ret;
    1948             : 
    1949             :         /*
     1950             :          * If there's nothing to do, we're done.
    1951             :          * Relies on the union for multi_instance.
    1952             :          */
    1953           6 :         if (cpuhp_step_empty(bringup, sp))
    1954             :                 return 0;
    1955             :         /*
     1956             :          * The non-AP-bound callbacks can fail on bringup. On teardown,
     1957             :          * e.g. module removal, we crash for now.
    1958             :          */
    1959             : #ifdef CONFIG_SMP
    1960             :         if (cpuhp_is_ap_state(state))
    1961             :                 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
    1962             :         else
    1963             :                 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
    1964             : #else
    1965           3 :         ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
    1966             : #endif
    1967           3 :         BUG_ON(ret && !bringup);
    1968             :         return ret;
    1969             : }
    1970             : 
    1971             : /*
    1972             :  * Called from __cpuhp_setup_state on a recoverable failure.
    1973             :  *
    1974             :  * Note: The teardown callbacks for rollback are not allowed to fail!
    1975             :  */
    1976           1 : static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
    1977             :                                    struct hlist_node *node)
    1978             : {
    1979             :         int cpu;
    1980             : 
    1981             :         /* Roll back the already executed steps on the other cpus */
    1982           1 :         for_each_present_cpu(cpu) {
    1983           1 :                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    1984           1 :                 int cpustate = st->state;
    1985             : 
    1986           1 :                 if (cpu >= failedcpu)
    1987             :                         break;
    1988             : 
    1989             :                 /* Did we invoke the startup call on that cpu ? */
    1990           0 :                 if (cpustate >= state)
    1991           0 :                         cpuhp_issue_call(cpu, state, false, node);
    1992             :         }
    1993           1 : }
    1994             : 
    1995           2 : int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
    1996             :                                           struct hlist_node *node,
    1997             :                                           bool invoke)
    1998             : {
    1999             :         struct cpuhp_step *sp;
    2000             :         int cpu;
    2001             :         int ret;
    2002             : 
    2003           2 :         lockdep_assert_cpus_held();
    2004             : 
    2005           2 :         sp = cpuhp_get_step(state);
    2006           2 :         if (sp->multi_instance == false)
    2007             :                 return -EINVAL;
    2008             : 
    2009           2 :         mutex_lock(&cpuhp_state_mutex);
    2010             : 
    2011           2 :         if (!invoke || !sp->startup.multi)
    2012             :                 goto add_node;
    2013             : 
    2014             :         /*
    2015             :          * Try to call the startup callback for each present cpu
    2016             :          * depending on the hotplug state of the cpu.
    2017             :          */
    2018           0 :         for_each_present_cpu(cpu) {
    2019           0 :                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    2020           0 :                 int cpustate = st->state;
    2021             : 
    2022           0 :                 if (cpustate < state)
    2023           0 :                         continue;
    2024             : 
    2025           0 :                 ret = cpuhp_issue_call(cpu, state, true, node);
    2026           0 :                 if (ret) {
    2027           0 :                         if (sp->teardown.multi)
    2028           0 :                                 cpuhp_rollback_install(cpu, state, node);
    2029             :                         goto unlock;
    2030             :                 }
    2031             :         }
    2032             : add_node:
    2033           2 :         ret = 0;
    2034           2 :         hlist_add_head(node, &sp->list);
    2035             : unlock:
    2036           2 :         mutex_unlock(&cpuhp_state_mutex);
    2037           2 :         return ret;
    2038             : }
    2039             : 
    2040           2 : int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
    2041             :                                bool invoke)
    2042             : {
    2043             :         int ret;
    2044             : 
    2045             :         cpus_read_lock();
    2046           2 :         ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
    2047             :         cpus_read_unlock();
    2048           2 :         return ret;
    2049             : }
    2050             : EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
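/*
 * Editor's illustration (hedged, not part of cpu.c): drivers normally reach
 * this entry point through the cpuhp_state_add_instance() wrapper, after
 * registering a multi-instance state with cpuhp_setup_state_multi(). All
 * "my_*" names are hypothetical.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static enum cpuhp_state my_hp_state;    /* set from cpuhp_setup_state_multi() */

struct my_ctx {
        struct hlist_node node;         /* links this instance into the state */
        /* ... per-device data ... */
};

static int my_instance_online(unsigned int cpu, struct hlist_node *node)
{
        /* struct my_ctx *ctx = hlist_entry(node, struct my_ctx, node); */
        return 0;
}

static int __init my_multi_init(void)
{
        int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                          my_instance_online, NULL);
        if (ret < 0)
                return ret;
        my_hp_state = ret;
        return 0;
}

static int my_register_instance(struct my_ctx *ctx)
{
        /*
         * Invokes my_instance_online() for this instance on each present CPU
         * that has already reached my_hp_state, then links the node in.
         */
        return cpuhp_state_add_instance(my_hp_state, &ctx->node);
}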
    2051             : 
    2052             : /**
     2053             :  * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
    2054             :  * @state:              The state to setup
    2055             :  * @name:               Name of the step
    2056             :  * @invoke:             If true, the startup function is invoked for cpus where
    2057             :  *                      cpu state >= @state
    2058             :  * @startup:            startup callback function
    2059             :  * @teardown:           teardown callback function
    2060             :  * @multi_instance:     State is set up for multiple instances which get
    2061             :  *                      added afterwards.
    2062             :  *
    2063             :  * The caller needs to hold cpus read locked while calling this function.
    2064             :  * Return:
    2065             :  *   On success:
    2066             :  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
    2067             :  *      0 for all other states
    2068             :  *   On failure: proper (negative) error code
    2069             :  */
    2070          18 : int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
    2071             :                                    const char *name, bool invoke,
    2072             :                                    int (*startup)(unsigned int cpu),
    2073             :                                    int (*teardown)(unsigned int cpu),
    2074             :                                    bool multi_instance)
    2075             : {
    2076          18 :         int cpu, ret = 0;
    2077             :         bool dynstate;
    2078             : 
    2079          18 :         lockdep_assert_cpus_held();
    2080             : 
    2081          18 :         if (cpuhp_cb_check(state) || !name)
    2082             :                 return -EINVAL;
    2083             : 
    2084          18 :         mutex_lock(&cpuhp_state_mutex);
    2085             : 
    2086          18 :         ret = cpuhp_store_callbacks(state, name, startup, teardown,
    2087             :                                     multi_instance);
    2088             : 
    2089          18 :         dynstate = state == CPUHP_AP_ONLINE_DYN;
    2090          18 :         if (ret > 0 && dynstate) {
    2091           4 :                 state = ret;
    2092           4 :                 ret = 0;
    2093             :         }
    2094             : 
    2095          18 :         if (ret || !invoke || !startup)
    2096             :                 goto out;
    2097             : 
    2098             :         /*
    2099             :          * Try to call the startup callback for each present cpu
    2100             :          * depending on the hotplug state of the cpu.
    2101             :          */
    2102           2 :         for_each_present_cpu(cpu) {
    2103           3 :                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    2104           3 :                 int cpustate = st->state;
    2105             : 
    2106           3 :                 if (cpustate < state)
    2107           0 :                         continue;
    2108             : 
    2109           3 :                 ret = cpuhp_issue_call(cpu, state, true, NULL);
    2110           3 :                 if (ret) {
    2111           1 :                         if (teardown)
    2112           1 :                                 cpuhp_rollback_install(cpu, state, NULL);
    2113             :                         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
    2114             :                         goto out;
    2115             :                 }
    2116             :         }
    2117             : out:
    2118          18 :         mutex_unlock(&cpuhp_state_mutex);
    2119             :         /*
    2120             :          * If the requested state is CPUHP_AP_ONLINE_DYN, return the
    2121             :          * dynamically allocated state in case of success.
    2122             :          */
    2123          18 :         if (!ret && dynstate)
    2124             :                 return state;
    2125          14 :         return ret;
    2126             : }
    2127             : EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
    2128             : 
    2129          18 : int __cpuhp_setup_state(enum cpuhp_state state,
    2130             :                         const char *name, bool invoke,
    2131             :                         int (*startup)(unsigned int cpu),
    2132             :                         int (*teardown)(unsigned int cpu),
    2133             :                         bool multi_instance)
    2134             : {
    2135             :         int ret;
    2136             : 
    2137             :         cpus_read_lock();
    2138          18 :         ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
    2139             :                                              teardown, multi_instance);
    2140             :         cpus_read_unlock();
    2141          18 :         return ret;
    2142             : }
    2143             : EXPORT_SYMBOL(__cpuhp_setup_state);
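
/*
 * Editor's sketch (not part of cpu.c): minimal use of the single-callback
 * setup documented above. A dynamic state is requested, the returned state
 * number is stored, and the registration is torn down again on module exit
 * via cpuhp_remove_state(). "myrdrv" and its callbacks are hypothetical.
 */
static enum cpuhp_state myrdrv_hp_state;

static int myrdrv_cpu_online(unsigned int cpu)
{
	/* set up the per-CPU part here; a non-zero return rolls back */
	return 0;
}

static int myrdrv_cpu_offline(unsigned int cpu)
{
	/* must not fail */
	return 0;
}

static int __init myrdrv_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "myrdrv:online",
				myrdrv_cpu_online, myrdrv_cpu_offline);
	if (ret < 0)
		return ret;
	myrdrv_hp_state = ret;	/* CPUHP_AP_ONLINE_DYN returns the allocated state */
	return 0;
}

static void __exit myrdrv_exit(void)
{
	cpuhp_remove_state(myrdrv_hp_state);
}
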
    2144             : 
    2145           0 : int __cpuhp_state_remove_instance(enum cpuhp_state state,
    2146             :                                   struct hlist_node *node, bool invoke)
    2147             : {
    2148           0 :         struct cpuhp_step *sp = cpuhp_get_step(state);
    2149             :         int cpu;
    2150             : 
    2151           0 :         BUG_ON(cpuhp_cb_check(state));
    2152             : 
    2153           0 :         if (!sp->multi_instance)
    2154             :                 return -EINVAL;
    2155             : 
    2156             :         cpus_read_lock();
    2157           0 :         mutex_lock(&cpuhp_state_mutex);
    2158             : 
    2159           0 :         if (!invoke || !cpuhp_get_teardown_cb(state))
    2160             :                 goto remove;
    2161             :         /*
    2162             :          * Call the teardown callback for each present cpu depending
    2163             :          * on the hotplug state of the cpu. This function is not
    2164             :          * allowed to fail currently!
    2165             :          */
    2166           0 :         for_each_present_cpu(cpu) {
    2167           0 :                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    2168           0 :                 int cpustate = st->state;
    2169             : 
    2170           0 :                 if (cpustate >= state)
    2171           0 :                         cpuhp_issue_call(cpu, state, false, node);
    2172             :         }
    2173             : 
    2174             : remove:
    2175           0 :         hlist_del(node);
    2176           0 :         mutex_unlock(&cpuhp_state_mutex);
    2177             :         cpus_read_unlock();
    2178             : 
    2179           0 :         return 0;
    2180             : }
    2181             : EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
    2182             : 
    2183             : /**
    2184             :  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
    2185             :  * @state:      The state to remove
    2186             :  * @invoke:     If true, the teardown function is invoked for cpus where
    2187             :  *              cpu state >= @state
    2188             :  *
    2189             :  * The caller needs to hold cpus read locked while calling this function.
    2190             :  * The teardown callback is currently not allowed to fail. Think
    2191             :  * about module removal!
    2192             :  */
    2193           0 : void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
    2194             : {
    2195           0 :         struct cpuhp_step *sp = cpuhp_get_step(state);
    2196             :         int cpu;
    2197             : 
    2198           0 :         BUG_ON(cpuhp_cb_check(state));
    2199             : 
    2200             :         lockdep_assert_cpus_held();
    2201             : 
    2202           0 :         mutex_lock(&cpuhp_state_mutex);
    2203           0 :         if (sp->multi_instance) {
    2204           0 :                 WARN(!hlist_empty(&sp->list),
    2205             :                      "Error: Removing state %d which has instances left.\n",
    2206             :                      state);
    2207             :                 goto remove;
    2208             :         }
    2209             : 
    2210           0 :         if (!invoke || !cpuhp_get_teardown_cb(state))
    2211             :                 goto remove;
    2212             : 
    2213             :         /*
    2214             :          * Call the teardown callback for each present cpu depending
    2215             :          * on the hotplug state of the cpu. This function is not
    2216             :          * allowed to fail currently!
    2217             :          */
    2218           0 :         for_each_present_cpu(cpu) {
    2219           0 :                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    2220           0 :                 int cpustate = st->state;
    2221             : 
    2222           0 :                 if (cpustate >= state)
    2223           0 :                         cpuhp_issue_call(cpu, state, false, NULL);
    2224             :         }
    2225             : remove:
    2226           0 :         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
    2227           0 :         mutex_unlock(&cpuhp_state_mutex);
    2228           0 : }
    2229             : EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
    2230             : 
    2231           0 : void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
    2232             : {
    2233             :         cpus_read_lock();
    2234           0 :         __cpuhp_remove_state_cpuslocked(state, invoke);
    2235             :         cpus_read_unlock();
    2236           0 : }
    2237             : EXPORT_SYMBOL(__cpuhp_remove_state);
    2238             : 
    2239             : #ifdef CONFIG_HOTPLUG_SMT
    2240             : static void cpuhp_offline_cpu_device(unsigned int cpu)
    2241             : {
    2242             :         struct device *dev = get_cpu_device(cpu);
    2243             : 
    2244             :         dev->offline = true;
    2245             :         /* Tell user space about the state change */
    2246             :         kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
    2247             : }
    2248             : 
    2249             : static void cpuhp_online_cpu_device(unsigned int cpu)
    2250             : {
    2251             :         struct device *dev = get_cpu_device(cpu);
    2252             : 
    2253             :         dev->offline = false;
    2254             :         /* Tell user space about the state change */
    2255             :         kobject_uevent(&dev->kobj, KOBJ_ONLINE);
    2256             : }
    2257             : 
    2258             : int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
    2259             : {
    2260             :         int cpu, ret = 0;
    2261             : 
    2262             :         cpu_maps_update_begin();
    2263             :         for_each_online_cpu(cpu) {
    2264             :                 if (topology_is_primary_thread(cpu))
    2265             :                         continue;
    2266             :                 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
    2267             :                 if (ret)
    2268             :                         break;
    2269             :                 /*
    2270             :                  * As this needs to hold the cpu maps lock it's impossible
    2271             :                  * to call device_offline() because that ends up calling
    2272             :                  * cpu_down() which takes cpu maps lock. cpu maps lock
    2273             :                  * needs to be held as this might race against in-kernel
    2274             :                  * abusers of the hotplug machinery (thermal management).
    2275             :                  *
    2276             :                  * So nothing would update device:offline state. That would
    2277             :                  * leave the sysfs entry stale and prevent onlining after
    2278             :                  * smt control has been changed to 'off' again. This is
    2279             :                  * called under the sysfs hotplug lock, so it is properly
    2280             :                  * serialized against the regular offline usage.
    2281             :                  */
    2282             :                 cpuhp_offline_cpu_device(cpu);
    2283             :         }
    2284             :         if (!ret)
    2285             :                 cpu_smt_control = ctrlval;
    2286             :         cpu_maps_update_done();
    2287             :         return ret;
    2288             : }
    2289             : 
    2290             : int cpuhp_smt_enable(void)
    2291             : {
    2292             :         int cpu, ret = 0;
    2293             : 
    2294             :         cpu_maps_update_begin();
    2295             :         cpu_smt_control = CPU_SMT_ENABLED;
    2296             :         for_each_present_cpu(cpu) {
    2297             :                 /* Skip online CPUs and CPUs on offline nodes */
    2298             :                 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
    2299             :                         continue;
    2300             :                 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
    2301             :                 if (ret)
    2302             :                         break;
    2303             :                 /* See comment in cpuhp_smt_disable() */
    2304             :                 cpuhp_online_cpu_device(cpu);
    2305             :         }
    2306             :         cpu_maps_update_done();
    2307             :         return ret;
    2308             : }
    2309             : #endif
    2310             : 
    2311             : #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
    2312             : static ssize_t state_show(struct device *dev,
    2313             :                           struct device_attribute *attr, char *buf)
    2314             : {
    2315             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
    2316             : 
    2317             :         return sprintf(buf, "%d\n", st->state);
    2318             : }
    2319             : static DEVICE_ATTR_RO(state);
    2320             : 
    2321             : static ssize_t target_store(struct device *dev, struct device_attribute *attr,
    2322             :                             const char *buf, size_t count)
    2323             : {
    2324             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
    2325             :         struct cpuhp_step *sp;
    2326             :         int target, ret;
    2327             : 
    2328             :         ret = kstrtoint(buf, 10, &target);
    2329             :         if (ret)
    2330             :                 return ret;
    2331             : 
    2332             : #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
    2333             :         if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
    2334             :                 return -EINVAL;
    2335             : #else
    2336             :         if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
    2337             :                 return -EINVAL;
    2338             : #endif
    2339             : 
    2340             :         ret = lock_device_hotplug_sysfs();
    2341             :         if (ret)
    2342             :                 return ret;
    2343             : 
    2344             :         mutex_lock(&cpuhp_state_mutex);
    2345             :         sp = cpuhp_get_step(target);
    2346             :         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
    2347             :         mutex_unlock(&cpuhp_state_mutex);
    2348             :         if (ret)
    2349             :                 goto out;
    2350             : 
    2351             :         if (st->state < target)
    2352             :                 ret = cpu_up(dev->id, target);
    2353             :         else if (st->state > target)
    2354             :                 ret = cpu_down(dev->id, target);
    2355             :         else if (WARN_ON(st->target != target))
    2356             :                 st->target = target;
    2357             : out:
    2358             :         unlock_device_hotplug();
    2359             :         return ret ? ret : count;
    2360             : }
    2361             : 
    2362             : static ssize_t target_show(struct device *dev,
    2363             :                            struct device_attribute *attr, char *buf)
    2364             : {
    2365             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
    2366             : 
    2367             :         return sprintf(buf, "%d\n", st->target);
    2368             : }
    2369             : static DEVICE_ATTR_RW(target);
    2370             : 
    2371             : static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
    2372             :                           const char *buf, size_t count)
    2373             : {
    2374             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
    2375             :         struct cpuhp_step *sp;
    2376             :         int fail, ret;
    2377             : 
    2378             :         ret = kstrtoint(buf, 10, &fail);
    2379             :         if (ret)
    2380             :                 return ret;
    2381             : 
    2382             :         if (fail == CPUHP_INVALID) {
    2383             :                 st->fail = fail;
    2384             :                 return count;
    2385             :         }
    2386             : 
    2387             :         if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
    2388             :                 return -EINVAL;
    2389             : 
    2390             :         /*
    2391             :          * Cannot fail STARTING/DYING callbacks.
    2392             :          */
    2393             :         if (cpuhp_is_atomic_state(fail))
    2394             :                 return -EINVAL;
    2395             : 
    2396             :         /*
    2397             :          * DEAD callbacks cannot fail...
    2398             :          * ... neither can CPUHP_BRINGUP_CPU during hot-unplug. The latter
    2399             :          * triggers the STARTING callbacks, so a failure in this state would
    2400             :          * hinder rollback.
    2401             :          */
    2402             :         if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
    2403             :                 return -EINVAL;
    2404             : 
    2405             :         /*
    2406             :          * Cannot fail anything that doesn't have callbacks.
    2407             :          */
    2408             :         mutex_lock(&cpuhp_state_mutex);
    2409             :         sp = cpuhp_get_step(fail);
    2410             :         if (!sp->startup.single && !sp->teardown.single)
    2411             :                 ret = -EINVAL;
    2412             :         mutex_unlock(&cpuhp_state_mutex);
    2413             :         if (ret)
    2414             :                 return ret;
    2415             : 
    2416             :         st->fail = fail;
    2417             : 
    2418             :         return count;
    2419             : }
    2420             : 
    2421             : static ssize_t fail_show(struct device *dev,
    2422             :                          struct device_attribute *attr, char *buf)
    2423             : {
    2424             :         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
    2425             : 
    2426             :         return sprintf(buf, "%d\n", st->fail);
    2427             : }
    2428             : 
    2429             : static DEVICE_ATTR_RW(fail);
    2430             : 
    2431             : static struct attribute *cpuhp_cpu_attrs[] = {
    2432             :         &dev_attr_state.attr,
    2433             :         &dev_attr_target.attr,
    2434             :         &dev_attr_fail.attr,
    2435             :         NULL
    2436             : };
    2437             : 
    2438             : static const struct attribute_group cpuhp_cpu_attr_group = {
    2439             :         .attrs = cpuhp_cpu_attrs,
    2440             :         .name = "hotplug",
    2441             :         NULL
    2442             : };
    2443             : 
    2444             : static ssize_t states_show(struct device *dev,
    2445             :                                  struct device_attribute *attr, char *buf)
    2446             : {
    2447             :         ssize_t cur, res = 0;
    2448             :         int i;
    2449             : 
    2450             :         mutex_lock(&cpuhp_state_mutex);
    2451             :         for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
    2452             :                 struct cpuhp_step *sp = cpuhp_get_step(i);
    2453             : 
    2454             :                 if (sp->name) {
    2455             :                         cur = sprintf(buf, "%3d: %s\n", i, sp->name);
    2456             :                         buf += cur;
    2457             :                         res += cur;
    2458             :                 }
    2459             :         }
    2460             :         mutex_unlock(&cpuhp_state_mutex);
    2461             :         return res;
    2462             : }
    2463             : static DEVICE_ATTR_RO(states);
    2464             : 
    2465             : static struct attribute *cpuhp_cpu_root_attrs[] = {
    2466             :         &dev_attr_states.attr,
    2467             :         NULL
    2468             : };
    2469             : 
    2470             : static const struct attribute_group cpuhp_cpu_root_attr_group = {
    2471             :         .attrs = cpuhp_cpu_root_attrs,
    2472             :         .name = "hotplug",
    2473             :         NULL
    2474             : };
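
/*
 * Editor's sketch (not part of cpu.c): the two attribute groups above show
 * up in sysfs as /sys/devices/system/cpu/cpuN/hotplug/{state,target,fail}
 * and /sys/devices/system/cpu/hotplug/states. A small userspace program
 * (hedged; needs root, error handling trimmed) that drives cpu1 to
 * CPUHP_OFFLINE by writing "target" and reads back "state":
 */
#include <stdio.h>

int main(void)
{
	FILE *f;
	int state = -1;

	f = fopen("/sys/devices/system/cpu/cpu1/hotplug/target", "w");
	if (!f)
		return 1;
	fprintf(f, "0\n");	/* 0 == CPUHP_OFFLINE */
	fclose(f);

	f = fopen("/sys/devices/system/cpu/cpu1/hotplug/state", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &state) == 1)
		printf("cpu1 hotplug state: %d\n", state);
	fclose(f);
	return 0;
}
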
    2475             : 
    2476             : #ifdef CONFIG_HOTPLUG_SMT
    2477             : 
    2478             : static ssize_t
    2479             : __store_smt_control(struct device *dev, struct device_attribute *attr,
    2480             :                     const char *buf, size_t count)
    2481             : {
    2482             :         int ctrlval, ret;
    2483             : 
    2484             :         if (sysfs_streq(buf, "on"))
    2485             :                 ctrlval = CPU_SMT_ENABLED;
    2486             :         else if (sysfs_streq(buf, "off"))
    2487             :                 ctrlval = CPU_SMT_DISABLED;
    2488             :         else if (sysfs_streq(buf, "forceoff"))
    2489             :                 ctrlval = CPU_SMT_FORCE_DISABLED;
    2490             :         else
    2491             :                 return -EINVAL;
    2492             : 
    2493             :         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
    2494             :                 return -EPERM;
    2495             : 
    2496             :         if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
    2497             :                 return -ENODEV;
    2498             : 
    2499             :         ret = lock_device_hotplug_sysfs();
    2500             :         if (ret)
    2501             :                 return ret;
    2502             : 
    2503             :         if (ctrlval != cpu_smt_control) {
    2504             :                 switch (ctrlval) {
    2505             :                 case CPU_SMT_ENABLED:
    2506             :                         ret = cpuhp_smt_enable();
    2507             :                         break;
    2508             :                 case CPU_SMT_DISABLED:
    2509             :                 case CPU_SMT_FORCE_DISABLED:
    2510             :                         ret = cpuhp_smt_disable(ctrlval);
    2511             :                         break;
    2512             :                 }
    2513             :         }
    2514             : 
    2515             :         unlock_device_hotplug();
    2516             :         return ret ? ret : count;
    2517             : }
    2518             : 
    2519             : #else /* !CONFIG_HOTPLUG_SMT */
    2520             : static ssize_t
    2521             : __store_smt_control(struct device *dev, struct device_attribute *attr,
    2522             :                     const char *buf, size_t count)
    2523             : {
    2524             :         return -ENODEV;
    2525             : }
    2526             : #endif /* CONFIG_HOTPLUG_SMT */
    2527             : 
    2528             : static const char *smt_states[] = {
    2529             :         [CPU_SMT_ENABLED]               = "on",
    2530             :         [CPU_SMT_DISABLED]              = "off",
    2531             :         [CPU_SMT_FORCE_DISABLED]        = "forceoff",
    2532             :         [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
    2533             :         [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented",
    2534             : };
    2535             : 
    2536             : static ssize_t control_show(struct device *dev,
    2537             :                             struct device_attribute *attr, char *buf)
    2538             : {
    2539             :         const char *state = smt_states[cpu_smt_control];
    2540             : 
    2541             :         return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
    2542             : }
    2543             : 
    2544             : static ssize_t control_store(struct device *dev, struct device_attribute *attr,
    2545             :                              const char *buf, size_t count)
    2546             : {
    2547             :         return __store_smt_control(dev, attr, buf, count);
    2548             : }
    2549             : static DEVICE_ATTR_RW(control);
    2550             : 
    2551             : static ssize_t active_show(struct device *dev,
    2552             :                            struct device_attribute *attr, char *buf)
    2553             : {
    2554             :         return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
    2555             : }
    2556             : static DEVICE_ATTR_RO(active);
    2557             : 
    2558             : static struct attribute *cpuhp_smt_attrs[] = {
    2559             :         &dev_attr_control.attr,
    2560             :         &dev_attr_active.attr,
    2561             :         NULL
    2562             : };
    2563             : 
    2564             : static const struct attribute_group cpuhp_smt_attr_group = {
    2565             :         .attrs = cpuhp_smt_attrs,
    2566             :         .name = "smt",
    2567             :         NULL
    2568             : };
    2569             : 
    2570             : static int __init cpu_smt_sysfs_init(void)
    2571             : {
    2572             :         struct device *dev_root;
    2573             :         int ret = -ENODEV;
    2574             : 
    2575             :         dev_root = bus_get_dev_root(&cpu_subsys);
    2576             :         if (dev_root) {
    2577             :                 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
    2578             :                 put_device(dev_root);
    2579             :         }
    2580             :         return ret;
    2581             : }
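
/*
 * Editor's sketch (not part of cpu.c): cpu_smt_sysfs_init() above attaches
 * the "smt" group to the cpu subsystem root, so the control surfaces as
 * /sys/devices/system/cpu/smt/{control,active}. Valid writes to "control"
 * are "on", "off" and "forceoff" (see __store_smt_control()). A hedged
 * userspace snippet (needs root, error handling trimmed) that disables SMT
 * unless it already is disabled:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r+");
	char cur[32] = "";

	if (!f)
		return 1;
	if (fgets(cur, sizeof(cur), f) && strncmp(cur, "off", 3)) {
		rewind(f);
		fputs("off\n", f);	/* offlines every secondary SMT thread */
	}
	return fclose(f) ? 1 : 0;
}
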
    2582             : 
    2583             : static int __init cpuhp_sysfs_init(void)
    2584             : {
    2585             :         struct device *dev_root;
    2586             :         int cpu, ret;
    2587             : 
    2588             :         ret = cpu_smt_sysfs_init();
    2589             :         if (ret)
    2590             :                 return ret;
    2591             : 
    2592             :         dev_root = bus_get_dev_root(&cpu_subsys);
    2593             :         if (dev_root) {
    2594             :                 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
    2595             :                 put_device(dev_root);
    2596             :                 if (ret)
    2597             :                         return ret;
    2598             :         }
    2599             : 
    2600             :         for_each_possible_cpu(cpu) {
    2601             :                 struct device *dev = get_cpu_device(cpu);
    2602             : 
    2603             :                 if (!dev)
    2604             :                         continue;
    2605             :                 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
    2606             :                 if (ret)
    2607             :                         return ret;
    2608             :         }
    2609             :         return 0;
    2610             : }
    2611             : device_initcall(cpuhp_sysfs_init);
    2612             : #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
    2613             : 
    2614             : /*
    2615             :  * cpu_bit_bitmap[] is a special, "compressed" data structure that
    2616             :  * represents all NR_CPUS-bit binary values of the form 1<<nr.
    2617             :  *
    2618             :  * It is used by cpumask_of() to get a constant address to a CPU
    2619             :  * mask value that has a single bit set only.
    2620             :  */
    2621             : 
    2622             : /* cpu_bit_bitmap[0] is empty - so we can back into it */
    2623             : #define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
    2624             : #define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
    2625             : #define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
    2626             : #define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
    2627             : 
    2628             : const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
    2629             : 
    2630             :         MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
    2631             :         MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
    2632             : #if BITS_PER_LONG > 32
    2633             :         MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
    2634             :         MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
    2635             : #endif
    2636             : };
    2637             : EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
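
/*
 * Editor's sketch (not part of cpu.c): roughly how the consumer side in
 * include/linux/cpumask.h exploits the layout above. Row
 * 1 + cpu % BITS_PER_LONG holds a word with only bit cpu % BITS_PER_LONG
 * set; stepping the pointer back by cpu / BITS_PER_LONG words puts that
 * word at the right offset inside an NR_CPUS-bit mask, while the zero
 * padding of the preceding rows (including the empty row 0) backs the
 * words in front of it. The helper name here is illustrative.
 */
static inline const struct cpumask *example_cpumask_of(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;	/* slide the set word into its slot */
	return to_cpumask(p);
}
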
    2638             : 
    2639             : const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
    2640             : EXPORT_SYMBOL(cpu_all_bits);
    2641             : 
    2642             : #ifdef CONFIG_INIT_ALL_POSSIBLE
    2643             : struct cpumask __cpu_possible_mask __read_mostly
    2644             :         = {CPU_BITS_ALL};
    2645             : #else
    2646             : struct cpumask __cpu_possible_mask __read_mostly;
    2647             : #endif
    2648             : EXPORT_SYMBOL(__cpu_possible_mask);
    2649             : 
    2650             : struct cpumask __cpu_online_mask __read_mostly;
    2651             : EXPORT_SYMBOL(__cpu_online_mask);
    2652             : 
    2653             : struct cpumask __cpu_present_mask __read_mostly;
    2654             : EXPORT_SYMBOL(__cpu_present_mask);
    2655             : 
    2656             : struct cpumask __cpu_active_mask __read_mostly;
    2657             : EXPORT_SYMBOL(__cpu_active_mask);
    2658             : 
    2659             : struct cpumask __cpu_dying_mask __read_mostly;
    2660             : EXPORT_SYMBOL(__cpu_dying_mask);
    2661             : 
    2662             : atomic_t __num_online_cpus __read_mostly;
    2663             : EXPORT_SYMBOL(__num_online_cpus);
    2664             : 
    2665           0 : void init_cpu_present(const struct cpumask *src)
    2666             : {
    2667           0 :         cpumask_copy(&__cpu_present_mask, src);
    2668           0 : }
    2669             : 
    2670           0 : void init_cpu_possible(const struct cpumask *src)
    2671             : {
    2672           0 :         cpumask_copy(&__cpu_possible_mask, src);
    2673           0 : }
    2674             : 
    2675           0 : void init_cpu_online(const struct cpumask *src)
    2676             : {
    2677           0 :         cpumask_copy(&__cpu_online_mask, src);
    2678           0 : }
    2679             : 
    2680           0 : void set_cpu_online(unsigned int cpu, bool online)
    2681             : {
    2682             :         /*
    2683             :          * atomic_inc/dec() is required to handle the horrid abuse of this
    2684             :          * function by the reboot and kexec code which invoke it from
    2685             :          * IPI/NMI broadcasts when shutting down CPUs. Invocation from
    2686             :          * regular CPU hotplug is properly serialized.
    2687             :          *
    2688             :          * Note that the fact that __num_online_cpus is of type atomic_t
    2689             :          * does not protect readers which are not serialized against
    2690             :          * concurrent hotplug operations.
    2691             :          */
    2692           0 :         if (online) {
    2693           2 :                 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
    2694             :                         atomic_inc(&__num_online_cpus);
    2695             :         } else {
    2696           0 :                 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
    2697             :                         atomic_dec(&__num_online_cpus);
    2698             :         }
    2699           0 : }
    2700             : 
    2701             : /*
    2702             :  * Activate the first processor.
    2703             :  */
    2704           1 : void __init boot_cpu_init(void)
    2705             : {
    2706           1 :         int cpu = smp_processor_id();
    2707             : 
    2708             :         /* Mark the boot cpu "present", "online" etc for SMP and UP case */
    2709           2 :         set_cpu_online(cpu, true);
    2710           2 :         set_cpu_active(cpu, true);
    2711           2 :         set_cpu_present(cpu, true);
    2712           2 :         set_cpu_possible(cpu, true);
    2713             : 
    2714             : #ifdef CONFIG_SMP
    2715             :         __boot_cpu_id = cpu;
    2716             : #endif
    2717           1 : }
    2718             : 
    2719             : /*
    2720             :  * Must be called _AFTER_ setting up the per_cpu areas
    2721             :  */
    2722           1 : void __init boot_cpu_hotplug_init(void)
    2723             : {
    2724             : #ifdef CONFIG_SMP
    2725             :         cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
    2726             : #endif
    2727           3 :         this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
    2728           3 :         this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
    2729           1 : }
    2730             : 
    2731             : /*
    2732             :  * These are used for a global "mitigations=" cmdline option for toggling
    2733             :  * optional CPU mitigations.
    2734             :  */
    2735             : enum cpu_mitigations {
    2736             :         CPU_MITIGATIONS_OFF,
    2737             :         CPU_MITIGATIONS_AUTO,
    2738             :         CPU_MITIGATIONS_AUTO_NOSMT,
    2739             : };
    2740             : 
    2741             : static enum cpu_mitigations cpu_mitigations __ro_after_init =
    2742             :         CPU_MITIGATIONS_AUTO;
    2743             : 
    2744           0 : static int __init mitigations_parse_cmdline(char *arg)
    2745             : {
    2746           0 :         if (!strcmp(arg, "off"))
    2747           0 :                 cpu_mitigations = CPU_MITIGATIONS_OFF;
    2748           0 :         else if (!strcmp(arg, "auto"))
    2749           0 :                 cpu_mitigations = CPU_MITIGATIONS_AUTO;
    2750           0 :         else if (!strcmp(arg, "auto,nosmt"))
    2751           0 :                 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
    2752             :         else
    2753           0 :                 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
    2754             :                         arg);
    2755             : 
    2756           0 :         return 0;
    2757             : }
    2758             : early_param("mitigations", mitigations_parse_cmdline);
    2759             : 
    2760             : /* mitigations=off */
    2761           0 : bool cpu_mitigations_off(void)
    2762             : {
    2763           0 :         return cpu_mitigations == CPU_MITIGATIONS_OFF;
    2764             : }
    2765             : EXPORT_SYMBOL_GPL(cpu_mitigations_off);
    2766             : 
    2767             : /* mitigations=auto,nosmt */
    2768           0 : bool cpu_mitigations_auto_nosmt(void)
    2769             : {
    2770           0 :         return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
    2771             : }
    2772             : EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
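
/*
 * Editor's sketch (not part of cpu.c): how architecture mitigation
 * selection code typically consults the helpers above. The function and
 * mitigation names are hypothetical.
 */
static void __init mydefense_select_mitigation(void)
{
	if (cpu_mitigations_off())
		return;		/* "mitigations=off": leave it unmitigated */

	/* pick and enable the default mitigation here ... */

	if (cpu_mitigations_auto_nosmt())
		pr_info("mydefense: SMT should also be disabled\n");
}
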

Generated by: LCOV version 1.14