LCOV - code coverage report
Current view: top level - drivers/base/power - main.c
Test: coverage.info          Lines:     44 / 777   ( 5.7 %)
Date: 2023-04-06 08:38:28    Functions:  5 /  59   ( 8.5 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * drivers/base/power/main.c - Where the driver meets power management.
       4             :  *
       5             :  * Copyright (c) 2003 Patrick Mochel
       6             :  * Copyright (c) 2003 Open Source Development Lab
       7             :  *
       8             :  * The driver model core calls device_pm_add() when a device is registered.
       9             :  * This will initialize the embedded device_pm_info object in the device
      10             :  * and add it to the list of power-controlled devices. sysfs entries for
      11             :  * controlling device power management will also be added.
      12             :  *
      13             :  * A separate list is used for keeping track of power info, because the power
      14             :  * domain dependencies may differ from the ancestral dependencies that the
      15             :  * subsystem list maintains.
      16             :  */
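
As context for the comment above, a minimal sketch (not part of this
file) of the registration path that ends up in device_pm_add(); the
example_register() helper is hypothetical:

        /* Illustrative only: device_add() calls device_pm_add() for
         * devices that are not marked as PM-exempt. */
        static int example_register(struct device *dev)
        {
                int error;

                device_initialize(dev);  /* refcount, locks, PM fields */
                /* ... set dev->parent, dev_set_name(dev, ...), etc. ... */
                error = device_add(dev); /* reaches device_pm_add() */
                if (error)
                        put_device(dev); /* undo device_initialize() */
                return error;
        }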
      17             : 
      18             : #define pr_fmt(fmt) "PM: " fmt
      19             : #define dev_fmt pr_fmt
      20             : 
      21             : #include <linux/device.h>
      22             : #include <linux/export.h>
      23             : #include <linux/mutex.h>
      24             : #include <linux/pm.h>
      25             : #include <linux/pm_runtime.h>
      26             : #include <linux/pm-trace.h>
      27             : #include <linux/pm_wakeirq.h>
      28             : #include <linux/interrupt.h>
      29             : #include <linux/sched.h>
      30             : #include <linux/sched/debug.h>
      31             : #include <linux/async.h>
      32             : #include <linux/suspend.h>
      33             : #include <trace/events/power.h>
      34             : #include <linux/cpufreq.h>
      35             : #include <linux/devfreq.h>
      36             : #include <linux/timer.h>
      37             : 
      38             : #include "../base.h"
      39             : #include "power.h"
      40             : 
      41             : typedef int (*pm_callback_t)(struct device *);
      42             : 
      43             : #define list_for_each_entry_rcu_locked(pos, head, member) \
      44             :         list_for_each_entry_rcu(pos, head, member, \
      45             :                         device_links_read_lock_held())
      46             : 
      47             : /*
       48             :  * The entries in dpm_list are in depth-first order, simply
      49             :  * because children are guaranteed to be discovered after parents, and
      50             :  * are inserted at the back of the list on discovery.
      51             :  *
      52             :  * Since device_pm_add() may be called with a device lock held,
      53             :  * we must never try to acquire a device lock while holding
      54             :  * dpm_list_mutex.
      55             :  */
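
The locking rule above in code form (a sketch, not taken from the
kernel sources; example_correct_order() is hypothetical):

        /*
         * Because device_pm_add() may run with the device lock held,
         * acquiring a device lock while holding dpm_list_mtx could
         * deadlock; the safe order is device lock first.
         */
        static void example_correct_order(struct device *dev)
        {
                device_lock(dev);               /* device lock first */
                mutex_lock(&dpm_list_mtx);      /* list mutex second */
                /* ... inspect or modify dpm_list ... */
                mutex_unlock(&dpm_list_mtx);
                device_unlock(dev);
        }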
      56             : 
      57             : LIST_HEAD(dpm_list);
      58             : static LIST_HEAD(dpm_prepared_list);
      59             : static LIST_HEAD(dpm_suspended_list);
      60             : static LIST_HEAD(dpm_late_early_list);
      61             : static LIST_HEAD(dpm_noirq_list);
      62             : 
      63             : struct suspend_stats suspend_stats;
      64             : static DEFINE_MUTEX(dpm_list_mtx);
      65             : static pm_message_t pm_transition;
      66             : 
      67             : static int async_error;
      68             : 
      69           0 : static const char *pm_verb(int event)
      70             : {
      71           0 :         switch (event) {
      72             :         case PM_EVENT_SUSPEND:
      73             :                 return "suspend";
      74             :         case PM_EVENT_RESUME:
      75           0 :                 return "resume";
      76             :         case PM_EVENT_FREEZE:
      77           0 :                 return "freeze";
      78             :         case PM_EVENT_QUIESCE:
      79           0 :                 return "quiesce";
      80             :         case PM_EVENT_HIBERNATE:
      81           0 :                 return "hibernate";
      82             :         case PM_EVENT_THAW:
      83           0 :                 return "thaw";
      84             :         case PM_EVENT_RESTORE:
      85           0 :                 return "restore";
      86             :         case PM_EVENT_RECOVER:
      87           0 :                 return "recover";
      88             :         default:
      89           0 :                 return "(unknown PM event)";
      90             :         }
      91             : }
      92             : 
      93             : /**
      94             :  * device_pm_sleep_init - Initialize system suspend-related device fields.
      95             :  * @dev: Device object being initialized.
      96             :  */
      97         570 : void device_pm_sleep_init(struct device *dev)
      98             : {
      99         570 :         dev->power.is_prepared = false;
     100         570 :         dev->power.is_suspended = false;
     101         570 :         dev->power.is_noirq_suspended = false;
     102         570 :         dev->power.is_late_suspended = false;
     103        1140 :         init_completion(&dev->power.completion);
     104         570 :         complete_all(&dev->power.completion);
     105         570 :         dev->power.wakeup = NULL;
     106        1140 :         INIT_LIST_HEAD(&dev->power.entry);
     107         570 : }
     108             : 
     109             : /**
     110             :  * device_pm_lock - Lock the list of active devices used by the PM core.
     111             :  */
     112           0 : void device_pm_lock(void)
     113             : {
     114           0 :         mutex_lock(&dpm_list_mtx);
     115           0 : }
     116             : 
     117             : /**
     118             :  * device_pm_unlock - Unlock the list of active devices used by the PM core.
     119             :  */
     120           0 : void device_pm_unlock(void)
     121             : {
     122           0 :         mutex_unlock(&dpm_list_mtx);
     123           0 : }
     124             : 
     125             : /**
     126             :  * device_pm_add - Add a device to the PM core's list of active devices.
     127             :  * @dev: Device to add to the list.
     128             :  */
     129         554 : void device_pm_add(struct device *dev)
     130             : {
     131             :         /* Skip PM setup/initialization. */
     132         554 :         if (device_pm_not_required(dev))
     133             :                 return;
     134             : 
     135             :         pr_debug("Adding info for %s:%s\n",
     136             :                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
     137         554 :         device_pm_check_callbacks(dev);
     138         554 :         mutex_lock(&dpm_list_mtx);
     139         554 :         if (dev->parent && dev->parent->power.is_prepared)
     140           0 :                 dev_warn(dev, "parent %s should not be sleeping\n",
     141             :                         dev_name(dev->parent));
     142        1108 :         list_add_tail(&dev->power.entry, &dpm_list);
     143         554 :         dev->power.in_dpm_list = true;
     144         554 :         mutex_unlock(&dpm_list_mtx);
     145             : }
     146             : 
     147             : /**
     148             :  * device_pm_remove - Remove a device from the PM core's list of active devices.
     149             :  * @dev: Device to be removed from the list.
     150             :  */
     151          18 : void device_pm_remove(struct device *dev)
     152             : {
     153          18 :         if (device_pm_not_required(dev))
     154             :                 return;
     155             : 
     156             :         pr_debug("Removing info for %s:%s\n",
     157             :                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
     158          18 :         complete_all(&dev->power.completion);
     159          18 :         mutex_lock(&dpm_list_mtx);
     160          36 :         list_del_init(&dev->power.entry);
     161          18 :         dev->power.in_dpm_list = false;
     162          18 :         mutex_unlock(&dpm_list_mtx);
     163          18 :         device_wakeup_disable(dev);
     164          18 :         pm_runtime_remove(dev);
     165          18 :         device_pm_check_callbacks(dev);
     166             : }
     167             : 
     168             : /**
     169             :  * device_pm_move_before - Move device in the PM core's list of active devices.
     170             :  * @deva: Device to move in dpm_list.
     171             :  * @devb: Device @deva should come before.
     172             :  */
     173           0 : void device_pm_move_before(struct device *deva, struct device *devb)
     174             : {
     175             :         pr_debug("Moving %s:%s before %s:%s\n",
     176             :                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
     177             :                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
     178             :         /* Delete deva from dpm_list and reinsert before devb. */
     179           0 :         list_move_tail(&deva->power.entry, &devb->power.entry);
     180           0 : }
     181             : 
     182             : /**
     183             :  * device_pm_move_after - Move device in the PM core's list of active devices.
     184             :  * @deva: Device to move in dpm_list.
     185             :  * @devb: Device @deva should come after.
     186             :  */
     187           0 : void device_pm_move_after(struct device *deva, struct device *devb)
     188             : {
     189             :         pr_debug("Moving %s:%s after %s:%s\n",
     190             :                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
     191             :                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
     192             :         /* Delete deva from dpm_list and reinsert after devb. */
     193           0 :         list_move(&deva->power.entry, &devb->power.entry);
     194           0 : }
     195             : 
     196             : /**
     197             :  * device_pm_move_last - Move device to end of the PM core's list of devices.
     198             :  * @dev: Device to move in dpm_list.
     199             :  */
     200           0 : void device_pm_move_last(struct device *dev)
     201             : {
     202             :         pr_debug("Moving %s:%s to end of list\n",
     203             :                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
     204           0 :         list_move_tail(&dev->power.entry, &dpm_list);
     205           0 : }
     206             : 
     207             : static ktime_t initcall_debug_start(struct device *dev, void *cb)
     208             : {
     209             :         if (!pm_print_times_enabled)
     210             :                 return 0;
     211             : 
     212             :         dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
     213             :                  task_pid_nr(current),
     214             :                  dev->parent ? dev_name(dev->parent) : "none");
     215             :         return ktime_get();
     216             : }
     217             : 
     218             : static void initcall_debug_report(struct device *dev, ktime_t calltime,
     219             :                                   void *cb, int error)
     220             : {
     221             :         ktime_t rettime;
     222             : 
     223             :         if (!pm_print_times_enabled)
     224             :                 return;
     225             : 
     226             :         rettime = ktime_get();
     227             :         dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
     228             :                  (unsigned long long)ktime_us_delta(rettime, calltime));
     229             : }
     230             : 
     231             : /**
     232             :  * dpm_wait - Wait for a PM operation to complete.
     233             :  * @dev: Device to wait for.
     234             :  * @async: If unset, wait only if the device's power.async_suspend flag is set.
     235             :  */
     236           0 : static void dpm_wait(struct device *dev, bool async)
     237             : {
     238           0 :         if (!dev)
     239             :                 return;
     240             : 
     241           0 :         if (async || (pm_async_enabled && dev->power.async_suspend))
     242           0 :                 wait_for_completion(&dev->power.completion);
     243             : }
     244             : 
     245           0 : static int dpm_wait_fn(struct device *dev, void *async_ptr)
     246             : {
     247           0 :         dpm_wait(dev, *((bool *)async_ptr));
     248           0 :         return 0;
     249             : }
     250             : 
     251             : static void dpm_wait_for_children(struct device *dev, bool async)
     252             : {
      253           0 :         device_for_each_child(dev, &async, dpm_wait_fn);
     254             : }
     255             : 
     256           0 : static void dpm_wait_for_suppliers(struct device *dev, bool async)
     257             : {
     258             :         struct device_link *link;
     259             :         int idx;
     260             : 
     261           0 :         idx = device_links_read_lock();
     262             : 
     263             :         /*
     264             :          * If the supplier goes away right after we've checked the link to it,
     265             :          * we'll wait for its completion to change the state, but that's fine,
     266             :          * because the only things that will block as a result are the SRCU
     267             :          * callbacks freeing the link objects for the links in the list we're
     268             :          * walking.
     269             :          */
     270           0 :         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
     271           0 :                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
     272           0 :                         dpm_wait(link->supplier, async);
     273             : 
     274           0 :         device_links_read_unlock(idx);
     275           0 : }
     276             : 
     277           0 : static bool dpm_wait_for_superior(struct device *dev, bool async)
     278             : {
     279             :         struct device *parent;
     280             : 
     281             :         /*
     282             :          * If the device is resumed asynchronously and the parent's callback
     283             :          * deletes both the device and the parent itself, the parent object may
     284             :          * be freed while this function is running, so avoid that by reference
     285             :          * counting the parent once more unless the device has been deleted
     286             :          * already (in which case return right away).
     287             :          */
     288           0 :         mutex_lock(&dpm_list_mtx);
     289             : 
     290           0 :         if (!device_pm_initialized(dev)) {
     291           0 :                 mutex_unlock(&dpm_list_mtx);
     292           0 :                 return false;
     293             :         }
     294             : 
     295           0 :         parent = get_device(dev->parent);
     296             : 
     297           0 :         mutex_unlock(&dpm_list_mtx);
     298             : 
     299           0 :         dpm_wait(parent, async);
     300           0 :         put_device(parent);
     301             : 
     302           0 :         dpm_wait_for_suppliers(dev, async);
     303             : 
     304             :         /*
     305             :          * If the parent's callback has deleted the device, attempting to resume
     306             :          * it would be invalid, so avoid doing that then.
     307             :          */
     308           0 :         return device_pm_initialized(dev);
     309             : }
     310             : 
     311           0 : static void dpm_wait_for_consumers(struct device *dev, bool async)
     312             : {
     313             :         struct device_link *link;
     314             :         int idx;
     315             : 
     316           0 :         idx = device_links_read_lock();
     317             : 
     318             :         /*
     319             :          * The status of a device link can only be changed from "dormant" by a
     320             :          * probe, but that cannot happen during system suspend/resume.  In
     321             :          * theory it can change to "dormant" at that time, but then it is
      322             :          * reasonable to wait for the target device anyway (e.g. if it goes
     323             :          * away, it's better to wait for it to go away completely and then
     324             :          * continue instead of trying to continue in parallel with its
     325             :          * unregistration).
     326             :          */
     327           0 :         list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
     328           0 :                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
     329           0 :                         dpm_wait(link->consumer, async);
     330             : 
     331           0 :         device_links_read_unlock(idx);
     332           0 : }
     333             : 
     334           0 : static void dpm_wait_for_subordinate(struct device *dev, bool async)
     335             : {
     336           0 :         dpm_wait_for_children(dev, async);
     337           0 :         dpm_wait_for_consumers(dev, async);
     338           0 : }
     339             : 
     340             : /**
     341             :  * pm_op - Return the PM operation appropriate for given PM event.
     342             :  * @ops: PM operations to choose from.
     343             :  * @state: PM transition of the system being carried out.
     344             :  */
     345             : static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
     346             : {
     347           0 :         switch (state.event) {
     348             : #ifdef CONFIG_SUSPEND
     349             :         case PM_EVENT_SUSPEND:
     350           0 :                 return ops->suspend;
     351             :         case PM_EVENT_RESUME:
     352           0 :                 return ops->resume;
     353             : #endif /* CONFIG_SUSPEND */
     354             : #ifdef CONFIG_HIBERNATE_CALLBACKS
     355             :         case PM_EVENT_FREEZE:
     356             :         case PM_EVENT_QUIESCE:
     357             :                 return ops->freeze;
     358             :         case PM_EVENT_HIBERNATE:
     359             :                 return ops->poweroff;
     360             :         case PM_EVENT_THAW:
     361             :         case PM_EVENT_RECOVER:
     362             :                 return ops->thaw;
     363             :         case PM_EVENT_RESTORE:
     364             :                 return ops->restore;
     365             : #endif /* CONFIG_HIBERNATE_CALLBACKS */
     366             :         }
     367             : 
     368             :         return NULL;
     369             : }
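
For reference, the driver side that pm_op() selects from: a dev_pm_ops
whose ->suspend()/->resume() members are what pm_op() returns for
PM_EVENT_SUSPEND and PM_EVENT_RESUME (a sketch; the example_* names
are hypothetical):

        static int example_suspend(struct device *dev)
        {
                /* quiesce the hardware, save volatile state */
                return 0;
        }

        static int example_resume(struct device *dev)
        {
                /* restore state, re-enable the hardware */
                return 0;
        }

        static const struct dev_pm_ops example_pm_ops = {
                .suspend = example_suspend,
                .resume  = example_resume,
        };
        /* typically wired up via a driver's .driver.pm = &example_pm_ops */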
     370             : 
     371             : /**
     372             :  * pm_late_early_op - Return the PM operation appropriate for given PM event.
     373             :  * @ops: PM operations to choose from.
     374             :  * @state: PM transition of the system being carried out.
     375             :  *
      376             :  * Runtime PM is disabled for the device while the returned callback runs.
     377             :  */
     378             : static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
     379             :                                       pm_message_t state)
     380             : {
     381           0 :         switch (state.event) {
     382             : #ifdef CONFIG_SUSPEND
     383             :         case PM_EVENT_SUSPEND:
     384           0 :                 return ops->suspend_late;
     385             :         case PM_EVENT_RESUME:
     386           0 :                 return ops->resume_early;
     387             : #endif /* CONFIG_SUSPEND */
     388             : #ifdef CONFIG_HIBERNATE_CALLBACKS
     389             :         case PM_EVENT_FREEZE:
     390             :         case PM_EVENT_QUIESCE:
     391             :                 return ops->freeze_late;
     392             :         case PM_EVENT_HIBERNATE:
     393             :                 return ops->poweroff_late;
     394             :         case PM_EVENT_THAW:
     395             :         case PM_EVENT_RECOVER:
     396             :                 return ops->thaw_early;
     397             :         case PM_EVENT_RESTORE:
     398             :                 return ops->restore_early;
     399             : #endif /* CONFIG_HIBERNATE_CALLBACKS */
     400             :         }
     401             : 
     402             :         return NULL;
     403             : }
     404             : 
     405             : /**
     406             :  * pm_noirq_op - Return the PM operation appropriate for given PM event.
     407             :  * @ops: PM operations to choose from.
     408             :  * @state: PM transition of the system being carried out.
     409             :  *
      410             :  * Device drivers will not receive interrupts while the callback returned by
      411             :  * this function is running.
     412             :  */
     413             : static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
     414             : {
     415           0 :         switch (state.event) {
     416             : #ifdef CONFIG_SUSPEND
     417             :         case PM_EVENT_SUSPEND:
     418           0 :                 return ops->suspend_noirq;
     419             :         case PM_EVENT_RESUME:
     420           0 :                 return ops->resume_noirq;
     421             : #endif /* CONFIG_SUSPEND */
     422             : #ifdef CONFIG_HIBERNATE_CALLBACKS
     423             :         case PM_EVENT_FREEZE:
     424             :         case PM_EVENT_QUIESCE:
     425             :                 return ops->freeze_noirq;
     426             :         case PM_EVENT_HIBERNATE:
     427             :                 return ops->poweroff_noirq;
     428             :         case PM_EVENT_THAW:
     429             :         case PM_EVENT_RECOVER:
     430             :                 return ops->thaw_noirq;
     431             :         case PM_EVENT_RESTORE:
     432             :                 return ops->restore_noirq;
     433             : #endif /* CONFIG_HIBERNATE_CALLBACKS */
     434             :         }
     435             : 
     436             :         return NULL;
     437             : }
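
Taken together, pm_op(), pm_late_early_op() and pm_noirq_op() each pick
one member per phase from the same dev_pm_ops. A sketch of a driver
covering all three phases (callback bodies elided; names hypothetical):

        static const struct dev_pm_ops example_phased_pm_ops = {
                .suspend       = example_suspend,       /* pm_op()            */
                .suspend_late  = example_suspend_late,  /* pm_late_early_op() */
                .suspend_noirq = example_suspend_noirq, /* pm_noirq_op()      */
                .resume_noirq  = example_resume_noirq,
                .resume_early  = example_resume_early,
                .resume        = example_resume,
        };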
     438             : 
     439             : static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
     440             : {
     441             :         dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
     442             :                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
     443             :                 ", may wakeup" : "", dev->power.driver_flags);
     444             : }
     445             : 
     446             : static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
     447             :                         int error)
     448             : {
     449           0 :         dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
     450             :                 error);
     451             : }
     452             : 
     453             : static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
     454             :                           const char *info)
     455             : {
     456             :         ktime_t calltime;
     457             :         u64 usecs64;
     458             :         int usecs;
     459             : 
     460           0 :         calltime = ktime_get();
     461           0 :         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
     462           0 :         do_div(usecs64, NSEC_PER_USEC);
     463           0 :         usecs = usecs64;
     464             :         if (usecs == 0)
     465             :                 usecs = 1;
     466             : 
     467             :         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
     468             :                   info ?: "", info ? " " : "", pm_verb(state.event),
     469             :                   error ? "aborted" : "complete",
     470             :                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
     471             : }
     472             : 
     473           0 : static int dpm_run_callback(pm_callback_t cb, struct device *dev,
     474             :                             pm_message_t state, const char *info)
     475             : {
     476             :         ktime_t calltime;
     477             :         int error;
     478             : 
     479           0 :         if (!cb)
     480             :                 return 0;
     481             : 
     482           0 :         calltime = initcall_debug_start(dev, cb);
     483             : 
     484           0 :         pm_dev_dbg(dev, state, info);
     485           0 :         trace_device_pm_callback_start(dev, info, state.event);
     486           0 :         error = cb(dev);
     487           0 :         trace_device_pm_callback_end(dev, error);
     488             :         suspend_report_result(dev, cb, error);
     489             : 
     490             :         initcall_debug_report(dev, calltime, cb, error);
     491             : 
     492             :         return error;
     493             : }
     494             : 
     495             : #ifdef CONFIG_DPM_WATCHDOG
     496             : struct dpm_watchdog {
     497             :         struct device           *dev;
     498             :         struct task_struct      *tsk;
     499             :         struct timer_list       timer;
     500             : };
     501             : 
     502             : #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
     503             :         struct dpm_watchdog wd
     504             : 
     505             : /**
     506             :  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
     507             :  * @t: The timer that PM watchdog depends on.
     508             :  *
     509             :  * Called when a driver has timed out suspending or resuming.
     510             :  * There's not much we can do here to recover so panic() to
     511             :  * capture a crash-dump in pstore.
     512             :  */
     513             : static void dpm_watchdog_handler(struct timer_list *t)
     514             : {
     515             :         struct dpm_watchdog *wd = from_timer(wd, t, timer);
     516             : 
     517             :         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
     518             :         show_stack(wd->tsk, NULL, KERN_EMERG);
     519             :         panic("%s %s: unrecoverable failure\n",
     520             :                 dev_driver_string(wd->dev), dev_name(wd->dev));
     521             : }
     522             : 
     523             : /**
     524             :  * dpm_watchdog_set - Enable pm watchdog for given device.
     525             :  * @wd: Watchdog. Must be allocated on the stack.
     526             :  * @dev: Device to handle.
     527             :  */
     528             : static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
     529             : {
     530             :         struct timer_list *timer = &wd->timer;
     531             : 
     532             :         wd->dev = dev;
     533             :         wd->tsk = current;
     534             : 
     535             :         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
     536             :         /* use same timeout value for both suspend and resume */
     537             :         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
     538             :         add_timer(timer);
     539             : }
     540             : 
     541             : /**
     542             :  * dpm_watchdog_clear - Disable suspend/resume watchdog.
     543             :  * @wd: Watchdog to disable.
     544             :  */
     545             : static void dpm_watchdog_clear(struct dpm_watchdog *wd)
     546             : {
     547             :         struct timer_list *timer = &wd->timer;
     548             : 
     549             :         del_timer_sync(timer);
     550             :         destroy_timer_on_stack(timer);
     551             : }
     552             : #else
     553             : #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
     554             : #define dpm_watchdog_set(x, y)
     555             : #define dpm_watchdog_clear(x)
     556             : #endif
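
The three watchdog hooks are meant to bracket a potentially slow
callback, arming a panic timeout around it; device_resume() below uses
exactly this pattern:

        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        dpm_watchdog_set(&wd, dev);     /* arm the timeout */
        error = dpm_run_callback(callback, dev, state, info);
        dpm_watchdog_clear(&wd);        /* disarm once the callback returns */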
     557             : 
     558             : /*------------------------- Resume routines -------------------------*/
     559             : 
     560             : /**
     561             :  * dev_pm_skip_resume - System-wide device resume optimization check.
     562             :  * @dev: Target device.
     563             :  *
     564             :  * Return:
     565             :  * - %false if the transition under way is RESTORE.
     566             :  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
     567             :  * - The logical negation of %power.must_resume otherwise (that is, when the
     568             :  *   transition under way is RESUME).
     569             :  */
     570           0 : bool dev_pm_skip_resume(struct device *dev)
     571             : {
     572           0 :         if (pm_transition.event == PM_EVENT_RESTORE)
     573             :                 return false;
     574             : 
     575           0 :         if (pm_transition.event == PM_EVENT_THAW)
     576           0 :                 return dev_pm_skip_suspend(dev);
     577             : 
     578           0 :         return !dev->power.must_resume;
     579             : }
     580             : 
     581             : /**
     582             :  * device_resume_noirq - Execute a "noirq resume" callback for given device.
     583             :  * @dev: Device to handle.
     584             :  * @state: PM transition of the system being carried out.
     585             :  * @async: If true, the device is being resumed asynchronously.
     586             :  *
     587             :  * The driver of @dev will not receive interrupts while this function is being
     588             :  * executed.
     589             :  */
     590           0 : static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
     591             : {
     592           0 :         pm_callback_t callback = NULL;
     593           0 :         const char *info = NULL;
     594             :         bool skip_resume;
     595           0 :         int error = 0;
     596             : 
     597             :         TRACE_DEVICE(dev);
     598             :         TRACE_RESUME(0);
     599             : 
     600           0 :         if (dev->power.syscore || dev->power.direct_complete)
     601             :                 goto Out;
     602             : 
     603           0 :         if (!dev->power.is_noirq_suspended)
     604             :                 goto Out;
     605             : 
     606           0 :         if (!dpm_wait_for_superior(dev, async))
     607             :                 goto Out;
     608             : 
     609           0 :         skip_resume = dev_pm_skip_resume(dev);
     610             :         /*
     611             :          * If the driver callback is skipped below or by the middle layer
     612             :          * callback and device_resume_early() also skips the driver callback for
     613             :          * this device later, it needs to appear as "suspended" to PM-runtime,
     614             :          * so change its status accordingly.
     615             :          *
     616             :          * Otherwise, the device is going to be resumed, so set its PM-runtime
     617             :          * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
     618             :          * to avoid confusing drivers that don't use it.
     619             :          */
     620           0 :         if (skip_resume)
     621             :                 pm_runtime_set_suspended(dev);
     622           0 :         else if (dev_pm_skip_suspend(dev))
     623             :                 pm_runtime_set_active(dev);
     624             : 
     625           0 :         if (dev->pm_domain) {
     626           0 :                 info = "noirq power domain ";
     627           0 :                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
     628           0 :         } else if (dev->type && dev->type->pm) {
     629           0 :                 info = "noirq type ";
     630           0 :                 callback = pm_noirq_op(dev->type->pm, state);
     631           0 :         } else if (dev->class && dev->class->pm) {
     632           0 :                 info = "noirq class ";
     633           0 :                 callback = pm_noirq_op(dev->class->pm, state);
     634           0 :         } else if (dev->bus && dev->bus->pm) {
     635           0 :                 info = "noirq bus ";
     636           0 :                 callback = pm_noirq_op(dev->bus->pm, state);
     637             :         }
     638           0 :         if (callback)
     639             :                 goto Run;
     640             : 
     641           0 :         if (skip_resume)
     642             :                 goto Skip;
     643             : 
     644           0 :         if (dev->driver && dev->driver->pm) {
     645           0 :                 info = "noirq driver ";
     646           0 :                 callback = pm_noirq_op(dev->driver->pm, state);
     647             :         }
     648             : 
     649             : Run:
     650           0 :         error = dpm_run_callback(callback, dev, state, info);
     651             : 
     652             : Skip:
     653           0 :         dev->power.is_noirq_suspended = false;
     654             : 
     655             : Out:
     656           0 :         complete_all(&dev->power.completion);
     657             :         TRACE_RESUME(error);
     658           0 :         return error;
     659             : }
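
The dev_pm_skip_suspend()/pm_runtime_set_*() handling above is driven by
a flag the driver sets at probe time. A sketch (example_probe() is
hypothetical):

        static int example_probe(struct device *dev)
        {
                /*
                 * Let the PM core skip this device's suspend callbacks
                 * when it is already runtime-suspended; the noirq
                 * resume path above then fixes up the PM-runtime
                 * status to match what was actually run.
                 */
                dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
                return 0;
        }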
     660             : 
     661             : static bool is_async(struct device *dev)
     662             : {
     663           0 :         return dev->power.async_suspend && pm_async_enabled
     664           0 :                 && !pm_trace_is_enabled();
     665             : }
     666             : 
     667           0 : static bool dpm_async_fn(struct device *dev, async_func_t func)
     668             : {
     669           0 :         reinit_completion(&dev->power.completion);
     670             : 
     671           0 :         if (is_async(dev)) {
     672           0 :                 get_device(dev);
     673           0 :                 async_schedule_dev(func, dev);
     674           0 :                 return true;
     675             :         }
     676             : 
     677             :         return false;
     678             : }
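
is_async() only returns true for devices that opted in. A driver
enables that during probe (a sketch; example_probe() is hypothetical):

        static int example_probe(struct device *dev)
        {
                /* Suspend/resume this device in parallel with others;
                 * ordering against parents and suppliers is still
                 * enforced through power.completion (see dpm_wait()). */
                device_enable_async_suspend(dev);
                return 0;
        }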
     679             : 
     680           0 : static void async_resume_noirq(void *data, async_cookie_t cookie)
     681             : {
     682           0 :         struct device *dev = (struct device *)data;
     683             :         int error;
     684             : 
     685           0 :         error = device_resume_noirq(dev, pm_transition, true);
     686           0 :         if (error)
     687             :                 pm_dev_err(dev, pm_transition, " async", error);
     688             : 
     689           0 :         put_device(dev);
     690           0 : }
     691             : 
     692           0 : static void dpm_noirq_resume_devices(pm_message_t state)
     693             : {
     694             :         struct device *dev;
     695           0 :         ktime_t starttime = ktime_get();
     696             : 
     697           0 :         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
     698           0 :         mutex_lock(&dpm_list_mtx);
     699           0 :         pm_transition = state;
     700             : 
     701             :         /*
      702             :          * Advance the async threads upfront, in case the start of
      703             :          * the async threads would otherwise be delayed by devices
      704             :          * resuming synchronously.
     705             :          */
     706           0 :         list_for_each_entry(dev, &dpm_noirq_list, power.entry)
     707           0 :                 dpm_async_fn(dev, async_resume_noirq);
     708             : 
     709           0 :         while (!list_empty(&dpm_noirq_list)) {
     710           0 :                 dev = to_device(dpm_noirq_list.next);
     711           0 :                 get_device(dev);
     712           0 :                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
     713             : 
     714           0 :                 mutex_unlock(&dpm_list_mtx);
     715             : 
     716           0 :                 if (!is_async(dev)) {
     717             :                         int error;
     718             : 
     719           0 :                         error = device_resume_noirq(dev, state, false);
     720           0 :                         if (error) {
     721           0 :                                 suspend_stats.failed_resume_noirq++;
     722           0 :                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
     723           0 :                                 dpm_save_failed_dev(dev_name(dev));
     724             :                                 pm_dev_err(dev, state, " noirq", error);
     725             :                         }
     726             :                 }
     727             : 
     728           0 :                 put_device(dev);
     729             : 
     730           0 :                 mutex_lock(&dpm_list_mtx);
     731             :         }
     732           0 :         mutex_unlock(&dpm_list_mtx);
     733           0 :         async_synchronize_full();
     734           0 :         dpm_show_time(starttime, state, 0, "noirq");
     735           0 :         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
     736           0 : }
     737             : 
     738             : /**
     739             :  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
     740             :  * @state: PM transition of the system being carried out.
     741             :  *
     742             :  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
     743             :  * allow device drivers' interrupt handlers to be called.
     744             :  */
     745           0 : void dpm_resume_noirq(pm_message_t state)
     746             : {
     747           0 :         dpm_noirq_resume_devices(state);
     748             : 
     749           0 :         resume_device_irqs();
     750           0 :         device_wakeup_disarm_wake_irqs();
     751           0 : }
     752             : 
     753             : /**
     754             :  * device_resume_early - Execute an "early resume" callback for given device.
     755             :  * @dev: Device to handle.
     756             :  * @state: PM transition of the system being carried out.
     757             :  * @async: If true, the device is being resumed asynchronously.
     758             :  *
     759             :  * Runtime PM is disabled for @dev while this function is being executed.
     760             :  */
     761           0 : static int device_resume_early(struct device *dev, pm_message_t state, bool async)
     762             : {
     763           0 :         pm_callback_t callback = NULL;
     764           0 :         const char *info = NULL;
     765           0 :         int error = 0;
     766             : 
     767             :         TRACE_DEVICE(dev);
     768             :         TRACE_RESUME(0);
     769             : 
     770           0 :         if (dev->power.syscore || dev->power.direct_complete)
     771             :                 goto Out;
     772             : 
     773           0 :         if (!dev->power.is_late_suspended)
     774             :                 goto Out;
     775             : 
     776           0 :         if (!dpm_wait_for_superior(dev, async))
     777             :                 goto Out;
     778             : 
     779           0 :         if (dev->pm_domain) {
     780           0 :                 info = "early power domain ";
     781           0 :                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
     782           0 :         } else if (dev->type && dev->type->pm) {
     783           0 :                 info = "early type ";
     784           0 :                 callback = pm_late_early_op(dev->type->pm, state);
     785           0 :         } else if (dev->class && dev->class->pm) {
     786           0 :                 info = "early class ";
     787           0 :                 callback = pm_late_early_op(dev->class->pm, state);
     788           0 :         } else if (dev->bus && dev->bus->pm) {
     789           0 :                 info = "early bus ";
     790           0 :                 callback = pm_late_early_op(dev->bus->pm, state);
     791             :         }
     792           0 :         if (callback)
     793             :                 goto Run;
     794             : 
     795           0 :         if (dev_pm_skip_resume(dev))
     796             :                 goto Skip;
     797             : 
     798           0 :         if (dev->driver && dev->driver->pm) {
     799           0 :                 info = "early driver ";
     800           0 :                 callback = pm_late_early_op(dev->driver->pm, state);
     801             :         }
     802             : 
     803             : Run:
     804           0 :         error = dpm_run_callback(callback, dev, state, info);
     805             : 
     806             : Skip:
     807           0 :         dev->power.is_late_suspended = false;
     808             : 
     809             : Out:
     810             :         TRACE_RESUME(error);
     811             : 
     812           0 :         pm_runtime_enable(dev);
     813           0 :         complete_all(&dev->power.completion);
     814           0 :         return error;
     815             : }
     816             : 
     817           0 : static void async_resume_early(void *data, async_cookie_t cookie)
     818             : {
     819           0 :         struct device *dev = (struct device *)data;
     820             :         int error;
     821             : 
     822           0 :         error = device_resume_early(dev, pm_transition, true);
     823           0 :         if (error)
     824             :                 pm_dev_err(dev, pm_transition, " async", error);
     825             : 
     826           0 :         put_device(dev);
     827           0 : }
     828             : 
     829             : /**
     830             :  * dpm_resume_early - Execute "early resume" callbacks for all devices.
     831             :  * @state: PM transition of the system being carried out.
     832             :  */
     833           0 : void dpm_resume_early(pm_message_t state)
     834             : {
     835             :         struct device *dev;
     836           0 :         ktime_t starttime = ktime_get();
     837             : 
     838           0 :         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
     839           0 :         mutex_lock(&dpm_list_mtx);
     840           0 :         pm_transition = state;
     841             : 
     842             :         /*
      843             :          * Advance the async threads upfront, in case the start of
      844             :          * the async threads would otherwise be delayed by devices
      845             :          * resuming synchronously.
     846             :          */
     847           0 :         list_for_each_entry(dev, &dpm_late_early_list, power.entry)
     848           0 :                 dpm_async_fn(dev, async_resume_early);
     849             : 
     850           0 :         while (!list_empty(&dpm_late_early_list)) {
     851           0 :                 dev = to_device(dpm_late_early_list.next);
     852           0 :                 get_device(dev);
     853           0 :                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
     854             : 
     855           0 :                 mutex_unlock(&dpm_list_mtx);
     856             : 
     857           0 :                 if (!is_async(dev)) {
     858             :                         int error;
     859             : 
     860           0 :                         error = device_resume_early(dev, state, false);
     861           0 :                         if (error) {
     862           0 :                                 suspend_stats.failed_resume_early++;
     863           0 :                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
     864           0 :                                 dpm_save_failed_dev(dev_name(dev));
     865             :                                 pm_dev_err(dev, state, " early", error);
     866             :                         }
     867             :                 }
     868             : 
     869           0 :                 put_device(dev);
     870             : 
     871           0 :                 mutex_lock(&dpm_list_mtx);
     872             :         }
     873           0 :         mutex_unlock(&dpm_list_mtx);
     874           0 :         async_synchronize_full();
     875           0 :         dpm_show_time(starttime, state, 0, "early");
     876           0 :         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
     877           0 : }
     878             : 
     879             : /**
     880             :  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
     881             :  * @state: PM transition of the system being carried out.
     882             :  */
     883           0 : void dpm_resume_start(pm_message_t state)
     884             : {
     885             :         dpm_resume_noirq(state);
     886           0 :         dpm_resume_early(state);
     887           0 : }
     888             : EXPORT_SYMBOL_GPL(dpm_resume_start);
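
For orientation, the resume-side phases run in this order (a sketch;
dpm_resume() and dpm_complete() appear further down in this file):

        /* noirq -> early -> full resume -> complete */
        dpm_resume_noirq(state);        /* dpm_resume_start(), part 1 */
        dpm_resume_early(state);        /* dpm_resume_start(), part 2 */
        dpm_resume(state);
        dpm_complete(state);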
     889             : 
     890             : /**
     891             :  * device_resume - Execute "resume" callbacks for given device.
     892             :  * @dev: Device to handle.
     893             :  * @state: PM transition of the system being carried out.
     894             :  * @async: If true, the device is being resumed asynchronously.
     895             :  */
     896           0 : static int device_resume(struct device *dev, pm_message_t state, bool async)
     897             : {
     898           0 :         pm_callback_t callback = NULL;
     899           0 :         const char *info = NULL;
     900           0 :         int error = 0;
     901             :         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
     902             : 
     903             :         TRACE_DEVICE(dev);
     904             :         TRACE_RESUME(0);
     905             : 
     906           0 :         if (dev->power.syscore)
     907             :                 goto Complete;
     908             : 
     909           0 :         if (dev->power.direct_complete) {
     910             :                 /* Match the pm_runtime_disable() in __device_suspend(). */
     911           0 :                 pm_runtime_enable(dev);
     912           0 :                 goto Complete;
     913             :         }
     914             : 
     915           0 :         if (!dpm_wait_for_superior(dev, async))
     916             :                 goto Complete;
     917             : 
     918             :         dpm_watchdog_set(&wd, dev);
     919           0 :         device_lock(dev);
     920             : 
     921             :         /*
     922             :          * This is a fib.  But we'll allow new children to be added below
     923             :          * a resumed device, even if the device hasn't been completed yet.
     924             :          */
     925           0 :         dev->power.is_prepared = false;
     926             : 
     927           0 :         if (!dev->power.is_suspended)
     928             :                 goto Unlock;
     929             : 
     930           0 :         if (dev->pm_domain) {
     931           0 :                 info = "power domain ";
     932           0 :                 callback = pm_op(&dev->pm_domain->ops, state);
     933             :                 goto Driver;
     934             :         }
     935             : 
     936           0 :         if (dev->type && dev->type->pm) {
     937           0 :                 info = "type ";
     938           0 :                 callback = pm_op(dev->type->pm, state);
     939             :                 goto Driver;
     940             :         }
     941             : 
     942           0 :         if (dev->class && dev->class->pm) {
     943           0 :                 info = "class ";
     944           0 :                 callback = pm_op(dev->class->pm, state);
     945             :                 goto Driver;
     946             :         }
     947             : 
     948           0 :         if (dev->bus) {
     949           0 :                 if (dev->bus->pm) {
     950           0 :                         info = "bus ";
     951           0 :                         callback = pm_op(dev->bus->pm, state);
     952           0 :                 } else if (dev->bus->resume) {
     953             :                         info = "legacy bus ";
     954             :                         callback = dev->bus->resume;
     955             :                         goto End;
     956             :                 }
     957             :         }
     958             : 
     959             :  Driver:
     960           0 :         if (!callback && dev->driver && dev->driver->pm) {
     961           0 :                 info = "driver ";
     962           0 :                 callback = pm_op(dev->driver->pm, state);
     963             :         }
     964             : 
     965             :  End:
     966           0 :         error = dpm_run_callback(callback, dev, state, info);
     967           0 :         dev->power.is_suspended = false;
     968             : 
     969             :  Unlock:
     970             :         device_unlock(dev);
     971             :         dpm_watchdog_clear(&wd);
     972             : 
     973             :  Complete:
     974           0 :         complete_all(&dev->power.completion);
     975             : 
     976             :         TRACE_RESUME(error);
     977             : 
     978           0 :         return error;
     979             : }
     980             : 
     981           0 : static void async_resume(void *data, async_cookie_t cookie)
     982             : {
     983           0 :         struct device *dev = (struct device *)data;
     984             :         int error;
     985             : 
     986           0 :         error = device_resume(dev, pm_transition, true);
     987           0 :         if (error)
     988             :                 pm_dev_err(dev, pm_transition, " async", error);
     989           0 :         put_device(dev);
     990           0 : }
     991             : 
     992             : /**
     993             :  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
     994             :  * @state: PM transition of the system being carried out.
     995             :  *
     996             :  * Execute the appropriate "resume" callback for all devices whose status
     997             :  * indicates that they are suspended.
     998             :  */
     999           0 : void dpm_resume(pm_message_t state)
    1000             : {
    1001             :         struct device *dev;
    1002           0 :         ktime_t starttime = ktime_get();
    1003             : 
    1004           0 :         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
    1005             :         might_sleep();
    1006             : 
    1007           0 :         mutex_lock(&dpm_list_mtx);
    1008           0 :         pm_transition = state;
    1009           0 :         async_error = 0;
    1010             : 
    1011           0 :         list_for_each_entry(dev, &dpm_suspended_list, power.entry)
    1012           0 :                 dpm_async_fn(dev, async_resume);
    1013             : 
    1014           0 :         while (!list_empty(&dpm_suspended_list)) {
    1015           0 :                 dev = to_device(dpm_suspended_list.next);
    1016           0 :                 get_device(dev);
    1017           0 :                 if (!is_async(dev)) {
    1018             :                         int error;
    1019             : 
    1020           0 :                         mutex_unlock(&dpm_list_mtx);
    1021             : 
    1022           0 :                         error = device_resume(dev, state, false);
    1023           0 :                         if (error) {
    1024           0 :                                 suspend_stats.failed_resume++;
    1025           0 :                                 dpm_save_failed_step(SUSPEND_RESUME);
    1026           0 :                                 dpm_save_failed_dev(dev_name(dev));
    1027             :                                 pm_dev_err(dev, state, "", error);
    1028             :                         }
    1029             : 
    1030           0 :                         mutex_lock(&dpm_list_mtx);
    1031             :                 }
    1032           0 :                 if (!list_empty(&dev->power.entry))
    1033           0 :                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
    1034             : 
    1035           0 :                 mutex_unlock(&dpm_list_mtx);
    1036             : 
    1037           0 :                 put_device(dev);
    1038             : 
    1039           0 :                 mutex_lock(&dpm_list_mtx);
    1040             :         }
    1041           0 :         mutex_unlock(&dpm_list_mtx);
    1042           0 :         async_synchronize_full();
    1043           0 :         dpm_show_time(starttime, state, 0, NULL);
    1044             : 
    1045             :         cpufreq_resume();
    1046             :         devfreq_resume();
    1047           0 :         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
    1048           0 : }
    1049             : 
    1050             : /**
    1051             :  * device_complete - Complete a PM transition for given device.
    1052             :  * @dev: Device to handle.
    1053             :  * @state: PM transition of the system being carried out.
    1054             :  */
    1055           0 : static void device_complete(struct device *dev, pm_message_t state)
    1056             : {
    1057           0 :         void (*callback)(struct device *) = NULL;
    1058           0 :         const char *info = NULL;
    1059             : 
    1060           0 :         if (dev->power.syscore)
    1061             :                 goto out;
    1062             : 
    1063           0 :         device_lock(dev);
    1064             : 
    1065           0 :         if (dev->pm_domain) {
    1066           0 :                 info = "completing power domain ";
    1067           0 :                 callback = dev->pm_domain->ops.complete;
    1068           0 :         } else if (dev->type && dev->type->pm) {
    1069           0 :                 info = "completing type ";
    1070           0 :                 callback = dev->type->pm->complete;
    1071           0 :         } else if (dev->class && dev->class->pm) {
    1072           0 :                 info = "completing class ";
    1073           0 :                 callback = dev->class->pm->complete;
    1074           0 :         } else if (dev->bus && dev->bus->pm) {
    1075           0 :                 info = "completing bus ";
    1076           0 :                 callback = dev->bus->pm->complete;
    1077             :         }
    1078             : 
    1079           0 :         if (!callback && dev->driver && dev->driver->pm) {
    1080           0 :                 info = "completing driver ";
    1081           0 :                 callback = dev->driver->pm->complete;
    1082             :         }
    1083             : 
    1084           0 :         if (callback) {
    1085           0 :                 pm_dev_dbg(dev, state, info);
    1086           0 :                 callback(dev);
    1087             :         }
    1088             : 
    1089             :         device_unlock(dev);
    1090             : 
    1091             : out:
    1092           0 :         pm_runtime_put(dev);
    1093           0 : }
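
As a reference for the callback selection above, a hedged sketch of a driver
pairing ->prepare() with ->complete(); the foo_* names are hypothetical:

        #include <linux/device.h>
        #include <linux/pm.h>

        static int foo_prepare(struct device *dev)
        {
                /* Runs in device_prepare(); no new children after this. */
                return 0;
        }

        static void foo_complete(struct device *dev)
        {
                /* Undo foo_prepare(); device_complete() calls this under
                 * device_lock() once resume is done. */
        }

        static const struct dev_pm_ops foo_pm_ops = {
                .prepare  = foo_prepare,
                .complete = foo_complete,
        };
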
    1094             : 
    1095             : /**
    1096             :  * dpm_complete - Complete a PM transition for all non-sysdev devices.
    1097             :  * @state: PM transition of the system being carried out.
    1098             :  *
    1099             :  * Execute the ->complete() callbacks for all devices whose PM status is not
    1100             :  * DPM_ON (this allows new devices to be registered).
    1101             :  */
    1102           0 : void dpm_complete(pm_message_t state)
    1103             : {
    1104             :         struct list_head list;
    1105             : 
    1106           0 :         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
    1107             :         might_sleep();
    1108             : 
    1109           0 :         INIT_LIST_HEAD(&list);
    1110           0 :         mutex_lock(&dpm_list_mtx);
    1111           0 :         while (!list_empty(&dpm_prepared_list)) {
    1112           0 :                 struct device *dev = to_device(dpm_prepared_list.prev);
    1113             : 
    1114           0 :                 get_device(dev);
    1115           0 :                 dev->power.is_prepared = false;
    1116           0 :                 list_move(&dev->power.entry, &list);
    1117             : 
    1118           0 :                 mutex_unlock(&dpm_list_mtx);
    1119             : 
    1120           0 :                 trace_device_pm_callback_start(dev, "", state.event);
    1121           0 :                 device_complete(dev, state);
    1122           0 :                 trace_device_pm_callback_end(dev, 0);
    1123             : 
    1124           0 :                 put_device(dev);
    1125             : 
    1126           0 :                 mutex_lock(&dpm_list_mtx);
    1127             :         }
    1128           0 :         list_splice(&list, &dpm_list);
    1129           0 :         mutex_unlock(&dpm_list_mtx);
    1130             : 
    1131             :         /* Allow device probing and trigger re-probing of deferred devices */
    1132           0 :         device_unblock_probing();
    1133           0 :         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
    1134           0 : }
    1135             : 
    1136             : /**
    1137             :  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
    1138             :  * @state: PM transition of the system being carried out.
    1139             :  *
    1140             :  * Execute "resume" callbacks for all devices and complete the PM transition of
    1141             :  * the system.
    1142             :  */
    1143           0 : void dpm_resume_end(pm_message_t state)
    1144             : {
    1145           0 :         dpm_resume(state);
    1146           0 :         dpm_complete(state);
    1147           0 : }
    1148             : EXPORT_SYMBOL_GPL(dpm_resume_end);
    1149             : 
    1150             : 
    1151             : /*------------------------- Suspend routines -------------------------*/
    1152             : 
    1153             : /**
    1154             :  * resume_event - Return a "resume" message for given "suspend" sleep state.
    1155             :  * @sleep_state: PM message representing a sleep state.
    1156             :  *
    1157             :  * Return a PM message representing the resume event corresponding to given
    1158             :  * sleep state.
    1159             :  */
    1160             : static pm_message_t resume_event(pm_message_t sleep_state)
    1161             : {
    1162           0 :         switch (sleep_state.event) {
    1163             :         case PM_EVENT_SUSPEND:
    1164             :                 return PMSG_RESUME;
    1165             :         case PM_EVENT_FREEZE:
    1166             :         case PM_EVENT_QUIESCE:
    1167             :                 return PMSG_RECOVER;
    1168             :         case PM_EVENT_HIBERNATE:
    1169             :                 return PMSG_RESTORE;
    1170             :         }
    1171             :         return PMSG_ON;
    1172             : }
    1173             : 
    1174           0 : static void dpm_superior_set_must_resume(struct device *dev)
    1175             : {
    1176             :         struct device_link *link;
    1177             :         int idx;
    1178             : 
    1179           0 :         if (dev->parent)
    1180           0 :                 dev->parent->power.must_resume = true;
    1181             : 
    1182           0 :         idx = device_links_read_lock();
    1183             : 
    1184           0 :         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
    1185           0 :                 link->supplier->power.must_resume = true;
    1186             : 
    1187           0 :         device_links_read_unlock(idx);
    1188           0 : }
    1189             : 
    1190             : /**
    1191             :  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
    1192             :  * @dev: Device to handle.
    1193             :  * @state: PM transition of the system being carried out.
    1194             :  * @async: If true, the device is being suspended asynchronously.
    1195             :  *
    1196             :  * The driver of @dev will not receive interrupts while this function is being
    1197             :  * executed.
    1198             :  */
    1199           0 : static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
    1200             : {
    1201           0 :         pm_callback_t callback = NULL;
    1202           0 :         const char *info = NULL;
    1203           0 :         int error = 0;
    1204             : 
    1205             :         TRACE_DEVICE(dev);
    1206             :         TRACE_SUSPEND(0);
    1207             : 
    1208           0 :         dpm_wait_for_subordinate(dev, async);
    1209             : 
    1210           0 :         if (async_error)
    1211             :                 goto Complete;
    1212             : 
    1213           0 :         if (dev->power.syscore || dev->power.direct_complete)
    1214             :                 goto Complete;
    1215             : 
    1216           0 :         if (dev->pm_domain) {
    1217           0 :                 info = "noirq power domain ";
    1218           0 :                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
    1219           0 :         } else if (dev->type && dev->type->pm) {
    1220           0 :                 info = "noirq type ";
    1221           0 :                 callback = pm_noirq_op(dev->type->pm, state);
    1222           0 :         } else if (dev->class && dev->class->pm) {
    1223           0 :                 info = "noirq class ";
    1224           0 :                 callback = pm_noirq_op(dev->class->pm, state);
    1225           0 :         } else if (dev->bus && dev->bus->pm) {
    1226           0 :                 info = "noirq bus ";
    1227           0 :                 callback = pm_noirq_op(dev->bus->pm, state);
    1228             :         }
    1229           0 :         if (callback)
    1230             :                 goto Run;
    1231             : 
    1232           0 :         if (dev_pm_skip_suspend(dev))
    1233             :                 goto Skip;
    1234             : 
    1235           0 :         if (dev->driver && dev->driver->pm) {
    1236           0 :                 info = "noirq driver ";
    1237           0 :                 callback = pm_noirq_op(dev->driver->pm, state);
    1238             :         }
    1239             : 
    1240             : Run:
    1241           0 :         error = dpm_run_callback(callback, dev, state, info);
    1242           0 :         if (error) {
    1243           0 :                 async_error = error;
    1244           0 :                 goto Complete;
    1245             :         }
    1246             : 
    1247             : Skip:
    1248           0 :         dev->power.is_noirq_suspended = true;
    1249             : 
    1250             :         /*
    1251             :          * Skipping the resume of devices that were in use right before the
    1252             :          * system suspend (as indicated by their PM-runtime usage counters)
     1253             :          * would be suboptimal.  Also resume them when skipping their resume
     1254             :          * is not allowed.
    1255             :          */
    1256           0 :         if (atomic_read(&dev->power.usage_count) > 1 ||
    1257           0 :             !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
    1258             :               dev->power.may_skip_resume))
    1259           0 :                 dev->power.must_resume = true;
    1260             : 
    1261           0 :         if (dev->power.must_resume)
    1262           0 :                 dpm_superior_set_must_resume(dev);
    1263             : 
    1264             : Complete:
    1265           0 :         complete_all(&dev->power.completion);
    1266             :         TRACE_SUSPEND(error);
    1267           0 :         return error;
    1268             : }
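
The DPM_FLAG_MAY_SKIP_RESUME test above is driven by driver flags set before the
transition. A brief sketch of opting in from a hypothetical foo_probe();
dev_pm_set_driver_flags() and both flags are the real <linux/pm.h> interfaces:

        static int foo_probe(struct device *dev)
        {
                /*
                 * SMART_SUSPEND lets dev_pm_skip_suspend() bypass callbacks
                 * for an already runtime-suspended device; MAY_SKIP_RESUME
                 * allows may_skip_resume to survive the must_resume check
                 * above.
                 */
                dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
                                             DPM_FLAG_MAY_SKIP_RESUME);
                return 0;
        }
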
    1269             : 
    1270           0 : static void async_suspend_noirq(void *data, async_cookie_t cookie)
    1271             : {
    1272           0 :         struct device *dev = (struct device *)data;
    1273             :         int error;
    1274             : 
    1275           0 :         error = __device_suspend_noirq(dev, pm_transition, true);
    1276           0 :         if (error) {
    1277           0 :                 dpm_save_failed_dev(dev_name(dev));
    1278             :                 pm_dev_err(dev, pm_transition, " async", error);
    1279             :         }
    1280             : 
    1281           0 :         put_device(dev);
    1282           0 : }
    1283             : 
    1284           0 : static int device_suspend_noirq(struct device *dev)
    1285             : {
    1286           0 :         if (dpm_async_fn(dev, async_suspend_noirq))
    1287             :                 return 0;
    1288             : 
    1289           0 :         return __device_suspend_noirq(dev, pm_transition, false);
    1290             : }
    1291             : 
    1292           0 : static int dpm_noirq_suspend_devices(pm_message_t state)
    1293             : {
    1294           0 :         ktime_t starttime = ktime_get();
    1295           0 :         int error = 0;
    1296             : 
    1297           0 :         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
    1298           0 :         mutex_lock(&dpm_list_mtx);
    1299           0 :         pm_transition = state;
    1300           0 :         async_error = 0;
    1301             : 
    1302           0 :         while (!list_empty(&dpm_late_early_list)) {
    1303           0 :                 struct device *dev = to_device(dpm_late_early_list.prev);
    1304             : 
    1305           0 :                 get_device(dev);
    1306           0 :                 mutex_unlock(&dpm_list_mtx);
    1307             : 
    1308           0 :                 error = device_suspend_noirq(dev);
    1309             : 
    1310           0 :                 mutex_lock(&dpm_list_mtx);
    1311             : 
    1312           0 :                 if (error) {
    1313           0 :                         pm_dev_err(dev, state, " noirq", error);
    1314           0 :                         dpm_save_failed_dev(dev_name(dev));
    1315           0 :                 } else if (!list_empty(&dev->power.entry)) {
    1316           0 :                         list_move(&dev->power.entry, &dpm_noirq_list);
    1317             :                 }
    1318             : 
    1319           0 :                 mutex_unlock(&dpm_list_mtx);
    1320             : 
    1321           0 :                 put_device(dev);
    1322             : 
    1323           0 :                 mutex_lock(&dpm_list_mtx);
    1324             : 
    1325           0 :                 if (error || async_error)
    1326             :                         break;
    1327             :         }
    1328           0 :         mutex_unlock(&dpm_list_mtx);
    1329           0 :         async_synchronize_full();
    1330           0 :         if (!error)
    1331           0 :                 error = async_error;
    1332             : 
    1333           0 :         if (error) {
    1334           0 :                 suspend_stats.failed_suspend_noirq++;
    1335             :                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
    1336             :         }
    1337           0 :         dpm_show_time(starttime, state, error, "noirq");
    1338           0 :         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
    1339           0 :         return error;
    1340             : }
    1341             : 
    1342             : /**
    1343             :  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
    1344             :  * @state: PM transition of the system being carried out.
    1345             :  *
    1346             :  * Prevent device drivers' interrupt handlers from being called and invoke
    1347             :  * "noirq" suspend callbacks for all non-sysdev devices.
    1348             :  */
    1349           0 : int dpm_suspend_noirq(pm_message_t state)
    1350             : {
    1351             :         int ret;
    1352             : 
    1353           0 :         device_wakeup_arm_wake_irqs();
    1354           0 :         suspend_device_irqs();
    1355             : 
    1356           0 :         ret = dpm_noirq_suspend_devices(state);
    1357           0 :         if (ret)
    1358             :                 dpm_resume_noirq(resume_event(state));
    1359             : 
    1360           0 :         return ret;
    1361             : }
    1362             : 
    1363           0 : static void dpm_propagate_wakeup_to_parent(struct device *dev)
    1364             : {
    1365           0 :         struct device *parent = dev->parent;
    1366             : 
    1367           0 :         if (!parent)
    1368             :                 return;
    1369             : 
    1370           0 :         spin_lock_irq(&parent->power.lock);
    1371             : 
    1372           0 :         if (device_wakeup_path(dev) && !parent->power.ignore_children)
    1373           0 :                 parent->power.wakeup_path = true;
    1374             : 
    1375           0 :         spin_unlock_irq(&parent->power.lock);
    1376             : }
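
device_wakeup_path() reflects what the earlier suspend callbacks left behind. A
sketch of a driver ->suspend() that keeps its wake IRQ armed when wakeup is
enabled; struct foo and its irq field are hypothetical:

        #include <linux/interrupt.h>
        #include <linux/pm_wakeup.h>

        struct foo {
                int irq;
        };

        static int foo_suspend(struct device *dev)
        {
                struct foo *foo = dev_get_drvdata(dev);

                /* device_may_wakeup() also makes __device_suspend() set
                 * power.wakeup_path, which this helper propagates upward. */
                if (device_may_wakeup(dev))
                        enable_irq_wake(foo->irq);

                return 0;
        }
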
    1377             : 
    1378             : /**
    1379             :  * __device_suspend_late - Execute a "late suspend" callback for given device.
    1380             :  * @dev: Device to handle.
    1381             :  * @state: PM transition of the system being carried out.
    1382             :  * @async: If true, the device is being suspended asynchronously.
    1383             :  *
    1384             :  * Runtime PM is disabled for @dev while this function is being executed.
    1385             :  */
    1386           0 : static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
    1387             : {
    1388           0 :         pm_callback_t callback = NULL;
    1389           0 :         const char *info = NULL;
    1390           0 :         int error = 0;
    1391             : 
    1392             :         TRACE_DEVICE(dev);
    1393             :         TRACE_SUSPEND(0);
    1394             : 
    1395           0 :         __pm_runtime_disable(dev, false);
    1396             : 
    1397           0 :         dpm_wait_for_subordinate(dev, async);
    1398             : 
    1399           0 :         if (async_error)
    1400             :                 goto Complete;
    1401             : 
    1402           0 :         if (pm_wakeup_pending()) {
    1403           0 :                 async_error = -EBUSY;
    1404           0 :                 goto Complete;
    1405             :         }
    1406             : 
    1407           0 :         if (dev->power.syscore || dev->power.direct_complete)
    1408             :                 goto Complete;
    1409             : 
    1410           0 :         if (dev->pm_domain) {
    1411           0 :                 info = "late power domain ";
    1412           0 :                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
    1413           0 :         } else if (dev->type && dev->type->pm) {
    1414           0 :                 info = "late type ";
    1415           0 :                 callback = pm_late_early_op(dev->type->pm, state);
    1416           0 :         } else if (dev->class && dev->class->pm) {
    1417           0 :                 info = "late class ";
    1418           0 :                 callback = pm_late_early_op(dev->class->pm, state);
    1419           0 :         } else if (dev->bus && dev->bus->pm) {
    1420           0 :                 info = "late bus ";
    1421           0 :                 callback = pm_late_early_op(dev->bus->pm, state);
    1422             :         }
    1423           0 :         if (callback)
    1424             :                 goto Run;
    1425             : 
    1426           0 :         if (dev_pm_skip_suspend(dev))
    1427             :                 goto Skip;
    1428             : 
    1429           0 :         if (dev->driver && dev->driver->pm) {
    1430           0 :                 info = "late driver ";
    1431           0 :                 callback = pm_late_early_op(dev->driver->pm, state);
    1432             :         }
    1433             : 
    1434             : Run:
    1435           0 :         error = dpm_run_callback(callback, dev, state, info);
    1436           0 :         if (error) {
    1437           0 :                 async_error = error;
    1438           0 :                 goto Complete;
    1439             :         }
    1440           0 :         dpm_propagate_wakeup_to_parent(dev);
    1441             : 
    1442             : Skip:
    1443           0 :         dev->power.is_late_suspended = true;
    1444             : 
    1445             : Complete:
    1446             :         TRACE_SUSPEND(error);
    1447           0 :         complete_all(&dev->power.completion);
    1448           0 :         return error;
    1449             : }
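
pm_late_early_op() and pm_noirq_op() select the *_late/*_early and *_noirq
members of struct dev_pm_ops. A sketch of wiring all three suspend/resume phase
pairs for a hypothetical driver (callback bodies elided):

        static int foo_suspend(struct device *dev);
        static int foo_resume(struct device *dev);
        static int foo_suspend_late(struct device *dev);
        static int foo_resume_early(struct device *dev);
        static int foo_suspend_noirq(struct device *dev);
        static int foo_resume_noirq(struct device *dev);

        static const struct dev_pm_ops foo_pm_ops = {
                SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
                SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
                SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
        };
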
    1450             : 
    1451           0 : static void async_suspend_late(void *data, async_cookie_t cookie)
    1452             : {
    1453           0 :         struct device *dev = (struct device *)data;
    1454             :         int error;
    1455             : 
    1456           0 :         error = __device_suspend_late(dev, pm_transition, true);
    1457           0 :         if (error) {
    1458           0 :                 dpm_save_failed_dev(dev_name(dev));
    1459             :                 pm_dev_err(dev, pm_transition, " async", error);
    1460             :         }
    1461           0 :         put_device(dev);
    1462           0 : }
    1463             : 
    1464           0 : static int device_suspend_late(struct device *dev)
    1465             : {
    1466           0 :         if (dpm_async_fn(dev, async_suspend_late))
    1467             :                 return 0;
    1468             : 
    1469           0 :         return __device_suspend_late(dev, pm_transition, false);
    1470             : }
    1471             : 
    1472             : /**
    1473             :  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
    1474             :  * @state: PM transition of the system being carried out.
    1475             :  */
    1476           0 : int dpm_suspend_late(pm_message_t state)
    1477             : {
    1478           0 :         ktime_t starttime = ktime_get();
    1479           0 :         int error = 0;
    1480             : 
    1481           0 :         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
    1482             :         wake_up_all_idle_cpus();
    1483           0 :         mutex_lock(&dpm_list_mtx);
    1484           0 :         pm_transition = state;
    1485           0 :         async_error = 0;
    1486             : 
    1487           0 :         while (!list_empty(&dpm_suspended_list)) {
    1488           0 :                 struct device *dev = to_device(dpm_suspended_list.prev);
    1489             : 
    1490           0 :                 get_device(dev);
    1491             : 
    1492           0 :                 mutex_unlock(&dpm_list_mtx);
    1493             : 
    1494           0 :                 error = device_suspend_late(dev);
    1495             : 
    1496           0 :                 mutex_lock(&dpm_list_mtx);
    1497             : 
    1498           0 :                 if (!list_empty(&dev->power.entry))
    1499           0 :                         list_move(&dev->power.entry, &dpm_late_early_list);
    1500             : 
    1501           0 :                 if (error) {
    1502           0 :                         pm_dev_err(dev, state, " late", error);
    1503           0 :                         dpm_save_failed_dev(dev_name(dev));
    1504             :                 }
    1505             : 
    1506           0 :                 mutex_unlock(&dpm_list_mtx);
    1507             : 
    1508           0 :                 put_device(dev);
    1509             : 
    1510           0 :                 mutex_lock(&dpm_list_mtx);
    1511             : 
    1512           0 :                 if (error || async_error)
    1513             :                         break;
    1514             :         }
    1515           0 :         mutex_unlock(&dpm_list_mtx);
    1516           0 :         async_synchronize_full();
    1517           0 :         if (!error)
    1518           0 :                 error = async_error;
    1519           0 :         if (error) {
    1520           0 :                 suspend_stats.failed_suspend_late++;
    1521           0 :                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
    1522           0 :                 dpm_resume_early(resume_event(state));
    1523             :         }
    1524           0 :         dpm_show_time(starttime, state, error, "late");
    1525           0 :         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
    1526           0 :         return error;
    1527             : }
    1528             : 
    1529             : /**
    1530             :  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
    1531             :  * @state: PM transition of the system being carried out.
    1532             :  */
    1533           0 : int dpm_suspend_end(pm_message_t state)
    1534             : {
    1535           0 :         ktime_t starttime = ktime_get();
    1536             :         int error;
    1537             : 
    1538           0 :         error = dpm_suspend_late(state);
    1539           0 :         if (error)
    1540             :                 goto out;
    1541             : 
    1542           0 :         error = dpm_suspend_noirq(state);
    1543           0 :         if (error)
    1544           0 :                 dpm_resume_early(resume_event(state));
    1545             : 
    1546             : out:
    1547           0 :         dpm_show_time(starttime, state, error, "end");
    1548           0 :         return error;
    1549             : }
    1550             : EXPORT_SYMBOL_GPL(dpm_suspend_end);
    1551             : 
    1552             : /**
    1553             :  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
    1554             :  * @dev: Device to suspend.
    1555             :  * @state: PM transition of the system being carried out.
    1556             :  * @cb: Suspend callback to execute.
    1557             :  * @info: string description of caller.
    1558             :  */
    1559           0 : static int legacy_suspend(struct device *dev, pm_message_t state,
    1560             :                           int (*cb)(struct device *dev, pm_message_t state),
    1561             :                           const char *info)
    1562             : {
    1563             :         int error;
    1564             :         ktime_t calltime;
    1565             : 
    1566           0 :         calltime = initcall_debug_start(dev, cb);
    1567             : 
    1568           0 :         trace_device_pm_callback_start(dev, info, state.event);
    1569           0 :         error = cb(dev, state);
    1570           0 :         trace_device_pm_callback_end(dev, error);
    1571           0 :         suspend_report_result(dev, cb, error);
    1572             : 
    1573           0 :         initcall_debug_report(dev, calltime, cb, error);
    1574             : 
    1575           0 :         return error;
    1576             : }
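
The cb argument here still uses the old two-argument signature kept for legacy
bus/class code. A minimal (deprecated-style) example of such a callback;
foo_bus_suspend() is hypothetical:

        static int foo_bus_suspend(struct device *dev, pm_message_t state)
        {
                dev_dbg(dev, "legacy suspend, event %d\n", state.event);
                return 0;
        }
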
    1577             : 
    1578           0 : static void dpm_clear_superiors_direct_complete(struct device *dev)
    1579             : {
    1580             :         struct device_link *link;
    1581             :         int idx;
    1582             : 
    1583           0 :         if (dev->parent) {
    1584           0 :                 spin_lock_irq(&dev->parent->power.lock);
    1585           0 :                 dev->parent->power.direct_complete = false;
    1586           0 :                 spin_unlock_irq(&dev->parent->power.lock);
    1587             :         }
    1588             : 
    1589           0 :         idx = device_links_read_lock();
    1590             : 
    1591           0 :         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
    1592           0 :                 spin_lock_irq(&link->supplier->power.lock);
    1593           0 :                 link->supplier->power.direct_complete = false;
    1594           0 :                 spin_unlock_irq(&link->supplier->power.lock);
    1595             :         }
    1596             : 
    1597           0 :         device_links_read_unlock(idx);
    1598           0 : }
    1599             : 
    1600             : /**
    1601             :  * __device_suspend - Execute "suspend" callbacks for given device.
    1602             :  * @dev: Device to handle.
    1603             :  * @state: PM transition of the system being carried out.
    1604             :  * @async: If true, the device is being suspended asynchronously.
    1605             :  */
    1606           0 : static int __device_suspend(struct device *dev, pm_message_t state, bool async)
    1607             : {
    1608           0 :         pm_callback_t callback = NULL;
    1609           0 :         const char *info = NULL;
    1610           0 :         int error = 0;
    1611             :         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
    1612             : 
    1613             :         TRACE_DEVICE(dev);
    1614             :         TRACE_SUSPEND(0);
    1615             : 
    1616           0 :         dpm_wait_for_subordinate(dev, async);
    1617             : 
    1618           0 :         if (async_error) {
    1619           0 :                 dev->power.direct_complete = false;
    1620           0 :                 goto Complete;
    1621             :         }
    1622             : 
    1623             :         /*
    1624             :          * Wait for possible runtime PM transitions of the device in progress
    1625             :          * to complete and if there's a runtime resume request pending for it,
    1626             :          * resume it before proceeding with invoking the system-wide suspend
    1627             :          * callbacks for it.
    1628             :          *
    1629             :          * If the system-wide suspend callbacks below change the configuration
    1630             :          * of the device, they must disable runtime PM for it or otherwise
    1631             :          * ensure that its runtime-resume callbacks will not be confused by that
    1632             :          * change in case they are invoked going forward.
    1633             :          */
    1634           0 :         pm_runtime_barrier(dev);
    1635             : 
    1636           0 :         if (pm_wakeup_pending()) {
    1637           0 :                 dev->power.direct_complete = false;
    1638           0 :                 async_error = -EBUSY;
    1639           0 :                 goto Complete;
    1640             :         }
    1641             : 
    1642           0 :         if (dev->power.syscore)
    1643             :                 goto Complete;
    1644             : 
    1645             :         /* Avoid direct_complete to let wakeup_path propagate. */
    1646           0 :         if (device_may_wakeup(dev) || device_wakeup_path(dev))
    1647           0 :                 dev->power.direct_complete = false;
    1648             : 
    1649           0 :         if (dev->power.direct_complete) {
    1650           0 :                 if (pm_runtime_status_suspended(dev)) {
    1651           0 :                         pm_runtime_disable(dev);
    1652           0 :                         if (pm_runtime_status_suspended(dev)) {
    1653             :                                 pm_dev_dbg(dev, state, "direct-complete ");
    1654             :                                 goto Complete;
    1655             :                         }
    1656             : 
    1657           0 :                         pm_runtime_enable(dev);
    1658             :                 }
    1659           0 :                 dev->power.direct_complete = false;
    1660             :         }
    1661             : 
    1662           0 :         dev->power.may_skip_resume = true;
    1663           0 :         dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
    1664             : 
    1665             :         dpm_watchdog_set(&wd, dev);
    1666           0 :         device_lock(dev);
    1667             : 
    1668           0 :         if (dev->pm_domain) {
    1669           0 :                 info = "power domain ";
    1670           0 :                 callback = pm_op(&dev->pm_domain->ops, state);
    1671             :                 goto Run;
    1672             :         }
    1673             : 
    1674           0 :         if (dev->type && dev->type->pm) {
    1675           0 :                 info = "type ";
    1676           0 :                 callback = pm_op(dev->type->pm, state);
    1677             :                 goto Run;
    1678             :         }
    1679             : 
    1680           0 :         if (dev->class && dev->class->pm) {
    1681           0 :                 info = "class ";
    1682           0 :                 callback = pm_op(dev->class->pm, state);
    1683             :                 goto Run;
    1684             :         }
    1685             : 
    1686           0 :         if (dev->bus) {
    1687           0 :                 if (dev->bus->pm) {
    1688           0 :                         info = "bus ";
    1689           0 :                         callback = pm_op(dev->bus->pm, state);
    1690           0 :                 } else if (dev->bus->suspend) {
    1691           0 :                         pm_dev_dbg(dev, state, "legacy bus ");
    1692           0 :                         error = legacy_suspend(dev, state, dev->bus->suspend,
    1693             :                                                 "legacy bus ");
    1694           0 :                         goto End;
    1695             :                 }
    1696             :         }
    1697             : 
    1698             :  Run:
    1699           0 :         if (!callback && dev->driver && dev->driver->pm) {
    1700           0 :                 info = "driver ";
    1701           0 :                 callback = pm_op(dev->driver->pm, state);
    1702             :         }
    1703             : 
    1704           0 :         error = dpm_run_callback(callback, dev, state, info);
    1705             : 
    1706             :  End:
    1707           0 :         if (!error) {
    1708           0 :                 dev->power.is_suspended = true;
    1709           0 :                 if (device_may_wakeup(dev))
    1710           0 :                         dev->power.wakeup_path = true;
    1711             : 
    1712           0 :                 dpm_propagate_wakeup_to_parent(dev);
    1713           0 :                 dpm_clear_superiors_direct_complete(dev);
    1714             :         }
    1715             : 
    1716             :         device_unlock(dev);
    1717             :         dpm_watchdog_clear(&wd);
    1718             : 
    1719             :  Complete:
    1720           0 :         if (error)
    1721           0 :                 async_error = error;
    1722             : 
    1723           0 :         complete_all(&dev->power.completion);
    1724             :         TRACE_SUSPEND(error);
    1725           0 :         return error;
    1726             : }
    1727             : 
    1728           0 : static void async_suspend(void *data, async_cookie_t cookie)
    1729             : {
    1730           0 :         struct device *dev = (struct device *)data;
    1731             :         int error;
    1732             : 
    1733           0 :         error = __device_suspend(dev, pm_transition, true);
    1734           0 :         if (error) {
    1735           0 :                 dpm_save_failed_dev(dev_name(dev));
    1736             :                 pm_dev_err(dev, pm_transition, " async", error);
    1737             :         }
    1738             : 
    1739           0 :         put_device(dev);
    1740           0 : }
    1741             : 
    1742           0 : static int device_suspend(struct device *dev)
    1743             : {
    1744           0 :         if (dpm_async_fn(dev, async_suspend))
    1745             :                 return 0;
    1746             : 
    1747           0 :         return __device_suspend(dev, pm_transition, false);
    1748             : }
    1749             : 
    1750             : /**
    1751             :  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
    1752             :  * @state: PM transition of the system being carried out.
    1753             :  */
    1754           0 : int dpm_suspend(pm_message_t state)
    1755             : {
    1756           0 :         ktime_t starttime = ktime_get();
    1757           0 :         int error = 0;
    1758             : 
    1759           0 :         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
    1760             :         might_sleep();
    1761             : 
    1762             :         devfreq_suspend();
    1763             :         cpufreq_suspend();
    1764             : 
    1765           0 :         mutex_lock(&dpm_list_mtx);
    1766           0 :         pm_transition = state;
    1767           0 :         async_error = 0;
    1768           0 :         while (!list_empty(&dpm_prepared_list)) {
    1769           0 :                 struct device *dev = to_device(dpm_prepared_list.prev);
    1770             : 
    1771           0 :                 get_device(dev);
    1772             : 
    1773           0 :                 mutex_unlock(&dpm_list_mtx);
    1774             : 
    1775           0 :                 error = device_suspend(dev);
    1776             : 
    1777           0 :                 mutex_lock(&dpm_list_mtx);
    1778             : 
    1779           0 :                 if (error) {
    1780           0 :                         pm_dev_err(dev, state, "", error);
    1781           0 :                         dpm_save_failed_dev(dev_name(dev));
    1782           0 :                 } else if (!list_empty(&dev->power.entry)) {
    1783           0 :                         list_move(&dev->power.entry, &dpm_suspended_list);
    1784             :                 }
    1785             : 
    1786           0 :                 mutex_unlock(&dpm_list_mtx);
    1787             : 
    1788           0 :                 put_device(dev);
    1789             : 
    1790           0 :                 mutex_lock(&dpm_list_mtx);
    1791             : 
    1792           0 :                 if (error || async_error)
    1793             :                         break;
    1794             :         }
    1795           0 :         mutex_unlock(&dpm_list_mtx);
    1796           0 :         async_synchronize_full();
    1797           0 :         if (!error)
    1798           0 :                 error = async_error;
    1799           0 :         if (error) {
    1800           0 :                 suspend_stats.failed_suspend++;
    1801             :                 dpm_save_failed_step(SUSPEND_SUSPEND);
    1802             :         }
    1803           0 :         dpm_show_time(starttime, state, error, NULL);
    1804           0 :         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
    1805           0 :         return error;
    1806             : }
    1807             : 
    1808             : /**
    1809             :  * device_prepare - Prepare a device for system power transition.
    1810             :  * @dev: Device to handle.
    1811             :  * @state: PM transition of the system being carried out.
    1812             :  *
    1813             :  * Execute the ->prepare() callback(s) for given device.  No new children of the
    1814             :  * device may be registered after this function has returned.
    1815             :  */
    1816           0 : static int device_prepare(struct device *dev, pm_message_t state)
    1817             : {
    1818           0 :         int (*callback)(struct device *) = NULL;
    1819           0 :         int ret = 0;
    1820             : 
    1821             :         /*
    1822             :          * If a device's parent goes into runtime suspend at the wrong time,
    1823             :          * it won't be possible to resume the device.  To prevent this we
    1824             :          * block runtime suspend here, during the prepare phase, and allow
    1825             :          * it again during the complete phase.
    1826             :          */
    1827           0 :         pm_runtime_get_noresume(dev);
    1828             : 
    1829           0 :         if (dev->power.syscore)
    1830             :                 return 0;
    1831             : 
    1832           0 :         device_lock(dev);
    1833             : 
    1834           0 :         dev->power.wakeup_path = false;
    1835             : 
    1836           0 :         if (dev->power.no_pm_callbacks)
    1837             :                 goto unlock;
    1838             : 
    1839           0 :         if (dev->pm_domain)
    1840           0 :                 callback = dev->pm_domain->ops.prepare;
    1841           0 :         else if (dev->type && dev->type->pm)
    1842           0 :                 callback = dev->type->pm->prepare;
    1843           0 :         else if (dev->class && dev->class->pm)
    1844           0 :                 callback = dev->class->pm->prepare;
    1845           0 :         else if (dev->bus && dev->bus->pm)
    1846           0 :                 callback = dev->bus->pm->prepare;
    1847             : 
    1848           0 :         if (!callback && dev->driver && dev->driver->pm)
    1849           0 :                 callback = dev->driver->pm->prepare;
    1850             : 
    1851           0 :         if (callback)
    1852           0 :                 ret = callback(dev);
    1853             : 
    1854             : unlock:
    1855           0 :         device_unlock(dev);
    1856             : 
    1857           0 :         if (ret < 0) {
    1858           0 :                 suspend_report_result(dev, callback, ret);
    1859           0 :                 pm_runtime_put(dev);
    1860           0 :                 return ret;
    1861             :         }
    1862             :         /*
    1863             :          * A positive return value from ->prepare() means "this device appears
    1864             :          * to be runtime-suspended and its state is fine, so if it really is
    1865             :          * runtime-suspended, you can leave it in that state provided that you
    1866             :          * will do the same thing with all of its descendants".  This only
    1867             :          * applies to suspend transitions, however.
    1868             :          */
    1869           0 :         spin_lock_irq(&dev->power.lock);
    1870           0 :         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
    1871           0 :                 (ret > 0 || dev->power.no_pm_callbacks) &&
    1872           0 :                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
    1873           0 :         spin_unlock_irq(&dev->power.lock);
    1874           0 :         return 0;
    1875             : }
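
A sketch of a ->prepare() that opts into the direct_complete optimization
described above, similar in spirit to what the PCI core's prepare does;
foo_prepare() is hypothetical:

        #include <linux/pm_runtime.h>

        static int foo_prepare(struct device *dev)
        {
                /*
                 * Positive return: "this device appears runtime-suspended
                 * and its state is fine" -- subject to the checks at the
                 * end of device_prepare().
                 */
                return pm_runtime_suspended(dev);
        }
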
    1876             : 
    1877             : /**
    1878             :  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
    1879             :  * @state: PM transition of the system being carried out.
    1880             :  *
    1881             :  * Execute the ->prepare() callback(s) for all devices.
    1882             :  */
    1883           0 : int dpm_prepare(pm_message_t state)
    1884             : {
    1885           0 :         int error = 0;
    1886             : 
    1887           0 :         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
    1888             :         might_sleep();
    1889             : 
    1890             :         /*
     1891             :          * Give the known devices a chance to complete their probes before
     1892             :          * probing is disabled. This sync point is important at least at
     1893             :          * boot time and during hibernation restore.
    1894             :          */
    1895           0 :         wait_for_device_probe();
    1896             :         /*
     1897             :          * Probing devices during suspend or hibernation is unsafe and makes
     1898             :          * system behavior unpredictable, so prohibit device probing here and
     1899             :          * defer those probes instead. The normal behavior will be restored
     1900             :          * in dpm_complete().
    1901             :          */
    1902           0 :         device_block_probing();
    1903             : 
    1904           0 :         mutex_lock(&dpm_list_mtx);
    1905           0 :         while (!list_empty(&dpm_list) && !error) {
    1906           0 :                 struct device *dev = to_device(dpm_list.next);
    1907             : 
    1908           0 :                 get_device(dev);
    1909             : 
    1910           0 :                 mutex_unlock(&dpm_list_mtx);
    1911             : 
    1912           0 :                 trace_device_pm_callback_start(dev, "", state.event);
    1913           0 :                 error = device_prepare(dev, state);
    1914           0 :                 trace_device_pm_callback_end(dev, error);
    1915             : 
    1916           0 :                 mutex_lock(&dpm_list_mtx);
    1917             : 
    1918           0 :                 if (!error) {
    1919           0 :                         dev->power.is_prepared = true;
    1920           0 :                         if (!list_empty(&dev->power.entry))
    1921           0 :                                 list_move_tail(&dev->power.entry, &dpm_prepared_list);
    1922           0 :                 } else if (error == -EAGAIN) {
    1923             :                         error = 0;
    1924             :                 } else {
    1925           0 :                         dev_info(dev, "not prepared for power transition: code %d\n",
    1926             :                                  error);
    1927             :                 }
    1928             : 
    1929           0 :                 mutex_unlock(&dpm_list_mtx);
    1930             : 
    1931           0 :                 put_device(dev);
    1932             : 
    1933           0 :                 mutex_lock(&dpm_list_mtx);
    1934             :         }
    1935           0 :         mutex_unlock(&dpm_list_mtx);
    1936           0 :         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
    1937           0 :         return error;
    1938             : }
    1939             : 
    1940             : /**
    1941             :  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
    1942             :  * @state: PM transition of the system being carried out.
    1943             :  *
    1944             :  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
    1945             :  * callbacks for them.
    1946             :  */
    1947           0 : int dpm_suspend_start(pm_message_t state)
    1948             : {
    1949           0 :         ktime_t starttime = ktime_get();
    1950             :         int error;
    1951             : 
    1952           0 :         error = dpm_prepare(state);
    1953           0 :         if (error) {
    1954           0 :                 suspend_stats.failed_prepare++;
    1955             :                 dpm_save_failed_step(SUSPEND_PREPARE);
    1956             :         } else
    1957           0 :                 error = dpm_suspend(state);
    1958           0 :         dpm_show_time(starttime, state, error, "start");
    1959           0 :         return error;
    1960             : }
    1961             : EXPORT_SYMBOL_GPL(dpm_suspend_start);
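
How these entry points compose, roughly the shape of the core suspend sequence
in kernel/power/; a simplified sketch with all platform details elided:

        static int foo_enter_sleep(void)        /* illustrative only */
        {
                int error;

                error = dpm_suspend_start(PMSG_SUSPEND);  /* prepare + suspend */
                if (error)
                        goto resume;

                error = dpm_suspend_end(PMSG_SUSPEND);    /* late + noirq */
                if (error)
                        goto resume;    /* already unwound to "suspended" */

                /* ... put the platform to sleep here ... */

                dpm_resume_start(PMSG_RESUME);            /* noirq + early */
        resume:
                dpm_resume_end(PMSG_RESUME);              /* resume + complete */
                return error;
        }
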
    1962             : 
    1963           0 : void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
    1964             : {
    1965           0 :         if (ret)
    1966           0 :                 dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
    1967           0 : }
    1968             : EXPORT_SYMBOL_GPL(__suspend_report_result);
    1969             : 
    1970             : /**
    1971             :  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
    1972             :  * @subordinate: Device that needs to wait for @dev.
    1973             :  * @dev: Device to wait for.
    1974             :  */
    1975           0 : int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
    1976             : {
    1977           0 :         dpm_wait(dev, subordinate->power.async_suspend);
    1978           0 :         return async_error;
    1979             : }
    1980             : EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
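
A sketch of using this from a resume callback to serialize against a device that
is neither a parent nor a supplier of @subordinate; struct foo and its companion
pointer are hypothetical (device links are usually preferable when they apply):

        struct foo {
                struct device *companion;
        };

        static int foo_resume(struct device *dev)
        {
                struct foo *foo = dev_get_drvdata(dev);
                int ret;

                /* Block until the companion's own resume has completed. */
                ret = device_pm_wait_for_dev(dev, foo->companion);
                if (ret)
                        return ret;

                /* ... safe to touch the companion now ... */
                return 0;
        }
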
    1981             : 
    1982             : /**
    1983             :  * dpm_for_each_dev - device iterator.
    1984             :  * @data: data for the callback.
    1985             :  * @fn: function to be called for each device.
    1986             :  *
    1987             :  * Iterate over devices in dpm_list, and call @fn for each device,
    1988             :  * passing it @data.
    1989             :  */
    1990           0 : void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
    1991             : {
    1992             :         struct device *dev;
    1993             : 
    1994           0 :         if (!fn)
    1995             :                 return;
    1996             : 
    1997             :         device_pm_lock();
    1998           0 :         list_for_each_entry(dev, &dpm_list, power.entry)
    1999           0 :                 fn(dev, data);
    2000             :         device_pm_unlock();
    2001             : }
    2002             : EXPORT_SYMBOL_GPL(dpm_for_each_dev);
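
A small usage sketch: counting the devices currently on dpm_list with a
hypothetical helper; the callback runs with the PM list lock held:

        static void foo_count_dev(struct device *dev, void *data)
        {
                (*(unsigned int *)data)++;
        }

        static unsigned int foo_count_pm_devices(void)
        {
                unsigned int n = 0;

                dpm_for_each_dev(&n, foo_count_dev);
                return n;
        }
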
    2003             : 
    2004         602 : static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
    2005             : {
    2006         602 :         if (!ops)
    2007             :                 return true;
    2008             : 
    2009         136 :         return !ops->prepare &&
    2010          68 :                !ops->suspend &&
    2011           0 :                !ops->suspend_late &&
    2012           0 :                !ops->suspend_noirq &&
    2013           0 :                !ops->resume_noirq &&
    2014           0 :                !ops->resume_early &&
    2015          68 :                !ops->resume &&
    2016           0 :                !ops->complete;
    2017             : }
    2018             : 
    2019         606 : void device_pm_check_callbacks(struct device *dev)
    2020             : {
    2021             :         unsigned long flags;
    2022             : 
    2023         606 :         spin_lock_irqsave(&dev->power.lock, flags);
    2024         606 :         dev->power.no_pm_callbacks =
    2025         682 :                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
    2026         546 :                  !dev->bus->suspend && !dev->bus->resume)) &&
    2027        1604 :                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
    2028        1078 :                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
    2029        2288 :                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
    2030         538 :                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
    2031           0 :                  !dev->driver->suspend && !dev->driver->resume));
    2032        1212 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    2033         606 : }
    2034             : 
    2035           0 : bool dev_pm_skip_suspend(struct device *dev)
    2036             : {
    2037           0 :         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
    2038           0 :                 pm_runtime_status_suspended(dev);
    2039             : }
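
A sketch of how a middle layer might consult this from its own noirq suspend
callback, in the way the PCI bus code does (simplified; the foo_bus name is
hypothetical):

        static int foo_bus_suspend_noirq(struct device *dev)
        {
                /* Already runtime-suspended with DPM_FLAG_SMART_SUSPEND set:
                 * leave the device alone rather than suspending it twice. */
                if (dev_pm_skip_suspend(dev))
                        return 0;

                /* ... save state and put the device into low power ... */
                return 0;
        }
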

Generated by: LCOV version 1.14